1669215
|
import xapian
from twisted.internet import defer, reactor, threads
from twisted.python import log
from twisted.web import xmlrpc
from bnw.core import bnw_objects
from bnw.search.indexer import Indexer
class RPCSearch(xmlrpc.XMLRPC):
def __init__(self, dbpath, language):
xmlrpc.XMLRPC.__init__(self, allowNone=True)
self.indexer = Indexer(dbpath, language)
self.db = xapian.Database(dbpath)
self.stemmer = xapian.Stem(language)
self.query_parser = xapian.QueryParser()
self.query_parser.set_stemmer(self.stemmer)
self.query_parser.set_stemming_strategy(xapian.QueryParser.STEM_ALL)
self.query_parser.add_boolean_prefix('author', 'A')
self.query_parser.add_boolean_prefix('user', 'A')
self.query_parser.add_boolean_prefix('type', 'XTYPE')
self.query_parser.add_prefix('clubs', 'XCLUBS')
self.query_parser.add_prefix('tags', 'XTAGS')
date_proc = xapian.DateValueRangeProcessor(Indexer.DATE)
self.query_parser.add_valuerangeprocessor(date_proc)
self.run_incremental_indexing()
@defer.inlineCallbacks
def run_incremental_indexing(self):
self.indexed = 0
c1 = yield bnw_objects.Message.count({'indexed': {'$exists': False}})
c2 = yield bnw_objects.Comment.count({'indexed': {'$exists': False}})
self.total = c1 + c2
self._run_incremental_indexing()
@defer.inlineCallbacks
def _run_incremental_indexing(self):
bnw_o = bnw_objects.Message
objs = yield bnw_o.find({'indexed': {'$exists': False}}, limit=500)
objs = list(objs)
if not objs:
bnw_o = bnw_objects.Comment
objs = yield bnw_o.find({'indexed': {'$exists': False}}, limit=500)
objs = list(objs)
if not objs:
log.msg('=== Indexing is over. Will repeat an hour later. ===')
reactor.callLater(3600, self.run_incremental_indexing)
return
yield threads.deferToThread(self.indexer.create_index, objs)
ids = [obj['_id'] for obj in objs]
yield bnw_o.mupdate(
{'_id': {'$in': ids}}, {'$set': {'indexed': True}},
multi=True)
self.indexed += len(objs)
log.msg('Indexed %d/%d...' % (self.indexed, self.total))
reactor.callLater(0.01, self._run_incremental_indexing)
PAGE_SIZE = 20
def xmlrpc_search(self, text, page):
        # TODO: Run queries in threads because this is a blocking operation (see the threaded sketch after this class).
if page < 0:
return
try:
query = self.query_parser.parse_query(text)
except xapian.QueryParserError:
return
enquire = xapian.Enquire(self.db)
enquire.set_query(query)
self.db.reopen()
def process_match(match):
doc = match.document
return dict(
id=doc.get_value(Indexer.ID),
user=doc.get_value(Indexer.USER),
date=float(doc.get_value(Indexer.DATE_ORIG)),
type=doc.get_value(Indexer.TYPE),
tags_info=doc.get_value(Indexer.TAGS_INFO),
text=doc.get_data().decode('utf-8'),
percent=match.percent)
matches = enquire.get_mset(page * self.PAGE_SIZE, self.PAGE_SIZE)
estimated = matches.get_matches_estimated()
        results = [process_match(match) for match in matches]  # concrete list (map() is a lazy iterator on Python 3)
return dict(estimated=estimated, results=results)
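# Hedged sketch, not part of the original module: one way to address the TODO
# in xmlrpc_search above is to run the blocking xapian query in twisted's
# thread pool. xmlrpc.XMLRPC accepts a Deferred returned from an xmlrpc_*
# method, so a thin subclass can reuse the blocking implementation unchanged.
# Note that sharing one xapian.Database across threads may need extra care.
class ThreadedRPCSearch(RPCSearch):
    def xmlrpc_search(self, text, page):
        # deferToThread moves the parent's blocking search off the reactor thread.
        return threads.deferToThread(RPCSearch.xmlrpc_search, self, text, page)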
|
1669334
|
import datetime
class FeedCollector (object):
def __date_to_datetime(self, d):
if isinstance(d, datetime.date):
return datetime.datetime(d.year, d.month, d.day)
else:
return d
def collect_new_content(self, feed, last_sent):
"""Returns exactly those items in the given feed that are newer than the
last_sent datetime. Converts dates to datetimes for comparison, if
necessary."""
content = []
last_sent = self.__date_to_datetime(last_sent)
for item in feed.get_content():
last_updated = feed.get_last_updated(item)
last_updated = self.__date_to_datetime(last_updated)
if last_updated > last_sent:
content.append(item)
return content
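# Hedged usage sketch (not in the original): collect_new_content promotes
# plain dates to datetimes before comparing, so date/datetime mixes work.
# FakeFeed below is hypothetical and only illustrates the expected interface.
if __name__ == '__main__':
    class FakeFeed(object):
        def get_content(self):
            return [('old', datetime.date(2020, 1, 1)),
                    ('new', datetime.datetime(2021, 6, 1, 12, 0))]
        def get_last_updated(self, item):
            return item[1]
    fresh = FeedCollector().collect_new_content(FakeFeed(), datetime.date(2021, 1, 1))
    assert [name for name, _ in fresh] == ['new']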
|
1669354
|
import unittest
from NFL_Draftkings.GetPlayerDKScores import *
class TestPublicApiMethods(unittest.TestCase):
def test_get_scores_empty(self):
data = get_scores(season=2015)
assert len(data) == 585
def test_get_scores_name(self):
data = get_scores(name='<NAME>', season=2015)
assert len(data) == 1 and data[0]['name'] == '<NAME>'
def test_get_scores_name_and_week(self):
data = get_scores(name='<NAME>', week=1, season=2015)
assert len(data) == 1 and data[0]['name'] == '<NAME>'
def test_get_scores_position(self):
data = get_scores(position='QB', season=2015)
assert len(data) == 72 and not [x for x in data if x['position'] != 'QB']
def test_get_scores_position_and_week(self):
data = get_scores(position='QB', week=1, season=2015)
assert len(data) == 35 and not [x for x in data if x['position'] != 'QB']
def test_get_scores_position_and_week_and_name_and_team(self):
data = get_scores(name='<NAME>', position='QB', team='NE', week=1, season=2015)
assert len(data) == 1 and not [x for x in data if x['position'] != 'QB']
def test_get_weekly_scores_empty(self):
data = get_weekly_scores(season=2015)
assert len(data) == 5960
def test_get_weekly_scores_name(self):
data = get_weekly_scores(name='<NAME>', season=2015)
assert len(data) == 16 and not [x for x in data if x['name'] != '<NAME>']
def test_get_weekly_scores_name_and_weeks(self):
data = get_weekly_scores(name='<NAME>', weeks=[1,2,3], season=2015)
assert len(data) == 3 and not [x for x in data if x['name'] != '<NAME>']
def test_get_weekly_scores_position(self):
data = get_weekly_scores(position='WR', season=2015)
assert len(data) == 2170 and not [x for x in data if x['position'] != 'WR']
def test_get_weekly_scores_position_and_week(self):
data = get_weekly_scores(position='WR', weeks=[1,2,3], season=2015)
assert len(data) == 402 and not [x for x in data if x['position'] != 'WR']
def test_get_weekly_scores_position_and_week_and_name_and_team(self):
data = get_weekly_scores(name='<NAME>', position='WR', team='NE', weeks=[1,2,3], season=2015)
assert len(data) == 3 and not [x for x in data if x['name'] != '<NAME>']
def test_get_top_performers_empty(self):
data = get_top_performers(season=2015)
assert len(data) == 10 and data[0]['name'] == '<NAME>'
def test_get_top_performers_position(self):
data = get_top_performers(position='RB', season=2015)
assert len(data) == 10 and not [x for x in data if x['position'] != 'RB']
def test_get_top_performers_team(self):
data = get_top_performers(team='NE', season=2015)
assert len(data) == 10 and not [x for x in data if x['teamAbbr'] != 'NE']
def test_get_top_performers_week(self):
data = get_top_performers(week=1, season=2015)
assert len(data) == 10 and data[0]['name'] == '<NAME>'
def test_get_top_performers_top_15(self):
data = get_top_performers(top=15, season=2015)
assert len(data) == 15 and data[0]['name'] == '<NAME>'
if __name__ == '__main__':
unittest.main()
|
1669389
|
import json
import requests
class GoogleMapPlotter(object):
def __init__(self, center_lat, center_lng, zoom, apikey=""):
self.center = (float(center_lat), float(center_lng))
self.zoom = int(zoom)
self.apikey = str(apikey)
self.points = []
def from_geocode(self, location_string, zoom=13):
lat, lng = self.geocode(location_string)
return (lat, lng, zoom)
def geocode(self, location_string):
        # Let requests build and URL-encode the query string; addresses often
        # contain spaces and commas that a raw format() call would not escape.
        r = requests.get(
            "https://maps.googleapis.com/maps/api/geocode/json",
            params={"address": location_string, "key": self.apikey},
        )
geocode_result = json.loads(r.text)
latlng_dict = geocode_result["results"][0]["geometry"]["location"]
return latlng_dict["lat"], latlng_dict["lng"]
def marker(self, lat, long, img, id=None):
self.points.append((lat, long, img, id))
def draw(self):
with open("template.txt", "r") as file:
template = str(file.read())
        # apikey may legitimately be empty; center and zoom are always set in
        # __init__, so the old resets to empty tuples (which would break the
        # format call below) are unnecessary.
        if not self.apikey:
            self.apikey = ""
markers = self.write_points()
try:
return template.format(
api_key=self.apikey,
lat=self.center[0],
long=self.center[1],
zoom=self.zoom,
markers=markers,
)
        except Exception:
            # Template formatting failed (e.g. a missing placeholder); return
            # None to the caller instead of raising.
            return None
def write_points(self):
string = ""
for point in self.points:
point = self.write_point(point[0], point[1], point[2], point[3])
string += point
return string
def write_point(self, lat, long, img, id=None):
if id:
marker = """
var latlng = new google.maps.LatLng({lat}, {long});
var img = new google.maps.MarkerImage('{img}');
var marker_{id} = new google.maps.Marker({{
icon: '{img}',
animation: google.maps.Animation.DROP,
position: latlng
}});
marker_{id}.setMap(map);
marker_{id}.addListener('click', function() {{
window.parent.postMessage(["details","{id}"], "*");
}});
""".format(
lat=lat, long=long, img=img, id=id
)
else:
marker = """
var latlng = new google.maps.LatLng({lat}, {long});
var img = new google.maps.MarkerImage('{img}');
var marker = new google.maps.Marker({{
icon: '{img}',
animation: google.maps.Animation.DROP,
position: latlng
}});
marker.setMap(map);
""".format(
lat=lat, long=long, img=img
)
return marker
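# Hedged usage sketch (not in the original): build a plotter, add a marker and
# render the map. Assumes a template.txt with {api_key}, {lat}, {long}, {zoom}
# and {markers} placeholders; the API key and icon URL below are placeholders.
if __name__ == '__main__':
    plotter = GoogleMapPlotter(37.428, -122.145, 16, apikey='YOUR_API_KEY')
    plotter.marker(37.428, -122.145, 'http://example.com/icon.png', id='site-1')
    html = plotter.draw()  # returns None if the template cannot be formatted
    if html is not None:
        with open('map.html', 'w') as f:
            f.write(html)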
|
1669444
|
from future.utils import python_2_unicode_compatible
from viberbot.api.event_type import EventType
from viberbot.api.viber_requests.viber_request import ViberRequest
@python_2_unicode_compatible
class ViberDeliveredRequest(ViberRequest):
def __init__(self):
super(ViberDeliveredRequest, self).__init__(EventType.DELIVERED)
self._message_token = None
self._user_id = None
self._chat_id = None
def from_dict(self, request_dict):
super(ViberDeliveredRequest, self).from_dict(request_dict)
self._message_token = request_dict['message_token']
self._user_id = request_dict.get('user_id', None)
self._chat_id = request_dict.get('chat_id', None)
return self
@property
def message_token(self):
return self._message_token
@property
def user_id(self):
return self._user_id
@property
def chat_id(self):
return self._chat_id
def __str__(self):
return u"ViberDeliveredRequest [{0}, message_token={1}, user_id={2}]" \
.format(
super(ViberDeliveredRequest, self).__str__(),
self._message_token,
self._user_id)
|
1669448
|
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d, UnivariateSpline
NFILES = 5
FILENAME = "omw_" # prefix for files followed by filenumber
class Prior_class(object):
'''Prior class'''
def __init__(self,priorname,hyperparams):
        '''Input:
        priorname - keyword, one of "uniform", "normal", "gamma" or "nonstandard"
        hyperparams - hyperparameter list for the chosen prior, e.g. [min, max]
                      for "uniform", [mu, sigma] for "normal" (sigma is a
                      standard deviation, not a variance), [shape, rate] for "gamma"
        '''
self.priorname=priorname
self.hyperparams = hyperparams
if self.priorname == "nonstandard": #first hyperparam = column from file to be read, specify filename and number of files above
'''useful for sampling from non-standard discrete pdf e.g. Planck/WMAP chain'''
self.read_data(hyperparams[0])
self.inv_transform_spline()
self.pdf_spline()
def read_data(self,colnum):
'''Only for "nonstandard". Method to read discrete pdf for parameter from file
Input: colnum: column number to be read from file
'''
        self.param = []
        for i in range(1, NFILES + 1):  # data files are numbered 1..NFILES
            d = np.loadtxt(FILENAME + str(i) + ".txt")
            self.param.extend(d[:, colnum])
def inv_transform_spline(self):
'''Only for "nonstandard". Method to create inverse spline to discrete cumulative distribution function
to allow drawing random variables.
Warning: user should check that spline faithfully matches actual cdf.
'''
srt_param=np.sort(self.param)
cdf = np.array(range(len(self.param)))/float(len(self.param))
#create a spline
self.spline2_cdf = UnivariateSpline(cdf,srt_param,k=5)
def pdf_spline(self):
'''Only for "nonstandard". Method creates a spline to the normalised PDF for discrete parameter values.
Warning: user should check that spline faithfully matches actual pdf.
'''
        hist, bin_edges = np.histogram(self.param, density=True, bins=200)  # 'normed' was removed from numpy
        self.spline2_pdf = interp1d(bin_edges[1:], hist)
def return_priorprob(self,value):
'''Input:
value - random variable
Returns:
probability of rv given the prior dist
'''
if self.priorname =="gamma":
x = 1./self.hyperparams[1]
return scipy.stats.gamma.pdf(value, self.hyperparams[0],scale=x)
elif self.priorname =="normal":
return scipy.stats.norm.pdf(value, loc = self.hyperparams[0],scale=self.hyperparams[1])
elif self.priorname =="uniform":
width = self.hyperparams[1] - self.hyperparams[0]
return scipy.stats.uniform.pdf(value, loc = self.hyperparams[0],scale=width)
elif self.priorname == "nonstandard":
return self.spline2_pdf(value)
def prior(self):
'''
Returns a random variable from the prior distribution
'''
np.random.seed()
if self.priorname =="gamma":
k=self.hyperparams[0]
scale = 1./self.hyperparams[1]
return float(np.random.gamma(k,scale))
elif self.priorname =="normal":
return float(np.random.normal(self.hyperparams[0],self.hyperparams[1],size=1))
elif self.priorname =="uniform":
return float(np.random.uniform(low=self.hyperparams[0],high=self.hyperparams[1],size=1))
elif self.priorname == "nonstandard":
uni_rvs = np.random.uniform()
return float(self.spline2_cdf(uni_rvs))
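# Hedged usage sketch (not in the original): draw from a uniform prior on
# [0, 1] and evaluate its density at the sampled point.
if __name__ == '__main__':
    uniform_prior = Prior_class("uniform", [0.0, 1.0])
    sample = uniform_prior.prior()                     # random draw from U(0, 1)
    density = uniform_prior.return_priorprob(sample)   # 1.0 everywhere inside [0, 1]
    print(sample, density)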
|
1669471
|
from discord.ext import commands
class Help(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="mhelp")
async def mhelp(self, ctx):
await ctx.send(
"""A simple Manim rendering bot.
Use the `!manimate` command to render short and simple Manim scripts.
Code **must** be properly formatted and indented. Note that you can't animate through DMs.
Supported tags:
```
-t, --transparent, -i, --save_as_gif, -s, --save_last_frame
```
Example:
```
!manimate -s
\`\`\`py
def construct(self):
self.play(ReplacementTransform(Square(), Circle()))
\`\`\`
```
"""
)
def setup(bot):
bot.add_cog(Help(bot))
|
1669490
|
from SpheralCompiledPackages import *
from spheralDimensions import spheralDimensions
dims = spheralDimensions()
#-------------------------------------------------------------------------------
# The generic FluidNodeList pattern.
#-------------------------------------------------------------------------------
FluidNodeListFactoryString = """
def makeFluidNodeList%(dim)s(name,
eos,
numInternal = 0,
numGhost = 0,
hmin = 1.0e-20,
hmax = 1.0e20,
hminratio = 0.1,
nPerh = 2.01,
maxNumNeighbors = 500,
rhoMin = 1.0e-10,
rhoMax = 1e10,
# Neighboring stuff
NeighborType = TreeNeighbor%(dim)s,
searchType = GatherScatter,
kernelExtent = 2.0,
# Parameters only for NestedGridNeighbor (deprecated)
# numGridLevels = 31,
# topGridCellSize = 100.0,
# origin = Vector%(dim)s.zero,
# gridCellInfluenceRadius = 1,
# Parameters for TreeNeighbor
xmin = Vector%(dim)s.one * -10.0,
xmax = Vector%(dim)s.one * 10.0):
result = FluidNodeList%(dim)s(name, eos, numInternal, numGhost,
hmin, hmax, hminratio,
nPerh, maxNumNeighbors,
rhoMin, rhoMax)
if NeighborType == NestedGridNeighbor%(dim)s:
print "makeFluidNodeList Deprecation Warning: NestedGridNeighbor is deprecated: suggest using TreeNeighbor."
result._neighbor = NestedGridNeighbor%(dim)s(result, searchType,
kernelExtent = kernelExtent)
#numGridLevels, topGridCellSize,
#origin, kernelExtent,
#gridCellInfluenceRadius)
else:
result._neighbor = TreeNeighbor%(dim)s(result, searchType, kernelExtent, xmin, xmax)
result.registerNeighbor(result._neighbor)
result.eos = eos
return result
"""
#-------------------------------------------------------------------------------
# Create the different dimension implementations.
#-------------------------------------------------------------------------------
for dim in dims:
exec(FluidNodeListFactoryString % {"dim" : "%id" % dim})
|
1669543
|
import torch
from torchvision import datasets, models, transforms
from pytorch_adapt.containers import Models
from pytorch_adapt.datasets import (
CombinedSourceAndTargetDataset,
SourceDataset,
TargetDataset,
)
from pytorch_adapt.models import Classifier, Discriminator
class FakeDataForAdaptation(torch.utils.data.Dataset):
def __init__(self, size):
transform = transforms.Compose([transforms.ToTensor()])
self.dataset = datasets.FakeData(
size=size, image_size=(3, 224, 224), transform=transform
)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
def get_source_model():
source_model = models.resnet18()
source_classifier = Classifier(in_size=source_model.fc.in_features, num_classes=10)
source_model.fc = torch.nn.Identity()
return source_model, source_classifier
def get_datasets():
src_train = SourceDataset(FakeDataForAdaptation(500))
target_train = TargetDataset(FakeDataForAdaptation(300))
return {
"train": CombinedSourceAndTargetDataset(src_train, target_train),
"src_train": src_train,
"src_val": SourceDataset(FakeDataForAdaptation(200)),
"target_train": target_train,
"target_val": TargetDataset(FakeDataForAdaptation(100)),
}
def get_gcd():
source_model, source_classifier = get_source_model()
discriminator = Discriminator(in_size=source_classifier.net[0].in_features)
return Models(
{
"G": source_model,
"C": source_classifier,
"D": discriminator,
}
)
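# Hedged usage sketch (not in the original): build the fake datasets and the
# G/C/D model container together, e.g. as a smoke test for an adaptation loop.
if __name__ == '__main__':
    datasets = get_datasets()
    models = get_gcd()
    sample = datasets["src_train"][0]  # pytorch_adapt wraps items in dicts
    print(type(sample), type(models))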
|
1669608
|
import requests
import json
import sys
from requests import Response
from requests.models import HTTPError
def send_post_request(url:str, body = {}, headers = {}) -> Response:
try:
resp = requests.post(url, data = json.dumps(body), headers=headers)
resp.raise_for_status()
return resp
except requests.exceptions.RequestException as e:
print(e, file=sys.stderr)
raise HTTPError
def send_update_request(url:str, body = {}, headers = {}) -> Response:
try:
resp = requests.put(url, data = json.dumps(body), headers=headers)
resp.raise_for_status()
except requests.exceptions.RequestException as e:
print(e, file=sys.stderr)
raise HTTPError
return resp
def send_get_request(url:str, body = {}, headers = {}) -> Response:
try:
resp = requests.get(url, data=json.dumps(body), headers=headers)
resp.raise_for_status()
except requests.exceptions.RequestException as e:
print(e, file=sys.stderr)
raise HTTPError
return resp
def send_delete_request(url:str, body = {}, headers = {}) -> Response:
try:
resp = requests.delete(url, data=json.dumps(body), headers=headers)
resp.raise_for_status()
except requests.exceptions.RequestException as e:
print(e, file=sys.stderr)
raise HTTPError
return resp
def send_new_request(url:str, method:str, body = {}, headers = {}) -> Response:
if method == 'POST':
return send_post_request(url,body,headers)
elif method == 'GET':
return send_get_request(url,body,headers)
elif method == 'PUT':
return send_update_request(url,body,headers)
elif method == 'DELETE':
return send_delete_request(url,body,headers)
else:
        raise Exception(f"Error while sending request: method '{method}' not allowed. The allowed methods are POST, GET, PUT, DELETE.")
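# Hedged usage sketch (not in the original): dispatch through send_new_request.
# The URL is a placeholder; on a non-2xx response the helpers log the error to
# stderr and re-raise as requests' HTTPError.
if __name__ == '__main__':
    try:
        resp = send_new_request('https://httpbin.org/post', 'POST',
                                body={'ping': 'pong'},
                                headers={'Content-Type': 'application/json'})
        print(resp.status_code)
    except HTTPError:
        print('request failed', file=sys.stderr)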
|
1669648
|
from earthchem import Query
from matplotlib import pyplot as plt
import unittest
class IntegrationTestRESTClientQuery(unittest.TestCase):
"Some integration tests to check that things are working"
def setUp(self):
self.query = Query(author='barnes')
# <50 for test speed, pagination is checked elsewhere
self.df = self.query.dataframe(max_rows=49)
def test_plot_latlon(self):
"Check that plotting works without any issues"
self.df.plot('longitude', 'latitude', 'scatter')
plt.close()
def test_plot_data(self):
"Check that plotting works with data inputs"
self.df.plot('al2o3', 'sio2', 'scatter')
plt.close()
if __name__ == '__main__':
unittest.main()
|
1669712
|
import io
import logging
import luigi
from luigi.contrib import gcs
from luigi_gcloud.gcore import get_default_client
logger = logging.getLogger('luigi-gcloud')
try:
import apiclient
from apiclient import discovery
except ImportError:
logger.warning("Loading gcloud module without google-api-client installed. Will crash at "
"runtime if gcloud functionality is used.")
# noinspection PyAbstractClass
class GCSFileSystem(gcs.GCSClient):
def touch(self, dest_path):
        media = apiclient.http.MediaIoBaseUpload(io.BytesIO(b''), 'application/octet-stream')
bucket, obj = self._path_to_bucket_and_key(dest_path)
return self.client.objects().insert(bucket=bucket, name=obj, media_body=media).execute()
def __init__(self, client=None, descriptor='', http_=None, chunksize=gcs.CHUNKSIZE):
client = client or get_default_client()
super(GCSFileSystem, self).__init__(client.oauth(), descriptor, http_, chunksize)
# noinspection PyAbstractClass
class GCSTarget(gcs.GCSTarget):
storage_api = None
def bucket(self):
ix = self.path.find('/', 5)
return self.path[5:ix]
def path_in_bucket(self):
ix = self.path.find('/', 5)
return self.path[ix + 1:]
def __repr__(self):
return self.path
def __init__(self, path, format=None, client=None):
client = client or get_default_client()
self.storage_api = client.storage_api()
super(GCSTarget, self).__init__(path, format, client=GCSFileSystem(client))
def touch(self):
out = self.fs.touch(self.path)
logger.warning("TODO: Need to handle out: " + str(out))
class GCSFlagTarget(GCSTarget):
def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
if path[-1] != "/":
path += "/"
super(GCSFlagTarget, self).__init__(path + flag, format, client)
class AtomicGCSFile(luigi.target.AtomicLocalFile):
def __init__(self, path, client=None):
client = client or get_default_client()
self.gcs_client = gcs.GCSClient(client.oauth())
super(AtomicGCSFile, self).__init__(path)
def move_to_final_destination(self):
self.gcs_client.put(self.tmp_path, self.path)
class MarkerTask(luigi.Task):
def __init__(self, *args, **kwargs):
client = kwargs.get("api") or get_default_client()
self._gcs = client.storage_api()
super(MarkerTask, self).__init__(*args, **kwargs)
def run(self):
marker = self.output()
if hasattr(marker, "touch") and callable(getattr(marker, "touch")):
logger.info("Writing marker file " + str(marker))
marker.touch()
else:
logger.error("Output " + str(marker) + " not writable")
|
1669715
|
from CalculateEntropy import calculate_shannon_entropy
def test_calculate_shannon_entropy():
_, context_result, _ = calculate_shannon_entropy('1234', 1)
assert context_result == {'EntropyResult': {'checked_value': '1234', 'entropy': 2.0}}
_, context_result, _ = calculate_shannon_entropy('1234', 3)
assert context_result == {}
|
1669717
|
from rest_framework import mixins
from rest_framework.viewsets import ViewSetMixin
from rest_framework_mongoengine.generics import GenericAPIView
class GenericViewSet(ViewSetMixin, GenericAPIView):
""" Adaptation of DRF GenericViewSet """
pass
class ModelViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
GenericViewSet):
""" Adaptation of DRF ModelViewSet """
pass
class ReadOnlyModelViewSet(mixins.RetrieveModelMixin,
mixins.ListModelMixin,
GenericViewSet):
""" Adaptation of DRF ReadOnlyModelViewSet """
pass
|
1669719
|
import logging
import os
import random
from typing import List, Tuple
import attr
import numpy as np
import pandas as pd
from flair.data import Sentence
from sklearn.datasets import dump_svmlight_file
from torch.utils.data import Dataset
from gleipnir.corpora import *
from gleipnir.data import CandidateGenerator
from gleipnir.kb import FusekiKnowledgeBase, WikidataKnowledgeBase, KnowledgeBase
from gleipnir.util import get_logger
logger = get_logger(__name__)
class LetorDataset:
pass
@attr.s
class TrainingData:
corpus_name: str = attr.ib()
corpus_train: Corpus = attr.ib()
corpus_dev: Corpus = attr.ib()
corpus_test: Corpus = attr.ib()
corpus_all: Corpus = attr.ib()
kb: KnowledgeBase = attr.ib()
cg: CandidateGenerator = attr.ib()
@pd.api.extensions.register_dataframe_accessor("ext")
class HandcraftedExtensionAccessor:
def __init__(self, pandas_obj: pd.DataFrame):
self._df = pandas_obj
self.name = ""
@property
def candidate_ids(self):
return self._df["candidate_id"]
@property
def number_of_groups(self) -> int:
return self._df["qid"].nunique()
@property
def X(self):
return self._df[self.features].astype('float32').values
@property
def y(self):
return self._df["score"].astype('float32').values
@property
def uris(self):
return self._df["uri"].values
@property
def features(self) -> List[str]:
return [f for f in self._df.columns if f.startswith("feat_")]
@property
def num_features(self) -> int:
return len(self.features)
@property
def group_sizes(self) -> List[int]:
return [int(x) for x in self._df["qid"].value_counts(sort=False)]
@property
def groupby_qid(self) -> List[pd.DataFrame]:
return [_slice for (_, _slice) in self._df.groupby(["qid"])]
@property
def group_X(self) -> List[np.array]:
return [_slice.ext.X for (_, _slice) in self._df.groupby(["qid"])]
@property
def group_y(self) -> List[np.array]:
return [_slice.ext.y for (_, _slice) in self._df.groupby(["qid"])]
@property
def mentions(self) -> List[str]:
return [_slice["mention"].iloc[0] for (_, _slice) in self._df.groupby(["qid"])]
@property
def labels(self) -> List[List[str]]:
return [_slice["label"] for (_, _slice) in self._df.groupby(["qid"])]
@property
def contexts(self) -> List[str]:
return [_slice["context"].iloc[0] for (_, _slice) in self._df.groupby(["qid"])]
@property
def gold_uris(self) -> List[str]:
return [_slice["gold"].iloc[0] for (_, _slice) in self._df.groupby(["qid"])]
@property
def gold_indices(self) -> List[int]:
return [_slice["gold_idx"].iloc[0] for (_, _slice) in self._df.groupby(["qid"])]
def split_by_qid(self, qid) -> Tuple[pd.DataFrame, pd.DataFrame]:
p1 = self._df.query(f"qid < {qid}")
p2 = self._df.query(f"qid >= {qid}")
return p1, p2
def to_csv(self):
assert self.name, "Need to set name when saving to csv"
self._df.to_csv(os.path.join(PATH_HANDCRAFTED, f"{self.name}.csv"), index=False, sep="\t")
def subsample(self, number_of_groups: int) -> pd.DataFrame:
""" Selects the first `number_of_groups` groups. """
# We assume that qids are sorted ascending
limit = self._df["qid"].values[0] + number_of_groups
result = self._df.query(f"qid < {limit}")
assert len(result.ext.groupby_qid) == number_of_groups
return result
def slice_by_qid(self, lower: int, upper: int) -> pd.DataFrame:
# We need to find the first qid as the offset
offset = self._df["qid"].values[0]
return self._df.query(f"qid >= {offset + lower} and qid < {offset + upper}")
def to_svmlight(self):
assert self.name, "Need to set name when saving to csv"
dump_svmlight_file(self.X, self.y, os.path.join(PATH_HANDCRAFTED, f"{self.name}.dat"), query_id=self._df["qid"])
def get_raw_corpus_data(s: str, caching: bool = True):
if s == "aida":
data_train = load_aida_train()
data_dev = load_aida_dev()
data_test = load_aida_test()
data_all = load_aida_all()
kb = WikidataKnowledgeBase(caching=caching)
elif s == "wwo-fuseki":
data_train = load_wwo_train()
data_dev = load_wwo_dev()
data_test = load_wwo_test()
data_all = load_wwo_all()
kb = FusekiKnowledgeBase(name="wwo", caching=caching)
elif s == "1641-fuseki":
data_train = load_depositions_train()
data_dev = load_depositions_dev()
data_test = load_depositions_test()
data_all = load_depositions_all()
kb = FusekiKnowledgeBase(name="depositions", caching=caching)
else:
raise Exception(f"Unknown corpus name: {s}")
cg = CandidateGenerator(kb)
return TrainingData(s, data_train, data_dev, data_test, data_all, kb, cg)
def load_dataframe_from_csv(name: str) -> pd.DataFrame:
p = os.path.join(PATH_HANDCRAFTED, f"{name}.csv")
df = pd.read_csv(p, sep="\t")
df.ext.name = name
return df
def load_handcrafted_data(name: str, evaluate_on_test: bool = False) -> Tuple[pd.DataFrame, pd.DataFrame]:
logger.info("Loading [%s]", name)
ds_train_name = name + "_train"
ds_test_name = name + ("_dev" if not evaluate_on_test else "_test")
df_train = load_dataframe_from_csv(ds_train_name)
df_eval = load_dataframe_from_csv(ds_test_name)
df_train.fillna('<unk>', inplace=True)
df_eval.fillna('<unk>', inplace=True)
return df_train, df_eval
def load_handcrafted_simulation_data(name: str) -> pd.DataFrame:
logger.info("Loading [%s]", name)
ds_name = f"{name}_full_sim"
df = load_dataframe_from_csv(ds_name)
df.fillna('<unk>', inplace=True)
return df
class HandcraftedLetorDataset(Dataset):
# https://github.com/yutayamazaki/RankNet-PyTorch/
def __init__(self, df: pd.DataFrame):
group_sizes = df.ext.group_sizes
qids = df["qid"]
indices = qids.unique()
large_enough_groups = {i for i, group_size in zip(indices, group_sizes) if group_size >= 2}
df = df[qids.isin(large_enough_groups)]
self.X_grouped = df.ext.group_X
self.y_grouped = df.ext.group_y
self.gold_indices = df.ext.gold_indices
assert len(self.X_grouped) == len(self.y_grouped) == len(self.gold_indices), "Groups have to have the same length!"
def __len__(self) -> int:
return len(self.y_grouped)
def __getitem__(self, group_idx: int):
X = self.X_grouped[group_idx]
y = self.y_grouped[group_idx]
gold_idx = self.gold_indices[group_idx]
assert gold_idx >= 0, "Group does not have gold label!"
assert y[gold_idx] == 1.0, "Gold should have score of 1!"
x_p = X[gold_idx]
y_p = y[gold_idx]
indices = list(range(len(y)))
indices.remove(gold_idx)
idx_n = random.choice(indices)
assert idx_n != gold_idx
x_n = X[idx_n]
y_n = y[idx_n]
return {
"x_p": x_p,
"x_n": x_n,
"y_p": y_p,
"y_n": y_n
}
class PairwiseFlairLetorDataset(Dataset):
# https://github.com/yutayamazaki/RankNet-PyTorch/
def __init__(self, df: pd.DataFrame):
mentions = []
grouped_kb_labels = []
grouped_descriptions = []
contexts = []
for group in df.ext.groupby_qid:
# The mention is identical for all items in the group
mentions.append(Sentence(group["mention"].values[0], use_tokenizer=False))
grouped_kb_labels.append([Sentence(x, use_tokenizer=True) for x in group["label"]])
grouped_descriptions.append([Sentence(x, use_tokenizer=True) for x in group["description"]])
contexts.append(Sentence(group["context"].values[0], use_tokenizer=True))
self.mentions = mentions
self.grouped_kb_labels = grouped_kb_labels
self.grouped_descriptions = grouped_descriptions
self.contexts = contexts
self.y_grouped = df.ext.group_y
self.gold_indices = df.ext.gold_indices
def __len__(self) -> int:
return len(self.y_grouped)
def __getitem__(self, group_idx: int):
mention = self.mentions[group_idx]
labels = self.grouped_kb_labels[group_idx]
descriptions = self.grouped_descriptions[group_idx]
context = self.contexts[group_idx]
y = self.y_grouped[group_idx]
gold_idx = self.gold_indices[group_idx]
assert gold_idx >= 0, "Group does not have gold label!"
assert y[gold_idx] == 1.0, "Gold should have score of 1!"
label_p = labels[gold_idx]
description_p = descriptions[gold_idx]
y_p = y[gold_idx]
        # Sample a negative candidate, excluding the gold index (mirrors
        # HandcraftedLetorDataset above; starting at index 1 would only be
        # correct if the gold candidate were always first).
        indices = [i for i in range(len(y)) if i != gold_idx]
        idx_n = np.random.choice(indices)
label_n = labels[idx_n]
description_n = descriptions[idx_n]
y_n = y[idx_n]
return {
"mention": mention,
"label_p": label_p,
"description_p": description_p,
"y_p": y_p,
"label_n": label_n,
"description_n": description_n,
"y_n": y_n,
"context": context
}
|
1669724
|
from .. import item
@property
def consumable_items(player):
items = []
for inventory_item in player.inventory:
if inventory_item.type == item.CONSUMABLE:
items.append(inventory_item)
return items
"""property: A property that computes a list of the items the player has in
their inventory which are consumable."""
@property
def equippable_items(player):
items = []
for inventory_item in player.inventory:
if inventory_item.type == item.EQUIPPABLE:
items.append(inventory_item)
return items
"""property: A property that computes a list of the items the player has in
their inventory which are equippable."""
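# Hedged note (not in the original): these module-level property objects only
# take effect once attached to a class, e.g. a hypothetical Player:
#
#     Player.consumable_items = consumable_items
#     Player.equippable_items = equippable_items
#
# after which player.consumable_items evaluates the getter with the instance.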
|
1669733
|
from isotp.tpsock import socket
from isotp.protocol import TransportLayer, CanStack, CanMessage
from isotp.address import *
from isotp.errors import *
|
1669848
|
from helper import *
def doTest():
_rule()
_getRuleSet()
def _getRuleSet():
ruleSet = RuleSet('.selector', 'width:100px;', '/* aa */', None)
rule = Rule("", "", "", ruleSet)
equal(rule.getRuleSet(), ruleSet, 'get rule set')
equal(rule.getRuleSet().selector, '.selector', 'it is what I need')
def _rule():
rule = Rule(" .test ", " _width ", " 100px; ", None)
equal(rule.selector, '.test', 'selector is ok')
equal(rule.roughSelector, ' .test ', 'roughSelector is ok')
equal(rule.roughName, ' _width ', 'roughName is ok')
equal(rule.name, 'width', 'name is ok')
equal(rule.roughValue, ' 100px; ', 'roughValue is ok')
equal(rule.value, '100px', 'value is ok')
equal(rule.strippedName, '_width', 'stripped name is ok')
equal(rule.strippedValue, '100px;', 'strippedValue is ok')
|
1669862
|
from __future__ import absolute_import
from .core import rootplot, rootplotmpl, plot, plotmpl
from .version import __version__
|
1669870
|
import numpy as np
import pickle
with open('points_flatten2.pkl', 'rb') as f:
    data = pickle.load(f)
print(len(data))
print(data[0][0])
print(data[0][1])
|
1669875
|
import os
import sys
import numpy as np
# set ratio of the labeled samples
numerator = 1
denominator = 8
labeled_ratio = numerator / denominator
# read the samples list
samples_list = 'VOCdevkit/VOC2012/ImageSets/Segmentation/train_aug.txt'
if not os.path.exists(samples_list):
    print('The PascalVOC 2012 dataset is not prepared.\n'
          'Please run \'sh prepare.sh\' to prepare it.')
    sys.exit(1)
with open(samples_list, 'r') as f:
samples = f.read().splitlines()
np.random.shuffle(samples)
# get the sublabeled list
labeled_num = int(len(samples) * labeled_ratio + 1)
labeled_list = samples[:labeled_num]
# create the output path and save the sublabeled list
out_path = 'sublabeled_prefix/{0}-{1}'.format(numerator, denominator)
if not os.path.exists(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, '{0}.txt'.format(len(os.listdir(out_path))))
with open(out_file, 'w') as f:
for sample in labeled_list:
f.write(sample + '\n')
|
1669893
|
from copy import deepcopy
from krisk.chart.api import Chart
import pandas as pd
def round_list(arr):
try:
return arr.values.round(3).tolist() # Numeric Array
except TypeError:
try:
return arr.unique().tolist() #String Array
except AttributeError:
return (arr.apply(lambda x: x.values.round(3) #Dataframe
if x.dtype.name.startswith('float') else x)
.values.tolist())
def insert_series_data(data, x, chart_type, chart, cat=None):
elem_series = {'name': '', 'type': chart_type, 'data': []}
series = deepcopy(elem_series)
series['data'] = round_list(data)
series['type'] = chart_type
if cat:
series['name'] = cat
chart.option['legend']['data'].append(str(cat))
else:
series['name'] = x
chart.option['series'].append(series)
return series
def make_chart(df, **kwargs):
from krisk.plot.make_bar_line import (set_bar_line_chart,
set_barline,
set_waterfall)
from krisk.plot.make_scatter_geo import set_scatter_chart
chart = Chart(**kwargs)
# try:
# df.columns
# kwargs['c'] = 'unnamed'
# except AttributeError:
# kwargs['c'] = None
#
# kwargs['x'] = df.index.name
try:
chart.kwargs['data_columns'] = df.columns
chart.set_xlabel(kwargs['x'])
except AttributeError:
if kwargs['type'] in ['line_tidy', 'bar_tidy']:
kwargs['c'] = None
if kwargs.get('y', None):
chart.set_ylabel(kwargs['y'])
if kwargs['type'] in ['line', 'line_tidy']:
chart.set_tooltip_style(trigger='axis', axis_pointer='shadow')
if kwargs['type'] in ['bar', 'line',
'bar_tidy', 'line_tidy',
'hist']:
set_bar_line_chart(chart, df, **kwargs)
elif kwargs['type'] == 'bar_line':
set_barline(chart, df, **kwargs)
elif kwargs['type'] == 'waterfall':
set_waterfall(chart, df, **kwargs)
elif kwargs['type'] == 'scatter':
set_scatter_chart(chart, df, **kwargs)
return chart
|
1669898
|
from nndet.preprocessing.crop import ImageCropper
from nndet.preprocessing.preprocessor import (
PreprocessorType,
AbstractPreprocessor,
GenericPreprocessor,
)
|
1669934
|
import airsim
import time
client = airsim.VehicleClient()
client.confirmConnection()
# Access an existing light in the world
lights = client.simListSceneObjects("PointLight.*")
pose = client.simGetObjectPose(lights[0])
scale = airsim.Vector3r(1, 1, 1)
# Destroy the light
client.simDestroyObject(lights[0])
time.sleep(1)
# Create a new light at the same pose
new_light_name = client.simSpawnObject("PointLight", "PointLightBP", pose, scale, False, True)
time.sleep(1)
# Change the light's intensity
for i in range(20):
client.simSetLightIntensity(new_light_name, i * 100)
time.sleep(0.5)
|
1669957
|
import unittest
import test_canonicalization
import test_e2e_client
import test_functions
import test_jsonizable
def suite():
load = unittest.TestLoader().loadTestsFromModule
modules = [
test_canonicalization,
test_e2e_client,
test_functions,
test_jsonizable,
]
suites = unittest.TestSuite(map(load, modules))
return suites
if __name__ == '__main__':
s = suite()
unittest.TextTestRunner(verbosity=2).run(s)
|
1669975
|
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from igempnp.items import MemberItem, TrackItem
class MemberSpider(CrawlSpider):
dont_filter = True
name = "track"
allowed_domains = ["igem.org"]
start_urls = ["http://igem.org/Team_List?year=2007",
"http://igem.org/Team_List?year=2008",
"http://igem.org/Team_List?year=2009",
"http://igem.org/Team_List?year=2010",
"http://igem.org/Team_List?year=2011",
"http://igem.org/Team_List?year=2012",
"http://igem.org/Team_List?year=2013",
"http://igem.org/Team_List?year=2014"]
rules = (Rule(LinkExtractor(allow=('Team\.cgi', )), callback='parse_team'),)
def parse_team(self, response):
team = response.xpath('//td/text()').extract()[1]
region = response.xpath('//td/text()').extract()[11]
year = response.xpath('//title/text()').extract()[0].split()[1]
track = response.xpath('//table[@id="table_tracks"]/tr/td/text()')[0].extract()[16:-1]
if track == "t been assigned to a track.":
track = "Undefined"
teamtrack = TrackItem()
teamtrack['team_year'] = team+'_'+year
teamtrack['track'] = track
teamtrack['region'] = region
yield teamtrack
|
1669986
|
from rest_framework import serializers
from api.models.UserCreationRequest import UserCreationRequest
from api.serializers import UserCreateSerializer
class UserCreationRequestSerializer(serializers.Serializer):
"""
Serializer for creating a user
"""
user = UserCreateSerializer(allow_null=False)
email = serializers.EmailField(
allow_null=False, allow_blank=False,
error_messages={
'blank': 'A BCeID/IDIR Email Address is required',
'invalid': 'Please enter a valid BCeID/IDIR Email Address.'
})
username = serializers.CharField(
allow_null=True, allow_blank=True, required=False
)
def __init__(self, *args, **kwargs):
"""
This is to restrict non-government users from creating users
for other organizations
"""
super(UserCreationRequestSerializer, self).__init__(*args, **kwargs)
data = kwargs.get('data')
request = self.context.get('request')
if not request.user.is_government_user:
user = data['user']
user['organization'] = request.user.organization.id
def validate(self, data):
"""
Validation to check that the email hasn't been used yet.
"""
if UserCreationRequest.objects.filter(
keycloak_email=data.get('email'),
external_username=data.get('username')).exists():
raise serializers.ValidationError(
'This SSO email is already associated with that user')
return data
def create(self, validated_data):
context = self.context
user_serializer = UserCreateSerializer(
data=self.data['user'], context=context)
        # raise on invalid nested user data instead of silently saving it
        user_serializer.is_valid(raise_exception=True)
user = user_serializer.save()
return UserCreationRequest.objects.create(
keycloak_email=validated_data.get('email'),
external_username=validated_data.get('username'),
user=user
)
|
1670004
|
import sys
import numpy as np
infil = sys.argv[1] # original output from Fit-Hi-C
top_n = int(sys.argv[2]) # an integer
outfil = sys.argv[3] # output file name
qvalues = np.loadtxt(infil, usecols=[-1])
minq = qvalues[qvalues>0].min()
pool = []
with open(infil, 'r') as source:
for line in source:
parse = line.rstrip().split()
count = int(parse[6])
qvalue = float(parse[7])
if qvalue <= 0:
qvalue = minq
record = (-np.log(qvalue), count) + tuple(parse[:6])
pool.append(record)
pool.sort(reverse=True)
selected = pool[:top_n]
with open(outfil, 'w') as out:
for line in selected:
out.write('\t'.join(list(line[2:])+[str(line[0])])+'\n')
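# Hedged usage note (not in the original): the script takes three positional
# arguments, e.g.
#     python select_top_interactions.py fithic.significances.txt 1000 top1000.txt
# It assumes standard Fit-Hi-C output, with the contact count in column 7 and
# the q-value in column 8; the script name above is a placeholder.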
|
1670016
|
from flask import ( g, redirect, url_for )
from tmc.db import get_db, make_dicts
# Get list of all adversaries per event available in the database.
def get_adversaries_x_event():
db = get_db()
try:
db.row_factory = make_dicts
#db.row_factory = lambda cursor, row: {row: row[0]}
query = db.execute(
'select a.adversary_id, a.adversary_name, event_name, event_description from events e \
inner join adversaries_x_events ae on ae.event_id = e.id \
inner join adversaries a on a.id = ae.adversary_id ORDER BY adversary_name').fetchall()
return query
except TypeError:
#embed()
return False #Change this for something more meaningful -- warning/alert
|
1670029
|
import xpc.xpc as xpc
import rospy
import xplane_ros.msg as xplane_msgs
import rosplane_msgs.msg as rosplane_msgs
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Pose, PoseStamped
from nav_msgs.msg import Odometry
from std_msgs.msg import Float32
from geometry_msgs.msg import Quaternion
from tf.transformations import quaternion_from_euler
import numpy as np
KNOTS_TO_MS = 0.51444444444
MS_TO_FPM = 60.0/0.305
angle_in_deg = True
'''Class to extract position and controls related information from XPlane '''
class StateReader:
def __init__(self, client):
'''instantiate connection to XPC'''
#self.client = xpc.XPlaneConnect()
self.client = client
self.initPose = Pose()
self.initPose.position.x = None
self.initPose.position.y = None
self.initPose.position.z = None
        # Publish global state consisting of GlobalState msg (Latitude/Longitude instead of openGL coordinates)
self.globalStatePub = rospy.Publisher("/xplane/flightmodel/global_state", xplane_msgs.GlobalState, queue_size = 10)
self.odomPub = rospy.Publisher("/xplane/flightmodel/odom", Odometry, queue_size=10) # Odometry is also being provided in the NED format : XYZ <-> NED
# self.posePub = rospy.Publisher("/xplane/flightmodel/pose", Pose, queue_size=10)
# self.velPub = rospy.Publisher("/xplane/flightmodel/velocity", Twist, queue_size=10)
'''Publisher for data in rosplane format'''
self.statePub = rospy.Publisher("/fixedwing/xplane/state", rosplane_msgs.State, queue_size=10)
# self.diff_pub = rospy.Publisher("/xplane/height_diff", Float32, queue_size=10 )
self.transformPub = rospy.Publisher("/xplane/flightmodel/my_transform", xplane_msgs.TransformedPoint, queue_size=10)
self.odom = Odometry()
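    # NOTE: sensor_update and control_update below publish to posePub and
    # controlPub, which are commented out / never created in __init__; they
    # appear to be legacy paths superseded by sensor_update2.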
def sensor_update(self):
# get global position information from XPlane
pos = self.client.getPOSI()
# convert data to ros msgs
msg = xplane_msgs.Position()
msg.lat = pos[0]
msg.lon = pos[1]
msg.el = pos[2]
msg.roll = pos[3]
msg.pitch = pos[4]
msg.heading = pos[5]
msg.gear = pos[6]
self.posePub.publish(msg)
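    # NOTE: control_update below publishes to self.controlPub, which __init__
    # does not define; see the note above sensor_update.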
def control_update(self):
# get control surfaces information from XPlane
ctrl = self.client.getCTRL()
# convert data to ros msgs
msg = xplane_msgs.Controls()
msg.elevator = ctrl[0]
msg.aileron = ctrl[1]
msg.rudder = ctrl[2]
msg.throttle = ctrl[3]
msg.gear = ctrl[4]
msg.flaps = ctrl[5]
msg.speed_brakes = ctrl[6]
self.controlPub.publish(msg)
def sensor_update2(self):
drefs = []
''' Get sim time to generate timestamp and header for ROS msgs '''
drefs.append("sim/time/total_running_time_sec")
'''Global latitude (1), longitude(2) and elevation(3) datarefs'''
drefs.append("sim/flightmodel/position/latitude")
drefs.append("sim/flightmodel/position/longitude")
drefs.append("sim/flightmodel/position/elevation")
'''Position in local coordinates x(4), y(5), z(6)'''
drefs.append("sim/flightmodel/position/local_x")
drefs.append("sim/flightmodel/position/local_y")
drefs.append("sim/flightmodel/position/local_z")
'''Velocity in local coordinates vx(7), vy(8), vz(9)'''
drefs.append("sim/flightmodel/position/local_vx")
drefs.append("sim/flightmodel/position/local_vy")
drefs.append("sim/flightmodel/position/local_vz")
''' attitude information roll(10), pitch(11), yaw(12)'''
drefs.append("sim/flightmodel/position/phi")
drefs.append("sim/flightmodel/position/theta")
drefs.append("sim/flightmodel/position/psi")
'''Control surface information pitch(13), roll(14), yaw(15), throttle(16), flaps(17), speed brakes(18)'''
drefs.append("sim/joystick/yoke_pitch_ratio")
drefs.append("sim/joystick/yoke_roll_ratio")
drefs.append("sim/joystick/yoke_heading_ratio")
drefs.append("sim/flightmodel/engine/ENGN_thro")
drefs.append("sim/flightmodel/controls/flaprat")
drefs.append("sim/flightmodel/controls/sbrkrat")
''' rotation rate pitch(19), roll(20), yaw(21)'''
drefs.append("sim/flightmodel/position/Q")
drefs.append("sim/flightmodel/position/P")
drefs.append("sim/flightmodel/position/R")
''' Gear (22) '''
drefs.append("sim/aircraft/parts/acf_gear_deploy")
''' Quaternion (23)'''
drefs.append("sim/flightmodel/position/q")
''' alpha (24), beta(25) '''
drefs.append("sim/flightmodel/position/alpha")
drefs.append("sim/flightmodel/position/beta")
'''Wind speed (26) and x(27), y(28),z (29) components in openGL'''
drefs.append("sim/weather/wind_speed_kt")
drefs.append("sim/weather/wind_now_x_msc")
drefs.append("sim/weather/wind_now_y_msc")
drefs.append("sim/weather/wind_now_z_msc")
'''Airspeed (30) and groundspeed (31) '''
drefs.append("sim/flightmodel/position/indicated_airspeed")
drefs.append("sim/flightmodel/position/groundspeed")
''' Reference latitude(32) and longitude(33) '''
drefs.append("sim/flightmodel/position/lat_ref")
drefs.append("sim/flightmodel/position/lon_ref")
        ''' vertical velocity (34) '''
drefs.append("sim/flightmodel/position/vh_ind")
data = self.client.getDREFs(drefs)
'''For indices refer above where we append the datarefs'''
#print(data[8][0], data[34][0])
#print(data[34][0])
'''Set initial position so that this vector can be subtracted from subsequent local positions (Centre the frame)'''
if (not self.initPose.position.x):
self.initPose.position.x = data[4][0]
self.initPose.position.y = data[5][0]
self.initPose.position.z = data[6][0]
self.opengl_point_to_ned(self.initPose)
self.global_state = xplane_msgs.GlobalState() # Global coordinate information
'''Additional 0 index because the data is in the form of a tuple'''
self.global_state.latitude = data[1][0]
self.global_state.longitude = data[2][0]
self.global_state.elevation = data[3][0]
self.global_state.roll = data[10][0]
self.global_state.pitch = data[11][0]
self.global_state.heading = data[12][0]
self.global_state.gear = data[22][0]
pose = Pose() # position in local coordinates
velocity = Twist() # velocity in local coordinates
odom = Odometry()
'''pose and orientation in openGL coordinates. However, the angle convention is still NED so no change required there'''
pose.position.x = data[4][0]
pose.position.y = data[5][0]
pose.position.z = data[6][0]
pose.orientation.x = data[23][1]
pose.orientation.y = data[23][2]
pose.orientation.z = data[23][3]
pose.orientation.w = data[23][0]
''' Quaternion test '''
# q = quaternion_from_euler(self.global_state.roll * np.pi/180.0, self.global_state.pitch * np.pi/180.0, self.global_state.heading * np.pi/180.0)
# print(q)
# print(pose.orientation)
# print("-----------------------")
''' Current data seems good '''
''' Convert openGL (East Up South) to NED frame & apply translation'''
self.opengl_point_to_ned(pose)
self.shift_point(pose, self.initPose)
        '''Linear velocities must be transformed, but XPlane already provides attitude rates in the conventional NED format'''
velocity.linear.x = data[7][0] # pn_dot
velocity.linear.y = data[8][0] # pe_dot
velocity.linear.z = data[9][0] # pd_dot
velocity.angular.x = data[20][0] # Roll rate
velocity.angular.y = data[19][0] # Pitch rate
velocity.angular.z = data[21][0] # Yaw rate
self.opengl_velocity_to_ned(velocity)
odom.header.frame_id = '/world'
'''TODO : In order to be able to plot on rqt with other data, we should instead use Time.now()'''
odom.header.stamp = rospy.Time(secs=data[0][0])
odom.pose.pose = pose
odom.twist.twist = velocity
self.odom = odom
''' rosplane state '''
state = rosplane_msgs.State()
state = self.get_rosplane_state(data)
# state.header.stamp = rospy.Time(secs=data[0][0])
state.header.stamp = rospy.Time.now()
# state.header.frame_id = "\x01"
state.header.frame_id = "world"
# state.initial_alt = 0.0
# state.initial_lat = 0.0
# state.initial_lon = 0.0
# state.quat_valid = False
# state.quat = [1.0, 0.0, 0.0, 0.0]
# state.chi_deg = 0.0
# state.psi_deg = 0.0
'''Print statements to check if local_vy can be used as vertical velocity indicator
vh_ms = self.client.getDREF("sim/flightmodel/position/vh_ind")[0]
vh_fpm = self.client.getDREF("sim/flightmodel/position/vh_ind_fpm")[0]
print("sensor : %f, %f, %f" % (vh_ms, -velocity.linear.z, vh_fpm*0.3048/60))'''
self.globalStatePub.publish(self.global_state)
self.odomPub.publish(odom)
# self.posePub.publish(pose)
# self.velPub.publish(velocity)
self.statePub.publish(state)
# self.diff_pub.publish(data[5][0] - data[3][0])
'''TODO : local_vx, vy, vz don't seem to give a magnitude equal to airspeed. It could be Vg instead ; investigate this'''
def get_rosplane_state(self, data):
state = rosplane_msgs.State()
state.position[0] = -data[6][0] - self.initPose.position.x
state.position[1] = data[4][0] - self.initPose.position.y
state.position[2] = -data[5][0] - self.initPose.position.z
state.Va = data[30][0] * KNOTS_TO_MS # dataref gives airspeed in knots; need to convert it to m/s
'''Sending angle values in degrees or in rad'''
if angle_in_deg:
state.alpha = data[24][0]
state.beta = data[25][0]
state.phi = data[10][0]
state.theta = data[11][0]
state.psi = data[12][0]
else:
state.alpha = data[24][0] * (np.pi / 180.0)
state.beta = data[25][0] * (np.pi / 180.0)
state.phi = data[10][0] * (np.pi/180)
state.theta = data[11][0] * (np.pi/180)
state.psi = data[12][0] * (np.pi/180)
state.p = data[20][0] * (np.pi/180) # roll rate in rad/s
state.q = data[19][0] * (np.pi/180) # pitch rate in rad/s
state.r = data[21][0] * (np.pi/180) # yaw rate in rad/s
state.Vg = data[31][0] # dataref gives groundspeed in m/s
wind_speed = data[26][0]
'''wn = w * -z_component
we = w * x_component '''
state.wn = wind_speed * (-data[29][0])
state.we = wind_speed * (data[27][0])
# state.wn = 0
# state.we = 0
state.vh = data[8][0] * MS_TO_FPM
'''Print statements to see if speed is in m/s or knots'''
# vx = self.odom.twist.twist.linear.x
# vy = self.odom.twist.twist.linear.y
# vz = self.odom.twist.twist.linear.z
# print("Airspeed Xplane : %f" % (state.Va))
# print("Airpspeed in m/s %f" % (state.Va * 0.51444444444 ))
# print("Self Airspeed : %f" % (np.sqrt(vx*vx + vy*vy + vz*vz)))
# print("Ground velocity : %f" % (state.Vg))
# print("Ground velocity in m/s : %f" % (state.Vg * 0.51444444444 ))
# print("self Groundspeed : %f" % (np.sqrt(vx*vx + vy*vy)))
# print("-------------------------------------")
        '''Observations :
        the groundspeed dataref in fact gives groundspeed in m/s,
        but airspeed is in knots.
        sqrt(vx*vx + vy*vy + vz*vz) equals groundspeed (it should have equalled airspeed);
        airspeed seems slightly off from sqrt(vx*vx + vy*vy + vz*vz), probably because of wind
        '''
state.chi = state.psi + state.beta # TODO : calculate course angle ; currently assume wind velocity is 0
if angle_in_deg:
if state.chi > 180.0:
state.chi = state.chi - 2*180.0
if state.chi < -180.0:
state.chi = state.chi + 2*180.0
'''Wrap the course angle between -PI and PI'''
if state.psi > 180.0:
state.psi -= 2*180.0
if state.psi < -180.0:
state.psi += 2*180.0
else:
if state.chi > np.pi:
state.chi = state.chi - 2*np.pi
if state.chi < -np.pi:
state.chi = state.chi + 2*np.pi
'''Wrap the course angle between -PI and PI'''
if state.psi > np.pi:
state.psi -= 2*np.pi
if state.psi < -np.pi:
state.psi += 2*np.pi
return state
def opengl_point_to_ned(self, pose):
''' [pn,pe,pd]^T = [0, 0, -1] [x,y,z]^T
[1, 0, 0 ]
[0, -1, 0] '''
pn = -pose.position.z
pe = pose.position.x
pd = -pose.position.y
pose.position.x = pn
pose.position.y = pe
pose.position.z = pd
def opengl_velocity_to_ned(self, vel):
''' [pn,pe,pd]^T = [0, 0, -1] [x,y,z]^T
[1, 0, 0 ]
[0, -1, 0] '''
pn_dot = -vel.linear.z
pe_dot = vel.linear.x
pd_dot = -vel.linear.y
vel.linear.x = pn_dot
vel.linear.y = pe_dot
vel.linear.z = pd_dot
def shift_point(self, pose, init):
pose.position.x = (pose.position.x - init.position.x)
pose.position.y = (pose.position.y - init.position.y)
pose.position.z = (pose.position.z - init.position.z)
|
1670031
|
import pytest
from pyball import PyBall
from pyball.models.config import SituationCode
@pytest.fixture(scope='module')
def situation_codes():
    pyball = PyBall()
    return pyball.get_situation_codes()
def test_get_situation_codes_returns_situation_codes(situation_codes):
    assert isinstance(situation_codes, list)
    assert isinstance(situation_codes[0], SituationCode)
|
1670063
|
import unittest
import numpy.testing as npt
import numpy as np
from doatools.model import UniformLinearArray, FarField1DSourcePlacement
from doatools.performance import crb_det_farfield_1d, crb_sto_farfield_1d, crb_stouc_farfield_1d
class TestCRB(unittest.TestCase):
def setUp(self):
self.wavelength = 1.0
def test_det_farfield_1d(self):
ula = UniformLinearArray(10, self.wavelength / 2)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
P = np.array([
[10.0 , 1.0+0.3j, 0.5-0.1j],
[1.0-0.3j, 11.0, 0.9-0.2j],
[0.5+0.1j, 0.9+0.2j, 9.0]
])
sigma = 1.0
n_snapshots = 100
CRB_actual = crb_det_farfield_1d(ula, sources, self.wavelength, P, sigma, n_snapshots)
CRB_expected = np.array([
[ 2.636339e-06, -2.947961e-08, -2.894480e-08],
[-2.947961e-08, 5.868361e-07, -1.254243e-08],
[-2.894480e-08, -1.254243e-08, 1.495266e-06]
])
npt.assert_allclose(CRB_actual, CRB_expected, rtol=1e-6)
def test_sto_farfield_1d(self):
ula = UniformLinearArray(10, self.wavelength / 2)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
P = np.array([
[10.0 , 1.0+0.3j, 0.5-0.1j],
[1.0-0.3j, 11.0, 0.9-0.2j],
[0.5+0.1j, 0.9+0.2j, 9.0]
])
sigma = 1.0
n_snapshots = 100
CRB_actual = crb_sto_farfield_1d(ula, sources, self.wavelength, P, sigma, n_snapshots)
CRB_expected = np.array([
[ 2.663109e-06, -3.037374e-08, -2.994223e-08],
[-3.037374e-08, 5.922466e-07, -1.287051e-08],
[-2.994223e-08, -1.287051e-08, 1.512018e-06]
])
npt.assert_allclose(CRB_actual, CRB_expected, rtol=1e-6)
def test_stouc_farfield_1d(self):
ula = UniformLinearArray(10, self.wavelength / 2)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
p = np.array([2.0, 3.0, 1.0])
sigma = 1.0
n_snapshots = 100
CRB_actual = crb_stouc_farfield_1d(ula, sources, self.wavelength, p, sigma, n_snapshots)
CRB_expected = np.array([
[ 1.3757938e-05, -3.7302575e-09, 4.3845873e-08],
[-3.7302575e-09, 2.2173076e-06, 5.0642214e-09],
[ 4.3845873e-08, 5.0642214e-09, 1.4740719e-05]
])
npt.assert_allclose(CRB_actual, CRB_expected, rtol=1e-6)
def test_convergence_farfield_1d(self):
# The three CRBs should converge when SNR is sufficiently high.
ula = UniformLinearArray(16, self.wavelength / 2)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 5))
p = np.diag(np.full((sources.size,), 1000.0))
sigma = 1.0 # 30 dB SNR
n_snapshots = 10
CRB_stouc = crb_stouc_farfield_1d(ula, sources, self.wavelength, p, sigma, n_snapshots)
CRB_sto = crb_sto_farfield_1d(ula, sources, self.wavelength, p, sigma, n_snapshots)
CRB_det = crb_det_farfield_1d(ula, sources, self.wavelength, p, sigma, n_snapshots)
npt.assert_allclose(np.diag(CRB_sto), np.diag(CRB_stouc), rtol=1e-2)
npt.assert_allclose(np.diag(CRB_det), np.diag(CRB_stouc), rtol=1e-2)
if __name__ == '__main__':
unittest.main()
|
1670074
|
import tree_kernels
import tree
# lambda: kernel parameter for the tree kernels below
# PrologString_sa = "t(a t(b t(d nil nil) t(e nil nil)) t(c nil t(f t(g nil nil) nil))) "
# PrologString_sb = "t(a t(b t(d nil nil) t(e nil nil)) t(c nil t(f t(g nil nil) nil))) "
PrologString_sa = "Tree (ROOT\n (S\n (NP (DT A) (NN man))\n (VP (VBZ is)\n (VP (VBG carrying)\n (NP (DT a) (NN canoe))\n (PP (IN with)\n (NP (DT a) (NN dog)))))\n (. .)))"
PrologString_sb = "Tree (ROOT\n (S\n (NP (DT A) (NN dog))\n (VP (VBZ is)\n (VP (VBG carrying)\n (NP\n (NP (DT a) (NN man))\n (PP (IN in)\n (NP (DT a) (NN canoe))))))\n (. .)))"
PrologString_sc = "Tree (ROOT\n (S\n (NP (DT A) (NN man))\n (VP (VBZ is)\n (VP (VBG carrying)\n (NP\n (NP (DT a) (NN dog))\n (PP (IN in)\n (NP (DT a) (NN canoe))))))\n (. .)))"
PrologString_sd = "Tree (ROOT\n (NP\n (NP (DT A) (NN girl) (NN dancing))\n (PP (IN on)\n (NP (DT a) (JJ sandy) (NN beach)))\n (. .)))"
sa = tree.Tree.fromPrologString(PrologString_sa)
sb = tree.Tree.fromPrologString(PrologString_sb)
sc = tree.Tree.fromPrologString(PrologString_sc)
sd = tree.Tree.fromPrologString(PrologString_sd)
# k = tree_kernels.KernelST(l=0.95)
# k = tree_kernels.KernelSST(l=0.5)
# k = tree_kernels.KernelPdak(l=0.5, gamma=0.5, beta=0.5)
# print(k.kernel(sb, sb))
# print(k.kernel(sc, sb))
# print(k.kernel(sa, sb))
# print(k.kernel(sd, sb))
# k = tree_kernels.KernelPdakMine(l=0.5, gamma=0.5, beta=0.5)
# print(k.kernel(sb, sb))
# print(k.kernel(sc, sb))
# print(k.kernel(sa, sb))
# print(k.kernel(sd, sb))
# k = tree_kernels.KernelPdakFast(l=0.5, gamma=0.5, beta=0.5)
# print(k.kernel(sb, sb))
# print(k.kernel(sc, sb))
# print(k.kernel(sa, sb))
# print(k.kernel(sd, sb))
# k.printKernelMatrix(dat)
|
1670090
|
from __future__ import absolute_import, unicode_literals
from functools import wraps
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import get_object_or_404, redirect as _redirect
from django.views.generic import ListView, DetailView, TemplateView
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import NoReverseMatch
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from feincms.content.application.models import app_reverse
from feincms.module.mixins import ContentView
from . import forms, app_settings, payment_providers
from .emails import send_pledge_completed_message
from .models import Project, Pledge, Backer, Category, Update
from .utils import get_object_or_none
# -----------------------------------
# decorators and mixins
# -----------------------------------
def get_session_pledge(request):
""" returns the last created pledge for the current session or None """
pledge_id = request.session.get('pledge_id', None)
if pledge_id:
return get_object_or_none(Pledge, pk=pledge_id)
return None
def requires_pledge(func):
""" Decorator to enforce current pledge as second parameter.
If no pledge is is found in session, show an error message. """
@wraps(func)
def _decorator(request, *args, **kwargs):
pledge = get_session_pledge(request)
if pledge:
return func(request, pledge, *args, **kwargs)
else:
return redirect('zipfelchappe_pledge_lost')
return _decorator
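# Example usage (a hypothetical view relying on the decorator above):
#
#   @requires_pledge
#   def payment_view(request, pledge):
#       ...  # pledge is guaranteed to be a Pledge instance here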
class PledgeRequiredMixin(object):
@method_decorator(requires_pledge)
def dispatch(self, request, pledge, *args, **kwargs):
self.pledge = pledge
return super(PledgeRequiredMixin, self).dispatch(request, *args, **kwargs)
def use_pledge_if_available(func):
@wraps(func)
def _decorator(request, *args, **kwargs):
pledge = get_session_pledge(request)
return func(request, pledge, *args, **kwargs)
return _decorator
def requires_pledge_cbv(clazz):
""" Class based decorator to save current pledge in self.pledge """
orig_dispatch = clazz.dispatch
def monkey_dispatch(self, request, *args, **kwargs):
self.pledge = get_session_pledge(request)
if self.pledge is not None:
return orig_dispatch(self, request, *args, **kwargs)
else:
return redirect('zipfelchappe_pledge_lost')
clazz.dispatch = monkey_dispatch
return clazz
class PledgeContextMixin(object):
""" Mixin to add the current pledge to the template context """
def get_context_data(self, *args, **kwargs):
context = super(PledgeContextMixin, self).get_context_data(*args, **kwargs)
pledge = getattr(self, 'pledge', get_session_pledge(self.request))
if pledge:
context.update({
'pledge': pledge,
'project': pledge.project
})
return context
class FeincmsRenderMixin(object):
""" This is required to use django template inheritance with CBVs """
def render_to_response(self, context, **response_kwargs):
return self.get_template_names(), context
def reverse(view_name, *args, **kwargs):
""" Reverse within our app context """
return app_reverse(view_name, app_settings.ROOT_URLS, args=args, kwargs=kwargs)
def redirect(view_name, *args, **kwargs):
""" Imitate django redirect() within our app context """
try:
return _redirect(reverse(view_name, *args, **kwargs))
except NoReverseMatch:
return _redirect(view_name, *args, **kwargs)
# -----------------------------------
# views
# -----------------------------------
class ProjectListView(FeincmsRenderMixin, ListView):
""" List view of all projects that are active or finished.
To change pagination count set ZIPFELCHAPPE_PAGINATE_BY in settings.
"""
context_object_name = "project_list"
paginate_by = app_settings.PAGINATE_BY
model = Project
def get_queryset(self):
return Project.objects.online().select_related()
def get_context_data(self, **kwargs):
context = super(ProjectListView, self).get_context_data(**kwargs)
context['category_list'] = Category.objects.all()
return context
class ProjectCategoryListView(ProjectListView):
""" Filtered project list view for only one category """
def get_queryset(self):
category = get_object_or_404(Category, slug=self.kwargs['slug'])
online_projects = Project.objects.online().select_related()
return online_projects.filter(categories=category)
class ProjectDetailView(FeincmsRenderMixin, ContentView):
""" Show status, description, updates, backers and comments of a project """
context_object_name = "project"
model = Project
def get_queryset(self):
# limit queryset to projects that have started.
return Project.objects.online().select_related('rewards')
def get_context_data(self, **kwargs):
context = super(ProjectDetailView, self).get_context_data(**kwargs)
context['disqus_shortname'] = app_settings.DISQUS_SHORTNAME
context['updates'] = context['project'].updates.filter(
status=Update.STATUS_PUBLISHED
)
# create a paginated list of backers.
pledges = context['project'].authorized_pledges
paginator = Paginator(pledges, app_settings.PAGINATE_BACKERS_BY)
context['backer_count'] = len(pledges)
context['paginator'] = paginator
        page = self.request.GET.get('backers-page', 1)
try:
context['page_obj'] = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
context['page_obj'] = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
context['page_obj'] = paginator.page(paginator.num_pages)
return context
class UpdateDetailView(FeincmsRenderMixin, DetailView):
""" Just a simple view of one project update for preview purposes """
context_object_name = 'update'
model = Update
def get_context_data(self, **kwargs):
context = super(UpdateDetailView, self).get_context_data(**kwargs)
context['project'] = self.get_object().project
return context
class ProjectDetailHasBackedView(ProjectDetailView):
""" This view is called at the end of the backing process. """
template_name = 'zipfelchappe/project_has_backed.html'
def get_queryset(self):
return Project.objects.online()
def get_context_data(self, **kwargs):
context = super(ProjectDetailHasBackedView, self).get_context_data(**kwargs)
if 'completed_pledge_id' in self.request.session:
pledge_id = self.request.session['completed_pledge_id']
del self.request.session['completed_pledge_id']
context['pledge'] = get_object_or_none(Pledge, pk=pledge_id)
return context
def backer_create_view(request, slug):
""" The main form to back a project. A lot of the magic here comes from
BackProjectForm including all validation. The main job of this view is
to save the pledge_id in the session and redirect to backer_authenticate.
A pledge is created but a user is not yet assigned.
"""
project = get_object_or_404(Project, slug=slug)
ExtraForm = project.extraform()
if project.is_over:
messages.info(request, _('This project has ended and does not accept'
' pledges anymore.'))
return redirect(
'zipfelchappe_project_detail', slug=project.slug)
session_pledge = get_session_pledge(request)
form_kwargs = {'project': project}
# If the session pledge has already been paid for, ignore it.
if session_pledge and session_pledge.project == project:
if session_pledge.status >= session_pledge.FAILED: # Force a new payment-ID.
del request.session['pledge_id']
else:
form_kwargs.update({'instance': session_pledge})
if request.method == 'POST':
form = forms.BackProjectForm(request.POST, **form_kwargs)
extraform = ExtraForm(request.POST, prefix="extra")
if form.is_valid() and extraform.is_valid():
pledge = form.save(commit=False)
pledge.extradata = extraform.clean()
pledge.save()
request.session['pledge_id'] = pledge.id
return redirect('zipfelchappe_backer_authenticate')
else:
form = forms.BackProjectForm(**form_kwargs)
extraform = ExtraForm(prefix="extra")
if request.session.get('pledge_id'):
del request.session['pledge_id']
return ('zipfelchappe/project_back_form.html', {
'project': project,
'form': form,
'extraform': extraform,
})
@requires_pledge
@login_required
def backer_authenticate(request, pledge):
""" save user to the current pledge and
redirect to the selected payment provider.
"""
payment_view = payment_providers[pledge.provider].payment_url()
backer, created = Backer.objects.get_or_create(user=request.user)
pledge.set_backer(backer)
pledge.save()
return redirect(payment_view)
def pledge_thankyou(request):
""" Send pledge completed message, redirect to thank you page """
pledge = get_session_pledge(request)
if not pledge:
return redirect('zipfelchappe_project_list')
else:
send_pledge_completed_message(request, pledge)
del request.session['pledge_id']
request.session['completed_pledge_id'] = pledge.pk
url = reverse('zipfelchappe_project_backed', slug=pledge.project.slug)
return redirect(url)
def pledge_cancel(request):
""" Remove current pledge from session """
pledge = get_session_pledge(request)
if not pledge:
return redirect('zipfelchappe_project_list')
else:
del request.session['pledge_id']
pledge.mark_failed('user cancelled payment')
messages.info(request, _('Your pledge was cancelled'))
return redirect('zipfelchappe_project_detail', slug=pledge.project.slug)
class PledgeLostView(FeincmsRenderMixin, TemplateView):
""" Error message showed by @pledge_required if not pledge was found """
template_name = "zipfelchappe/pledge_lost.html"
|
1670095
|
r"""
http://github.com/in4lio/yupp/
__ __ _____ _____
/\ \ /\ \ /\ _ \ _ \
\ \ \_\/ \_\/ \_\ \ \_\ \
\ \__ /\____/\ __/\ __/
\/_/\_\/___/\ \_\/\ \_\/
\/_/ \/_/ \/_/
yup.py -- shell of yupp preprocessor
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from past.builtins import execfile
from builtins import input
from builtins import str
from builtins import range
from builtins import open
import os
import sys
import re
import json
import traceback
from argparse import ArgumentParser
import stat
try:
import readline
except:
pass
from .yugen import log, trace
from .yugen import config, feedback, yushell, yuinit, yuparse, yueval, RESULT
from .yugen import make_ast_readable, reduce_emptiness, replace_steady
from .yulic import * #pylint: disable=wildcard-import
from .yuconfig import * #pylint: disable=wildcard-import,unused-wildcard-import
__version__ = VERSION
# * * * * * * * * * * * * * * * * * *
# * *
# * S H E L L *
# * *
# * * * * * * * * * * * * * * * * * *
TITLE = r""" __ __ _____ _____
/\ \ /\ \ /\ _ \ _ \ %(description)s
\ \ \_\/ \_\/ \_\ \ \_\ \ %(app)s %(version)s
\ \__ /\____/\ __/\ __/
\/_/\_\/___/\ \_\/\ \_\/ %(copyright)s
\/_/ \/_/ \/_/ %(holder)s (%(email)s)
""" % { 'description' : DESCRIPTION, 'app': APP, 'version': VERSION
, 'copyright': COPYRIGHT, 'holder': HOLDER, 'email': EMAIL }
PP_I = '<--'
PP_O = '-->'
PP_FILE = '[%s]'
OK = '* OK *'
FAIL = '\n%s: %s'
___ = '.' * 79
PROMPT = '[yupp]# '
REPL_TEST = 'test'
REPL_EXIT = 'exit'
TR_FILE = 'trace.file = %s\n'
E_YUGEN = '.yugen'
E_YUCFG = '.yuconfig'
re_e_yu = re.compile( r'\.yu(?:-([^.]+))?$', flags = re.IGNORECASE )
E_BAK = '.bak'
E_AST = '.ast'
QUIET_HELP = """
do not show usual greeting and other information
"""
QUIET = False
TYPE_OUTPUT_HELP = """
show content of an output file
"""
TYPE_OUTPUT = False
READ_ONLY_HELP = """
"do not make file read-only"
"""
READ_ONLY = True
def shell():
shell.quiet = QUIET
shell.type_output = TYPE_OUTPUT
shell.output_dir = ''
# -- traceback exceptions
shell.traceback = TRACEBACK
shell.read_only = READ_ONLY
shell()
SYSTEM_EXIT_HELP = 'Moreover, you can pass the arguments through a response file: `yup.py @FILE`.' \
    ' The preprocessor exit status is the number of unsuccessfully processed files multiplied by 4,' \
    ' or 2 on a command-line argument error, or 1 on a program execution error,' \
    ' or zero on success.'
# ---------------------------------------------------------------------------
def shell_parse_cli_arguments( arglist ):
argp = ArgumentParser(
description = 'yupp, %(description)s' % { 'description': DESCRIPTION }
, epilog = SYSTEM_EXIT_HELP
)
argp.add_argument( '--version', action = 'version', version = '%(app)s %(version)s' % { 'app': APP, 'version': VERSION })
argp.add_argument( 'files', metavar = 'FILE', type = str, nargs = '*', help = "an input file" )
argp.add_argument( '-q', '--quiet', action = 'store_true', dest = 'quiet', default = shell.quiet
, help = QUIET_HELP )
argp.add_argument( '-d', action = 'append', metavar = 'DIR', dest = 'directory'
, help = "an import directory" )
argp.add_argument( '-o', '--output', metavar = 'DIR', dest = 'output_dir', default = ''
, help = "an output directory" )
argp.add_argument( '--no-read-only', action = 'store_false', dest = 'read_only', default = shell.read_only
, help = READ_ONLY_HELP )
# -- preprocessor options
argp.add_argument( '--pp-skip-comments', metavar = 'TYPE', type = int, dest = 'pp_skip_comments'
, choices = list( range( 0, 4 )), help = PP_SKIP_COMMENTS_HELP )
argp.add_argument( '--pp-no-trim-app-indent', action = 'store_false', dest = 'pp_trim_app_indent' )
argp.add_argument( '--pp-trim-app-indent', action = 'store_true', dest = 'pp_trim_app_indent'
, help = PP_TRIM_APP_INDENT_HELP )
argp.add_argument( '--pp-no-reduce-emptiness', action = 'store_false', dest = 'pp_reduce_emptiness' )
argp.add_argument( '--pp-reduce-emptiness', action = 'store_true', dest = 'pp_reduce_emptiness'
, help = PP_REDUCE_EMPTINESS_HELP )
argp.add_argument( '--pp-no-browse', action = 'store_false', dest = 'pp_browse' )
argp.add_argument( '--pp-browse', action = 'store_true', dest = 'pp_browse'
, help = PP_BROWSE_HELP )
argp.add_argument( '-D', '--pp-define', action = 'append', metavar = 'CONST', dest = 'pp_define'
, help = "definition of a constant" )
argp.add_argument( '-Wno-unbound', '--warn-no-unbound-application', action = 'store_false'
, dest = 'warn_unbound_application' )
argp.add_argument( '-Wunbound', '--warn-unbound-application', action = 'store_true'
, dest = 'warn_unbound_application', help = WARN_UNBOUND_APPLICATION_HELP )
# -- debug options
argp.add_argument( '-l', '--log', metavar = 'LEVEL', type = int, dest = 'log_level'
, default = ( LOG_LEVEL ), choices = list( range( 1, 6 ))
, help = LOG_LEVEL_HELP )
argp.add_argument( '-t', '--trace', metavar = 'STAGE', type = int, dest = 'trace_stage'
, default = TRACE_STAGE, choices = list( range( 0, 4 ))
, help = TRACE_STAGE_HELP )
argp.add_argument( '-b', '--traceback', metavar = 'TYPE', type = int, dest = 'traceback'
, default = TRACEBACK, choices = list( range( 0, 3 ))
, help = TRACEBACK_HELP )
argp.add_argument( '--type-file', action = 'store_true', dest = 'type_output', default = shell.type_output
, help = TYPE_OUTPUT_HELP )
argp.add_argument( '-i', '--input', metavar = 'TEXT', type = str, dest = 'text', default = ''
, help = "an input text (used by Web Console)" )
argp.add_argument( '--input-source', metavar = 'NAME', type = str, dest = 'text_source', default = ''
, help = "an input text source (used by Web Console)" )
argp.set_defaults(
directory = []
, pp_skip_comments = PP_SKIP_COMMENTS
, pp_trim_app_indent = PP_TRIM_APP_INDENT
, pp_reduce_emptiness = PP_REDUCE_EMPTINESS
, pp_browse = PP_BROWSE
, pp_define = []
, warn_unbound_application = WARN_UNBOUND_APPLICATION
)
if ( len( arglist ) == 1 ) and arglist[ 0 ].startswith( '@' ):
# -- get arguments from response file
try:
with open( arglist[ 0 ][ 1: ], 'r' ) as f:
return argp.parse_args( f.read().split())
except IOError as e:
# -- file operation failure
log.critical( FAIL, type( e ).__name__, str( e ))
sys.exit( 2 )
return argp.parse_args( arglist )
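# A response file is simply the command line split on whitespace; e.g. a
# hypothetical build.rsp containing "-q -o build main.yu" is used as:
#     yup.py @build.rsp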
# ---------------------------------------------------------------------------
def _exec_yuconfig_script( fn_cfg, context ):
if os.path.isfile( fn_cfg ):
try:
execfile( fn_cfg, context )
except Exception as e: #pylint: disable=broad-except
log.error( 'unable to execute configuration script\n'
'File "%s"\n%s: %s', fn_cfg, type( e ).__name__, str( e ))
# ---------------------------------------------------------------------------
def shell_parse_yuconfig( fn ):
# -- default configuration
context = yuconfig_defaults()
context[ 'directory' ] = []
context[ 'dependency' ] = []
context[ 'pp_define' ] = []
# -- global configuration
_exec_yuconfig_script( '' + E_YUCFG, context )
# -- configuration for concrete source file
_exec_yuconfig_script( os.path.splitext( fn )[ 0 ] + E_YUCFG, context )
cfg = { k: val for k, val in list( context.items()) if isinstance( val, yuconfig_types )}
if isinstance( context[ 'directory' ], list ):
cfg[ 'directory' ] = context[ 'directory' ]
if isinstance( context[ 'dependency' ], list ):
cfg[ 'dependency' ] = context[ 'dependency' ]
if isinstance( context[ 'pp_define' ], list ):
cfg[ 'pp_define' ] = context[ 'pp_define' ]
return cfg
# ---------------------------------------------------------------------------
def shell_input():
try:
val = input( PROMPT )
return val if isinstance( val, str ) else val.decode( 'utf8' )
except ( EOFError, ValueError ):
# -- e.g. run into environment without terminal input
print()
return REPL_EXIT
# ---------------------------------------------------------------------------
def shell_backup( fn ):
if os.path.isfile( fn ):
fn_bak = fn + E_BAK
if os.path.isfile( fn_bak ):
os.chmod( fn_bak, stat.S_IWRITE )
os.remove( fn_bak )
os.rename( fn, fn_bak )
# ---------------------------------------------------------------------------
def shell_savetofile( fn, text ):
with open( fn, mode='w', encoding='utf8' ) as f:
f.write( text )
# * * * * * * * * * * * * * * * * * *
# * *
# * P P W R A P P E R *
# * *
# * * * * * * * * * * * * * * * * * *
# ---------------------------------------------------------------------------
def _pp_configure( cfg ):
log.setLevel( cfg.get( 'log_level', LOG_LEVEL ) * LOG_LEVEL__SCALE_ )
trace.stage = cfg.get( 'trace_stage', TRACE_STAGE )
config.pp_skip_comments = cfg.get( 'pp_skip_comments', PP_SKIP_COMMENTS )
config.pp_trim_app_indent = cfg.get( 'pp_trim_app_indent', PP_TRIM_APP_INDENT )
config.pp_reduce_emptiness = cfg.get( 'pp_reduce_emptiness', PP_REDUCE_EMPTINESS )
config.pp_browse = cfg.get( 'pp_browse', PP_BROWSE )
config.pp_define = cfg.get( 'pp_define', [])
config.warn_unbound_application = cfg.get( 'warn_unbound_application', WARN_UNBOUND_APPLICATION )
config.directory = cfg.get( 'directory', [])
shell.quiet = cfg.get( 'quiet', QUIET )
shell.type_output = cfg.get( 'type_output', TYPE_OUTPUT )
shell.output_dir = cfg.get( 'output_dir', '' )
shell.traceback = cfg.get( 'traceback', TRACEBACK )
shell.read_only = cfg.get( 'read_only', READ_ONLY )
# DEBUG OUTPUT
# if log.level != LOG_LEVEL * LOG_LEVEL__SCALE_:
# print( 'log_level', log.level )
# if trace.stage != TRACE_STAGE:
# print( 'stage', trace.stage )
# if config.pp_skip_comments != PP_SKIP_COMMENTS:
# print( 'pp_skip_comments', config.pp_skip_comments )
# if config.pp_trim_app_indent != PP_TRIM_APP_INDENT:
# print( 'pp_trim_app_indent', config.pp_trim_app_indent )
# if config.pp_reduce_emptiness != PP_REDUCE_EMPTINESS:
# print( 'pp_reduce_emptiness', config.pp_reduce_emptiness )
# if config.pp_browse != PP_BROWSE:
# print( 'pp_browse', config.pp_browse )
# if config.pp_define != []:
# print( 'pp_define', config.pp_define )
# if config.warn_unbound_application != WARN_UNBOUND_APPLICATION:
# print( 'warn_unbound_application', config.warn_unbound_application )
# if config.directory != []:
# print( 'directory', config.directory )
# if shell.quiet != QUIET:
# print( 'quiet', shell.quiet )
# if shell.type_output != TYPE_OUTPUT:
# print( 'type_output', shell.type_output )
# if shell.output_dir != '':
# print( 'output_dir', shell.output_dir )
# if shell.traceback != TRACEBACK:
# print( 'traceback', shell.traceback )
# if shell.read_only != READ_ONLY:
# print( 'read_only', shell.read_only )
# ---------------------------------------------------------------------------
def _pp(): #pylint: disable=too-many-statements
"""
return yueval( yuparse( yushell.input_file ))
(also tracing and logging)
"""
# ---------------
trace.set_current( TRACE_STAGE_PARSE )
TR2F = trace.enabled and trace.file
LOG = not trace.enabled or trace.file
# -- parse
try:
if TR2F:
trace.info( yushell.source[ yushell.input_file ][ 1 ])
ast = yuparse( yushell.input_file )
if trace.enabled:
trace.info( repr( ast ))
trace.info( trace.TEMPL_DEEPEST, trace.deepest )
except: #pylint: disable=bare-except
e_type, e, tb = sys.exc_info()
msg = '\n'
arg = e.args[ 0 ] if e.args else None
if (( shell.traceback == TRACEBACK_ALL ) or
( shell.traceback == TRACEBACK_PYTHON ) and isinstance( arg, str ) and arg.startswith( 'python' )):
# -- enabled traceback
msg += ''.join( traceback.format_tb( tb ))
msg += ''.join( traceback.format_exception_only( e_type, e ))
if TR2F:
trace.info( msg )
if LOG:
log.error( msg )
if trace.enabled:
trace.info( trace.TEMPL_DEEPEST, trace.deepest )
if TR2F:
trace.info( ___ )
return False, ''
# -- eval
trace.set_current( TRACE_STAGE_EVAL )
TR2F = trace.enabled and trace.file
LOG = not trace.enabled or trace.file
try:
plain = yueval( ast )
ok = isinstance( plain, str )
if ok:
plain = replace_steady( reduce_emptiness( plain ))
else:
plain = make_ast_readable( plain )
log.error( 'unable to translate input text into plain text' )
if yushell.hazard:
            log.warn( 'the following usage of built-in function(s) may be the reason:'
                + ''.join( x.loc() for x in yushell.hazard ))
if trace.enabled:
trace.info( plain )
trace.info( trace.TEMPL_DEEPEST, trace.deepest )
except: #pylint: disable=bare-except
e_type, e, tb = sys.exc_info()
msg = '\n'
arg = e.args[ 0 ] if e.args else None
if (( shell.traceback == TRACEBACK_ALL ) or
( shell.traceback == TRACEBACK_PYTHON ) and isinstance( arg, str ) and arg.startswith( 'python' )):
# -- enabled traceback
msg += ''.join( traceback.format_tb( tb ))
msg += ''.join( traceback.format_exception_only( e_type, e ))
if TR2F:
trace.info( msg )
if LOG:
log.error( msg )
if trace.enabled:
trace.info( trace.TEMPL_DEEPEST, trace.deepest )
if TR2F:
trace.info( ___ )
return False, ''
if TR2F:
trace.info( ___ )
return ( ok, plain )
# ---------------------------------------------------------------------------
def _output_fn( fn ):
fn_o, e = os.path.splitext( os.path.join( shell.output_dir, os.path.basename( fn )) if shell.output_dir else fn )
if not e:
# ---- * --> *.yugen
return fn_o + E_YUGEN
e_yu = re_e_yu.search( e )
if e_yu is None:
# ---- *.* --> *.yugen.*
return fn_o + E_YUGEN + e
if e_yu.group( 1 ):
# ---- *.yu-* --> *.*
return fn_o + '.' + e_yu.group( 1 )
if not os.path.splitext( fn_o )[ 1 ]:
# ---- *.yu --> *.yugen
return fn_o + E_YUGEN
# ---- *.*.yu --> *.*
return fn_o
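# Examples of the mapping above: 'main.yu' -> 'main.yugen',
# 'main.c.yu' -> 'main.c', 'main.yu-h' -> 'main.h',
# 'main.txt' -> 'main.yugen.txt'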
# ---------------------------------------------------------------------------
def _pp_stream( _stream, fn, fn_o ):
ok = False
plain = None
try:
text = _stream.read()
# -- preprocessing
yushell( text, fn, fn_o )
yuinit()
ok, plain = _pp()
if ok:
# -- check if the name of output file is changed
if feedback.output_file:
fn_o = feedback.output_file
# -- output file backup
shell_backup( fn_o )
# -- output file writing
shell_savetofile( fn_o, plain )
if shell.read_only:
os.chmod( fn_o, stat.S_IREAD )
if isinstance( plain, RESULT ):
# -- browse writing
with open( fn_o + '.json', mode='w', encoding='utf8' ) as f:
f.write( str( json.dumps({
'files': sorted( RESULT.files, key=RESULT.files.get )
, 'browse': plain.browse
, 'offset': plain.offset
})))
else:
if plain:
# -- plain contains AST
fn_o = os.path.splitext( fn_o )[ 0 ] + E_AST
# -- output file writing
shell_savetofile( fn_o, plain )
log.warn( 'result was saved as AST file' )
except IOError as e:
# -- e.g. file operation failure
log.critical( FAIL, type( e ).__name__, str( e ))
return ( ok, plain, fn_o )
# ---------------------------------------------------------------------------
def _pp_file( fn ):
try:
# -- open input file
f = open( fn, 'r' )
except IOError as e:
# -- e.g. file operation failure
log.critical( FAIL, type( e ).__name__, str( e ))
return False
if not shell.quiet:
print( PP_I, PP_FILE % fn )
# -- figure out a name for output file
fn_o = _output_fn( fn )
ok, plain, fn_o = _pp_stream( f, fn, fn_o )
f.close()
print()
if ok:
if shell.type_output:
print( plain )
if not shell.quiet:
print( PP_O, PP_FILE % fn_o )
print( OK )
else:
if plain:
# -- plain contains AST
if shell.type_output:
print( plain )
if not shell.quiet:
print( PP_O, PP_FILE % fn_o )
return ok
# ---------------------------------------------------------------------------
def _pp_test( text, echo = True ):
if not text.strip():
# -- ignore empty text
return True
if echo:
print( PP_I, text )
yushell( text )
yuinit()
ok, plain = _pp()
print()
if plain:
print( PP_O, plain )
if ok:
print( OK )
return ok
# ---------------------------------------------------------------------------
def _pp_text( text, text_source = None ):
yushell( text, text_source )
yuinit()
ok, plain = _pp()
print()
if plain:
print( plain )
if ok:
print( OK )
return ok
# ---------------------------------------------------------------------------
def _getmtime( fn ):
try:
return os.path.getmtime( fn )
except Exception as e:
log.warn( 'unable to check dependency\n%s: %s', type( e ).__name__, str( e ))
raise
# ---------------------------------------------------------------------------
def proc_stream( _stream, fn ):
"""
Stream preprocessing (for Python package).
"""
# -- read shebang and magic comment again
_stream.seek( 0 )
# -- figure out a name for output file
fn_o = _output_fn( fn )
cfg = shell_parse_yuconfig( fn )
_pp_configure( cfg )
# -- check that we can skip re-preprocessing
if not cfg.get( 'force', False ) and os.path.isfile( fn_o ):
try:
deps = cfg.get( 'dependency', [])
deps.append( fn )
t = os.path.getmtime( fn_o )
# -- if sources of dependencies are not changed...
if all( _getmtime( d ) < t for d in deps ):
# -- ...just read output file
with open( fn_o, 'r' ) as f:
data = f.read()
# print( 'skipped yupp running' )
return ( True, data, fn_o, 1 if 'coding:' in _stream.readline() else 2 )
except: #pylint: disable=bare-except
# -- process input file in the usual way
pass
ok, data, _ = _pp_stream( _stream, fn, fn_o )
return ( ok, data, fn_o, yushell.shrink )
# ---------------------------------------------------------------------------
def proc_file( fn ):
"""
File preprocessing (for Python package).
"""
try:
# -- open input file
f = open( fn, 'r' )
except IOError as e:
# -- e.g. file operation failure
log.critical( FAIL, type( e ).__name__, str( e ))
return ( False, None )
ok, _, fn_o, _ = proc_stream( f, fn )
return ( ok, fn_o )
# ---------------------------------------------------------------------------
def cli( arglist ):
args = shell_parse_cli_arguments( arglist )
if not args.files:
args.pp_browse = False
_pp_configure( args.__dict__ )
if not shell.quiet:
print( TITLE )
if trace.stage > 0 and trace.file:
print( TR_FILE % ( trace.file ))
# -- startup testing
_pp_test( r"""($($\y:u.\m.\...(m y($\C.\p.(r)e p)($\ro.(ce)s)))so r)""" )
_pp_test( r"""
""" )
if args.text:
# -- input text preprocessing
_pp_text( args.text, args.text_source )
if args.files:
f_failed = 0
# -- input files preprocessing
for path in args.files:
config.passage = 0
while True:
if not _pp_file( path ):
f_failed += 1
break
if not feedback.repeat:
break
config.passage += 1
return f_failed << 2
else:
# -- Read-Eval-Print Loop
print( PROMPT + 'Type "%s" or source code + "%s".' % ( REPL_EXIT, REPL_TEST ))
test = ''
while True:
line = shell_input()
stripped = line.strip()
if stripped == REPL_EXIT:
# -- quit REPL
break
if stripped == REPL_TEST:
# -- run preprocessor
_pp_test( test, False )
test = ''
else:
test += line + '\n'
return 0
# ---------------------------------------------------------------------------
if __name__ == '__main__':
# -- sys.exit() redefined in Web Console
sys.exit( cli( sys.argv[ 1: ]))
|
1670097
|
from bs4 import BeautifulSoup
import requests
from datetime import datetime
from selenium import webdriver
from time import sleep
from dateutil.parser import parse
__author__ = '<NAME>'
class Tracker(object):
'''
    This class contains the common features of each of the trackers below.
    Each has the following attributes:
tracking_no: Tracking number of the shipment
page: Raw HTML data of the page
tracking_data: A list of checkpoints of the shipment
status: The current/overall status of the shipment
'''
def __init__(self,tracking_no):
'''
Returns a Scraper Object containing the above Attributes
'''
self.tracking_no = str(tracking_no)
self.page = None
self.tracking_data = []
self.status = None
def Get_Tracking_Data(self):
'''
Helper function to get the tracking_data
'''
self.Get_Page()
self.Extract_Checkpoints()
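    # Subclasses must implement Get_Page() and Extract_Checkpoints();
    # Get_Tracking_Data() simply chains the two.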
class BluedartTracker(Tracker):
'''
This class scrapes tracking data from the bluedart website.
'''
exclude_list = ['Location','Date','Waybill','Details','No.']
def __init__(self,tracking_no):
Tracker.__init__(self,tracking_no)
def Get_Page(self):
'''
Fetches raw HTML data from the site for a given tracking_no
'''
url = 'http://www.bluedart.com/servlet/RoutingServlet'
data = {'handler' : 'tnt',
'action' : 'awbquery',
'awb' : 'awb' ,
'numbers' : self.tracking_no}
# request the server for the HTML data
response = requests.post(url,data=data,verify=False)
self.page = response.content
def is_valid(self,text):
for unwanted in self.exclude_list:
if text is None or unwanted in text:
return False
return True
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
raise Exception("The HTML data was not fetched due to some reasons")
# Check for invalid tracking number
        if 'Numbers Not Found -' in self.page or 'Invalid Query Numbers -' in self.page:
raise ValueError('The Tracking number is invalid')
soup = BeautifulSoup(self.page,'html.parser')
# Assign the current status of the shipment
if 'Returned To Origin' in self.page: # Prioritise this first
self.status = 'R'
elif 'SHIPMENT DELIVERED' in self.page: # If the above is false, only then check for this
self.status = 'C'
else: # The shipment is in Transit
self.status = 'T'
# Checkpoints extraction begins here
cells = []
'''
The below for loop goes through the table of checkpoints adding relevant cell data to cells[]
'''
for cell in soup.findAll('td', {"align" : "LEFT"}):
if cell.font["size"] == '1':
cell_text = cell.font.string
if self.is_valid(cell_text):
cells.append(cell_text)
# 4 cells in each row
        rows = [cells[cell:cell + 4] for cell in range(0, len(cells), 4)]
for row in rows:
'''
Each row will have 4 columns: Location--Status--Date--Time
Merge column three and four and format it.
Append to tracking_data list
'''
location = row[0]
status = row[1]
date_time = ' '.join((row[2],row[3]))
date_time_format = "%d-%b-%Y %H:%M"
date_time = datetime.strptime(date_time,date_time_format)
self.tracking_data.append({'status':status,'date':date_time,'location':location})
# Sort the checkpoints based on Date and Time --- this is important
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class AramexTracker(Tracker):
'''
This class scrapes data from the Aramex website
'''
def __init__(self, tracking_no):
Tracker.__init__(self,tracking_no)
def wait_till_page_load(self,driver,max_wait_time):
'''
This method pauses execution until the page is loaded fully, including
data delayed by JavaScript
'''
sleepCount = max_wait_time # wait for a fixed max_wait_time only
# A page that's fully loaded has the word 'Current Status'
while 'Current Status' not in driver.page_source:
sleep(1)
sleepCount -= 1
            if sleepCount == 0:
raise Exception('Request timed out!') # if max_wait_time is exceeded!
def remove_non_ascii(self,str_to_clean):
return ''.join([x for x in str_to_clean if ord(x) < 128])
def Get_Page(self):
'''
Fetches raw HTML data from the site for a given tracking_no
'''
# Simply encode the correct url as a string
url = 'https://www.aramex.com/express/track-results-multiple.aspx?ShipmentNumber='
url += self.tracking_no
driver = webdriver.PhantomJS() # create a selenium webdriver
driver.get(url) # make it send a request with the above url
self.wait_till_page_load(driver,10) # wait till the page is fully loaded
self.page = driver.page_source # store the html source
driver.quit() # stop the webdriver
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
raise Exception("The HTML data was not fetched due to some reasons")
# Check for invalid tracking number
if 'Invalid number / data not currently available' in self.page:
raise ValueError('Invalid number/data not currently available')
# Checkpoints extraction begins here
soup = BeautifulSoup(self.page,'html.parser')
# Assign the current status of the shipment - self.status
current_status = soup.find('span',id='spnCurrentStatusValue').text.strip()
if current_status == 'Supporting Document Returned to Shipper':
self.status = 'R'
elif current_status == 'Delivered':
self.status = 'C'
else: # The shipment is in Transit
self.status = 'T'
# Get all rows of the Checkpoints table (no particular order)
rows = soup.findAll('div',{'class':'fullWidth odd leftFloat bottomGreyBorder'})
rows += soup.findAll('div',{'class':'fullWidth even leftFloat bottomGreyBorder'})
for row in rows:
# Get the data
location = row.find('div',{'class':'leftFloat thirdWidth'}).string.strip()
date_time = row.find('div',{'class':'leftFloat shipmentSummaryLabel'}).string.strip()
status = row.find('div',{'class':'leftFloat shipmentHistoryActivityLabel'}).string.strip()
# Clean it
location = self.remove_non_ascii(location)
            date_time = parse(self.remove_non_ascii(date_time))
status = self.remove_non_ascii(status)
# Add it to the checkpoint list
self.tracking_data.append({'status':status,'date':date_time,'location':location})
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class DHLTracker(Tracker):
'''
This class scrapes data from the DHL website
'''
def __init__(self, tracking_no):
Tracker.__init__(self,tracking_no)
def wait_till_page_load(self,driver,max_wait_time):
'''
This method pauses execution until the page is loaded fully, including
data delayed by JavaScript
'''
sleepCount = max_wait_time # wait for a fixed max_wait_time only
        # A fully loaded page contains the tracking number (or an 'Invalid Input' error)
while self.tracking_no not in driver.page_source and 'Invalid Input' not in driver.page_source:
sleep(1)
sleepCount -= 1
            if sleepCount == 0:
raise Exception('Request timed out!') # if max_wait_time is exceeded!
def Get_Page(self):
'''
Fetches raw HTML data from the site for a given tracking_no
'''
# Simply encode the correct url as a string
url = 'http://www.dhl.co.in/en/express/tracking.html?AWB={}&brand=DHL'.format(self.tracking_no)
driver = webdriver.PhantomJS() # create a selenium webdriver
driver.get(url) # make it send a request with the above url
self.wait_till_page_load(driver,10) # wait till the page is fully loaded
self.page = driver.page_source # store the html source
driver.quit() # stop the webdriver
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
raise Exception("The HTML data was not fetched due to some reasons")
soup = BeautifulSoup(self.page,'html.parser')
# Check for invalid tracking number by checking if table element is present
        if soup.find('thead') is None:
raise ValueError('Invalid tracking number')
# Assign the current status of the shipment - self.status
if 'Returned' in self.page:
self.status = 'R'
elif 'Signed for by:' in self.page:
self.status = 'C'
else: # The shipment is in Transit
self.status = 'T'
# The full checkpoints table div.
table = soup.find('table',{'class':'result-checkpoints'}).contents
cur_date = None # The date of the next few checkpoints, initially None
checkpoint = None
for element in table:
if element.name == 'thead':
# This has the date for the next few checkpoints
cur_date = element.find('th',{'colspan':'2'}).string.strip() + ' '
elif element.name == 'tbody':
# A checkpoint whose date = cur_date
checkpoint = {'status':'','date':cur_date,'location':''}
tds = element.findAll('td')
checkpoint['status'] = tds[1].string.strip()
checkpoint['location'] = tds[2].string.strip()
checkpoint['date'] += tds[3].string.strip()
                checkpoint['date'] = parse(checkpoint['date'])
self.tracking_data.append(checkpoint)
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class Skynet_Tracker(Tracker):
'''
This class scrapes tracking data from the Skynet website.
'''
def __init__(self,tracking_no):
Tracker.__init__(self,tracking_no)
def Get_Page(self):
'''
Fetches raw HTML data from the site for a given tracking_no
'''
url = 'https://www.skynetwwe.info/ShipmentTrackSingle.aspx?textfield={}&radiobutton=SB'.format(self.tracking_no)
headers = {
'Host': 'www.skynetwwe.info',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'DNT': '1',
'Cookie': 'ASP.NET_SessionId=aletb2fx1kqixq55kmblbvn4',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0'
}
# request the server for the HTML data
response = requests.post(url,headers=headers,verify=False)
self.page = response.content
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
raise Exception("The HTML data was not fetched due to some reasons")
soup = BeautifulSoup(self.page,'html.parser')
invalid_tracking_no = soup.find('span',{'id':'ctl00_ContentPlaceHolder1_lblsMsg','class':'ErrorMessage','style':'font-family:Calibri;font-size:9pt;font-weight:bold;','name':'lblsMsg'})
if invalid_tracking_no is not None:
raise ValueError('The Tracking number is invalid')
# Assign the current status of the shipment
if 'Delivered' in self.page:
self.status = 'C'
else: # The shipment is in Transit
self.status = 'T'
# Checkpoints extraction begins here
rows = soup.findAll('tr',{'class':'gridItem'}) + soup.findAll('tr',{'class':'gridAltItem'})
for row in rows:
'''
Each row will have 4 columns: Date--Time--Status--Location
Merge column one and two and format it.
Append to tracking_data list
'''
row_cells = row.findAll('td')
date = row_cells[0].string.strip()
time = row_cells[1].string.strip()
date_time = ' '.join([date,time])
date_time_format = "%d %b %Y %H:%M"
date_time = datetime.strptime(date_time,date_time_format)
status = row_cells[2].string.strip()
location = row_cells[3].string.strip()
self.tracking_data.append({'status':status,'date':date_time,'location':location})
# Sort the checkpoints based on Date and Time --- this is important
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class Overnite_Tracker(Tracker):
'''
This class scrapes tracking data from the Overnite express website.
'''
def __init__(self,tracking_no):
Tracker.__init__(self,tracking_no)
def Get_Page(self):
'''
Fetches raw HTML data from the site for a given tracking_no
'''
url = 'http://www.overnitenet.com/Web-Track.aspx'
data = {
'__EVENTTARGET':'',
'__EVENTARGUMENT':'',
'__VIEWSTATE':'/<KEY>',
'__EVENTVALIDATION':'/<KEY>',
'ctl00$Content$rb':'rdAwbNo',
'ctl00$Content$txtAWB':self.tracking_no,
'ctl00$Content$ValidatorCalloutExtender6_ClientState':'',
'ctl00$Content$imgbtnTrack.x':'28',
'ctl00$Content$imgbtnTrack.y':'8'
}
headers = {
'Host': 'www.overnitenet.com',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'DNT': '1',
'Referer': 'http://www.overnitenet.com/Web-Track.aspx',
'Cookie': 'ASP.NET_SessionId=3ncsag55xq0z4vqltg3egbr4',
'Connection': 'keep-alive'
}
# request the server for the HTML data
response = requests.post(url,data=data,headers=headers,verify=False)
self.page = response.content
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
raise Exception("The HTML data was not fetched due to some reasons")
soup = BeautifulSoup(self.page,'html.parser')
if 'Delivery information not found' in self.page:
raise ValueError('The Tracking number is invalid/Tracking number is over 45 days old.')
# Assign the current status of the shipment
if 'Delivered on' in self.page:
self.status = 'C'
else: # The shipment is in Transit
self.status = 'T'
# Checkpoints extraction begins here
table = soup.findAll('table',{'cellpadding':'1','cellspacing':'1','border':'1','align':'center','style':"width:800px;border-color:#034291;"})[1]
rows = table.findAll('tr')[1:]
for row in rows:
'''
Each row will have 3 columns: Date--Location--Status
'''
row_cells = row.findAll('td')
date = row_cells[0].string.strip()
date = datetime.strptime(date,"%A, %B %d, %Y")
location = row_cells[1].find('a').string.strip()
            if not location:  # ignore the days which are holidays
continue
status = row_cells[2].text.strip()
self.tracking_data.append({'status':status,'date':date,'location':location})
# Sort the checkpoints based on Date and Time --- this is important
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class Ecomm_Tracker(Tracker):
'''
This class scrapes tracking data from the Ecomm express website.
'''
def __init__(self,tracking_no):
Tracker.__init__(self,tracking_no)
def Get_Page(self):
'''
Fetches raw HTML data from the site for a given tracking_no
'''
url = 'https://billing.ecomexpress.in/track_me/multipleawb_open/?awb={}&order=&news_go=track+now'.format(self.tracking_no)
data = {
'awb':self.tracking_no,
'order':'',
'news_go':'track_now'
}
headers = {
'Host': 'billing.ecomexpress.in',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'DNT': '1',
'Connection': 'keep-alive'
}
# request the server for the HTML data
response = requests.get(url,data=data,headers=headers,verify=False)
self.page = response.content
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
raise Exception("The HTML data was not fetched due to some reasons")
# use a different parser, page contains broken HTML
soup = BeautifulSoup(self.page,'html5lib')
if self.tracking_no not in self.page:
raise ValueError('The Tracking number is invalid.')
# Assign the current status of the shipment
table = soup.find('table',{'class':'table'}).find('tbody')
rows = table.findAll('tr')
present_status = rows[0].findAll('td')[1].text.strip()
        if present_status == 'Delivered':
self.status = 'C'
elif 'Shipment Redirected under' in present_status:
self.status = 'R'
else: # If not the above two, then the shipment is in Transit
self.status = 'T'
# Checkpoints extraction begins here
for row in rows:
'''
Each row will have 2 columns: (Date|Time, Location) --- (Status)
'''
row_cells = row.findAll('td')
date,location = row_cells[0].string.strip().split(' , ')
date = datetime.strptime(date,"%d-%m-%Y | %H:%M:%S")
status = row_cells[1].text.strip()
self.tracking_data.append({'status':status,'date':date,'location':location})
# Sort the checkpoints based on Date and Time --- this is important
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class Gati_Tracker(Tracker):
'''
This class scrapes tracking data from the Gati website.
'''
def __init__(self,tracking_no):
Tracker.__init__(self,tracking_no)
def Get_Page(self):
'''
Fetches raw XML data from the site for a given tracking_no
'''
url = 'http://www.gati.com/webservices/gatiicedkttrack.jsp?dktno=' + self.tracking_no
response = requests.get(url)
self.page = response.text
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
soup = BeautifulSoup(self.page,'xml')
if soup.find('result').string.strip() == 'failed':
raise ValueError('The Tracking number is invalid.')
status = soup.find('DOCKET_STATUS').string.strip()
if status == 'Delivered':
self.status = 'C'
elif status == 'Rebooked':
self.status = 'R'
else:
self.status = 'T'
# Checkpoints extraction begins here
rows = soup.findAll('ROW')
for row in rows:
'''
Each row has four columns:
date --- time --- location --- status
Merge #1 and #2
Append the 3 to self.tracking_data
'''
date = row.find('INTRANSIT_DATE').string.strip()
time = row.find('INTRANSIT_TIME').string.strip()
try:
location = row.find('INTRANSIT_LOCATION').string.strip()
except AttributeError:
location = ''
status = row.find('INTRANSIT_STATUS').string.strip()
date_time = datetime.strptime(' '.join([date,time]),"%d-%b-%Y %H:%M")
self.tracking_data.append({'status':status,'date':date_time,'location':location})
# Sort the checkpoints based on Date and Time --- this is important
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
# 7 trackers defined so far!
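# A minimal usage sketch (the tracking number below is hypothetical, and the
# scrapers need network access to the carrier sites):
#
#   tracker = BluedartTracker('12345678901')
#   tracker.Get_Tracking_Data()
#   print(tracker.status)         # 'C' (delivered), 'R' (returned) or 'T' (transit)
#   print(tracker.tracking_data)  # chronologically sorted checkpoints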
|
1670126
|
import time
from turtle import Screen
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard
screen = Screen()
screen.setup(width=600, height=600)
screen.tracer(0)
#TODO1: Create the turtle and move it with keypress
player = Player()
screen.listen()
screen.onkey(player.go_up, "Up")
#TODO2: Create and move cars
cars = CarManager()
#TODO5: Create a scoreboard
scoreboard = Scoreboard()
game_is_on = True
while game_is_on:
time.sleep(0.1)
screen.update()
cars.create_car()
cars.move_car()
    #TODO3: Detect turtle collision with cars
for car in cars.all_cars:
if car.distance(player) < 20:
game_is_on = False
scoreboard.game_over()
#TODO4: Detect when turtle crosses the finish line
if player.is_at_finish():
player.goto_start()
cars.level_up()
scoreboard.level_up()
screen.exitonclick()
|
1670157
|
from .processor_and_frequence_scaling import ProcessorAndFrequenceScalingController
from .battery_charge_thresholds import BatteryChargeThresholdsController
from .system_start_and_shutdown import SystemStartAndShutdownController
from .runtime_power_management import RuntimePowerManagementController
from .disks_and_controllers import DisksAndControllersController
from .drive_slot_ultrabay import DriveSlotUltrabayController
from .pci_express_bus import PciExpressBusController
from .graphics_cards import GraphicsCardsController
from .undervolting import UndervoltingController
from .file_system import FileSystemController
from .networking import NetworkingController
from .kernel import KernelController
from .audio import AudioController
from .usb import UsbController
|
1670212
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import automation, pins
from esphome.components import sensor
from esphome.const import (
CONF_ID,
CONF_INTERNAL_FILTER,
CONF_INTERNAL_FILTER_MODE,
CONF_PIN,
CONF_NUMBER,
CONF_TIMEOUT,
CONF_TOTAL,
CONF_VALUE,
ICON_PULSE,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
UNIT_PULSES,
UNIT_PULSES_PER_MINUTE,
)
from esphome.core import CORE
CODEOWNERS = ["@stevebaxter", "@cstaahl"]
pulse_meter_ns = cg.esphome_ns.namespace("pulse_meter")
PulseMeterSensor = pulse_meter_ns.class_(
"PulseMeterSensor", sensor.Sensor, cg.Component
)
PulseMeterInternalFilterMode = PulseMeterSensor.enum("InternalFilterMode")
FILTER_MODES = {
"EDGE": PulseMeterInternalFilterMode.FILTER_EDGE,
"PULSE": PulseMeterInternalFilterMode.FILTER_PULSE,
}
SetTotalPulsesAction = pulse_meter_ns.class_("SetTotalPulsesAction", automation.Action)
def validate_internal_filter(value):
return cv.positive_time_period_microseconds(value)
def validate_timeout(value):
value = cv.positive_time_period_microseconds(value)
if value.total_minutes > 70:
raise cv.Invalid("Maximum timeout is 70 minutes")
return value
def validate_pulse_meter_pin(value):
value = pins.internal_gpio_input_pin_schema(value)
if CORE.is_esp8266 and value[CONF_NUMBER] >= 16:
raise cv.Invalid(
"Pins GPIO16 and GPIO17 cannot be used as pulse counters on ESP8266."
)
return value
CONFIG_SCHEMA = sensor.sensor_schema(
PulseMeterSensor,
unit_of_measurement=UNIT_PULSES_PER_MINUTE,
icon=ICON_PULSE,
accuracy_decimals=2,
state_class=STATE_CLASS_MEASUREMENT,
).extend(
{
cv.Required(CONF_PIN): validate_pulse_meter_pin,
cv.Optional(CONF_INTERNAL_FILTER, default="13us"): validate_internal_filter,
cv.Optional(CONF_TIMEOUT, default="5min"): validate_timeout,
cv.Optional(CONF_TOTAL): sensor.sensor_schema(
unit_of_measurement=UNIT_PULSES,
icon=ICON_PULSE,
accuracy_decimals=0,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
cv.Optional(CONF_INTERNAL_FILTER_MODE, default="EDGE"): cv.enum(
FILTER_MODES, upper=True
),
}
)
async def to_code(config):
var = await sensor.new_sensor(config)
await cg.register_component(var, config)
pin = await cg.gpio_pin_expression(config[CONF_PIN])
cg.add(var.set_pin(pin))
cg.add(var.set_filter_us(config[CONF_INTERNAL_FILTER]))
cg.add(var.set_timeout_us(config[CONF_TIMEOUT]))
cg.add(var.set_filter_mode(config[CONF_INTERNAL_FILTER_MODE]))
if CONF_TOTAL in config:
sens = await sensor.new_sensor(config[CONF_TOTAL])
cg.add(var.set_total_sensor(sens))
@automation.register_action(
"pulse_meter.set_total_pulses",
SetTotalPulsesAction,
cv.Schema(
{
cv.Required(CONF_ID): cv.use_id(PulseMeterSensor),
cv.Required(CONF_VALUE): cv.templatable(cv.uint32_t),
}
),
)
async def set_total_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, paren)
template_ = await cg.templatable(config[CONF_VALUE], args, int)
cg.add(var.set_total_pulses(template_))
return var
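# A minimal sketch of the YAML this schema validates (the pin number is
# hypothetical):
#
#   sensor:
#     - platform: pulse_meter
#       pin: GPIO12
#       internal_filter: 13us
#       internal_filter_mode: EDGE
#       timeout: 5min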
|
1670221
|
from . import network, sparse_vec, vectorizers
from .network import build_cooccurrence_network, build_similarity_network
from .sparse_vec import build_doc_term_matrix, build_grp_term_matrix
from .vectorizers import Vectorizer, GroupVectorizer
from .matrix_utils import (
get_term_freqs,
get_doc_freqs,
get_inverse_doc_freqs,
get_doc_lengths,
get_information_content,
apply_idf_weighting,
filter_terms_by_df,
filter_terms_by_ic,
)
|
1670244
|
from gunicorn.config import Config
from gunicorn.http.errors import LimitRequestHeaders
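# Minimal fixture: a gunicorn Config limited to two request header fields;
# parsing a request with more headers raises LimitRequestHeaders.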
request = LimitRequestHeaders
cfg = Config()
cfg.set('limit_request_fields', 2)
|
1670250
|
import ravestate as rs
import ravestate_interloc as interloc
import ravestate_rawio as rawio
import ravestate_idle as idle
import ravestate_ontology as mem
import ravestate_verbaliser as verbaliser
import ravestate_visionio as visionio
import ravestate_hibye as hibye
from scientio.ontology.ontology import Ontology
from scientio.session import Session
from scientio.ontology.node import Node
import numpy as np
import rospy
import random
from roboy_cognition_msgs.msg import Faces, FacialFeatures
from reggol import get_logger, set_default_loglevel
logger = get_logger(__name__)
def test_known_person():
last_output = ""
with rs.Module(name="visionio_test"):
@rs.state(read=rawio.prop_out)
def raw_out(ctx: rs.ContextWrapper):
nonlocal last_output
last_output = ctx[rawio.prop_out]
logger.info(f"Output: {ctx[rawio.prop_out]}")
# Unfortunately needed until Context adopts Properties as clones.
interloc.prop_all.children.clear()
ctx = rs.Context(
"rawio",
"ontology",
"verbaliser",
"idle",
"interloc",
"nlp",
"hibye",
"visionio",
"visionio_test",
"-d", "ontology", "neo4j_pw", "test"
)
def register_dummy_known_person_to_db():
onto: Ontology = mem.get_ontology()
sess: Session = mem.get_session()
person_node = Node(metatype=onto.get_type("Person"))
person_node.set_properties({'name': 'visionio_test_person'})
person_node = sess.create(person_node)
return person_node
def delete_dummy_people():
onto: Ontology = mem.get_ontology()
sess: Session = mem.get_session()
person_node = Node(metatype=onto.get_type("Person"))
person_node.set_properties({'name': 'visionio_test_person'})
# TODO: Delete method is not working!
sess.delete(person_node)
@rs.receptor(ctx_wrap=ctx, write=visionio.prop_subscribe_faces)
def known_person_approaches(ctx: rs.ContextWrapper):
person = register_dummy_known_person_to_db()
faces = Faces()
faces.confidence = [0.85]
faces.ids = [person.get_id()]
facial_features = FacialFeatures()
facial_features.ff = np.zeros(128)
faces.face_encodings = [facial_features]
ctx[visionio.prop_subscribe_faces] = faces
mem.initialized.clear()
ctx.emit(rs.sig_startup)
ctx.run_once()
assert mem.initialized.wait()
# Vision io is started
assert visionio.reset.wait()
known_person_approaches()
# Wait until greeted
counter = 0
while not raw_out.wait(.1) and counter < 100:
ctx.run_once()
counter += 1
greeting_phrases = [phrase.replace('{name}', 'visionio_test_person') for phrase in verbaliser.get_phrase_list("greeting-with-name")]
assert last_output in greeting_phrases
assert visionio.recognize_faces.wait(0)
ctx.shutdown()
delete_dummy_people()
# Unfortunately needed until Context adopts Properties as clones.
interloc.prop_all.children.clear()
if __name__ == "__main__":
set_default_loglevel("DEBUG")
test_known_person()
exit()
|
1670253
|
def fibonacci(n, memo):
    if n == 0 or n == 1:
        return n
    if memo[n] != 0:
        return memo[n]
    else:
        memo[n] = fibonacci(n - 1, memo) + fibonacci(n - 2, memo)
        return memo[n]
if __name__ == "__main__":
    n = int(input("Enter a whole number\n"))
    memo = [0 for i in range(n + 1)]
    val = fibonacci(n, memo)
    print(val)
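# For comparison, a minimal sketch of the same memoization using the standard
# library's functools.lru_cache (available since Python 3.2):
from functools import lru_cache
@lru_cache(maxsize=None)
def fibonacci_cached(n):
    # same recurrence; the cache replaces the explicit memo list
    return n if n < 2 else fibonacci_cached(n - 1) + fibonacci_cached(n - 2)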
|
1670265
|
import unittest
from approvaltests.approvals import verify
from scripts.deploy_release import DeployRelease
from scripts.project_details import ProjectDetails
from scripts.release_details import ReleaseDetails
from scripts.starter_project_release import DeployStarterProjectRelease
from scripts.version import Version
from tests.helpers import set_home_directory
class TestDeployRelease(unittest.TestCase):
def test_get_github_release_url(self) -> None:
deploy_release = self.get_deploy_release()
verify(deploy_release.get_github_release_url())
def test_get_tweet_text(self) -> None:
deploy_release = self.get_deploy_release()
verify(deploy_release.get_tweet_text())
def test_get_url_for_starter_project_single_header_for_version(self) -> None:
deploy_release = self.get_deploy_release()
verify(DeployStarterProjectRelease.get_url_for_starter_project_single_header_for_version(
deploy_release.details.project_details,
deploy_release.details.old_version.get_version_text_without_v()))
def get_deploy_release(self) -> DeployRelease:
set_home_directory()
old_version = Version(0, 0, 1)
new_version = Version(0, 1, 0)
deploy = False
release_details = ReleaseDetails(old_version, new_version, deploy, ProjectDetails())
return DeployRelease(release_details)
|
1670299
|
import jax
import numpy as np
import sys
sys.path.insert(0, "../")
import theanoxla
import theanoxla.tensor as T
image = T.Placeholder((512 ** 2,), "float32")
output = image.reshape((1, 1, 512, 512))
f = theanoxla.function(image, outputs=[output])
for i in range(10000):
print(i)
f(np.random.randn(512 ** 2))
|
1670313
|
import base64
import datetime
import hashlib
from typing import Tuple
import delorean
from bloop import BaseModel, Binary, Column, Engine, Integer, String
from bloop.ext.delorean import Timestamp
DEFAULT_PASTE_LIFETIME_DAYS = 31
def new_expiry(days=DEFAULT_PASTE_LIFETIME_DAYS):
"""Return an expiration `days` in the future"""
now = delorean.Delorean()
return now + datetime.timedelta(days=days)
class SortByVersion:
"""Mixin for a string-based hash key and a version number for range_key"""
id = Column(String, hash_key=True)
version = Column(Integer, range_key=True, dynamo_name="v")
class Paste(SortByVersion, BaseModel):
class Meta:
ttl = {"column": "not_after"}
not_after = Column(Timestamp, default=new_expiry)
bucket = Column(String, dynamo_name="b")
key = Column(String, dynamo_name="k")
class UserImage(SortByVersion, BaseModel):
jpg = Column(Binary)
engine = Engine()
engine.bind(BaseModel)
def s3_upload(content: str) -> Tuple[str, str]:
# TODO persist in s3
return "bucket-id", "key-id"
def b64sha256(content: str) -> str:
    h = hashlib.sha256(content.encode())
    return base64.b64encode(h.digest()).decode()
def new_paste(content: str) -> str:
id = b64sha256(content)
bucket, key = s3_upload(content)
paste = Paste(bucket=bucket, key=key, id=id, version=0)
engine.save(paste)
return id
def get_paste(id: str, version=None) -> Paste:
if version:
paste = Paste(id=id, version=version)
engine.load(paste)
return paste
else:
# Reverse ordering to get last value of version
query = engine.query(Paste, key=Paste.id == id, forward=False)
return query.first()
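# Example round-trip (a sketch; requires bound DynamoDB tables and a real
# S3 upload in s3_upload):
#
#   paste_id = new_paste("hello world")
#   latest = get_paste(paste_id)            # highest version
#   first = get_paste(paste_id, version=0)  # explicit version load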
|
1670322
|
import socket
host = "localhost"
port = 5656
# create a TCP server socket and wait for a single client
Servidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Servidor.bind((host, port))
Servidor.listen(1)
print("Server waiting for a connection")
active, addr = Servidor.accept()
while True:
    recibido = active.recv(1024)
    if not recibido:  # client closed the connection
        break
    print("Client:", recibido.decode(encoding="ascii", errors="ignore"))
    enviar = input("Server: ")
    active.send(enviar.encode(encoding="ascii", errors="ignore"))
active.close()
|
1670333
|
import inspect
import linebot
class AioLineBotApiBuilder:
@classmethod
def build_class(cls, save_as, version):
# get class base source
from . import api_base
base_source = inspect.getsource(api_base)
# set line-bot-sdk version
base_source = base_source.replace(
"'LINE_BOT_API_VERSION'", f"'{version}'")
# make public methods async and add to base
for m in inspect.getmembers(linebot.api.LineBotApi):
if not m[0].startswith("_") and callable(m[1]):
method_source = inspect.getsource(m[1])
method_source = method_source.replace(
f"def {m[0]}(", f"async def {m[0]}_async(")
# call private method asynchronously
method_source = method_source.replace(
"self._get", "await self._get_async")
method_source = method_source.replace(
"self._post", "await self._post_async")
method_source = method_source.replace(
"self._delete", "await self._delete_async")
base_source += "\n" + method_source
# make get/post/delete private methods async
for m in ["get", "post", "delete"]:
method_source = inspect.getsource(getattr(linebot.api.LineBotApi, f"_{m}"))
method_source = method_source.replace(
f"def _{m}", f"async def _{m}_async")
method_source = method_source.replace(
f"self.http_client.{m}", f"await self.aiohttp_client.{m}")
method_source = method_source.replace(
"self.__check_error(response)", "response.check_error()")
base_source += "\n" + method_source
# save api module file
with open(save_as, "w") as f:
f.write(base_source)
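# Illustrative invocation (sketch; the output path and line-bot-sdk version
# strings are assumptions, not values from this module):
#
#   AioLineBotApiBuilder.build_class("aiolinebotapi/api.py", "1.19.0")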
|
1670345
|
from flask_testing import TestCase
import os
import subprocess
import sys
import unittest
dir = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(dir)
from app import app, db
from app.models import City, Job
from src.full_hn_reindex import process_page
from src.index_cities import index_cities
class BaseTestCase(TestCase):
""" Base test case """
def create_app(self):
app.config.from_object('config.TestConfiguration')
return app
def setUp(self):
db.create_all()
index_cities()
def tearDown(self):
db.session.remove()
db.drop_all()
subprocess.call('rm ../test.db', shell=True)
class IndexCities(BaseTestCase):
def test_cities_are_indexed(self):
assert City.query.filter(City.name == u'REMOTE').count() == 1
assert City.query.filter(City.name == u'San Francisco').count() == 1
class IndexJobs(BaseTestCase):
def test_full_integration(self):
# since processing the page takes ~8 sec all tests are crammed in here
# process a random HN page without blowing up
process_page(7829033)
process_page('local test page', update=False, localPage='resources/pages/072015.txt')
        assert Job.query.filter(Job.month == 7, Job.year == 2015).count() == 1158
        # manually refine the db
        subprocess.call('sqlite3 ../test.db < src/refine_db.sql', shell=True)
        # check cities
        assert Job.query.filter(Job.month == 7, Job.year == 2015).count() == 1067
        assert Job.query.join(City).filter(City.name == u'REMOTE', Job.month == 7, Job.year == 2015).count() == 101
        assert Job.query.join(City).filter(City.name == u'San Francisco', Job.month == 7, Job.year == 2015).count() == 194
        assert Job.query.join(City).filter(City.name == u'London', Job.month == 7, Job.year == 2015).count() == 64
        assert Job.query.join(City).filter(City.name == u'Zurich', Job.month == 7, Job.year == 2015).count() == 5
        # check countries
        assert Job.query.join(City).filter(City.country == u'United States', Job.month == 7, Job.year == 2015).count() == 698
        assert Job.query.join(City).filter(City.country == u'United Kingdom', Job.month == 7, Job.year == 2015).count() == 78
        assert Job.query.join(City).filter(City.country == u'Singapore', Job.month == 7, Job.year == 2015).count() == 7
        # update page; all the results should stay the same
        process_page('local test page', update=True, localPage='resources/pages/072015.txt')
        subprocess.call('sqlite3 ../test.db < src/refine_db.sql', shell=True)
        # check cities
        assert Job.query.filter(Job.month == 7, Job.year == 2015).count() == 1067
        assert Job.query.join(City).filter(City.name == u'REMOTE', Job.month == 7, Job.year == 2015).count() == 101
        assert Job.query.join(City).filter(City.name == u'San Francisco', Job.month == 7, Job.year == 2015).count() == 194
        assert Job.query.join(City).filter(City.name == u'London', Job.month == 7, Job.year == 2015).count() == 64
        assert Job.query.join(City).filter(City.name == u'Zurich', Job.month == 7, Job.year == 2015).count() == 5
        # check countries
        assert Job.query.join(City).filter(City.country == u'United States', Job.month == 7, Job.year == 2015).count() == 698
        assert Job.query.join(City).filter(City.country == u'United Kingdom', Job.month == 7, Job.year == 2015).count() == 78
        assert Job.query.join(City).filter(City.country == u'Singapore', Job.month == 7, Job.year == 2015).count() == 7
if __name__ == '__main__':
unittest.main()
|
1670346
|
import datetime
from countershape.html import *
import countershape as cs
import testpages
class TestUL(testpages.DummyState):
def test_one(self):
u = UL(["one", "two", "three"])
s = str(u)
assert "three" in s
def test_with_objects(self):
u = UL([LI("one"), LI("two"), "three"])
s = str(u)
assert "two" in s
assert "three" in s
def test_withclass(self):
u = UL(["one", "two", "three"], _class="testclass")
s = str(u)
assert "testclass" in s
class TestValue:
def test_call(self):
s = Value("foo")
assert not s.value
s = s(foo="one", bar="two")
assert s.value == "one"
assert str(s) == "one"
def test_noarg(self):
a = Value("foo")
b = a()
assert str(a) == str(b)
class TestGroup:
def test_render(self):
g = Group("foo", "bar")
str(g)
class TestHalfTag:
def test_makeAttrs(self):
ht = HalfTag(
"foo",
one="two",
two="!@#@#$#^%&&&**(&"
)
assert ht._makeAttrs()
def test_makeAttrs_special(self):
ht = HalfTag(
"foo",
_return="two",
_class="!@#@#$#^%&&&**(&"
)
assert ht._makeAttrs()
assert ht["return"]
assert not ht.attrs.has_key("_return")
def test_str(self):
ht = HalfTag("foo", one="foo", two="bar")
assert str(ht)
def test_has_key(self):
ht = HalfTag("foo", one="foo", two="bar")
assert not ht.has_key("wibble")
assert ht.has_key("one")
def test_setattrs(self):
ht = HalfTag("foo", id="bar")
assert ht.id == "bar"
def test_addClass(self):
ht = HalfTag("foo", _class="foo")
assert ht["class"] == "foo"
ht.addCSSClass("bar")
assert ht["class"] == "foo bar"
ht = HalfTag("foo")
assert not ht.has_key("class")
ht.addCSSClass("bar")
assert ht["class"] == "bar"
class TestFullTag(testpages.DummyState):
def test_str(self):
ft = FullTag("foo", "contents", one="foo", two="bar")
assert str(ft)
def test_nonzero(self):
ft = FullTag("foo")
assert bool(ft)
def test_tree(self):
s = DIV(
DIV("", _class="one-one"),
DIV(
DIV("", _class="one-two-one"),
DIV("", _class="one-two-two"),
_class="one-two"
),
DIV("", _class="one-three"),
)
s = str(s)
assert "one-two-two" in s
def test_unicode(self):
u = u"\u1234foober"
s = DIV(u"\u1234foober")
assert u in unicode(s)
|
1670350
|
import numpy as np
import time
import argparse
import roboverse
import roboverse.bullet as bullet
KEY_TO_ACTION_MAPPING = {
bullet.p.B3G_LEFT_ARROW: np.array([0.1, 0, 0, 0, 0, 0, 0]),
bullet.p.B3G_RIGHT_ARROW: np.array([-0.1, 0, 0, 0, 0, 0, 0]),
bullet.p.B3G_UP_ARROW: np.array([0, -0.1, 0, 0, 0, 0, 0]),
bullet.p.B3G_DOWN_ARROW: np.array([0, 0.1, 0, 0, 0, 0, 0]),
ord('j'): np.array([0, 0, 0.2, 0, 0, 0, 0]),
ord('k'): np.array([0, 0, -0.2, 0, 0, 0, 0]),
ord('h'): np.array([0, 0, 0, 0, 0, 0, -0.7]),
ord('l'): np.array([0, 0, 0, 0, 0, 0, 0.7])
}
ENV_COMMANDS = {
ord('r'): lambda env: env.reset()
}
def keyboard_control(args):
env = roboverse.make(args.env_name, gui=True)
while True:
take_action = False
action = np.array([0, 0, 0, 0, 0, 0, 0], dtype='float32')
keys = bullet.p.getKeyboardEvents()
for qKey in keys:
if qKey in KEY_TO_ACTION_MAPPING.keys():
action += KEY_TO_ACTION_MAPPING[qKey]
take_action = True
elif qKey in ENV_COMMANDS.keys():
ENV_COMMANDS[qKey](env)
take_action = False
if take_action:
obs, rew, done, info = env.step(action)
print(rew)
time.sleep(0.1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--env-name", type=str,
default='Widow250MultiTaskGrasp-v0')
args = parser.parse_args()
keyboard_control(args)
|
1670368
|
from django.db.models import Q
from drf_stripe.models import Product, Price, ProductFeature
from drf_stripe.stripe_webhooks.handler import handle_webhook_event
from tests.base import BaseTest
class TestWebhookProductPriceEvents(BaseTest):
def create_product_price(self):
event = self._load_test_data("2020-08-27/webhook_product_created.json")
handle_webhook_event(event)
event = self._load_test_data("2020-08-27/webhook_price_created.json")
handle_webhook_event(event)
def test_event_handler_product_created(self):
"""
        Mock product and price creation events
"""
self.create_product_price()
# check product and price created
product = Product.objects.get(description='Test Product ABC')
price = Price.objects.get(product=product)
self.assertEqual(price.price, 100)
self.assertEqual(price.product, product)
# check Product-to-Feature relations created
ProductFeature.objects.get(product=product, feature__feature_id='A')
ProductFeature.objects.get(product=product, feature__feature_id='B')
ProductFeature.objects.get(product=product, feature__feature_id='C')
def test_event_handler_price_update(self):
"""
Mock price update events
"""
self.create_product_price()
# modify price
event = self._load_test_data("2020-08-27/webhook_price_updated.json")
handle_webhook_event(event)
# check price modifications
price = Price.objects.get(price_id="price_1KHkCLL14ex1CGCipzcBdnOp")
self.assertEqual(price.price, 50)
self.assertEqual(price.freq, "week_1")
self.assertEqual(price.nickname, "Weekly subscription")
self.assertEqual(price.product.product_id, "prod_KxfXRXOd7dnLbz")
def test_event_handler_product_update(self):
"""Mock product update event"""
self.create_product_price()
# modify product
product_mod = self._load_test_data("2020-08-27/webhook_product_updated.json")
handle_webhook_event(product_mod)
# check product modifications
product = Product.objects.get(product_id='prod_KxfXRXOd7dnLbz')
self.assertEqual(product.name, "Test Product ABD")
self.assertEqual(product.description, "Test Product ABD")
# check product is now associated with feature D
ProductFeature.objects.get(product=product, feature__feature_id='D')
ProductFeature.objects.get(product=product, feature__feature_id='A')
ProductFeature.objects.get(product=product, feature__feature_id='B')
# check product no longer associated with feature C
prod_feature_qs = ProductFeature.objects.filter(Q(product=product) & Q(feature__feature_id='C'))
self.assertEqual(len(prod_feature_qs), 0)
def test_event_handler_price_archived(self):
"""Mock price archived event"""
self.create_product_price()
event = self._load_test_data("2020-08-27/webhook_price_updated_archived.json")
handle_webhook_event(event)
price = Price.objects.get(price_id='price_1KHkCLL14ex1CGCieIBu8V2e')
self.assertFalse(price.active)
def test_event_handler_product_archived(self):
"""Mock product archived event"""
self.create_product_price()
event = self._load_test_data("2020-08-27/webhook_product_updated_archived.json")
handle_webhook_event(event)
product = Product.objects.get(product_id='prod_KxfXRXOd7dnLbz')
self.assertFalse(product.active)
|
1670382
|
from typing import Sequence
from libcst import Arg, Call, Integer, parse_expression
from libcst.codemod.visitors import AddImportsVisitor
from django_codemod.constants import DJANGO_2_2, DJANGO_3_1
from django_codemod.visitors.base import BaseFuncRenameTransformer
class FixedOffsetTransformer(BaseFuncRenameTransformer):
"""Replace `django.utils.timezone.FixedOffset` by `datetime.timezone`."""
deprecated_in = DJANGO_2_2
removed_in = DJANGO_3_1
rename_from = "django.utils.timezone.FixedOffset"
rename_to = "datetime.timezone"
def update_call_args(self, node: Call) -> Sequence[Arg]:
"""Update first argument to convert integer for minutes to timedelta."""
AddImportsVisitor.add_needed_import(
context=self.context,
module="datetime",
obj="timedelta",
)
offset_arg, *other_args = node.args
integer_value = offset_arg.value
if not isinstance(integer_value, Integer):
raise AssertionError(f"Unexpected type for: {integer_value}")
timedelta_call = parse_expression(f"timedelta(minutes={integer_value.value})")
new_offset_arg = offset_arg.with_changes(value=timedelta_call)
return (new_offset_arg, *other_args)
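# Illustrative effect of this transformer (sketch, not part of the module):
#
#   before: timezone.FixedOffset(120, "UTC+2")
#   after:  timezone(timedelta(minutes=120), "UTC+2")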
|
1670430
|
import torch
import matplotlib.pyplot as plt
from main import *
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def plotter(g_losses, d_losses):
"""
Args:
g_losses (list): List of iteration-wise generator losses.
d_losses (list): List of iteration-wise discriminator losses.
"""
plt.plot(g_losses,label='G')
plt.plot(d_losses,label='D')
plt.legend()
plt.show()
def evaluate(idx, attr):
"""
Args:
idx (int): Index of the image from dataset which you want to translate.
attr (list): Pass a list with length=c_dims, to what you want to translate your image to.
Example: [0,0,1,0,1]
"""
D_.eval()
G_.eval() # Setting the models to eval mode.
attr=torch.tensor(attr)
img, lbl=dataset[idx]
    plt.imshow(img.squeeze().numpy().transpose((1,2,0))) # Plotting original image
    plt.show()
    sample=G_(img.unsqueeze(0),attr.to(device))
plt.imshow(sample.squeeze().detach().cpu().numpy().transpose((1,2,0)))
plt.show()
    print('Initial labels: {lbl} , Translated labels: {tst}'.format(lbl=lbl,tst=attr))
|
1670457
|
from model.model import captcha_model, model_conv, model_resnet
from data.datamodule import captcha_dm
from utils.arg_parsers import test_arg_parser
import pytorch_lightning as pl
def test(args):
dm = captcha_dm()
model = captcha_model.load_from_checkpoint(args.ckpt, model=model_resnet())
tb_logger = pl.loggers.TensorBoardLogger(
args.log_dir, name=args.test_name, version=2, default_hp_metric=False)
trainer = pl.Trainer(deterministic=True,
gpus=-1,
auto_select_gpus=True,
precision=32,
logger=tb_logger,
fast_dev_run=False,
max_epochs=5,
log_every_n_steps=50,
stochastic_weight_avg=True
)
trainer.test(model, dm)
if __name__ == "__main__":
args = test_arg_parser()
test(args)
|
1670499
|
import unittest
from unittest import mock
from groupy.api import base
class ManagerTests(unittest.TestCase):
def setUp(self):
self.manager = base.Manager(mock.Mock(), path='foo')
def test_url_contains_path(self):
self.assertEqual(self.manager.url, self.manager.base_url + 'foo')
class ResourceTests(unittest.TestCase):
def setUp(self):
self.data = {'foo': 'bar'}
self.resource = base.Resource(**self.data)
def test_data(self):
self.assertEqual(self.data, self.resource.data)
def test_data_access_via_resource_attributes(self):
self.assertEqual(self.resource.foo, 'bar')
def test_data_access_raises_attribute_error(self):
with self.assertRaises(AttributeError):
self.resource.baz
|
1670507
|
import math
import torch
import copy
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertTokenizer, BertModel
def linear_block(input_dim, hidden_dim):
linear = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.LeakyReLU(0.5))
return linear
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_layers):
super(MLP, self).__init__()
self.num_layers = num_layers
self.hidden_size = hidden_dim
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
self.model = nn.Sequential(*layers)
        ## initialize the model
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
fan_in,_ = nn.init._calculate_fan_in_and_fan_out(m.weight)
bound = 1/math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def forward(self,x):
out = self.model(x)
return out
class SDSN(nn.Module):
"""docstring for SDSNA"""
# Replace simple dot product with SDSNA
# Scoring Lexical Entailment with a supervised directional similarity network
def __init__(self, arg):
super(SDSNA, self).__init__()
self.emb_dim = 300
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.map_linear_left = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.map_linear_right = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.final_linear = nn.Linear(2 * self.hidden_dim + self.emb_dim, 1)
def init_embs(self, w2v_weight):
self.embs = nn.Embedding.from_pretrained(w2v_weight, freeze=True)
    def forward(self, inputs):
        # inputs: [batch_size, 2]
        batch_size, _ = inputs.size()
        left_w2v = self.embs(inputs[:,0])
        right_w2v = self.embs(inputs[:,1])
        left_trans = self.map_linear_left(left_w2v)
        right_trans = self.map_linear_right(right_w2v)
        # NOTE: the original forward ended here without a return; as a
        # minimal, assumed completion, score with the dot product of the
        # transformed embeddings (mirroring Word2Score.inference below).
        return torch.einsum('ij,ij->i', [left_trans, right_trans])
def mlp(self, input_dim, hidden_dim, num_layers):
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
return nn.Sequential(*layers)
class Word2Score(nn.Module):
"""docstring for Word2Score"""
def __init__(self, hidden_dim, num_layers):
super(Word2Score, self).__init__()
self.emb_dim = 300
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.map_linear_left = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
self.map_linear_right = self.mlp(self.emb_dim, self.hidden_dim, self.num_layers)
def init_emb(self, w2v_weight):
self.embs = nn.Embedding.from_pretrained(w2v_weight, freeze=True)
def mlp(self, input_dim, hidden_dim, num_layers):
layers = []
for i in range(num_layers-1):
layers.extend(
linear_block(hidden_dim if i> 0 else input_dim, hidden_dim)
)
layers.extend([nn.Linear(hidden_dim, input_dim)])
return nn.Sequential(*layers)
def forward(self, inputs):
# inputs: [batch_size, 2]
batch_size, _ = inputs.size()
left_w2v = self.embs(inputs[:,0])
right_w2v = self.embs(inputs[:,1])
left_trans = self.map_linear_left(left_w2v)
right_trans = self.map_linear_right(right_w2v)
output = torch.einsum('ij,ij->i', [left_trans, right_trans])
left_norm = torch.norm(left_trans, dim=1).sum()
right_norm = torch.norm(right_trans, dim=1).sum()
return output, (left_norm+right_norm)
def inference(self, left_w2v, right_w2v):
left_trans = self.map_linear_left(left_w2v)
right_trans = self.map_linear_right(right_w2v)
output = torch.einsum('ij,ij->i', [left_trans, right_trans])
return output
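# Minimal shape check for Word2Score (illustrative; random weights stand in
# for real word2vec vectors, and the vocab size of 1000 is arbitrary):
#
#   model = Word2Score(hidden_dim=100, num_layers=2)
#   model.init_emb(torch.randn(1000, 300))
#   scores, reg = model(torch.randint(0, 1000, (4, 2)))
#   scores.shape  # -> torch.Size([4])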
class MEAN_Max(nn.Module):
"""docstring for MEAN"""
def __init__(self, input_dim, hidden_dim):
super(MEAN_Max, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, emb]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
oe = torch.cat((embed_input_left, embed_input_right), 2)
oe = oe.mean(2)
oe = self.output_layer(oe)
oe = oe.max(1)[0]
return oe
class MEAN(nn.Module):
"""docstring for MEAN"""
def __init__(self, input_dim, hidden_dim):
super(MEAN, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, emb]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
oe = torch.cat((embed_input_left, embed_input_right), 2)
oe = oe.mean(2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe
class LSTM(nn.Module):
"""docstring for LSTM"""
def __init__(self, input_dim, hidden_dim):
super(LSTM, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(p=0)
self.left_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.right_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim*2),
nn.ReLU(),
nn.Linear(hidden_dim*2, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = embed_input_left.view(-1, seqlen, self.input_dim)
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = embed_input_right.view(-1, seqlen, self.input_dim)
embed_input_right = self.dropout_layer(embed_input_right)
# hidden = (torch.zeros(1, batch_size*num_context, self.hidden_dim),
# torch.zeros(1, batch_size*num_context, self.hidden_dim))
output_left, (final_hidden_state_left, final_cell_state_left) = self.left_context_encoder(embed_input_left) #, hidden)
output_right,(final_hidden_state_right, final_cell_state_left) = self.right_context_encoder(embed_input_right) #, hidden)
encode_context_left = final_hidden_state_left.view(-1, num_context, self.hidden_dim)
encode_context_right = final_hidden_state_right.view(-1, num_context, self.hidden_dim)
# concat + mean_pooling + fully_connect
oe = torch.cat((encode_context_left, encode_context_right), 2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe
class SelfAttention(nn.Module):
"""docstring for SelfAttention"""
def __init__(self, input_dim, hidden_dim):
super(SelfAttention, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
#print(att_weight.size())
oe = (left_right_context * att_weight).sum(2)
oe = self.output_layer(oe)
oe = oe.mean(1)
return oe ,att_weight
class HierAttention(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(HierAttention, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.att_h = nn.Linear(input_dim, hidden_dim)
self.att_hv = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
oe = (left_right_context * att_weight).sum(2)
#print(oe.size())
hier_att_weight = torch.matmul(self.att_h(oe), self.att_hv)
#print(hier_att_weight.size())
hier_att_weight = nn.functional.softmax(hier_att_weight, dim=1).view(batch_size, num_context, 1)
#print(hier_att_weight.size())
oe = (oe * hier_att_weight).sum(1)
oe = self.output_layer(oe)
return oe, att_weight, hier_att_weight
class HierAttentionEnsemble(nn.Module):
def __init__(self, input_dim, hidden_dim):
        super(HierAttentionEnsemble, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.att_w = nn.Linear(input_dim, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.att_h = nn.Linear(input_dim, hidden_dim)
self.att_hv = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim]
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = self.dropout_layer(embed_input_right)
# [batch_size, context_num, seq_length, dim]
left_right_context = torch.cat((embed_input_left, embed_input_right),2)
#print(left_right_context.size())
att_weight = torch.matmul(self.att_w(left_right_context), self.att_v)
att_weight = nn.functional.softmax(att_weight, dim=2).view(batch_size, num_context, 2*seqlen, 1)
oe = (left_right_context * att_weight).sum(2)
#print(oe.size())
hier_att_weight = torch.matmul(self.att_h(oe), self.att_hv)
#print(hier_att_weight.size())
hier_att_weight = nn.functional.softmax(hier_att_weight, dim=1).view(batch_size, num_context, 1)
#print(hier_att_weight.size())
oe = (oe * hier_att_weight).sum(1)
oe = self.output_layer(oe)
return oe, att_weight, hier_att_weight
class ATTENTION(nn.Module):
"""docstring for ATTENTION"""
def __init__(self, input_dim, hidden_dim):
super(ATTENTION, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_layer = nn.Dropout(0)
self.left_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.right_context_encoder = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True)
self.att_w = nn.Linear(hidden_dim*2, hidden_dim)
self.att_v = nn.Parameter(torch.rand(hidden_dim))
self.output_layer = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim*2),
nn.ReLU(),
nn.Linear(hidden_dim*2, input_dim)
)
def forward(self, embed_input_left, embed_input_right):
# input: [batch, context, seq, emb]
batch_size, num_context, seqlen, emb_dim = embed_input_left.size()
# [batch, context, seq, dim] -> [batch*context, seq, dim]
embed_input_left = embed_input_left.view(-1, seqlen, self.input_dim)
embed_input_left = self.dropout_layer(embed_input_left)
embed_input_right = embed_input_right.view(-1, seqlen, self.input_dim)
embed_input_right = self.dropout_layer(embed_input_right)
# hidden = (torch.zeros(1, batch_size*num_context, self.hidden_dim),
# torch.zeros(1, batch_size*num_context, self.hidden_dim))
output_left, (final_hidden_state_left, final_cell_state_left) = self.left_context_encoder(embed_input_left) #, hidden)
output_right,(final_hidden_state_right, final_cell_state_left) = self.right_context_encoder(embed_input_right) #, hidden)
encode_context_left = final_hidden_state_left.view(-1, num_context, self.hidden_dim)
encode_context_right = final_hidden_state_right.view(-1, num_context, self.hidden_dim)
# concat + mean_pooling + fully_connect
oe = torch.cat((encode_context_left, encode_context_right), 2)
        #print(oe.size())
        att_weight = torch.matmul(self.att_w(oe), self.att_v)
        #print(att_weight.size())
        att_weight = nn.functional.softmax(att_weight, dim=1).view(batch_size, num_context, 1)
        #print(att_weight.size())
        oe = (oe * att_weight).sum(1)
oe = self.output_layer(oe)
return oe
class BertEncoder(nn.Module):
def __init__(self, bert_dir, model_type="base"):
super(BertEncoder, self).__init__()
self.model_type = model_type
self.model = BertModel.from_pretrained(bert_dir)
self.set_finetune("full")
def set_finetune(self, finetune_type):
if finetune_type == "none":
for param in self.model.parameters():
param.requires_grad = False
elif finetune_type == "full":
for param in self.model.parameters():
param.requires_grad = True
elif finetune_type == "last":
for param in self.model.parameters():
param.require_grad = False
for param in self.encoder.layer[-1].parameters():
param.require_grad = True
def forward(self, input_ids, mask=None):
# [batch_size, context_num, seq_length]
batch_size, context_num, seq_length = input_ids.size()
flat_input_ids = input_ids.reshape(-1, input_ids.size(-1))
flat_mask = mask.reshape(-1, mask.size(-1))
pooled_cls = self.model(input_ids = flat_input_ids, attention_mask=flat_mask)[1]
# [batch_size * context_num, dim]
#print(pooled_cls.size())
reshaped_pooled_cls = pooled_cls.view(batch_size, context_num, -1)
# [batch_size, context_num, dim]
output = reshaped_pooled_cls.mean(1)
# [batch_size, dim]
return output
def get_output_dim(self):
if self.model_type == "large":
return 1024
else:
return 768
class Bert2Score(nn.Module):
def __init__(self, encoder, bert_dir, hidden_dim, drop_prob):
super(Bert2Score, self).__init__()
self.hidden_dim = hidden_dim
if "large" in encoder:
self.encoder = BertEncoder(bert_dir, "large")
else:
self.encoder = BertEncoder(bert_dir)
bert_dim = self.encoder.get_output_dim()
self.mlp1 = nn.Linear(bert_dim, hidden_dim)
self.mlp2 = nn.Linear(bert_dim, hidden_dim)
self.dropout = nn.Dropout(drop_prob)
def forward(self, input_ids, masks):
## input: [batch_size, 2, context, seq]
left_ids = input_ids[:,0,:,:]
right_ids = input_ids[:,1,:,:]
left_masks = masks[:,0,:,:]
right_masks = masks[:,1,:,:]
left_emb = self.encoder(left_ids, left_masks)
right_emb = self.encoder(right_ids, right_masks)
# [batch_size, hidden_dim]
tran_left = self.mlp1(self.dropout(left_emb))
tran_right = self.mlp2(self.dropout(right_emb))
output = torch.einsum('ij,ij->i', [tran_left, tran_right])
return output
class Context2Score(nn.Module):
"""docstring for Context2Score"""
def __init__(self, encoder, input_dim, hidden_dim, device, multiple=False):
super(Context2Score, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.device = device
self.attention = False
self.hier = False
#self.name = encoder
if 'lstm' in encoder:
if multiple:
self.encoder1 = nn.DataParallel(LSTM(input_dim, hidden_dim), device_ids=[0,1,2,3])
self.encoder2 = nn.DataParallel(LSTM(input_dim, hidden_dim), device_ids=[0,1,2,3])
else:
self.encoder1 = LSTM(input_dim, hidden_dim).to(device)
self.encoder2 = LSTM(input_dim, hidden_dim).to(device)
elif 'attention' in encoder:
if multiple:
self.encoder1 = ATTENTION(input_dim, hidden_dim)
self.encoder2 = ATTENTION(input_dim, hidden_dim)
else:
self.encoder1 = ATTENTION(input_dim, hidden_dim).to(device)
self.encoder2 = ATTENTION(input_dim, hidden_dim).to(device)
elif 'max' in encoder:
self.encoder1 = MEAN_Max(input_dim, hidden_dim).to(device)
self.encoder2 = MEAN_Max(input_dim, hidden_dim).to(device)
elif 'self' in encoder:
#self.encoder1, self.atten1 = SelfAttention(input_dim, hidden_dim).to(device)
self.encoder1 = SelfAttention(input_dim, hidden_dim).to(device)
self.encoder2 = SelfAttention(input_dim, hidden_dim).to(device)
self.attention = True
elif 'han' in encoder:
self.encoder1 = HierAttention(input_dim, hidden_dim).to(device)
self.encoder2 = HierAttention(input_dim, hidden_dim).to(device)
self.hier = True
else:
if multiple:
self.encoder1 = MEAN(input_dim, hidden_dim)
self.encoder2 = MEAN(input_dim, hidden_dim)
else:
self.encoder1 = MEAN(input_dim, hidden_dim).to(device)
self.encoder2 = MEAN(input_dim, hidden_dim).to(device)
def init_emb(self, w2v_weight):
self.word_embedding = nn.Embedding.from_pretrained(w2v_weight, freeze=True)
def forward(self, input_idx):
# input: [batch, 2, context, 2, seq]
embed_input1_left = self.word_embedding(input_idx[:, 0, :, 0]).to(self.device)
embed_input1_right = self.word_embedding(input_idx[:, 0, :, 1]).to(self.device)
embed_input2_left = self.word_embedding(input_idx[:, 1, :, 0]).to(self.device)
embed_input2_right = self.word_embedding(input_idx[:, 1, :, 1]).to(self.device)
if self.attention:
embed_hypo, atten1 = self.encoder1(embed_input1_left, embed_input1_right)
embed_hype, atten2 = self.encoder2(embed_input2_left, embed_input2_right)
output = torch.einsum('ij,ij->i', [embed_hypo, embed_hype])
return output, atten1, atten2
elif self.hier:
embed_hypo, atten1, hier_atten1 = self.encoder1(embed_input1_left, embed_input1_right)
embed_hype, atten2, hier_atten2 = self.encoder2(embed_input2_left, embed_input2_right)
output = torch.einsum('ij,ij->i', [embed_hypo, embed_hype])
atten_w = (atten1, hier_atten1, atten2, hier_atten2)
return output, atten_w
else:
embed_hypo = self.encoder1(embed_input1_left, embed_input1_right)
embed_hype = self.encoder2(embed_input2_left,embed_input2_right)
output = torch.einsum('ij,ij->i', [embed_hypo, embed_hype])
return output
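# Minimal smoke-test sketch for the 'mean' encoder path (illustrative; random
# embeddings, vocab size 50, 4 contexts and seq length 7 are all arbitrary):
#
#   m = Context2Score('mean', 300, 100, torch.device('cpu'))
#   m.init_emb(torch.randn(50, 300))
#   idx = torch.randint(0, 50, (2, 2, 4, 2, 7))  # [batch, 2, context, 2, seq]
#   m(idx).shape  # -> torch.Size([2])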
|
1670510
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.contrib.settings.models import (
BaseSetting,
register_setting
)
# Create your models here.
@register_setting
class RSSFeedsSettings(BaseSetting):
feed_app_label = models.CharField(
_('Feed app label'),
max_length=255,
help_text=_('blog App whose Feed is to be generated'),
null=True,
blank=True
)
feed_model_name = models.CharField(
_('Feed model name'),
max_length=255,
help_text=_('Model to be used for feed generation'),
null=True,
blank=True
)
feed_title = models.CharField(
_('Feed title'), max_length=255, help_text=_('Title of Feed'), null=True, blank=True
)
feed_link = models.URLField(
_('Feed link'), max_length=255, help_text=_('link for Feed'), null=True, blank=True
)
feed_description = models.CharField(
_('Feed description'),
max_length=255,
        help_text=_('Description of feed'),
null=True,
blank=True
)
feed_author_email = models.EmailField(
_('Feed author email'),
max_length=255,
help_text=_('Email of author'),
null=True,
blank=True
)
feed_author_link = models.URLField(
_('Feed author link'),
max_length=255,
help_text=_('Link of author'),
null=True,
blank=True
)
feed_item_description_field = models.CharField(
_('Feed item description field'),
max_length=255,
help_text=_('Description field for feed item'),
null=True,
blank=True
)
feed_item_content_field = models.CharField(
_('Feed item content field'),
max_length=255,
help_text=_('Content Field for feed item'),
null=True,
blank=True
)
feed_image_in_content = models.BooleanField(
_('Feed image in content'),
help_text=_('Add feed image to content encoded field'),
default=True
)
feed_item_date_field = models.CharField(
_('Feed item date field'),
max_length=255,
help_text=_('(Optional). Date Field for feed item. By default use date'),
blank=True
)
is_feed_item_date_field_datetime = models.BooleanField(
_('Is Feed item date field Datetime Field'),
help_text=_('If the above date field is DateTime field, tick this.'),
default=False
)
class Meta:
verbose_name = _('RSS feed setting')
verbose_name_plural = _('RSS feed settings')
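# Typical read access (sketch; BaseSetting exposes a per-site accessor, and
# `request.site` assumes wagtail's site middleware is installed):
#
#   settings = RSSFeedsSettings.for_site(request.site)
#   settings.feed_title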
|
1670538
|
import time
import numpy as np
from sklearn.neighbors import NearestCentroid
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn import decomposition
def run(x_train, y_train, x_test, y_test, clf):
s = time.time()
clf.fit(x_train, y_train)
e_train = time.time() - s
s = time.time()
score = clf.score(x_test, y_test)
e_test = time.time() - s
print("score = %0.4f (time, train=%8.3f, test=%8.3f)" % (score, e_train, e_test))
def train(x_train, y_train, x_test, y_test):
print(" Nearest centroid : ", end='')
run(x_train, y_train, x_test, y_test, NearestCentroid())
print(" k-NN classifier (k=3) : ", end='')
run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))
print(" k-NN classifier (k=7) : ", end='')
run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))
print(" Naive Bayes (Gaussian) : ", end='')
run(x_train, y_train, x_test, y_test, GaussianNB())
print(" Decision Tree : ", end='')
run(x_train, y_train, x_test, y_test, DecisionTreeClassifier())
print(" Random Forest (trees= 5) : ", end='')
run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))
print(" Random Forest (trees= 50) : ", end='')
run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))
print(" Random Forest (trees=500) : ", end='')
run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))
print(" Random Forest (trees=1000): ", end='')
run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))
print(" LinearSVM (C=0.01) : ", end='')
run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))
print(" LinearSVM (C=0.1) : ", end='')
run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))
print(" LinearSVM (C=1.0) : ", end='')
run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))
print(" LinearSVM (C=10.0) : ", end='')
run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))
def main():
x_train = np.load("../data/mnist/mnist_train_vectors.npy").astype("float64")
y_train = np.load("../data/mnist/mnist_train_labels.npy")
x_test = np.load("../data/mnist/mnist_test_vectors.npy").astype("float64")
y_test = np.load("../data/mnist/mnist_test_labels.npy")
print("Models trained on raw [0,255] images:")
train(x_train, y_train, x_test, y_test)
print("Models trained on raw [0,1) images:")
train(x_train/256.0, y_train, x_test/256.0, y_test)
m = x_train.mean(axis=0)
s = x_train.std(axis=0) + 1e-8
x_ntrain = (x_train - m) / s
x_ntest = (x_test - m) / s
print("Models trained on normalized images:")
train(x_ntrain, y_train, x_ntest, y_test)
pca = decomposition.PCA(n_components=15)
pca.fit(x_ntrain)
x_ptrain = pca.transform(x_ntrain)
x_ptest = pca.transform(x_ntest)
print("Models trained on first 15 PCA components of normalized images:")
train(x_ptrain, y_train, x_ptest, y_test)
if __name__ == "__main__":
    main()
|
1670550
|
from sympy.utilities.iterables import \
flatten, connected_components
from .common import NonSquareMatrixError
def _connected_components(M):
"""Returns the list of connected vertices of the graph when
a square matrix is viewed as a weighted graph.
Examples
========
>>> from sympy import symbols, Matrix
>>> a, b, c, d, e, f, g, h = symbols('a:h')
>>> A = Matrix([
... [a, 0, b, 0],
... [0, e, 0, f],
... [c, 0, d, 0],
... [0, g, 0, h]])
>>> A.connected_components()
[[0, 2], [1, 3]]
Notes
=====
    Even if a symbolic element of the matrix could evaluate to zero
    mathematically, this method only takes the structural aspect of
    the matrix into account, so such elements are considered to be
    nonzero.
"""
if not M.is_square:
raise NonSquareMatrixError
V = range(M.rows)
E = sorted(M.todok().keys())
return connected_components((V, E))
def _connected_components_decomposition(M):
"""Decomposes a square matrix into block diagonal form only
using the permutations.
Explanation
===========
The decomposition is in a form of $A = P B P^{-1}$ where $P$ is a
permutation matrix and $B$ is a block diagonal matrix.
Returns
=======
P, B : PermutationMatrix, BlockDiagMatrix
*P* is a permutation matrix for the similarity transform
as in the explanation. And *B* is the block diagonal matrix of
the result of the permutation.
If you would like to get the diagonal blocks from the
BlockDiagMatrix, see
:meth:`~sympy.matrices.expressions.blockmatrix.BlockDiagMatrix.get_diag_blocks`.
Examples
========
>>> from sympy import symbols, Matrix
>>> a, b, c, d, e, f, g, h = symbols('a:h')
>>> A = Matrix([
... [a, 0, b, 0],
... [0, e, 0, f],
... [c, 0, d, 0],
... [0, g, 0, h]])
>>> P, B = A.connected_components_decomposition()
>>> P = P.as_explicit()
>>> P_inv = P.inv().as_explicit()
>>> B = B.as_explicit()
>>> P
Matrix([
[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]])
>>> B
Matrix([
[a, b, 0, 0],
[c, d, 0, 0],
[0, 0, e, f],
[0, 0, g, h]])
>>> P * B * P_inv
Matrix([
[a, 0, b, 0],
[0, e, 0, f],
[c, 0, d, 0],
[0, g, 0, h]])
Notes
=====
This problem corresponds to the finding of the connected components
of a graph, when a matrix is viewed as a weighted graph.
"""
from sympy.combinatorics.permutations import Permutation
from sympy.matrices.expressions.blockmatrix import BlockDiagMatrix
from sympy.matrices.expressions.permutation import PermutationMatrix
iblocks = M.connected_components()
p = Permutation(flatten(iblocks))
P = PermutationMatrix(p)
blocks = []
for b in iblocks:
blocks.append(M[b, b])
B = BlockDiagMatrix(*blocks)
return P, B
|
1670555
|
import os
import serial
import threading
import time
import random
SIO_ACK = 0x41 # A
SIO_NACK = 0x4E # N
SIO_COMPLETE = 0x43 # C
SIO_ERROR = 0x45 # E
US_PER_S = 1000000.0
# Constants from sio2bsd
BASIC_DELAY = 2000
POKEY_PAL_HZ = 1773447.0
POKEY_NTSC_HZ = 1789790.0
POKEY_AVG_HZ = (POKEY_NTSC_HZ + POKEY_PAL_HZ) / 2.0
POKEY_CONST = 7.1861
# From https://github.com/jzatarski/RespeQt/blob/2e50a405e5ee65c15bb36177351e7185ddac259c/serialport-unix.cpp
SLEEP_FACTOR = 10000
FRAME_DELAY = 50.0 / 1000000.0
WRITE_DELAY = (1.0 * SLEEP_FACTOR) / 1000000.0
HANDSHAKE_DELAY = 500.0 / 1000000.0
COMP_DELAY = 800.0 / 1000000.0
diskimagedata = []
_sio_port = None
_sio_inputhandler = None
_sio_diskimage = None
_sio_restarthandler = None
_sio_resumehandler = None
_sio_outqueue = []
_sio_thread_alive = False
_sio_thread = None
port = None
def sio_setport(port):
global _sio_port
_sio_port = port
def sio_setdiskimage(path):
global _sio_diskimage
_sio_diskimage = path
def sio_setinputhandler(cb):
global _sio_inputhandler
_sio_inputhandler = cb
def sio_setrestarthandler(cb):
global _sio_restarthandler
_sio_restarthandler = cb
def sio_setresumehandler(cb):
global _sio_resumehandler
_sio_resumehandler = cb
def sio_write(data):
global _sio_outqueue
print ("Sio: Appending to output queue: %r" % (hexbytes(data)))
_sio_outqueue += data
def sioChecksum(bytearrayinput):
sum = 0
for k in bytearrayinput:
sum += k
if sum > 255:
sum -= 255
return sum
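# Worked example of the wrap-around checksum above:
#   0x31 + 0x53 + 0x00 + 0x00 = 0x84 (132), below 255 so no wrap,
#   hence sioChecksum([0x31, 0x53, 0x00, 0x00]) == 0x84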
def hexbytes(bytearrayinput):
return ' '.join('%02x' % i for i in bytearrayinput)
def writeResponseBytes(bytearrayinput):
checksum = sioChecksum(bytearrayinput)
# print ("Writing bytes: %s (checksum %02x)" % (hexbytes(bytearrayinput), checksum))
bytearrayinput.append(checksum)
bytes = map(lambda x: chr(x), bytearrayinput)
x = port.write(bytes)
# print ("Wrote %d bytes" % (x))
def sendRaw(bytearrayinput):
# print ("Sending raw: %s" % hexbytes(bytearrayinput))
bytes = map(lambda x: chr(x), bytearrayinput)
x = port.write(bytes)
# print ("Wrote %d bytes" % (x))
def sendACK():
    d = (BASIC_DELAY*1000) / (POKEY_AVG_HZ/1000) / US_PER_S
    # print ("delaying", d)
    time.sleep(d)
sendRaw([SIO_ACK])
def sendNACK():
sendRaw([SIO_NACK])
def sendComplete():
time.sleep(WRITE_DELAY)
sendRaw([SIO_COMPLETE])
def sendError():
time.sleep(WRITE_DELAY)
sendRaw([SIO_ERROR])
def sendResponse(bytearrayinput):
checksum = sioChecksum(bytearrayinput)
bytearrayinput.append(checksum)
time.sleep(FRAME_DELAY)
time.sleep(WRITE_DELAY)
sendRaw(bytearrayinput)
def handleGetStatus(bytearrayinput):
print ("Get status")
status = [0, 0, 0, 0]
status[0] = 8 # | 64 | 128
status[1] = 0
status[2] = 0
status[3] = 1
sendResponse(status)
def handleReadSector(bytearrayinput):
global diskimagedata
global _sio_diskimage
global _sio_restarthandler
global _sio_outqueue
with open(_sio_diskimage, 'rb') as f:
diskimagedata = bytearray(f.read())
print ("Read %d bytes disk image" % (len(diskimagedata)))
si = bytearrayinput[2] + (bytearrayinput[3] * 256)
o = 16 + ((si - 1) * 128)
print ("Read sector #%d (offset %d)" % (si, o))
resp = diskimagedata[o:o+128]
sendResponse(resp)
if si == 1:
        # the first sector was requested: treat it as a restart
print ("Requested first sector (restart handler)")
_sio_outqueue = []
if _sio_restarthandler:
_sio_restarthandler()
def handleWritePercomBlock(bytearrayinput):
print ("Write percom")
def readFromSerial(l):
global port
bytes = port.read(l)
mapped = map(lambda x: ord(x), bytes)
return mapped
def handleFloppyCommand(device):
rest = readFromSerial(4)
command = [device] + rest
# print("Handling disk command: %s" % hexbytes(command))
    if len(command) != 5:
print("Invalid disk command: %s" % hexbytes(command))
sendNACK()
return
if command[1] == 0x53:
print("Get disk status command: %s" % hexbytes(command))
sendACK()
sendComplete()
handleGetStatus(command)
elif command[1] == 0x52:
print("Read disk sector command: %s" % hexbytes(command))
sendACK()
sendComplete()
handleReadSector(command)
else:
print("Invalid disk command: %s" % hexbytes(command))
sendNACK()
def handleSerialPayload(bytes):
if len(bytes) < 2:
print ("Got invalid serial payload")
return
if bytes[0] == ord('K'):
print ("Got keyboard press: %d" % bytes[1])
_sio_inputhandler(bytes)
elif bytes[0] == ord('J'):
print ("Got joystick move: %d" % bytes[1])
_sio_inputhandler(bytes)
elif bytes[0] == ord('X'):
if _sio_resumehandler:
_sio_resumehandler()
else:
print ("Got unhandled serial payload: %s" % hexbytes(bytes))
# See (ack/complete): http://abbuc.de/~montezuma/Sio2BT%20Networking.pdf
def handleSerialCommand(device):
global _sio_outqueue
rest = readFromSerial(4)
command = [device] + rest
    if len(command) != 5:
print("Invalid serial command (length): %s" % hexbytes(command))
sendNACK()
return
if command[1] == 0x50:
# print("Serial data write: %s" % hexbytes(command))
sendACK()
sendComplete()
l = command[2]
data = readFromSerial(l)
handleSerialPayload(data)
# print ("Got serial data: %s" % hexbytes(data))
sendACK()
# gotkey = True
elif command[1] == 0x52:
# print("Serial data read: %s" % hexbytes(command))
sendACK()
sendComplete()
response = []
l = len(_sio_outqueue)
if l > 60:
l = 60
for k in range(l):
popped = _sio_outqueue.pop(0)
response.append(popped)
for k in range(64):
if (len(response) < 64):
response.append(0)
response = [l] + response
# response.append(random.randint(1, 60))
# for k in range(64):
# # if response
# response.append(k)
# response = []
if l > 0:
print ("Sending serial data: %s" % hexbytes(response))
sendResponse(response)
else:
print("Invalid serial command (command): %s" % hexbytes(command))
sendNACK()
def handleFirstByte(firstbyte):
if firstbyte[0] == 0x31:
handleFloppyCommand(firstbyte[0])
elif firstbyte[0] == 0x50:
handleSerialCommand(firstbyte[0])
else:
# print("Invalid first byte: %s" % hexbytes(firstbyte))
sendNACK()
def sio_thread():
global _sio_thread_alive
global _sio_port
global port
print("Opening serial port: %s" % _sio_port)
try:
port = serial.Serial(_sio_port, 19200, timeout=0.01, rtscts=0)
    except serial.SerialException as e:
print("Failed to open port!", e)
return
print("Opened serial port.")
while _sio_thread_alive:
firstbyte = readFromSerial(1)
if firstbyte:
handleFirstByte(firstbyte)
if port:
print("Closing serial port...")
port.close()
def sio_start():
global _sio_thread
global _sio_thread_alive
_sio_thread_alive = True
_sio_thread = threading.Thread(target = sio_thread)
_sio_thread.start()
def sio_stop():
global _sio_thread
global _sio_thread_alive
_sio_thread_alive = False
_sio_thread.join()
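# Typical wiring (sketch; the device path and disk image name are
# assumptions, not values from this module):
#
#   sio_setport('/dev/ttyUSB0')
#   sio_setdiskimage('game.atr')
#   sio_setinputhandler(lambda data: None)
#   sio_start()
#   ...
#   sio_stop()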
|
1670586
|
from abc import ABC, abstractmethod
from typing import List
class ICommand(ABC):
"""Интерфейсный класс для выполняемых операций"""
@abstractmethod
def execute(self) -> None:
...
class ChiefAssistant:
    def prepare_pizza_dough(self):
        print("The assistant prepares the pizza dough")
    def prepare_topping(self):
        print("The assistant chops the pizza topping")
    def prepare_sauce(self):
        print("The assistant makes the sauce")
class Stove:
    def prepare_stove(self):
        print("The stove is heating up")
    def cooking_pizza(self):
        print("The pizza is baking in the stove")
class ChiefCooker:
    def make_pizza_base(self):
        print("The chef rolls out the pizza base")
    def applied_sauce(self):
        print("The chef spreads the sauce on the pizza base")
    def add_topping_to_pizza(self):
        print("The chef adds the topping to the pizza")
    def bon_appetit(self):
        print("The chef wishes the customer bon appetit!")
class PrepareStoveCommand(ICommand):
"""Класс команды для разогрева печи"""
def __init__(self, executor: Stove):
self.__executor = executor
def execute(self) -> None:
self.__executor.prepare_stove()
class PrepareDoughCommand(ICommand):
"""Класс команды для подготовки теста пиццы"""
def __init__(self, executor: ChiefAssistant):
self.__executor = executor
def execute(self) -> None:
self.__executor.prepare_pizza_dough()
class PrepareToppingCommand(ICommand):
"""Класс команды для нарезки начинки пиццы"""
def __init__(self, executor: ChiefAssistant):
self.__executor = executor
def execute(self) -> None:
self.__executor.prepare_topping()
class PrepareSauceCommand(ICommand):
"""Класс команды для приготовления соуса"""
def __init__(self, executor: ChiefAssistant):
self.__executor = executor
def execute(self) -> None:
self.__executor.prepare_sauce()
class CookingPizzaCommand(ICommand):
"""Класс команды для приготовления пиццы в печи"""
def __init__(self, executor: Stove):
self.__executor = executor
def execute(self) -> None:
self.__executor.cooking_pizza()
class MakePizzaBaseCommand(ICommand):
"""Класс команды для приготовления основы для пиццы"""
def __init__(self, executor: ChiefCooker):
self.__executor = executor
def execute(self) -> None:
self.__executor.make_pizza_base()
class AppliedSauceCommand(ICommand):
"""Класс команды для нанесения соуса на пиццу"""
def __init__(self, executor: ChiefCooker):
self.__executor = executor
def execute(self) -> None:
self.__executor.applied_sauce()
class AddToppingCommand(ICommand):
"""Класс команды для добавления начинки на пиццу"""
def __init__(self, executor: ChiefCooker):
self.__executor = executor
def execute(self) -> None:
self.__executor.add_topping_to_pizza()
class BonAppetitCommand(ICommand):
"""Класс команды для пожелания клиенту
приятного аппетита"""
def __init__(self, executor: ChiefCooker):
self.__executor = executor
def execute(self) -> None:
self.__executor.bon_appetit()
class Pizzeria:
"""Класс агрегации всех команд для приготовления
пиццы"""
def __init__(self):
self.history: List[ICommand] = []
def addCommand(self, command: ICommand) -> None:
self.history.append(command)
def cook(self) -> None:
if not self.history:
print("Не задана очередность выполнения"
" команд приготовления пиццы")
else:
for executor in self.history:
executor.execute()
self.history.clear()
if __name__ == "__main__":
chief = ChiefCooker()
assistant = ChiefAssistant()
stove = Stove()
pizzeria = Pizzeria()
    # build the sequence of commands for cooking the pizza
pizzeria.addCommand(PrepareDoughCommand(assistant))
pizzeria.addCommand(MakePizzaBaseCommand(chief))
pizzeria.addCommand(PrepareSauceCommand(assistant))
pizzeria.addCommand(AppliedSauceCommand(chief))
pizzeria.addCommand(PrepareStoveCommand(stove))
pizzeria.addCommand(PrepareToppingCommand(assistant))
pizzeria.addCommand(AddToppingCommand(chief))
pizzeria.addCommand(CookingPizzaCommand(stove))
pizzeria.addCommand(BonAppetitCommand(chief))
    # start the pizza cooking process
pizzeria.cook()
|
1670622
|
from os import path
import subprocess
import anndata as ad
import scipy.sparse
## VIASH START
# This code block will be replaced by viash at runtime.
meta = { 'functionality_name': 'foo' }
## VIASH END
method_id = meta['functionality_name']
command = "./" + method_id
# define some filenames
testpar = {
"input_mod1": "resources_test/joint_embedding/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.mod1.h5ad",
"input_mod2": "resources_test/joint_embedding/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.mod2.h5ad",
"output": "output.h5ad",
}
print("> Running method")
out = subprocess.check_output([
command,
"--input_mod1", testpar['input_mod1'],
"--input_mod2", testpar['input_mod2'],
"--output", testpar['output']
]).decode("utf-8")
print("> Checking whether output files were created")
assert path.exists(testpar['output'])
print("> Reading h5ad files")
input_mod1 = ad.read_h5ad(testpar['input_mod1'])
output = ad.read_h5ad(testpar['output'])
print("> Checking contents of output.h5ad")
assert output.uns['dataset_id'] == input_mod1.uns['dataset_id']
assert output.uns['method_id'] == method_id
assert output.n_obs == input_mod1.n_obs
assert output.n_vars >= 1
assert output.n_vars <= 100
assert all(output.obs_names == input_mod1.obs_names)
assert not scipy.sparse.issparse(output.X)
print("> Test succeeded!")
|
1670634
|
import os
import subprocess
import sys
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Callable, Dict, List, Union
from cookiecutter.main import cookiecutter
root = str(Path(__file__).parent.parent)
expected_files_base = [
'.editorconfig',
'.gitignore',
'.pre-commit-config.yaml',
'README.md',
'src/{module_name}/__init__.py',
'src/{module_name}/main.py',
'src/{module_name}/version.py',
]
def get_module_name(project_dir: Path) -> str:
module_name = os.listdir(project_dir / 'src')[0]
assert (project_dir / 'src' / module_name).is_dir()
return module_name
def resolve_module_dir(files: List[str], module_name: str) -> List[str]:
return [(s.format(module_name=module_name) if '{' in s else s) for s in files] if files else []
def check_files(project_dir: Path, files: List[str], exist=True):
for file in files:
path = (project_dir / file).resolve()
assert path.exists() == exist, f"file '{path}' should {'' if exist else 'not '}have existed"
def list_files(base_dir, indent=4):
for base, dirs, files in os.walk(base_dir):
level = len(Path(base).relative_to(base_dir).parents)
space = ' ' * indent * level
print('{}{}/'.format(space, os.path.basename(base)))
space_sub = ' ' * indent * (level + 1)
for f in files:
print('{}{}'.format(space_sub, f))
def check_project(
project_name="Test Project", settings: Dict[str, str] = None, files_existent: List = None,
files_non_existent: List = None, test_cli=False, run_pytest=False,
fun: Callable[[Path], None] = None):
# define cookiecutter settings
if settings is None:
settings = {'project_name': project_name}
else:
settings['project_name'] = project_name
# work in a new temporary directory
with TemporaryDirectory() as temp_dir:
# create the project files from the cookiecutter template
project_dir = cookiecutter(root, extra_context=settings, no_input=True, output_dir=temp_dir)
project_dir = Path(project_dir)
module_name = get_module_name(project_dir)
src_dir = str(project_dir / 'src')
list_files(project_dir)
# check that certain files exist and make sure that others do not exist
paths_pos = resolve_module_dir(expected_files_base + (files_existent or []), module_name)
paths_neg = resolve_module_dir(files_non_existent, module_name)
check_files(project_dir, paths_pos)
check_files(project_dir, paths_neg, exist=False)
# test the CLI
if test_cli:
result = subprocess.run([sys.executable, '-m', module_name, '--version'], cwd=src_dir)
assert result.returncode == 0, "cli call returned a nonzero exit code"
# run pytests, if specified
if run_pytest:
result = subprocess.run([sys.executable, '-m', 'pytest', '..'], cwd=src_dir)
assert result.returncode == 0, "some pytest cases had errors"
# run additional code, if specified
if fun:
fun(project_dir)
def assert_file_contains(file: Union[str, Path], contains: str = None, not_contains: str = None):
with open(file, 'r') as fp:
content = fp.read()
if contains:
assert contains in content, f"'{contains}' should have been found in '{file}'"
if not_contains:
        assert not_contains not in content, f"'{not_contains}' should not exist in '{file}'"
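# Illustrative call (sketch; the settings keys and file names are assumptions
# about the cookiecutter template, not guarantees):
#
#   check_project(
#       settings={'command_line_interface': 'argparse'},
#       files_existent=['src/{module_name}/cli.py'],
#       test_cli=True,
#       fun=lambda d: assert_file_contains(d / 'README.md', contains='Test Project'),
#   )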
|
1670642
|
import xml.etree.ElementTree as ET
tree = ET.parse('4e3c.xml')
root = tree.getroot()
# all item attributes
print('\nAll attributes:')
for elem in root:
print(elem.text.encode("utf-8"))
print("elem")
for subelem in elem:
#print(subelem)
print(subelem.attrib["x"])
|
1670741
|
import os
import sys
import yaml
def inside_workspace():
return bool(locate_bootstrap())
def locate_bootstrap():
"""Return the path to the closest parade bootstrap file in current directory
"""
clue = os.environ.get('PARADE_WORKSPACE', '.')
path = os.path.abspath(clue)
bootfile = os.path.join(path, 'parade.bootstrap.yml')
if os.path.exists(bootfile):
return bootfile
return None
def load_bootstrap(addpath=True):
"""Initialize environment to use command-line tool from inside a project
dir. This sets the Scrapy settings module and modifies the Python path to
be able to locate the project module.
"""
bootfile = locate_bootstrap()
if not bootfile:
return []
workspace = os.path.dirname(bootfile)
if addpath and workspace not in sys.path:
sys.path.append(workspace)
with open(bootfile) as f:
content = f.read()
config_dict = yaml.load(content, Loader=yaml.FullLoader)
config_dict['workspace']['path'] = workspace
return config_dict
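# Example (sketch; assumes a workspace directory containing
# parade.bootstrap.yml):
#
#   os.environ['PARADE_WORKSPACE'] = '/path/to/workspace'
#   config = load_bootstrap()
#   config['workspace']['path']  # -> '/path/to/workspace'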
|
1670745
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(r'D:\DeepLearning\Kaggle\Datahandling')
import utils_for_datasets
import glob
import numpy as np
import cv2
import re
import os.path
import scipy
import time
from skimage.measure import label
import skimage.transform as ski_transform
import matplotlib.pyplot as plt
DATASETROOT = r'CVSP\Cameratrap'
DATASETROOT_CVL = r'CVSP\CVL'
UNETROOT = r'D:\DeepLearning\Semantic_segmentation\Cameratrap_Dataset'
UNETROOT_CVL = r'D:\DeepLearning\Semantic_segmentation\CVL_Dataset'
DATASET_FOLDER_TISQUANT = r'D:\DeepLearning\SCCHCode\TisQuantValidation\data'
#DATASET_FOLDER_KAGGLE = r'D:\\DeepLearning\\SCCHCode\\data\\kaggle-dsbowl-2018-dataset-fixes-master\\stage1_train'
#DATASET_FOLDER_KAGGLE = r'D:\\DeepLearning\\SCCHCode\\data\\Kaggle\\stage1_train'
from scipy.io import loadmat, savemat
from tifffile import tifffile
from Config.Config import UNETSettings
from tqdm import tqdm
class TisquantDataset(utils_for_datasets.Dataset):
def load_data(self,width=None,height=None,ids=None,mode=1):
self.add_class("Nuclei",1,'Nucleus')
if (mode==1):
data_file = "256x256_TisQuantTrainingData_Evaluation1_new.mat"
else:
data_file = "256x256_TisQuantTestData_Evaluation1_new.mat"
print('... LOADING DATA')
Images, Labels, FileNames = [], [], []
raw_data = loadmat(os.path.join(DATASET_FOLDER_TISQUANT, data_file), struct_as_record=True)
if (mode==1):
raw_data = raw_data['trainingset']
else:
raw_data = raw_data['testset']
Images, Masks = [], []
slice_size = 256
masks = raw_data['groundtruth'][0]
raw_images = raw_data['rawimage'][0]
n_images = len(raw_images)
for i,img in enumerate(raw_images):
#img_new = np.zeros((3, img.shape[0], img.shape[1]))
#img_new[0] = img
#img_new[1] = img
#img_new[2] = img
#Images.append(img_new / 255.0)
#Images.append(img / 255.0)
#Images.append(img / 255.0)
Images.append(img)
#Masks.append(label(masks[i]>0))
Masks.append(masks[i])
# convert to conv net format
img_size = Images[0].shape
#Images = np.asarray(Images, dtype=np.float32).reshape(-1, img_size[0], img_size[1],img_size[2])
#Images = np.transpose(Images, (0, 2, 3, 1))
Images = np.asarray(Images, dtype=np.float32).reshape(-1, img_size[0], img_size[1])
#Masks = np.asarray(Masks, dtype=np.float32).reshape(-1, 1, img_size[1], img_size[2])
#Masks = np.transpose(Masks, (0, 2, 3, 1))
Masks = np.asarray(Masks, dtype=np.float32).reshape(-1, img_size[0], img_size[1])
train_val = 0.8
ret_val = 0
n_tr = int(round(Images.shape[0] * 0.8))
ids = np.arange(Images.__len__())
if (mode == 1): # Trainingset
np.random.shuffle(ids)
self.images = Images
self.masks = Masks
for i in range(self.images.shape[0]):
self.add_image("Nuclei", image_id=i, path=None,width=width, height=height)
self.train_cnt = int(self.images.__len__()*0.8)
#self.images = np.transpose(self.images,(0,3,1,2))
#self.masks = np.transpose(self.masks,(0,3,1,2))
return ids
def getMeanMaskObjectSize(self, image_id):
masks = self.load_mask(image_id)
masks_new = masks[0][:, :, 1:]
print("Summe: {0}, Laenge: {1}".format(masks_new.sum(), masks_new.shape[2]))
if (np.isnan(masks_new.sum() / masks_new.shape[2])):
return 0
else:
return int(masks_new.sum() / masks_new.shape[2])
def load_image(self, image_id):
return self.images[image_id]
def image_reference(self, image_id):
"""Return the shapes data of the image."""
info = self.image_info[image_id]
if info["source"] == "shapes":
return info["shapes"]
else:
super(self.__class__).image_reference(self, image_id)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
info = self.image_info[image_id]
mask = self.masks[image_id]
count = int(mask.max())
mask_new = np.zeros([info['height'], info['width'], count+1], dtype=np.uint8) # one more for background
for i in range(count+1):
#mask_new[:, :, i:i+1] = (mask == i).transpose(1, 2, 0)
mask_new[:, :, i:i + 1] = (mask==i).reshape(mask.shape[0], mask.shape[1], -1)
# mask_new[:, :, i:i+1] = (mask==i).transpose(1,2,0)
# Map class names to class IDs.
        class_ids = np.ones(count+1)  # one more for background
#add Background
#class_ids[count] = 0 # add Background
#mask_new[:, :, count:count + 1] = (mask == 0).transpose(1, 2, 0)
#class_ids[count] = 0 # add Background
class_ids[0] = 0 # add Background
# End add Background
return mask_new, class_ids.astype(np.int32)
def load_mask_one_layer(self,image_id):
return self.masks[image_id]#[0]
class KaggleDataset(utils_for_datasets.Dataset):
def load_data(self,width=None,height=None,ids=None,mode=1,folders=None):
self.image_path = []
self.mask_path = []
self.add_class("Nucleus",1,'Nucleus')
self.setImagePaths(folders)
ids = np.arange(self.image_path.__len__())
np.random.seed(1)
np.random.shuffle(ids)
self.ids = ids
for i in self.ids:
self.add_image("Nucleus", image_id=i, path=None)
return ids
def load_image(self, image_id):
info = self.image_info[image_id]
img = cv2.imread(self.image_path[self.ids[image_id]])
#img = ski_transform.resize(img, (info['height'], info['width']), mode='reflect')
return img
def setImagePaths(self,folders=""):
for folder in os.listdir(folders):
file_pattern = os.path.join(folders,folder,'images',"*.png")
#print(file_pattern)
img_files = glob.glob(file_pattern)
for i in img_files:
self.image_path.append(i)
self.mask_path.append(os.path.join(folders,folder,'masks'))
def image_reference(self, image_id):
"""Return the shapes data of the image."""
info = self.image_info[image_id]
if info["source"] == "shapes":
return info["shapes"]
else:
super(self.__class__).image_reference(self, image_id)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
mask_path = self.mask_path[self.ids[image_id]]
file_pattern = os.path.join(mask_path, "*.png")
info = self.image_info[image_id]
mask_files = glob.glob(file_pattern)
#mask_tmp = cv2.imread(mask_files[0])
mask_new = np.zeros([info['height'], info['width'], mask_files.__len__()+1], dtype=np.uint8) # one more for background
count = 1
mask_total = 0
for i in mask_files:
mask = cv2.imread(i)
mask = mask[:, :, 1] / 255.0
#mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')
mask_new[:, :, count] = (mask)
mask_total = mask_total + (mask>0) * count
count = count + 1
# Map class names to class IDs.
        class_ids = np.ones(count)  # one more for background
        #add Background
        class_ids[0] = 0  # Background
        mask_new[:, :, 0] = np.invert(mask_total.astype(bool))
# End add Background
return mask_new, class_ids.astype(np.int32)
def load_mask_one_layer(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
mask_path = self.mask_path[self.ids[image_id]]
file_pattern = os.path.join(mask_path, "*.png")
info = self.image_info[image_id]
mask_files = glob.glob(file_pattern)
#mask_tmp = cv2.imread(mask_files[0])
mask_new = np.zeros([info['width'], info['height'], mask_files.__len__()+1], dtype=np.uint8) # one more for background
count = 1
mask_total = 0
for i in mask_files:
mask = cv2.imread(i)
mask = mask[:, :, 1] / 255.0
#mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')
mask_new[:, :, count] = (mask)
mask_total = mask_total * (mask == 0)
mask_total = mask_total + (mask>0) * count
count = count + 1
return mask_total
def getMeanMaskObjectSize(self, image_id):
mask_path = self.mask_path[self.ids[image_id]]
file_pattern = os.path.join(mask_path, "*.png")
mask_files = glob.glob(file_pattern)
        total_sum = 0
for i in mask_files:
mask = cv2.imread(i)
total_sum = total_sum + (mask>0).sum()
return (total_sum / mask_files.__len__()).astype(np.int16)
def pre_process_img(self,img, color):
"""
Preprocess image
"""
        if color == 'gray':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        elif color == 'rgb':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        else:
            pass
img = img.astype(np.float32)
img /= 255.0
return img
class ArtificialNucleiDataset(utils_for_datasets.Dataset):
img_prefix = 'Img_'
img_postfix = '-outputs.png'
mask_prefix = 'Mask_'
mask_postfix = '.tif'
settings = UNETSettings()
def load_data(self, width=256, height=256, ids=None, mode=1):
# Load settings
self.image_path = []
self.mask_path = []
self.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
train_cnt = 0
val_cnt = 0
print("Loading train data ...")
if self.settings.network_info["traintestmode"] == 'train':
for i in self.settings.network_info["dataset_dirs_train"].split(';'):
img_range = self.setImagePaths(folders=[i + "\\images"])
self.setMaskPaths(folders=[i + "\\masks"],img_range=img_range)
print("Checking train path ...")
self.checkPath()
print("Loading val data ...")
train_cnt = self.image_path.__len__()
for i in self.settings.network_info["dataset_dirs_val"].split(';'):
img_range = self.setImagePaths(folders=[i + "\\images"])
self.setMaskPaths(folders=[i + "\\masks"],img_range=img_range)
print("Checking val path ...")
self.checkPath()
val_cnt += self.image_path.__len__() - train_cnt
#ids = np.arange(self.image_path.__len__())
ids_train = np.arange(0,train_cnt)
ids_val = np.arange(train_cnt, train_cnt+val_cnt)
self.train_cnt = train_cnt
self.val_cnt = val_cnt
np.random.shuffle(ids_train)
np.random.shuffle(ids_val)
self.ids = np.concatenate((ids_train,ids_val),axis=0)
else:
for i in self.settings.network_info["dataset_dirs_test"].split(';'):
img_range = self.setImagePaths(folders=[i + "\\images"])
self.setMaskPaths(folders=[i + "\\masks"],img_range=img_range)
print("Checking train path ...")
self.checkPath()
self.ids = np.arange(0,self.image_path.__len__())
for i in self.ids:
self.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
return ids
def checkPath(self):
to_delete = []
for index,i in tqdm(enumerate(self.image_path)):
if not os.path.exists(i):
to_delete.append(index)
to_delete.sort(reverse=True)
for i in to_delete:
del self.image_path[i]
del self.mask_path[i]
def load_image(self, image_id):
info = self.image_info[image_id]
img_final = cv2.imread(self.image_path[self.ids[image_id]])
        try:
            img_final = img_final[:,:,0]
        except Exception:  # image may already be single-channel
            pass
#return img_final / 255.0
if self.settings.network_info["netinfo"] == 'maskrcnn': # mask rcnn need an rgb image
img_new = np.zeros((img_final.shape[0],img_final.shape[1],3))
img_new[:,:,0] = img_new[:,:,1] = img_new[:,:,2] = img_final
img_final = img_new
return img_final
def setImagePaths(self, folders=""):
for folder in folders:
file_pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix) #"Img_*-outputs.png")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
img_range = range(0,img_files.__len__())
for i in img_range:
#self.image_path.append(os.path.join(folder, "Img_" + str(i) + "-outputs.png"))
self.image_path.append(os.path.join(folder, self.img_prefix + str(i) + self.img_postfix))
# for i in img_files:
# self.image_path.append(i)
return img_range
def setMaskPaths(self, folders="",img_range=None):
for folder in folders:
file_pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix) #"Mask_*.tif")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
#for i in range(0,img_files.__len__()):
for i in img_range:
self.mask_path.append(os.path.join(folder, self.mask_prefix + str(i) + self.mask_postfix))
#self.mask_path.append(os.path.join(folder, "Mask_" + str(i) + ".tif"))
def image_reference(self, image_id):
"""Return the shapes data of the image."""
info = self.image_info[image_id]
if info["source"] == "shapes":
return info["shapes"]
else:
super(self.__class__).image_reference(self, image_id)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
info = self.image_info[image_id]
mask = tifffile.imread(self.mask_path[self.ids[image_id]])
if np.unique(mask).__len__() > 1:
count = np.unique(mask).__len__()-1 # one less because of 0
mask_new = np.zeros([info['height'], info['width'], count], dtype=np.uint8) # one more for background
running = 0
for i in np.unique(mask): #range(1, count):
if ((i > 0) & ((mask == i).sum() > 0)):
mask_new[:, :, running] = (mask == i)
running = running + 1
# Map class names to class IDs.
class_ids = np.ones(count)
else:
mask_new = np.zeros([info['height'], info['width'], 1], dtype=np.uint8)
class_ids = np.zeros([1])
return mask_new, class_ids.astype(np.int32)
def load_mask_one_layer(self, image_id,relabel=False):
mask = tifffile.imread(self.mask_path[self.ids[image_id]])
if (mask.ndim > 2):
mask = mask[:,:,0]
if (relabel):
mask_tmp = np.zeros((mask.shape[0],mask.shape[1]))
running=1
for i in np.unique(mask):
if i > 0:
mask_tmp = mask_tmp + running * (mask==i)
running = running + 1
            mask = mask_tmp.astype(float)
return mask #mask.astype(np.float)
def pre_process_img(self, img, color):
"""
Preprocess image
"""
        if color == 'gray':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        elif color == 'rgb':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        else:
            pass
img = img.astype(np.float32)
img /= 255.0
return img
def split_train_test(self,width=256, height=256):
dataset_train = ArtificialNucleiDataset()
dataset_test = ArtificialNucleiDataset()
dataset_train.image_path = []
dataset_train.mask_path = []
dataset_train.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
dataset_test.image_path = []
dataset_test.mask_path = []
dataset_test.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
image_path_train = []
image_path_val = []
mask_path_train = []
mask_path_val = []
self.ids = []
run = 0
dataset_train.image_path.extend(self.image_path[0:self.train_cnt])
dataset_train.mask_path.extend(self.mask_path[0:self.train_cnt])
dataset_train.train_cnt = self.image_path.__len__()
dataset_test.image_path.extend(self.image_path[self.train_cnt:])
dataset_test.mask_path.extend(self.mask_path[self.train_cnt:])
dataset_test.train_cnt = self.image_path.__len__() - self.train_cnt
ids_train = np.arange(0,self.train_cnt)
ids_val = np.arange(0,self.val_cnt)
np.random.shuffle(ids_train)
np.random.shuffle(ids_val)
dataset_train.ids = ids_train
dataset_test.ids = ids_val
for i in dataset_train.ids:
dataset_train.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
for i in dataset_test.ids:
dataset_test.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
dataset_train.prepare()
dataset_test.prepare()
return dataset_train, dataset_test
class TisquantDatasetNew(ArtificialNucleiDataset):
def setImagePaths(self, folders=""):
self.img_postfix = ".jpg"
for folder in folders:
#self.img_prefix = os.path.basename(folder) + "_"
folder_names = folder.split('\\')
self.img_prefix = "Img_" + folder_names[folder_names.__len__() - 2] + "_"
file_pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix) #"Img_*-outputs.png")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
img_range = range(0,img_files.__len__())
for i in img_range:
#self.image_path.append(os.path.join(folder, "Img_" + str(i) + "-outputs.png"))
self.image_path.append(os.path.join(folder, self.img_prefix + str(i) + self.img_postfix))
# for i in img_files:
# self.image_path.append(i)
return img_range
def setMaskPaths(self, folders="",img_range=None):
self.mask_postfix = ".tif"
for folder in folders:
#self.mask_prefix = os.path.basename(folder) + "_"
#self.mask_prefix = "Mask_"
folder_names = folder.split('\\')
self.mask_prefix = "Mask_" + folder_names[folder_names.__len__() - 2] + "_"
file_pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix) #"Mask_*.tif")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
#for i in range(0,img_files.__len__()):
for i in img_range:
self.mask_path.append(os.path.join(folder, self.mask_prefix + str(i) + self.mask_postfix))
#self.mask_path.append(os.path.join(folder, "Mask_" + str(i) + ".tif"))
class SpecificNucleiDataset(ArtificialNucleiDataset):
def setImagePaths(self, folders=""):
self.img_postfix = "-outputs.png"
for folder in folders:
#self.img_prefix = os.path.basename(folder) + "_"
folder_names = folder.split('\\')
self.img_prefix = "Specific_"
file_pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix) #"Img_*-outputs.png")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
img_range = range(0,img_files.__len__())
for i in img_range:
#self.image_path.append(os.path.join(folder, "Img_" + str(i) + "-outputs.png"))
self.image_path.append(os.path.join(folder, self.img_prefix + str(i) + self.img_postfix))
# for i in img_files:
# self.image_path.append(i)
return img_range
def setMaskPaths(self, folders="",img_range=None):
self.mask_postfix = ".tif"
for folder in folders:
#self.mask_prefix = os.path.basename(folder) + "_"
#self.mask_prefix = "Mask_"
folder_names = folder.split('\\')
self.mask_prefix = "Specific_Mask_"
file_pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix) #"Mask_*.tif")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
#for i in range(0,img_files.__len__()):
for i in img_range:
self.mask_path.append(os.path.join(folder, self.mask_prefix + str(i) + self.mask_postfix))
#self.mask_path.append(os.path.join(folder, "Mask_" + str(i) + ".tif"))
class MergedDataset(ArtificialNucleiDataset):
def __init__(self,datasets):
super(MergedDataset, self).__init__(self)
self.image_path = []
self.mask_path = []
self.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
image_path_train = []
image_path_val = []
mask_path_train = []
mask_path_val = []
self.ids = []
run = 0
for dataset in datasets:
self.image_path.extend(dataset.image_path[0:dataset.train_cnt])
self.mask_path.extend(dataset.mask_path[0:dataset.train_cnt])
# self.ids.extend(dataset.ids[0:dataset.train_cnt]+self.ids.__len__())
self.train_cnt = self.image_path.__len__()
for dataset in datasets:
self.image_path.extend(dataset.image_path[dataset.train_cnt:])
self.mask_path.extend(dataset.mask_path[dataset.train_cnt:])
self.val_cnt = self.image_path.__len__() - self.train_cnt
ids_train = np.arange(0,self.train_cnt)
ids_val = np.arange(self.train_cnt, self.train_cnt+self.val_cnt)
np.random.shuffle(ids_train)
np.random.shuffle(ids_val)
self.ids = np.concatenate((ids_train,ids_val),axis=0)
def load_data(self, width=256, height=256, ids=None, mode=1):
for i in self.ids:
self.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
def load_image(self, image_id):
info = self.image_info[image_id]
img_final = cv2.imread(self.image_path[self.ids[image_id]])
        try:
            img_final = img_final[:,:,0]
        except Exception:  # image may already be single-channel
            pass
        #return img_final / 255.0
        try:
            img_final = img_final[:,0:256]
        except Exception:  # keep the full width if cropping fails
            pass
if self.settings.network_info["netinfo"] == 'maskrcnn': # mask rcnn need an rgb image
img_new = np.zeros((img_final.shape[0],img_final.shape[1],3))
img_new[:,:,0] = img_new[:,:,1] = img_new[:,:,2] = img_final
img_final = img_new
return img_final
class ArtificialNucleiDatasetNotConverted(ArtificialNucleiDataset):
img_prefix = 'Img_'
img_postfix = '.jpg' #'-inputs.png'
mask_prefix = "Mask_"
def setImagePaths(self, folders=""):
for folder in folders:
#self.img_prefix = os.path.basename(folder) + "_"
folder_names = folder.split('\\')
file_pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix) #"Img_*-outputs.png")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
img_range = range(0,img_files.__len__())
for i in img_range:
#self.image_path.append(os.path.join(folder, "Img_" + str(i) + "-outputs.png"))
self.image_path.append(os.path.join(folder, self.img_prefix + str(i) + self.img_postfix))
# for i in img_files:
# self.image_path.append(i)
return img_range
def setMaskPaths(self, folders="",img_range=None):
self.mask_postfix = ".tif"
for folder in folders:
#self.mask_prefix = os.path.basename(folder) + "_"
#self.mask_prefix = "Mask_"
folder_names = folder.split('\\')
file_pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix) #"Mask_*.tif")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
#for i in range(0,img_files.__len__()):
for i in img_range:
self.mask_path.append(os.path.join(folder, self.mask_prefix + str(i) + self.mask_postfix))
#self.mask_path.append(os.path.join(folder, "Mask_" + str(i) + ".tif"))
def load_image(self, image_id):
info = self.image_info[image_id]
img_final = cv2.imread(self.image_path[self.ids[image_id]])
        try:
            img_final = img_final[:,:,0]
        except Exception:  # image may already be single-channel
            pass
#return img_final / 255.0
img_final = img_final[:,0:256]
if self.settings.network_info["netinfo"] == 'maskrcnn': # mask rcnn need an rgb image
img_new = np.zeros((img_final.shape[0],img_final.shape[1],3))
img_new[:,:,0] = img_new[:,:,1] = img_new[:,:,2] = img_final
img_final = img_new
return img_final
def load_mask_one_layer(self, image_id,relabel=False):
mask = tifffile.imread(self.mask_path[self.ids[image_id]])
if (mask.ndim > 2):
mask = mask[:,:,0]
#mask = mask[:, 0:256]
if (relabel):
mask_tmp = np.zeros((mask.shape[0],mask.shape[1]))
running=1
for i in np.unique(mask):
if i > 0:
mask_tmp = mask_tmp + running * (mask==i)
running = running + 1
            mask = mask_tmp.astype(float)
return mask #mask.astype(np.float)
class SampleInference(ArtificialNucleiDataset):
def setImagePaths(self, folders=""):
self.img_postfix = ".jpg"
for folder in folders:
#self.img_prefix = os.path.basename(folder) + "_"
folder_names = folder.split('\\')
self.img_prefix = "Img"
file_pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix) #"Img_*-outputs.png")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
img_range = range(0,img_files.__len__())
for i in img_range:
#self.image_path.append(os.path.join(folder, "Img_" + str(i) + "-outputs.png"))
self.image_path.append(img_files[i])
# for i in img_files:
# self.image_path.append(i)
return img_range
def setMaskPaths(self, folders="",img_range=None):
self.mask_postfix = ".tif"
for folder in folders:
#self.mask_prefix = os.path.basename(folder) + "_"
#self.mask_prefix = "Mask_"
folder_names = folder.split('\\')
self.mask_prefix = "Img"
file_pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix) #"Mask_*.tif")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
#for i in range(0,img_files.__len__()):
for i in img_range:
self.mask_path.append(img_files[i])
#self.mask_path.append(os.path.join(folder, "Mask_" + str(i) + ".tif"))
def load_data(self, width=256, height=256, ids=None, mode=1):
# Load settings
self.image_path = []
self.mask_path = []
self.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
train_cnt = 0
val_cnt = 0
print("Loading train data ...")
for i in self.settings.network_info["dataset_dirs_test"].split(';'):
img_range = self.setImagePaths(folders=[i + "\\images"])
self.setMaskPaths(folders=[i + "\\masks"],img_range=img_range)
print("Checking train path ...")
self.checkPath()
self.ids = np.arange(0,self.image_path.__len__())
for i in self.ids:
self.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
return ids
class DataLoading:
def load(self,phase='train'):
# Load settings
settings = UNETSettings()
# Load Dataset
print("Load dataset ...")
if UNETSettings().network_info["dataset"] == 'tisquant':
dataset = TisquantDatasetNew()
# dataset = TisquantDataset()
elif UNETSettings().network_info["dataset"] == 'artificialNuclei':
dataset = ArtificialNucleiDataset()
elif UNETSettings().network_info["dataset"] == 'artificialNucleiNotConverted':
dataset = ArtificialNucleiDatasetNotConverted()
elif UNETSettings().network_info["dataset"] == 'mergeTisquantArtificialNotConverted':
datasets = []
dataset1 = TisquantDatasetNew()
dataset1.load_data(mode=1)
dataset2 = ArtificialNucleiDatasetNotConverted()
dataset2.load_data(mode=1)
datasets.append(dataset1)
datasets.append(dataset2)
dataset = MergedDataset(datasets)
elif UNETSettings().network_info["dataset"] == 'mergeTisquantArtificial':
datasets = []
dataset1 = TisquantDatasetNew()
dataset1.load_data(mode=1)
dataset2 = ArtificialNucleiDataset()
dataset2.load_data(mode=1)
datasets.append(dataset1)
datasets.append(dataset2)
dataset = MergedDataset(datasets)
else:
print('Dataset not valid')
sys.exit("Error")
# Load Dataset
if phase == 'train':
dataset.load_data(mode=1)
else:
dataset.load_data(mode=2)
dataset.prepare()
return dataset
def getID(self):
settings = UNETSettings()
return settings.network_info["net_description"]
def getResultsPath(self):
settings = UNETSettings()
return settings.network_info["results_folder"]
|
1670835
|
import typing
from types import MappingProxyType
from cryptography.hazmat.backends import default_backend as defb
from cryptography.hazmat.primitives import hashes as h
from ... import base, exc
HASHES = MappingProxyType(
{
"sha1": h.SHA1,
"sha224": h.SHA224,
"sha256": h.SHA256,
"sha384": h.SHA384,
"sha512": h.SHA512,
"sha3_224": h.SHA3_224,
"sha3_256": h.SHA3_256,
"sha3_384": h.SHA3_384,
"sha3_512": h.SHA3_512,
"sha512_224": h.SHA512_224,
"sha512_256": h.SHA512_256,
"shake128": h.SHAKE128,
"shake256": h.SHAKE256,
"blake2b": lambda digest_size=None: h.BLAKE2b(digest_size or 64),
"blake2s": lambda digest_size=None: h.BLAKE2s(digest_size or 32),
}
)
VAR_DIGEST_SIZE = frozenset(
(
"shake128",
"shake256",
"blake2b",
"blake2s",
)
)
XOFS = frozenset(
(
"shake128",
"shake256",
)
)
# the ASN.1 Object IDs
OIDS = MappingProxyType(
{
"sha224": "2.16.840.1.101.3.4.2.4",
"sha256": "2.16.840.1.101.3.4.2.1",
"sha384": "2.16.840.1.101.3.4.2.2",
"sha512": "2.16.840.1.101.3.4.2.3",
"sha512_224": "2.16.840.1.101.3.4.2.5",
"sha512_256": "2.16.840.1.101.3.4.2.6",
"sha3_224": "2.16.840.1.101.3.4.2.7",
"sha3_256": "2.16.840.1.101.3.4.2.8",
"sha3_384": "2.16.840.1.101.3.4.2.9",
"sha3_512": "2.16.840.1.101.3.4.2.10",
"shake128": "2.16.840.1.101.3.4.2.11",
"shake256": "2.16.840.1.101.3.4.2.12",
}
)
del MappingProxyType
class Hash(base.BaseHash):
__slots__ = "_name", "_digest", "_ctx", "_digest_size", "_block_size"
def __init__(self, name, data=b"", *, digest_size=None):
self._name = name
self._digest = None
self._ctx = self._construct_hash(name, data, digest_size)
# get values directly from the algorithm object
algo = self._ctx.algorithm
self._digest_size = algo.digest_size
self._block_size = getattr(algo, "block_size", None)
@staticmethod
def _construct_hash(name, data=b"", digest_size=None):
if name in VAR_DIGEST_SIZE:
if digest_size is None and name in XOFS: # pragma: no cover
raise ValueError("value of digest-size is required")
hash_ = h.Hash(HASHES[name](digest_size), defb())
else:
hash_ = h.Hash(HASHES[name](), defb())
hash_.update(data)
return hash_
@property
def digest_size(self):
return self._digest_size
@property
def block_size(self):
if self._block_size is None:
return NotImplemented
return self._block_size
@property
def name(self):
return self._name
@property
def oid(self):
"""ASN.1 Object ID of the hash algorithm."""
if self.name in OIDS:
return OIDS[self.name]
# for BLAKE
if self.name == "blake2b":
if self.digest_size != 64:
                raise AttributeError(  # pragma: no cover
                    "oid is available only when digest size == 64"
                )
return "1.3.6.1.4.1.1722.12.2.1." + str(self.digest_size)
if self.name == "blake2s":
if self.digest_size != 32:
                raise AttributeError(  # pragma: no cover
                    "oid is available only when digest size == 32"
                )
return "1.3.6.1.4.1.1722.12.2.2." + str(self.digest_size)
def update(self, data):
if self._ctx is None:
raise exc.AlreadyFinalized
self._ctx.update(data)
def copy(self):
if self._ctx is None:
raise exc.AlreadyFinalized
hashobj = type(self)(self.name, digest_size=self.digest_size)
hashobj._ctx = self._ctx.copy()
return hashobj
def digest(self):
if self._ctx is None:
return self._digest
ctx, self._ctx = self._ctx, None
self._digest = ctx.finalize()
return self._digest
def new(self, data=b"", *, digest_size=None):
"""Create a fresh hash object.
See also:
:py:func:`new` for more information.
"""
return type(self)(
self.name,
data,
digest_size=digest_size or self.digest_size,
)
def algorithms_available() -> typing.Set[str]:
"""Return the names of the available hash algorithms.
Returns:
set[str]: Names of hash algorithms.
"""
return set(HASHES)
def new(
name: str,
data: bytes = b"",
*,
digest_size: typing.Optional[int] = None,
) -> Hash:
"""Instantiate an hash object with given parameters.
Args:
name (str):
Name of the hash algorithm. It must be compatible with
``hashlib.new``.
data (bytes, bytearray, memoryview):
Initial data to pass to the hash algorithm.
digest_size (int): An integer value.
Returns:
Hash: Hash object.
"""
return Hash(name, data, digest_size=digest_size)
def _get_hash_algorithm(hashfunc):
"""
Get the cryptography backend specific ``hash algorithm`` object from
the given hash ``hashfunc``.
"""
return new(
hashfunc.name,
digest_size=hashfunc.digest_size,
)._ctx.algorithm
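# Usage sketch (digest_size is mandatory for the XOFs shake128/shake256;
# the other names take their standard digest sizes):
#
#     h = new("sha256", b"data")
#     print(h.digest().hex())
#     xof = new("shake128", b"data", digest_size=32)
#     print(xof.digest().hex())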
|
1670861
|
from collections import namedtuple
from typing import Dict
class SentryRater:
RATE_LIMIT = 60 * 60 * 24
def __init__(self, daily_events: int, issues: namedtuple, *args, **kwargs):
self.daily_events = daily_events
self.issues = issues
rate_limit_reason = """
{percentage} of the rate limit reached, that's {comparison}. Come on, we can do better 💪🏻
"""
self.available_grades = {
"S": f"""{self.issues.total} issues found… but none of them seem problematic!
Right on track, as it should be 👍🏻""",
"A": f"""{self.issues.total} issues found, and {self.issues.problematic} of them seem problematic.
Not too bad, but it can be better 😊""",
"B": f"""{self.issues.total} issues found, and {self.issues.problematic} of them need to be handled 🧐
Come on, we can do better 💪🏻""",
"C": f"""{self.issues.total} issues found, and {self.issues.stale} of them are stale.
These issues usually indicate something broken by design. Come on, we can do better 💪🏻""",
"D": rate_limit_reason,
"E": rate_limit_reason,
"F": rate_limit_reason,
}
def according_to_event_count(self) -> Dict[str, str]:
if self.daily_events / self.RATE_LIMIT >= 0.9:
return {
"grade": "F",
"reason": self.available_grades["F"].format(
percentage="90%", comparison="one event per second"
),
}
if self.daily_events / self.RATE_LIMIT >= 0.5:
return {
"grade": "E",
"reason": self.available_grades["E"].format(
percentage="50%", comparison="30 errors every minute"
),
}
if self.daily_events / self.RATE_LIMIT >= 0.1:
return {
"grade": "D",
"reason": self.available_grades["D"].format(
percentage="10%", comparison="one error every 10 seconds"
),
}
return None
def according_to_issue_count(self) -> Dict[str, str]:
if self.issues.problematic == 0:
grade = "S"
elif (
self.issues.spoiled < 10
and self.issues.decaying < 5
and self.issues.stale == 0
):
grade = "A"
elif self.issues.stale <= 10 and self.issues.total <= 100:
grade = "B"
else:
grade = "C"
return {"grade": grade, "reason": self.available_grades[grade]}
def get_rating(self) -> Dict[str, str]:
return self.according_to_event_count() or self.according_to_issue_count()
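# Usage sketch (the issues namedtuple fields are inferred from the checks
# above: total, problematic, spoiled, decaying, stale):
#
#     from collections import namedtuple
#     Issues = namedtuple('Issues', 'total problematic spoiled decaying stale')
#     rater = SentryRater(daily_events=1000, issues=Issues(12, 0, 0, 0, 0))
#     rater.get_rating()  # -> {'grade': 'S', 'reason': '12 issues found...'}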
|
1670950
|
import torch
def safeSign(tensor):
result = torch.sign(tensor)
result[result==0] = 1
return result
def front(claaz):
"""
Return a Module proxy of your claaz.
"""
class fronteur(torch.nn.Module):
def __init__(self):
super(fronteur, self).__init__()
def forward(self, x):
return claaz.apply(x)
return fronteur()
def front2(claaz):
class fronteur(torch.nn.Module):
def __init__(self):
super(fronteur, self).__init__()
self.core = claaz
def forward(self, x):
return self.core.apply(x)
return fronteur()
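# Usage sketch: front() turns a torch.autograd.Function class into a Module,
# shown here with a hypothetical straight-through sign estimator:
#
#     class Sign(torch.autograd.Function):
#         @staticmethod
#         def forward(ctx, x):
#             return safeSign(x)
#         @staticmethod
#         def backward(ctx, grad_output):
#             return grad_output  # pass gradients straight through
#
#     layer = front(Sign)
#     y = layer(torch.randn(3))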
|
1670972
|
import unittest
from securityheaders.checkers.cors import AccessControlMaxAgeTooLongChecker
class AccessControlMaxAgeTooLongCheckerTest(unittest.TestCase):
def setUp(self):
self.x = AccessControlMaxAgeTooLongChecker()
def test_checkNoHeader(self):
nox = dict()
nox['test'] = 'value'
self.assertEqual(self.x.check(nox), [])
def test_checkNone(self):
nonex = None
self.assertEqual(self.x.check(nonex), [])
def test_checkNone2(self):
hasx = dict()
hasx['access-control-max-age'] = None
self.assertEqual(self.x.check(hasx), [])
def test_checkInvalid(self):
hasx2 = dict()
hasx2['access-control-max-age'] = "20000"
result = self.x.check(hasx2)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkValid(self):
hasx5 = dict()
hasx5['access-control-max-age'] = "100"
result = self.x.check(hasx5)
self.assertEqual(result, [])
def test_checkValid2(self):
hasx5 = dict()
hasx5['access-control-max-age'] = "-1"
self.assertEqual(self.x.check(hasx5), [])
if __name__ == '__main__':
unittest.main()
|
1670974
|
from rdfalchemy.sparql.sesame2 import SesameGraph
import os
url = os.environ.get('SESAME2_URL', 'http://example.com/sparql')
if 'example.com' in url:
from nose import SkipTest
raise SkipTest('Please provide a functioning Sesame2 endpoint URL')
g = SesameGraph(url)
q1 = "select ?s ?p ?o where {?s ?p ?o} limit 100"
responses = {}
x = set(list(g.query(q1, resultMethod='xml')))
# j = set(list(g.query(q1, resultMethod='json')))
# b = set(list(g.query(q1, resultMethod='brtr')))
b = j = x
def sizes_test():
assert len(b) == len(x) == len(j)
def eq_bx_test():
assert b == x
def eq_bj_test():
assert b == j
def eq_jx_test():
assert j == x
|
1671013
|
from collections import defaultdict
class Matcher:
def __init__(self, men, women):
'''
Constructs a Matcher instance.
Takes a dict of men's spousal preferences, `men`,
and a dict of women's spousal preferences, `women`.
'''
self.M = men
self.W = women
self.M_size = len(men)
self.W_size = len(women)
self.wives = {}
self.pairs = []
# we index spousal preferences at initialization
# to avoid expensive lookups when matching
self.mrank = defaultdict(dict) # `mrank[m][w]` is m's ranking of w
self.wrank = defaultdict(dict) # `wrank[w][m]` is w's ranking of m
for m, prefs in men.items():
for i, w in enumerate(prefs):
self.mrank[m][w] = i
for w, prefs in women.items():
for i, m in enumerate(prefs):
self.wrank[w][m] = i
def __call__(self):
return self.match()
def prefers(self, w, m, h):
'''Test whether w prefers m over h.'''
return self.wrank[w][m] < self.wrank[w][h]
def after(self, m, w):
'''Return the woman favored by m after w.'''
i = self.mrank[m][w] + 1 # index of woman following w in list of prefs
if i < self.W_size:
return self.M[m][i]
return None # No wives left for man "m"
def match(self):
'''
Try to match all men with their next preferred spouse.
'''
men = list(self.M.keys()) # get the complete list of men
# Map each man to their first preference
next_ = dict((m, rank[0]) for m, rank in self.M.items())
wives = {} # mapping from women to current spouse
while men:
m, men = men[0], men[1:]
w = next_[m] # next woman for m to propose to
# Check if there are still possible wives, control needed for unequal sets!
if w:
next_[m] = self.after(m, w) # woman after w in m's list of prefs
if w in wives:
h = wives[w] # current husband
if self.prefers(w, m, h):
men.append(h) # husband becomes available again
wives[w] = m # w becomes wife of m
else:
men.append(m) # m remains unmarried
else:
wives[w] = m # w becomes wife of m
self.pairs = [(h, w) for w, h in wives.items()]
self.wives = wives
return wives
#return self.match(men, next_, wives)
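# Usage sketch (hypothetical preference lists; match() runs the classic
# Gale-Shapley deferred-acceptance algorithm with men proposing):
#
#     men = {'adam': ['diana', 'eve'], 'bob': ['eve', 'diana']}
#     women = {'diana': ['bob', 'adam'], 'eve': ['adam', 'bob']}
#     Matcher(men, women)()  # -> {'diana': 'adam', 'eve': 'bob'}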
|
1671068
|
import torch
import torchvision
import os.path as osp
from torch import nn
from .zhang_unet import ZhangUNet
from .hexunet import HexUNet
from spherical_distortion.util import distributed as distr_util
def build_model(cfg, num_classes):
distr_util.dprint('Initializing network')
in_ch = 4 if cfg.USE_DEPTH else 3
if cfg.MODEL_TYPE == 'zhangunet':
model = ZhangUNet(
in_ch=in_ch, out_ch=num_classes, input_nonlin=cfg.INPUT_NONLIN)
elif cfg.MODEL_TYPE == 'resnet101':
model = torchvision.models.segmentation.fcn_resnet101(
pretrained=False, num_classes=num_classes)
elif cfg.MODEL_TYPE == 'hexunet':
model = HexUNet(num_classes)
else:
raise AttributeError('Model type {} is not supported.'.format(
cfg.MODEL_TYPE))
if cfg.MODEL_TYPE == 'resnet101':
url = 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth'
state_dict = torch.hub.load_state_dict_from_url(url)
model_dict = model.state_dict()
state_dict = {
k: v
for k, v in state_dict.items()
if k in model_dict and v.shape == model_dict[k].shape
}
model.load_state_dict(state_dict, strict=False)
if cfg.USE_DEPTH:
old_conv = model.backbone.conv1
inplanes = old_conv.out_channels
model.backbone.conv1 = nn.Conv2d(
4, inplanes, kernel_size=7, stride=2, padding=3, bias=False)
nn.init.constant_(model.backbone.conv1.weight, 0.0)
model.backbone.conv1.weight.data[:, :3] = old_conv.weight.detach()
return model
def get_checkpoint_path(checkpoint_dir, evaluate, start_epoch, model_path,
load_weights_only):
if start_epoch == 0:
checkpoint_path = None if not evaluate \
else osp.join(checkpoint_dir, 'model_best.pth')
elif start_epoch == -1:
checkpoint_path = osp.join(checkpoint_dir, 'checkpoint_latest.pth')
else:
checkpoint_path = osp.join(checkpoint_dir,
'checkpoint_{:03d}.pth'.format(start_epoch))
# Overrides above if a model path is provided
if model_path:
checkpoint_path = model_path
    # If load_weights_only is set, let it pass through; otherwise fall back to
    # the default behavior, which enables it only during evaluation.
if not load_weights_only:
load_weights_only = evaluate
return checkpoint_path, load_weights_only
LABEL_RATIOS = {
'synthia-none': [
0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05,
0.05, 0.05
],
'synthia-ours': [
0, 2.2332e-01, 2.5028e-01, 3.0387e-01, 3.7913e-02, 4.6966e-02,
4.3975e-02, 1.1833e-02, 5.0253e-02, 1.8071e-03, 1.7912e-03, 6.8025e-06,
2.7346e-02, 6.4137e-04
],
'stanford-ours': [
0.0, 0.01574929912162261, 0.027120215697671146, 0.07173240823098485,
0.09940005941682817, 0.03867709226962055, 0.11732465598112281,
0.030507559126801635, 0.10977762878937447, 0.08451750469249816,
0.003289480129178341, 0.02881729930964915, 0.3366731957269465,
0.028805890997152264
],
'stanford-thirdparty': [
0.04233976974675504, 0.014504436907968913, 0.017173225930738712,
0.048004778186652164, 0.17384037404789865, 0.028626771620973622,
0.087541966989014, 0.019508096683310605, 0.08321331842901526,
0.17002664771895903, 0.002515611224467519, 0.020731298851232174,
0.2625963729249342, 0.016994731594287146, 0.012382599143792165
][:-1],
}
def build_criterion(cfg):
distr_util.dprint('Initializing training criteria')
drop = [0]
label_ratio = LABEL_RATIOS[cfg.LABEL_WEIGHT]
label_ratio = torch.tensor(label_ratio)
label_weight = torch.tensor(1.0) / torch.log(1.02 + label_ratio)
label_weight[drop] = 0.
label_weight = label_weight.to(dtype=torch.float32, device=cfg.DEVICE)
if cfg.DROP_UNKNOWN:
criterion = nn.CrossEntropyLoss(
weight=label_weight[1:], ignore_index=-1)
else:
criterion = nn.CrossEntropyLoss(weight=label_weight, ignore_index=0)
criterion = criterion.to(cfg.DEVICE)
return criterion
def build_optimizer(cfg, model):
optimizer = torch.optim.Adam(
params=model.parameters(),
lr=cfg.LR,
)
return optimizer
def build_scheduler(cfg, optimizer):
if cfg.SCHEDULER == 'multistep':
scheduler = torch.optim.lr_scheduler.MultiStepLR(
# optimizer, milestones=[10, 50], gamma=0.1)
optimizer, milestones=[100, 150], gamma=0.1)
elif cfg.SCHEDULER == 'step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 200, gamma=0.1)
else:
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=20, gamma=0.9)
return scheduler
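# Usage sketch (cfg is assumed to expose the attributes referenced above,
# e.g. MODEL_TYPE, USE_DEPTH, LABEL_WEIGHT, LR, SCHEDULER, DEVICE):
#
#     model = build_model(cfg, num_classes=14)
#     criterion = build_criterion(cfg)
#     optimizer = build_optimizer(cfg, model)
#     scheduler = build_scheduler(cfg, optimizer)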
|
1671131
|
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from .modules import *
except ImportError:
from modules import *
class StatisticalPooling(nn.Module):
def forward(self, x):
# x is 3-D with axis [B, feats, T]
mu = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
return torch.cat((mu, std), dim=1)
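# Shape sketch: StatisticalPooling maps [B, F, T] -> [B, 2*F, 1] by
# concatenating the per-feature mean and std over time, e.g.:
#
#     y = StatisticalPooling()(torch.randn(8, 1500, 200))  # -> [8, 3000, 1]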
class TDNN(Model):
# Architecture taken from x-vectors extractor
# https://www.danielpovey.com/files/2018_icassp_xvectors.pdf
def __init__(self, num_inputs, num_outputs,
method='cls',
name='TDNN'):
super().__init__(name=name)
self.method = method
self.model = nn.Sequential(
nn.Conv1d(num_inputs, 512, 5, padding=2),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 3, dilation=2, padding=2),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 3, dilation=3, padding=3),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 1500, 1),
nn.BatchNorm1d(1500),
nn.ReLU(inplace=True),
StatisticalPooling(),
nn.Conv1d(3000, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, num_outputs, 1),
nn.LogSoftmax(dim=1)
)
if method == 'cls':
print('Using cls TDNN method')
elif method == 'xvector':
# get output features at affine after stats pooling
self.model = nn.Sequential(*list(self.model.children())[:-5])
print('Using xvector TDNN method')
elif method == 'unpooled':
# get output features right before the pooling
self.model = nn.Sequential(*list(self.model.children())[:-9])
print('Using unpooled TDNN method')
else:
            raise TypeError(f'Unrecognized TDNN method: {method}')
self.emb_dim = 1500
def forward(self, x):
return self.model(x)
def load_pretrained(self, ckpt_path, verbose=True):
if self.method != 'cls':
# remove last layers from dict first
ckpt = torch.load(ckpt_path,
map_location=lambda storage, loc: storage)
sdict = ckpt['state_dict']
curr_keys = list(dict(self.named_parameters()).keys())
del_keys = [k for k in sdict.keys() if k not in curr_keys]
# delete other keys from ckpt
for k in del_keys:
del sdict[k]
# now load the weights remaining as feat extractor
self.load_state_dict(sdict)
else:
# load everything
super().load_pretrained(ckpt_path, load_last=True,
verbose=verbose)
if __name__ == '__main__':
"""
sp = StatisticalPooling()
x = torch.randn(1, 100, 1000)
y = sp(x)
print('y size: ', y.size())
tdnn = TDNN(24, 1200, xvector=True)
x = torch.randn(1, 24, 27000)
y = tdnn(x)
print('y size: ', y.size())
tdnn.load_pretrained('/tmp/xvector.ckpt')
"""
x = torch.randn(2, 24, 1000)
tdnn = TDNN(24, 2, method='unpooled')
print(tdnn(x).shape)
|
1671182
|
from takeyourmeds.utils.test import TestCase
class SmokeTest(TestCase):
def test_token(self):
self.user.profile.group.access_tokens.create()
|
1671244
|
import taichi as ti
import taichi_glsl as ts
from abc import ABCMeta, abstractmethod
from .Sampler import LinearSampler2D, LinearSampler3D
from enum import Enum, IntEnum
class GRIDTYPE(IntEnum):
CELL_GRID = 0
FACE_GRID = 1
Bimocq_GRID = 2
def __init__(self, *args):
super().__init__()
self.map = ['UniformGrid', 'MacGrid', 'MacGrid']
def __str__(self):
return self.map[self.value]
@ti.data_oriented
class Grid(metaclass=ABCMeta):
"""
the abstract class for the wrapper
that stores the data
"""
def __init__(self,
dim,
dx=ts.vec3(1.0),
o=ts.vec3(0.0)):
"""
:param dim: dimension of the grid, expected to be 2 or 3
:param dx: the physical length of a cell
:param o: offset on grid
"""
assert (dim == dx.n)
assert (dim == o.n)
self.dim = dim
# should not be zero
# or too small
self.dx = dx
self.inv_dx = 1.0 / self.dx
self.o = o
self.GRID_TYPE = None
self._sampler = None
if dim == 2:
self._sampler = LinearSampler2D()
elif dim == 3:
self._sampler = LinearSampler3D()
else:
            raise NotImplementedError
@abstractmethod
def __getitem__(self, I):
pass
@abstractmethod
def __setitem__(self, I, value):
# actually this would never be called in kernel
# since Taichi would always call this.__getitem__().assign()
pass
# @property
# def shape(self):
# pass
@abstractmethod
def fill(self, value):
pass
@ti.pyfunc
def getW(self, G):
"""
get world position from Grid Coordinate
:param G:
:return:
"""
return (G + self.o) * self.dx
@ti.pyfunc
def getG(self, W):
"""
:param W: physical position
:return:
"""
return W * self.inv_dx - self.o
@abstractmethod
def interpolate(self, P):
"""
self explained, mainly called in advection
:param P:
:return:
"""
pass
@abstractmethod
def copy(self, src):
"""
copy src data to myself
:param src:
:return:
"""
pass
@abstractmethod
def subself(self, src):
"""
self = src - self
helper for Bimocq
:param src:
:return:
"""
pass
# @abstractmethod
# def clampPos(self, P):
# """
# clamp world pos and output clamped space
# mainly used in Bimocq
# :param P: world pos
# :return:
# """
# pass
@ti.func
def GisNearBoundary(self, g, howNear):
"""
Assume g is cell center
:param g:
:param howNear:
note: Taichi use -1 as True
:return:
"""
# print("<", g, ts.vecND(self.dim, howNear), g < ts.vecND(self.dim, howNear))
# print(">", g, ti.Vector(self.shape) - howNear, g >= ti.Vector(self.shape) - howNear)
return ((g < ts.vecND(self.dim, howNear)).sum() > 0) or ((g >= ti.Vector(self.shape) - howNear).sum() > 0)
@ti.func
def WisNearBoundary(self, w, howNear):
"""
whether a World pos is near the out boundary
assume cell in the middle
:param w: position in World
:param howNear:
:return:
"""
g = ti.cast((w - self.o) / self.dx, ti.i32)
return self.GisNearBoundary(g, howNear)
|
1671267
|
import numpy as np
import threading
import gil_load
N_THREADS = 4
NPTS = 4096
gil_load.init()
def do_some_work():
for i in range(2):
x = np.random.randn(NPTS, NPTS)
x[:] = np.fft.fft2(x).real
gil_load.start()
threads = []
for i in range(N_THREADS):
thread = threading.Thread(target=do_some_work, daemon=True)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
gil_load.stop()
stats = gil_load.get()
print(gil_load.format(stats))
|
1671276
|
from typing import NamedTuple
from datetime import date as Date
from aiopg.connection import Connection
class Review(NamedTuple):
id: int
date: Date
course_id: int
review_text: str
@classmethod
def from_raw(cls, raw: tuple):
return cls(*raw) if raw else None
@staticmethod
async def get_for_course(conn: Connection, course_id: int):
q = ('SELECT id, date, course_id, review_text '
'FROM course_reviews WHERE course_id = %s '
'ORDER BY date')
params = (course_id,)
async with conn.cursor() as cur:
await cur.execute(q, params)
result = await cur.fetchall()
return [Review.from_raw(r) for r in result]
@staticmethod
async def create(conn: Connection, course_id: int,
review_text: str):
q = ('INSERT INTO course_reviews (course_id, review_text) '
'VALUES (%(course_id)s, %(review_text)s)')
params = {'course_id': course_id,
'review_text': review_text}
async with conn.cursor() as cur:
await cur.execute(q, params)
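# Usage sketch (assumes an open aiopg connection and the course_reviews
# table referenced in the queries above):
#
#     async def demo(conn: Connection):
#         await Review.create(conn, course_id=1, review_text='Great course')
#         reviews = await Review.get_for_course(conn, course_id=1)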
|
1671313
|
size = 60  # try different sizes
with open('fear.txt') as stream:
    while True:
        data = stream.read(size)
        if not data:  # an empty read means end of file
            break
        print('===>', data, '<===')
|
1671349
|
import FWCore.ParameterSet.Config as cms
# Delta-R matching value maps
ak8PFJetsCHSPrunedMass = cms.EDProducer("RecoJetDeltaRValueMapProducer",
src = cms.InputTag("ak8PFJetsCHS"),
matched = cms.InputTag("ak8PFJetsCHSPruned"),
distMax = cms.double(0.8),
value = cms.string('mass')
)
ak8PFJetsCHSTrimmedMass = cms.EDProducer("RecoJetDeltaRValueMapProducer",
src = cms.InputTag("ak8PFJetsCHS"),
matched = cms.InputTag("ak8PFJetsCHSTrimmed"),
distMax = cms.double(0.8),
value = cms.string('mass')
)
ak8PFJetsCHSFilteredMass = cms.EDProducer("RecoJetDeltaRValueMapProducer",
src = cms.InputTag("ak8PFJetsCHS"),
matched = cms.InputTag("ak8PFJetsCHSFiltered"),
distMax = cms.double(0.8),
value = cms.string('mass')
)
ak8PFJetsCHSSoftDropMass = cms.EDProducer("RecoJetDeltaRValueMapProducer",
src = cms.InputTag("ak8PFJetsCHS"),
matched = cms.InputTag("ak8PFJetsCHSSoftDrop"),
distMax = cms.double(0.8),
value = cms.string('mass')
)
|
1671357
|
from collections import defaultdict
from dataclasses import MISSING, Field, fields
from typing import Dict, Tuple, Type, Union, Callable, Optional, Any
from .abstractions import W, AbstractLoader, AbstractDumper, AbstractParser
from .bases import M, AbstractMeta
from .models import JSONField, JSON, Extras, _PatternedDT
from .type_def import ExplicitNull, ExplicitNullType, T
from .utils.dict_helper import DictWithLowerStore
from .utils.typing_compat import (
is_annotated, get_args, eval_forward_ref_if_needed
)
# A cached mapping of dataclass to the list of fields, as returned by
# `dataclasses.fields()`.
_FIELDS: Dict[Type, Tuple[Field]] = {}
# Mapping of main dataclass to its `load` function.
_CLASS_TO_LOAD_FUNC: Dict[Type, Any] = {}
# Mapping of main dataclass to its `dump` function.
_CLASS_TO_DUMP_FUNC: Dict[Type, Any] = {}
# A mapping of dataclass to its loader.
_CLASS_TO_LOADER: Dict[Type, Type[AbstractLoader]] = {}
# A mapping of dataclass to its dumper.
_CLASS_TO_DUMPER: Dict[Type, Type[AbstractDumper]] = {}
# A cached mapping of a dataclass to each of its case-insensitive field names
# and load hook.
#
# Note: need to create a `ForwardRef` here, because Python 3.6 complains.
_FIELD_NAME_TO_LOAD_PARSER: Dict[
Type, 'DictWithLowerStore[str, AbstractParser]'] = {}
# Since the dump process doesn't use Parsers currently, we use a sentinel
# mapping to confirm if we need to setup the dump config for a dataclass
# on an initial run.
_IS_DUMP_CONFIG_SETUP: Dict[Type, bool] = {}
# A cached mapping, per dataclass, of JSON field to instance field name
_JSON_FIELD_TO_DATACLASS_FIELD: Dict[
Type, Dict[str, Union[str, ExplicitNullType]]] = defaultdict(dict)
# A cached mapping, per dataclass, of instance field name to JSON field
_DATACLASS_FIELD_TO_JSON_FIELD: Dict[Type, Dict[str, str]] = defaultdict(dict)
# A mapping of dataclass name to its Meta initializer (defined in
# :class:`bases.BaseJSONWizardMeta`), which is only set when the
# :class:`JSONSerializable.Meta` is sub-classed.
_META_INITIALIZER: Dict[
str, Callable[[Type[W]], None]] = {}
# Mapping of dataclass to its Meta inner class, which will only be set when
# the :class:`JSONSerializable.Meta` is sub-classed.
_META: Dict[Type, M] = {}
def dataclass_to_loader(cls):
"""
Returns the loader for a dataclass.
"""
return _CLASS_TO_LOADER[cls]
def dataclass_to_dumper(cls: Type):
"""
Returns the dumper for a dataclass.
"""
return _CLASS_TO_DUMPER[cls]
def set_class_loader(class_or_instance, loader: Type[AbstractLoader]):
"""
Set (and return) the loader for a dataclass.
"""
cls = get_class(class_or_instance)
loader_cls = get_class(loader)
_CLASS_TO_LOADER[cls] = get_class(loader_cls)
return _CLASS_TO_LOADER[cls]
def set_class_dumper(cls: Type, dumper: Type[AbstractDumper]):
"""
Set (and return) the dumper for a dataclass.
"""
_CLASS_TO_DUMPER[cls] = get_class(dumper)
return _CLASS_TO_DUMPER[cls]
def json_field_to_dataclass_field(cls: Type):
"""
Returns a mapping of JSON field to dataclass field.
"""
return _JSON_FIELD_TO_DATACLASS_FIELD[cls]
def dataclass_field_to_json_field(cls):
"""
Returns a mapping of dataclass field to JSON field.
"""
return _DATACLASS_FIELD_TO_JSON_FIELD[cls]
def dataclass_field_to_load_parser(
cls_loader: Type[AbstractLoader],
cls: Type,
config: M,
save: bool = True) -> 'DictWithLowerStore[str, AbstractParser]':
"""
Returns a mapping of each lower-cased field name to its annotated type.
"""
if cls not in _FIELD_NAME_TO_LOAD_PARSER:
return _setup_load_config_for_cls(cls_loader, cls, config, save)
return _FIELD_NAME_TO_LOAD_PARSER[cls]
def _setup_load_config_for_cls(cls_loader: Type[AbstractLoader],
cls: Type,
config: M,
save: bool = True
) -> 'DictWithLowerStore[str, AbstractParser]':
"""
This function processes a class `cls` on an initial run, and sets up the
load process for `cls` by iterating over each dataclass field. For each
field, it performs the following tasks:
* Lookup the Parser (dispatcher) for the field based on its type
annotation, and then cache it so we don't need to lookup each time.
* Check if the field's annotation is of type ``Annotated``. If so,
we iterate over each ``Annotated`` argument and find any special
:class:`JSON` objects (this can also be set via the helper function
``json_key``). Assuming we find it, the class-specific mapping of
JSON key to dataclass field name is then updated with the input
passed in to this object.
* Check if the field type is a :class:`JSONField` object (this can
also be set by the helper function ``json_field``). Assuming this is
the case, the class-specific mapping of JSON key to dataclass field
name is then updated with the input passed in to the :class:`JSON`
attribute.
"""
json_to_dataclass_field = _JSON_FIELD_TO_DATACLASS_FIELD[cls]
name_to_parser = {}
for f in dataclass_init_fields(cls):
field_extras: Extras = {'config': config}
f.type = eval_forward_ref_if_needed(f.type, cls)
field_type = f.type
# Check if the field is a `Field` type or a subclass. If so, update
# the class-specific mapping of JSON key to dataclass field name.
if isinstance(f, Field):
if isinstance(f, JSONField):
for key in f.json.keys:
json_to_dataclass_field[key] = f.name
else:
value = f.metadata.get('__remapping__')
if value and isinstance(value, JSON):
for key in value.keys:
json_to_dataclass_field[key] = f.name
# Check if the field annotation is an `Annotated` type. If so,
# look for any `JSON` objects in the arguments; for each object,
# update the class-specific mapping of JSON key to dataclass field
# name.
if is_annotated(field_type):
ann_type, *extras = get_args(field_type)
for extra in extras:
if isinstance(extra, JSON):
for key in extra.keys:
json_to_dataclass_field[key] = f.name
elif isinstance(extra, _PatternedDT):
field_extras['pattern'] = extra
# Lookup the Parser (dispatcher) for each field based on its annotated
# type, and then cache it so we don't need to lookup each time.
name_to_parser[f.name] = cls_loader.get_parser_for_annotation(
field_type, cls, field_extras
)
parser_dict = DictWithLowerStore(name_to_parser)
# only cache the load parser for the class if `save` is enabled
if save:
_FIELD_NAME_TO_LOAD_PARSER[cls] = parser_dict
return parser_dict
def setup_dump_config_for_cls_if_needed(cls: Type):
"""
This function processes a class `cls` on an initial run, and sets up the
dump process for `cls` by iterating over each dataclass field. For each
field, it performs the following tasks:
* Check if the field's annotation is of type ``Annotated``. If so,
we iterate over each ``Annotated`` argument and find any special
:class:`JSON` objects (this can also be set via the helper function
``json_key``). Assuming we find it, the class-specific mapping of
dataclass field name to JSON key is then updated with the input
passed in to this object.
* Check if the field type is a :class:`JSONField` object (this can
also be set by the helper function ``json_field``). Assuming this is
the case, the class-specific mapping of dataclass field name to JSON
key is then updated with the input passed in to the :class:`JSON`
attribute.
"""
if cls in _IS_DUMP_CONFIG_SETUP:
return
dataclass_to_json_field = _DATACLASS_FIELD_TO_JSON_FIELD[cls]
for f in dataclass_fields(cls):
# Check if the field is a `Field` type or a subclass. If so, update
# the class-specific mapping of dataclass field name to JSON key.
if isinstance(f, Field):
if isinstance(f, JSONField):
if not f.json.dump:
dataclass_to_json_field[f.name] = ExplicitNull
elif f.json.all:
dataclass_to_json_field[f.name] = f.json.keys[0]
else:
value = f.metadata.get('__remapping__')
if value and isinstance(value, JSON) and value.all:
dataclass_to_json_field[f.name] = value.keys[0]
# Check if the field annotation is an `Annotated` type. If so,
# look for any `JSON` objects in the arguments; for each object,
# update the class-specific mapping of dataclass field name to JSON
# key.
f.type = eval_forward_ref_if_needed(f.type, cls)
if is_annotated(f.type):
for extra in get_args(f.type)[1:]:
if isinstance(extra, JSON):
if not extra.dump:
dataclass_to_json_field[f.name] = ExplicitNull
elif extra.all:
dataclass_to_json_field[f.name] = extra.keys[0]
# Mark the dataclass as processed, as the initial dump process is set up.
_IS_DUMP_CONFIG_SETUP[cls] = True
def call_meta_initializer_if_needed(cls: Type[W]):
"""
Calls the Meta initializer when the inner :class:`Meta` is sub-classed.
"""
cls_name = get_class_name(cls)
if cls_name in _META_INITIALIZER:
_META_INITIALIZER[cls_name](cls)
def get_meta(cls: Type) -> M:
"""
Retrieves the Meta config for the :class:`AbstractJSONWizard` subclass.
This config is set when the inner :class:`Meta` is sub-classed.
"""
return _META.get(cls, AbstractMeta)
def dataclass_fields(cls) -> Tuple[Field]:
"""
Cache the `dataclasses.fields()` call for each class, as overall that
ends up around 5x faster than making a fresh call each time.
"""
if cls not in _FIELDS:
_FIELDS[cls] = fields(cls)
return _FIELDS[cls]
def dataclass_init_fields(cls) -> Tuple[Field]:
"""Get only the dataclass fields that would be passed into the constructor."""
return tuple(f for f in dataclass_fields(cls) if f.init)
def dataclass_field_names(cls) -> Tuple[str, ...]:
"""Get the names of all dataclass fields"""
return tuple(f.name for f in dataclass_fields(cls))
def dataclass_field_to_default(cls) -> Dict[str, Any]:
"""Get default values for the (optional) dataclass fields."""
defaults = {}
for f in dataclass_fields(cls):
if f.default is not MISSING:
defaults[f.name] = f.default
elif f.default_factory is not MISSING:
defaults[f.name] = f.default_factory()
return defaults
def create_new_class(
class_or_instance, bases: Tuple[T, ...],
suffix: Optional[str] = None, attr_dict=None) -> T:
"""
Create (dynamically) and return a new class that sub-classes from a list
of `bases`.
"""
if not suffix and bases:
suffix = get_class_name(bases[0])
new_cls_name = f'{get_class_name(class_or_instance)}{suffix}'
return type(
new_cls_name,
bases,
attr_dict or {'__slots__': ()}
)
def get_class_name(class_or_instance) -> str:
"""Return the fully qualified name of a class."""
try:
return class_or_instance.__qualname__
except AttributeError:
# We're dealing with a dataclass instance
return type(class_or_instance).__qualname__
def get_outer_class_name(inner_cls, default=None, raise_=True):
"""
Attempt to return the fully qualified name of the outer (enclosing) class,
given a reference to the inner class.
If any errors occur - such as when `inner_cls` is not a real inner
class - then an error will be raised if `raise_` is true, and if not
we will return `default` instead.
"""
try:
name = get_class_name(inner_cls).rsplit('.', 1)[-2]
# This is mainly for our test cases, where we nest the class
# definition in the test func. Either way, it's not a valid class.
assert not name.endswith('<locals>')
except (IndexError, AssertionError):
if raise_:
raise
return default
else:
return name
def get_class(obj):
"""Get the class for an object `obj`"""
return obj if isinstance(obj, type) else type(obj)
def is_subclass(obj, base_cls: Type) -> bool:
"""Check if `obj` is a sub-class of `base_cls`"""
cls = obj if isinstance(obj, type) else type(obj)
return issubclass(cls, base_cls)
def is_subclass_safe(cls, class_or_tuple) -> bool:
"""Check if `obj` is a sub-class of `base_cls` (safer version)"""
try:
return issubclass(cls, class_or_tuple)
except TypeError:
return cls is class_or_tuple
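# Hedged usage sketch for the helpers above. `Base` and `Extra` are
# illustrative stand-ins, not part of the library.
if __name__ == '__main__':
    class Base: ...
    class Extra: ...
    NewCls = create_new_class(Base, (Base, Extra), suffix='Demo')
    print(get_class_name(NewCls))           # -> 'BaseDemo'
    print(is_subclass_safe(NewCls, Extra))  # -> True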
|
1671388
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class IntTests(TranspileTestCase):
def test_pylong_marshal(self):
self.assertCodeExecution("print(179769313486231590772930519078902473361797697894230657273430081157732675805500963132708477322407536021120113879871393357658789768814416622492847430639474124377767893424865485276302219601246094119453082952085005768838150682342462881473913110540827237163350510684586298239947245938479716304835356329624224137219)")
def test_pylong_marshal_negative(self):
self.assertCodeExecution("print(-179769313486231590772930519078902473361797697894230657273430081157732675805500963132708477322407536021120113879871393357658789768814416622492847430639474124377767893424865485276302219601246094119453082952085005768838150682342462881473913110540827237163350510684586298239947245938479716304835356329624224137219)")
class BuiltinIntFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
function = "int"
|
1671432
|
from polylogyx.models import Query, db, Pack
def get_query_by_name(query_name):
return Query.query.filter(Query.name == query_name).first()
def add_query(query_name, **query):
return Query.create(name=query_name, **query)
def get_all_queries():
queries = Query.query \
.options(
db.joinedload(Query.tags),
db.joinedload(Query.packs),
db.joinedload(Query.packs, Pack.queries, innerjoin=True)).all()
return queries
def get_all_packed_queries():
queries = Query.query \
.options(
db.joinedload(Query.tags),
db.joinedload(Query.packs),
db.joinedload(Query.packs, Pack.queries, innerjoin=True)
).filter(Query.packs.any()).all()
return queries
def get_query_by_id(query_id):
return Query.query.options(
db.joinedload(Query.tags)
).filter(Query.id == query_id).first()
def create_query_obj(name, sql, interval, platform, version, description, value, shard, snapshot=False):
    return Query(name=name, sql=sql, interval=interval, platform=platform, version=version,
                 description=description, value=value, shard=shard, snapshot=snapshot)
|
1671437
|
Export Script toc2csv.py
-------------------------
import fitz
import argparse
#--------------------------------------------------------------------
# use argparse to handle invocation arguments
#--------------------------------------------------------------------
parser = argparse.ArgumentParser(description="Enter CSV delimiter [;] and document filename")
parser.add_argument('-d', help='CSV delimiter [;]', default = ';')
parser.add_argument('doc', help='document filename')
args = parser.parse_args()
delim = args.d # requested CSV delimiter character
fname = args.doc # input document filename
doc = fitz.open(fname)
toc = doc.getToC(simple = False)
ext = fname[-3:].lower()
fname1 = fname[:-4] + "-toc.csv"
outf = open(fname1, "w")
for t in toc:
t4 = t[3]
if ext == "pdf":
if t4["kind"] == 1:
p4 = str(t4["to"].y) # add vertical destination if present
else:
p4 = ""
else:
p4 = ""
rec = delim.join([str(t[0]), t[1].strip(), str(t[2]), p4])
outf.writelines([rec, "\n"])
outf.close()
-----------------------------------------------------------------------------------------------------
Import Script csv2toc.py
-------------------------
import csv
import fitz
import argparse
'''
load a PDF TOC from CSV file contents
-------------------------------------
!!! All existing outline entries (bookmarks) of the PDF will be replaced by this. !!!
Each CSV line must contain 3 or 4 entries:
lvl A positive integer indicating the hierarchy level of the entry. First line must have lvl = 1.
Hierarchy level of lines may increase by at most 1 but decrease by any number.
title A string containing the entry's title. Must not be empty.
page An integer 1-based page number (1st page has number 1). Must be in PDF's page range.
height An optional positive number indicating the positioning of the entry on the page,
given as points and counting from page bottom.
If omitted, 36 points (half an inch) below top of page are taken.
Notes
-----
(1) Page numbers do not need to be in any particular order
(2) The PDF will be updated during the process
'''
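# Hedged example of a matching CSV file (delimiter ';'); the optional 4th
# column is the height in points, counted from the page bottom:
#   1;Introduction;1
#   2;Getting Started;3;680.5
#   2;Installation;5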
parser = argparse.ArgumentParser(description="Enter CSV delimiter [;], CSV filename and PDF filename")
parser.add_argument('-d', help='CSV delimiter [;]', default = ';')
parser.add_argument('-csv', help='CSV filename')
parser.add_argument('-pdf', help='PDF filename')
args = parser.parse_args()
delim = args.d # requested CSV delimiter character
assert args.csv, "missing CSV filename"
assert args.pdf, "missing PDF filename"
doc = fitz.open(args.pdf)
toc = []
with open(args.csv) as tocfile:
tocreader = csv.reader(tocfile, delimiter = delim)
for row in tocreader:
assert len(row) <= 4, "cannot handle more than 4 entries:\n %s" % (str(row),)
if len(row) == 4:
p4 = float(row[3])
toc.append([int(row[0]), row[1], int(row[2]), p4])
else:
toc.append([int(row[0]), row[1], int(row[2])])
doc.setToC(toc)
doc.saveIncr() # incremental update: extremely fast
# use doc.save("new.pdf",...) to save to a new copy instead
|
1671464
|
from hwt.interfaces.std import Handshaked
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.handshaked.handshakedToAxiStream import HandshakedToAxiStream
from hwtSimApi.constants import CLK_PERIOD
class HandshakedToAxiStream_MAX_FRAME_WORDS_TC(SimTestCase):
@classmethod
def setUpClass(cls):
u = HandshakedToAxiStream(Handshaked)
u.MAX_FRAME_WORDS = 5
cls.u = u
cls.compileSim(u)
def test_basic(self, N=10, randomized=True):
u:HandshakedToAxiStream = self.u
MAX_FRAME_WORDS = u.MAX_FRAME_WORDS
expected = []
for i in range(N * MAX_FRAME_WORDS):
u.dataIn._ag.data.append(i)
last = (i + 1) % MAX_FRAME_WORDS == 0
expected.append((i, int(last)))
t = (N * MAX_FRAME_WORDS + 10) * CLK_PERIOD
if randomized:
self.randomize(u.dataIn)
self.randomize(u.dataOut)
t *= 4
self.runSim(t)
self.assertValSequenceEqual(u.dataOut._ag.data, expected)
class HandshakedToAxiStream_IN_TIMEOUT_TC(SimTestCase):
@classmethod
def setUpClass(cls):
u = HandshakedToAxiStream(Handshaked)
u.IN_TIMEOUT = 3
cls.u = u
cls.compileSim(u)
def test_basic_no_timeout(self, N=100, randomized=False, expected_frame_lens={100}):
self.test_basic(N=N, randomized=randomized, expected_frame_lens=expected_frame_lens)
def test_basic(self, N=100, randomized=True, expected_frame_lens={1, 2, 3, 4, 5, 6}):
u: HandshakedToAxiStream = self.u
for i in range(N):
u.dataIn._ag.data.append(i)
t = (N + 10) * CLK_PERIOD
if randomized:
self.randomize(u.dataIn)
self.randomize(u.dataOut)
t *= 4
self.runSim(t)
data = []
frame_lens = set()
actual_len = 0
for (d, last) in u.dataOut._ag.data:
d = int(d)
data.append(d)
last = bool(last)
actual_len += 1
if last:
frame_lens.add(actual_len)
actual_len = 0
expected_data = list(range(N))
self.assertSequenceEqual(data, expected_data)
self.assertSetEqual(frame_lens, expected_frame_lens) # N dependent
class HandshakedToAxiStream_IN_TIMEOUT_AND_MAX_FRAME_WORDS_TC(HandshakedToAxiStream_IN_TIMEOUT_TC):
@classmethod
def setUpClass(cls):
u = HandshakedToAxiStream(Handshaked)
u.IN_TIMEOUT = 3
u.MAX_FRAME_WORDS = 4
cls.u = u
cls.compileSim(u)
def test_basic(self, N=100, randomized=True, expected_frame_lens={1, 2, 3, 4}):
super(HandshakedToAxiStream_IN_TIMEOUT_AND_MAX_FRAME_WORDS_TC, self).test_basic(
N=N, randomized=randomized, expected_frame_lens=expected_frame_lens)
def test_basic_no_timeout(self, N=101, randomized=False, expected_frame_lens={1, 4}):
super(HandshakedToAxiStream_IN_TIMEOUT_AND_MAX_FRAME_WORDS_TC, self).test_basic_no_timeout(
N=N, randomized=randomized, expected_frame_lens=expected_frame_lens)
HandshakedToAxiStreamTCs = [
HandshakedToAxiStream_MAX_FRAME_WORDS_TC,
HandshakedToAxiStream_IN_TIMEOUT_TC,
HandshakedToAxiStream_IN_TIMEOUT_AND_MAX_FRAME_WORDS_TC,
]
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
# suite.addTest(HandshakedToAxiStream_MAX_FRAME_WORDS_TC('test_stuckedData'))
for tc in HandshakedToAxiStreamTCs:
suite.addTest(unittest.makeSuite(tc))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
|
1671484
|
import os
import click
from shutil import copyfile
from mlapp.integrations.sm.cli_help import cli_sm_help
from mlapp.mlapp_cli.common.cli_utilities import create_directory, create_file
from mlapp.mlapp_cli.common.files import dockerignore_file, default_config_file
from mlapp.utils.general import get_project_root
from mlapp.integrations.sm.scripts.run_training_job import run_training_job as run_training_job_script
from mlapp.integrations.sm.scripts.run_tuning_job import run_tuning_job as run_tuning_job_script
init_files_sm = {
'sm_deployment.py': {
'dir': 'deployment'
}
}
@click.group("sm")
def commands():
"""
MLApp SM Command
    Use it to run SageMaker commands.
    Type --help on an sm subcommand to get more information.
"""
pass
@commands.command("setup", help=cli_sm_help.get('setup'))
@click.option("-f", "--force", is_flag=True, default=False,
help="Flag force setup if some of the SageMaker files already exists in your project.")
def setup(force):
if not force:
is_initiated = False
for file_name in init_files_sm:
file_options = init_files_sm.get(file_name, file_name)
directory = file_options.get('dir', 'root')
if directory == 'root':
full_path = os.path.join(os.getcwd(), file_name)
else:
if os.path.isdir(directory):
full_path = os.path.join(os.getcwd(), directory, file_name)
else:
continue
if os.path.exists(full_path):
is_initiated = True
break
if is_initiated:
click.secho(
"ERROR: " + file_name + " already exists.\nHint: you can use 'mlapp sm setup --force' option to force setup (caution: force may override exsiting files).",
fg='red')
return
        # set up the SageMaker deployment files
_setup_sm()
else:
_setup_sm()
def _setup_sm(skip_dockerignore=False):
# creates the deployment directory if not exists.
create_directory(directory_name='deployment', include_init=False)
# creates the environment directory if not exists.
create_directory(directory_name='env', include_init=False)
create_file(file_name='.env', path='env', content=default_config_file)
# sets config.py environment file
default_env_filename = ''
config_file_content = default_config_file.replace("<FILENAME>", default_env_filename)
create_file(file_name='config.py', content=config_file_content)
for file_name in init_files_sm:
file_options = init_files_sm.get(file_name, file_name)
directory = file_options.get('dir', 'root')
src = os.path.join(get_project_root(), 'mlapp', 'integrations', 'sm', 'generated_files', file_name)
if directory == 'root':
dst = os.path.join(os.getcwd(), file_name)
else:
dst = os.path.join(os.getcwd(), directory, file_name)
# copy file from src to dst
copyfile(src, dst)
# creates dockerignore
if not skip_dockerignore:
if not os.path.exists(os.path.join(os.getcwd(), '.dockerignore')):
create_file('.dockerignore', content=dockerignore_file)
@commands.command("run-training-job", help=cli_sm_help.get('run_training_job'))
@click.argument("base-job-name", required=True)
@click.argument("image-name", required=True)
@click.argument("config", required=True)
@click.argument("metrics", required=True)
@click.option('-it', '--instance-type', default='ml.m4.xlarge', help="Default value is `ml.m4.xlarge`.")
@click.option('-ic', '--instance-count', default=1, help="Number of instances. Default is 1.", type=int)
def run_training_job(base_job_name, image_name, config, metrics, instance_type, instance_count):
try:
_ = _get_sm_objects()
run_training_job_script(base_job_name, image_name, config, metrics, instance_type, instance_count)
except Exception as e:
click.secho(str(e), fg='red')
@commands.command("run-tuning-job", help=cli_sm_help.get('run_tuning_job'))
@click.argument("base-job-name", required=True)
@click.argument("image-name", required=True)
@click.argument("config", required=True)
@click.argument("metrics", required=True)
@click.argument("objective-metric-name", required=True)
@click.argument("hyperparameter-ranges", required=True)
@click.option('-it', '--instance-type', default='ml.m4.xlarge', help="Default value is `ml.m4.xlarge`.")
@click.option('-ic', '--instance-count', default=1, help="Number of instances. Default is 1.", type=int)
@click.option('-mpj', '--max-parallel-jobs', default=1, help="Maximum number of parallel jobs. Default is 1.", type=int)
@click.option('-mx', '--max-jobs', default=1, help="Maximum number of jobs. Default is 1.", type=int)
def run_tuning_job(base_job_name, image_name, config, metrics, objective_metric_name, hyperparameter_ranges,
instance_type, instance_count, max_parallel_jobs, max_jobs):
try:
_ = _get_sm_objects()
run_tuning_job_script(base_job_name, image_name, config, metrics, objective_metric_name, hyperparameter_ranges,
instance_type, instance_count, max_parallel_jobs, max_jobs)
except Exception as e:
click.secho(str(e), fg='red')
def _get_sm_objects():
try:
with open(os.path.join(os.getcwd(), 'config.py'), 'r') as f:
config_content = f.read()
exec(config_content)
d: dict = eval("settings")
        # check if Workspace credentials exist
        aml = d.get('aml', {})
        if not aml:
            raise Exception("ERROR: please add AML Workspace credentials in your config.py under the 'aml' parameter.")
tenant_id = aml.get('tenant_id')
subscription_id = aml.get('subscription_id')
resource_group = aml.get('resource_group')
workspace_name = aml.get('workspace_name')
datastore_name = aml.get('datastore_name', 'workspaceblobstore')
if subscription_id is not None and resource_group is not None and workspace_name is not None:
ws: Workspace = init_workspace(tenant_id, subscription_id, resource_group, workspace_name)
datastore = get_datastore(ws, datastore_name)
return ws, datastore
else:
raise Exception(
"ERROR: credentials must include properties: subscription_id, resource_group, workspace_name.")
except Exception as e:
raise e
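# Hedged usage sketch (all names below are hypothetical placeholders; the
# expected format of the config/metrics arguments is defined by the
# underlying run_*_job scripts):
#   mlapp sm setup
#   mlapp sm run-training-job my-base-job my-image config.json metrics.json -it ml.m4.xlarge -ic 1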
|
1671517
|
from .datetime_ import Datetime
from .timedelta import Timedelta
__all__ = ["Datetime", "Timedelta"]
|
1671569
|
n = int(input())
arr = list(map(int, input().split()))
# pos[k] / neg[k] count prefixes whose running balance is +k / -k, where the
# balance goes up for even elements and down for odd ones.
pos = [0 for i in range(n + 1)]
neg = [0 for i in range(n + 1)]
itr = 0
res = 0
pos[0] = 1  # the empty prefix has balance 0
for i in arr:
    if i % 2 == 0:
        itr += 1
    else:
        itr -= 1
    # every earlier prefix with the same balance closes a subarray that
    # contains equally many even and odd elements
    if itr < 0:
        res += neg[-itr]
        neg[-itr] = neg[-itr] + 1
    else:
        res += pos[itr]
        pos[itr] = pos[itr] + 1
print(res)
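# Hedged worked example: for input "4" and "2 1 4 3" the balanced subarrays
# (equal counts of even and odd elements) are [2,1], [1,4], [4,3] and
# [2,1,4,3], so the script prints 4.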
|
1671574
|
from setuptools import setup
APP = ['MacShrew.py']
DATA_FILES = [('', ['resources'])]
OPTIONS = {
    'argv_emulation': True,
    'iconfile': 'app.icns',
    'plist': {
        'LSUIElement': True,
        'CFBundleIconFile': 'app.icns'
    },
    'packages': [
        'rumps',
        'pexpect',
        'argparse',
        'ConfigParser',
    ]
}
setup(
    app=APP,
    data_files=DATA_FILES,
    version="1.0.1",
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
|
1671615
|
import copy
import os
import tempfile
import time
import numpy as np
from ray.rllib.agents.pg import PGTrainer, PGTorchPolicy
from ray.tune.logger import UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
from marltoolbox.examples.rllib_api.pg_ipd import get_rllib_config
from marltoolbox.envs.matrix_sequential_social_dilemma import (
IteratedPrisonersDilemma,
)
from marltoolbox.utils import log, miscellaneous
from marltoolbox.utils import rollout
CONSTANT_REWARD = 1.0
EPI_LENGTH = 33
class FakeEnvWtCstReward(IteratedPrisonersDilemma):
def step(self, actions: dict):
observations, rewards, epi_is_done, info = super().step(actions)
for k in rewards.keys():
rewards[k] = CONSTANT_REWARD
return observations, rewards, epi_is_done, info
def make_FakePolicyWtDefinedActions(list_actions_to_play):
class FakePolicyWtDefinedActions(PGTorchPolicy):
def compute_actions(self, *args, **kwargs):
action = list_actions_to_play.pop(0)
return np.array([action]), [], {}
return FakePolicyWtDefinedActions
def init_worker(actions_list=None):
train_n_replicates = 1
debug = True
stop_iters = 200
tf = False
seeds = miscellaneous.get_random_seeds(train_n_replicates)
exp_name, _ = log.log_in_current_day_dir("testing")
rllib_config, stop_config = get_rllib_config(seeds, debug, stop_iters, tf)
rllib_config["env"] = FakeEnvWtCstReward
rllib_config["env_config"]["max_steps"] = EPI_LENGTH
rllib_config["seed"] = int(time.time())
if actions_list is not None:
for policy_id in FakeEnvWtCstReward({}).players_ids:
policy_to_modify = list(
rllib_config["multiagent"]["policies"][policy_id]
)
policy_to_modify[0] = make_FakePolicyWtDefinedActions(
copy.deepcopy(actions_list)
)
rllib_config["multiagent"]["policies"][
policy_id
] = policy_to_modify
pg_trainer = PGTrainer(
rllib_config, logger_creator=_get_logger_creator(exp_name)
)
return pg_trainer.workers._local_worker
def _get_logger_creator(exp_name):
logdir_prefix = exp_name + "/"
tail, head = os.path.split(exp_name)
tail_bis, _ = os.path.split(tail)
def default_logger_creator(config):
"""Creates a Unified logger with a default logdir prefix
containing the agent name and the env id
"""
if not os.path.exists(DEFAULT_RESULTS_DIR):
os.makedirs(DEFAULT_RESULTS_DIR)
if not os.path.exists(os.path.join(DEFAULT_RESULTS_DIR, tail_bis)):
os.mkdir(os.path.join(DEFAULT_RESULTS_DIR, tail_bis))
if not os.path.exists(os.path.join(DEFAULT_RESULTS_DIR, tail)):
os.mkdir(os.path.join(DEFAULT_RESULTS_DIR, tail))
if not os.path.exists(os.path.join(DEFAULT_RESULTS_DIR, exp_name)):
os.mkdir(os.path.join(DEFAULT_RESULTS_DIR, exp_name))
logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR
)
return UnifiedLogger(config, logdir, loggers=None)
return default_logger_creator
def test_rollout_constant_reward():
policy_agent_mapping = lambda policy_id: policy_id
def assert_(rollout_length, num_episodes):
worker = init_worker()
rollout_results = rollout.internal_rollout(
worker,
num_steps=rollout_length,
policy_agent_mapping=policy_agent_mapping,
reset_env_before=True,
num_episodes=num_episodes,
)
assert (
rollout_results._num_episodes == num_episodes
or rollout_results._total_steps == rollout_length
)
steps_in_last_epi = rollout_results._current_rollout
if rollout_results._total_steps == rollout_length:
n_steps_in_last_epi = rollout_results._total_steps % EPI_LENGTH
elif rollout_results._num_episodes == num_episodes:
n_steps_in_last_epi = EPI_LENGTH
# Verify rewards
for policy_id in worker.env.players_ids:
rewards = [step[3][policy_id] for step in steps_in_last_epi]
assert sum(rewards) == n_steps_in_last_epi * CONSTANT_REWARD
assert len(rewards) == n_steps_in_last_epi
all_steps = []
for epi_rollout in rollout_results._rollouts:
all_steps.extend(epi_rollout)
for policy_id in worker.env.players_ids:
rewards = [step[3][policy_id] for step in all_steps]
assert (
sum(rewards)
== min(rollout_length, num_episodes * EPI_LENGTH)
* CONSTANT_REWARD
)
assert len(rewards) == min(
rollout_length, num_episodes * EPI_LENGTH
)
assert_(rollout_length=20, num_episodes=1)
assert_(rollout_length=40, num_episodes=1)
assert_(rollout_length=77, num_episodes=2)
assert_(rollout_length=77, num_episodes=3)
assert_(rollout_length=6, num_episodes=3)
def test_rollout_specified_actions():
policy_agent_mapping = lambda policy_id: policy_id
def assert_(rollout_length, num_episodes, actions_list):
worker = init_worker(actions_list=actions_list)
rollout_results = rollout.internal_rollout(
worker,
num_steps=rollout_length,
policy_agent_mapping=policy_agent_mapping,
reset_env_before=True,
num_episodes=num_episodes,
)
assert (
rollout_results._num_episodes == num_episodes
or rollout_results._total_steps == rollout_length
)
steps_in_last_epi = rollout_results._current_rollout
if rollout_results._total_steps == rollout_length:
n_steps_in_last_epi = rollout_results._total_steps % EPI_LENGTH
elif rollout_results._num_episodes == num_episodes:
n_steps_in_last_epi = EPI_LENGTH
# Verify actions
all_steps = []
for epi_rollout in rollout_results._rollouts:
all_steps.extend(epi_rollout)
for policy_id in worker.env.players_ids:
actions_played = [step[1][policy_id] for step in all_steps]
assert len(actions_played) == min(
rollout_length, num_episodes * EPI_LENGTH
)
            print(actions_list[: len(all_steps)], actions_played)
for action_required, action_played in zip(
actions_list[: len(all_steps)], actions_played
):
assert action_required == action_played
for policy_id in worker.env.players_ids:
actions_played = [step[1][policy_id] for step in steps_in_last_epi]
assert len(actions_played) == n_steps_in_last_epi
actions_required_during_last_epi = actions_list[: len(all_steps)][
-n_steps_in_last_epi:
]
for action_required, action_played in zip(
actions_required_during_last_epi, actions_played
):
assert action_required == action_played
assert_(rollout_length=20, num_episodes=1, actions_list=[0, 1] * 100)
assert_(rollout_length=40, num_episodes=1, actions_list=[1, 1] * 100)
assert_(rollout_length=77, num_episodes=2, actions_list=[0, 0] * 100)
assert_(rollout_length=77, num_episodes=3, actions_list=[0, 1] * 100)
assert_(rollout_length=6, num_episodes=3, actions_list=[1, 0] * 100)
|
1671714
|
expected_output = {
'interface': {
'ethernet': {
'Ethernet1/1': {
'mode': 'routed',
'port_ch': '--',
'reason': 'none',
'speed': '1000(D)',
'status': 'up',
'type': 'eth',
'vlan': '--'
},
'Ethernet1/3': {
'mode': 'access',
'port_ch': '--',
'reason': 'Administratively '
'down',
'speed': 'auto(D)',
'status': 'down',
'type': 'eth',
'vlan': '1'
},
'Ethernet1/6': {
'mode': 'access',
'port_ch': '--',
'reason': 'Link not '
'connected',
'speed': 'auto(D)',
'status': 'down',
'type': 'eth',
'vlan': '1'
}
},
'loopback': {
'Loopback0': {
'description': '--',
'status': 'up'
}
},
'port': {
'mgmt0': {
'ip_address': '172.25.143.76',
'mtu': 1500,
'speed': '1000',
'status': 'up',
'vrf': '--'
}
},
'nve': {
'nve1': {
'mtu': '9216',
'reason': 'none',
'status': 'up'
}
},
'port_channel': {
'Port-channel8': {
'mode': 'access',
'protocol': 'none',
'reason': 'No operational '
'members',
'speed': 'auto(I)',
'status': 'down',
'type': 'eth',
'vlan': '1'
}
}
}
}
|
1671745
|
import json
from banal import is_mapping
from followthemoney.compare import compare
from followthemoney.util import get_entity_id
class Match(object):
SAME = True
DIFFERENT = False
UNDECIDED = None
def __init__(self, model, data):
self.model = model
self._data = data
# Support output from Aleph's linkage API (profile_id):
self.id = data.get("canonical_id", data.get("profile_id"))
self.id = self.id or get_entity_id(data.get("canonical"))
self._canonical = None
self.entity_id = data.get("entity_id")
self.entity_id = self.entity_id or get_entity_id(data.get("entity"))
self._entity = None
self.decision = data.get("decision")
self._score = data.get("score", None)
@property
def entity(self):
if self._entity is None:
data = self._data.get("entity")
if is_mapping(data) and "schema" in data:
self._entity = self.model.get_proxy(data)
return self._entity
@entity.setter
def entity(self, entity):
self._entity = entity
self.entity_id = get_entity_id(entity)
@property
def canonical(self):
if self._canonical is None:
data = self._data.get("canonical")
if is_mapping(data) and "schema" in data:
self._canonical = self.model.get_proxy(data)
return self._canonical
@canonical.setter
def canonical(self, entity):
self._canonical = entity
self.id = get_entity_id(entity)
def to_dict(self):
data = {
"canonical_id": self.id,
"entity_id": self.entity_id,
}
if self.decision is not None:
data["decision"] = self.decision
if self._score is not None:
data["score"] = self.score
if self.entity is not None:
data["entity"] = self.entity.to_dict()
if self.canonical is not None:
data["canonical"] = self.canonical.to_dict()
return data
@property
def score(self):
if self._score is not None:
return self._score
if self.entity and self.canonical:
self._score = compare(self.model, self.entity, self.canonical)
return self._score
@classmethod
def from_file(cls, model, fh):
while True:
line = fh.readline()
if not line:
break
data = json.loads(line)
yield cls(model, data)
def __repr__(self):
return "<Match(%r, %r, %s)>" % (self.id, self.entity_id, self.decision)
|
1671775
|
from typing import Iterable, List, Tuple, Set, Dict, Union
from collections import defaultdict
from hashlib import blake2b
import numpy as np
from rdkit.Chem import AllChem
from rdkit.Chem.rdchem import Mol
from rdkit import RDLogger
RDLogger.DisableLog("rdApp.*")
class NoReactionError(Exception):
"""Raised when the encoder attempts to encode a non-reaction SMILES.
Attributes:
message: a message containing the non-reaction SMILES
"""
def __init__(self, message: str):
self.message = message
super().__init__(self.message)
class DrfpEncoder:
"""A class for encoding SMILES as drfp fingerprints."""
@staticmethod
def shingling_from_mol(
in_mol: Mol, radius: int = 3, rings: bool = True, min_radius: int = 0
) -> List[str]:
"""Creates a molecular shingling from a RDKit molecule (rdkit.Chem.rdchem.Mol).
Arguments:
in_mol: A RDKit molecule instance
radius: The drfp radius (a radius of 3 corresponds to drfp6)
rings: Whether or not to include rings in the shingling
min_radius: The minimum radius that is used to extract n-grams
Returns:
The molecular shingling.
"""
shingling = []
if rings:
for ring in AllChem.GetSymmSSSR(in_mol):
bonds = set()
ring = list(ring)
for i in ring:
for j in ring:
if i != j:
bond = in_mol.GetBondBetweenAtoms(i, j)
if bond is not None:
bonds.add(bond.GetIdx())
shingling.append(
AllChem.MolToSmiles(
AllChem.PathToSubmol(in_mol, list(bonds)),
canonical=True,
allHsExplicit=True,
).encode("utf-8")
)
if min_radius == 0:
for i, atom in enumerate(in_mol.GetAtoms()):
shingling.append(atom.GetSmarts().encode("utf-8"))
for index, _ in enumerate(in_mol.GetAtoms()):
for i in range(1, radius + 1):
p = AllChem.FindAtomEnvironmentOfRadiusN(in_mol, i, index)
amap = {}
submol = AllChem.PathToSubmol(in_mol, p, atomMap=amap)
if index not in amap:
continue
smiles = AllChem.MolToSmiles(
submol,
rootedAtAtom=amap[index],
canonical=True,
allHsExplicit=True,
)
if smiles != "":
shingling.append(smiles.encode("utf-8"))
# Set ensures that the same shingle is not hashed multiple times
# (which would not change the hash, since there would be no new minima)
return list(set(shingling))
@staticmethod
def internal_encode(
in_smiles: str,
radius: int = 3,
min_radius: int = 0,
rings: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
"""Creates an drfp array from a reaction SMILES string.
Arguments:
in_smiles: A valid reaction SMILES string
radius: The drfp radius (a radius of 3 corresponds to drfp6)
min_radius: The minimum radius that is used to extract n-grams
rings: Whether or not to include rings in the shingling
Returns:
A tuple with two arrays, the first containing the drfp hash values, the second the substructure SMILES
"""
sides = in_smiles.split(">")
if len(sides) < 3:
raise NoReactionError(
f"The following is not a valid reaction SMILES: '{in_smiles}'"
)
if len(sides[1]) > 0:
sides[0] += "." + sides[1]
left = sides[0].split(".")
right = sides[2].split(".")
left_shingles = set()
right_shingles = set()
for l in left:
mol = AllChem.MolFromSmiles(l)
if not mol:
continue
sh = DrfpEncoder.shingling_from_mol(
mol,
radius=radius,
rings=rings,
min_radius=min_radius,
)
            for s in sh:
                left_shingles.add(s)
for r in right:
mol = AllChem.MolFromSmiles(r)
if not mol:
continue
sh = DrfpEncoder.shingling_from_mol(
mol,
radius=radius,
rings=rings,
min_radius=min_radius,
)
            for s in sh:
                right_shingles.add(s)
s = right_shingles.symmetric_difference(left_shingles)
if len(s) == 0:
s = left_shingles
return DrfpEncoder.hash(list(s)), list(s)
@staticmethod
def hash(shingling: List[str]) -> np.ndarray:
"""Directly hash all the SMILES in a shingling to a 32-bit integerself.
Arguments:
shingling: A list of n-grams
Returns:
A list of hashed n-grams
"""
hash_values = []
for t in shingling:
hash_values.append(int(blake2b(t, digest_size=4).hexdigest(), 16))
return np.array(hash_values, dtype=np.int32)
@staticmethod
def fold(
hash_values: np.ndarray, length: int = 2048
) -> Tuple[np.ndarray, np.ndarray]:
"""Folds the hash values to a binary vector of a given length.
Arguments:
            hash_values: An array containing the hash values
length: The length of the folded fingerprint
Returns:
A tuple containing the folded fingerprint and the indices of the on bits
"""
folded = np.zeros(length, dtype=np.uint8)
on_bits = hash_values % length
folded[on_bits] = 1
return folded, on_bits
@staticmethod
def encode(
X: Union[Iterable, str],
n_folded_length: int = 2048,
min_radius: int = 0,
radius: int = 3,
rings: bool = True,
mapping: bool = False,
) -> Union[List[np.ndarray], Tuple[List[np.ndarray], Dict[int, Set[str]]]]:
"""Encodes a list of reaction SMILES using the drfp fingerprint.
Args:
X: An iterable (e.g. List) of reaction SMILES or a single reaction SMILES to be encoded
n_folded_length: The folded length of the fingerprint (the parameter for the modulo hashing)
min_radius: The minimum radius of a substructure (0 includes single atoms)
radius: The maximum radius of a substructure
rings: Whether to include full rings as substructures
mapping: Return a feature to substructure mapping in addition to the fingerprints
Returns:
A list of drfp fingerprints or, if mapping is enabled, a tuple containing a list of drfp fingerprints and a mapping dict.
"""
if isinstance(X, str):
X = [X]
result = []
result_map = defaultdict(set)
for _, x in enumerate(X):
hashed_diff, smiles_diff = DrfpEncoder.internal_encode(
x, min_radius=min_radius, radius=radius, rings=rings
)
difference_folded, on_bits = DrfpEncoder.fold(
hashed_diff,
length=n_folded_length,
)
if mapping:
for unfolded_index, folded_index in enumerate(on_bits):
result_map[folded_index].add(
smiles_diff[unfolded_index].decode("utf-8")
)
result.append(difference_folded)
if mapping:
return result, result_map
else:
return result
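# Hedged usage sketch: encode one reaction SMILES (methanol + benzoic acid
# -> methyl benzoate, an illustrative reaction) into a 2048-bit fingerprint.
if __name__ == "__main__":
    fps = DrfpEncoder.encode("CO.O=C(O)c1ccccc1>>COC(=O)c1ccccc1")
    print(fps[0].shape, int(fps[0].sum()))  # (2048,) and the number of on bits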
|
1671778
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class PaCoLoss(nn.Module):
def __init__(self, alpha, beta=1.0, gamma=1.0, supt=1.0, temperature=1.0, base_temperature=None, K=128, num_classes=1000):
super(PaCoLoss, self).__init__()
self.temperature = temperature
self.base_temperature = temperature if base_temperature is None else base_temperature
self.K = K
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.supt = supt
self.num_classes = num_classes
def cal_weight_for_classes(self, cls_num_list):
cls_num_list = torch.Tensor(cls_num_list).view(1, self.num_classes)
self.weight = cls_num_list / cls_num_list.sum()
self.weight = self.weight.to(torch.device('cuda'))
def forward(self, features, labels=None, sup_logits=None, mask=None, epoch=None):
device = (torch.device('cuda')
if features.is_cuda
else torch.device('cpu'))
        batch_size = (features.shape[0] - self.K) // 2
labels = labels.contiguous().view(-1, 1)
mask = torch.eq(labels[:batch_size], labels.T).float().to(device)
# compute logits
anchor_dot_contrast = torch.div(
torch.matmul(features[:batch_size], features.T),
self.temperature)
# add supervised logits
anchor_dot_contrast = torch.cat(( (sup_logits + torch.log(self.weight + 1e-9) ) / self.supt, anchor_dot_contrast), dim=1)
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask),
1,
torch.arange(batch_size).view(-1, 1).to(device),
0
)
mask = mask * logits_mask
# add ground truth
one_hot_label = torch.nn.functional.one_hot(labels[:batch_size,].view(-1,), num_classes=self.num_classes).to(torch.float32)
mask = torch.cat((one_hot_label * self.beta, mask * self.alpha), dim=1)
# compute log_prob
logits_mask = torch.cat((torch.ones(batch_size, self.num_classes).to(device), self.gamma * logits_mask), dim=1)
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-12)
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
# loss
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.mean()
return loss
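# Hedged usage sketch (requires a CUDA device, since cal_weight_for_classes
# moves the class weights to GPU). `features` stacks the anchor batch, its
# augmented view and a queue of K momentum features; all sizes are illustrative.
if __name__ == "__main__":
    if torch.cuda.is_available():
        num_classes, K, batch_size, dim = 10, 16, 4, 32
        loss_fn = PaCoLoss(alpha=0.05, K=K, num_classes=num_classes)
        loss_fn.cal_weight_for_classes([100] * num_classes)
        features = F.normalize(torch.randn(2 * batch_size + K, dim), dim=1).cuda()
        labels = torch.randint(0, num_classes, (2 * batch_size + K,)).cuda()
        sup_logits = torch.randn(batch_size, num_classes).cuda()
        print(loss_fn(features, labels=labels, sup_logits=sup_logits).item())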
|
1671783
|
import os
import math
import torch
import torch.nn as nn
from model import lau
def dataparallel(model, gpu_list):
ngpus = len(gpu_list)
assert ngpus != 0, "only support gpu mode"
assert torch.cuda.device_count() >= ngpus, "Invalid Number of GPUs"
assert isinstance(model, list), "Invalid Type of Dual model"
for i in range(len(model)):
if ngpus >= 2:
model[i] = nn.DataParallel(model[i], gpu_list).cuda()
else:
model[i] = model[i].cuda()
return model
class Model(nn.Module):
def __init__(self, opt, ckp):
super(Model, self).__init__()
print('Making model...')
self.opt = opt
self.scale = opt.scale
self.idx_scale = 0
self.self_ensemble = opt.self_ensemble
self.cpu = opt.cpu
self.device = torch.device('cpu' if opt.cpu else 'cuda')
self.n_GPUs = opt.n_GPUs
self.model = lau.make_model(opt).to(self.device)
self.a = self.model.state_dict()
if not opt.cpu and opt.n_GPUs > 1:
self.model = nn.DataParallel(self.model, range(opt.n_GPUs))
self.load(opt.pre_train, cpu=opt.cpu)
if not opt.test_only:
print(self.model, file=ckp.log_file)
# compute parameter
num_parameter = self.count_parameters(self.model)
ckp.write_log(f"The number of parameters is {num_parameter / 1000 ** 2:.2f}M")
def forward(self, x, idx_scale=0):
self.idx_scale = idx_scale
target = self.get_model()
if hasattr(target, 'set_scale'):
target.set_scale(idx_scale)
return self.model(x)
def get_model(self):
if self.n_GPUs == 1:
return self.model
else:
return self.model.module
def state_dict(self, **kwargs):
target = self.get_model()
return target.state_dict(**kwargs)
    def count_parameters(self, model):
        # count only trainable parameters (same for single- and multi-GPU)
        return sum(p.numel() for p in model.parameters() if p.requires_grad)
def save(self, path, is_best=False):
target = self.get_model()
torch.save(
target.state_dict(),
os.path.join(path, 'model', 'model_latest.pt')
)
if is_best:
torch.save(
target.state_dict(),
os.path.join(path, 'model', 'model_best.pt')
)
def load(self, pre_train='.', pre_train_dual='.', cpu=False):
if cpu:
kwargs = {'map_location': lambda storage, loc: storage}
else:
kwargs = {}
#### load primal model ####
if pre_train != '.':
print('Loading model from {}'.format(pre_train))
self.get_model().load_state_dict(
torch.load(pre_train, **kwargs),
strict=False
)
|
1671823
|
from .density_tensor import DensityTensor
from .tt_circuit import TTCircuit, tt_dagger
from .tt_gates import (
    RotY, build_binary_gates_unitary, exp_pauli_y, UnaryGatesUnitary,
    BinaryGatesUnitary, o4_phases, so4, cnot, cz, SO4LR, CNOTL, CNOTR,
    CZL, CZR, Unitary, IDENTITY,
)
from .tt_sum import tt_matrix_sum, tt_sum
from .maxcut import brute_force_calculate_maxcut, calculate_cut
from .tt_state import spins_to_tt_state, tt_norm
from .tt_operators import unary_hamiltonian, binary_hamiltonian, pauli_z, pauli_y, pauli_x, identity
from .tt_precontraction import layers_contract, qubits_contract
from .tt_contraction import contraction_eq
__version__ = '0.1.0'
|
1671836
|
from hparams.hparams import clear_config
from hparams.hparams import set_lazy_resolution
import pytest
@pytest.fixture(autouse=True)
def run_before_test():
# Reset the global state before every test
clear_config()
set_lazy_resolution(False)
yield
|
1671867
|
import logging
import re
from emonitor.utils import Module
from emonitor.extensions import babel
from emonitor.modules.persons.content_admin import getAdminContent, getAdminData
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class PersonsModule(object, Module):
"""
Definition of persons module with admin
"""
info = dict(area=['admin'], name='persons', path='persons', icon='fa-users', version='0.1')
def __repr__(self):
return "persons"
def __init__(self, app):
"""
Add specific parameters and configuration to app object
:param app: flask wsgi application
"""
Module.__init__(self, app)
# add template path
app.jinja_loader.searchpath.append("{}/emonitor/modules/persons/templates".format(app.config.get('PROJECT_ROOT')))
# subnavigation
self.updateAdminSubNavigation()
# create database tables
from emonitor.modules.persons.persons import Person
# eventhandlers
# signals and handlers
from emonitor.modules.persons.message_birthday import BirthdayWidget
from emonitor.modules.messages import addMessageType
addMessageType(BirthdayWidget('message_birthday'))
# translations
babel.gettext(u'module.persons')
babel.gettext(u'module.persons.0')
babel.gettext(u'persons.upload.states-1')
babel.gettext(u'persons.upload.states0')
babel.gettext(u'persons.upload.states1')
babel.gettext(u'birthday')
def updateAdminSubNavigation(self):
"""
Add submenu entries for admin area
"""
from emonitor.modules.settings.department import Department
self.adminsubnavigation = []
for dep in Department.getDepartments():
self.adminsubnavigation.append(('/admin/persons/%s' % dep.id, dep.name))
self.adminsubnavigation.append(('/admin/persons/0', babel.gettext('admin.persons.edit...')))
def getHelp(self, area="frontend", name=""): # frontend help template
name = name.replace('help/', '').replace('/', '.')
if not name.endswith('.0'):
name = re.sub(".\d+", "", name)
return super(PersonsModule, self).getHelp(area=area, name=name)
def frontendContent(self):
return 1
def getAdminContent(self, **params):
"""
Call *getAdminContent* of alarms class
:param params: send given parameters to :py:class:`emonitor.modules.alarms.content_admin.getAdminContent`
"""
return getAdminContent(self, **params)
def getAdminData(self):
"""
Call *getAdminData* method of alarms class and return values
:return: return result of method
"""
return getAdminData(self)
|
1671943
|
import sys
import os
import platform
import re
import imp
from Tkinter import *
import tkSimpleDialog
import tkMessageBox
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
from idlelib import help
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
_py_version = ' (%s)' % platform.python_version()
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
if micro:
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
def _find_module(fullname, path=None):
"""Version of imp.find_module() that handles hierarchical module names"""
file = None
for tgt in fullname.split('.'):
if file is not None:
file.close() # close intermediate files
(file, filename, descr) = imp.find_module(tgt, path)
if descr[2] == imp.PY_SOURCE:
break # find but not load the source file
module = imp.load_module(tgt, file, filename, descr)
try:
path = module.__path__
except AttributeError:
raise ImportError, 'No source for module ' + module.__name__
if descr[2] != imp.PY_SOURCE:
        # If all of the above fails and didn't raise an exception, fall back
# to a straight import which can find __init__.py in a package.
m = __import__(fullname)
try:
filename = m.__file__
except AttributeError:
pass
else:
file = None
base, ext = os.path.splitext(filename)
if ext == '.pyc':
ext = '.py'
filename = base + ext
descr = filename, None, imp.PY_SOURCE
return file, filename, descr
class HelpDialog(object):
def __init__(self):
self.parent = None # parent of help window
        self.dlg = None  # the help window itself
def display(self, parent, near=None):
""" Display the help dialog.
parent - parent widget for the help window
near - a Toplevel widget (e.g. EditorWindow or PyShell)
to use as a reference for placing the help window
"""
import warnings as w
w.warn("EditorWindow.HelpDialog is no longer used by Idle.\n"
"It will be removed in 3.6 or later.\n"
"It has been replaced by private help.HelpWindow\n",
DeprecationWarning, stacklevel=2)
if self.dlg is None:
self.show_dialog(parent)
if near:
self.nearwindow(near)
def show_dialog(self, parent):
self.parent = parent
fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
self.dlg = dlg = textView.view_file(parent,'Help',fn, modal=False)
dlg.bind('<Destroy>', self.destroy, '+')
def nearwindow(self, near):
# Place the help dialog near the window specified by parent.
# Note - this may not reposition the window in Metacity
# if "/apps/metacity/general/disable_workarounds" is enabled
dlg = self.dlg
geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
dlg.withdraw()
dlg.geometry("=+%d+%d" % geom)
dlg.deiconify()
dlg.lift()
def destroy(self, ev=None):
self.dlg = None
self.parent = None
helpDialog = HelpDialog() # singleton instance, no longer used
class EditorWindow(object):
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
from idlelib import Bindings
from Tkinter import Toplevel
from idlelib.MultiStatusBar import MultiStatusBar
help_url = None
def __init__(self, flist=None, filename=None, key=None, root=None):
if EditorWindow.help_url is None:
dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
if sys.platform.count('linux'):
# look for html docs in a couple of standard places
pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
if os.path.isdir('/var/www/html/python/'): # "python2" rpm
dochome = '/var/www/html/python/index.html'
else:
basepath = '/usr/share/doc/' # standard location
dochome = os.path.join(basepath, pyver,
'Doc', 'index.html')
elif sys.platform[:3] == 'win':
chmfile = os.path.join(sys.prefix, 'Doc',
'Python%s.chm' % _sphinx_version())
if os.path.isfile(chmfile):
dochome = chmfile
elif sys.platform == 'darwin':
# documentation may be stored inside a python framework
dochome = os.path.join(sys.prefix,
'Resources/English.lproj/Documentation/index.html')
dochome = os.path.normpath(dochome)
if os.path.isfile(dochome):
EditorWindow.help_url = dochome
if sys.platform == 'darwin':
# Safari requires real file:-URLs
EditorWindow.help_url = 'file://' + EditorWindow.help_url
else:
EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2]
self.flist = flist
root = root or flist.root
self.root = root
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
self.menubar = Menu(root)
self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
if flist:
self.tkinter_vars = flist.vars
#self.top.instance_dict makes flist.inversedict available to
#configDialog.py so it can access all EditorWindow instances
self.top.instance_dict = flist.inversedict
else:
self.tkinter_vars = {} # keys: Tkinter event names
# values: Tkinter variable instances
self.top.instance_dict = {}
self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
'recent-files.lst')
self.text_frame = text_frame = Frame(top)
self.vbar = vbar = Scrollbar(text_frame, name='vbar')
self.width = idleConf.GetOption('main','EditorWindow','width', type='int')
text_options = {
'name': 'text',
'padx': 5,
'wrap': 'none',
'highlightthickness': 0,
'width': self.width,
'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')}
if TkVersion >= 8.5:
# Starting with tk 8.5 we have to set the new tabstyle option
# to 'wordprocessor' to achieve the same display of tabs as in
# older tk versions.
text_options['tabstyle'] = 'wordprocessor'
self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
self.top.focused_widget = self.text
self.createmenubar()
self.apply_bindings()
self.top.protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<<close-window>>", self.close_event)
if macosxSupport.isAquaTk():
# Command-W on editorwindows doesn't work without this.
text.bind('<<close-window>>', self.close_event)
# Some OS X systems have only one mouse button, so use
# control-click for popup context menus there. For two
# buttons, AquaTk defines <2> as the right button, not <3>.
text.bind("<Control-Button-1>",self.right_menu_event)
text.bind("<2>", self.right_menu_event)
else:
# Elsewhere, use right-click for popup menus.
text.bind("<3>",self.right_menu_event)
text.bind("<<cut>>", self.cut)
text.bind("<<copy>>", self.copy)
text.bind("<<paste>>", self.paste)
text.bind("<<center-insert>>", self.center_insert_event)
text.bind("<<help>>", self.help_dialog)
text.bind("<<python-docs>>", self.python_docs)
text.bind("<<about-idle>>", self.about_dialog)
text.bind("<<open-config-dialog>>", self.config_dialog)
text.bind("<<open-module>>", self.open_module)
text.bind("<<do-nothing>>", lambda event: "break")
text.bind("<<select-all>>", self.select_all)
text.bind("<<remove-selection>>", self.remove_selection)
text.bind("<<find>>", self.find_event)
text.bind("<<find-again>>", self.find_again_event)
text.bind("<<find-in-files>>", self.find_in_files_event)
text.bind("<<find-selection>>", self.find_selection_event)
text.bind("<<replace>>", self.replace_event)
text.bind("<<goto-line>>", self.goto_line_event)
text.bind("<<smart-backspace>>",self.smart_backspace_event)
text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
text.bind("<<smart-indent>>",self.smart_indent_event)
text.bind("<<indent-region>>",self.indent_region_event)
text.bind("<<dedent-region>>",self.dedent_region_event)
text.bind("<<comment-region>>",self.comment_region_event)
text.bind("<<uncomment-region>>",self.uncomment_region_event)
text.bind("<<tabify-region>>",self.tabify_region_event)
text.bind("<<untabify-region>>",self.untabify_region_event)
text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
text.bind("<Left>", self.move_at_edge_if_selection(0))
text.bind("<Right>", self.move_at_edge_if_selection(1))
text.bind("<<del-word-left>>", self.del_word_left)
text.bind("<<del-word-right>>", self.del_word_right)
text.bind("<<beginning-of-line>>", self.home_callback)
if flist:
flist.inversedict[self] = key
if key:
flist.dict[key] = self
text.bind("<<open-new-window>>", self.new_callback)
text.bind("<<close-all-windows>>", self.flist.close_all_callback)
text.bind("<<open-class-browser>>", self.open_class_browser)
text.bind("<<open-path-browser>>", self.open_path_browser)
self.set_status_bar()
vbar['command'] = text.yview
vbar.pack(side=RIGHT, fill=Y)
text['yscrollcommand'] = vbar.set
text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow')
text_frame.pack(side=LEFT, fill=BOTH, expand=1)
text.pack(side=TOP, fill=BOTH, expand=1)
text.focus_set()
# usetabs true -> literal tab characters are used by indent and
# dedent cmds, possibly mixed with spaces if
# indentwidth is not a multiple of tabwidth,
# which will cause Tabnanny to nag!
# false -> tab characters are converted to spaces by indent
# and dedent cmds, and ditto TAB keystrokes
# Although use-spaces=0 can be configured manually in config-main.def,
# configuration of tabs v. spaces is not supported in the configuration
# dialog. IDLE promotes the preferred Python indentation: use spaces!
usespaces = idleConf.GetOption('main', 'Indent', 'use-spaces', type='bool')
self.usetabs = not usespaces
# tabwidth is the display width of a literal tab character.
# CAUTION: telling Tk to use anything other than its default
# tab setting causes it to use an entirely different tabbing algorithm,
# treating tab stops as fixed distances from the left margin.
# Nobody expects this, so for now tabwidth should never be changed.
self.tabwidth = 8 # must remain 8 until Tk is fixed.
# indentwidth is the number of screen characters per indent level.
# The recommended Python indentation is four spaces.
self.indentwidth = self.tabwidth
self.set_notabs_indentwidth()
# If context_use_ps1 is true, parsing searches back for a ps1 line;
# else searches for a popular (if, def, ...) Python stmt.
self.context_use_ps1 = False
# When searching backwards for a reliable place to begin parsing,
# first start num_context_lines[0] lines back, then
# num_context_lines[1] lines back if that didn't work, and so on.
# The last value should be huge (larger than the # of lines in a
# conceivable file).
# Making the initial values larger slows things down more often.
self.num_context_lines = 50, 500, 5000000
self.per = per = self.Percolator(text)
self.undo = undo = self.UndoDelegator()
per.insertfilter(undo)
text.undo_block_start = undo.undo_block_start
text.undo_block_stop = undo.undo_block_stop
undo.set_saved_change_hook(self.saved_change_hook)
# IOBinding implements file I/O and printing functionality
self.io = io = self.IOBinding(self)
io.set_filename_change_hook(self.filename_change_hook)
# Create the recent files submenu
self.recent_files_menu = Menu(self.menubar, tearoff=0)
self.menudict['file'].insert_cascade(3, label='Recent Files',
underline=0,
menu=self.recent_files_menu)
self.update_recent_files_list()
self.color = None # initialized below in self.ResetColorizer
if filename:
if os.path.exists(filename) and not os.path.isdir(filename):
io.loadfile(filename)
else:
io.set_filename(filename)
self.ResetColorizer()
self.saved_change_hook()
self.set_indentation_params(self.ispythonsource(filename))
self.load_extensions()
menu = self.menudict.get('windows')
if menu:
end = menu.index("end")
if end is None:
end = -1
if end >= 0:
menu.add_separator()
end = end + 1
self.wmenu_end = end
WindowList.register_callback(self.postwindowsmenu)
# Some abstractions so IDLE extensions are cross-IDE
self.askyesno = tkMessageBox.askyesno
self.askinteger = tkSimpleDialog.askinteger
self.showerror = tkMessageBox.showerror
def _filename_to_unicode(self, filename):
"""convert filename to unicode in order to display it in Tk"""
if isinstance(filename, unicode) or not filename:
return filename
else:
try:
return filename.decode(self.filesystemencoding)
except UnicodeDecodeError:
# XXX
try:
return filename.decode(self.encoding)
except UnicodeDecodeError:
# byte-to-byte conversion
return filename.decode('iso8859-1')
def new_callback(self, event):
dirname, basename = self.io.defaultfilename()
self.flist.new(dirname)
return "break"
def home_callback(self, event):
if (event.state & 4) != 0 and event.keysym == "Home":
# state&4==Control. If <Control-Home>, use the Tk binding.
return
if self.text.index("iomark") and \
self.text.compare("iomark", "<=", "insert lineend") and \
self.text.compare("insert linestart", "<=", "iomark"):
# In Shell on input line, go to just after prompt
insertpt = int(self.text.index("iomark").split(".")[1])
else:
line = self.text.get("insert linestart", "insert lineend")
for insertpt in xrange(len(line)):
if line[insertpt] not in (' ','\t'):
break
else:
insertpt=len(line)
lineat = int(self.text.index("insert").split('.')[1])
if insertpt == lineat:
insertpt = 0
dest = "insert linestart+"+str(insertpt)+"c"
if (event.state&1) == 0:
# shift was not pressed
self.text.tag_remove("sel", "1.0", "end")
else:
if not self.text.index("sel.first"):
self.text.mark_set("my_anchor", "insert") # there was no previous selection
else:
if self.text.compare(self.text.index("sel.first"), "<", self.text.index("insert")):
self.text.mark_set("my_anchor", "sel.first") # extend back
else:
self.text.mark_set("my_anchor", "sel.last") # extend forward
first = self.text.index(dest)
last = self.text.index("my_anchor")
if self.text.compare(first,">",last):
first,last = last,first
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", first, last)
self.text.mark_set("insert", dest)
self.text.see("insert")
return "break"
def set_status_bar(self):
self.status_bar = self.MultiStatusBar(self.top)
sep = Frame(self.top, height=1, borderwidth=1, background='grey75')
if sys.platform == "darwin":
# Insert some padding to avoid obscuring some of the statusbar
# by the resize widget.
self.status_bar.set_label('_padding1', ' ', side=RIGHT)
self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
self.status_bar.pack(side=BOTTOM, fill=X)
sep.pack(side=BOTTOM, fill=X)
self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
self.text.event_add("<<set-line-and-column>>",
"<KeyRelease>", "<ButtonRelease>")
self.text.after_idle(self.set_line_and_column)
def set_line_and_column(self, event=None):
line, column = self.text.index(INSERT).split('.')
self.status_bar.set_label('column', 'Col: %s' % column)
self.status_bar.set_label('line', 'Ln: %s' % line)
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("format", "F_ormat"),
("run", "_Run"),
("options", "_Options"),
("windows", "_Window"),
("help", "_Help"),
]
def createmenubar(self):
mbar = self.menubar
self.menudict = menudict = {}
for name, label in self.menu_specs:
underline, label = prepstr(label)
menudict[name] = menu = Menu(mbar, name=name, tearoff=0)
mbar.add_cascade(label=label, menu=menu, underline=underline)
if macosxSupport.isCarbonTk():
# Insert the application menu
menudict['application'] = menu = Menu(mbar, name='apple',
tearoff=0)
mbar.add_cascade(label='IDLE', menu=menu)
self.fill_menus()
self.base_helpmenu_length = self.menudict['help'].index(END)
self.reset_help_menu_entries()
def postwindowsmenu(self):
# Only called when Windows menu exists
menu = self.menudict['windows']
end = menu.index("end")
if end is None:
end = -1
if end > self.wmenu_end:
menu.delete(self.wmenu_end+1, end)
WindowList.add_windows_to_menu(menu)
rmenu = None
def right_menu_event(self, event):
self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
if not self.rmenu:
self.make_rmenu()
rmenu = self.rmenu
self.event = event
iswin = sys.platform[:3] == 'win'
if iswin:
self.text.config(cursor="arrow")
for item in self.rmenu_specs:
try:
label, eventname, verify_state = item
except ValueError: # see issue1207589
continue
if verify_state is None:
continue
state = getattr(self, verify_state)()
rmenu.entryconfigure(label, state=state)
rmenu.tk_popup(event.x_root, event.y_root)
if iswin:
self.text.config(cursor="ibeam")
rmenu_specs = [
# ("Label", "<<virtual-event>>", "statefuncname"), ...
("Close", "<<close-window>>", None), # Example
]
def make_rmenu(self):
rmenu = Menu(self.text, tearoff=0)
for item in self.rmenu_specs:
label, eventname = item[0], item[1]
if label is not None:
def command(text=self.text, eventname=eventname):
text.event_generate(eventname)
rmenu.add_command(label=label, command=command)
else:
rmenu.add_separator()
self.rmenu = rmenu
def rmenu_check_cut(self):
return self.rmenu_check_copy()
def rmenu_check_copy(self):
try:
indx = self.text.index('sel.first')
except TclError:
return 'disabled'
else:
return 'normal' if indx else 'disabled'
def rmenu_check_paste(self):
try:
self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
except TclError:
return 'disabled'
else:
return 'normal'
def about_dialog(self, event=None):
"Handle Help 'About IDLE' event."
# Synchronize with macosxSupport.overrideRootMenu.about_dialog.
aboutDialog.AboutDialog(self.top,'About IDLE')
def config_dialog(self, event=None):
"Handle Options 'Configure IDLE' event."
# Synchronize with macosxSupport.overrideRootMenu.config_dialog.
configDialog.ConfigDialog(self.top,'Settings')
def help_dialog(self, event=None):
"Handle Help 'IDLE Help' event."
# Synchronize with macosxSupport.overrideRootMenu.help_dialog.
if self.root:
parent = self.root
else:
parent = self.top
help.show_idlehelp(parent)
def python_docs(self, event=None):
if sys.platform[:3] == 'win':
try:
os.startfile(self.help_url)
except WindowsError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(self.help_url)
return "break"
def cut(self,event):
self.text.event_generate("<<Cut>>")
return "break"
def copy(self,event):
if not self.text.tag_ranges("sel"):
# There is no selection, so do nothing and maybe interrupt.
return
self.text.event_generate("<<Copy>>")
return "break"
def paste(self,event):
self.text.event_generate("<<Paste>>")
self.text.see("insert")
return "break"
def select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
def remove_selection(self, event=None):
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
def move_at_edge_if_selection(self, edge_index):
"""Cursor move begins at start or end of selection
When a left/right cursor key is pressed create and return to Tkinter a
function which causes a cursor move from the associated edge of the
selection.
"""
self_text_index = self.text.index
self_text_mark_set = self.text.mark_set
edges_table = ("sel.first+1c", "sel.last-1c")
def move_at_edge(event):
if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
try:
self_text_index("sel.first")
self_text_mark_set("insert", edges_table[edge_index])
except TclError:
pass
return move_at_edge
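    # A usage sketch (an assumed binding, not quoted from this file):
    #     text.bind("<Left>", self.move_at_edge_if_selection(0))
    #     text.bind("<Right>", self.move_at_edge_if_selection(1))
    # The handler nudges "insert" next to sel.first or sel.last so the
    # default arrow-key motion then proceeds from that edge.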
def del_word_left(self, event):
self.text.event_generate('<Meta-Delete>')
return "break"
def del_word_right(self, event):
self.text.event_generate('<Meta-d>')
return "break"
def find_event(self, event):
SearchDialog.find(self.text)
return "break"
def find_again_event(self, event):
SearchDialog.find_again(self.text)
return "break"
def find_selection_event(self, event):
SearchDialog.find_selection(self.text)
return "break"
def find_in_files_event(self, event):
GrepDialog.grep(self.text, self.io, self.flist)
return "break"
def replace_event(self, event):
ReplaceDialog.replace(self.text)
return "break"
def goto_line_event(self, event):
text = self.text
        lineno = tkSimpleDialog.askinteger("Goto",
                "Go to line number:", parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
def open_module(self, event=None):
# XXX Shouldn't this be in IOBinding or in FileList?
try:
name = self.text.get("sel.first", "sel.last")
except TclError:
name = ""
else:
name = name.strip()
name = tkSimpleDialog.askstring("Module",
"Enter the name of a Python module\n"
"to search on sys.path and open:",
parent=self.text, initialvalue=name)
if name:
name = name.strip()
if not name:
return
# XXX Ought to insert current file's directory in front of path
try:
(f, file_path, (suffix, mode, mtype)) = _find_module(name)
except (NameError, ImportError) as msg:
tkMessageBox.showerror("Import error", str(msg), parent=self.text)
return
if mtype != imp.PY_SOURCE:
tkMessageBox.showerror("Unsupported type",
"%s is not a source module" % name, parent=self.text)
return
if f:
f.close()
if self.flist:
self.flist.open(file_path)
else:
self.io.loadfile(file_path)
return file_path
def open_class_browser(self, event=None):
filename = self.io.filename
if not (self.__class__.__name__ == 'PyShellEditorWindow'
and filename):
filename = self.open_module()
if filename is None:
return
head, tail = os.path.split(filename)
base, ext = os.path.splitext(tail)
from idlelib import ClassBrowser
ClassBrowser.ClassBrowser(self.flist, base, [head])
def open_path_browser(self, event=None):
from idlelib import PathBrowser
PathBrowser.PathBrowser(self.flist)
def gotoline(self, lineno):
if lineno is not None and lineno > 0:
self.text.mark_set("insert", "%d.0" % lineno)
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", "insert", "insert +1l")
self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
try:
f = open(filename)
line = f.readline()
f.close()
except IOError:
return False
return line.startswith('#!') and line.find('python') >= 0
def close_hook(self):
if self.flist:
self.flist.unregister_maybe_terminate(self)
self.flist = None
def set_close_hook(self, close_hook):
self.close_hook = close_hook
def filename_change_hook(self):
if self.flist:
self.flist.filename_changed_edit(self)
self.saved_change_hook()
self.top.update_windowlist_registry(self)
self.ResetColorizer()
def _addcolorizer(self):
if self.color:
return
if self.ispythonsource(self.io.filename):
self.color = self.ColorDelegator()
# can add more colorizers here...
if self.color:
self.per.removefilter(self.undo)
self.per.insertfilter(self.color)
self.per.insertfilter(self.undo)
def _rmcolorizer(self):
if not self.color:
return
self.color.removecolors()
self.per.removefilter(self.color)
self.color = None
def ResetColorizer(self):
"Update the color theme"
# Called from self.filename_change_hook and from configDialog.py
self._rmcolorizer()
self._addcolorizer()
theme = idleConf.CurrentTheme()
normal_colors = idleConf.GetHighlight(theme, 'normal')
cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
select_colors = idleConf.GetHighlight(theme, 'hilite')
self.text.config(
foreground=normal_colors['foreground'],
background=normal_colors['background'],
insertbackground=cursor_color,
selectforeground=select_colors['foreground'],
selectbackground=select_colors['background'],
)
if TkVersion >= 8.5:
self.text.config(
inactiveselectbackground=select_colors['background'])
def ResetFont(self):
"Update the text widgets' font if it is changed"
# Called from configDialog.py
        self.text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow')
def RemoveKeybindings(self):
"Remove the keybindings before they are changed."
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
self.text.event_delete(event, *keylist)
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
for event, keylist in xkeydefs.items():
self.text.event_delete(event, *keylist)
def ApplyKeybindings(self):
"Update the keybindings after they are changed"
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
self.apply_bindings()
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
self.apply_bindings(xkeydefs)
#update menu accelerators
menuEventDict = {}
for menu in self.Bindings.menudefs:
menuEventDict[menu[0]] = {}
for item in menu[1]:
if item:
menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
for menubarItem in self.menudict.keys():
menu = self.menudict[menubarItem]
end = menu.index(END)
if end is None:
# Skip empty menus
continue
end += 1
for index in range(0, end):
if menu.type(index) == 'command':
accel = menu.entrycget(index, 'accelerator')
if accel:
itemName = menu.entrycget(index, 'label')
event = ''
if menubarItem in menuEventDict:
if itemName in menuEventDict[menubarItem]:
event = menuEventDict[menubarItem][itemName]
if event:
accel = get_accelerator(keydefs, event)
menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
"Update the indentwidth if changed and not using tabs in this window"
# Called from configDialog.py
if not self.usetabs:
            self.indentwidth = idleConf.GetOption('main', 'Indent',
                                                  'num-spaces', type='int')
def reset_help_menu_entries(self):
"Update the additional help entries on the Help menu"
help_list = idleConf.GetAllExtraHelpSourcesList()
helpmenu = self.menudict['help']
# first delete the extra help entries, if any
helpmenu_length = helpmenu.index(END)
if helpmenu_length > self.base_helpmenu_length:
helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
# then rebuild them
if help_list:
helpmenu.add_separator()
for entry in help_list:
cmd = self.__extra_help_callback(entry[1])
helpmenu.add_command(label=entry[0], command=cmd)
# and update the menu dictionary
self.menudict['help'] = helpmenu
def __extra_help_callback(self, helpfile):
"Create a callback with the helpfile value frozen at definition time"
def display_extra_help(helpfile=helpfile):
if not helpfile.startswith(('www', 'http')):
helpfile = os.path.normpath(helpfile)
if sys.platform[:3] == 'win':
try:
os.startfile(helpfile)
except WindowsError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(helpfile)
return display_extra_help
def update_recent_files_list(self, new_file=None):
"Load and update the recent files list and menus"
rf_list = []
if os.path.exists(self.recent_files_path):
with open(self.recent_files_path, 'r') as rf_list_file:
rf_list = rf_list_file.readlines()
if new_file:
new_file = os.path.abspath(new_file) + '\n'
if new_file in rf_list:
rf_list.remove(new_file) # move to top
rf_list.insert(0, new_file)
# clean and save the recent files list
bad_paths = []
for path in rf_list:
if '\0' in path or not os.path.exists(path[0:-1]):
bad_paths.append(path)
rf_list = [path for path in rf_list if path not in bad_paths]
ulchars = "1234567890ABCDEFGHIJK"
rf_list = rf_list[0:len(ulchars)]
try:
with open(self.recent_files_path, 'w') as rf_file:
rf_file.writelines(rf_list)
except IOError as err:
if not getattr(self.root, "recentfilelist_error_displayed", False):
self.root.recentfilelist_error_displayed = True
tkMessageBox.showwarning(title='IDLE Warning',
message="Cannot update File menu Recent Files list. "
"Your operating system says:\n%s\n"
"Select OK and IDLE will continue without updating."
% str(err),
parent=self.text)
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict.keys():
menu = instance.recent_files_menu
menu.delete(0, END) # clear, and rebuild:
for i, file_name in enumerate(rf_list):
file_name = file_name.rstrip() # zap \n
# make unicode string to display non-ASCII chars correctly
ufile_name = self._filename_to_unicode(file_name)
callback = instance.__recent_file_callback(file_name)
menu.add_command(label=ulchars[i] + " " + ufile_name,
command=callback,
underline=0)
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
def saved_change_hook(self):
short = self.short_title()
long = self.long_title()
if short and long:
title = short + " - " + long + _py_version
elif short:
title = short
elif long:
title = long
else:
title = "Untitled"
icon = short or long or title
if not self.get_saved():
title = "*%s*" % title
icon = "*%s" % icon
self.top.wm_title(title)
self.top.wm_iconname(icon)
def get_saved(self):
return self.undo.get_saved()
def set_saved(self, flag):
self.undo.set_saved(flag)
def reset_undo(self):
self.undo.reset_undo()
def short_title(self):
filename = self.io.filename
if filename:
filename = os.path.basename(filename)
else:
filename = "Untitled"
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(filename)
def long_title(self):
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(self.io.filename or "")
def center_insert_event(self, event):
self.center()
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
def getwindowlines(self):
text = self.text
top = self.getlineno("@0,0")
bot = self.getlineno("@0,65535")
if top == bot and text.winfo_height() == 1:
# Geometry manager hasn't run yet
height = int(text['height'])
bot = top + height - 1
return top, bot
def getlineno(self, mark="insert"):
text = self.text
return int(float(text.index(mark)))
def get_geometry(self):
"Return (width, height, x, y)"
geom = self.top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
        return tuple(map(int, m.groups()))
def close_event(self, event):
self.close()
def maybesave(self):
if self.io:
if not self.get_saved():
if self.top.state()!='normal':
self.top.deiconify()
self.top.lower()
self.top.lift()
return self.io.maybesave()
def close(self):
reply = self.maybesave()
if str(reply) != "cancel":
self._close()
return reply
def _close(self):
if self.io.filename:
self.update_recent_files_list(new_file=self.io.filename)
WindowList.unregister_callback(self.postwindowsmenu)
self.unload_extensions()
self.io.close()
self.io = None
self.undo = None
if self.color:
self.color.close(False)
self.color = None
self.text = None
self.tkinter_vars = None
self.per.close()
self.per = None
self.top.destroy()
if self.close_hook:
# unless override: unregister from flist, terminate if last window
self.close_hook()
def load_extensions(self):
self.extensions = {}
self.load_standard_extensions()
def unload_extensions(self):
for ins in self.extensions.values():
if hasattr(ins, "close"):
ins.close()
self.extensions = {}
def load_standard_extensions(self):
for name in self.get_standard_extension_names():
try:
self.load_extension(name)
except:
print "Failed to load extension", repr(name)
import traceback
traceback.print_exc()
def get_standard_extension_names(self):
return idleConf.GetExtensions(editor_only=True)
def load_extension(self, name):
try:
mod = __import__(name, globals(), locals(), [])
except ImportError:
print "\nFailed to import extension: ", name
return
cls = getattr(mod, name)
keydefs = idleConf.GetExtensionBindings(name)
if hasattr(cls, "menudefs"):
self.fill_menus(cls.menudefs, keydefs)
ins = cls(self)
self.extensions[name] = ins
if keydefs:
self.apply_bindings(keydefs)
for vevent in keydefs.keys():
methodname = vevent.replace("-", "_")
while methodname[:1] == '<':
methodname = methodname[1:]
while methodname[-1:] == '>':
methodname = methodname[:-1]
methodname = methodname + "_event"
if hasattr(ins, methodname):
self.text.bind(vevent, getattr(ins, methodname))
def apply_bindings(self, keydefs=None):
if keydefs is None:
keydefs = self.Bindings.default_keydefs
text = self.text
text.keydefs = keydefs
for event, keylist in keydefs.items():
if keylist:
text.event_add(event, *keylist)
def fill_menus(self, menudefs=None, keydefs=None):
"""Add appropriate entries to the menus and submenus
Menus that are absent or None in self.menudict are ignored.
"""
if menudefs is None:
menudefs = self.Bindings.menudefs
if keydefs is None:
keydefs = self.Bindings.default_keydefs
menudict = self.menudict
text = self.text
for mname, entrylist in menudefs:
menu = menudict.get(mname)
if not menu:
continue
for entry in entrylist:
if not entry:
menu.add_separator()
else:
label, eventname = entry
checkbutton = (label[:1] == '!')
if checkbutton:
label = label[1:]
underline, label = prepstr(label)
accelerator = get_accelerator(keydefs, eventname)
def command(text=text, eventname=eventname):
text.event_generate(eventname)
if checkbutton:
var = self.get_var_obj(eventname, BooleanVar)
menu.add_checkbutton(label=label, underline=underline,
command=command, accelerator=accelerator,
variable=var)
else:
menu.add_command(label=label, underline=underline,
command=command,
accelerator=accelerator)
def getvar(self, name):
var = self.get_var_obj(name)
if var:
value = var.get()
return value
else:
            raise NameError(name)
def setvar(self, name, value, vartype=None):
var = self.get_var_obj(name, vartype)
if var:
var.set(value)
else:
            raise NameError(name)
def get_var_obj(self, name, vartype=None):
var = self.tkinter_vars.get(name)
if not var and vartype:
# create a Tkinter variable object with self.text as master:
self.tkinter_vars[name] = var = vartype(self.text)
return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.
# Is character at text_index in a Python string? Return 0 for
# "guaranteed no", true for anything else. This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.
def is_char_in_string(self, text_index):
if self.color:
# Return true iff colorizer hasn't (re)gotten this far
# yet, or the character is tagged as being in a string
return self.text.tag_prevrange("TODO", text_index) or \
"STRING" in self.text.tag_names(text_index)
else:
# The colorizer is missing: assume the worst
return 1
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
def get_selection_indices(self):
try:
first = self.text.index("sel.first")
last = self.text.index("sel.last")
return first, last
except TclError:
return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
def get_tabwidth(self):
current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
return int(current)
# Set the text widget's current view of what a tab stop means.
def set_tabwidth(self, newtabwidth):
text = self.text
if self.get_tabwidth() != newtabwidth:
pixels = text.tk.call("font", "measure", text["font"],
"-displayof", text.master,
"n" * newtabwidth)
text.configure(tabs=pixels)
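    # For example, set_tabwidth(4) asks Tk to measure "nnnn" in the text
    # widget's current font and uses that pixel width as the tab stop.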
# If ispythonsource and guess are true, guess a good value for
# indentwidth based on file content (if possible), and if
# indentwidth != tabwidth set usetabs false.
# In any case, adjust the Text widget's view of what a tab
# character means.
def set_indentation_params(self, ispythonsource, guess=True):
if guess and ispythonsource:
i = self.guess_indent()
if 2 <= i <= 8:
self.indentwidth = i
if self.indentwidth != self.tabwidth:
self.usetabs = False
self.set_tabwidth(self.tabwidth)
def smart_backspace_event(self, event):
text = self.text
first, last = self.get_selection_indices()
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
return "break"
# Delete whitespace left, until hitting a real char or closest
# preceding virtual tab stop.
chars = text.get("insert linestart", "insert")
if chars == '':
if text.compare("insert", ">", "1.0"):
# easy: delete preceding newline
text.delete("insert-1c")
else:
text.bell() # at start of buffer
return "break"
if chars[-1] not in " \t":
# easy: delete preceding real char
text.delete("insert-1c")
return "break"
# Ick. It may require *inserting* spaces if we back up over a
# tab character! This is written to be clear, not fast.
tabwidth = self.tabwidth
have = len(chars.expandtabs(tabwidth))
assert have > 0
want = ((have - 1) // self.indentwidth) * self.indentwidth
# Debug prompt is multilined....
if self.context_use_ps1:
last_line_of_prompt = sys.ps1.split('\n')[-1]
else:
last_line_of_prompt = ''
ncharsdeleted = 0
while 1:
if chars == last_line_of_prompt:
break
chars = chars[:-1]
ncharsdeleted = ncharsdeleted + 1
have = len(chars.expandtabs(tabwidth))
if have <= want or chars[-1] not in " \t":
break
text.undo_block_start()
text.delete("insert-%dc" % ncharsdeleted, "insert")
if have < want:
text.insert("insert", ' ' * (want - have))
text.undo_block_stop()
return "break"
def smart_indent_event(self, event):
# if intraline selection:
# delete it
# elif multiline selection:
# do indent-region
# else:
# indent one level
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
if index2line(first) != index2line(last):
return self.indent_region_event(event)
text.delete(first, last)
text.mark_set("insert", first)
prefix = text.get("insert linestart", "insert")
raw, effective = classifyws(prefix, self.tabwidth)
if raw == len(prefix):
# only whitespace to the left
self.reindent_to(effective + self.indentwidth)
else:
# tab to the next 'stop' within or to right of line's text:
if self.usetabs:
pad = '\t'
else:
effective = len(prefix.expandtabs(self.tabwidth))
n = self.indentwidth
pad = ' ' * (n - effective % n)
text.insert("insert", pad)
text.see("insert")
return "break"
finally:
text.undo_block_stop()
def newline_and_indent_event(self, event):
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
line = text.get("insert linestart", "insert")
i, n = 0, len(line)
while i < n and line[i] in " \t":
i = i+1
if i == n:
# the cursor is in or at leading indentation in a continuation
# line; just inject an empty line at the start
text.insert("insert linestart", '\n')
return "break"
indent = line[:i]
# strip whitespace before insert point unless it's in the prompt
i = 0
last_line_of_prompt = sys.ps1.split('\n')[-1]
while line and line[-1] in " \t" and line != last_line_of_prompt:
line = line[:-1]
i = i+1
if i:
text.delete("insert - %d chars" % i, "insert")
# strip whitespace after insert point
while text.get("insert") in " \t":
text.delete("insert")
# start new line
text.insert("insert", '\n')
# adjust indentation for continuations and block
# open/close first need to find the last stmt
lno = index2line(text.index('insert'))
y = PyParse.Parser(self.indentwidth, self.tabwidth)
if not self.context_use_ps1:
for context in self.num_context_lines:
startat = max(lno - context, 1)
startatindex = repr(startat) + ".0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
bod = y.find_good_parse_start(
self.context_use_ps1,
self._build_char_in_string_func(startatindex))
if bod is not None or startat == 1:
break
y.set_lo(bod or 0)
else:
r = text.tag_prevrange("console", "insert")
if r:
startatindex = r[1]
else:
startatindex = "1.0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
y.set_lo(0)
c = y.get_continuation_type()
if c != PyParse.C_NONE:
# The current stmt hasn't ended yet.
if c == PyParse.C_STRING_FIRST_LINE:
# after the first line of a string; do not indent at all
pass
elif c == PyParse.C_STRING_NEXT_LINES:
# inside a string which started before this line;
# just mimic the current indent
text.insert("insert", indent)
elif c == PyParse.C_BRACKET:
# line up with the first (if any) element of the
# last open bracket structure; else indent one
# level beyond the indent of the line with the
# last open bracket
self.reindent_to(y.compute_bracket_indent())
elif c == PyParse.C_BACKSLASH:
# if more than one line in this stmt already, just
# mimic the current indent; else if initial line
# has a start on an assignment stmt, indent to
# beyond leftmost =; else to beyond first chunk of
# non-whitespace on initial line
if y.get_num_lines_in_stmt() > 1:
text.insert("insert", indent)
else:
self.reindent_to(y.compute_backslash_indent())
else:
assert 0, "bogus continuation type %r" % (c,)
return "break"
# This line starts a brand new stmt; indent relative to
# indentation of initial line of closest preceding
# interesting stmt.
indent = y.get_base_indent_string()
text.insert("insert", indent)
if y.is_block_opener():
self.smart_indent_event(event)
elif indent and y.is_block_closer():
self.smart_backspace_event(event)
return "break"
finally:
text.see("insert")
text.undo_block_stop()
# Our editwin provides an is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.
def _build_char_in_string_func(self, startindex):
def inner(offset, _startindex=startindex,
_icis=self.is_char_in_string):
return _icis(_startindex + "+%dc" % offset)
return inner
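    # e.g. inner(5) with startindex "10.0" evaluates
    # is_char_in_string("10.0+5c"), i.e. five characters past the start of
    # line 10, translating PyParse's string offsets into Tk text indices.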
def indent_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
effective = effective + self.indentwidth
lines[pos] = self._make_blanks(effective) + line[raw:]
self.set_region(head, tail, chars, lines)
return "break"
def dedent_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
effective = max(effective - self.indentwidth, 0)
lines[pos] = self._make_blanks(effective) + line[raw:]
self.set_region(head, tail, chars, lines)
return "break"
def comment_region_event(self, event):
head, tail, chars, lines = self.get_region()
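        # lines[-1] is the text after the region's final newline (an empty
        # string); skip it so no stray '##' appears past the selection.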
for pos in range(len(lines) - 1):
line = lines[pos]
lines[pos] = '##' + line
self.set_region(head, tail, chars, lines)
def uncomment_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if not line:
continue
if line[:2] == '##':
line = line[2:]
elif line[:1] == '#':
line = line[1:]
lines[pos] = line
self.set_region(head, tail, chars, lines)
def tabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
if tabwidth is None: return
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, tabwidth)
ntabs, nspaces = divmod(effective, tabwidth)
lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
self.set_region(head, tail, chars, lines)
def untabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
if tabwidth is None: return
for pos in range(len(lines)):
lines[pos] = lines[pos].expandtabs(tabwidth)
self.set_region(head, tail, chars, lines)
def toggle_tabs_event(self, event):
if self.askyesno(
"Toggle tabs",
"Turn tabs " + ("on", "off")[self.usetabs] +
"?\nIndent width " +
("will be", "remains at")[self.usetabs] + " 8." +
"\n Note: a tab is always 8 columns",
parent=self.text):
self.usetabs = not self.usetabs
# Try to prevent inconsistent indentation.
# User must change indent width manually after using tabs.
self.indentwidth = 8
return "break"
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
def change_indentwidth_event(self, event):
new = self.askinteger(
"Indent width",
"New indent width (2-16)\n(Always use 8 when using tabs)",
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
maxvalue=16)
if new and new != self.indentwidth and not self.usetabs:
self.indentwidth = new
return "break"
def get_region(self):
text = self.text
first, last = self.get_selection_indices()
if first and last:
head = text.index(first + " linestart")
tail = text.index(last + "-1c lineend +1c")
else:
head = text.index("insert linestart")
tail = text.index("insert lineend +1c")
chars = text.get(head, tail)
lines = chars.split("\n")
return head, tail, chars, lines
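    # e.g. with "foo\nbar" fully selected this returns the linestart/lineend
    # indices, chars "foo\nbar\n", and lines ["foo", "bar", ""] -- note the
    # trailing empty string after the final newline.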
def set_region(self, head, tail, chars, lines):
text = self.text
newchars = "\n".join(lines)
if newchars == chars:
text.bell()
return
text.tag_remove("sel", "1.0", "end")
text.mark_set("insert", head)
text.undo_block_start()
text.delete(head, tail)
text.insert(head, newchars)
text.undo_block_stop()
text.tag_add("sel", head, "insert")
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
if self.usetabs:
ntabs, nspaces = divmod(n, self.tabwidth)
return '\t' * ntabs + ' ' * nspaces
else:
return ' ' * n
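    # e.g. with tabwidth 8, _make_blanks(10) yields '\t' plus two spaces when
    # self.usetabs is true, and ten spaces otherwise.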
# Delete from beginning of line to insert point, then reinsert
# column logical (meaning use tabs if appropriate) spaces.
def reindent_to(self, column):
text = self.text
text.undo_block_start()
if text.compare("insert linestart", "!=", "insert"):
text.delete("insert linestart", "insert")
if column:
text.insert("insert", self._make_blanks(column))
text.undo_block_stop()
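    # e.g. reindent_to(12) with usetabs and tabwidth 8 rewrites the leading
    # whitespace as one tab plus four spaces.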
def _asktabwidth(self):
return self.askinteger(
"Tab width",
"Columns per tab? (2-16)",
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
maxvalue=16)
# Guess indentwidth from text content.
# Return guessed indentwidth. This should not be believed unless
# it's in a reasonable range (e.g., it will be 0 if no indented
# blocks are found).
def guess_indent(self):
opener, indented = IndentSearcher(self.text, self.tabwidth).run()
if opener and indented:
raw, indentsmall = classifyws(opener, self.tabwidth)
raw, indentlarge = classifyws(indented, self.tabwidth)
else:
indentsmall = indentlarge = 0
return indentlarge - indentsmall
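    # e.g. a buffer starting "def f():\n    pass" yields opener indent 0 and
    # indented-line indent 4, so the guess is 4.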
# "line.col" -> line, as an int
def index2line(index):
return int(float(index))
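# e.g. index2line("12.5") == 12 -- only the line part of a "line.col" index.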
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
# effective # of leading blanks after expanding
# tabs to width tabwidth)
def classifyws(s, tabwidth):
raw = effective = 0
for ch in s:
if ch == ' ':
raw = raw + 1
effective = effective + 1
elif ch == '\t':
raw = raw + 1
effective = (effective // tabwidth + 1) * tabwidth
else:
break
return raw, effective
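# A doctest-style sketch of classifyws (illustration only, not executed):
#     >>> classifyws("    if x:", 8)   # four leading spaces
#     (4, 4)
#     >>> classifyws("\tif x:", 8)     # a tab expands to the next 8-column stop
#     (1, 8)
#     >>> classifyws("  \tfoo", 8)     # two spaces, then a tab
#     (3, 8)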
import tokenize
_tokenize = tokenize
del tokenize
class IndentSearcher(object):
# .run() chews over the Text widget, looking for a block opener
# and the stmt following it. Returns a pair,
# (line containing block opener, line containing stmt)
# Either or both may be None.
def __init__(self, text, tabwidth):
self.text = text
self.tabwidth = tabwidth
self.i = self.finished = 0
self.blkopenline = self.indentedline = None
def readline(self):
if self.finished:
return ""
i = self.i = self.i + 1
mark = repr(i) + ".0"
if self.text.compare(mark, ">=", "end"):
return ""
return self.text.get(mark, mark + " lineend+1c")
def tokeneater(self, type, token, start, end, line,
INDENT=_tokenize.INDENT,
NAME=_tokenize.NAME,
OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
if self.finished:
pass
elif type == NAME and token in OPENERS:
self.blkopenline = line
elif type == INDENT and self.blkopenline:
self.indentedline = line
self.finished = 1
def run(self):
save_tabsize = _tokenize.tabsize
_tokenize.tabsize = self.tabwidth
try:
try:
_tokenize.tokenize(self.readline, self.tokeneater)
except (_tokenize.TokenError, SyntaxError):
# since we cut off the tokenizer early, we can trigger
# spurious errors
pass
finally:
_tokenize.tabsize = save_tabsize
return self.blkopenline, self.indentedline
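# Typical use mirrors guess_indent above: tokenize the widget's text until a
# block opener is followed by an INDENT, e.g.
#     opener, indented = IndentSearcher(text, 8).run()
# where either element may be None if no such pair was found.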
### end autoindent code ###
def prepstr(s):
# Helper to extract the underscore from a string, e.g.
# prepstr("Co_py") returns (2, "Copy").
i = s.find('_')
if i >= 0:
s = s[:i] + s[i+1:]
return i, s
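# When there is no underscore, find() returns -1 and the string is unchanged,
# e.g. prepstr("Copy") == (-1, "Copy"), so the menu entry gets no underline.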
keynames = {
'bracketleft': '[',
'bracketright': ']',
'slash': '/',
}
def get_accelerator(keydefs, eventname):
keylist = keydefs.get(eventname)
# issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
# if not keylist:
if (not keylist) or (macosxSupport.isCocoaTk() and eventname in {
"<<open-module>>",
"<<goto-line>>",
"<<change-indentwidth>>"}):
return ""
s = keylist[0]
s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
s = re.sub("Key-", "", s)
s = re.sub("Cancel","Ctrl-Break",s) # <EMAIL>
s = re.sub("Control-", "Ctrl-", s)
s = re.sub("-", "+", s)
s = re.sub("><", " ", s)
s = re.sub("<", "", s)
s = re.sub(">", "", s)
return s
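# A worked example of the rewrite chain above, assuming this binding exists:
#     get_accelerator({'<<copy>>': ['<Control-Key-c>']}, '<<copy>>')
# goes "<Control-Key-c>" -> "<Control-Key-C>" -> "<Control-C>" -> "<Ctrl-C>"
# -> "<Ctrl+C>" -> "Ctrl+C".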
def fixwordbreaks(root):
# Make sure that Tk's double-click and next/previous word
# operations use our definition of a word (i.e. an identifier)
tk = root.tk
tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
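# After this runs, word motions and double-click selection treat exactly runs
# of [a-zA-Z0-9_] as words, so "foo_bar2" selects as one unit while "foo-bar"
# breaks at the hyphen.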
def _editor_window(parent): # htest #
# error if close master window first - timer event, after script
root = parent
fixwordbreaks(root)
if sys.argv[1:]:
filename = sys.argv[1]
else:
filename = None
macosxSupport.setupApp(root, None)
edit = EditorWindow(root=root, filename=filename)
edit.text.bind("<<close-all-windows>>", edit.close_event)
# Does not stop error, neither does following
# edit.text.bind("<<close-window>>", edit.close_event)
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_editor_window)