id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
4833005 | # Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip.cm.system import Tmos
from f5.sdk_exception import UnsupportedMethod
import mock
import pytest
@pytest.fixture
def FakeTmos():
    """A Tmos resource backed by a mocked container (no real BIG-IP needed)."""
    fake_container = mock.MagicMock()
    return Tmos(fake_container)
class TestTmos(object):
    """Tmos is an organizing resource: every CURDL verb must raise UnsupportedMethod."""

    def test_create(self, FakeTmos):
        pytest.raises(UnsupportedMethod, FakeTmos.create)

    def test_modify(self, FakeTmos):
        pytest.raises(UnsupportedMethod, FakeTmos.modify)

    def test_update(self, FakeTmos):
        pytest.raises(UnsupportedMethod, FakeTmos.update)

    def test_delete(self, FakeTmos):
        pytest.raises(UnsupportedMethod, FakeTmos.delete)
| StarcoderdataPython |
4804213 |
from collections import defaultdict
from random import sample, seed
import scrapy
import re
from datetime import datetime
from nature_news_scraper.spiders import article_crawl
class NewsSpider(scrapy.Spider):
    """Crawl nature.com article listings for one year/type and emit referenced DOIs.

    Spider arguments (passed with ``-a``): ``target_year``, ``target_type``.
    Each scraped article yields one record: {"file_id", "year", "dois"}.
    """
    name = "doi_crawl"

    def start_requests(self):
        """Request the first listing page for the configured year and article type."""
        year = int(self.target_year)
        type_article = self.target_type
        url = 'https://www.nature.com/nature/articles?type=%s&year=%d' % (type_article, year)
        yield scrapy.Request(url=url, callback=self.parse_article_list, cb_kwargs=dict(year=year), dont_filter=True)

    def parse_article_list(self, response, year=None):
        """Yield one scrape task per article on the page, plus one for the next page (if any)."""
        # Articles are marked up with schema.org ScholarlyArticle microdata;
        # we use CSS attribute selectors throughout.
        articles = response.css('article[itemtype="http://schema.org/ScholarlyArticle"]')
        for article in articles:
            link = article.css('a[itemprop="url"]::attr(href)').get()
            if link is not None:
                full_link = response.urljoin(link)
                print("Article: %s" % full_link)
                yield scrapy.Request(url=full_link, callback=self.parse_article, cb_kwargs=dict(year=year), dont_filter=True)
        # Follow pagination, if a "next" link exists.
        next_page = response.css('ul.c-pagination > li[data-page="next"] > a::attr(href)').get()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            print("next page: %s" % next_page)
            yield scrapy.Request(next_page, callback=self.parse_article_list, cb_kwargs=dict(year=year))

    def parse_article(self, response, year=None):
        """Extract DOIs from the article's reference section and yield one record.

        Three page layouts are tried in turn: '#article-refrences' (the site's
        own spelling, pre-2011), '#references' (post-2010), and finally
        'div[data-container-section="references"]'.
        """
        # BUG FIX: removed a redundant function-level `import re` (already
        # imported at module scope) and a dead bare `doi_str` expression.
        doi_re = re.compile(r"doi:10\.[.0-9]+/[^\s<>]+")
        # DOIs that appear inside anchor hrefs.
        doi_box = response.css('#article-refrences a')
        href_doi = [x.attrib.get('href') for x in doi_box if 'doi' in x.attrib.get('href', '')]
        # DOIs written as plain "doi:..." text.
        refbox = response.css('#article-refrences')
        text_doi = doi_re.findall(" ".join(refbox.getall()))
        # Starting around 2011, the reference markup changed.
        if len(text_doi) == 0 and len(href_doi) == 0 and year > 2010:
            doi_box = response.css('#references a')
            href_doi = [x.attrib.get('href') for x in doi_box if 'doi' in x.attrib.get('href', '')]
            refbox = response.css('#references')
            # Normalize https://doi.org/... URLs to "doi:..." before matching.
            doi_re_url = [re.sub(r"http[s]?://(dx[./])?doi\.org/", "doi:", x) for x in refbox.getall()]
            text_doi = doi_re.findall(" ".join(doi_re_url))
            if len(text_doi) == 0 and len(href_doi) == 0:
                # If it's *still* empty, it's probably b/c the refs box changed
                # from id="references" to data-container-section="references".
                doi_box = response.css('div[data-container-section="references"] a')
                href_doi = [x.attrib.get('href') for x in doi_box if 'doi' in x.attrib.get('href', '')]
                refbox = response.css('div[data-container-section="references"]')
                doi_re_url = [re.sub(r"http[s]?://(dx[./])?doi\.org/", "doi:", x) for x in refbox.getall()]
                text_doi = doi_re.findall(" ".join(doi_re_url))
        # Normalize URL-style DOIs, decode %2F to '/', and dedupe.
        all_doi = set(re.sub(r"http[s]?://(dx[./])?doi\.org/", "doi:", x) for x in href_doi + text_doi)
        all_doi = set(re.sub(r"[%][2][F]", "/", x) for x in all_doi)
        doi_str = ", ".join(all_doi)
        yield {
            "file_id": response.url.split("/")[-1],
            "year": year,
            "dois": doi_str
        }
| StarcoderdataPython |
3661 | <reponame>meysam81/sheypoor
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app import api
from app.core.config import config
app = FastAPI(title="Sheypoor")

# Set all CORS enabled origins: accept any origin, with credentials,
# for every method and header.
cors_settings = dict(
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
app.add_middleware(CORSMiddleware, **cors_settings)

# Mount the API routes under the configured prefix.
app.include_router(api.router, prefix=config.API_URI)
| StarcoderdataPython |
1783262 | <gh_stars>0
from LinkedList import LinkedList, Node
def naiveFillSmallLinkedList(val, nodeList, node):
    """Append *val* to *nodeList*, where *node* is its current tail.

    Returns the new tail node and the list (both must be kept by the caller).
    """
    if node is None:
        # Empty list: the new node becomes the head and the tail.
        node = Node(val)
        nodeList.head = node
        return node, nodeList
    new_tail = Node(val)
    node.next = new_tail
    return new_tail, nodeList
def naivePartition(ll, x):
    """Partition linked list *ll* around pivot *x* using two auxiliary lists.

    Values < x keep their relative order in front of values >= x.
    Returns a LinkedList (None for an empty input).
    """
    if ll.head is None:
        return None
    small = LinkedList()
    large = LinkedList()
    smallNode = None
    largeNode = None
    node = ll.head
    while node:
        if node.data < x:
            smallNode, small = naiveFillSmallLinkedList(
                node.data, small, smallNode)
        else:
            largeNode, large = naiveFillSmallLinkedList(
                node.data, large, largeNode)
        node = node.next
    if small.head:
        if large.head:
            # Stitch the "large" section onto the tail of the "small" section.
            smallNode.next = large.head
        # BUG FIX: previously nothing was returned when every value was < x
        # (the function fell through and returned None).
        return small
    # CONSISTENCY FIX: return the LinkedList (as the other branch does),
    # not the raw head node, so callers can use LinkedList methods.
    return large
def optimizedPartition(ll, x):
    """Single-pass in-place partition of *ll* around pivot *x*.

    Nodes with data < x are pushed to the front; the rest are appended
    behind the tail. Returns a new LinkedList wrapper (None for empty input).
    """
    if ll.head is None:
        return None
    head = ll.head
    tail = ll.head
    current = ll.head
    while current:
        upcoming = current.next
        if current.data < x:
            # Prepend to the growing front ("small") section.
            current.next = head
            head = current
        else:
            # Append behind the tail of the back ("large") section.
            tail.next = current
            tail = current
        current = upcoming
    # Terminate the list: the tail may still point into the front section.
    tail.next = None
    partitioned = LinkedList()
    partitioned.head = head
    return partitioned
# Demo: partition the list 5 -> 3 -> 7 -> 1 around pivot 5 with both algorithms.
ll = LinkedList()
nodes = [Node(v) for v in (5, 3, 7, 1)]
ll.head = nodes[0]
for left, right in zip(nodes, nodes[1:]):
    left.next = right
naivePartition(ll, 5).prettyPrint()
optimizedPartition(ll, 5).prettyPrint()
| StarcoderdataPython |
19877 | <gh_stars>0
import png
import numpy
import pprint
import math
import re
def gen_background(width, height, mag, b_col):
    """Return a (height*mag, width*mag, 4) RGBA array with a grid drawn in *b_col*.

    One-pixel grid lines are drawn on the first and last row/column of every
    mag x mag cell; all other pixels stay transparent black (zeros).

    Args:
        width, height: canvas size in logical pixels (cells)
        mag: magnification factor (cell size in device pixels)
        b_col: RGBA colour for the grid lines (length-4 uint8 array/tuple)
    """
    # BUG FIX: the buffer was allocated as (width*mag, height*mag, 4) but
    # indexed [y][x], which raised IndexError for any non-square canvas;
    # rows (height) must come first.
    bg = numpy.zeros((height * mag, width * mag, 4), dtype=numpy.uint8)
    # Top edge and left edge of every cell (rows/cols 0, mag, 2*mag, ...).
    bg[0::mag, :] = b_col
    bg[:, 0::mag] = b_col
    # Bottom edge and right edge of every cell (rows/cols mag-1, 2*mag-1, ...),
    # matching the original range(-1, n*mag, mag) loops that skipped -1.
    bg[mag - 1::mag, :] = b_col
    bg[:, mag - 1::mag] = b_col
    return bg
class picture(object):
    """An RGBA pixel canvas rendered at 32x magnification over a grid background.

    Pixels are alpha-composited onto a light-grey canvas; save() scales the
    canvas up and composites it over the grid produced by gen_background().
    """

    def __init__(self, width, height):
        # Flat (height, width*4) byte buffer, light grey (220) everywhere.
        self.__array = numpy.full((height, width * 4), 220, dtype=numpy.uint8)
        # Per-pixel RGBA view: one row per logical pixel, row-major order.
        self.__view = self.__array.view().reshape(-1, 4)
        self.__width = width
        self.__height = height
        self.__mag = 32  # magnification factor used by save()
        bg = gen_background(width, height, self.__mag,
                            numpy.array((192, 192, 192, 64), dtype=numpy.uint8))
        # Pre-computed background RGB (0..1 floats) for compositing in save().
        self.__dst_rgb = bg[..., :3].astype(numpy.float32) / 255.0

    def put_pixel(self, x, y, color):
        """Alpha-blend RGBA *color* onto the pixel at (x, y)."""
        # FIX: removed unused locals `row`/`col` from the original.
        c = numpy.array(color)
        src_alpha = c[3] / 255.0
        idx = self.__width * y + x
        # Standard "source over" blend: out = src*a + dst*(1-a).
        self.__view[idx] = c * src_alpha + (1.0 - src_alpha) * self.__view[idx]

    def save(self, filename):
        """Magnify the canvas, composite it over the grid background, write a PNG."""
        mag = self.__mag
        v = self.__array.view().reshape(self.__height, self.__width, 4)
        # Nearest-neighbour upscale by the magnification factor.
        a = v.repeat(mag, axis=0).repeat(mag, axis=1)
        src_rgb = a[..., :3].astype(numpy.float32) / 255.0
        src_a = a[..., 3].astype(numpy.float32) / 255.0
        # FIX: removed unused local `out_a` from the original.
        out_rgb = (src_rgb * src_a[..., None] + self.__dst_rgb * (1.0 - src_a[..., None]))
        out = numpy.zeros_like(a)
        out[..., :3] = out_rgb * 255
        out[..., 3] = 255  # final image is fully opaque
        sv = out.view().reshape(self.__height * mag, self.__width * mag * 4)
        png.from_array(sv, mode='RGBA').save(filename)
# Matches any line containing the literal substring "True"; used by load_map()
# to read the expected-result flag from a map file's first line.
TRUE_RE = re.compile(".*True.*")
def load_map(filename, canvas_size):
    """Parse an ASCII map file and centre it on a canvas of *canvas_size* cells.

    File format: line 1 is an expected-result flag (contains "True" or not);
    the rest is the map, where '#' = wall, '@' = player, 'h' = enemy.

    Returns (grid, player_pos, enemy_pos, expected_result, width, height)
    where grid is a uint8 array with 0=empty, 1=wall, 2=player, 3=enemy.
    Exits the process if the map does not fit the canvas.
    """
    c_width, c_height = canvas_size
    player_pos = (0, 0)
    enemy_pos = (0, 0)
    with open(filename, "r") as f:
        lines = f.readlines()
    expected_result = TRUE_RE.match(lines[0]) is not None
    lines = lines[1:]
    height = len(lines)
    width = max(len(row.rstrip("\n")) for row in lines)
    if height > c_height:
        print("Map {0} height dimension doesn't fit the canvas!".format(filename))
        exit(1)
    if width > c_width:
        print("Map {0} width dimenstion doesn't fit the canvas!".format(filename))
        exit(1)
    # Centre the map on the canvas.
    x_pad = (c_width - width) // 2
    y_pad = (c_height - height) // 2
    m = numpy.zeros((c_height, c_width), numpy.uint8)
    for y, row in enumerate(lines):
        for x, ch in enumerate(row):
            if ch == "#":
                m[y + y_pad][x + x_pad] = 1
            elif ch == "@":
                m[y + y_pad][x + x_pad] = 2
                player_pos = (x + x_pad, y + y_pad)
            elif ch == "h":
                m[y + y_pad][x + x_pad] = 3
                enemy_pos = (x + x_pad, y + y_pad)
    return (m, player_pos, enemy_pos, expected_result, width, height)
def draw_map(m, p):
    """Render map grid *m* onto picture *p*: 1=wall, 2=player, 3=enemy; 0 is skipped."""
    palette = {
        1: (64, 64, 64, 220),   # wall: dark grey
        2: (0, 255, 0, 220),    # player: green
        3: (255, 0, 0, 220),    # enemy: red
    }
    for y, row in enumerate(m):
        for x, cell in enumerate(row):
            color = palette.get(cell)
            if color is not None:
                p.put_pixel(x, y, color)
def draw_route(route, p, c):
    """Plot every (x, y) waypoint of *route* onto picture *p* in colour *c*."""
    for x, y in route:
        p.put_pixel(x, y, c)
| StarcoderdataPython |
182911 | from pudzu.charts import *
from pudzu.dates import *
import dateparser
# -------------
# G7 time chart
# -------------
# Chart window: tenures are clipped to [1 January 1960, today].
START = dateparser.parse('1 January 1960').date()
END = datetime.date.today()
def duration(d):
    """Length of term row *d*, clipped to the [START, END] chart window.

    A missing 'end' field means the term is ongoing (treated as END).
    """
    term_end = dateparser.parse(get_non(d, 'end', END.isoformat())).date()
    term_start = max(START, dateparser.parse(d['start']).date())
    return term_end - term_start
def percentage_left(df):
    """Fraction of the group's total tenure spent under left-of-centre governments."""
    zero = datetime.timedelta(0)
    left_time = sum((duration(d) for _, d in df[df.spectrum == "left"].iterrows()), zero)
    total_time = sum((duration(d) for _, d in df.iterrows()), zero)
    return left_time / total_time
# Load the G7 heads-of-government dataset and group rows by "Country (Office)".
df = pd.read_csv("datasets/g7.csv")
groups = df.groupby(by=lambda idx: "{} ({})".format(df['country'][idx], df['office'][idx]))
# Order the groups by share of time under left-of-centre government, descending.
group_order = sorted(list(groups.groups), key=lambda s: percentage_left(groups.get_group(s)), reverse=True)
data = [groups.get_group(g) for g in group_order]
# Bar colour per political spectrum (UK party-colour conventions, see footer).
colorfn = lambda d: {"left": "#d62728", "right": "#393b79", "centre": "#e7ba52"}[d['spectrum']]
startfn = lambda d: dateparser.parse(d['start']).date()
endfn = lambda d: dateparser.parse(get_non(d, 'end', END.isoformat())).date()
# Interval label: the leader's surname (last whitespace-separated token).
labelfn = lambda d: Image.from_text(d['name'].split(" ")[-1], arial(10), padding=(2), fg="white", bg=colorfn(d))
# Right-hand labels: each group's left-of-centre percentage.
# (NOTE: the comprehension variable rebinds `df`; harmless here since the
# dataframe is not used again below, but it shadows the name.)
labels = ["{:.0%}".format(percentage_left(df)) for df in data]
title = Image.from_text("G7 countries by time spent under left-of-centre governments (1960-present)", arial(30, bold=True), fg="white").pad((0,5,0,30),bg="black")
chart = time_chart(1200, 40, data, startfn, endfn, colorfn, interval_label_key=labelfn,
                   xmin=START, label_font=arial(16), labels_left=group_order, labels_right=labels, title=title,
                   grid_interval=DateInterval(years=10), grid_font=arial(16), grid_labels=lambda v: str(v.year)).pad(5, "black")
# Legend helpers: a coloured swatch and a caption.
def box(s): return Image.new("RGBA", (20,20), colorfn({"spectrum": s}))
def label(s): return Image.from_text(s, arial(12), fg="white")
footer_row = [box("left"), label("left-of-centre"), box("centre"), label("centrist"), box("right"), label("right-of-centre"),
              Image.new("RGBA", (50,0)),
              Image.from_text("Colours are standard UK colours for conservatism, liberalism and social democracy.", arial(16), fg="white"),
              Image.from_text("Note that they differ from the ones used in the US since 2000.", arial(16, bold=True), fg="white")]
footer = Image.from_row(footer_row, padding=3, bg="black")
img = Image.from_column([chart, footer], bg="black", padding=(0,20))
img.save("output/politics_g7.png")
| StarcoderdataPython |
1682108 | <filename>asynchronous_qiwi/call/API/QIWIWallet/balance_api/create_balance.py
from loguru import logger
from aiohttp import ClientError
from .....data.URL import QIWIWalletURLS
from .....connector.aiohttp_connector import Connector
from .....data_types.connector.request_type import POST
class CreateBalanceAPI:
    """Wrapper around the QIWI wallet "create balance" endpoint."""

    @staticmethod
    async def create_balance(wallet_api_key: str, phone_number: str, alias: str) -> bool:
        """Create a new wallet balance named *alias*; True on success, False on API error."""
        url = QIWIWalletURLS.Balance.balance.format(phone_number)
        headers = {
            "Accept": "application/json",
            "Content-type": "application/json",
            "Authorization": f"Bearer {wallet_api_key}",
        }
        payload = {"alias": alias}
        try:
            await Connector.request(url=url, headers=headers, json=payload, request_type=POST)
        except ClientError:
            # The API rejects duplicate or invalid aliases with a client error.
            logger.warning(f"You can't create balance with this alias or this alias is created: {alias}")
            return False
        return True
| StarcoderdataPython |
3231792 | <reponame>jonathanvevance/predicting_fgroups_ddp<filename>src/utils/train_utils.py
"""MIL functions."""
import torch
import torch.nn as nn
from sklearn.metrics import precision_recall_fscore_support
from tqdm import tqdm
class weightedLoss(nn.Module):
    """Wrap an element-wise torch loss with per-class weights for imbalanced targets."""

    def __init__(self, torch_loss, weight, device):
        """
        Args:
            torch_loss : torch criterion class (NOT OBJECT), e.g. nn.BCELoss
            weight : 1-D tensor of per-class weights, indexed by integer label,
                dealing with class imbalance
            device : device the weights should live on
        """
        super(weightedLoss, self).__init__()
        self.weight = weight.to(device)
        # reduction='none' keeps element-wise losses so they can be weighted.
        self.criterion = torch_loss(reduction = 'none')

    def forward(self, output, labels):
        """Return the class-weighted mean of the element-wise loss."""
        # Look up each element's class weight; labels (0/1 floats) are used as
        # indices. FIX: dropped the deprecated `.data` access — plain tensor
        # indexing is the supported equivalent.
        weight_ = self.weight[labels.view(-1).long()].view_as(labels)
        loss = self.criterion(output, labels)
        loss_class_weighted = loss * weight_
        return loss_class_weighted.mean()
class EarlyStopping():
    """Stop training when validation loss fails to improve by *delta* (relative)
    for more than *patience* consecutive checks; keeps the best state_dict."""

    def __init__(self, patience = 10, delta = 0):
        self.patience = patience
        self.delta = delta
        self.val_loss_min = None        # best (lowest) validation loss so far
        self.saved_state_dict = None    # snapshot of the model at the best loss
        self.counter = 0                # consecutive checks without improvement

    def __call__(self, val_loss, model):
        """Record *val_loss*; return True when training should stop."""
        if self.val_loss_min is None:
            # First observation: treat it as the best so far.
            self.val_loss_min = val_loss
            self.saved_state_dict = model.state_dict()
            return False
        relative_gain = (self.val_loss_min - val_loss) / self.val_loss_min
        if relative_gain >= self.delta:
            # Improved enough: snapshot the weights and reset the counter.
            self.counter = 0
            self.val_loss_min = val_loss
            self.saved_state_dict = model.state_dict()
            return False
        self.counter += 1
        return self.counter > self.patience
def evaluate_model(model, criterion, val_loader, device, epoch):
    """Run one validation pass over val_loader and return the average batch loss.

    Side effects: puts the model in eval mode, drives a tqdm progress bar, and
    prints accuracy/F1 (via print_model_accuracy) for the *last* batch only.

    NOTE(review): inference is not wrapped in torch.no_grad(), so gradients
    are still tracked here — confirm whether that is intentional.
    NOTE(review): an empty val_loader would leave val_losses_avg/outputs
    unbound and raise UnboundLocalError after the loop.
    """
    model.eval()
    val_losses_total = 0
    with tqdm(val_loader, unit = "batch", leave = True) as tqdm_progressbar:
        for idx, (inputs, labels) in enumerate(tqdm_progressbar):
            tqdm_progressbar.set_description(f"Epoch {epoch} (validating)")
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            val_losses_total += loss.item()
            # Running mean of the batch losses seen so far.
            val_losses_avg = val_losses_total / (idx + 1)
            tqdm_progressbar.set_postfix(val_loss = val_losses_avg)
    # Metrics below are computed on the final batch only.
    print_model_accuracy(outputs, labels, loss, threshold = 0.5, mode = "val")
    return val_losses_avg
def get_classification_metrics(bin_outputs, labels):
    """Per-class precision/recall/F1 (sklearn) plus overall accuracy on flattened tensors."""
    preds = torch.flatten(bin_outputs).cpu()
    truth = torch.flatten(labels).cpu()
    precision, recall, fscore, _ = precision_recall_fscore_support(truth, preds)
    accuracy = torch.sum(preds == truth) / truth.nelement()
    return precision, recall, fscore, accuracy
def print_model_accuracy(outputs, labels, loss, threshold = 0.5, mode = 'train'):
    """Binarize *outputs* at *threshold* and print accuracy, loss and mean F1."""
    binarized = (outputs > threshold).float()
    precision, recall, fscore, accuracy = get_classification_metrics(binarized, labels)
    mean_f1 = sum(fscore) / len(fscore)
    print(
        f"{mode} minibatch :: accuracy =", accuracy.item(),
        "loss =", loss.item(), f"f1 score = {mean_f1}"
    )
def save_model(model, save_path):
    """Serialize the model's parameters (state_dict) to *save_path*."""
    state = model.state_dict()
    torch.save(state, save_path)
def load_model(model, load_path, device):
    """Load a state_dict from *load_path* (mapped onto *device*) into *model*.

    Returns the same model instance for convenience.
    """
    # FIX: removed a stray dataset-marker fragment that had been fused onto
    # the return statement.
    model.load_state_dict(torch.load(load_path, map_location = device))
    return model
4821759 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_file', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='file',
name='categories',
field=models.ManyToManyField(related_name='icekit_plugins_file_file_related', to='icekit.MediaCategory', blank=True),
),
migrations.AlterModelTable(
name='file',
table=None,
),
migrations.AlterModelTable(
name='fileitem',
table='contentitem_icekit_plugins_file_fileitem',
),
migrations.RunSQL(
"UPDATE django_content_type SET app_label='icekit_plugins_file' WHERE app_label='file';"
),
]
| StarcoderdataPython |
6428 | import os
import math
import time
import geohash
import geojson
from geojson import MultiLineString
from shapely import geometry
import shapefile
import numpy
import datetime as dt
import pandas as pd
import logging
logger = logging.getLogger(__name__)
# Scan per-storm shapefile folders and collect each shapefile's first-record
# start/end times, rounded to the hour, into noaa_times.csv.
source_shape_file_path = "C:/temp/2018/"
threshold = 60*60  # rounding granularity: one hour, in seconds
cols = ['start', 'end','start_epoch_round','end_epoch_round','start_epoch_round_dt','end_epoch_round_dt']
times = []
for root,dirs,files in os.walk(source_shape_file_path):
    for file in files:
        with open(os.path.join(root,file),"r") as auto:
            if file.endswith(".shp"):
                try:
                    filename = file.replace(".shp","")
                    shape=shapefile.Reader(source_shape_file_path+filename+"/"+file)
                    for r in shape.iterRecords():
                        # Record fields 1 and 2 hold 'YYYYjjj HHMM'
                        # (year + day-of-year) timestamps.
                        start_time = dt.datetime.strptime(r[1], '%Y%j %H%M')
                        end_time = dt.datetime.strptime(r[2], '%Y%j %H%M')
                        epoch_s = dt.datetime.timestamp(dt.datetime.strptime(r[1], '%Y%j %H%M'))
                        epoch_e = dt.datetime.timestamp(dt.datetime.strptime(r[2], '%Y%j %H%M'))
                        # sometimes start is later than end time, we'll assume the earlier time is start
                        epoch_end_round = round(max(epoch_s,epoch_e) / threshold) * threshold
                        epoch_start_round = round(min(epoch_s,epoch_e) / threshold) * threshold
                        epoch_end_round_dt = dt.datetime.utcfromtimestamp(3600 * ((max(epoch_s,epoch_e) + 1800) // 3600))
                        epoch_start_round_dt = dt.datetime.utcfromtimestamp(3600 * ((min(epoch_s,epoch_e) + 1800) // 3600))
                        times.append([start_time,end_time,epoch_start_round,epoch_end_round,epoch_start_round_dt,epoch_end_round_dt])
                        # Only the first record of each shapefile is needed.
                        break
                except Exception:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    logger.error('failed to parse file:'+source_shape_file_path+filename+"/")
                    continue
df = pd.DataFrame(times, columns=cols)
df.to_csv('noaa_times.csv')
| StarcoderdataPython |
3202532 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from rest_framework_tus.views import UploadViewSet
from .routers import TusAPIRouter
router = TusAPIRouter()
router.register(r'files', UploadViewSet, basename='upload')
urlpatterns = [
url(r'', include((router.urls, 'rest_framework_tus'), namespace='api'))
]
app_name = 'rest_framework_tus'
| StarcoderdataPython |
80941 | # coding: utf-8
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
"""
ProductAdvertisingAPI
https://webservices.amazon.com/paapi5/documentation/index.html # noqa: E501
"""
import pprint
import re # noqa: F401
import six
from .availability import Availability # noqa: F401,E501
from .condition import Condition # noqa: F401,E501
from .delivery_flag import DeliveryFlag # noqa: F401,E501
from .max_price import MaxPrice # noqa: F401,E501
from .merchant import Merchant # noqa: F401,E501
from .min_price import MinPrice # noqa: F401,E501
from .min_reviews_rating import MinReviewsRating # noqa: F401,E501
from .min_saving_percent import MinSavingPercent # noqa: F401,E501
from .offer_count import OfferCount # noqa: F401,E501
from .partner_type import PartnerType # noqa: F401,E501
from .properties import Properties # noqa: F401,E501
from .search_items_resource import SearchItemsResource # noqa: F401,E501
from .sort_by import SortBy # noqa: F401,E501
class SearchItemsRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'actor': 'str',
'artist': 'str',
'author': 'str',
'availability': 'Availability',
'brand': 'str',
'browse_node_id': 'str',
'condition': 'Condition',
'currency_of_preference': 'str',
'delivery_flags': 'list[DeliveryFlag]',
'item_count': 'int',
'item_page': 'int',
'keywords': 'str',
'languages_of_preference': 'list[str]',
'marketplace': 'str',
'max_price': 'MaxPrice',
'merchant': 'Merchant',
'min_price': 'MinPrice',
'min_reviews_rating': 'MinReviewsRating',
'min_saving_percent': 'MinSavingPercent',
'offer_count': 'OfferCount',
'partner_tag': 'str',
'partner_type': 'PartnerType',
'properties': 'Properties',
'resources': 'list[SearchItemsResource]',
'search_index': 'str',
'sort_by': 'SortBy',
'title': 'str'
}
attribute_map = {
'actor': 'Actor',
'artist': 'Artist',
'author': 'Author',
'availability': 'Availability',
'brand': 'Brand',
'browse_node_id': 'BrowseNodeId',
'condition': 'Condition',
'currency_of_preference': 'CurrencyOfPreference',
'delivery_flags': 'DeliveryFlags',
'item_count': 'ItemCount',
'item_page': 'ItemPage',
'keywords': 'Keywords',
'languages_of_preference': 'LanguagesOfPreference',
'marketplace': 'Marketplace',
'max_price': 'MaxPrice',
'merchant': 'Merchant',
'min_price': 'MinPrice',
'min_reviews_rating': 'MinReviewsRating',
'min_saving_percent': 'MinSavingPercent',
'offer_count': 'OfferCount',
'partner_tag': 'PartnerTag',
'partner_type': 'PartnerType',
'properties': 'Properties',
'resources': 'Resources',
'search_index': 'SearchIndex',
'sort_by': 'SortBy',
'title': 'Title'
}
def __init__(self, actor=None, artist=None, author=None, availability=None, brand=None, browse_node_id=None, condition=None, currency_of_preference=None, delivery_flags=None, item_count=None, item_page=None, keywords=None, languages_of_preference=None, marketplace=None, max_price=None, merchant=None, min_price=None, min_reviews_rating=None, min_saving_percent=None, offer_count=None, partner_tag=None, partner_type=None, properties=None, resources=None, search_index=None, sort_by=None, title=None): # noqa: E501
"""SearchItemsRequest - a model defined in Swagger""" # noqa: E501
self._actor = None
self._artist = None
self._author = None
self._availability = None
self._brand = None
self._browse_node_id = None
self._condition = None
self._currency_of_preference = None
self._delivery_flags = None
self._item_count = None
self._item_page = None
self._keywords = None
self._languages_of_preference = None
self._marketplace = None
self._max_price = None
self._merchant = None
self._min_price = None
self._min_reviews_rating = None
self._min_saving_percent = None
self._offer_count = None
self._partner_tag = None
self._partner_type = None
self._properties = None
self._resources = None
self._search_index = None
self._sort_by = None
self._title = None
self.discriminator = None
if actor is not None:
self.actor = actor
if artist is not None:
self.artist = artist
if author is not None:
self.author = author
if availability is not None:
self.availability = availability
if brand is not None:
self.brand = brand
if browse_node_id is not None:
self.browse_node_id = browse_node_id
if condition is not None:
self.condition = condition
if currency_of_preference is not None:
self.currency_of_preference = currency_of_preference
if delivery_flags is not None:
self.delivery_flags = delivery_flags
if item_count is not None:
self.item_count = item_count
if item_page is not None:
self.item_page = item_page
if keywords is not None:
self.keywords = keywords
if languages_of_preference is not None:
self.languages_of_preference = languages_of_preference
if marketplace is not None:
self.marketplace = marketplace
if max_price is not None:
self.max_price = max_price
if merchant is not None:
self.merchant = merchant
if min_price is not None:
self.min_price = min_price
if min_reviews_rating is not None:
self.min_reviews_rating = min_reviews_rating
if min_saving_percent is not None:
self.min_saving_percent = min_saving_percent
if offer_count is not None:
self.offer_count = offer_count
self.partner_tag = partner_tag
self.partner_type = partner_type
if properties is not None:
self.properties = properties
if resources is not None:
self.resources = resources
if search_index is not None:
self.search_index = search_index
if sort_by is not None:
self.sort_by = sort_by
if title is not None:
self.title = title
@property
def actor(self):
"""Gets the actor of this SearchItemsRequest. # noqa: E501
:return: The actor of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._actor
@actor.setter
def actor(self, actor):
"""Sets the actor of this SearchItemsRequest.
:param actor: The actor of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._actor = actor
@property
def artist(self):
"""Gets the artist of this SearchItemsRequest. # noqa: E501
:return: The artist of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._artist
@artist.setter
def artist(self, artist):
"""Sets the artist of this SearchItemsRequest.
:param artist: The artist of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._artist = artist
@property
def author(self):
"""Gets the author of this SearchItemsRequest. # noqa: E501
:return: The author of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._author
@author.setter
def author(self, author):
"""Sets the author of this SearchItemsRequest.
:param author: The author of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._author = author
@property
def availability(self):
"""Gets the availability of this SearchItemsRequest. # noqa: E501
:return: The availability of this SearchItemsRequest. # noqa: E501
:rtype: Availability
"""
return self._availability
@availability.setter
def availability(self, availability):
"""Sets the availability of this SearchItemsRequest.
:param availability: The availability of this SearchItemsRequest. # noqa: E501
:type: Availability
"""
self._availability = availability
@property
def brand(self):
"""Gets the brand of this SearchItemsRequest. # noqa: E501
:return: The brand of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._brand
@brand.setter
def brand(self, brand):
"""Sets the brand of this SearchItemsRequest.
:param brand: The brand of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._brand = brand
@property
def browse_node_id(self):
"""Gets the browse_node_id of this SearchItemsRequest. # noqa: E501
:return: The browse_node_id of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._browse_node_id
@browse_node_id.setter
def browse_node_id(self, browse_node_id):
"""Sets the browse_node_id of this SearchItemsRequest.
:param browse_node_id: The browse_node_id of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._browse_node_id = browse_node_id
@property
def condition(self):
"""Gets the condition of this SearchItemsRequest. # noqa: E501
:return: The condition of this SearchItemsRequest. # noqa: E501
:rtype: Condition
"""
return self._condition
@condition.setter
def condition(self, condition):
"""Sets the condition of this SearchItemsRequest.
:param condition: The condition of this SearchItemsRequest. # noqa: E501
:type: Condition
"""
self._condition = condition
@property
def currency_of_preference(self):
"""Gets the currency_of_preference of this SearchItemsRequest. # noqa: E501
:return: The currency_of_preference of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._currency_of_preference
@currency_of_preference.setter
def currency_of_preference(self, currency_of_preference):
"""Sets the currency_of_preference of this SearchItemsRequest.
:param currency_of_preference: The currency_of_preference of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._currency_of_preference = currency_of_preference
@property
def delivery_flags(self):
"""Gets the delivery_flags of this SearchItemsRequest. # noqa: E501
:return: The delivery_flags of this SearchItemsRequest. # noqa: E501
:rtype: list[DeliveryFlag]
"""
return self._delivery_flags
@delivery_flags.setter
def delivery_flags(self, delivery_flags):
"""Sets the delivery_flags of this SearchItemsRequest.
:param delivery_flags: The delivery_flags of this SearchItemsRequest. # noqa: E501
:type: list[DeliveryFlag]
"""
self._delivery_flags = delivery_flags
@property
def item_count(self):
"""Gets the item_count of this SearchItemsRequest. # noqa: E501
:return: The item_count of this SearchItemsRequest. # noqa: E501
:rtype: int
"""
return self._item_count
@item_count.setter
def item_count(self, item_count):
"""Sets the item_count of this SearchItemsRequest.
:param item_count: The item_count of this SearchItemsRequest. # noqa: E501
:type: int
"""
self._item_count = item_count
@property
def item_page(self):
"""Gets the item_page of this SearchItemsRequest. # noqa: E501
:return: The item_page of this SearchItemsRequest. # noqa: E501
:rtype: int
"""
return self._item_page
@item_page.setter
def item_page(self, item_page):
"""Sets the item_page of this SearchItemsRequest.
:param item_page: The item_page of this SearchItemsRequest. # noqa: E501
:type: int
"""
self._item_page = item_page
@property
def keywords(self):
"""Gets the keywords of this SearchItemsRequest. # noqa: E501
:return: The keywords of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._keywords
@keywords.setter
def keywords(self, keywords):
"""Sets the keywords of this SearchItemsRequest.
:param keywords: The keywords of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._keywords = keywords
@property
def languages_of_preference(self):
"""Gets the languages_of_preference of this SearchItemsRequest. # noqa: E501
:return: The languages_of_preference of this SearchItemsRequest. # noqa: E501
:rtype: list[str]
"""
return self._languages_of_preference
@languages_of_preference.setter
def languages_of_preference(self, languages_of_preference):
"""Sets the languages_of_preference of this SearchItemsRequest.
:param languages_of_preference: The languages_of_preference of this SearchItemsRequest. # noqa: E501
:type: list[str]
"""
self._languages_of_preference = languages_of_preference
@property
def marketplace(self):
"""Gets the marketplace of this SearchItemsRequest. # noqa: E501
:return: The marketplace of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._marketplace
@marketplace.setter
def marketplace(self, marketplace):
"""Sets the marketplace of this SearchItemsRequest.
:param marketplace: The marketplace of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._marketplace = marketplace
@property
def max_price(self):
"""Gets the max_price of this SearchItemsRequest. # noqa: E501
:return: The max_price of this SearchItemsRequest. # noqa: E501
:rtype: MaxPrice
"""
return self._max_price
@max_price.setter
def max_price(self, max_price):
"""Sets the max_price of this SearchItemsRequest.
:param max_price: The max_price of this SearchItemsRequest. # noqa: E501
:type: MaxPrice
"""
self._max_price = max_price
@property
def merchant(self):
"""Gets the merchant of this SearchItemsRequest. # noqa: E501
:return: The merchant of this SearchItemsRequest. # noqa: E501
:rtype: Merchant
"""
return self._merchant
@merchant.setter
def merchant(self, merchant):
"""Sets the merchant of this SearchItemsRequest.
:param merchant: The merchant of this SearchItemsRequest. # noqa: E501
:type: Merchant
"""
self._merchant = merchant
@property
def min_price(self):
"""Gets the min_price of this SearchItemsRequest. # noqa: E501
:return: The min_price of this SearchItemsRequest. # noqa: E501
:rtype: MinPrice
"""
return self._min_price
@min_price.setter
def min_price(self, min_price):
"""Sets the min_price of this SearchItemsRequest.
:param min_price: The min_price of this SearchItemsRequest. # noqa: E501
:type: MinPrice
"""
self._min_price = min_price
@property
def min_reviews_rating(self):
"""Gets the min_reviews_rating of this SearchItemsRequest. # noqa: E501
:return: The min_reviews_rating of this SearchItemsRequest. # noqa: E501
:rtype: MinReviewsRating
"""
return self._min_reviews_rating
@min_reviews_rating.setter
def min_reviews_rating(self, min_reviews_rating):
"""Sets the min_reviews_rating of this SearchItemsRequest.
:param min_reviews_rating: The min_reviews_rating of this SearchItemsRequest. # noqa: E501
:type: MinReviewsRating
"""
self._min_reviews_rating = min_reviews_rating
@property
def min_saving_percent(self):
"""Gets the min_saving_percent of this SearchItemsRequest. # noqa: E501
:return: The min_saving_percent of this SearchItemsRequest. # noqa: E501
:rtype: MinSavingPercent
"""
return self._min_saving_percent
@min_saving_percent.setter
def min_saving_percent(self, min_saving_percent):
"""Sets the min_saving_percent of this SearchItemsRequest.
:param min_saving_percent: The min_saving_percent of this SearchItemsRequest. # noqa: E501
:type: MinSavingPercent
"""
self._min_saving_percent = min_saving_percent
@property
def offer_count(self):
"""Gets the offer_count of this SearchItemsRequest. # noqa: E501
:return: The offer_count of this SearchItemsRequest. # noqa: E501
:rtype: OfferCount
"""
return self._offer_count
@offer_count.setter
def offer_count(self, offer_count):
"""Sets the offer_count of this SearchItemsRequest.
:param offer_count: The offer_count of this SearchItemsRequest. # noqa: E501
:type: OfferCount
"""
self._offer_count = offer_count
@property
def partner_tag(self):
"""Gets the partner_tag of this SearchItemsRequest. # noqa: E501
:return: The partner_tag of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._partner_tag
@partner_tag.setter
def partner_tag(self, partner_tag):
"""Sets the partner_tag of this SearchItemsRequest.
:param partner_tag: The partner_tag of this SearchItemsRequest. # noqa: E501
:type: str
"""
if partner_tag is None:
raise ValueError("Invalid value for `partner_tag`, must not be `None`") # noqa: E501
self._partner_tag = partner_tag
@property
def partner_type(self):
"""Gets the partner_type of this SearchItemsRequest. # noqa: E501
:return: The partner_type of this SearchItemsRequest. # noqa: E501
:rtype: PartnerType
"""
return self._partner_type
@partner_type.setter
def partner_type(self, partner_type):
"""Sets the partner_type of this SearchItemsRequest.
:param partner_type: The partner_type of this SearchItemsRequest. # noqa: E501
:type: PartnerType
"""
if partner_type is None:
raise ValueError("Invalid value for `partner_type`, must not be `None`") # noqa: E501
self._partner_type = partner_type
@property
def properties(self):
"""Gets the properties of this SearchItemsRequest. # noqa: E501
:return: The properties of this SearchItemsRequest. # noqa: E501
:rtype: Properties
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this SearchItemsRequest.
:param properties: The properties of this SearchItemsRequest. # noqa: E501
:type: Properties
"""
self._properties = properties
@property
def resources(self):
"""Gets the resources of this SearchItemsRequest. # noqa: E501
:return: The resources of this SearchItemsRequest. # noqa: E501
:rtype: list[SearchItemsResource]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this SearchItemsRequest.
:param resources: The resources of this SearchItemsRequest. # noqa: E501
:type: list[SearchItemsResource]
"""
self._resources = resources
@property
def search_index(self):
"""Gets the search_index of this SearchItemsRequest. # noqa: E501
:return: The search_index of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._search_index
@search_index.setter
def search_index(self, search_index):
"""Sets the search_index of this SearchItemsRequest.
:param search_index: The search_index of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._search_index = search_index
@property
def sort_by(self):
"""Gets the sort_by of this SearchItemsRequest. # noqa: E501
:return: The sort_by of this SearchItemsRequest. # noqa: E501
:rtype: SortBy
"""
return self._sort_by
@sort_by.setter
def sort_by(self, sort_by):
"""Sets the sort_by of this SearchItemsRequest.
:param sort_by: The sort_by of this SearchItemsRequest. # noqa: E501
:type: SortBy
"""
self._sort_by = sort_by
@property
def title(self):
"""Gets the title of this SearchItemsRequest. # noqa: E501
:return: The title of this SearchItemsRequest. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this SearchItemsRequest.
:param title: The title of this SearchItemsRequest. # noqa: E501
:type: str
"""
self._title = title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SearchItemsRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SearchItemsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
# | StarcoderdataPython
3247240 | import glob
import os
import shutil
from subprocess import check_call
import sys
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))  # directory containing this script
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, ".."))  # parent of the scripts directory
STANDALONE_DIR = os.path.join(ROOT_DIR, "standalone-build")  # where the VTK source checkout is expected
PROJECT_NAME = "VTK"
def get_dummy_python_lib():
    """Return the path to an empty stub file used as the "python library".

    Extension modules should not link against libpythonX.Y: the interpreter
    already exports its own symbols, so explicit linking is unnecessary and
    forbidden under the manylinux1 policy (PEP 513). To guarantee the build
    does not link a real python library, the project is configured with this
    empty placeholder instead. The stub is created on first use.
    """
    stub_path = os.path.join(
        SCRIPT_DIR, 'internal',
        'libpython-not-needed-symbols-exported-by-interpreter'
    )
    if not os.path.exists(stub_path):
        # Touch an empty file; content is irrelevant, only its existence matters.
        open(stub_path, 'w').close()
    return stub_path
def get_python_info():
    """Collect the build parameters describing the running interpreter.

    :return: tuple ``(py_exe, py_ver, py_inc_dir, py_lib)`` — interpreter
        executable path, "X.Y" version string, include directory, and the
        dummy library stub from :func:`get_dummy_python_lib`.
    :raises IndexError: if no ``include/python*`` directory exists under
        ``sys.prefix``.
    """
    py_exe = sys.executable
    # Format "major.minor" directly; no need for an intermediate tuple.
    py_ver = '{0}.{1}'.format(*sys.version_info[:2])
    prefix = os.path.abspath(sys.prefix)
    py_inc_dir = glob.glob(os.path.join(prefix, 'include', 'python*'))[0]
    # Removed unused local `py_lib_dir` (was computed but never returned).
    py_lib = get_dummy_python_lib()
    return py_exe, py_ver, py_inc_dir, py_lib
def build_wheel(cleanup=False):
    """Build a single VTK wheel for the currently running Python.

    Invokes ``setup.py bdist_wheel`` (scikit-build style: arguments after
    ``--`` are forwarded to CMake) against the VTK source checkout under
    STANDALONE_DIR, placing build artifacts in a per-Python-version
    directory under ROOT_DIR.

    :param cleanup: when True, remove any previous build directory first.
    """
    py_exe, py_ver, py_inc_dir, py_lib = get_python_info()
    build_type = 'Release'
    source_path = "%s/%s-source" % (STANDALONE_DIR, PROJECT_NAME)
    build_path = "%s/%s-osx_%s" % (ROOT_DIR, PROJECT_NAME, py_ver)  # one build tree per Python version
    osx_target="10.9"  # minimum macOS deployment target for the produced wheel
    # Clean up previous invocations
    if cleanup and os.path.exists(build_path):
        shutil.rmtree(build_path)
    print("#")
    print("# Build single %s wheel" % PROJECT_NAME)
    print("#")
    # Generate wheel. Everything after "--" is passed through to CMake;
    # py_lib is the dummy stub so the wheel never links a real libpython.
    check_call([
        py_exe,
        "setup.py", "bdist_wheel",
        "--build-type", build_type,
        "-G", "Ninja",
        "--",
        "-DVTK_SOURCE_DIR:PATH=%s" % source_path,
        "-DVTK_BINARY_DIR:PATH=%s" % build_path,
        "-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=%s" % osx_target,
        "-DPYTHON_EXECUTABLE:FILEPATH=%s" % py_exe,
        "-DPYTHON_INCLUDE_DIR:PATH=%s" % py_inc_dir,
        "-DPYTHON_LIBRARY:FILEPATH=%s" % py_lib
    ])
    # Cleanup
    check_call([py_exe, "setup.py", "clean"])
def main():
    """Script entry point: build the VTK wheel for this interpreter."""
    build_wheel()


if __name__ == '__main__':
    main()
# | StarcoderdataPython
# 4815028 | <gh_stars>1-10
#!/usr/bin/python
# coding=UTF-8
# -*- coding: UTF-8 -*-
#input file of folder of fasta files
#Uses BLAST to search for homologous PDB structures
# This file is part of asa_uta.py.
#
# asa_uta is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# asa_uta is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with asa_uta. If not, see <http://www.gnu.org/licenses/>.
## Authors: <NAME>, <NAME>
## Institute of Biomedical Technology
## University of Tampere, Tampere, Finland
## and
## BioMediTech, Tampere, Finland
## 14-11-2013
# Please cite the authors if you find this script useful."
import sys
import os
import re
import time
import random
import signal
import glob
import textwrap
import getopt
import logging
import shutil
from datetime import datetime, timedelta
from multiprocessing import Lock, Process
try: from cStringIO import StringIO
except: from StringIO import StringIO
from Bio import SeqIO
from Bio.Application import ApplicationError
from Bio.Blast.Applications import NcbiblastxCommandline
from Bio.PDB.PDBParser import PDBParser
#from Bio.PDB.PDBList import PDBList
#from blast_repository import *
from blast_sqlite3_repository import *
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append( "%s/../utils" % curdir)
from fileops import *
from struct_methods import *#FetchObsoletes, GetRecordIDs, GetRecordACC, SyncPrint, SyncErrPrint
from delayed_keyboard_interrupt import DelayedKeyboardInterrupt
import online_pdb
sys.path.append( "%s/../align" % curdir)
from align_seq_with_structure_multithread import ProcessBlastResultsAsync
#Globals (shared across the BLAST driver and its signal/terminate helpers)
BLAST_INPUT_MODE = None  # "stdin" or "file"; decided lazily by TestBlast()
align_processes = []  # live (process, blast_chunk_file, id) tuples of parallel alignment workers
blast_print_lock = Lock()  # serializes console output across processes
original_sigint = signal.getsignal( signal.SIGINT)  # saved so sigIntHandler can restore the default
def sigIntHandler( aSignal, aFrame):
    """SIGINT (Ctrl-C) handler installed while parallel alignments run.

    Restores the original handler first (so a second Ctrl-C is not
    intercepted), terminates any live alignment workers, then exits
    with the distinctive status -99.
    """
    global align_processes, blast_print_lock, original_sigint
    signal.signal( signal.SIGINT, original_sigint) #Restore default handler before shutting down
    SetSyncPrintBuffered( False)  # switch to unbuffered printing for the shutdown messages
    SyncPrint( "Process interrupted by user.", blast_print_lock)
    TerminateThreads()
    sys.exit( -99)
    #raise KeyboardInterrupt
def TerminateThreads():
    """Terminate all still-running parallel alignment worker processes.

    Best-effort shutdown: a worker whose terminate() fails is reported
    as [Fail] and the remaining workers are still processed.
    """
    global align_processes, blast_print_lock
    SetSyncPrintBuffered( False)
    for p in align_processes:
        # p is a (process, blast_chunk_file, id) tuple
        SyncPrint( "Terminating remaining alignment process %i... " % int( p[ 2]), blast_print_lock, False )
        try:
            p[ 0].terminate()
            SyncPrint( "[OK]", blast_print_lock)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed during shutdown.
            SyncPrint( "[Fail]", blast_print_lock)
def printDictionary( dict):
print ""
for k in sorted( dict.iterkeys()):
print k,
print ":",
print dict[ k]
print ""
def WriteArrayToFile( filehandle, linearray ):
    """Write each element of linearray to filehandle, one per line."""
    filehandle.writelines( entry + "\n" for entry in linearray)
def ReportSpeed( aCurrent, aTotal, aPrintLock=False):
    """Periodically print BLAST throughput and an estimated completion time.

    State (last sample time/count and a sliding window of up to 20
    per-minute speed samples) is stored as attributes on the function
    object itself, so no module globals are needed. Prints a progress
    banner at most once every 10 seconds.

    :param aCurrent: number of records processed so far
    :param aTotal: total number of records to process
    :param aPrintLock: optional lock to serialize the printed report
    :return: the speed message string, or None when skipped (interval
        not yet elapsed)
    """
    #Using function attributes
    if not hasattr( ReportSpeed, "prev_time"):
        # First call: just initialize state, nothing to measure yet.
        ReportSpeed.prev_time = time.time()
        ReportSpeed.prev_count = aCurrent
        ReportSpeed.processing_speed_per_minute = []
        return "Speed: Calculating processing speed..."
    now = time.time()
    seconds_passed = int( now - ReportSpeed.prev_time)
    if seconds_passed < 10: return #report interval
    n_processed = aCurrent - ReportSpeed.prev_count
    entries_per_minute = round( n_processed * (60.0 / seconds_passed))
    #Calc average over a sliding window of at most 20 samples
    if len( ReportSpeed.processing_speed_per_minute) > 19: ReportSpeed.processing_speed_per_minute.pop( 0)
    ReportSpeed.processing_speed_per_minute.append( entries_per_minute)
    avg_speed = int( sum( ReportSpeed.processing_speed_per_minute) / len( ReportSpeed.processing_speed_per_minute))
    if avg_speed == 0: avg_speed = 1
    n_minutes_remaining = (aTotal - aCurrent) / avg_speed
    completion = datetime.now() + timedelta(minutes = n_minutes_remaining)
    # Pick a message format based on how far away completion is.
    if n_minutes_remaining <= 1:
        # Blast Progress:
        speed_msg = " Speed: %i/min. Estimated time of completion: %s (almost done)" % (avg_speed, completion.strftime('%H:%M'))
    elif n_minutes_remaining <= 60:
        speed_msg = " Speed: %i/min. Estimated time of completion: %s (%i minutes remaining)" % (avg_speed, completion.strftime('%H:%M'), n_minutes_remaining)
    elif n_minutes_remaining < 60*24: #in 24h
        h = n_minutes_remaining / 60 #h is float without conversion?
        m = n_minutes_remaining - (h*60)
        speed_msg = " Speed: %i/min. Estimated time of completion: %s (in %ih %imin)" % (avg_speed, completion.strftime('%H:%M'), h, m)
    else:
        h = n_minutes_remaining / 60
        speed_msg = " Speed: %i/min. Estimated time of completion: %s (in >%i hours)" % (avg_speed, completion.strftime('%a %b %d %Y %H:%M'), h)
    ReportSpeed.prev_time = now
    ReportSpeed.prev_count = aCurrent
    if aPrintLock: aPrintLock.acquire()
    print "\n--------------------------------------------------------------------------"
    print "Blast Progress: Processed records %i/%i (%.1f%%)\n%s" % (aCurrent, aTotal, (float( aCurrent)/aTotal)*100.0, speed_msg)
    print "--------------------------------------------------------------------------\n"
    if aPrintLock: aPrintLock.release()
    return speed_msg
#Test if blast can be run without writing an input file
def TestBlast( aExeFolder, aDBFolder, aVerbose=False ):
    """Verify the blastp installation and probe whether it accepts stdin input.

    Locates the blastp executable (in aExeFolder, or on the system PATH when
    aExeFolder is empty), checks execute permission and the presence of the
    'pdbaa' database, then runs a tiny test query piped through stdin. Sets
    the module-global BLAST_INPUT_MODE to "stdin" when that works, otherwise
    to "file" (a temp file will be written for each query).

    :param aExeFolder: folder containing blastp, or "" to search PATH
    :param aDBFolder: folder containing the 'pdbaa' BLAST database
    :param aVerbose: print extra diagnostics when True
    :return: True when the setup is usable, False on a missing/unusable
        executable or database; returns None immediately if the mode was
        already determined.
    """
    global BLAST_INPUT_MODE
    if BLAST_INPUT_MODE != None: return #Already tested
    import subprocess
    #from Bio.Blast.Applications import NcbiblastxCommandline
    #Check file existence
    executable_file = "blastp" + (".exe" if os.name == "nt" else "")
    if aExeFolder == "": #Need to find exe in os path
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_test_file = os.path.join(path, executable_file)
            if os.path.isfile( exe_test_file):
                aExeFolder = path
                if aVerbose: print "BLAST INFO: Blast found in system path at: '%s'" % path
                break
    exe = InsertFolderSeparator( aExeFolder)+"blastp"
    if not os.path.isfile( exe):
        # Retry with the Windows ".exe" suffix before giving up.
        exe = exe + ".exe"
        if not os.path.isfile( exe):
            if len( aExeFolder): sys.stderr.write( "BLAST ERROR: Blast executable not found in '%s'.\n" % aExeFolder )
            else: sys.stderr.write( "BLAST ERROR: Blast executable not found in './' or system path.\n")
            return False
    #Check permissions
    if not os.access( exe, os.X_OK):
        sys.stderr.write( "BLAST ERROR: Blast executable '%s' cannot be accessed (file permissions).\n" % exe )
        return False
    #Check db (presence of the first volume file is used as the indicator)
    db = InsertFolderSeparator( aDBFolder)+"pdbaa"
    if not os.path.isfile( db + ".00.phr"):
        sys.stderr.write( "BLAST ERROR: Blast database 'pdbaa' not found in '%s'.\n" % aDBFolder )
        return False
    #Run test query through stdin; empty/err output means stdin is not supported
    print "BLAST INFO: Testing if Blast accepts stdin input (faster)... ",
    query = ">Test_seq\nMIVSDIEANALLESVTKFHCGVIYDYSTAEYVSYRPSDFGAYLDALEAEVARGGLIVFHNGHKYDVPALT\n"
    blastp_cmdline = NcbiblastxCommandline( cmd=exe, db=db, outfmt=6, max_target_seqs=2) #format Blast command
    #print "CMD:", str(blastp_cmdline)
    try:
        process = subprocess.Popen( str( blastp_cmdline), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=False) #setup the process
        out, err = process.communicate( input=query) #run the process
        #print out
        if len( out) == 0 or len( err) > 0:
            #No results. Blast versions above blast-2.2.28+
            print "[FAILED]"
            BLAST_INPUT_MODE = "file"
        else:
            #Blast version blast-2.2.28+
            BLAST_INPUT_MODE = "stdin"
            print "[OK]"
    except Exception as ex:
        print "[FAILED]"
        BLAST_INPUT_MODE = "file"
        #sys.stderr.write( "BLAST ERROR: Exception occurred while running blast test '%s'.\n" % str( blastp_cmdline))
        #sys.stderr.write( type( ex).__name__ + ": '" + str( ex)+"'\n")
    return True
def FindStructures( blast_exe_folder, input_file, output_file = "", PDB_save_folder="", BLAST_DB_folder="", parallel_align=True,\
                    max_threads=0, max_results=5, e_val_threshold=0.001, \
                    fetch_files=True, reset_progress=False, obsoletes=None, verbose=False, sequence_window=30, use_repository=True, debug=False):
    """Top-level wrapper around DoFindStructures with unified error handling.

    Enables buffered synchronized printing for the duration of the run,
    and on interrupt/exception closes the repository (if in use) and
    terminates any parallel alignment workers before returning.

    :param obsoletes: optional list of obsolete PDB ids; defaults to an
        empty list (fixed: was a shared mutable default argument).
    :param debug: when True, re-raise unexpected exceptions after cleanup.
    :return: the DoFindStructures return code, or -1 on failure.
    """
    global blast_print_lock
    if obsoletes is None: obsoletes = []  # avoid the shared-mutable-default pitfall
    retval = -1
    SetSyncPrintBuffered( True)
    #use_repository = len( BLAST_DB_folder) > 0
    try:
        retval = DoFindStructures( blast_exe_folder, input_file, output_file, PDB_save_folder, BLAST_DB_folder, parallel_align, max_threads, max_results, e_val_threshold, fetch_files, reset_progress, obsoletes, verbose, sequence_window=sequence_window, use_repository=use_repository)
    except KeyboardInterrupt:
        if use_repository: CloseRepository()
        SyncErrPrint( "BLAST ERROR: Process interrupted by user.", blast_print_lock)
        TerminateThreads()
    except Exception as ex:
        SyncErrPrint( "BLAST ERROR: Exception caught running FindStructures in '%s'." % (__file__), blast_print_lock)
        SyncErrPrint( "             An exception of type {0} occured. Arguments: {1!r}".format( type( ex).__name__, ex.args), blast_print_lock)
        TerminateThreads()
        if debug: raise
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split( exc_tb.tb_frame.f_code.co_filename)[1]
        #print( exc_type, fname, exc_tb.tb_lineno)
    SetSyncPrintBuffered( False)
    return retval
#blastp has a -remote flag for doing queries in online DBs
def DoFindStructures( blast_exe_folder, input_file, output_file = "", PDB_save_folder="", BLAST_DB_folder="", parallel_align=True, max_threads=0, max_results=5, e_val_threshold=0.001, fetch_files=True, reset_progress=False, obsoletes=[], verbose=False, sequence_window=30, use_repository=True):
global align_processes, blast_print_lock, BLAST_INPUT_MODE
PDB_save_folder = InsertFolderSeparator( PDB_save_folder)
BLAST_DB_folder = InsertFolderSeparator( BLAST_DB_folder)
BLAST_EXE_FOLDER = InsertFolderSeparator( blast_exe_folder)
repository_file = BLAST_DB_folder + "blast_repository.sqlite"
repository_fetch_arg = max_results
if use_repository:
print "INFO: Using repository of previous results: '%s'" % repository_file
SetRepositoryFile( repository_file)
if max_results <= 0: repository_fetch_arg = -1
if max_results >= 100: repository_fetch_arg = -1
sequence_window += sequence_window % 2 #Make window even
if sequence_window > 0:
if verbose: sys.stderr.write( "INFO: Using sequence window of %i amino acids around each POI for BLASTing.\n" % sequence_window )
if sequence_window < 10: sys.stderr.write( "WARNING: Blasting with a small sequence window (%i) might yield unexpected results.\n" % sequence_window )
elif verbose: sys.stderr.write( "INFO: Using full sequences for BLASTing.\n" % sequence_window )
#if not specified, limit to 8 threads
if max_threads == 0: max_threads = 8
#0 or less means all possible results
if max_results <= 0: max_results = 999
if verbose and max_results > 0: sys.stderr.write( "INFO: Using maximum of %i homologous structures for each POI.\n" % max_results )
elif verbose: sys.stderr.write( "INFO: Using all available homologous structures for each POI.\n" )
#Reserve 1 thread for blasting and the rest for alignments
if parallel_align and max_threads == 1:
sys.stderr.write( "INFO: Cannot use only one thread for parallel alignments.\n" )
sys.stderr.write( " No parallel alignments are done.\n" )
parallel_align = False
elif max_threads > 1:
max_threads -= 1
if parallel_align: print "INFO: using %i threads for parallel alignments." % max_threads
#Terminate parallel alignment threads in own handler
if parallel_align:
signal.signal(signal.SIGINT, sigIntHandler)
#Format default output filename if necessary
if len( output_file) == 0: output_file = input_file + ".blast"
#Check folder
if not os.path.isdir( PDB_save_folder):
sys.stderr.write( "ERROR: '%s' does not specify a valid directory.\n" % PDB_save_folder)
return -1
print "BLAST progress: Indexing file '%s'..." % (input_file)
record_index = SeqIO.index( input_file, "fasta", key_function=GetRecordACC)
n_records = len( record_index)
print "BLAST progress: Read %i sequence records from file '%s'" % (n_records, input_file)
if n_records <= 0:
sys.stderr.write( "ERROR: No records to process.\n")
return -1
if len( obsoletes) == 0:
obsoletes = online_pdb.FetchObsoletes( True, PDB_save_folder);
seq_record_ids = GetRecordIDsFromFile( input_file, aWithPos=True)
missing_blast_record_ids = seq_record_ids
n_progress = 0
#Sanity check
if n_records != len( missing_blast_record_ids):
sys.stderr.write( "ERROR: Missmatching number of sequence IDs. Quitting.\n")
return -4
#Get current progress
if not reset_progress:
#Get sequense ids from blast output file
#Check if work is already done
if os.path.isfile( output_file):
blast_record_ids = GetRecordIDsFromFile( output_file, aWithPos=True)
#blast_record_ids = []
#with open( output_file, "r") as opf:
# for line in opf:
# if line[ 0] == ">":
# blast_record_ids.append( GetRecordACC( line[1:])) #skip ">"
missing_blast_record_ids = list( set( seq_record_ids) - set( blast_record_ids))
n_progress = n_records-len( missing_blast_record_ids)
if n_progress > 0:
print "BLAST progress: %i/%i (%.1f%%) records already BLASTed." % (n_progress, n_records, (float( n_progress)/n_records*100.0))
print "BLAST progress: Remove file '%s' to re-process." % output_file
elif os.path.isfile( output_file):
#Reset
#Remove existing output file (if any)
try:
print "BLAST progress: Resetting any previous BLAST progress... ",
os.remove( output_file)
print "Done."
except OSError:
sys.stderr.write("\nERROR: Could not remove BLAST progress file: '%s'. Remove it to reset BLAST progress." % output_file)
sys.exit(-2)
n_missing = len( missing_blast_record_ids)
if n_missing > 0:
#Open output file for writing
print "BLAST progress: Writing output to file '%s'." % output_file
else:
print "BLAST progress: All Done."
RemoveParallelAlignFiles( GetFolder(output_file))
return 0
fetched_pdbs = {}
cur_result_chunk_file = None
cur_result_chunk_filename = None
chunk_size = 20
file_written = False
#cur_chunk = 0
#align_processes = []
align_file_queue = []
align_max_queue = max( 10, max_threads*2 + 1)
align_errors = 0
#blast_paralign_%i.blast
#Create align folder
align_folder = InsertFolderSeparator( GetFolder( output_file) + "align" )
if not os.path.exists( align_folder): os.makedirs( align_folder)
#Remove previous parallel alignment files (if any)
RemoveParallelAlignFiles( GetFolder(output_file))
with open( output_file, "a+") as results_file:
previous_sequence = ""
previous_id_stub = ""
lines = []
n_lines = 0
line_ok = []
n_blasted = 0
#re_pos = re.compile( "_Pos\d+")
for i in range( max(0, n_progress - 10), n_records):
#print "I:%i" % i
seq_id = seq_record_ids[ i]
#cur_record = records[ i]
cur_record = record_index[ seq_id]
poipos = GetRecordPOI( cur_record.description)
cur_seq = ""
if sequence_window > 0:
#Blast with partial (windowed) sequence
start_pos = poipos - (sequence_window / 2)
end_pos = start_pos + sequence_window
if start_pos < 0:
start_pos = 0
end_pos = sequence_window
if end_pos > len( cur_record.seq):
end_pos = len( cur_record.seq)
start_pos = max( 0, end_pos - sequence_window)
cur_seq = str( cur_record.seq[ start_pos:end_pos])
else:
#Blast with full sequence
cur_seq = str( cur_record.seq)
same_id_as_previous = False
fetched_from_repository = False
if sequence_window <= 0:
#For full sequence blasting, same id means same results
same_id_as_previous = len( previous_id_stub) and previous_id_stub == StripPosTag( seq_id) #re.sub("_Pos\d+", "", seq_id) #StripPosTag
#Sanity check
if i < n_progress and seq_id in missing_blast_record_ids:
sys.stderr.write( "ERROR: Number of processed entries (%i) does not match with current blast file.\n" % n_progress)
sys.stderr.write( "ERROR: Entry '%s' should already be processed according to its index in the file. \n" % seq_id)
sys.stderr.write( "ERROR: Consider restarting BLAST processing by removing the file '%s'\n" % output_file)
sys.stderr.write( " or using the '--reset' flag.\n")
sys.exit( -3)
if seq_id not in missing_blast_record_ids:
msg = "Entry %i: %s already blasted." % (i, seq_id)
print msg.ljust( 55), "[OK]"
continue #Already fetched
#DEBUG
#PrintEntry( cur_seq)
#sys.exit( 0)
#If the sequence is exactly the same as for the previous fasta entry, use those results
if cur_seq and ( same_id_as_previous or (len( cur_seq) == len( previous_sequence) and str(cur_seq) == previous_sequence)):
#Use previous results
if verbose: SyncPrint( "Blast search already processed for %s (#%i), using existing results." % (cur_record.id, i), blast_print_lock)
elif not cur_seq or len(cur_seq) == 0:
SyncErrPrint( "ERROR: No sequence for %s (#%i)." % (cur_record.id, i), blast_print_lock)
sys.exit( -2)
else:
#print "\nProcessing record %i/%i (%.1f%%): %s" % (n_progress+1, n_records, ((n_progress+1.0)/n_records)*100.0, cur_record.id)
previous_sequence = str( cur_seq)
previous_id_stub = StripPosTag( seq_id) #re.sub("_Pos\d+", "", seq_id)
#blastp -query "A:\3IKM_a.fasta" -db pdbaa -outfmt 6
lines = None
if use_repository: lines = FetchFromRepository( cur_seq, aCount=repository_fetch_arg, aMaxDaysAgo=30)
SyncPrint( "[%s] Blasting record '%s'" % ("+" if lines != None else "-", cur_record.id), blast_print_lock, aBypassBuffer=True)
if verbose: SyncPrint( " Seq: '%s%s'" % (cur_seq[0:50], ("" if sequence_window <= 50 else "...")))
if lines != None:
#Found in repository
fetched_from_repository = True
else:
#Blast can be run using stdin if the used version accepts it, Test!
if BLAST_INPUT_MODE == None:
retval = TestBlast( BLAST_EXE_FOLDER, BLAST_DB_folder, verbose)
if retval == False: sys.exit( -72)
query_param = "-"
query_seq = ">%s\n%s\n" % (cur_record.description, cur_seq)
if BLAST_INPUT_MODE == "file":
#Stdin not usable, write input to file
input_tmp_file = 'current_blast.fasta'
#Write current sequence into a temp file for blasting
with open( input_tmp_file, 'w') as tempf:
tempf.write( query_seq)
query_param = input_tmp_file
query_seq = None
#SeqIO.write( cur_record, 'current.fasta', "fasta")
#####################
#BLAST IT
#SyncPrint( "Blasting...", blast_print_lock, aBypassBuffer=True )
retry_blast = 3
while retry_blast:
errors = False
#if verbose and sequence_window <= 100: SyncPrint( "Blasting seq: '%s'" % cur_seq )
#blastp_cline = NcbiblastxCommandline( cmd=BLAST_EXE_FOLDER+'blastp', out='out.blast', outfmt=6, query='current.fasta', db=(BLAST_DB_folder+'pdbaa'), evalue=e_val_threshold, max_target_seqs=max_results)
#Blast without using Disk I/O
blastp_cline = NcbiblastxCommandline( cmd=BLAST_EXE_FOLDER+'blastp', out='-', outfmt=6, query=query_param, db=(BLAST_DB_folder+'pdbaa'), evalue=e_val_threshold, max_target_seqs=max_results)
blast_output = StringIO() #stdout
try:
out, err = blastp_cline( stdin=query_seq)
if err and len( err): raise ApplicationError( err)
print >> blast_output, out
#print >> blast_output, blastp_cline( stdin=query_seq)[ 0]
except ApplicationError as ex:
#Sometimes blast cannot find "pdbaa.00.phr" even though it is there
SyncErrPrint( "ERROR: Exception caught running NcbiblastxCommandline in '%s'.\n" % (__file__), blast_print_lock)
SyncErrPrint( " An exception of type {0} occured. Arguments: {1!r}".format(type(ex).__name__, ex.args), blast_print_lock )
errors = True
retry_blast -= 1
time.sleep( 3)
except Exception as ex:
errors = True
SyncErrPrint( "ERROR: Exception caught running NcbiblastxCommandline in '%s'.\n" % (__file__), blast_print_lock)
SyncErrPrint( " An exception of type {0} occured. Arguments: {1!r}".format(type(ex).__name__, ex.args), blast_print_lock )
sys.exit( -66)
if errors:
if retry_blast > 0:
SyncErrPrint( "Retrying blast...", blast_print_lock )
elif retry_blast == 0:
SyncErrPrint( "Retries exhausted. Exitting.", blast_print_lock )
sys.exit(-67)
else:
retry_blast = 0 #exit loop
n_blasted += 1
# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
#read blast output
#f = open('out.blast')
#lines = map( str.strip, f.readlines())
#f.close()
#Remove whitespace and empty rows
lines = filter(None, map( str.strip, blast_output.getvalue().split("\n")))
n_lines = len( lines)
line_ok = [True]*n_lines
if n_lines < 1:
SyncPrint( " NO RESULTS", blast_print_lock, aBypassBuffer=True )
else:
#print lines
#for line in lines: print "'%s'" % line
desc_len = max( 0, lines[ 0].find( "\t"))
print "\n".join( line[desc_len+1:] for line in lines) #Do not print descriptions
#print "\n".join( line for line in lines) #print descriptions
#PDB
#p = PDBParser( PERMISSIVE=1)
for l in range( n_lines):
cols = lines[ l].split( "\t")
if len( cols) != 12:
SyncErrPrint( "BLAST ERROR: line error: '%s'" % lines[ l], blast_print_lock )
line_ok[ l] = False
continue
#Two possible formats in BLAST results pdb chain separated either by "|" or "_"
if cols[ 1].find( "|") >= 0:
#gn|P23919|DTYMK gi|359545626|pdb|2XX3|A 100.00 212 0 0 1 212 21 232 7e-156 436
id_col = cols[ 1].split("|")
pdb_id = id_col[ 3]
else:
#gn|P30613_Pos2|PKLR 4IP7_A 99.815 541 1 0 34 574 3 543 0.0 1096
id_col = cols[ 1].split("_")
pdb_id = id_col[ 0]
if pdb_id in obsoletes:
SyncPrint( "%s is obsolete." % pdb_id, blast_print_lock, aBypassBuffer=True )
#Get rid of obsoletes
line_ok[ l] = False
continue
else:
if len( pdb_id) != 4:
SyncErrPrint( "PDB ID ERROR:" + pdb_id, blast_print_lock )
elif fetch_files and pdb_id not in fetched_pdbs:
#Get file from online RSCB database
online_pdb.FetchPDBFile( pdb_id, PDB_save_folder, False, obsoletes, verbose=False )
fetched_pdbs[ pdb_id] = True
#For parallel align processing
if parallel_align:
#print "Clearing buffer..."
#SyncPrintFlushBuffer( blast_print_lock, aTitle="Alignments:\n")
try:
new_chunk_filename = GetFolder( output_file) + "blast_paralign_%i.blast" % (i - (i % chunk_size))
#New chunk of BLAST results
if new_chunk_filename != cur_result_chunk_filename:
#Close current file
if cur_result_chunk_file: cur_result_chunk_file.close()
#Remove processed entries
CleanParallelAlignFiles( align_processes)
queue_full = len( align_file_queue) >= align_max_queue
#Insert current file to queue, if queue not full
if cur_result_chunk_file:
if not queue_full and file_written: #Max size for file queue
SyncErrPrint( "\nINFO: Queueing file for alignment: '%s'" % cur_result_chunk_filename, blast_print_lock )
align_file_queue.append( cur_result_chunk_filename)
else:
#Queue full, or file empty?
#File not needed
try: os.remove( cur_result_chunk_filename)
except: SyncErrPrint( "WARNING: Could not remove previous blast result file '%s'." % cur_result_chunk_filename, blast_print_lock )
#Start new alignment processes for first file in queue
while len( align_file_queue) > 0 and len( align_processes) < max_threads:
next_align_file = align_file_queue.pop( 0)
if not os.path.isfile( next_align_file):
SyncErrPrint( "\n\nWARNING: File '%s' does not exist." % next_align_file, blast_print_lock )
continue
if int( os.stat( next_align_file).st_size) <= 0:
SyncErrPrint( "\n\nWARNING: File '%s' is empty." % next_align_file, blast_print_lock )
continue
process_id = 0
if len( align_processes): process_id = min( set( range(len( align_processes)+1))-set( zip(*align_processes)[ 2])) #Lowest unused integer
if verbose: SyncPrint( "Align Progress: Starting new alignment process for entries %i-%i... (id:%i)\n" % (( i-(i % chunk_size)), i-1, process_id), blast_print_lock, aBypassBuffer=True )
process = ProcessBlastResultsAsync( next_align_file, input_file, align_folder, process_id, PDB_save_folder, obsoletes, aScoreIt=False, aSkipExisting=True, aLock=blast_print_lock)
align_processes.append( (process, next_align_file, process_id))
#open file for next chunk
cur_result_chunk_filename = new_chunk_filename
#Stop writing new files if queue already full
if not queue_full: cur_result_chunk_file = open( new_chunk_filename, "a+")
else: cur_result_chunk_file = None
file_written = False #Making sure not to add empty files to queue
except Exception as ex:
SyncErrPrint( "WARNING: Error with parallel alignment processing:", blast_print_lock )
SyncErrPrint( " An exception of type {0} occured. Arguments: {1!r}\n".format( type(ex).__name__, ex.args), blast_print_lock )
cur_result_chunk_file = None
align_errors += 1
if align_errors > 3:
SyncErrPrint( "ERROR: Too many parallel alignment errors. Use '-n' to not use parallel aligning during BALST queries. Exitting.", blast_print_lock )
sys.exit(-68)
#Disable keyboard interrupt during writing of result file(s)
#sig = signal.signal(signal.SIGINT, signal.SIG_IGN)
with DelayedKeyboardInterrupt():
blast_record = []
blast_record_header = ">%s\n" % cur_record.id
blast_record.append( blast_record_header)
results_file.write( blast_record_header.rstrip() + (", [%i]\n" % i) )
if cur_result_chunk_file: cur_result_chunk_file.write( blast_record_header.rstrip() + (", [%i]\n" % i) )
structures_found = 0
for l in range( n_lines):
if line_ok[ l]: #Get rid of obsoletes
blast_record.append( lines[ l])
results_file.write( "%s\n" % lines[ l])
if cur_result_chunk_file: cur_result_chunk_file.write( "%s\n" % lines[ l])
structures_found += 1
results_file.write( "\n" )
if cur_result_chunk_file: cur_result_chunk_file.write( "\n" )
results_file.flush()
if cur_result_chunk_file: cur_result_chunk_file.flush()
file_written = True
if not same_id_as_previous and not fetched_from_repository and use_repository:
#cache_result_lines = [("blast_cache" + l[ l.index("\t"):]) for l in blast_record[1:]] # Skip header, change first col to "blast_cache"
AddToRepository( cur_seq, blast_record[1:], max_results ) # Skip header
#Log
logging.info(('STRUCT_COUNT:%02i: ' % structures_found) + cur_record.id )
#Re-enable keyboard interrupt
#signal.signal(signal.SIGINT, sig)
n_progress += 1
SyncPrintFlushBuffer( blast_print_lock, aTitle="\nAlignments:\n")
if not fetched_from_repository: ReportSpeed( n_progress, n_records, blast_print_lock)
if use_repository: CloseRepository()
if cur_result_chunk_file: cur_result_chunk_file.close()
CleanParallelAlignFiles( align_processes)
if len( align_processes):
SyncPrint( "Waiting for 180 secs for remaining %i alignment process%s to finnish..." % (len( align_processes), ("" if len( align_processes) == 1 else "es")), blast_print_lock, aBypassBuffer=True )
timer = 0
while timer < 180:
time.sleep( 2)
SyncPrintFlushBuffer( blast_print_lock)
timer += 2
alive = 0
for ap in align_processes:
if ap[ 0].is_alive(): alive += 1
if alive == 0:
print "INFO: All threads have finished."
break
CleanParallelAlignFiles( align_processes)
for p in align_processes:
SyncPrint( "Terminating process %i... " % p[ 2], blast_print_lock, False, aBypassBuffer=True )
p[ 0].terminate()
SyncPrint( "Done.", blast_print_lock, aBypassBuffer=True )
print "BLASTED %i entries.\nFile: '%s' written.\n" % ( n_blasted, output_file )
RemoveParallelAlignFiles( GetFolder(output_file))
signal.signal( signal.SIGINT, original_sigint)
return 0 #All OK
def RemoveParallelAlignFiles( aBlastFolder):
    """Delete any leftover parallel-alignment chunk files from aBlastFolder.

    Looks for files matching "blast_paralign_*.blast" and removes each one;
    failures to delete an individual file are reported but not fatal.
    """
    global blast_print_lock
    retval, chunk_files = GetFolderFiles( aBlastFolder, "blast_paralign_*.blast")
    # Nothing to do when the folder listing failed or came back empty.
    if retval != 0 or not chunk_files:
        return
    for chunk in chunk_files:
        full_path = ChangeToFolder( chunk, aBlastFolder)
        try:
            os.remove( full_path)
        except:
            SyncErrPrint( "WARNING: could not remove previous blast result file '%s'." % chunk, blast_print_lock)
def CleanParallelAlignFiles( align_processes):
    """Reap finished parallel-alignment worker processes.

    For every entry (process, chunk_file, thread_id) in align_processes whose
    process is no longer alive: report its exit status, remove the entry from
    the list in place, and delete the BLAST chunk file it was processing.
    """
    global blast_print_lock
    # Iterate over a reversed view so entries can be removed while looping.
    for ap in reversed( align_processes):
        if not ap[ 0].is_alive():
            try:
                thread_id = ap[ 2]
                file_to_be_removed = ap[ 1]
                exit_code = ap[ 0].exitcode
                align_processes.remove( ap)
                SyncPrint( "Align Progress: File '%s' processed in thread %i. %s" % (file_to_be_removed, thread_id, ("[OK]" if exit_code == 0 else ("[FAIL-%i]" % exit_code))), blast_print_lock)
                if os.path.isfile( file_to_be_removed): os.remove( file_to_be_removed)
            except OSError as oex:
                # FIX: originally caught (OSError, WindowsError); WindowsError is
                # undefined on non-Windows platforms (NameError when the except
                # clause is evaluated) and is a subclass of OSError on Windows,
                # so catching OSError alone is equivalent and portable.
                SyncErrPrint( "\nWARNING: Could not delete parallel alignment file '%s'." % file_to_be_removed, blast_print_lock)
                SyncErrPrint( "         An exception of type {0} occured. Arguments: {1!r}".format( type( oex).__name__, oex.args), blast_print_lock)
            except Exception as ex:
                # FIX: pass blast_print_lock here too, consistent with every
                # other SyncErrPrint call in this function.
                SyncErrPrint( "\nWARNING: Could not clean parallel align files.", blast_print_lock)
                SyncErrPrint( "         An exception of type {0} occured. Arguments: {1!r}".format( type( ex).__name__, ex.args), blast_print_lock)
def GetFolder( filepath):
    """Return the directory part of filepath, terminated with a folder separator."""
    folder = os.path.dirname( filepath)
    return InsertFolderSeparator( folder)
def ChangeToFolder( filepath, folder):
    """Return filepath's base name relocated into the given folder."""
    filename = os.path.basename( filepath)
    return InsertFolderSeparator( folder) + filename
def PrintHelp():
    """Print the command-line usage summary for this script to stdout."""
    print """
  Usage: blast_structure_finder.py [flags] input_file output_file

  -e  --evalue FLOAT          E-value threshold for BLAST queries. default = 0.001
  -m  --max INT               Max number of results per file.
  -d  --db_folder PATH        Path and name to local BLAST database.
  -s  --save_folder PATH      Specify where fetched PDB files are saved.
  -r  --reset                 Reset progress and reprocess files from the beginning
  -n  --nofetch               Do not fetch structures.
  -f  --fetch                 Fetch result structures from PDB database [default]
  -h  --help                  Print this message.
  -v  --version               1.0
  """
def main():
    """Command-line entry point.

    Parses options and positional arguments (input file/folder, optional
    output file/folder), builds the list of fasta files to process, then runs
    FindStructures() on each of them. Exits the process on argument errors.
    """
    input_file = ""
    output_file = ""
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'e:m:d:s:rnfhv', ['evalue=', 'max=', 'db_folder=', 'save_folder=', 'reset', 'nofetch', 'fetch', 'help', 'version'])
    except getopt.GetoptError as err:
        sys.stderr.write( err.msg)
        sys.stderr.write( "\n")
        PrintHelp()
        sys.exit( 2)
    if len( sys.argv) < 2:
        sys.stderr.write("Too few arguments.\n")
        PrintHelp()
        sys.exit( -2) #Exit
    #Positional arguments: first is input, second (optional) is output.
    for arg in args:
        if len( input_file) == 0:
            input_file = arg
        elif len( output_file) == 0:
            output_file = arg
        else:
            sys.stderr.write("Too many arguments.\n")
            sys.stderr.write("See -h for usage.\n")
            sys.exit( 2)
    file_list = []
    outfile_list = []
    if os.path.isfile( input_file):
        #Single input file
        print("MAIN: Processing file '%s' using BLAST to find homologous structures." % input_file)
        file_list.append( input_file)
        if len( output_file) and os.path.isdir( output_file):
            outfile_list.append( ChangeToFolder( input_file, output_file) + ".blast")
        else:
            outfile_list.append( input_file + ".blast")
    elif os.path.isdir( input_file):
        input_file = InsertFolderSeparator( input_file)
        #Input folder: process every fasta file found inside it.
        file_list = GetFastaFilesInFolder( input_file)
        print("MAIN: Processing all fasta files in folder: '%s'" % input_file)
        print("MAIN: Found %i fasta files." % len( file_list))
        if len( output_file):
            if not os.path.isdir( output_file):
                sys.stderr.write("'%s' does not specify a valid folder.\n" % output_file)
                sys.exit( 2) #Exit
            else:
                output_file = InsertFolderSeparator( output_file)
                for f in file_list:
                    outfile_list.append( ChangeToFolder( f, output_file) + ".blast")
        else:
            for f in file_list:
                outfile_list.append( f + ".blast")
    else:
        sys.stderr.write("'%s' does not specify a valid file or folder.\n" % input_file)
        sys.exit( -2) #Exit
    #Option defaults (flags below may override these).
    e_val = 0.001
    max_results = 5
    db = "I:/anSsi/Blast_DB/"
    save_path = "I:/anSsi/PDB/"
    reset = False
    fetch = True
    #Flags
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            PrintHelp()
            sys.exit( 0)
        elif opt in ('-v', '--version'):
            PrintHelp()
            sys.exit( 0)
        elif opt in ('-r', '--reset'): reset = True
        elif opt in ('-f', '--fetch'): fetch = True
        elif opt in ('-n', '--nofetch'): fetch = False
        elif opt in ('-d', '--db_folder'):
            db = arg
            #"NONE" (any case) explicitly disables the local BLAST database.
            if arg and arg.upper() == "NONE": db = ""
        elif opt in ('-s', '--save_folder'): save_path = arg
        elif opt in ('-e', '--evalue'):
            try:
                e_val = float( arg)
            except ValueError:
                sys.stderr.write( "Bad evalue given: '%s'. Using default: 0.001\n" % arg)
        elif opt in ('-m', '--max'):
            try:
                max_results = int( arg)
            except ValueError:
                #FIX: message now matches the actual default of 5 (was "4").
                sys.stderr.write( "Bad max_results given: '%s'. Using default: 5\n" % arg)
        else:
            #getopt rejects unknown options before this loop; kept as a safeguard.
            sys.stderr.write("Unknown option: '%s'.\n" % opt)
            sys.stderr.write("See -h for usage.\n")
            sys.exit( 2)
    if len( file_list) < 1:
        print("No files to parse.")
        sys.exit( 0)
    #Find structures for each input file in turn.
    for i in range( len( file_list)):
        FindStructures( blast_exe_folder="C:/blast-2.4.0+/bin", input_file=file_list[ i], output_file=outfile_list[ i], PDB_save_folder=save_path, BLAST_DB_folder=db, parallel_align=False,
                        max_results=max_results, e_val_threshold=e_val, fetch_files=fetch, reset_progress=reset, verbose=True, sequence_window=20)
        print("")
    print("All done.")
# Script entry point: only run main() when executed directly, not on import.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
62698 | from nltk.corpus import stopwords
# Build a set of Indonesian stop words from the NLTK stopwords corpus
# (the corpus data must already be available to NLTK) and print it.
stop_words = set(stopwords.words("indonesian"))
print(stop_words)
| StarcoderdataPython |
3347771 | #! /usr/bin/env python
"""
This script allows for the search of Sentinel-1 data on scihub.
Based on some search parameters the script will create a query on
www.scihub.copernicus.eu and return the results either as shapefile,
sqlite, or PostGreSQL database.
"""
# import modules
import getpass
import os
import logging
# Import GDAL's OGR bindings: prefer the legacy top-level "ogr" module and
# fall back to the packaged "osgeo.ogr" location when it is unavailable.
# FIX: the original had two sibling `except ModuleNotFoundError` clauses on
# one try; only the first matching clause ever runs, so the second
# (`raise e`) was unreachable dead code. If the fallback import also fails,
# its own ModuleNotFoundError propagates, which matches the old behavior.
try:
    import ogr
except ModuleNotFoundError:
    from osgeo import ogr
import psycopg2 as pg
from ost.helpers.vector import get_proj4, reproject_geometry
logger = logging.getLogger(__name__)
# see if the pg-file is there
def pgHandler(dbConnectFile = '{}/.phiSAR/pgdb'.format(os.getenv("HOME"))):
    """
    This function connects to an existing PostGreSQL database,
    with the access parameters stored in the dbConnectFile as follows:

        "database name"
        "database user"
        "database password"
        "database host"
        "database port"

    :param dbConnectFile: path to the connect file
    :return: the psycopg2 database connection object
    :raises FileNotFoundError/IOError: when the connect file cannot be read
    """
    # FIX: the original logged a failure to open the file and then fell
    # through to read the unbound handle, crashing with NameError. The file
    # was also never closed. Now the handle is managed by `with` and the
    # original exception is re-raised after logging.
    try:
        with open(dbConnectFile) as f:
            lines = f.read().splitlines()
    except (FileNotFoundError, IOError):
        logger.info('ERROR: No PostGreSQL connection established. Make sure to configure a connection to phiSAR.')
        raise
    # The connect file holds one value per line, in this fixed order.
    dbname = lines[0]
    uname = lines[1]
    pwDb = lines[2]
    host = lines[3]
    port = lines[4]
    logger.info('Connecting to PostGreSQL database: {}'.format(dbname))
    dbConnect = pgConnect(uname, pwDb, dbname, host, port)
    return dbConnect
class pgConnect:
    """Thin wrapper around a psycopg2 connection to the Scihub catalogue database.

    Provides helpers to create/drop the Sentinel-1 metadata table, run ad-hoc
    SQL, import AOI geometries from a shapefile, and repair footprints that
    cross the antimeridian.
    """

    def __init__(self, uname=None, pword=None, dbname='sat', host='localhost', port='5432'):
        """
        Establish a connection to the Scihub-catalogue db.

        Username/password are prompted for interactively when not supplied.
        """
        # ask for username and password in case you have not defined as command line options
        if uname is None:
            uname = input(' Your PostGreSQL database username:')
        if pword is None:
            pword = getpass.getpass(' Your Your PostGreSQL database password:'.replace('Your Your', 'Your'))
        # try connecting
        try:
            self.connection = pg.connect(
                dbname=dbname, user=uname, host=host, password=pword, port=port)
            self.connection.autocommit = True
            self.cursor = self.connection.cursor()
        except pg.Error:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Failures are still only logged
            # (matching the original best-effort behavior); note that any
            # later use of this object will then fail with AttributeError
            # because no cursor was created.
            logger.info('Cannot connect to database')

    def pgCreateS1(self, tablename):
        """Create the Sentinel-1 metadata table *tablename* with the fixed schema below."""
        f_list = ('id serial PRIMARY KEY, identifier varchar(100), \
                   polarisation varchar(100), orbitdirection varchar(12), \
                   acquisitiondate date, relativeorbit smallint, \
                   orbitnumber integer, producttype varchar(4), \
                   slicenumber smallint, size varchar(12), \
                   beginposition timestamp, endposition timestamp, \
                   lastrelativeorbitnumber smallint, lastorbitnumber int, \
                   uuid varchar(40), platformidentifier varchar(10), \
                   missiondatatakeid integer, swathidentifer varchar(21), \
                   ingestiondate timestamp, sensoroperationalmode varchar(3), \
                   geometry geometry')
        sql_cmd = 'CREATE TABLE {} ({})'.format(tablename, f_list)
        self.cursor.execute(sql_cmd)

    def pgGetUUID(self, sceneID, tablename):
        """Return the product uuid stored for *sceneID* in *tablename*.

        Raises IndexError when the scene is not present in the table.
        """
        # FIX: the scene identifier is now passed as a bound parameter instead
        # of being string-formatted into the SQL (injection-safe). Table names
        # cannot be bound by psycopg2 and are still interpolated.
        sql_cmd = 'SELECT uuid FROM {} WHERE identifier = %s'.format(tablename)
        self.cursor.execute(sql_cmd, (sceneID,))
        uuid = self.cursor.fetchall()[0][0]
        return uuid

    def pgDrop(self, tablename):
        """Drop table *tablename* from the connected database."""
        sql_cmd = 'DROP TABLE {}'.format(tablename)
        self.cursor.execute(sql_cmd)

    def pgInsert(self, tablename, values):
        """
        This function inserts a table into the connected database object.

        *values* must be a pre-formatted SQL values tuple string, e.g. "(1, 'x')".
        """
        sql_cmd = 'INSERT INTO {} VALUES {}'.format(tablename, values)
        self.cursor.execute(sql_cmd)

    def pgSQL(self, sql):
        """
        This is a wrapper for a sql input that does get all responses.
        """
        self.cursor.execute(sql)
        return self.cursor.fetchall()

    def pgSQLnoResp(self, sql):
        """
        This is a wrapper for a sql input that does not get any response.
        """
        self.cursor.execute(sql)

    def shpGeom2pg(self, aoi, tablename):
        """
        This function is a wrapper to import a shapefile geometry to a PostGreSQL database.

        The table *tablename* is (re)created with (id, geometry) columns and one
        row per feature of the shapefile, reprojected to EPSG:4326 if needed.
        """
        sql_cmd = 'DROP TABLE IF EXISTS {}'.format(tablename)
        self.cursor.execute(sql_cmd)
        f_list = 'id smallint, geometry geometry'
        sql_cmd = 'CREATE TABLE {} ({})'.format(tablename, f_list)
        self.cursor.execute(sql_cmd)
        # Read the source projection from the shapefile's .prj sidecar file.
        prj_file = '{}.prj'.format(aoi[:-4])
        in_proj4 = get_proj4(prj_file)
        sf = ogr.Open(aoi)
        layer = sf.GetLayer(0)
        for i in range(layer.GetFeatureCount()):
            feature = layer.GetFeature(i)
            wkt = feature.GetGeometryRef().ExportToWkt()
            # Reproject to WGS84 lon/lat unless the input already is.
            if in_proj4 != '+proj=longlat +datum=WGS84 +no_defs':
                wkt = reproject_geometry(wkt, in_proj4, 4326)
            wkt = 'St_GeomFromText(\'{}\', 4326)'.format(wkt)
            values = '(\'{}\', {})'.format(i, wkt)
            sql_cmd = 'INSERT INTO {} VALUES {}'.format(tablename, values)
            self.cursor.execute(sql_cmd)

    def pgDateline(self, tablename, uuid):
        """
        This function splits the acquisition footprint
        into a geometry collection if it crosses the dateline.

        Only rows whose geometry intersects both the -90 and +90 meridians
        (i.e. footprints wrapping around the antimeridian) are rewritten.
        """
        # edited after https://www.mundialis.de/update-for-our-maps-mundialis-application-solves-dateline-wrap/
        sql_cmd = 'UPDATE {} SET (geometry) = \
                   (SELECT \
                      ST_SetSRID( \
                        ST_CollectionExtract( \
                          ST_AsText( \
                            ST_Split( \
                              ST_ShiftLongitude(geometry), \
                              ST_SetSRID( \
                                ST_MakeLine( \
                                  ST_MakePoint(180,-90), \
                                  ST_MakePoint(180,90) \
                                ), \
                                4326 \
                              ) \
                            ) \
                          ), \
                          3 \
                        ), \
                        4326 \
                      ) geometry \
                      FROM {} \
                      WHERE uuid = \'{}\' \
                   ) \
                   WHERE uuid = \'{}\' \
                   AND ( \
                     ST_Intersects( \
                       geometry, \
                       ST_SetSRID( \
                         ST_MakeLine( \
                           ST_MakePoint(-90,-90), \
                           ST_MakePoint(-90,90) \
                         ), \
                         4326 \
                       ) \
                     ) \
                     AND \
                     ST_Intersects( \
                       geometry, \
                       ST_SetSRID( \
                         ST_MakeLine( \
                           ST_MakePoint(90,-90), \
                           ST_MakePoint(90,90) \
                         ), \
                         4326 \
                       ) \
                     ) \
                   ) \
                   AND \
                   geometry IS NOT NULL'.format(tablename, tablename, uuid, uuid)
        self.cursor.execute(sql_cmd)
| StarcoderdataPython |
15471 | <gh_stars>1-10
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import os
import numpy as np
import pandas as pd
from .Error import NetworkInputError
from .Logger import FastTripsLogger
from .Route import Route
from .Stop import Stop
from .Transfer import Transfer
class TAZ(object):
"""
TAZ class.
One instance represents all of the Transportation Analysis Zones
as well as their access links and egress links.
.. todo:: This is really about the access and egress links; perhaps it should be renamed?
Stores access link information in :py:attr:`TAZ.walk_access`, and :py:attr:`TAZ.drive_access`,
both instances of :py:class:`pandas.DataFrame`.
"""
#: File with fasttrips walk access information.
#: See `walk_access specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/walk_access_ft.md>`_.
INPUT_WALK_ACCESS_FILE = "walk_access_ft.txt"
#: Walk access links column name: TAZ Identifier. String.
WALK_ACCESS_COLUMN_TAZ = 'taz'
#: Walk access links column name: Stop Identifier. String.
WALK_ACCESS_COLUMN_STOP = 'stop_id'
#: Walk access links column name: Direction (access or egress)
WALK_ACCESS_COLUMN_DIRECTION = "direction"
#: Walk access links column name: Walk Distance
WALK_ACCESS_COLUMN_DIST = 'dist'
#: fasttrips Walk access links column name: Elevation Gain, feet gained along link.
WALK_ACCESS_COLUMN_ELEVATION_GAIN = 'elevation_gain'
#: fasttrips Walk access links column name: Population Density, people per square mile. Float.
WALK_ACCESS_COLUMN_POPULATION_DENSITY = 'population_density'
#: fasttrips Walk access links column name: Employment Density, employees per square mile. Float.
WALK_ACCESS_COLUMN_EMPLOYMENT_DENSITY = 'employment_density'
#: fasttrips Walk access links column name: Retail Density, employees per square mile. Float.
# WALK_ACCESS_COLUMN_RETAIL_DENSITY = 'retail_density'
#: fasttrips Walk access links column name: Employment Density, employees per square mile. Float.
WALK_ACCESS_COLUMN_EMPLOYMENT_DENSITY = 'employment_density'
#: fasttrips Walk access links column name: Auto Capacity, vehicles per hour per mile. Float.
WALK_ACCESS_COLUMN_AUTO_CAPACITY = 'auto_capacity'
#: fasttrips Walk access links column name: Indirectness, ratio of Manhattan distance to crow-fly distance. Float.
WALK_ACCESS_COLUMN_INDIRECTNESS = 'indirectness'
# ========== Added by fasttrips =======================================================
#: Walk access links column name: TAZ Numerical Identifier. Int.
WALK_ACCESS_COLUMN_TAZ_NUM = 'taz_num'
#: Walk access links column name: Stop Numerical Identifier. Int.
WALK_ACCESS_COLUMN_STOP_NUM = 'stop_id_num'
#: Walk access links column name: Link walk time. This is a TimeDelta
WALK_ACCESS_COLUMN_TIME = 'time'
#: Walk access links column name: Link walk time in minutes. This is float.
WALK_ACCESS_COLUMN_TIME_MIN = 'time_min'
#: Walk acess cost column name: Link generic cost for accessing stop from TAZ. Float.
WALK_ACCESS_COLUMN_ACC_COST = 'access_cost'
#: Walk acess cost column name: Link generic cost for egressing to TAZ from stop. Float.
WALK_ACCESS_COLUMN_EGR_COST = 'egress_cost'
#: Walk access links column name: Supply mode. String.
WALK_ACCESS_COLUMN_SUPPLY_MODE = 'supply_mode'
#: Walk access links column name: Supply mode number. Int.
WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM = 'supply_mode_num'
#: File with fasttrips drive access information.
#: See `drive_access specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/drive_access_ft.md>`_.
INPUT_DRIVE_ACCESS_FILE = "drive_access_ft.txt"
#: Drive access links column name: TAZ Identifier. String.
DRIVE_ACCESS_COLUMN_TAZ = WALK_ACCESS_COLUMN_TAZ
#: Drive access links column name: Stop Identifier. String.
DRIVE_ACCESS_COLUMN_LOT_ID = 'lot_id'
#: Drive access links column name: Direction ('access' or 'egress')
DRIVE_ACCESS_COLUMN_DIRECTION = 'direction'
#: Drive access links column name: Drive distance
DRIVE_ACCESS_COLUMN_DISTANCE = 'dist'
#: Drive access links column name: Drive cost in cents (integer)
DRIVE_ACCESS_COLUMN_COST = 'cost'
#: Drive access links column name: Driving time in minutes between TAZ and lot (TimeDelta)
DRIVE_ACCESS_COLUMN_TRAVEL_TIME = 'travel_time'
#: Drive access links column name: Start time (e.g. time period these attributes apply), minutes after midnight
DRIVE_ACCESS_COLUMN_START_TIME_MIN = 'start_time_min'
#: Drive access links column name: Start time (e.g. time period these attributes apply). A DateTime instance
DRIVE_ACCESS_COLUMN_START_TIME = 'start_time'
#: Drive access links column name: End time (e.g. time period these attributes apply), minutes after midnight
DRIVE_ACCESS_COLUMN_END_TIME_MIN = 'end_time_min'
#: Drive access links column name: End time (e.g. time period these attributes apply). A DateTime instance
DRIVE_ACCESS_COLUMN_END_TIME = 'end_time'
#: fasttrips Drive access links column name: Elevation Gain, feet gained along link.
DRIVE_ACCESS_COLUMN_ELEVATION_GAIN = 'elevation_gain'
#: fasttrips Drive access links column name: Population Density, people per square mile. Float.
DRIVE_ACCESS_COLUMN_POPULATION_DENSITY = 'population_density'
#: fasttrips Drive access links column name: Retail Density, employees per square mile. Float.
DRIVE_ACCESS_COLUMN_RETAIL_DENSITY = 'retail_density'
#: fasttrips Drive access links column name: Auto Capacity, vehicles per hour per mile. Float.
DRIVE_ACCESS_COLUMN_AUTO_CAPACITY = 'auto_capacity'
#: fasttrips Drive access links column name: Indirectness, ratio of Manhattan distance to crow-fly distance. Float.
DRIVE_ACCESS_COLUMN_INDIRECTNESS = 'indirectness'
# ========== Added by fasttrips =======================================================
#: fasttrips These are the original attributes but renamed to be clear they are the drive component (as opposed to the walk)
DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE = 'drive_dist'
DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME = 'drive_travel_time'
#: Drive access links column name: Driving time in minutes between TAZ and lot (float)
DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME_MIN = 'drive_time_min'
#: fasttrips Drive access links column name: TAZ Numerical Identifier. Int.
DRIVE_ACCESS_COLUMN_TAZ_NUM = WALK_ACCESS_COLUMN_TAZ_NUM
#: fasttrips Drive access links column name: Stop Numerical Identifier. Int.
DRIVE_ACCESS_COLUMN_STOP = WALK_ACCESS_COLUMN_STOP
#: fasttrips Drive access links column name: Stop Numerical Identifier. Int.
DRIVE_ACCESS_COLUMN_STOP_NUM = WALK_ACCESS_COLUMN_STOP_NUM
#: fasttrips Drive access links column name: Walk distance from lot to transit. Miles. Float.
DRIVE_ACCESS_COLUMN_WALK_DISTANCE = 'walk_dist'
#: fasttrips Drive access links column name: Walk time from lot to transit. TimeDelta.
DRIVE_ACCESS_COLUMN_WALK_TIME = 'walk_time'
#: fasttrips Drive access links column name: Walk time from lot to transit. Int.
DRIVE_ACCESS_COLUMN_WALK_TIME_MIN = 'walk_time_min'
#: fasttrips Drive access links column name: Supply mode. String.
DRIVE_ACCESS_COLUMN_SUPPLY_MODE = WALK_ACCESS_COLUMN_SUPPLY_MODE
#: Drive access links column name: Supply mode number. Int.
DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM = WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM
#: File with fasttrips drive access points information.
#: See `Drive access points specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/drive_access_points_ft.md>`_.
INPUT_DAP_FILE = 'drive_access_points_ft.txt'
#: fasttrips DAP column name: Lot ID. String.
DAP_COLUMN_LOT_ID = DRIVE_ACCESS_COLUMN_LOT_ID
#: fasttrips DAP column name: Lot Latitude (WGS 84)
DAP_COLUMN_LOT_LATITUDE = 'lot_lat'
#: fasttrips DAP column name: Lot Longitude (WGS 84)
DAP_COLUMN_LOT_LONGITUDE = 'lot_lon'
#: fasttrips DAP column name: Name of the Lot. String.
DAP_COLUMN_NAME = 'name'
#: fasttrips DAP column name: Drop-Off. Boolean.
DAP_COLUMN_DROP_OFF = 'drop_off'
#: fasttrips DAP column name: Capacity (number of parking spaces)
DAP_COLUMN_CAPACITY = 'capacity'
#: fasttrips DAP column name: Hourly Cost in cents. Integer.
DAP_COLUMN_HOURLY_COST = 'hourly_cost'
#: fasttrips DAP column name: Maximum Daily Cost in cents. Integer.
DAP_COLUMN_MAXIMUM_COST = 'max_cost'
#: fasttrips DAP column name: Type
DAP_COLUMN_TYPE = 'type'
#: mode column
MODE_COLUMN_MODE = 'mode'
#: mode number
MODE_COLUMN_MODE_NUM = 'mode_num'
#: access and egress modes. First is default.
ACCESS_EGRESS_MODES = ["walk", "bike_own", "bike_share", "PNR", "KNR"]
#: Access mode: Walk
MODE_ACCESS_WALK = 101
#: Access mode: Bike (own)
MODE_ACCESS_BIKE_OWN = 102
#: Access mode: Bike (share)
MODE_ACCESS_BIKE_SHARE = 103
#: Access mode: Drive to PNR
MODE_ACCESS_PNR = 104
#: Access mode: Drive to KNR
MODE_ACCESS_KNR = 105
#: Egress mode: Walk
MODE_EGRESS_WALK = 201
#: Egress mode: Bike (own)
MODE_EGRESS_BIKE_OWN = 202
#: Egress mode: Bike (share)
MODE_EGRESS_BIKE_SHARE = 203
#: Egress mode: Drive to PNR
MODE_EGRESS_PNR = 204
#: Egress mode: Drive to KNR
MODE_EGRESS_KNR = 205
#: Access mode number list, in order of ACCESS_EGRESS_MODES
ACCESS_MODE_NUMS = [MODE_ACCESS_WALK,
MODE_ACCESS_BIKE_OWN, MODE_ACCESS_BIKE_SHARE,
MODE_ACCESS_PNR, MODE_ACCESS_KNR]
#: Egress mode number list, in order of ACCESS_EGRESS_MODES
EGRESS_MODE_NUMS = [MODE_EGRESS_WALK,
MODE_EGRESS_BIKE_OWN, MODE_EGRESS_BIKE_SHARE,
MODE_EGRESS_PNR, MODE_EGRESS_KNR]
#: Walk mode number list
WALK_MODE_NUMS = [MODE_ACCESS_WALK,
MODE_EGRESS_WALK]
#: Bike mode number list
BIKE_MODE_NUMS = [MODE_ACCESS_BIKE_OWN, MODE_ACCESS_BIKE_SHARE,
MODE_EGRESS_BIKE_OWN, MODE_EGRESS_BIKE_SHARE]
#: Drive mode number list
DRIVE_MODE_NUMS = [MODE_ACCESS_PNR, MODE_ACCESS_KNR,
MODE_EGRESS_PNR, MODE_EGRESS_KNR]
#: File with access/egress links for C++ extension
#: It's easier to pass it via a file rather than through the
#: initialize_fasttrips_extension() because of the strings involved, I think.
OUTPUT_ACCESS_EGRESS_FILE = "ft_intermediate_access_egress.txt"
def __init__(self, output_dir, gtfs, today, stops, transfers, routes):
    """
    Constructor.  Reads the TAZ access/egress data from the *gtfs* input and
    links it with the given *stops*, *transfers* and *routes* instances.

    Walk access links are required; drive access points (DAPs/lots) and drive
    access links are optional.  Raises NetworkInputError on invalid or
    inconsistent input.  Side effects: registers access/egress modes with
    *routes*, adds DAP/TAZ ids to *stops*, and writes the access/egress file
    for the C++ extension into *output_dir*.
    """
    from .Assignment import Assignment
    # Map access/egress mode names to numeric ids, e.g. "walk_access" -> 101.
    self.access_modes_df = pd.DataFrame(data={TAZ.MODE_COLUMN_MODE: TAZ.ACCESS_EGRESS_MODES,
                                              TAZ.MODE_COLUMN_MODE_NUM: TAZ.ACCESS_MODE_NUMS})
    self.access_modes_df[TAZ.MODE_COLUMN_MODE] = self.access_modes_df[TAZ.MODE_COLUMN_MODE] \
        .apply(lambda x: '%s_%s' % (x, Route.MODE_TYPE_ACCESS))
    self.egress_modes_df = pd.DataFrame(data={TAZ.MODE_COLUMN_MODE: TAZ.ACCESS_EGRESS_MODES,
                                              TAZ.MODE_COLUMN_MODE_NUM: TAZ.EGRESS_MODE_NUMS})
    self.egress_modes_df[TAZ.MODE_COLUMN_MODE] = self.egress_modes_df[TAZ.MODE_COLUMN_MODE] \
        .apply(lambda x: '%s_%s' % (x, Route.MODE_TYPE_EGRESS))
    routes.add_access_egress_modes(self.access_modes_df, self.egress_modes_df)

    #: Walk access links table. Make sure TAZ ID and stop ID are read as strings.
    self.walk_access_df = gtfs.get(TAZ.INPUT_WALK_ACCESS_FILE)

    # verify required columns are present
    walk_access_cols = list(self.walk_access_df.columns.values)
    assert (TAZ.WALK_ACCESS_COLUMN_TAZ in walk_access_cols)
    assert (TAZ.WALK_ACCESS_COLUMN_STOP in walk_access_cols)
    assert (TAZ.WALK_ACCESS_COLUMN_DIRECTION in walk_access_cols)
    assert (TAZ.WALK_ACCESS_COLUMN_DIST in walk_access_cols)

    # printing this before setting index
    FastTripsLogger.debug("=========== WALK ACCESS ===========\n" + str(self.walk_access_df.head()))
    FastTripsLogger.debug("As read\n" + str(self.walk_access_df.dtypes))

    # Verify direction is valid
    invalid_direction = self.walk_access_df.loc[
        self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_DIRECTION].isin(["access", "egress"]) == False]
    if len(invalid_direction) > 0:
        error_msg = "Invalid direction in walk access links: \n%s" % str(invalid_direction)
        FastTripsLogger.fatal(error_msg)
        raise NetworkInputError(TAZ.INPUT_WALK_ACCESS_FILE, error_msg)

    # TODO: remove? Or put walk speed some place?
    # Convert distance to minutes at an implied walk speed of 2.7 mph.
    self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_TIME_MIN] = self.walk_access_df[
        TAZ.WALK_ACCESS_COLUMN_DIST] * 60.0 / 2.7
    # convert time column from float to timedelta
    self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_TIME] = \
        self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_TIME_MIN].map(lambda x: datetime.timedelta(minutes=x))

    # make sure WALK_ACCESS_COLUMN_TAZ/WALK_ACCESS_COLUMN_DIST is unique
    walk_access_dupes = self.walk_access_df.duplicated(subset=[TAZ.WALK_ACCESS_COLUMN_TAZ,
                                                               TAZ.WALK_ACCESS_COLUMN_STOP,
                                                               TAZ.WALK_ACCESS_COLUMN_DIRECTION], keep=False)
    if walk_access_dupes.sum() > 0:
        self.walk_access_df["duplicates"] = walk_access_dupes
        error_msg = "Duplicate taz/stop pairs in walk access links: \n%s" % str(
            self.walk_access_df.loc[self.walk_access_df["duplicates"]])
        FastTripsLogger.fatal(error_msg)
        raise NetworkInputError(TAZ.INPUT_WALK_ACCESS_FILE, error_msg)

    FastTripsLogger.debug("Final\n" + str(self.walk_access_df.dtypes))
    FastTripsLogger.info("Read %7d %15s from %25s" %
                         (len(self.walk_access_df), "walk access", TAZ.INPUT_WALK_ACCESS_FILE))

    self.dap_df = gtfs.get(TAZ.INPUT_DAP_FILE)
    if not self.dap_df.empty:
        # verify required columns are present
        dap_cols = list(self.dap_df.columns.values)
        assert (TAZ.DAP_COLUMN_LOT_ID in dap_cols)
        assert (TAZ.DAP_COLUMN_LOT_LATITUDE in dap_cols)
        assert (TAZ.DAP_COLUMN_LOT_LONGITUDE in dap_cols)
        # default capacity = 0
        if TAZ.DAP_COLUMN_CAPACITY not in dap_cols:
            self.dap_df[TAZ.DAP_COLUMN_CAPACITY] = 0
        # default drop-off = True
        if TAZ.DAP_COLUMN_DROP_OFF not in dap_cols:
            self.dap_df[TAZ.DAP_COLUMN_DROP_OFF] = True
    else:
        self.dap_df = pd.DataFrame()
    FastTripsLogger.debug("=========== DAPS ===========\n" + str(self.dap_df.head()))
    FastTripsLogger.debug("\n" + str(self.dap_df.dtypes))
    FastTripsLogger.info("Read %7d %15s from %25s" %
                         (len(self.dap_df), "DAPs", TAZ.INPUT_DAP_FILE))

    #: Drive access links table. Make sure TAZ ID and lot ID are read as strings.
    self.drive_access_df = gtfs.get(TAZ.INPUT_DRIVE_ACCESS_FILE)
    if not self.drive_access_df.empty:
        # verify required columns are present
        drive_access_cols = list(self.drive_access_df.columns.values)
        assert (TAZ.DRIVE_ACCESS_COLUMN_TAZ in drive_access_cols)
        assert (TAZ.DRIVE_ACCESS_COLUMN_LOT_ID in drive_access_cols)
        assert (TAZ.DRIVE_ACCESS_COLUMN_DIRECTION in drive_access_cols)
        assert (TAZ.DRIVE_ACCESS_COLUMN_DISTANCE in drive_access_cols)
        assert (TAZ.DRIVE_ACCESS_COLUMN_COST in drive_access_cols)
        assert (TAZ.DRIVE_ACCESS_COLUMN_TRAVEL_TIME in drive_access_cols)
        assert (TAZ.DRIVE_ACCESS_COLUMN_START_TIME in drive_access_cols)
        assert (TAZ.DRIVE_ACCESS_COLUMN_END_TIME in drive_access_cols)

        # printing this before setting index
        FastTripsLogger.debug("=========== DRIVE ACCESS ===========\n" + str(self.drive_access_df.head()))
        FastTripsLogger.debug("As read\n" + str(self.drive_access_df.dtypes))

        # the distance and times here are for DRIVING -- rename to make that explicit
        self.drive_access_df.rename(
            columns={TAZ.DRIVE_ACCESS_COLUMN_DISTANCE: TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE,
                     TAZ.DRIVE_ACCESS_COLUMN_TRAVEL_TIME: TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME},
            inplace=True)
        self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME_MIN] = \
            self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME]

        # if there are any that go past midnight, duplicate
        sim_day_end = Assignment.NETWORK_BUILD_DATE_START_TIME + datetime.timedelta(days=1)
        dupes = self.drive_access_df.loc[self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_END_TIME] > sim_day_end,
                                         :].copy()
        if len(dupes) > 0:
            # e.g. 18:00 - 27:00
            # dupe: 00:00 - 3:00
            dupes.loc[dupes[
                TAZ.DRIVE_ACCESS_COLUMN_END_TIME] > sim_day_end, TAZ.DRIVE_ACCESS_COLUMN_START_TIME] = Assignment.NETWORK_BUILD_DATE_START_TIME
            dupes.loc[dupes[TAZ.DRIVE_ACCESS_COLUMN_END_TIME] > sim_day_end, TAZ.DRIVE_ACCESS_COLUMN_END_TIME] = \
                dupes[TAZ.DRIVE_ACCESS_COLUMN_END_TIME] - datetime.timedelta(days=1)
            # orig: 18:00 - 24:00
            self.drive_access_df.loc[self.drive_access_df[
                TAZ.DRIVE_ACCESS_COLUMN_END_TIME] > sim_day_end, TAZ.DRIVE_ACCESS_COLUMN_END_TIME] = sim_day_end
            FastTripsLogger.debug(
                "Added %d morning hour drive access links. Head:\n%s" % (len(dupes), dupes.head().to_string()))
            # combine
            self.drive_access_df = self.drive_access_df.append(dupes)

        # drive access period start/end time: float version (minutes after simulation day start)
        self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN] = \
            (self.drive_access_df[
                 TAZ.DRIVE_ACCESS_COLUMN_START_TIME] - Assignment.NETWORK_BUILD_DATE_START_TIME) / np.timedelta64(1, 'm')
        self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN] = \
            (self.drive_access_df[
                 TAZ.DRIVE_ACCESS_COLUMN_END_TIME] - Assignment.NETWORK_BUILD_DATE_START_TIME) / np.timedelta64(1, 'm')

        # convert time column from number to timedelta
        self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME] = \
            self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME_MIN].map(
                lambda x: datetime.timedelta(minutes=float(x)))

        # need PNRs and KNRs - get them from the dap
        knr_dap_df = self.dap_df.loc[self.dap_df[TAZ.DAP_COLUMN_DROP_OFF] == True].copy()
        pnr_dap_df = self.dap_df.loc[self.dap_df[TAZ.DAP_COLUMN_CAPACITY] > 0].copy()
        knr_dap_df['dap_type'] = 'KNR'
        pnr_dap_df['dap_type'] = 'PNR'
        self.drive_access_df = pd.merge(left=self.drive_access_df,
                                        right=pd.concat([knr_dap_df, pnr_dap_df], axis=0),
                                        on=TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,
                                        how='left')
        # look for required column being null
        lots_not_found = self.drive_access_df.loc[pd.isnull(self.drive_access_df[TAZ.DAP_COLUMN_LOT_LATITUDE])]
        if len(lots_not_found) > 0:
            error_msg = "Found %d drive access links in %s with lots not specified in %s" % \
                        (len(lots_not_found), TAZ.INPUT_DRIVE_ACCESS_FILE, TAZ.INPUT_DAP_FILE)
            FastTripsLogger.fatal(error_msg)
            FastTripsLogger.fatal("\nFirst five drive access links with lots not found:\n%s" % \
                                  str(lots_not_found.head().to_string()))
            raise NetworkInputError(TAZ.INPUT_DAP_FILE, error_msg)

        # supply mode is e.g. "PNR_access", "KNR_egress"
        self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE] = \
            self.drive_access_df['dap_type'] + '_' + \
            self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DIRECTION]
        # done with this
        self.drive_access_df.drop(['dap_type'], axis=1, inplace=True)

        # We're going to join this with stops to get drive-to-stop
        drive_access = self.drive_access_df.loc[self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DIRECTION] == 'access']
        drive_egress = self.drive_access_df.loc[self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DIRECTION] == 'egress']

        # join with transfers to go from taz -> lot -> stop
        drive_access = pd.merge(left=drive_access,
                                right=transfers.transfers_df,
                                left_on=TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,
                                right_on=Transfer.TRANSFERS_COLUMN_FROM_STOP,
                                how='left')
        drive_access[TAZ.DRIVE_ACCESS_COLUMN_STOP] = drive_access[Transfer.TRANSFERS_COLUMN_TO_STOP]
        # join with transfers to go from stop -> lot -> taz
        drive_egress = pd.merge(left=drive_egress,
                                right=transfers.transfers_df,
                                left_on=TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,
                                right_on=Transfer.TRANSFERS_COLUMN_TO_STOP,
                                how='left')
        drive_egress[TAZ.DRIVE_ACCESS_COLUMN_STOP] = drive_egress[Transfer.TRANSFERS_COLUMN_FROM_STOP]
        self.drive_access_df = pd.concat([drive_access, drive_egress], axis=0)

        # drop redundant columns
        # TODO: assuming min_transfer_type and transfer_type from GTFS aren't relevant here, since
        # the time and dist are what matter.
        # Assuming schedule_precedence doesn't make sense in the drive access/egress context
        self.drive_access_df.drop([Transfer.TRANSFERS_COLUMN_FROM_STOP,
                                   Transfer.TRANSFERS_COLUMN_TO_STOP,
                                   Transfer.TRANSFERS_COLUMN_TRANSFER_TYPE,
                                   Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME,
                                   Transfer.TRANSFERS_COLUMN_SCHEDULE_PRECEDENCE,
                                   Transfer.TRANSFERS_COLUMN_PENALTY], axis=1, inplace=True)
        # not relevant for drive access
        if Transfer.TRANSFERS_COLUMN_FROM_ROUTE in list(self.drive_access_df.columns.values):
            self.drive_access_df.drop([Transfer.TRANSFERS_COLUMN_FROM_ROUTE], axis=1, inplace=True)
        if Transfer.TRANSFERS_COLUMN_TO_ROUTE in list(self.drive_access_df.columns.values):
            self.drive_access_df.drop([Transfer.TRANSFERS_COLUMN_TO_ROUTE], axis=1, inplace=True)
        if Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME_MIN in list(self.drive_access_df.columns.values):
            self.drive_access_df.drop([Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME_MIN], axis=1, inplace=True)

        # some may have no lot to stop connections -- check for null stop ids
        null_stop_ids = self.drive_access_df.loc[pd.isnull(self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_STOP])]
        if len(null_stop_ids) > 0:
            FastTripsLogger.warn("Dropping %d drive links that don't connect to stops:\n%s" % (
                len(null_stop_ids), str(null_stop_ids)))
            # drop them
            self.drive_access_df = self.drive_access_df.loc[
                pd.notnull(self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_STOP])]

        # rename walk attributes to be clear
        self.drive_access_df.rename(
            columns={
                Transfer.TRANSFERS_COLUMN_DISTANCE: TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE,
                Transfer.TRANSFERS_COLUMN_TIME: TAZ.DRIVE_ACCESS_COLUMN_WALK_TIME,
                Transfer.TRANSFERS_COLUMN_TIME_MIN: TAZ.DRIVE_ACCESS_COLUMN_WALK_TIME_MIN},
            inplace=True)

        # add generic distance and time (drive leg + walk leg)
        self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DISTANCE] = \
            self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE] + \
            self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE]
        self.drive_access_df["time_min"] = self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_WALK_TIME_MIN] + \
                                           self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME_MIN]

        FastTripsLogger.debug("Final (%d) types:\n%s\nhead:\n%s" % (
            len(self.drive_access_df), str(self.drive_access_df.dtypes), str(self.drive_access_df.head())))
        FastTripsLogger.info("Read %7d %15s from %25s" %
                             (len(self.drive_access_df), "drive access", TAZ.INPUT_DRIVE_ACCESS_FILE))
        self.has_drive_access = True
    else:
        self.has_drive_access = False
        self.drive_access_df = pd.DataFrame(columns=[TAZ.DRIVE_ACCESS_COLUMN_TAZ, TAZ.DRIVE_ACCESS_COLUMN_LOT_ID])
        FastTripsLogger.debug("=========== NO DRIVE ACCESS ===========\n")

    # add DAPs IDs and TAZ IDs to stop ID list
    stops.add_daps_tazs_to_stops(self.drive_access_df[[TAZ.DRIVE_ACCESS_COLUMN_LOT_ID]],
                                 TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,
                                 pd.concat([self.walk_access_df[[TAZ.WALK_ACCESS_COLUMN_TAZ]],
                                            self.drive_access_df[[TAZ.DRIVE_ACCESS_COLUMN_TAZ]]], axis=0),
                                 TAZ.WALK_ACCESS_COLUMN_TAZ)

    # transfers can add stop numeric IDs now that DAPs are available
    transfers.add_numeric_stop_id(stops)

    # Add numeric stop ID to walk access links
    self.walk_access_df = stops.add_numeric_stop_id(self.walk_access_df,
                                                    id_colname=TAZ.WALK_ACCESS_COLUMN_STOP,
                                                    numeric_newcolname=TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
                                                    warn=True,
                                                    warn_msg="Numeric stop id not found for walk access links")
    # Add TAZ stop ID to walk and drive access links
    self.walk_access_df = stops.add_numeric_stop_id(self.walk_access_df,
                                                    id_colname=TAZ.WALK_ACCESS_COLUMN_TAZ,
                                                    numeric_newcolname=TAZ.WALK_ACCESS_COLUMN_TAZ_NUM)
    # These have direction now. Set supply mode string
    self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE] = "walk_" + self.walk_access_df[
        TAZ.WALK_ACCESS_COLUMN_DIRECTION]
    self.walk_access_df = routes.add_numeric_mode_id(self.walk_access_df,
                                                     id_colname=TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE,
                                                     numeric_newcolname=TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM)
    if self.has_drive_access:
        # (removed a leftover hard-coded debug print for stop "9065" here)
        self.drive_access_df = stops.add_numeric_stop_id(self.drive_access_df,
                                                         id_colname=TAZ.DRIVE_ACCESS_COLUMN_STOP,
                                                         numeric_newcolname=TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
                                                         warn=True,
                                                         warn_msg="Drive access stops missing ids")
        self.drive_access_df = stops.add_numeric_stop_id(self.drive_access_df,
                                                         id_colname=TAZ.DRIVE_ACCESS_COLUMN_TAZ,
                                                         numeric_newcolname=TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM)
        self.drive_access_df = routes.add_numeric_mode_id(self.drive_access_df,
                                                          id_colname=TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE,
                                                          numeric_newcolname=TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM)

    # warn on stops that have no walk access
    self.warn_on_stops_without_walk_access(stops)

    # write this to communicate to extension
    self.write_access_egress_for_extension(output_dir)
def add_distance(self, links_df, dist_col):
    """
    Sets distance column value for access and egress links.

    Rows of *links_df* are matched on (A_id_num, B_id_num, mode_num).  For
    access links A is the TAZ and B is the stop; for egress links the pair is
    swapped, which is why each merge below appears twice with the right_on
    key order reversed.

    .. todo:: This neglects the start_time/end_time issue. Don't use without fixing.
    """
    ############## walk ##############
    walk_dists = self.walk_access_df[[TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
                                      TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
                                      TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM,
                                      TAZ.WALK_ACCESS_COLUMN_DIST]].copy()
    walk_dists.rename(columns={TAZ.WALK_ACCESS_COLUMN_DIST: "walk_dist"}, inplace=True)
    # walk access: A=TAZ, B=stop
    links_df = pd.merge(left=links_df,
                        left_on=["A_id_num", "B_id_num", "mode_num"],
                        right=walk_dists,
                        right_on=[TAZ.WALK_ACCESS_COLUMN_TAZ_NUM, TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
                                  TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM],
                        how="left")
    links_df.loc[pd.notnull(links_df["walk_dist"]), dist_col] = links_df["walk_dist"]
    # drop the join columns so the next merge starts clean
    links_df.drop([TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
                   TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
                   TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM,
                   "walk_dist"], axis=1, inplace=True)
    # walk egress: A=stop, B=TAZ (right_on key order is swapped on purpose)
    links_df = pd.merge(left=links_df,
                        left_on=["A_id_num", "B_id_num", "mode_num"],
                        right=walk_dists,
                        right_on=[TAZ.WALK_ACCESS_COLUMN_STOP_NUM, TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
                                  TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM],
                        how="left")
    links_df.loc[pd.notnull(links_df["walk_dist"]), dist_col] = links_df["walk_dist"]
    links_df.drop([TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
                   TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
                   TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM,
                   "walk_dist"], axis=1, inplace=True)
    ############## drive ##############
    FastTripsLogger.debug("drive_access_df=\n%s" % self.drive_access_df.head())
    if len(self.drive_access_df) > 0:
        drive_dists = self.drive_access_df[[TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
                                            TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
                                            TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM,
                                            TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE,
                                            TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE,
                                            TAZ.DRIVE_ACCESS_COLUMN_START_TIME,
                                            TAZ.DRIVE_ACCESS_COLUMN_END_TIME]].copy()
        # total distance = drive leg + walk leg
        drive_dists["drive_total_dist"] = drive_dists[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE] + drive_dists[
            TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE]
        drive_dists.drop([TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE, TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE], axis=1,
                         inplace=True)
        # NOTE(review): drive_dists still carries start_time/end_time; they are
        # merged into links_df twice below and never dropped, so the second
        # merge will suffix them (_x/_y) -- confirm this is intended.
        # drive access: A=TAZ, B=stop
        links_df = pd.merge(left=links_df,
                            left_on=["A_id_num", "B_id_num", "mode_num"],
                            right=drive_dists,
                            right_on=[TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM, TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
                                      TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM],
                            how="left")
        # TODO: drop those with drive access links covering different times
        links_df.loc[pd.notnull(links_df["drive_total_dist"]), dist_col] = links_df["drive_total_dist"]
        links_df.drop([TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
                       TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
                       TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM,
                       "drive_total_dist"], axis=1, inplace=True)
        # drive egress: A=stop, B=TAZ
        links_df = pd.merge(left=links_df,
                            left_on=["A_id_num", "B_id_num", "mode_num"],
                            right=drive_dists,
                            right_on=[TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM, TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
                                      TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM],
                            how="left")
        links_df.loc[pd.notnull(links_df["drive_total_dist"]), dist_col] = links_df["drive_total_dist"]
        links_df.drop([TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
                       TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
                       TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM,
                       "drive_total_dist"], axis=1, inplace=True)
    FastTripsLogger.debug("links_df=\n%s" % links_df.head(30).to_string())
    return links_df
def warn_on_stops_without_walk_access(self, stops):
    """
    Log a warning listing every stop id that has no walk access link at all.
    """
    # Left-join every stop against the walk access links (merge keys are the
    # shared column names); stops with no match end up with a null TAZ.
    walk_links = self.walk_access_df[[TAZ.WALK_ACCESS_COLUMN_STOP, TAZ.WALK_ACCESS_COLUMN_TAZ]]
    joined = pd.merge(left=stops.stops_df[[Stop.STOPS_COLUMN_STOP_ID]],
                      right=walk_links,
                      how="left")
    missing = joined.loc[pd.isnull(joined[TAZ.WALK_ACCESS_COLUMN_TAZ])]
    if len(missing) > 0:
        FastTripsLogger.warn("The following %d stop ids have no walk access: \n%s" % (
            len(missing), missing.to_string()))
def write_access_egress_for_extension(self, output_dir):
    """
    Write the access and egress links to a single output file for the C++ extension to read.

    The output is a whitespace-delimited "long" table with one row per
    (taz num, supply mode num, stop num, start/end time window, attribute
    name, attribute value) tuple, produced by stacking the walk and drive
    link tables.

    It's in this form because I'm not sure how to pass the strings to C++ in
    Assignment.initialize_fasttrips_extension so I know that's inconsistent, but it's a
    time sink to investigate, so I'll leave this for now

    .. todo:: clean this up? Rename intermediate files (they're not really output)
    """
    # ========== Walk access/egres =================================================
    # start with all walk columns
    self.walk_df = self.walk_access_df.copy()
    # drop the redundant (string) columns; only numeric values survive the stack below
    drop_fields = [TAZ.WALK_ACCESS_COLUMN_TAZ,  # use numerical version
                   TAZ.WALK_ACCESS_COLUMN_STOP,  # use numerical version
                   TAZ.WALK_ACCESS_COLUMN_DIRECTION,  # it's in the supply mode num
                   TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE,  # use numerical version
                   TAZ.WALK_ACCESS_COLUMN_TIME,  # use numerical version
                   ]
    # we can only drop fields that are in the dataframe
    walk_fields = list(self.walk_df.columns.values)
    valid_drop_fields = []
    for field in drop_fields:
        if field in walk_fields: valid_drop_fields.append(field)
    self.walk_df.drop(valid_drop_fields, axis=1, inplace=True)
    # make walk access valid all times -- need this for consistency with drive links
    self.walk_df[TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN] = 0.0
    self.walk_df[TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN] = 60.0 * 24.0
    # the index is TAZ num, supply mode num, and stop num
    self.walk_df.set_index([TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
                            TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM,
                            TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
                            TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN,
                            TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN], inplace=True)
    # ========== Drive access/egres =================================================
    self.drive_df = self.drive_access_df.copy()
    drive_fields = list(self.drive_df.columns.values)
    # drop some of the attributes
    drop_fields = [TAZ.DRIVE_ACCESS_COLUMN_TAZ,  # use numerical version
                   TAZ.DRIVE_ACCESS_COLUMN_STOP,  # use numerical version
                   TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE,  # use numerical version
                   TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME,  # use numerical version
                   TAZ.DRIVE_ACCESS_COLUMN_START_TIME,  # use numerical version
                   TAZ.DRIVE_ACCESS_COLUMN_END_TIME,  # use numerical version
                   TAZ.DRIVE_ACCESS_COLUMN_WALK_TIME,  # use numerical version
                   TAZ.DRIVE_ACCESS_COLUMN_DIRECTION,  # redundant with supply mode
                   TAZ.DAP_COLUMN_DROP_OFF,  # redundant with supply mode
                   TAZ.DAP_COLUMN_LOT_LATITUDE,  # probably not useful
                   TAZ.DAP_COLUMN_LOT_LONGITUDE,  # probably not useful
                   TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,  # probably not useful
                   ]
    valid_drop_fields = []
    for field in drop_fields:
        if field in drive_fields: valid_drop_fields.append(field)
    self.drive_df.drop(valid_drop_fields, axis=1, inplace=True)
    # the index is TAZ num, supply mode num, and stop num
    if len(self.drive_df) > 0:
        self.drive_df.set_index([TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
                                 TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM,
                                 TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
                                 TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN,
                                 TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN], inplace=True)
        # stack() this will make it so beyond taz num, supply mode num, and stop num
        # the remaining columns collapse to variable name, variable value
        # put walk and drive together
        access_df = pd.concat([self.walk_df.stack(), self.drive_df.stack()], axis=0).to_frame()
    else:
        access_df = self.walk_df.stack().to_frame()
    access_df.reset_index(inplace=True)
    # rename from these default column names
    access_df.rename(columns={"level_3": "attr_name", 0: "attr_value"}, inplace=True)
    # make attr_value a float instead of an object
    access_df["attr_value"] = access_df["attr_value"].astype(float)
    FastTripsLogger.debug("\n" + str(access_df.head()))
    FastTripsLogger.debug("\n" + str(access_df.tail()))
    # Check for null stop ids
    null_stop_ids = access_df.loc[pd.isnull(access_df["stop_id_num"])]
    if len(null_stop_ids) > 0:
        FastTripsLogger.warn("write_access_egress_for_extension null_stop_ids:\n%s" % str(null_stop_ids))
        # for now, drop rows with null stop id nums
        access_df = access_df.loc[pd.notnull(access_df["stop_id_num"])]
    access_df["stop_id_num"] = access_df["stop_id_num"].astype(int)
    access_df.to_csv(os.path.join(output_dir, TAZ.OUTPUT_ACCESS_EGRESS_FILE),
                     sep=" ", index=False)
    FastTripsLogger.debug("Wrote %s" % os.path.join(output_dir, TAZ.OUTPUT_ACCESS_EGRESS_FILE))
| StarcoderdataPython |
import logging
import logging.config
import logging.handlers
import os

from src import const
class Log:
    """WalBot logging façade: configures the "WalBot" logger with console and
    rotating file handlers and exposes the usual level methods plus the
    custom DEBUG2/DEBUG3 verbosity levels."""

    def debug2(self, msg, *args, **kwargs):
        """Log with severity 'DEBUG2'."""
        self.log.log(const.LogLevel.DEBUG2, msg, *args, **kwargs)

    def debug3(self, msg, *args, **kwargs):
        """Log with severity 'DEBUG3'."""
        self.log.log(const.LogLevel.DEBUG3, msg, *args, **kwargs)

    def __init__(self):
        logging.config.dictConfig({
            'version': 1,
            'disable_existing_loggers': True,
        })
        # Register the custom verbosity level names.
        logging.addLevelName(const.LogLevel.DEBUG2, "DEBUG2")
        logging.addLevelName(const.LogLevel.DEBUG3, "DEBUG3")
        # LOGGERS
        self.log = logging.getLogger("WalBot")
        self.log.setLevel(const.LogLevel.DEBUG3)
        # FORMATTERS (shared by every handler)
        self._formatter = logging.Formatter(const.LOGGING_FORMAT)
        # HANDLERS: console first
        console_handler = logging.StreamHandler()
        console_handler.setLevel(const.LogLevel.DEBUG)
        console_handler.setFormatter(self._formatter)
        self.log.addHandler(console_handler)
        # Create logs folder
        if not os.path.exists(const.LOGS_DIRECTORY):
            os.makedirs(const.LOGS_DIRECTORY)
        # Rotating file handlers (logs/error.log and logs/walbot.log)
        self._add_file_handler("error.log", const.LogLevel.ERROR)
        self._add_file_handler("walbot.log", const.LogLevel.DEBUG)
        # Expose the standard level methods directly on this wrapper
        self.debug = self.log.debug
        self.info = self.log.info
        self.error = self.log.error
        self.warning = self.log.warning
        self.info("Logging system is set up")

    def _add_file_handler(self, filename, level):
        """Attach a rotating file handler under LOGS_DIRECTORY at *level*."""
        handler = logging.handlers.RotatingFileHandler(
            os.path.join(const.LOGS_DIRECTORY, filename), encoding="utf-8",
            maxBytes=const.MAX_LOG_FILESIZE, backupCount=20)
        handler.setLevel(level)
        handler.setFormatter(self._formatter)
        self.log.addHandler(handler)
# Module-level singleton; the rest of the bot imports this `log` instance.
log = Log()
| StarcoderdataPython |
67233 | <filename>Libraries/Python.framework/Versions/2.7/lib/python2.7/site-packages/pydicom-0.9.8-py2.7.egg/dicom/test/test_charset.py
# -*- coding: latin_1 -*-
# test_charset.py
"""unittest cases for dicom.charset module"""
# Copyright (c) 2008 <NAME>
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import unittest
import dicom
import os.path
from pkg_resources import Requirement, resource_filename
# Locate the sample DICOM files shipped inside the pydicom distribution.
testcharset_dir = resource_filename(Requirement.parse("pydicom"),
                                    "dicom/testcharsetfiles")
# Files exercising specific character sets.
latin1_file = os.path.join(testcharset_dir, "chrFren.dcm")
jp_file = os.path.join(testcharset_dir, "chrH31.dcm")  # NOTE: not used by the tests below
multiPN_file = os.path.join(testcharset_dir, "chrFrenMulti.dcm")
sq_encoding_file = os.path.join(testcharset_dir, "chrSQEncoding.dcm")
test_dir = resource_filename(Requirement.parse("pydicom"), "dicom/testfiles")
# Control file with no special characters.
normal_file = os.path.join(test_dir, "CT_small.dcm")
class charsetTests(unittest.TestCase):
    """Decoding tests for DICOM files that use specific character sets."""

    def testLatin1(self):
        """charset: can read and decode latin_1 file........................"""
        dataset = dicom.read_file(latin1_file)
        dataset.decode()
        # Converting the name to a string must not raise a unicode encode error.
        expected = u'Buc^J\xe9r\xf4me'
        got = dataset.PatientName
        self.assertEqual(expected, got,
                         "Expected %r, got %r" % (expected, got))

    def testNestedCharacterSets(self):
        """charset: can read and decode SQ with different encodings........."""
        dataset = dicom.read_file(sq_encoding_file)
        dataset.decode()
        # The datasets nested inside the SQ cannot be decoded with the default
        # encoding nor with UTF-8 (the parent dataset's encoding); decoding
        # must honor each nested dataset's own (0008,0005) tag instead.
        expected = u'\uff94\uff8f\uff80\uff9e^\uff80\uff9b\uff73=\u5c71\u7530^\u592a\u90ce=\u3084\u307e\u3060^\u305f\u308d\u3046'
        got = dataset[0x32, 0x1064][0].PatientName
        self.assertEqual(expected, got,
                         "Expected %r, got %r" % (expected, got))

    def testStandardFile(self):
        """charset: can read and decode standard file without special char.."""
        dataset = dicom.read_file(normal_file)
        dataset.decode()

    def testMultiPN(self):
        """charset: can decode file with multi-valued data elements........."""
        dataset = dicom.read_file(multiPN_file)
        dataset.decode()
if __name__ == "__main__":
    # This is called if run alone, but not if loaded through run_tests.py
    # If not run from the directory where the sample images are,
    # then need to switch there
    import sys
    import os
    import os.path
    dir_name = os.path.dirname(sys.argv[0])
    save_dir = os.getcwd()
    if dir_name:
        os.chdir(dir_name)
    os.chdir("../testfiles")
    try:
        # exit=False (available since Python 2.7) keeps unittest.main() from
        # calling sys.exit(), so the working-directory restore below actually
        # runs; previously the final os.chdir(save_dir) was unreachable.
        unittest.main(exit=False)
    finally:
        os.chdir(save_dir)
| StarcoderdataPython |
27737 | #!/usr/bin/env python
import sys
sys.path.insert(1, "..")
from SOAPpy.Errors import Error
from SOAPpy.Parser import parseSOAPRPC
# SOAP envelope with a stray <ErrorString> element after the Body; the parser
# is expected to reject it with a specific error message.
original = """<?xml version="1.0"?>
<SOAP-ENV:Envelope
    SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
    xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
    xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body>
<doSingleRecord SOAP-ENC:root="1">
</doSingleRecord>
</SOAP-ENV:Body>
<ErrorString>The CustomerID tag could not be found or the number contained in the tag was invalid</ErrorString></SOAP-ENV:Envelope>
"""

# NOTE: Python 2 syntax throughout (except/raise/print statement forms).
try:
    parseSOAPRPC(original, attrs = 1)
except Error, e:
    # The parser must reject the stray element with exactly this message.
    if e.msg != "expected nothing, got `ErrorString'":
        raise AssertionError, "Incorrect error message generated: " + e.msg
else:
    # Parsing succeeding at all is also a failure.
    raise AssertionError, "Incorrect error message generated"
print "Success"
| StarcoderdataPython |
73707 | <reponame>facebookresearch/worldsheet<filename>mmf/neural_rendering/metrics/perc_sim.py<gh_stars>10-100
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. All rights reserved.
# The code in this file is heavily based on the code
# from in https://github.com/richzhang/PerceptualSimilarity
# which is available under the following BSD Licence:
# Copyright (c) 2018, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import namedtuple
import torch
import torch.nn as nn
from torchvision import models
def normalize_tensor(in_feat, eps=1e-10):
    """L2-normalize *in_feat* along the channel dimension (dim 1).

    *eps* guards against division by zero for all-zero channel vectors.
    Uses keepdim=True instead of the previous hard-coded 4-D reshape, so any
    tensor with rank >= 2 works; behavior for (N, C, X, Y) input is unchanged.
    """
    norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True))
    # norm_factor broadcasts back over the channel dimension.
    return in_feat / (norm_factor + eps)
def cos_sim(in0, in1):
    """Mean cosine similarity between channel vectors of two (N, C, X, Y) maps,
    returned as a tensor of shape (N,)."""
    a = normalize_tensor(in0)
    b = normalize_tensor(in1)
    batch = in0.size()[0]
    # Per-pixel cosine similarity (dot product of unit channel vectors) ...
    sim_map = torch.sum(a * b, dim=1)          # (N, X, Y)
    # ... averaged over the spatial dimensions, one at a time.
    per_row = torch.mean(sim_map, dim=1)       # (N, Y)
    return torch.mean(per_row, dim=1).view(batch)
# Off-the-shelf deep network
class PNet(nn.Module):
    """Pre-trained network with all channels equally weighted by default"""

    def __init__(self, pnet_type="vgg", pnet_rand=False, use_gpu=True):
        super(PNet, self).__init__()
        self.use_gpu = use_gpu
        self.pnet_type = pnet_type
        self.pnet_rand = pnet_rand
        # Per-channel input normalization constants, applied in forward().
        self.shift = torch.Tensor([-0.030, -0.088, -0.188]).view(1, 3, 1, 1)
        self.scale = torch.Tensor([0.458, 0.448, 0.450]).view(1, 3, 1, 1)
        # Select the backbone; pnet_rand keeps random (untrained) weights.
        if self.pnet_type in ["vgg", "vgg16"]:
            self.net = vgg16(pretrained=not self.pnet_rand, requires_grad=False)
        elif self.pnet_type == "alex":
            self.net = alexnet(pretrained=not self.pnet_rand,
                               requires_grad=False)
        elif self.pnet_type[:-2] == "resnet":
            self.net = resnet(pretrained=not self.pnet_rand,
                              requires_grad=False,
                              num=int(self.pnet_type[-2:]))
        elif self.pnet_type == "squeeze":
            self.net = squeezenet(pretrained=not self.pnet_rand,
                                  requires_grad=False)
        self.L = self.net.N_slices
        if use_gpu:
            self.net.cuda()
            self.shift = self.shift.cuda()
            self.scale = self.scale.cuda()

    def forward(self, in0, in1, retPerLayer=False):
        # Normalize both inputs with the same shift/scale constants.
        in0_sc = (in0 - self.shift.expand_as(in0)) / self.scale.expand_as(in0)
        in1_sc = (in1 - self.shift.expand_as(in0)) / self.scale.expand_as(in0)
        feats0 = self.net.forward(in0_sc)
        feats1 = self.net.forward(in1_sc)
        # Per-layer dissimilarity: 1 - mean cosine similarity of feature maps.
        layer_scores = [1.0 - cos_sim(f0, f1) for f0, f1 in zip(feats0, feats1)]
        total = layer_scores[0]
        for score in layer_scores[1:]:
            total = total + score
        if retPerLayer:
            return (total, layer_scores)
        return total
class squeezenet(torch.nn.Module):
    """SqueezeNet 1.1 feature extractor split into seven sequential slices,
    returning the intermediate activation after each slice."""

    # Feature-layer index ranges [start, stop) for slice1..slice7.
    _SLICE_RANGES = ((0, 2), (2, 5), (5, 8), (8, 10), (10, 11), (11, 12), (12, 13))

    def __init__(self, requires_grad=False, pretrained=True):
        super(squeezenet, self).__init__()
        pretrained_features = models.squeezenet1_1(pretrained=pretrained).features
        self.N_slices = 7
        # Build slice1..slice7 from consecutive runs of the feature layers;
        # setattr on an nn.Module registers each Sequential as a submodule.
        for idx, (start, stop) in enumerate(self._SLICE_RANGES, start=1):
            seq = torch.nn.Sequential()
            for layer in range(start, stop):
                seq.add_module(str(layer), pretrained_features[layer])
            setattr(self, "slice%d" % idx, seq)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        # Run the slices in order, keeping the activation after each one.
        activations = []
        h = X
        for idx in range(1, self.N_slices + 1):
            h = getattr(self, "slice%d" % idx)(h)
            activations.append(h)
        vgg_outputs = namedtuple(
            "SqueezeOutputs",
            ["relu1", "relu2", "relu3", "relu4", "relu5", "relu6", "relu7"],
        )
        return vgg_outputs(*activations)
class alexnet(torch.nn.Module):
    """AlexNet feature extractor split into five sequential slices."""

    # Feature-layer boundaries delimiting each slice.
    _CUTS = (0, 2, 5, 8, 10, 12)

    def __init__(self, requires_grad=False, pretrained=True):
        super(alexnet, self).__init__()
        features = models.alexnet(pretrained=pretrained).features
        self.N_slices = 5
        # Build slice1..slice5 from consecutive runs of feature layers,
        # keeping the original per-layer module names (their indexes).
        for idx in range(self.N_slices):
            seq = torch.nn.Sequential()
            for layer in range(self._CUTS[idx], self._CUTS[idx + 1]):
                seq.add_module(str(layer), features[layer])
            setattr(self, "slice%d" % (idx + 1), seq)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Return the activation after each of the five slices."""
        outputs_cls = namedtuple(
            "AlexnetOutputs", ["relu1", "relu2", "relu3", "relu4", "relu5"]
        )
        activations = []
        h = X
        for idx in range(1, self.N_slices + 1):
            h = getattr(self, "slice%d" % idx)(h)
            activations.append(h)
        return outputs_cls(*activations)
class vgg16(torch.nn.Module):
    """VGG-16 feature extractor split into five sequential slices."""

    # Feature-layer boundaries delimiting each slice.
    _CUTS = (0, 4, 9, 16, 23, 30)

    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        features = models.vgg16(pretrained=pretrained).features
        self.N_slices = 5
        # Build slice1..slice5 from consecutive runs of feature layers,
        # keeping the original per-layer module names (their indexes).
        for idx in range(self.N_slices):
            seq = torch.nn.Sequential()
            for layer in range(self._CUTS[idx], self._CUTS[idx + 1]):
                seq.add_module(str(layer), features[layer])
            setattr(self, "slice%d" % (idx + 1), seq)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Return the activation after each of the five slices."""
        outputs_cls = namedtuple(
            "VggOutputs",
            ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"],
        )
        activations = []
        h = X
        for idx in range(1, self.N_slices + 1):
            h = getattr(self, "slice%d" % idx)(h)
            activations.append(h)
        return outputs_cls(*activations)
class resnet(torch.nn.Module):
    """ResNet backbone exposing five intermediate activation stages.

    :param requires_grad: keep gradients on the backbone parameters
    :param pretrained: load torchvision's pretrained weights
    :param num: ResNet depth; one of 18, 34, 50, 101 or 152
    :raises ValueError: if ``num`` is not a supported depth
    """

    def __init__(self, requires_grad=False, pretrained=True, num=18):
        super(resnet, self).__init__()
        constructors = {
            18: models.resnet18,
            34: models.resnet34,
            50: models.resnet50,
            101: models.resnet101,
            152: models.resnet152,
        }
        try:
            factory = constructors[num]
        except KeyError:
            # BUGFIX: the original fell through silently for unsupported
            # depths and crashed later with AttributeError on self.net.
            raise ValueError("unsupported resnet depth: %r" % (num,))
        self.net = factory(pretrained=pretrained)
        self.N_slices = 5
        # Expose the stages used by forward() as direct attributes.
        self.conv1 = self.net.conv1
        self.bn1 = self.net.bn1
        self.relu = self.net.relu
        self.maxpool = self.net.maxpool
        self.layer1 = self.net.layer1
        self.layer2 = self.net.layer2
        self.layer3 = self.net.layer3
        self.layer4 = self.net.layer4
        # BUGFIX: the original accepted ``requires_grad`` but never used it,
        # unlike the sibling backbones (vgg16/alexnet/squeezenet) which
        # freeze their parameters.  Honor it for consistency.
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Run X through the network, returning the five stage outputs."""
        h = self.conv1(X)
        h = self.bn1(h)
        h = self.relu(h)
        h_relu1 = h
        h = self.maxpool(h)
        h = self.layer1(h)
        h_conv2 = h
        h = self.layer2(h)
        h_conv3 = h
        h = self.layer3(h)
        h_conv4 = h
        h = self.layer4(h)
        h_conv5 = h
        outputs = namedtuple(
            "Outputs", ["relu1", "conv2", "conv3", "conv4", "conv5"]
        )
        return outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
| StarcoderdataPython |
3243701 | """
Object that can read/write meteostations metadata and extract related
measurements
"""
from pyowm.commons.http_client import HttpClient
from pyowm.stationsapi30.station_parser import StationParser
from pyowm.stationsapi30.aggregated_measurement_parser import AggregatedMeasurementParser
from pyowm.constants import STATIONS_API_VERSION
class StationsManager(object):

    """
    A manager objects that provides a full interface to OWM Stations API. Mainly
    it implements CRUD methods on Station entities and the corresponding
    measured datapoints.

    :param API_key: the OWM web API key (defaults to ``None``)
    :type API_key: str
    :returns: a *StationsManager* instance
    :raises: *AssertionError* when no API Key is provided

    """

    # Single source of truth for the Stations API endpoint root.  Every
    # request URL below is derived from it (the original repeated the
    # literal in every method, inviting copy/paste drift).
    ROOT_URL = 'http://api.openweathermap.org/data/3.0'

    def __init__(self, API_key):
        assert API_key is not None, 'You must provide a valid API Key'
        self.API_key = API_key
        self.stations_parser = StationParser()
        self.aggregated_measurements_parser = AggregatedMeasurementParser()
        self.http_client = HttpClient()

    def stations_api_version(self):
        return STATIONS_API_VERSION

    # STATIONS Methods

    def get_stations(self):
        """
        Retrieves all of the user's stations registered on the Stations API.

        :returns: list of *pyowm.stationsapi30.station.Station* objects

        """
        status, data = self.http_client.get_json(
            self.ROOT_URL + '/stations',
            params={'appid': self.API_key},
            headers={'Content-Type': 'application/json'})
        return [self.stations_parser.parse_dict(item) for item in data]

    def get_station(self, id):
        """
        Retrieves a named station registered on the Stations API.

        :param id: the ID of the station
        :type id: str
        :returns: a *pyowm.stationsapi30.station.Station* object

        """
        status, data = self.http_client.get_json(
            self.ROOT_URL + '/stations/%s' % str(id),
            params={'appid': self.API_key},
            headers={'Content-Type': 'application/json'})
        return self.stations_parser.parse_dict(data)

    def create_station(self, external_id, name, lat, lon, alt=None):
        """
        Create a new station on the Station API with the given parameters

        :param external_id: the user-given ID of the station
        :type external_id: str
        :param name: the name of the station
        :type name: str
        :param lat: latitude of the station
        :type lat: float
        :param lon: longitude of the station
        :type lon: float
        :param alt: altitude of the station
        :type alt: float
        :returns: the new *pyowm.stationsapi30.station.Station* object

        """
        assert external_id is not None
        assert name is not None
        assert lon is not None
        assert lat is not None
        # Validate coordinates before any network round-trip.
        if lon < -180.0 or lon > 180.0:
            raise ValueError("'lon' value must be between -180 and 180")
        if lat < -90.0 or lat > 90.0:
            raise ValueError("'lat' value must be between -90 and 90")
        if alt is not None:
            if alt < 0.0:
                raise ValueError("'alt' value must not be negative")
        status, payload = self.http_client.post(
            self.ROOT_URL + '/stations',
            params={'appid': self.API_key},
            data=dict(external_id=external_id, name=name, lat=lat,
                      lon=lon, alt=alt),
            headers={'Content-Type': 'application/json'})
        return self.stations_parser.parse_dict(payload)

    def update_station(self, station):
        """
        Updates the Station API record identified by the ID of the provided
        *pyowm.stationsapi30.station.Station* object with all of its fields

        :param station: the *pyowm.stationsapi30.station.Station* object to be updated
        :type station: *pyowm.stationsapi30.station.Station*
        :returns: `None` if update is successful, an exception otherwise

        """
        assert station.id is not None
        status, _ = self.http_client.put(
            self.ROOT_URL + '/stations/%s' % str(station.id),
            params={'appid': self.API_key},
            data=dict(external_id=station.external_id, name=station.name,
                      lat=station.lat, lon=station.lon, alt=station.alt),
            headers={'Content-Type': 'application/json'})

    def delete_station(self, station):
        """
        Deletes the Station API record identified by the ID of the provided
        *pyowm.stationsapi30.station.Station*, along with all its related
        measurements

        :param station: the *pyowm.stationsapi30.station.Station* object to be deleted
        :type station: *pyowm.stationsapi30.station.Station*
        :returns: `None` if deletion is successful, an exception otherwise

        """
        assert station.id is not None
        status, _ = self.http_client.delete(
            self.ROOT_URL + '/stations/%s' % str(station.id),
            params={'appid': self.API_key},
            headers={'Content-Type': 'application/json'})

    # Measurements-related methods

    def send_measurement(self, measurement):
        """
        Posts the provided Measurement object's data to the Station API.

        :param measurement: the *pyowm.stationsapi30.measurement.Measurement*
          object to be posted
        :type measurement: *pyowm.stationsapi30.measurement.Measurement* instance
        :returns: `None` if creation is successful, an exception otherwise

        """
        assert measurement is not None
        assert measurement.station_id is not None
        status, _ = self.http_client.post(
            self.ROOT_URL + '/measurements',
            params={'appid': self.API_key},
            data=[measurement.to_dict()],
            headers={'Content-Type': 'application/json'})

    def send_measurements(self, list_of_measurements):
        """
        Posts data about the provided list of Measurement objects to the
        Station API. The objects may be related to different station IDs.

        :param list_of_measurements: list of *pyowm.stationsapi30.measurement.Measurement*
          objects to be posted
        :type list_of_measurements: list of *pyowm.stationsapi30.measurement.Measurement*
          instances
        :returns: `None` if creation is successful, an exception otherwise

        """
        assert list_of_measurements is not None
        assert all([m.station_id is not None for m in list_of_measurements])
        msmts = [m.to_dict() for m in list_of_measurements]
        status, _ = self.http_client.post(
            self.ROOT_URL + '/measurements',
            params={'appid': self.API_key},
            data=msmts,
            headers={'Content-Type': 'application/json'})

    def get_measurements(self, station_id, aggregated_on, from_timestamp,
                         to_timestamp, limit=100):
        """
        Reads measurements of a specified station recorded in the specified time
        window and aggregated on minute, hour or day. Optionally, the number of
        resulting measurements can be limited.

        :param station_id: unique station identifier
        :type station_id: str
        :param aggregated_on: aggregation time-frame for this measurement
        :type aggregated_on: string between 'm','h' and 'd'
        :param from_timestamp: Unix timestamp corresponding to the beginning of
          the time window
        :type from_timestamp: int
        :param to_timestamp: Unix timestamp corresponding to the end of the
          time window
        :type to_timestamp: int
        :param limit: max number of items to be returned. Defaults to 100
        :type limit: int
        :returns: list of *pyowm.stationsapi30.measurement.AggregatedMeasurement*
          objects

        """
        assert station_id is not None
        assert aggregated_on is not None
        assert from_timestamp is not None
        assert from_timestamp > 0
        assert to_timestamp is not None
        assert to_timestamp > 0
        if to_timestamp < from_timestamp:
            raise ValueError("End timestamp can't be earlier than begin timestamp")
        assert isinstance(limit, int)
        assert limit >= 0
        query = {'appid': self.API_key,
                 'station_id': station_id,
                 'type': aggregated_on,
                 'from': from_timestamp,
                 'to': to_timestamp,
                 'limit': limit}
        status, data = self.http_client.get_json(
            self.ROOT_URL + '/measurements',
            params=query,
            headers={'Content-Type': 'application/json'})
        return [self.aggregated_measurements_parser.parse_dict(item) for item in data]

    def send_buffer(self, buffer):
        """
        Posts to the Stations API data about the Measurement objects contained
        into the provided Buffer instance.

        :param buffer: the *pyowm.stationsapi30.buffer.Buffer* instance whose
          measurements are to be posted
        :type buffer: *pyowm.stationsapi30.buffer.Buffer* instance
        :returns: `None` if creation is successful, an exception otherwise

        """
        assert buffer is not None
        # Fields copied 1:1 from the serialized measurement into the payload
        # (replaces the original's 17 hand-written assignments).
        simple_fields = (
            'temperature', 'wind_speed', 'wind_gust', 'wind_deg',
            'pressure', 'humidity', 'rain_1h', 'rain_6h', 'rain_24h',
            'snow_1h', 'snow_6h', 'snow_24h', 'dew_point', 'humidex',
            'heat_index', 'visibility_distance', 'visibility_prefix')
        msmts = []
        for x in buffer.measurements:
            m = x.to_dict()
            item = dict()
            item['station_id'] = m['station_id']
            item['dt'] = m['timestamp']
            for field in simple_fields:
                item[field] = m[field]
            item['clouds'] = [dict(distance=m['clouds_distance']),
                              dict(condition=m['clouds_condition']),
                              dict(cumulus=m['clouds_cumulus'])]
            item['weather'] = [
                dict(precipitation=m['weather_precipitation']),
                dict(descriptor=m['weather_descriptor']),
                dict(intensity=m['weather_intensity']),
                dict(proximity=m['weather_proximity']),
                dict(obscuration=m['weather_obscuration']),
                dict(other=m['weather_other'])]
            msmts.append(item)
        status, _ = self.http_client.post(
            self.ROOT_URL + '/measurements',
            params={'appid': self.API_key},
            data=msmts,
            headers={'Content-Type': 'application/json'})
| StarcoderdataPython |
176645 | <filename>yggdrasil/tests/scripts/python_model.py
from yggdrasil.tools import sleep
# Simulate a model that takes measurable wall-clock time to run.
sleep(1)
# Emit a marker line the test harness can look for on stdout.
print('Python model')
| StarcoderdataPython |
3364152 | from setuptools import setup, find_packages
# Install with 'pip install -e .'
# Package metadata and dependency pins for the xomx library.
setup(
    name="xomx",
    version="0.1.0",
    author="<NAME>",
    description="xomx: a python library for computational omics",
    url="https://github.com/perrin-isir/xomx",
    packages=find_packages(),
    # Runtime dependencies; lower bounds are the versions the library is
    # known to work with.
    install_requires=[
        "argparse>=1.1",
        "numpy>=1.21.1",
        "matplotlib>=3.1.3",
        "joblib>=1.0.1",
        "pandas>=1.3.0",
        "scipy>=1.4.1",
        "torch>=1.7.1",
        "scikit-learn>=0.24.2",
        "requests>=2.23.0",
        "leidenalg>=0.8.8",
        "holoviews>=1.14.8",
        "bokeh>=2.3.3",
    ],
    license="LICENSE",
)
| StarcoderdataPython |
84859 | <reponame>Skyross/eventsourcing
from unittest import TestCase
from eventsourcing.utils import retry
class TestRetryDecorator(TestCase):
    """Exercises the @retry decorator's argument handling and retry loop."""

    def test_bare(self):
        # Decorator applied without parentheses.
        @retry
        def noop():
            pass

        noop()

    def test_no_args(self):
        @retry()
        def noop():
            pass

        noop()

    def test_exception_single_value(self):
        @retry(ValueError)
        def noop():
            pass

        noop()

    def test_exception_sequence(self):
        @retry((ValueError, TypeError))
        def noop():
            pass

        noop()

    def test_exception_type_error(self):
        # Non-exception arguments must be rejected at decoration time.
        with self.assertRaises(TypeError):
            @retry(1)
            def _():
                pass

        with self.assertRaises(TypeError):
            @retry((ValueError, 1))
            def _():
                pass

    def test_exception_raised_no_retry(self):
        calls = []

        @retry(ValueError)
        def boom():
            calls.append(None)
            raise ValueError

        with self.assertRaises(ValueError):
            boom()
        self.assertEqual(len(calls), 1)

    def test_max_attempts(self):
        calls = []

        @retry(ValueError, max_attempts=2)
        def boom():
            calls.append(None)
            raise ValueError

        with self.assertRaises(ValueError):
            boom()
        self.assertEqual(len(calls), 2)

    def test_max_attempts_not_int(self):
        with self.assertRaises(TypeError):
            @retry(ValueError, max_attempts="a")
            def boom():
                pass

    def test_wait(self):
        calls = []

        @retry(ValueError, max_attempts=2, wait=0.001)
        def boom():
            calls.append(None)
            raise ValueError

        with self.assertRaises(ValueError):
            boom()
        self.assertEqual(len(calls), 2)

    def test_wait_not_float(self):
        with self.assertRaises(TypeError):
            @retry(ValueError, max_attempts=1, wait="a")
            def boom():
                pass

    def test_stall(self):
        calls = []

        @retry(ValueError, max_attempts=2, stall=0.001)
        def boom():
            calls.append(None)
            raise ValueError

        with self.assertRaises(ValueError):
            boom()
        self.assertEqual(len(calls), 2)

    def test_stall_not_float(self):
        with self.assertRaises(TypeError):
            @retry(ValueError, max_attempts=1, stall="a")
            def boom():
                pass
| StarcoderdataPython |
120892 | <filename>tests/fl_simulation/server/test_malicious_activity_prevention.py
from fl_simulation.client.update import ModelUpdate
from fl_simulation.server.aggregation import DistanceBasedModelAssigner
import pytest
import torch
from fl_simulation.server.update import AggregatedUpdate
from fl_simulation.utils.types import ModelDiff
@pytest.fixture
def model_assigner():
    # Fresh assigner per test so no state can leak between tests.
    return DistanceBasedModelAssigner()
@pytest.fixture
def representatives():
    """Three aggregated updates acting as cluster representatives."""
    directions = {
        1: ([0.0], [1.0]),
        2: ([1.0], [0.0]),
        3: ([-1.0], [0.0]),
    }
    return {
        key: AggregatedUpdate(ModelDiff([torch.tensor(a), torch.tensor(b)]))
        for key, (a, b) in directions.items()
    }
@pytest.fixture
def assignable_updates():
    """Return updates assignable to the representatives.

    Returns:
        List[ModelUpdate]: updates, which should be assigned to representatives 1, 2, and 3 respectively.
    """
    vectors = (
        ([0.0], [1.0001]),
        ([1.0001], [0.0]),
        ([-1.0001], [0.0]),
    )
    return [
        ModelUpdate(ModelDiff([torch.tensor(a), torch.tensor(b)]), 1)
        for a, b in vectors
    ]
@pytest.fixture
def cluster_updates():
    """Six updates forming two tight groups around (0, 1) and (1, 0)."""
    magnitudes = [1.0, 1.001, 1.002]
    near_y = [
        ModelUpdate(ModelDiff([torch.tensor([0.0]), torch.tensor([v])]), 1)
        for v in magnitudes
    ]
    near_x = [
        ModelUpdate(ModelDiff([torch.tensor([v]), torch.tensor([0.0])]), 1)
        for v in magnitudes
    ]
    return near_y + near_x
def test_updates_assigned_to_models(model_assigner, representatives, assignable_updates):
    """Each crafted update must be assigned to its nearest representative."""
    assignments = model_assigner(assignable_updates, representatives)

    # for each update in `assignable_updates` indicates to which representative it should be assigned
    expected_assignments = [1, 2, 3]

    for u, expected in zip(assignable_updates, expected_assignments):
        assert expected in assignments, f"update {u} has not been assigned to expected representative {expected}"
        # Membership is checked by element-wise tensor equality, since the
        # assigner may not preserve object identity.
        assert any(
                all(t1.eq(t2) for t1, t2 in zip(u.values, upd.values)) for upd in assignments[expected]
        ), f"update {u} has not been assigned to expected representative {expected}"
def test_updates_clustered(model_assigner, cluster_updates):
    """With no representatives given, updates must be grouped into clusters."""

    def clusters_equal(c1, c2):
        # Order-insensitive comparison of two update collections by
        # element-wise tensor equality (both inclusion directions checked).
        if len(c1) != len(c2):
            return False

        all_found = True

        for elem1 in c1:
            all_found = all_found and any(all(t1.eq(t2) for t1, t2 in zip(elem1.values, elem2.values)) for elem2 in c2)

        for elem1 in c2:
            all_found = all_found and any(all(t1.eq(t2) for t1, t2 in zip(elem1.values, elem2.values)) for elem2 in c1)

        return all_found

    assignments = model_assigner(cluster_updates, {})

    # The fixture builds two tight groups of three updates each.
    expected_cluster1 = cluster_updates[:3]
    expected_cluster2 = cluster_updates[3:]

    assert len(assignments) == 2, f"expected 2 clusters, got {len(assignments)}"

    _, cl1 = assignments.popitem()
    _, cl2 = assignments.popitem()

    assert clusters_equal(expected_cluster1, cl1) or clusters_equal(
        expected_cluster1, cl2
    ), f"clusters where not formed correctly: expected {expected_cluster1} to be preset, got {cl1}, {cl2}"
    assert clusters_equal(expected_cluster2, cl1) or clusters_equal(
        expected_cluster2, cl2
    ), f"clusters where not formed correctly: expected {expected_cluster2} to be preset, got {cl1}, {cl2}"
| StarcoderdataPython |
188047 | <filename>cfgov/regulations3k/tests/test_hooks.py
from __future__ import unicode_literals
from django.test import TestCase
from wagtail.tests.utils import WagtailTestUtils
class TestRegs3kHooks(TestCase, WagtailTestUtils):
    """Smoke-tests that each regulations3k ModelAdmin index page loads."""

    def setUp(self):
        self.login()

    def _assert_index_ok(self, url):
        # Shared assertion: the admin listing must answer with HTTP 200.
        self.assertEqual(self.client.get(url).status_code, 200)

    def test_part_model_admin(self):
        self._assert_index_ok('/admin/regulations3k/part/')

    def test_effectiveversion_model_admin(self):
        self._assert_index_ok('/admin/regulations3k/effectiveversion/')

    def test_subpart_model_admin(self):
        self._assert_index_ok('/admin/regulations3k/subpart/')

    def test_section_model_admin(self):
        self._assert_index_ok('/admin/regulations3k/section/')
| StarcoderdataPython |
1780236 | <reponame>rescapes/rescape-python-helpers
from django.contrib.gis.geos import GEOSGeometry, GeometryCollection
from snapshottest import TestCase
from .geometry_helpers import ewkt_from_feature_collection
from rescape_python_helpers import ewkt_from_feature, geometry_from_feature, geometrycollection_from_feature_collection
class GeometryHelepersTest(TestCase):
    """Checks the feature -> geometry conversion helpers."""

    client = None

    # Shared polygon ring; the original repeated this literal four times.
    _RING = [[49.5294835476, 2.51357303225],
             [51.4750237087, 2.51357303225],
             [51.4750237087, 6.15665815596],
             [49.5294835476, 6.15665815596],
             [49.5294835476, 2.51357303225]]

    @classmethod
    def _feature(cls):
        # Fresh dict per call so tests cannot interfere via mutation.
        return {
            "type": "Feature",
            "geometry": {
                "type": "Polygon",
                "coordinates": [cls._RING]
            }
        }

    @classmethod
    def _feature_collection(cls):
        return {
            'type': 'FeatureCollection',
            'generator': 'overpass-turbo',
            'copyright': 'The data included in this document is from www.openstreetmap.org. The data is made available under ODbL.',
            'timestamp': '2017-04-06T22:46:03Z',
            'features': [cls._feature(), cls._feature()]
        }

    def test_geometry_from_feature(self):
        result = geometry_from_feature(self._feature())
        assert isinstance(result, GEOSGeometry)

    def test_ewkt_from_feature(self):
        result = ewkt_from_feature(self._feature())
        assert isinstance(result, str)

    def test_geometry_collection_from_feature_collection(self):
        result = geometrycollection_from_feature_collection(
            self._feature_collection())
        assert isinstance(result, GeometryCollection)

    def test_ewkt_from_feature_collection(self):
        result = ewkt_from_feature_collection(self._feature_collection())
        assert isinstance(result, str)
61837 | <reponame>Southampton-Maritime-Robotics/autonomous-sailing-robot<gh_stars>1-10
"""
Set of test functions
so BeagleBone specific GPIO
functions can be tested
For obvious reasons these values
are ONLY for testing!
TODO:
Expand to use test values instead of set values
"""
def begin():
    """Stand-in for BeagleBone GPIO initialization; only warns on stdout."""
    print("WARNING, not using actual GPIO")
def write(address, a, b):
    """No-op replacement for a GPIO write; all arguments are ignored."""
    return None
def read(address, start, lenght):  # NOTE: 'lenght' typo kept for API compat
    """Return a fixed dummy payload (0..10) regardless of the arguments."""
    return list(range(11))
| StarcoderdataPython |
3371324 | """
Given an array and a number k
Find the max elements of each of its sub-arrays of length k.
Keep indexes of good candidates in deque d.
The indexes in d are from the current window, they're increasing,
and their corresponding nums are decreasing.
Then the first deque element is the index of the largest window value.
For each index i:
1. Pop (from the end) indexes of smaller elements (they'll be useless).
2. Append the current index.
3. Pop (from the front) the index i - k, if it's still in the deque
(it falls out of the window).
4. If our window has reached size k,
append the current window maximum to the output.
"""
import collections
def max_sliding_window(arr, k):
    """Return the maximum of every length-``k`` window of ``arr``.

    Maintains a deque of candidate indexes whose values are decreasing,
    so each element is pushed/popped at most once: O(n) overall.

    :param arr: sequence of mutually comparable items
    :param k: window length; a non-positive ``k`` yields an empty result
    :returns: list with one maximum per window (empty if ``k > len(arr)``)
    """
    if k <= 0:
        # Guard: with k == 0 the original emitted every element because
        # ``i >= k - 1`` was always true; there are no windows to report.
        return []
    qi = collections.deque()  # candidate indexes; their values decrease
    result = []
    for i, n in enumerate(arr):
        # Drop candidates smaller than the new element: they can never
        # be a window maximum while ``n`` is in the window.
        while qi and arr[qi[-1]] < n:
            qi.pop()
        qi.append(i)
        # Drop the index that just slid out of the window.
        if qi[0] == i - k:
            qi.popleft()
        if i >= k - 1:
            result.append(arr[qi[0]])
    return result
| StarcoderdataPython |
3292300 | <reponame>CrazyDi/Python1
import asyncio
async def handle_echo(reader, writer):
    """Read one chunk from the client and log it with the peer address.

    NOTE: despite the name, nothing is echoed back and the writer is left
    open -- exactly as in the original implementation.
    """
    payload = await reader.read(1024)
    peer = writer.get_extra_info("peername")
    print("received %r from %r" % (payload.decode(), peer))
if __name__ == "__main__":
    # Dedicated event loop for the server process.
    loop = asyncio.new_event_loop()
    # NOTE(review): the ``loop`` keyword of start_server was removed in
    # Python 3.10 -- this script targets an older asyncio API; confirm the
    # interpreter version before running.
    coro = asyncio.start_server(handle_echo, "127.0.0.1", 10001, loop=loop)
    server = loop.run_until_complete(coro)
    try:
        # Serve until interrupted from the keyboard.
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Graceful shutdown: stop accepting, drain pending closes, free the loop.
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
| StarcoderdataPython |
3230200 | <reponame>qychen13/ClusterAlignReID<filename>utils/construct_engine.py
import time
import os
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from .engine import Engine
from .evaluation import test
from .center import calculate_id_features, update_id_features
def construct_engine(engine_args, log_freq, log_dir, checkpoint_dir, checkpoint_freq, lr_scheduler, lr_scheduler_iter, metric_dict, query_iterator, gallary_iterator, id_feature_params, id_iterator, test_params):
    """Build an Engine wired with logging, checkpointing, LR scheduling,
    metric tracking and periodic evaluation callbacks.

    Exactly one of ``lr_scheduler`` (stepped per epoch) and
    ``lr_scheduler_iter`` (stepped per iteration) may be provided.

    :raises RuntimeError: if both schedulers are given
    :returns: the configured Engine instance
    """
    if lr_scheduler is not None and lr_scheduler_iter is not None:
        raise RuntimeError(
            'Either lr_scheduler or lr_scheduler_iter can be used')
    engine = Engine(**engine_args)

    # tensorboard log setting
    writer = SummaryWriter(log_dir)

    # Per-identity feature centers shared by the callbacks below.  Kept as
    # a closure variable (BUGFIX: the original used a module-level global,
    # which leaked state between separately constructed engines).
    id_features = None

    ########## helper functions ##########
    def save_model(state, filename):
        # Snapshot weights, optimizer state, current metric values and the
        # id-feature centers for later resume/inspection.
        torch.save({
            'state_dict': state['network'].state_dict(),
            'optimizer': state['optimizer'].state_dict(),
            'metrics': {key: metric_dict[key].value() for key in metric_dict},
            'id_features': id_features
        }, filename)

    def reset_metrics():
        for key in metric_dict:
            metric_dict[key].reset()

    def update_metrics(state):
        paras = dict(logits=state['output'],
                     target=state['sample'][1], loss=state['loss'])
        with torch.no_grad():
            for key in metric_dict:
                metric_dict[key](**paras)

    def wrap_data(state):
        # Move the current sample onto the (single) training GPU.
        if state['gpu_ids'] is not None:
            if len(state['gpu_ids']) == 1:
                state['sample'][0] = state['sample'][0].cuda(
                    state['gpu_ids'][0], non_blocking=True)
            for key in state['sample'][1]:
                if isinstance(state['sample'][1][key], list):
                    continue
                state['sample'][1][key] = state['sample'][1][key].cuda(
                    state['gpu_ids'][0], non_blocking=True)

    ########## callback functions ##########
    def on_start(state):
        reset_metrics()
        if state['train']:
            if state['gpu_ids'] is None:
                print('Training/Validating without gpus ...')
            else:
                if not torch.cuda.is_available():
                    raise RuntimeError('Cuda is not available')
                print(
                    'Training/Validating on gpu: {}'.format(state['gpu_ids']))
            if state['iteration'] == 0:
                filename = os.path.join(checkpoint_dir, 'init_model.pth.tar')
                save_model(state, filename)
        else:
            print('-------------Start Validation at {} For Epoch {}-------------'.format(
                time.strftime('%c'), state['epoch']))

    def on_start_epoch(state):
        nonlocal id_features
        print('-------------Start Training at {} For Epoch {}------------'.format(
            time.strftime('%c'), state['epoch']))
        scheduler = lr_scheduler if lr_scheduler is not None else lr_scheduler_iter
        # BUGFIX: the original called ``lr_scheduler_iter.step(...)`` here
        # unconditionally, which raised AttributeError whenever the
        # epoch-based ``lr_scheduler`` was in use (lr_scheduler_iter is
        # None in that configuration).
        if lr_scheduler_iter is not None:
            lr_scheduler_iter.step(state['iteration'])
        if scheduler is not None:
            for i, lr in enumerate(scheduler.get_lr()):
                writer.add_scalar(
                    'global/learning_rate_{}'.format(i), lr, state['epoch'])
        reset_metrics()
        if id_feature_params['warm_up_epochs'] is not None and state['epoch'] > id_feature_params['warm_up_epochs']:
            # Refresh the id centers once per epoch (or once ever, when the
            # update frequency is per-iteration and they are still unset).
            if id_feature_params['update_freq'] == 'epoch' or id_features is None:
                id_features = calculate_id_features(
                    state['network'], id_iterator, state['gpu_ids'], method=id_feature_params['method'])

    def on_end_sample(state):
        wrap_data(state)
        if state['train'] and id_features is not None:  # add id feature as label
            state['sample'][1]['id_features'] = id_features[[
                state['sample'][1]['pid']]]
            state['sample'][1]['id_feature_dict'] = id_features
        if lr_scheduler_iter is not None:
            lr_scheduler_iter.step(state['iteration'])
            for i, lr in enumerate(lr_scheduler_iter.get_lr()):
                writer.add_scalar(
                    'training_iter/learning_rate_{}'.format(i), lr, state['iteration'])

    def on_end_forward(state):
        nonlocal id_features
        update_metrics(state)
        if state['train'] and id_features is not None and id_feature_params['update_freq'] == 'iteration':
            id_features = update_id_features(state['output'], state['sample'][1])

    def on_end_update(state):
        if state['iteration'] % log_freq == 0:
            for key in metric_dict:
                writer.add_scalar('training_iter/{}'.format(key),
                                  metric_dict[key].value(), state['iteration'])
        if lr_scheduler_iter is not None:
            lr_scheduler_iter.step(state['iteration']+1)

    def on_end_epoch(state):
        for key in metric_dict:
            writer.add_scalar('training/{}'.format(key),
                              metric_dict[key].value(), state['epoch'])
        if (state['epoch'] + 1) % checkpoint_freq == 0:
            file_name = 'e{}t{}.pth.tar'.format(
                state['epoch'], state['iteration'])
            file_name = os.path.join(checkpoint_dir, file_name)
            save_model(state, file_name)
        # start testing
        t = time.strftime('%c')
        print(
            '*************************Start testing at {}**********************'.format(t))
        result = test(state['network'], query_iterator,
                      gallary_iterator, state['gpu_ids'], **test_params)
        for key in result:
            writer.add_scalar('test/{}'.format(key),
                              result[key], state['epoch'])
        # Note: adjust learning after calling optimizer.step() as required by update after pytorch 1.1.0
        if lr_scheduler is not None:
            lr_scheduler.step(state['epoch']+1)

    def on_end(state):
        t = time.strftime('%c')
        if state['train']:
            save_model(state, os.path.join(
                checkpoint_dir, 'final_model.pth.tar'))
            print(
                '*********************Training done at {}***********************'.format(t))
            writer.close()
        else:
            for key in metric_dict:
                writer.add_scalar('validation/{}'.format(key),
                                  metric_dict[key].value(), state['epoch'])

    engine.hooks['on_start'] = on_start
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_sample'] = on_end_sample
    engine.hooks['on_end_forward'] = on_end_forward
    engine.hooks['on_end_update'] = on_end_update
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.hooks['on_end'] = on_end
    return engine
| StarcoderdataPython |
187872 | <reponame>pddg/qkouserver
import os
from typing import List, Union
from ast import literal_eval
# dir name or file path
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
PRJ_DIR = os.path.dirname(BASE_DIR)
# SQLite file location; overridable through the SQLITE_PATH env var.
SQLITE_DIR_PATH = os.getenv("SQLITE_PATH", BASE_DIR)
SQLITE_PATH = "sqlite:///{path}".format(path=os.path.join(SQLITE_DIR_PATH, "sqlite.db"))
# MySQL
# literal_eval + capitalize turns "true"/"FALSE"/... into a Python bool.
USE_MYSQL = literal_eval(os.getenv("USE_MYSQL", "False").capitalize())
MYSQL_USERNAME = os.getenv("MYSQL_USERNAME", None)
MYSQL_PASSWORD = os.getenv("MYSQL_PASSWORD", None)
MYSQL_HOST = os.getenv("MYSQL_HOST", None)
MYSQL_DATABASE_NAME = os.getenv("MYSQL_DATABASE_NAME", None)
# Assemble a SQLAlchemy URL only when every MySQL credential is present.
if MYSQL_USERNAME and MYSQL_PASSWORD and MYSQL_HOST and MYSQL_DATABASE_NAME:
    MYSQL_PATH = 'mysql+pymysql://' + MYSQL_USERNAME + ":" + MYSQL_PASSWORD \
                 + "@" + MYSQL_HOST + "/" + MYSQL_DATABASE_NAME + "?charset=utf8"
else:
    MYSQL_PATH = None
# Twitter authentication
CONSUMER_KEY = os.getenv("CONSUMER_KEY", None)
CONSUMER_SECRET = os.getenv("CONSUMER_SECRET", None)
ACCESS_TOKEN = os.getenv("ACCESS_TOKEN", None)
ACCESS_SECRET = os.getenv("ACCESS_SECRET", None)
# Shibboleth authentication
SHIBBOLETH_USERNAME = os.getenv("SHIBBOLETH_USERNAME", None)
SHIBBOLETH_PASSWORD = os.getenv("SHIBBOLETH_PASSWORD", None)
# URLs
# SYLLABUS_URL = "http://www.syllabus.kit.ac.jp/"
LEC_INFO_URL = "https://portal.student.kit.ac.jp/ead/?c=lecture_information"
LEC_CANCEL_URL = "https://portal.student.kit.ac.jp/ead/?c=lecture_cancellation"
NEWS_URL = "https://portal.student.kit.ac.jp/"
# Other settings
# EXCEPTION_TITLES = ["English", "Reading", "Writing", "Basic", "Speaking", "Learning",
#                     "Advanced", "Intermediate", "Acquisition", "Communication"]
TESTING = literal_eval(os.getenv("TESTING", "False").capitalize())
INITIALIZE = literal_eval(os.getenv("INITIALIZE", "True").capitalize())
# EXPIRE_ON = int(os.getenv("EXPIRE_ON", "60"))
# Intervals are in seconds; DAILY_TWEET_HOUR is an hour of the day.
SCRAPING_INTERVAL = int(os.getenv("SCRAPING_INTERVAL", "300"))
LOGIN_FAILURE_TWEET_INTERVAL = int(os.getenv("LOGIN_FAILURE_TWEET_INTERVAL", "1800"))
DAILY_TWEET_HOUR = int(os.getenv("DAILY_TWEET_HOUR", "7"))
LOG_FORMAT = os.getenv("LOG_FORMAT", "%(asctime)s - %(processName)s - %(levelname)s - %(message)s")
DEFAULT_LOG_LOCATION = os.getenv("LOG_LOCATION", "log")
# Matches Japanese "detail" keywords in reply tweets.
REPLY_ACTION_REGEX = ".*(詳し|くわし).*"
# UNDEFINED = "-"
# UNKNOWN = "不明"
# INTENSIVE = "集中"
# Tweet hashtag/ID templates and the regexes that parse the IDs back out.
LEC_INFO_ID_TEMPLATE = " #lec{id}"
LEC_INFO_ACTION_REGEX = "(?<=lec)[0-9]+"
# Japanese tweet bodies (sent verbatim; do not translate).
LEC_INFO_TEMPLATE = "講義名:{subject}\n" \
                    "講師名:{teacher}\n" \
                    "時限:{week} {period}限\n" \
                    "概要:{abstract}\n" \
                    "詳細:{detail}"
LEC_CANCEL_ID_TEMPLATE = " #cancel{id}"
LEC_CANCEL_ACTION_REGEX = "(?<=cancel)[0-9]+"
LEC_CANCEL_TEMPLATE = "講義名:{subject}\n" \
                      "講師名:{teacher}\n" \
                      "休講日:{str_day}\n" \
                      "時限:{week} {period}限\n" \
                      "概要:{abstract}"
NEWS_ID_TEMPLATE = " #news{id}"
NEWS_ACTION_REGEX = "(?<=news)[0-9]+"
NEWS_TEMPLATE_WITH_LINK = "掲載日:{str_first}\n発信課: {division}\n概要: {category}\n詳細:{detail}\nリンク:{link}"
NEWS_TEMPLATE_WITHOUT_LINK = "掲載日:{str_first}\n発信課: {division}\n概要: {category}\n詳細:{detail}"
# Error / status messages (Japanese).
THERE_IS_NO_INFORMATION_MSG = "お問い合わせされた情報は現在存在しません."
DATABASE_ERROR_MSG = "DBエラーです.管理者までご連絡ください."
LOGIN_FAILURE_START_MSG = "[障害検知]\n障害検知時刻:{created_at}\n現在,学務課ホームページへのログインができない," \
                          "または情報が正常に取得できないエラーが発生しています."
LOGIN_FAILURE_CONTINUE_MSG = "[障害継続中]\n障害検知時刻: {created_at}\n最終確認時刻: {last_confirmed}\n" \
                             "学務課ホームページへログインできない,または情報が正常に取得できないエラーが継続中です."
LOGIN_FAILURE_END_MSG = "[障害復旧]\n障害検知時刻: {created_at}\n復旧確認時刻: {fixed_at}\n" \
                        "学務課ホームページへのログイン及び情報の取得に成功しました."
# Daily cancellation digest templates.
TODAY_CANCEL_TEMPLATE = "{date} 本日の休講\n{titles}"
TODAY_CANCEL_NONE_TEMPLATE = "{date} 本日休講はありません."
TODAY_CANCEL_TEMPLATE_CONTINUE = "{date} 本日の休講 続き\n{titles}"
TODAY_IS_HOLIDAY_TEMPLATE = "{date} 今日は{holiday_name}です.{msg}"
HOLIDAY_MSG_ARRAY = ["レポートや課題は終わりましたか?有意義な祝日をお過ごしください.",
                     "進捗どうですか?",
                     "今日くらいはこのbotもお休みをいただいても良いですか?まぁダメですよね.",
                     "ところでこのbotはPythonというプログラミング言語で書かれています.せっかくの休日ですし新しいことを始めてみては?"]
def create_dict(keys: List[str], value_list: List[List[Union[str, int]]]) -> List[dict]:
    """
    Build one dict per value row by pairing ``keys`` with that row.

    Args:
        keys: list of dict key
        value_list: list of list of dict value

    Returns:
        List of dict
    """
    return [dict(zip(keys, row)) for row in value_list]
# Test data
# Fixture rows for the lecture-information model tests (Japanese payloads).
INFO_MODEL_KEYS = ["title", "teacher", "week", "period", "abstract", "detail", "first", "updated_date"]
INFO_MODEL_DATAS = [["休講情報bot入門", "pudding", "月", "1", "授業連絡", "上条ちゃん補習でーす", "2017/2/21", "2017/2/21"],
                    ["休講情報bot実践", "pudding", "月", "1~3", "授業連絡",
                     "上条ちゃん補習でーす.そしてこれは文字数のテストなのでーす.めんどいでーす誰かかわってくださーい."
                     "お願いしまーす眠いでーーす.ほんまめんどいんやが???????え????????",
                     "2017/2/21", "2017/2/21"],
                    ["休講情報API入門", "pudding", "火", "2", "授業連絡", "上条ちゃん補習でーす", "2017/2/18", "2017/2/18"],
                    ["休講情報Client実践", "pudding", "集中", "-", "授業連絡", "上条ちゃん補習でーす", "2017/2/23", "2017/2/23"]]
INFO_MODEL_DATA_DICTS = create_dict(INFO_MODEL_KEYS, INFO_MODEL_DATAS)
# Fixture rows for the lecture-cancellation model tests.
CANCEL_MODEL_KEYS = ["title", "teacher", "week", "period", "abstract", "day", "first"]
CANCEL_MODEL_DATAS = [["休講情報bot入門", "pudding", "月", "1", "-", "2017/3/3", "2017/2/21"],
                      ["休講情報bot実践", "pudding", "月", "1~3", "-", "2017/2/25", "2017/2/21"],
                      ["休講情報API入門", "pudding", "火", "2", "サボりたくなった", "2017/3/1", "2017/2/18"],
                      ["休講情報Client実践", "pudding", "集中", "-", "疲れた", "2017/4/1", "2017/2/23"]]
CANCEL_MODEL_DATA_DICTS = create_dict(CANCEL_MODEL_KEYS, CANCEL_MODEL_DATAS)
# Fixture rows for the news model tests.
NEWS_MODEL_KEYS = ["first", "detail", "link", "division", "category"]
NEWS_MODEL_DATAS = [["2017.2.10", "2/21は情報工学課程の卒研発表の日です.", "http://hoge.hoge.com/fuga",
                     "〈学務課〉", "《-》"],
                    ["2017.2.12", "今日は文字数テストをしたいと思います.これは140文字を超えるように書いています.今はとても眠いです."
                     "テストを書く作業はとてもつらいので時給が欲しいという気持ちが高まっています."
                     "僕の脳内の仕様を読み取って良い感じにして欲しい…お願い…ソフトウェア工学の力で解決して欲しい…", "",
                     "〈学務課〉", "《-》"]]
NEWS_MODEL_DATA_DICTS = create_dict(NEWS_MODEL_KEYS, NEWS_MODEL_DATAS)
| StarcoderdataPython |
65849 | # Not picklable!
import os # noqa
| StarcoderdataPython |
4834431 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 18 19:55:42 2020
@author: Dell
"""
import ifaddr
from lifxlan import LifxLAN, group, device, light
# Two known bulbs addressed directly by MAC address and LAN IP.
# NOTE(review): addresses are hard-coded — confirm they match the local network.
foco2=light.Light("D0:73:D5:5E:25:BD","192.168.0.3")
foco1=light.Light("D0:73:D5:5C:A7:DB","192.168.0.9")
# LAN-wide handle for broadcast operations.
foco = LifxLAN()
#foco.set_power_all_lights("on", rapid = False)
def brillo():
    """Interactively adjust bulb brightness.

    Repeatedly prompts for a brightness value and a bulb number; bulb 1 or 2
    gets the new brightness and the prompt repeats, any other number ends
    the session (same behavior as before).

    Fix: the original re-invoked brillo() recursively after every update,
    growing the call stack without bound during a long interactive session
    (eventually a RecursionError); an explicit loop is equivalent and safe.
    """
    while True:
        valor = int(input("Ingresa el valor: "))
        cual = int(input("que foco quieres modificar: "))
        if cual == 1:
            foco1.set_brightness(valor)
        elif cual == 2:
            foco2.set_brightness(valor)
        else:
            break


brillo()
| StarcoderdataPython |
1658006 | import pytest
from hypothesis import given
from strategies import atlas_results_metas
from fetchmesh.atlas import AtlasAnchor, Country
from fetchmesh.filters import (
AnchorRegionFilter,
HalfPairFilter,
PairRegionSampler,
PairSampler,
SelfPairFilter,
)
def test_anchor_region_filter():
    # One anchor per country code: three in France and three in the
    # Netherlands (both in Europe), plus a trailing one in the US.
    country_codes = ["FR", "FR", "FR", "NL", "NL", "NL", "US"]
    anchors = [
        AtlasAnchor(i, i, str(i), Country(code), None, None)
        for i, code in enumerate(country_codes, start=1)
    ]
    # Filtering on Europe must drop only the final US anchor.
    region_filter = AnchorRegionFilter("Europe")
    assert region_filter(anchors) == anchors[:-1]
def test_half_pair_filter():
    # The filter must produce reproducible results,
    # no matter the initial ordering of the pairs.
    pairs = [("a", "b"), ("b", "a")]
    half_filter = HalfPairFilter()
    forward = half_filter(pairs)
    backward = half_filter(reversed(pairs))
    assert forward == backward
def test_self_pair_filter():
    pairs = [("a", "a"), ("a", "b")]
    # By default, self pairs (x, x) are dropped...
    assert SelfPairFilter()(pairs) == [("a", "b")]
    # ...and with reverse=True only self pairs are kept.
    assert SelfPairFilter(reverse=True)(pairs) == [("a", "a")]
def test_pair_sampler():
    pairs = [("a", "b") for _ in range(10)]
    # A float ratio samples that fraction of the pairs...
    assert PairSampler(0.5)(pairs) == pairs[:5]
    assert PairSampler(1.0)(pairs) == pairs
    # ...while an int samples that absolute number of pairs.
    assert PairSampler(1)(pairs) == pairs[:1]
    assert PairSampler(5)(pairs) == pairs[:5]
    # Ratios above 1.0 are rejected.
    with pytest.raises(ValueError):
        PairSampler(2.0)(pairs)
def test_pair_region_sampler():
    # NOTE(review): work-in-progress test — it only checks that the sampler
    # can be constructed; the actual sampling assertion is still commented
    # out below and `pairs` is currently unused.
    anchors = [
        AtlasAnchor(1, 1, "1", Country("FR"), None, None),
        AtlasAnchor(2, 2, "2", Country("FR"), None, None),
        AtlasAnchor(3, 3, "3", Country("FR"), None, None),
        AtlasAnchor(4, 4, "4", Country("US"), None, None),
        AtlasAnchor(5, 5, "5", Country("NL"), None, None),
        AtlasAnchor(6, 6, "6", Country("NL"), None, None),
        AtlasAnchor(7, 7, "7", Country("NL"), None, None),
    ]
    pairs = [
        (anchors[0], anchors[1]),
        (anchors[0], anchors[1]),
        (anchors[0], anchors[1]),
        (anchors[0], anchors[6]),
        (anchors[3], anchors[3]),
    ]
    st = PairRegionSampler(1000, ["Europe", "Americas"])
    # print(st.sample(pairs, 100))
# def test_anchor_region_sampler():
# # TODO: Hypothesis for anchors generation
# anchors = [
# AtlasAnchor(1, 1, "1", Country("FR"), None, None),
# AtlasAnchor(2, 2, "2", Country("FR"), None, None),
# AtlasAnchor(3, 3, "3", Country("FR"), None, None),
# AtlasAnchor(4, 4, "4", Country("US"), None, None),
# AtlasAnchor(5, 5, "5", Country("NL"), None, None),
# AtlasAnchor(6, 6, "6", Country("NL"), None, None),
# AtlasAnchor(7, 7, "7", Country("NL"), None, None),
# ]
#
# st = AnchorRegionSampler(1000, ["Europe", "Northern America"])
# assert set(st(anchors)) == set(anchors)
| StarcoderdataPython |
1696668 | <reponame>e5120/EDAs<gh_stars>1-10
import os
import csv
import json
import logging
import datetime
from collections import OrderedDict
from types import MappingProxyType
import numpy as np
# Configure the root logger once at import time; Logger below relies on it.
logging.basicConfig(level=logging.INFO,
                    format="[%(asctime)s %(levelname)s] %(message)s")
class Logger(object):
    """
    A class to log an optimization process.

    Scalars registered through :meth:`add` are buffered and periodically
    written to a per-trial CSV file (array-likes are saved as ``.npy``
    files and referenced by path) and echoed to stdout.
    """
    def __init__(self, dir_path, args, logging_step=10, display_step=10):
        """
        Parameters
        ----------
        dir_path : str or None
            Directory path to output logs; ``None`` disables file output.
        args : argparse.Namespace or None
            Run arguments, serialized to ``settings.json`` when file
            output is enabled. ``args.log_dir`` is updated in place to the
            timestamped log directory.
        logging_step : int, default 10
            Interval of outputting logs to directory.
        display_step : int, default 10
            Interval of displaying logs to stdout.
        """
        if dir_path is not None:
            # Suffix with a timestamp so repeated runs never collide.
            dir_path = "{}_{}".format(dir_path,
                                      datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
            os.makedirs(dir_path, exist_ok=False)
        self.dir_path = dir_path
        self.trial_path = None
        self.logging_step = logging_step
        self.display_step = display_step
        self.args = args
        self.logger = logging.getLogger()
        self.log = OrderedDict()
        self.display = OrderedDict()
        # save arguments
        if self.dir_path and args:
            args.log_dir = self.dir_path
            with open("{}/settings.json".format(self.dir_path), "w", encoding="utf-8") as f:
                json.dump(args.__dict__, f, cls=JsonEncoder, ensure_ascii=True, indent=4)
    def set_log_columns(self, columns):
        """
        Set a column name of each log to be output in log file.

        Parameters
        ----------
        columns : array-like
            List of column names.
        """
        self.log = self._set_columns(columns)
        if self.trial_path:
            self.csv_file.writerow(columns)
    def set_display_columns(self, columns):
        """
        Set a column name of each log to be displayed in stdout.

        Parameters
        ----------
        columns : array-like
            List of column names.
        """
        self.display = self._set_columns(columns)
    def _set_columns(self, columns):
        """
        Build an ordered column->value mapping initialized to None.

        Parameters
        ----------
        columns : array-like
            List of column names.

        Returns
        -------
        collections.OrderedDict
            The key-value data, where each key is a column name and each
            value is the latest observed value (initially None).
        """
        dic = OrderedDict({column: None for column in columns})
        return dic
    def add(self, key, val, step, force=False):
        """
        Add a log value; ignored unless `key` was registered via the
        set_*_columns methods and `step` hits the corresponding interval.

        Parameters
        ----------
        key : str
            Column name.
        val : any
            Observed value such as scalar, vector, and matrix.
        step : int
            Iteration.
        force : bool, default False
            If True, force to add logs regardless of the interval.
        """
        if key in self.log and (step % self.logging_step == 0 or force):
            self.log[key] = val
        if key in self.display and (step % self.display_step == 0 or force):
            self.display[key] = val
    def output(self, step, force=False):
        """
        Output buffered logs to the trial CSV and/or stdout.

        Parameters
        ----------
        step : int
            Iteration.
        force : bool, default False
            If True, force to output logs regardless of the interval.
        """
        if (step % self.logging_step == 0 or force) and self.trial_path:
            for key, val in self.log.items():
                if isinstance(val, (list, tuple, np.ndarray)):
                    # Arrays are stored as .npy files; the CSV keeps the path.
                    val = np.array(val)
                    np_dir = "{}/{}".format(self.trial_path, key)
                    os.makedirs(np_dir, exist_ok=True)
                    np_file = "{}/{}_step".format(np_dir, step)
                    np.save(np_file, val)
                    self.log[key] = np_file
            self.csv_file.writerow(self.log.values())
        if step % self.display_step == 0 or force:
            # Fix: `np.typeDict` was removed in NumPy 1.24 and this line
            # raised AttributeError; np.generic is the common base class of
            # all NumPy scalar types and covers exactly the same values.
            msg = ", ".join(["{}: {}".format(key, val) for key, val in self.display.items()
                             if isinstance(val, (int, float, str, bool, np.generic))])
            self.logger.info(msg)
    def result(self, info, filename="results.csv"):
        """
        Output final results as a two-row CSV (header + values).

        Parameters
        ----------
        info : dict
            Information.
        filename : str, default "results.csv"
            Filename to which the information will be output.
        """
        if self.trial_path:
            with open("{}/{}".format(self.trial_path, filename), "w") as f:
                result_file = csv.writer(f)
                result_file.writerow(info.keys())
                result_file.writerow(info.values())
    def open(self, trial, filename="logs.csv"):
        """
        Start logging of one independent trial: create its directory and
        open its CSV log file.

        Parameters
        ----------
        trial : int
            The number of trials.
        filename : str, default "logs.csv"
            Filename which is output logs.
        """
        if self.dir_path:
            self.trial_path = "{}/{}".format(self.dir_path, trial)
            os.makedirs(self.trial_path, exist_ok=False)
            self.f = open("{}/{}".format(self.trial_path, filename), "w")
            self.csv_file = csv.writer(self.f)
    def close(self):
        """
        Finish logging of each independent trial and close its CSV file.
        """
        if self.trial_path:
            self.trial_path = None
            self.f.close()
    def info(self, step=0, msg=""):
        # Kept signature-compatible helper: emit `msg` on display steps.
        if step % self.display_step == 0:
            self.logger.info(msg)
    # NOTE: `info` keeps the original (msg, step) parameter order below.
    def info(self, msg, step=0):
        if step % self.display_step == 0:
            self.logger.info(msg)
class JsonEncoder(json.JSONEncoder):
    """JSON encoder for run settings: converts NumPy scalars to Python
    numbers, summarizes arrays by shape, and serializes plain objects
    through their __dict__."""
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            # Arrays may be huge; record only their shape.
            return "shape of numpy.ndarray: {}".format(obj.shape)
        elif isinstance(obj, MappingProxyType):
            return obj["__module__"]
        elif hasattr(obj, "__dict__"):
            # Fix: the original branch tested `isinstance(obj, object)`,
            # which is True for everything — the fallback below was
            # unreachable and __dict__-less objects crashed with
            # AttributeError instead of the proper TypeError.
            return obj.__dict__
        else:
            return super(JsonEncoder, self).default(obj)
| StarcoderdataPython |
3381362 | <filename>a2c_ppo_acktr/algo/ppo.py
import torch
import torch.nn as nn
import torch.optim as optim
from a2c_ppo_acktr.algo.sog import BlockCoordinateSearch, OneHotSearch
class PPO:
    """Proximal Policy Optimization updater (clipped surrogate objective),
    optionally augmented with an SOG imitation loss term (sog-gail)."""
    def __init__(self,
                 actor_critic,
                 args,
                 lr=None,
                 eps=None,
                 max_grad_norm=None,
                 use_clipped_value_loss=True):
        self.actor_critic = actor_critic
        # Core PPO hyper-parameters from the parsed command line.
        self.clip_param = args.clip_param
        self.ppo_epoch = args.ppo_epoch
        self.num_mini_batch = args.num_mini_batch
        self.value_loss_coef = args.value_loss_coef
        self.entropy_coef = args.entropy_coef
        # sog gail
        self.sog_gail = args.sog_gail
        self.sog_gail_coef = args.sog_gail_coef if self.sog_gail else None
        # Latent-code search strategy: block coordinate search ('bcs') or
        # one-hot search ('ohs'); anything else is rejected.
        if args.latent_optimizer == 'bcs':
            SOG = BlockCoordinateSearch
        elif args.latent_optimizer == 'ohs':
            SOG = OneHotSearch
        else:
            raise NotImplementedError
        self.sog = SOG(actor_critic, args) if args.sog_gail else None
        self.max_grad_norm = max_grad_norm
        self.use_clipped_value_loss = use_clipped_value_loss
        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)
        self.device = args.device
    def update(self, rollouts, sog_train_loader, obsfilt):
        # Run `ppo_epoch` passes of minibatch PPO updates over `rollouts`;
        # returns per-update averages of (value loss, action loss, entropy,
        # sog loss or None).
        advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
        # Normalize advantages for numerically stable policy gradients.
        advantages = (advantages - advantages.mean()) / (
            advantages.std() + 1e-5)
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        sog_loss_epoch = 0
        for e in range(self.ppo_epoch):
            data_generator = rollouts.feed_forward_generator(advantages, self.num_mini_batch)
            for sample in data_generator:
                obs_batch, latent_codes_batch, actions_batch, \
                    value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, \
                    adv_targ = sample
                # Reshape to do in a single forward pass for all steps
                values, action_log_probs, dist_entropy = self.actor_critic.evaluate_actions(
                    obs_batch, latent_codes_batch, actions_batch)
                # PPO clipped surrogate: ratio of new to old action probs.
                ratio = torch.exp(action_log_probs -
                                  old_action_log_probs_batch)
                surr1 = ratio * adv_targ
                surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
                                    1.0 + self.clip_param) * adv_targ
                action_loss = -torch.min(surr1, surr2).mean()
                if self.use_clipped_value_loss:
                    # Clipped value loss (PPO2-style): keep the value head
                    # from moving too far from its rollout-time prediction.
                    value_pred_clipped = value_preds_batch + \
                        (values - value_preds_batch).clamp(-self.clip_param, self.clip_param)
                    value_losses = (values - return_batch).pow(2)
                    value_losses_clipped = (
                        value_pred_clipped - return_batch).pow(2)
                    value_loss = 0.5 * torch.max(value_losses,
                                                 value_losses_clipped).mean()
                else:
                    value_loss = 0.5 * (return_batch - values).pow(2).mean()
                loss = value_loss * self.value_loss_coef + action_loss - dist_entropy * self.entropy_coef
                if self.sog_gail:
                    # Imitation term: SOG prediction loss on a minibatch of
                    # expert (state, action) pairs, states normalized by the
                    # frozen observation filter.
                    expert_state, expert_action = next(sog_train_loader)
                    expert_state = obsfilt(expert_state.numpy(), update=False)
                    expert_state = torch.tensor(expert_state, dtype=torch.float32, device=self.device)
                    expert_action = expert_action.to(self.device)
                    sog_loss = self.sog.predict_loss(expert_state, expert_action)
                    loss += sog_loss * self.sog_gail_coef
                self.optimizer.zero_grad()
                loss.backward()
                # Global gradient-norm clipping before the optimizer step.
                nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
                if self.sog_gail:
                    sog_loss_epoch += sog_loss.item()
        # Average the accumulated losses over all minibatch updates.
        num_updates = self.ppo_epoch * self.num_mini_batch
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        if self.sog_gail:
            sog_loss_epoch /= num_updates
        else:
            sog_loss_epoch = None
        return value_loss_epoch, action_loss_epoch, dist_entropy_epoch, sog_loss_epoch
| StarcoderdataPython |
3328299 | """
Traffic/VPN subparser package.
"""
import api_parser._traffic.subparsers as sps
def create_traffic_subparser(subparsers):
    """
    Creates the _traffic subparser.

    Args:
        subparsers: Subparser object from argparse obtined from calling ArgumentParser.add_subparsers().
    """
    parser = subparsers.add_parser('vpn', description="Subparser for VPN/Traffic Forwarding management.")
    sub = parser.add_subparsers(required=True,
                                description='Functionalities of the traffic fwd management module',
                                dest='Any of the subcommands'
                                )
    # Register every VPN credential / tunnel subcommand in turn.
    registrars = (
        sps.get_vpn_creds_sp,
        sps.add_vpn_creds_sp,
        sps.bulk_del_vpn_creds_sp,
        sps.get_vpn_cred_info_sp,
        sps.upd_vpn_cred_sp,
        sps.del_vpn_cred_sp,
        sps.ip_gre_tunnel_info_sp,
        sps.get_vips_sp,
    )
    for register in registrars:
        register(sub)
| StarcoderdataPython |
3240690 | from flask import request
import time
import threading
from libs import ddbb
from hashlib import md5
import json
# Shared rate-limiter state: per-IP [fingerprints, expiry] buckets, guarded
# by a module-wide lock since Flask may serve requests from multiple threads.
limiter = {}
llimiter = threading.Lock()
# Number of distinct request fingerprints within a window that triggers a block.
count_limit = 5
def get_ip():
    """Return the client IP, trusting X-Real-Ip only when the direct peer
    is the configured reverse proxy."""
    forwarded = request.headers.get('X-Real-Ip')
    direct = request.remote_addr
    if forwarded is not None and direct == ddbb.settings.proxy:
        return forwarded
    return direct
def count(login=False):
    """Record one request fingerprint for the caller's IP.

    A fingerprint is the md5 of the request headers (login attempts) or of
    the request cookies (other requests); repeated requests with the same
    fingerprint only count once within the 30-second window tracked in
    `limiter`.
    """
    ip = get_ip()
    if login:
        fingerprint_src = {k: v for k, v in request.headers.items()}
    else:
        # Fix: this branch previously read request.headers as well, making it
        # identical to the login branch despite the variable being named
        # "cookies"; hash the cookies as evidently intended.
        fingerprint_src = {k: v for k, v in request.cookies.items()}
    # Fix: the original serialized twice (json.dumps of a JSON string); a
    # single serialization yields an equally stable fingerprint.
    digest = md5(json.dumps(fingerprint_src).encode()).hexdigest()
    with llimiter:
        limit = limiter.get(ip)
        # No bucket yet, or the previous window expired: start a fresh one.
        if limit is None or (time.time() - limit[1]) >= 0:
            limiter[ip] = [[digest], time.time() + 30]
            return
        # Same fingerprint within the window: do not count it again.
        if digest in limit[0]:
            return
        limit[0].append(digest)
        limit[1] = time.time() + 30
        limiter[ip] = limit
def check():
    """Return False while the caller's IP is rate limited, True otherwise."""
    ip = get_ip()
    with llimiter:
        entry = limiter.get(ip)
        blocked = (
            entry is not None
            and len(entry[0]) >= count_limit
            and (time.time() - entry[1]) < 0
        )
        if blocked:
            # Every rejected attempt extends the block window by 30 seconds.
            entry[1] = time.time() + 30
            limiter[ip] = entry
        return not blocked
| StarcoderdataPython |
1636092 | <filename>ersilia/hub/content/card.py<gh_stars>10-100
import os
import json
import collections
import tempfile
import requests
from pyairtable import Table
from ... import ErsiliaBase
from ...utils.terminal import run_command
from ...auth.auth import Auth
from ...default import (
AIRTABLE_READONLY_API_KEY,
AIRTABLE_MODEL_HUB_BASE_ID,
AIRTABLE_MODEL_HUB_TABLE_NAME,
)
from ...default import CARD_FILE
# Pagination bounds used when iterating the Airtable model hub table.
AIRTABLE_MAX_ROWS = 100000
AIRTABLE_PAGE_SIZE = 100
class ReadmeCard(ErsiliaBase):
    """Model card assembled by parsing a model repository's README.md."""

    def __init__(self, config_json):
        ErsiliaBase.__init__(self, config_json=config_json)

    def _raw_readme_url(self, model_id):
        # Raw (unrendered) README of the model repository on GitHub.
        url = (
            "https://raw.githubusercontent.com/ersilia-os/{0}/master/README.md".format(
                model_id
            )
        )
        return url

    def _gh_view(self, model_id):
        # Fetch the repository view text via the GitHub CLI.
        # NOTE(review): the temporary directory is never removed — consider
        # tempfile.TemporaryDirectory; left unchanged to preserve behavior.
        tmp_folder = tempfile.mkdtemp()
        tmp_file = os.path.join(tmp_folder, "view.md")
        cmd = "gh repo view {0}/{1} > {2}".format("ersilia-os", model_id, tmp_file)
        run_command(cmd)
        with open(tmp_file, "r") as f:
            text = f.read()
        return text

    def _title(self, lines):
        """Title is determined by the first main header in markdown."""
        for l in lines:
            if l[:2] == "# ":
                return l[2:].strip()

    def _description(self, lines):
        """Description is what comes after the title and before the next header."""
        text = "\n".join(lines)
        return text.split("# ")[1].split("\n")[1].split("#")[0].strip()

    def _model_github_url(self, model_id):
        return "https://github.com/ersilia-os/{0}".format(model_id)

    def parse(self, model_id):
        """Return a card dict parsed from the README, or None if unavailable.

        Sources, in order: the locally fetched README, `gh repo view` when the
        user is a contributor, otherwise the raw GitHub URL."""
        readme = os.path.join(self._dest_dir, model_id, "README.md")
        if os.path.exists(readme):
            with open(readme, "r") as f:
                text = f.read()
        else:
            if Auth().is_contributor():
                text = self._gh_view(model_id)
                if not text:
                    return None
                # Drop the gh banner before the first "--" delimiter.
                text = "--".join(text.split("--")[1:])
            else:
                r = requests.get(self._raw_readme_url(model_id))
                if r.status_code != 200:
                    return None
                text = r.text
        lines = text.split(os.linesep)
        # Fix: title and description were previously computed into locals that
        # were discarded, then recomputed inside the dict; parse each once.
        results = {
            "model_id": model_id,
            "title": self._title(lines),
            "description": self._description(lines),
            "github_url": self._model_github_url(model_id),
        }
        return results

    def get(self, model_id):
        return self.parse(model_id)
class AirtableCard(ErsiliaBase):
    """Model card looked up from the public Ersilia Model Hub Airtable base."""

    def __init__(self, config_json):
        ErsiliaBase.__init__(self, config_json=config_json)
        self.api_key = AIRTABLE_READONLY_API_KEY
        self.base_id = AIRTABLE_MODEL_HUB_BASE_ID
        self.table_name = AIRTABLE_MODEL_HUB_TABLE_NAME
        self.max_rows = AIRTABLE_MAX_ROWS
        self.page_size = AIRTABLE_PAGE_SIZE
        self.table = Table(self.api_key, self.base_id, self.table_name)

    def _find_card(self, text, field):
        # Walk the table page by page; return the fields of the first record
        # whose `field` column equals `text`, or None when nothing matches.
        for page in self.table.iterate(
            page_size=self.page_size, max_records=self.max_rows
        ):
            for record in page:
                fields = record["fields"]
                if field in fields and fields[field] == text:
                    return fields
        return None

    def find_card_by_model_id(self, model_id):
        return self._find_card(model_id, "Identifier")

    def find_card_by_slug(self, slug):
        return self._find_card(slug, "Slug")

    def get(self, model_id):
        return self.find_card_by_model_id(model_id)
class LocalCard(ErsiliaBase):
    """Model card read from the card file of a locally fetched model."""

    def __init__(self, config_json):
        ErsiliaBase.__init__(self, config_json=config_json)

    def get(self, model_id):
        # Cards of fetched models live at <model_path>/<CARD_FILE>.
        card_path = os.path.join(self._model_path(model_id), CARD_FILE)
        if not os.path.exists(card_path):
            return None
        with open(card_path, "r") as fh:
            return json.load(fh)
class ModelCard(object):
    """Facade that resolves a model card from the first source that has it:
    local card file, then Airtable, then the repository README."""

    def __init__(self, config_json=None):
        self.lc = LocalCard(config_json=config_json)
        self.ac = AirtableCard(config_json=config_json)
        self.rc = ReadmeCard(config_json=config_json)

    def _get(self, model_id):
        # Sources are tried in priority order; the first hit wins.
        for source in (self.lc, self.ac, self.rc):
            card = source.get(model_id)
            if card is not None:
                return card
        return None

    def get(self, model_id, as_json=False):
        card = self._get(model_id)
        if card is None:
            return
        return json.dumps(card, indent=4) if as_json else card
| StarcoderdataPython |
1678287 | # coding=utf-8
"""API to most common queries to the dataset."""
import collections
import os
import sqlite3
from typing import AnyStr
import tqdm
def main():
    """Demo of the EvaldDB API against the bundled evalution2 database."""
    db_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '../data/dataset/evalution2.db'))
    # use verbose=1 for debugging.
    db = EvaldDB(db_path, verbose=0)
    # get id from name or name from id. Can be used with lang, name, and rel.
    db.lang_id('en')
    db.lang_name(6)
    # get all words in a language
    db.all_words('en')
    # get all synonyms in a language
    syns = db.synonyms()
    # return True of two words are synonyms.
    print(db.are_syns('Behaviorism', 'Behaviourism'))
    print(db.are_syns('Behaviorism', 'banking'))
    # yield all synsets the argument word appears in.
    # returns all pairs of synsets in a relation
    db.rel_pairs('hypernym')
    # returns all words in a synset
    hypernyms = db.words_in_synset(1)
    all_syns = db.all_synsets()
    # TODO: db.are_rel('Auto serviço', 'livello', 'isa')
    pass
class EvaldDB:
    """A connection object to an evalution db with some useful queries as methods.

    NOTE(review): every query is assembled with %-interpolation; any value
    containing a quote will break the statement (and allows SQL injection).
    Parameterized queries (cursor.execute(sql, params)) would be safer —
    confirm callers before changing, since query() takes raw SQL strings.
    """
    def __init__(self, db_name, verbose=0):
        # If the DB file is missing, ask about downloading it; the download
        # itself is not implemented yet, so the answer is raised instead.
        if not os.path.exists(db_name):
            answer = input(db_name + ' does not exist. Do you want to download it (192MB)? [Y/n]')
            # TODO: download the file
            raise ValueError(answer)
        self.verbose = verbose
        self.conn = sqlite3.connect(db_name)
        self.cursor = self.conn.cursor()
        # Ring buffer of the last few (sql, result) pairs, for debugging.
        self.result_history_len = 5
        self.result_history = collections.deque(maxlen=self.result_history_len)
        self.relations = dict()
        # TODO: refactor this garbage to use an ORM.
    def lang_id(self, lang_name):
        """Return the lang id from the two character language code."""
        return self.query('select language_id from language where language_value like "%s"' %
                          str(lang_name.lower()))[0][0]
    def lang_name(self, lang_code):
        """Return the lang name from the lang id."""
        return self.query('select language_value from language where language_id = %s' % str(lang_code))[0][0]
    def word_id(self, word_name):
        """Return a word's id from its (lower-cased) value."""
        return self.query('select word_id from word where word_value = "%s"' % str(word_name.lower()))[0][0]
    def word_name(self, word_id):
        """Return a word's value from its id."""
        return self.query('select word_value from word where word_id = %s' % str(word_id))[0][0]
    def rel_id(self, rel_name):
        """Return a relation id from a relation name."""
        try:
            return self.query('select relationName_id from relationname where relationName_value = "%s"'
                              % rel_name.lower())[0][0]
        except IndexError as e:
            raise IndexError(str(e) + '. Relation does not exist.')
    def rel_name(self, rel_id):
        """Return a relation name from a relation id."""
        return self.query('select relationName_value from relationname where relationName_id = %s' % rel_id)[0][0]
    def all_words(self, lang: AnyStr = 'en') -> set():
        """Returns all words in a language.

        Args:
            lang: the two character identifier of the language (e.g. 'en') or its id.
            See docs/langs.txt for a list of lgs.

        Returns:
            A set containing the words in the dataset.
        """
        # Accept either a numeric language id or a two-letter code.
        try:
            int(lang)
        except ValueError:
            lang = self.lang_id(lang)
        # Select subsqueries seem to be way slower.
        word_ids = "select word_id from allwordsenses where language_id = %s" % str(lang)
        # TODO: return self.query(self.word_values(word_ids))
        return [w[0] for w in self.query('select word_value from word where '
                                         'word_id in (%s)' % word_ids)]
    def rel_pairs(self, rel: AnyStr) -> set():
        """Return a set of pairs of synsets related by rel. If rel is None, returns a set of all synsets related by any rel.

        Args:
            rel: the relation to detected. See docs/relations.txt for the list of supported relations.

        Returns:
            A set containing tuples with the two synsets related by the relation rel.
        """
        # Accept either a numeric relation id or a relation name.
        try:
            int(rel)
        except ValueError:
            rel = self.rel_id(rel)
        # TODO add support for lang.
        pairs = self.query('select sourcesynset_id, targetsynset_id from synsetrelations where relation_id="%s"' % rel)
        return pairs
    def synonyms(self, lang='en'):
        """Returns a dictionary with keys = synset ID and value = set of tuples with all synonyms in a language."""
        # return every sense with more than one word (i.e. a sense with synonyms).
        # NOTE(review): the comment above says "more than one word", but the
        # HAVING clause keeps only senses with exactly 23 members — this looks
        # like a debugging leftover; confirm whether `having c > 1` was meant.
        syns = dict()
        sense_ids = self.query('select wordsense_id, count(*) as c from allwordsenses where language_id = %s '
                               'group by wordsense_id having c = 23' % self.lang_id(lang))
        for no, sense in enumerate(tqdm.tqdm(sense_ids, mininterval=0.5, total=len(sense_ids))):
            words = set(self.query('select ( select word_value from word where word_id = allwordsenses.word_id ) '
                                   'from allwordsenses where wordsense_id = %s' % sense[0]))
            syns[sense[0]] = {w[0].lower() for w in words}
        return syns
    def all_synsets(self):
        """Returns all synset IDs"""
        return [s[0] for s in self.query('select synset_id from synsetID')]
    def random_synset(self):
        """Returns a random synset. (Not implemented yet; returns None.)"""
    def which_rels(self, w1: AnyStr, w2: AnyStr) -> set():
        """Returns a set containing the relations from w1 to w2 (order sensitive). (Not implemented.)"""
        pass
    def are_rel(self, w1: AnyStr, w2: AnyStr, rel) -> bool:
        """Returns True if w1 and w2 are related by a rel, if rel is None, return True if w1 and w2 are related by any rel.
        Return False otherwise. (Not implemented.)"""
        pass
    def are_syns(self, w1, w2, lang='en'):
        """Returns true if two words are synonyms."""
        # Two words are synonyms when some sense contains both of them.
        result = self.query('select count(*) from allwordsenses where (word_id = %s or word_id = %s) '
                            'and language_id = %s group by wordsense_id' % (
                                self.word_id(w1), self.word_id(w2), self.lang_id(lang)))
        return True if result[0][0] > 1 else False
    def synset_of(self, word, lang='en', min_len=2):
        """Returns the synsets where `word` appears.

        Yields (sense_id, words) tuples for each sense containing at least
        `min_len` words."""
        # get all the synsets a word appears in
        synsets = self.query('select wordsense_id from allwordsenses '
                             'where language_id = %s and word_id = %s' % (self.lang_id(lang), self.word_id(word)))
        # then get all the words in each of those synsets.
        for sense_id in synsets:
            synsets = [self.word_name(word_id[0]) for word_id in \
                       self.query('select word_id from allwordsenses where wordsense_id = %s' % sense_id[0])]
            # TODO: is it normal that there are so many singleton synsets?
            if len(synsets) >= min_len:
                yield (sense_id, synsets)
    def words_in_synset(self, synset: int, lang='en') -> set:
        """Returns the set of words in a synset"""
        words = self.query('select ( select word_value from word where word_id = word2Synset.word_id ) '
                           'from word2Synset where synset_id = %s and language_id = %s' % (str(synset), self.lang_id(lang)))
        #print(words)
        # Keep only string values (NULL word values come back as None).
        words = {w[0].lower() for w in words if type(w[0])==str}
        return words
    def query(self, sql: AnyStr) -> AnyStr:
        """Execute an arbitrary query.

        NOTE(review): OperationalError is printed and swallowed, after which
        fetchall() runs against the previous cursor state — callers cannot
        distinguish a failed query from an empty result; confirm intended.
        """
        try:
            self.cursor.execute(sql)
        except sqlite3.OperationalError as e:
            print(e)
        result = self.cursor.fetchall()
        if self.verbose:
            print("entries: %d\n\t%s -> %s" % (len(result), sql, result[:5]))
        # Record the query and its result in the debugging history.
        self.result_history.append((sql, result))
        return self.result_history[-1][1]
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the connection; returning the exc tuple is truthy for real
        # exceptions, which suppresses them — NOTE(review): confirm intended.
        if self.conn:
            self.conn.close()
        return exc_type, exc_val, exc_tb
if __name__ == "__main__":
main() | StarcoderdataPython |
3233640 | <filename>app.py
import dash
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
# Bootstrap the Dash application with the Bootstrap FLATLY theme.
# suppress_callback_exceptions is presumably needed because callbacks target
# components rendered only after navigation — TODO confirm against the layout.
app = dash.Dash(__name__, suppress_callback_exceptions=True, external_stylesheets=[dbc.themes.FLATLY])
app.title = 'Crypto Dollar Cost Calculator'
# Underlying Flask server, exposed for WSGI deployment (e.g. gunicorn).
server = app.server
# NOTE(review): redundant — already set via the constructor argument above.
app.config.suppress_callback_exceptions = True
197998 | from trp.t_pipeline import add_page_orientation, order_blocks_by_geo
from typing import List
from trp.t_pipeline import add_page_orientation, order_blocks_by_geo, pipeline_merge_tables, add_kv_ocr_confidence
from trp.t_tables import MergeOptions, HeaderFooterType
import trp.trp2 as t2
import trp as t1
import json
import os
import pytest
from trp import Document
from uuid import uuid4
import logging
current_folder = os.path.dirname(os.path.realpath(__file__))
def return_json_for_file(filename):
    """Parse and return the JSON fixture `filename` located next to this module."""
    fixture_path = os.path.join(current_folder, filename)
    with open(fixture_path) as fh:
        return json.load(fh)
@pytest.fixture
def json_response():
    # Canned Textract API response shared by the tests that request it.
    return return_json_for_file("test-response.json")
def test_serialization():
    """
    testing that None values are removed when serializing
    """
    schema = t2.TGeometrySchema()
    points = [t2.TPoint(x=0.1, y=0.1), t2.TPoint(x=0.3, y=None)]  # type:ignore forcing a None/null value
    # A bounding box with a forced None member must serialize without "null".
    box_missing_top = t2.TBoundingBox(0.4, 0.3, 0.1, top=None)  # type:ignore
    dumped = schema.dumps(t2.TGeometry(bounding_box=box_missing_top, polygon=points))
    assert "null" not in dumped
    # A fully populated box must also serialize without "null" (the polygon
    # still carries a None coordinate).
    box_full = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2)
    dumped = schema.dumps(t2.TGeometry(bounding_box=box_full, polygon=points))
    assert "null" not in dumped
def test_tblock_order_blocks_by_geo():
    """Tables in data/gib.json must be reordered top-to-bottom by geometry."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Fix: the file handle was opened without ever being closed; use a
    # context manager so it is released deterministically.
    with open(os.path.join(p, "data/gib.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    new_order = order_blocks_by_geo(t_document)
    doc = t1.Document(t2.TDocumentSchema().dump(new_order))
    assert "Value 1.1.1" == doc.pages[0].tables[0].rows[0].cells[0].text.strip()
    assert "Value 2.1.1" == doc.pages[0].tables[1].rows[0].cells[0].text.strip()
    assert "Value 3.1.1" == doc.pages[0].tables[2].rows[0].cells[0].text.strip()
def test_tblock_order_block_by_geo_multi_page():
    """Geometric ordering must also work on documents with multi-page tables."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib_multi_page_tables.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    t_document = order_blocks_by_geo(t_document)
    doc = t1.Document(t2.TDocumentSchema().dump(t_document))
    assert "Page 1 - Value 1.1.1" == doc.pages[0].tables[0].rows[0].cells[0].text.strip()
    assert "Page 1 - Value 2.1.1" == doc.pages[0].tables[1].rows[0].cells[0].text.strip()
def test_tblock():
    """Round-trip a document through order_blocks_by_geo and check cell texts.

    NOTE(review): this is an exact duplicate of test_tblock_order_blocks_by_geo
    above — consider removing one of the two.
    """
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    new_order = order_blocks_by_geo(t_document)
    doc = t1.Document(t2.TDocumentSchema().dump(new_order))
    assert "Value 1.1.1" == doc.pages[0].tables[0].rows[0].cells[0].text.strip()
    assert "Value 2.1.1" == doc.pages[0].tables[1].rows[0].cells[0].text.strip()
    assert "Value 3.1.1" == doc.pages[0].tables[2].rows[0].cells[0].text.strip()
def test_custom_tblock():
    """Values stored in TDocument.custom must survive schema serialization."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    t_document.custom = {'testblock': {'here': 'is some fun stuff'}}
    assert 'testblock' in t2.TDocumentSchema().dumps(t_document)
def _orientation_for_fixture(filename):
    """Load a Textract JSON fixture and return its TDocument with page
    orientation information added by add_page_orientation."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Context manager closes the handle; the original code leaked 8 handles.
    with open(os.path.join(p, filename)) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    return add_page_orientation(t_document)


def test_custom_page_orientation(json_response):
    """add_page_orientation must estimate page rotation within tolerance for
    fixtures rotated by various known angles, and the 'Custom' attribute must
    survive serialization."""
    doc = Document(json_response)
    assert 1 == len(doc.pages)
    lines = [line for line in doc.pages[0].lines]
    assert 22 == len(lines)
    words = [word for line in lines for word in line.words]
    assert 53 == len(words)
    # A manually-set custom orientation must round-trip through the schema.
    t_document: t2.TDocument = t2.TDocumentSchema().load(json_response)
    t_document.custom = {'orientation': 180}
    new_t_doc_json = t2.TDocumentSchema().dump(t_document)
    assert "Custom" in new_t_doc_json
    assert "orientation" in new_t_doc_json["Custom"]
    assert new_t_doc_json["Custom"]["orientation"] == 180
    # (fixture file, exclusive lower bound, exclusive upper bound) in degrees.
    cases = [
        ("data/gib.json", -1, 2),
        ("data/gib_10_degrees.json", 5, 15),
        ("data/gib__15_degrees.json", 10, 20),
        ("data/gib__25_degrees.json", 17, 30),
        ("data/gib__180_degrees.json", 170, 190),
        ("data/gib__270_degrees.json", -100, -80),
        ("data/gib__90_degrees.json", 80, 100),
        ("data/gib__minus_10_degrees.json", -10, 5),
    ]
    for filename, low, high in cases:
        t_document = _orientation_for_fixture(filename)
        assert low < t_document.pages[0].custom['PageOrientationBasedOnWords'] < high
    # The last document (minus-10-degrees fixture) must expose the orientation
    # through the t1 API as well, as in the original test.
    doc = t1.Document(t2.TDocumentSchema().dump(t_document))
    for page in doc.pages:
        assert page.custom['PageOrientationBasedOnWords']
def test_filter_blocks_by_type():
    """Filtering for WORD keeps a WORD block in the result."""
    word_block = t2.TBlock(id="1", block_type=t2.TextractBlockTypes.WORD.name)
    filtered = t2.TDocument.filter_blocks_by_type(
        block_list=[word_block], textract_block_type=[t2.TextractBlockTypes.WORD])
    assert filtered == [word_block]
def test_next_token_response():
    """A paginated response (containing NextToken) must still load and allow
    orientation tagging."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib.json")) as f:
        j = json.load(f)
    assert j['NextToken']
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    t_document = add_page_orientation(t_document)
    assert t_document.pages[0].custom
def test_rotate_point():
    """Exercise TPoint.rotate for several angles and pivot points.

    Results are compared after rounding because rotation uses floating-point
    trigonometry; force_limits=False allows coordinates outside [0, 1].
    """
    # Sanity check: TPoint supports value equality.
    assert t2.TPoint(2, 2) == t2.TPoint(2, 2)
    # Rotations around the coordinate origin (0, 0).
    p = t2.TPoint(2, 2).rotate(degrees=180, origin_y=0, origin_x=0, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(-2, -2)
    p = t2.TPoint(3, 4).rotate(degrees=-30, origin_y=0, origin_x=0, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(5, 2)
    p = t2.TPoint(3, 4).rotate(degrees=-77, origin_y=0, origin_x=0, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(5, -2)
    p = t2.TPoint(3, 4).rotate(degrees=-90, origin_y=0, origin_x=0, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(4, -3)
    p = t2.TPoint(3, 4).rotate(degrees=-270, origin_y=0, origin_x=0, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(-4, 3)
    # Rotations around non-origin pivot points.
    p = t2.TPoint(2, 2).rotate(degrees=180, origin_x=1, origin_y=1)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(0, 0)
    p = t2.TPoint(3, 4).rotate(degrees=-30, origin_y=0, origin_x=0, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(5, 2)
    p = t2.TPoint(3, 4).rotate(degrees=-77, origin_x=4, origin_y=4, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(4, 5)
    p = t2.TPoint(3, 4).rotate(degrees=-90, origin_x=4, origin_y=6, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(2, 7)
    p = t2.TPoint(3, 4).rotate(degrees=-270, origin_x=4, origin_y=6, force_limits=False)
    assert t2.TPoint(x=round(p.x), y=round(p.y)) == t2.TPoint(6, 5)
def test_rotate():
    """Rotating all four corners of a bounding box by 180 degrees around the
    page center yields a valid (non-None) point for every corner."""
    box_width = 0.05415758863091469
    box_height = 0.011691284365952015
    box_left = 0.13994796574115753
    box_top = 0.8997916579246521
    pivot: t2.TPoint = t2.TPoint(x=0.5, y=0.5)
    angle: float = 180
    # The four corners: top-left, top-right, bottom-left, bottom-right.
    corners = [
        t2.TPoint(x=box_left + dx, y=box_top + dy).rotate(
            origin_x=pivot.x, origin_y=pivot.y, degrees=angle)
        for dx in (0, box_width)
        for dy in (0, box_height)
    ]
    assert None not in corners
def test_adjust_bounding_boxes_and_polygons_to_orientation():
    """Smoke test: orientation tagging and geometric reordering of a 180-degree
    rotated document run without raising.

    NOTE(review): the large blocks of commented-out exploration code that used
    to live here were removed; this test still has no assertions on the
    rotated geometry itself.
    """
    # FIXME: remove duplicates in relationship_recursive!
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib__180_degrees.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    t_document = add_page_orientation(t_document)
    new_order = order_blocks_by_geo(t_document)
    doc = t1.Document(t2.TDocumentSchema().dump(t_document))
    assert doc is not None and new_order is not None
def test_scale(caplog):
    """scale() converts ratio coordinates into absolute document coordinates
    for points, bounding boxes and full geometries."""
    point = t2.TPoint(x=0.5, y=0.5)
    point.scale(doc_width=10, doc_height=10)
    assert point == t2.TPoint(x=5, y=5)
    box = t2.TBoundingBox(width=0.1, height=0.1, left=0.5, top=0.5)
    box.scale(doc_width=10, doc_height=10)
    assert box == t2.TBoundingBox(width=1, height=1, left=5, top=5)
    # Scaling a geometry must scale both its bounding box and its polygon.
    geometry = t2.TGeometry(
        bounding_box=t2.TBoundingBox(width=0.1, height=0.1, left=0.5, top=0.5),
        polygon=[t2.TPoint(x=0.5, y=0.5)])
    geometry.scale(doc_width=10, doc_height=10)
    expected = t2.TGeometry(
        bounding_box=t2.TBoundingBox(width=1, height=1, left=5, top=5),
        polygon=[t2.TPoint(x=5, y=5)])
    assert geometry == expected
def test_ratio(caplog):
    """ratio() converts absolute document coordinates back into [0, 1] ratios
    for points, bounding boxes and full geometries."""
    scaled_point = t2.TPoint(x=5, y=5)
    scaled_point.ratio(doc_width=10, doc_height=10)
    assert scaled_point == t2.TPoint(x=0.5, y=0.5)
    scaled_box = t2.TBoundingBox(width=1, height=1, left=5, top=5)
    scaled_box.ratio(doc_width=10, doc_height=10)
    assert scaled_box == t2.TBoundingBox(width=0.1, height=0.1, left=0.5, top=0.5)
    # Converting a geometry must convert both its bounding box and its polygon.
    scaled_geometry = t2.TGeometry(
        bounding_box=t2.TBoundingBox(width=1, height=1, left=5, top=5),
        polygon=[t2.TPoint(x=5, y=5)])
    scaled_geometry.ratio(doc_width=10, doc_height=10)
    expected = t2.TGeometry(
        bounding_box=t2.TBoundingBox(width=0.1, height=0.1, left=0.5, top=0.5),
        polygon=[t2.TPoint(x=0.5, y=0.5)])
    assert scaled_geometry == expected
def test_get_blocks_for_relationship(caplog):
    """Resolving relationships of a known KEY block yields exactly one VALUE
    block, whose CHILD relationship resolves to exactly one block."""
    caplog.set_level(logging.DEBUG)
    # existing relationships
    p = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(p, "data/gib.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    # NOTE(review): 'page' is assigned but never used.
    page = t_document.pages[0]
    # Hard-coded block id taken from the gib.json fixture.
    block = t_document.get_block_by_id("458a9301-8a9d-4eb2-9469-70302c62622e")
    relationships = block.get_relationships_for_type()
    relationships_value = block.get_relationships_for_type(relationship_type="VALUE")
    if relationships and relationships_value:
        rel = t_document.get_blocks_for_relationships(relationship=relationships)
        assert len(rel) == 1
        rel_value = t_document.get_blocks_for_relationships(relationship=relationships_value)
        assert len(rel_value) == 1
        child_rel: List[t2.TBlock] = list()
        for value_block in rel_value:
            child_rel.extend(t_document.get_blocks_for_relationships(value_block.get_relationships_for_type()))
        assert len(child_rel) == 1
    else:
        # Fixture is expected to contain both relationship kinds; fail if not.
        assert False
def test_add_ids_to_relationships(caplog):
    """add_ids_to_relationships must create relationship entries on blocks
    that previously had none."""
    tdocument = t2.TDocument()
    # Minimal PAGE block covering the whole (normalized) page.
    page_block = t2.TBlock(
        id=str(uuid4()),
        block_type="PAGE",
        geometry=t2.TGeometry(bounding_box=t2.TBoundingBox(width=1, height=1, left=0, top=0),
                              polygon=[t2.TPoint(x=0, y=0), t2.TPoint(x=1, y=1)]),
    )
    # A synthetic (VIRTUAL) WORD block with zero-size geometry.
    tblock = t2.TBlock(id=str(uuid4()),
                       block_type="WORD",
                       text="sometest",
                       geometry=t2.TGeometry(bounding_box=t2.TBoundingBox(width=0, height=0, left=0, top=0),
                                             polygon=[t2.TPoint(x=0, y=0), t2.TPoint(x=0, y=0)]),
                       confidence=99,
                       text_type="VIRTUAL")
    tdocument.add_block(page_block)
    tdocument.add_block(tblock)
    # Link the word under the page, and give the word two (dangling) child ids.
    page_block.add_ids_to_relationships([tblock.id])
    tblock.add_ids_to_relationships(["1", "2"])
    assert page_block.relationships and len(page_block.relationships) > 0
    assert tblock.relationships and len(tblock.relationships) > 0
def test_key_value_set_key_name(caplog):
    """Iterate the form keys of the fixture and print key and value texts.

    NOTE(review): beyond the non-empty check this test only prints; it has no
    assertions on the key/value texts themselves.
    """
    caplog.set_level(logging.DEBUG)
    # existing relationships
    p = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(p, "data/gib.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    page = t_document.pages[0]
    keys = list(t_document.keys(page=page))
    assert keys and len(keys) > 0
    for key_value in keys:
        # Print the text of every CHILD block that makes up the key.
        child_relationship = key_value.get_relationships_for_type('CHILD')
        if child_relationship:
            for id in child_relationship.ids:
                k_b = t_document.get_block_by_id(id=id)
                print(k_b.text)
        print(' '.join([x.text for x in t_document.value_for_key(key_value)]))
def test_get_relationships_for_type(caplog):
    """Blocks added to a page's relationships must be retrievable by id, both
    on a loaded document and on a freshly-built one."""
    # existing relationships
    p = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(p, "data/gib.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    page = t_document.pages[0]
    new_block = t2.TBlock(id=str(uuid4()))
    t_document.add_block(new_block)
    page.add_ids_to_relationships([new_block.id])
    assert t_document.get_block_by_id(new_block.id) == new_block
    #empty relationships
    t_document: t2.TDocument = t2.TDocument()
    t_document.add_block(t2.TBlock(id=str(uuid4()), block_type="PAGE"))
    page = t_document.pages[0]
    new_block = t2.TBlock(id=str(uuid4()))
    t_document.add_block(new_block)
    page.add_ids_to_relationships([new_block.id])
    assert t_document.get_block_by_id(new_block.id) == new_block
def test_merge_tables():
    """merge_tables must concatenate table 2's cells into table 1 and continue
    the row numbering after table 1's last row."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib_multi_page_tables.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    # Table ids taken from the fixture file.
    tbl_id1 = 'fed02fb4-1996-4a15-98dc-29da193cc476'
    tbl_id2 = '47c6097f-02d5-4432-8423-13c05fbfacbd'
    pre_merge_tbl1_cells_no = len(t_document.get_block_by_id(tbl_id1).relationships[0].ids)    # type: ignore
    pre_merge_tbl2_cells_no = len(t_document.get_block_by_id(tbl_id2).relationships[0].ids)    # type: ignore
    pre_merge_tbl1_lastcell = t_document.get_block_by_id(tbl_id1).relationships[0].ids[-1]    # type: ignore
    pre_merge_tbl2_lastcell = t_document.get_block_by_id(tbl_id2).relationships[0].ids[-1]    # type: ignore
    pre_merge_tbl1_last_row = t_document.get_block_by_id(pre_merge_tbl1_lastcell).row_index    # type: ignore
    pre_merge_tbl2_last_row = t_document.get_block_by_id(pre_merge_tbl2_lastcell).row_index    # type: ignore
    t_document.merge_tables([[tbl_id1, tbl_id2]])
    post_merge_tbl1_cells_no = len(t_document.get_block_by_id(tbl_id1).relationships[0].ids)    # type: ignore
    post_merge_tbl1_lastcell = t_document.get_block_by_id(tbl_id1).relationships[0].ids[-1]    # type: ignore
    post_merge_tbl1_last_row = t_document.get_block_by_id(post_merge_tbl1_lastcell).row_index    # type: ignore
    assert post_merge_tbl1_cells_no == pre_merge_tbl1_cells_no + pre_merge_tbl2_cells_no
    assert pre_merge_tbl2_last_row
    assert post_merge_tbl1_last_row == pre_merge_tbl1_last_row + pre_merge_tbl2_last_row    # type: ignore
def test_delete_blocks():
    """delete_blocks must remove exactly the requested blocks from the document."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib_multi_page_tables.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    tbl_id1 = 'fed02fb4-1996-4a15-98dc-29da193cc476'
    tbl_id2 = '47c6097f-02d5-4432-8423-13c05fbfacbd'
    pre_delete_block_no = len(t_document.blocks)
    t_document.delete_blocks([tbl_id1, tbl_id2])
    post_delete_block_no = len(t_document.blocks)
    assert post_delete_block_no == pre_delete_block_no - 2
def test_link_tables():
    """link_tables must record next/previous table pointers in the blocks'
    custom attributes."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib_multi_page_tables.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    tbl_id1 = 'fed02fb4-1996-4a15-98dc-29da193cc476'
    tbl_id2 = '47c6097f-02d5-4432-8423-13c05fbfacbd'
    t_document.link_tables([[tbl_id1, tbl_id2]])
    assert t_document.get_block_by_id(tbl_id1).custom['next_table'] == tbl_id2    # type: ignore
    assert t_document.get_block_by_id(tbl_id2).custom['previous_table'] == tbl_id1    # type: ignore
def test_pipeline_merge_tables():
    """pipeline_merge_tables with MergeOptions.MERGE must fold all three
    fixture tables into the first one."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib_multi_page_table_merge.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    tbl_id1 = '5685498d-d196-42a7-8b40-594d6d886ca9'
    tbl_id2 = 'a9191a66-0d32-4d36-8fd6-58e6917f4ea6'
    tbl_id3 = 'e0368543-c9c3-4616-bd6c-f25e66c859b2'
    pre_merge_tbl1_cells_no = len(t_document.get_block_by_id(tbl_id1).relationships[0].ids)    # type: ignore
    pre_merge_tbl2_cells_no = len(t_document.get_block_by_id(tbl_id2).relationships[0].ids)    # type: ignore
    pre_merge_tbl3_cells_no = len(t_document.get_block_by_id(tbl_id3).relationships[0].ids)    # type: ignore
    t_document = pipeline_merge_tables(t_document, MergeOptions.MERGE, None, HeaderFooterType.NONE)
    post_merge_tbl1_cells_no = len(t_document.get_block_by_id(tbl_id1).relationships[0].ids)    # type: ignore
    assert post_merge_tbl1_cells_no == pre_merge_tbl1_cells_no + pre_merge_tbl2_cells_no + pre_merge_tbl3_cells_no
def test_pipeline_merge_multiple_tables():
    """pipeline_merge_tables must merge independent table chains: (1,2) into
    table 1 and (3,4) into table 3."""
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/gib_multi_tables_multi_page_sample.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    tbl_id1 = '4894d2ba-0479-4196-9cbd-c0fea4d28762'
    tbl_id2 = 'b5e061ec-05be-48d5-83fc-6719fdd4397a'
    tbl_id3 = '8bbc3f4f-0354-4999-a001-4585631bb7fe'
    tbl_id4 = 'cf8e09a1-c317-40c1-9c45-e830e14167d5'
    pre_merge_tbl1_cells_no = len(t_document.get_block_by_id(tbl_id1).relationships[0].ids)    # type: ignore
    pre_merge_tbl2_cells_no = len(t_document.get_block_by_id(tbl_id2).relationships[0].ids)    # type: ignore
    pre_merge_tbl3_cells_no = len(t_document.get_block_by_id(tbl_id3).relationships[0].ids)    # type: ignore
    pre_merge_tbl4_cells_no = len(t_document.get_block_by_id(tbl_id4).relationships[0].ids)    # type: ignore
    t_document = pipeline_merge_tables(t_document, MergeOptions.MERGE, None, HeaderFooterType.NONE)
    post_merge_tbl1_cells_no = len(t_document.get_block_by_id(tbl_id1).relationships[0].ids)    # type: ignore
    # Table 3 is the surviving head of the second merged chain (tables 3+4).
    post_merge_tbl2_cells_no = len(t_document.get_block_by_id(tbl_id3).relationships[0].ids)    # type: ignore
    assert post_merge_tbl1_cells_no == pre_merge_tbl1_cells_no + pre_merge_tbl2_cells_no
    assert post_merge_tbl2_cells_no == pre_merge_tbl3_cells_no + pre_merge_tbl4_cells_no
def test_kv_ocr_confidence(caplog):
    """add_kv_ocr_confidence must attach mean OCR confidence values to both
    the key and the value blocks of form fields."""
    caplog.set_level(logging.DEBUG)
    p = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: use a context manager so the fixture handle is closed (it was leaked).
    with open(os.path.join(p, "data/employment-application.json")) as f:
        j = json.load(f)
    t_document: t2.TDocument = t2.TDocumentSchema().load(j)
    t_document = add_kv_ocr_confidence(t_document)
    doc = t1.Document(t2.TDocumentSchema().dump(t_document))
    for page in doc.pages:
        # Bug fix: these comparisons previously lacked 'assert' and were no-op
        # expressions, so the test could never fail on them.
        k1 = page.form.getFieldByKey("Home Address:")
        assert k1.key.custom['OCRConfidence'] == {'mean': 99.60698318481445}
        assert k1.value.custom['OCRConfidence'] == {'mean': 99.8596928914388}
        k1 = page.form.getFieldByKey("Phone Number:")
        assert k1.key.custom['OCRConfidence'] == {'mean': 99.55334854125977}
        assert k1.value.custom['OCRConfidence'] == {'mean': 99.23233032226562}
| StarcoderdataPython |
3236515 | <reponame>ciandt/tech-gallery-chat-bot<filename>tests/test_dependencies.py<gh_stars>1-10
from unittest import mock
from unittest.mock import ANY
import pytest
from tech_gallery_bot.dependencies import get_dependencies
from tech_gallery_bot.repositories import UserRepository, UserProfileRepository
@pytest.mark.parametrize("config", [None, "", ("a", "b"), True, False])
def test_get_dependencies_with_invalid_argument(config):
    # Any non-dict config must be rejected with TypeError.
    with pytest.raises(TypeError):
        get_dependencies(config=config)
def test_get_dependencies_with_incomplete_argument():
    # A dict missing the required TECH_GALLERY_SERVICE_ACCOUNT key raises ValueError.
    with pytest.raises(ValueError):
        get_dependencies(config={})
def test_get_dependencies_with_correct_argument():
    """With a valid config the datastore Client must be built from the service
    account file and both repositories must be wired up."""
    with mock.patch("tech_gallery_bot.dependencies.Client", autospec=True) as client:
        # Return the mock itself so the repositories receive the same instance.
        client.from_service_account_json.return_value = client
        dependencies = get_dependencies(
            config={"TECH_GALLERY_SERVICE_ACCOUNT": "path/to/file.json"}
        )
        client.from_service_account_json.assert_called_once_with(
            "path/to/file.json", namespace=ANY
        )
        assert isinstance(dependencies["user_repository"], UserRepository)
        assert isinstance(
            dependencies["user_profile_repository"], UserProfileRepository
        )
| StarcoderdataPython |
3357016 | from signal import siginterrupt
from tkinter.tix import REAL
Algoritmo: Descuento:
#Para este ejercicio tenemos que crear un algoritmo para calcular el descuento de una compra
#teniendo en cuenta que se aplicará un descuento de un 5 % a compras valoradas entre 100 y 500 euros y de un 8% para compras valoradas en más de 500 euros
#función:
Descuento (precio, REAL): REAL
precondicion:
precio > 0
si
precio < 100
entonces
precio < 100 => No aplica descuento
resultado = 0
si no si
precio <= 500
entonces
100 <= precio <= 500 => Aplica descuento (5%)
resultado = precio * 5/100
si no si
precio > 500
entonces
precio > 500 = > Aplica descuento (8%)
resultado = precio * 8/100
fin descuento | StarcoderdataPython |
125609 | <gh_stars>0
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
def load_command_table(self, _):
    """Register the preview 'approle' command group and its subcommands.

    The second positional argument (the raw CLI args) is unused, hence '_'.
    """
    with self.command_group("approle", is_preview=True) as g:
        g.custom_command("list", "list_app_roles")
        g.custom_command("assignment list", "list_role_assignments")
        g.custom_command("assignment add", "add_role_assignment")
        g.custom_command("assignment remove", "remove_role_assignment")
| StarcoderdataPython |
3276820 | <gh_stars>0
import os
from app.lookups import base as lookups
from app.drivers.base import BaseDriver
from app.drivers.options import mslookup_options
class LookupDriver(BaseDriver):
    """Driver that creates and fills an MS lookup SQLite database."""
    # Populated by option parsing or subclasses; '-o'/'-d' are removed from the
    # option map in set_options, so these stay None unless set elsewhere.
    outfile, outdir = None, None

    def __init__(self):
        super().__init__()
        self.parser_options = mslookup_options

    def set_options(self):
        # Drop the generic output options and add the lookup-file option.
        super().set_options()
        del(self.options['-o'])
        del(self.options['-d'])
        self.options.update(self.define_options(['lookupfn'],
                                                mslookup_options))

    def initialize_lookup(self, outfile=None):
        """Create the lookup DB if none exists yet.

        Path precedence: explicit ``outfile`` argument, then ``self.outfile``,
        else a default 'mslookup_db.sqlite' inside ``self.outdir``.
        ``self.lookuptype`` is assumed to be set by a subclass — verify.
        """
        if self.lookup is None:
            # FIXME MUST be a set or mzml lookup? here is place to assert
            # correct lookuptype!
            if outfile is None and self.outfile is None:
                self.outfile = os.path.join(self.outdir,
                                            'mslookup_db.sqlite')
                lookupfile = self.outfile
            elif outfile is not None:
                lookupfile = outfile
            elif self.outfile is not None:
                lookupfile = self.outfile
            self.lookup = lookups.create_new_lookup(lookupfile,
                                                    self.lookuptype)
            self.lookup.add_tables()

    def run(self):
        # Entry point: ensure the lookup DB exists, then populate it.
        self.initialize_lookup()
        self.create_lookup()
| StarcoderdataPython |
4835874 |
# Select a non-interactive matplotlib backend before pyplot is imported, so
# the script can run headless.
import matplotlib
matplotlib.use('Agg')
import os
import pandas as pd
import numpy as np
import sys
import pickle
from scipy.spatial.distance import cdist
import math
import networkx as nx
import networkx.algorithms.components.connected as nxacc
import networkx.algorithms.dag as nxadag
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import mygene
import re
# Curated gene lists defining which expression / mutation features are kept
# when aligning the GDSC and PDTC datasets below.
selected_exp_gene_list = pd.read_csv("/data/common_exp_features.tsv",sep="\t")["Gene"].tolist()
selected_mut_gene_list = pd.read_csv("/data/common_mut_features.tsv",sep="\t")["Gene"].tolist()
def load_network(network_file_list, valid_gene_list):
    """Build an undirected gene-neighbor map from whitespace-delimited edge files.

    Parameters
    ----------
    network_file_list : iterable of str
        Paths to edge-list files; the first two whitespace-separated tokens
        of each line are treated as an interacting gene pair.
    valid_gene_list : iterable of str
        Genes to keep; edges touching any other gene are skipped.

    Returns
    -------
    dict
        Mapping of gene -> set of neighboring genes (symmetric).
    """
    # Performance fix: membership tests against a list are O(n) per lookup and
    # dominate runtime on genome-scale networks; hoist into a set once.
    valid_genes = set(valid_gene_list)
    gene_neighbor_map = {}
    for file_name in network_file_list:
        # Context manager guarantees the handle is closed even on parse errors.
        with open(file_name) as file_handle:
            for line in file_handle:
                line = line.rstrip().split()
                gene1, gene2 = line[0], line[1]
                if gene1 not in valid_genes or gene2 not in valid_genes:
                    continue
                gene_neighbor_map.setdefault(gene1, set()).add(gene2)
                gene_neighbor_map.setdefault(gene2, set()).add(gene1)
    return gene_neighbor_map
def load_name_space(name_space_file=None):
    """Load a GO-term -> namespace mapping from a two-column whitespace file.

    Parameters
    ----------
    name_space_file : str, optional
        Path to the mapping file. Defaults to the module-level
        ``go_name_space_file`` for backward compatibility. NOTE(review): that
        global is not defined anywhere in this script, so calling with no
        argument raises NameError unless a caller defines it first.

    Returns
    -------
    dict
        Mapping of GO term id (column 1) -> namespace tag (column 2).
    """
    if name_space_file is None:
        # Preserve the original behavior of reading the module-level global.
        name_space_file = go_name_space_file
    go_tab_map = {}
    # Context manager replaces the manual close() for exception safety.
    with open(name_space_file) as file_handle:
        for line in file_handle:
            line = line.rstrip().split()
            go_tab_map[line[0]] = line[1]
    return go_tab_map
def list2index(cell_line_list, cell_line2id):
    """Map each cell line name to its integer id and return a numpy array.

    Raises KeyError if a cell line is missing from the mapping (unchanged
    behavior). The manual append loop was replaced by a list comprehension.
    """
    return np.asarray([cell_line2id[cell_line] for cell_line in cell_line_list])
# --- PDTC (patient-derived tumor cell) dataset paths ---
# NOTE(review): directory is spelled 'PTDC' while variables say 'PDTC' —
# confirm the on-disk directory name.
PDTC_data_file = '/data/PTDC/'
PDTC_exp_data_file = PDTC_data_file + 'ExpressionModels.tsv'
PDTC_drug_cell_line_file = PDTC_data_file + 'DrugResponsesAUCModels.tsv'
#download at https://ftp.sanger.ac.uk/pub4/cancerrxgene/releases/release-6.0/v17_fitted_dose_response.xlsx
#cell_line_detail_file = data_file + 'Cell_Lines_Details.csv'
PDTC_mutation_data_file = PDTC_data_file + 'SNVsModels.tsv'
PDTC_drug_target_file ='/data/GDSC/drug_target_list.csv'
inbiomap_file = 'InBioMap_Symbol.sif'
pathwaycomm_file = 'PathwayCommons_Symbol.sif'
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_row', 10)
# --- GDSC dataset paths ---
# NOTE(review): several names below (inbiomap_file, pathwaycomm_file and the
# pd.set_option calls) are duplicated from the PDTC section above.
data_file = '/data/GDSC/'
new_network_file = '/data/'
exp_data_file = data_file + 'Cell_line_RMA_proc_basalExp.txt'
drug_cell_line_file = data_file + 'v17_fitted_dose_response.csv'
#download at https://ftp.sanger.ac.uk/pub4/cancerrxgene/releases/release-6.0/v17_fitted_dose_response.xlsx
cell_line_detail_file = data_file + 'Cell_Lines_Details.csv'
mutation_data_file = data_file + 'WES_variants.csv'
drug_target_file ='/data/GDSC/drug_target_list.csv'
feature_folder = 'feature/'
inbiomap_file = 'InBioMap_Symbol.sif'
pathwaycomm_file = 'PathwayCommons_Symbol.sif'
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_row', 10)
# Load GDSC expression: genes come in as rows, so transpose to samples x genes;
# the first transposed row is dropped (presumably a second header row — verify).
exp_df = pd.read_csv(exp_data_file, sep='\t', index_col=0)
exp_df = exp_df.T[1:]
# Columns with a NaN gene name are renamed and removed.
exp_df = exp_df.rename(columns={np.nan: 'NO_GENE_NAME'})
exp_df = exp_df.drop('NO_GENE_NAME',axis=1)
def stripNumber(line):
    """Extract the integer COSMIC id from an index label like 'DATA.906826'.

    Fix: the pattern is now a raw string; the original 'DATA\\.' relied on
    Python keeping the unrecognized escape literal (a DeprecationWarning on
    modern interpreters). Behavior is unchanged; a non-matching label still
    raises AttributeError (m is None), as before.
    """
    m = re.match(r'DATA\.([0-9]+)\.?', line)
    return int(m.group(1))
# Convert 'DATA.<id>' index labels into integer COSMIC ids, keep the first row
# for duplicated ids, then restrict to the curated expression gene list.
exp_df.index = exp_df.index.map(stripNumber)
exp_df = exp_df.groupby(level=0).first()
exp_df = exp_df[selected_exp_gene_list]
exp_gene_list = list(exp_df.columns)
exp_cell_line_list = list(exp_df.index.unique())
# Load PDTC expression the same way (transpose, drop first transposed row);
# missing values are filled with 0 here, unlike the GDSC load above.
PDTC_exp_df = pd.read_csv(PDTC_exp_data_file, sep='\t', index_col=0).fillna(0)
PDTC_exp_df = PDTC_exp_df.T[1:]
#exp_df = exp_df.rename(columns={np.nan: 'NO_GENE_NAME'})
#exp_df = exp_df.drop('NO_GENE_NAME',axis=1)
def stripNumber(line):
    """Extract the integer id from an index label like 'DATA.906826'.

    NOTE(review): this re-definition shadows the identical helper defined
    earlier in the script and the PDTC index mapping below is commented out,
    so this copy appears unused — consider deleting it.
    Fix: raw-string regex avoids the invalid '\\.' escape warning; behavior is
    unchanged.
    """
    m = re.match(r'DATA\.([0-9]+)\.?', line)
    return int(m.group(1))
#exp_df.index = exp_df.index.map(stripNumber)
#exp_df = exp_df.groupby(level=0).first()
# Restrict PDTC expression to the shared gene list, then stack PDTC samples
# under the GDSC samples; downstream *_list variables are rebuilt afterwards.
PDTC_exp_df = PDTC_exp_df[selected_exp_gene_list]
PDTC_exp_gene_list = list(PDTC_exp_df.columns)
PDTC_exp_cell_line_list = list(PDTC_exp_df.index.unique())
exp_df = pd.concat([exp_df,PDTC_exp_df])
exp_gene_list = list(exp_df.columns)
exp_cell_line_list = list(exp_df.index.unique())
maf = pd.read_csv(mutation_data_file, sep=',', index_col=0).fillna(0)
mutation_df = maf.groupby(['COSMIC_ID', 'Gene']).size().unstack().fillna(0)
mutation_df = mutation_df[selected_mut_gene_list]
mutation_gene_list = list(mutation_df.columns)
mutation_cell_line_list = list(mutation_df.index.unique())
PDTC_maf = pd.read_csv(PDTC_mutation_data_file, sep='\t', index_col=0).fillna(0)
PDTC_mutation_df= PDTC_maf.replace(to_replace="NO",value=0.0)
PDTC_mutation_df= PDTC_mutation_df.replace(to_replace="chr*",value=1.0,regex=True)
# print len(mutation_cell_line_list), len(mutation_gene_list)
PDTC_mutation_df = PDTC_mutation_df.transpose()
PDTC_mutation_df = PDTC_mutation_df[selected_mut_gene_list]
PDTC_mutation_gene_list = list(PDTC_mutation_df.columns)
PDTC_mutation_cell_line_list = list(PDTC_mutation_df.index.unique())
PDTC_mutation_df
mutation_df = pd.concat([mutation_df,PDTC_mutation_df])
mutation_gene_list = list(mutation_df.columns)
mutation_cell_line_list = list(mutation_df.index.unique())
# Parse the drug -> target-gene CSV by hand, building:
#   drug_target_map:  drug name -> list of target gene symbols
#   drug_target_list: flat list of every target symbol seen
# NOTE(review): new_line[1] has already been split on ',', so the inner
# .split(',') is a no-op and only the second CSV field is ever read when a
# drug has multiple quoted comma-separated targets -- verify the file format
# (csv.reader would handle quoted fields correctly).
file_handle = open(drug_target_file)
drug_target_map = {}
drug_target_list = []
for line in file_handle:
    new_line = line.rstrip().split(",")
    drug = new_line[0]
    target_list=new_line[1].split(',')
    if drug != "Drug":
        # Strip quotes and re-join the pieces into one comma-separated string.
        target_list_str = ""
        for i in range(0,len(target_list)):
            if i == len(target_list) - 1:
                target_list_str += target_list[i].replace('"','')
            else:
                target_list_str += target_list[i].replace('"','') + ","
        drug = drug.strip()
        drug_target_map[drug] = []
        if ',' not in target_list_str:
            drug_target_map[drug].append(target_list_str.strip())
            drug_target_list.append(target_list_str.strip())
        else:
            target_list = target_list_str.split(',')
            for target in target_list:
                drug_target_map[drug].append(target.strip())
                drug_target_list.append(target.strip())
# Map GDSC drug names to their numeric screening ids.
drugs_legend = pd.read_csv('/data/GDSC/Screened_Compounds.csv', sep=',', index_col=0)
drug2id_mapping = {}
for index in list(drugs_legend.index) :
    drug_name = drugs_legend.loc[index,'Drug Name']
    drug2id_mapping[ drug_name ] = index
# Union of every gene we care about (targets + expression + mutation), then
# load the protein-interaction networks restricted to those genes.
# NOTE(review): load_network is defined elsewhere -- presumed to return
# {gene -> set(neighbor genes)}; confirm.
valid_gene_list = list(set(drug_target_list) | set(exp_gene_list) | set(mutation_gene_list))
network_list = [new_network_file+inbiomap_file, new_network_file+pathwaycomm_file]
gene_neighbor_map = load_network(network_list, valid_gene_list)
# Build UniProt accession -> HGNC symbol map from the HUGO table
# (index_col=25 is presumed to be the UniProt accession column -- confirm).
gene_name_df = pd.read_table('/data/HUGO_protein-coding_gene.tsv',index_col=25, sep='\t')
gene_name_map = {}
for uniprot_gene in gene_name_df.index:
    ## print uniprot_gene
    # Skip non-string index entries (NaN rows).
    if isinstance(uniprot_gene, type('aaa')) == False:
        continue
    # A non-string .loc result means the accession maps to multiple rows;
    # take the first symbol in that case.
    if isinstance(gene_name_df.loc[uniprot_gene, 'symbol'], type('aaa')) == False:
        gene_name_map[uniprot_gene] = gene_name_df.loc[uniprot_gene, 'symbol'][0]
    else:
        gene_name_map[uniprot_gene] = gene_name_df.loc[uniprot_gene, 'symbol']
# Collect every UniProt id that appears in a human CORUM complex.
corum_df = pd.read_table(new_network_file + 'allComplexes.txt', index_col=0)
uniprot_gene_set = set()
for index in corum_df.index:
    if corum_df.loc[index, 'Organism'] != 'Human':
        continue
    complex_list = corum_df.loc[index, 'subunits(UniProt IDs)'].split(';')
    for gene in complex_list:
        uniprot_gene_set.add(gene)
# print len(uniprot_gene_set), 'genes'
# Accessions missing from the HUGO map are resolved via the mygene.info
# web service (network call).
query_gene_set = []
for gene in uniprot_gene_set:
    if gene not in gene_name_map:
        query_gene_set.append(gene)
# print 'Need to query', len(query_gene_set)
query_gene_list = list(query_gene_set)
mg = mygene.MyGeneInfo()
out = mg.querymany(query_gene_list, scopes='uniprot', fields='symbol', species='human')
not_found_gene_list = []
for i, gene in enumerate(query_gene_list):
    if 'notfound' in out[i]:
        not_found_gene_list.append(gene)
    else:
        gene_name_map[gene] = out[i]['symbol']
# print len(not_found_gene_list), 'symbol name not found', len(gene_name_map)
# Re-read CORUM and add every within-complex gene pair (human complexes
# only) as undirected edges in gene_neighbor_map.
corum_df = pd.read_table(new_network_file + 'allComplexes.txt', index_col=0)
for index in corum_df.index:
    if corum_df.loc[index, 'Organism'] != 'Human':
        continue
    complex_list = corum_df.loc[index, 'subunits(UniProt IDs)'].split(';')
    complex_symbol_list = []
    for gene in complex_list:
        if gene in gene_name_map:
            complex_symbol_list.append( gene_name_map[gene] )
    # All unordered pairs of complex members become neighbors.
    for gene1, gene2 in itertools.combinations(complex_symbol_list,2):
        if gene1 not in gene_neighbor_map:
            gene_neighbor_map[gene1] = set()
        if gene2 not in gene_neighbor_map:
            gene_neighbor_map[gene2] = set()
        gene_neighbor_map[gene1].add(gene2)
        gene_neighbor_map[gene2].add(gene1)
# Co-expression neighbors: genes whose pairwise Pearson correlation across
# samples exceeds 0.4 in absolute value. P[i, j] = 1 - correlation distance,
# i.e. the correlation coefficient itself; every gene correlates perfectly
# with itself, so each gene should appear in its own neighbor set.
gene_exp_neighbor_map = {}
exp_matrix = exp_df.values
P = 1 - cdist(np.transpose(exp_matrix), np.transpose(exp_matrix),'correlation')
for i in range(len(exp_gene_list)):
    gene1 = exp_gene_list[i]
    gene_exp_neighbor_map[gene1] = set()
    for j in range(len(exp_gene_list)):
        gene2 = exp_gene_list[j]
        if math.fabs(P[i, j]) > 0.4:
            gene_exp_neighbor_map[gene1].add(gene2)
    # Sanity check: a gene missing from its own neighborhood indicates a
    # degenerate (e.g. constant) expression column.
    if gene1 not in gene_exp_neighbor_map[gene1]:
        print (gene1, 'not in itself?', P[i,i])
# For every drug, take the union of its targets' co-expression neighbors
# and protein-interaction neighbors; drugs with a non-empty neighborhood
# are kept in selected_drug_list. drug_feature_list records neighborhood
# sizes for the histogram below.
drug_feature_list = []
drug_neighbor_map = {}
selected_drug_list = []
for drug, target_list in drug_target_map.items():
    drug_neighbor_map[drug] = set()
    for gene in target_list:
        if gene not in gene_exp_neighbor_map and gene not in gene_neighbor_map:
            continue
        if gene in gene_exp_neighbor_map:
            drug_neighbor_map[drug] = drug_neighbor_map[drug] | gene_exp_neighbor_map[gene]
        if gene in gene_neighbor_map:
            drug_neighbor_map[drug] = drug_neighbor_map[drug] | gene_neighbor_map[gene]
    if len(drug_neighbor_map[drug]) != 0:
        selected_drug_list.append(drug)
        drug_feature_list.append( len(drug_neighbor_map[drug]) )
# Histogram of per-drug neighborhood sizes (requires a plotting backend).
sns.set_style("whitegrid")
sns.set_context("talk")
sns.distplot(drug_feature_list,color='r',bins=60,kde=False,norm_hist=False)
# GDSC dose-response table, indexed by COSMIC cell-line id (index_col=2).
drugs = pd.read_csv(drug_cell_line_file,index_col=2)
drugs = drugs.drop(["DATASET_VERSION","IC50_RESULTS_ID","MAX_CONC_MICROMOLAR","RMSE"],axis=1)
drugs_cell_line_list = list(drugs.index.unique())
# print len(drugs_cell_line_list)
drug_list = drugs["DRUG_ID"]
new_drug_id = []
# PDTC responses are keyed by drug *name*; translate to GDSC drug ids
# (0 when the name is unknown) and convert IC50 to log scale so the two
# tables share the LN_IC50 column before concatenation.
PDTC_drugs = pd.read_csv(PDTC_drug_cell_line_file,sep='\t',index_col=0)
PDTC_drugs_cell_line_list = list(PDTC_drugs.index.unique())
# print len(drugs_cell_line_list)
drug_list = PDTC_drugs["Drug"].tolist()
new_drug_id = []
for i in drug_list:
    if i in drug2id_mapping.keys():
        new_drug_id.append(drug2id_mapping[i])
    else:
        new_drug_id.append(0)
PDTC_drugs["DRUG_ID"] = new_drug_id
PDTC_drugs["LN_IC50"] = np.log(PDTC_drugs["iC50"])
PDTC_drugs = PDTC_drugs.drop(["Drug","iC50","D1_CONC","D5_CONC","perc.iC50"],axis=1)
drugs = pd.concat([drugs,PDTC_drugs])
drugs_cell_line_list = list(drugs.index.unique())
# Cell lines present in all three data sources (response, expression,
# mutation) are usable; group them by tissue site. PDTC models get a
# synthetic legend entry with Site == "PDTC".
cell_line_list = list(set(drugs_cell_line_list)&set(exp_cell_line_list)&set(mutation_cell_line_list) )
cell_line_legend = pd.read_csv(cell_line_detail_file, index_col=1)
PDTC_cell_line = pd.DataFrame({'Line': ["BRCA"]*len(PDTC_exp_cell_line_list), 'Site':["PDTC"]*len(PDTC_exp_cell_line_list),"Histology":["breast"]*len(PDTC_exp_cell_line_list)},index=PDTC_exp_cell_line_list)
cell_line_legend = pd.concat([cell_line_legend,PDTC_cell_line])
tissue_map = {}
for cell_line in cell_line_list:
    tissue = cell_line_legend.loc[cell_line,'Site']
    if tissue not in tissue_map:
        tissue_map[tissue] = []
    tissue_map[tissue].append(cell_line)
# Report tissues with at least 15 usable cell lines.
large_tissue_number = 0
for tissue, cell_line in tissue_map.items():
    if len(cell_line) >= 15:
        large_tissue_number += 1
        print (tissue, len(cell_line))
print('How many tissues', len(tissue_map))
print('Large tissues', large_tissue_number)
# Keep expression genes with above-10th-percentile standard deviation and
# mutation genes mutated in more than 5 cell lines.
# NOTE(review): the filtering stanza is executed twice verbatim below
# (notebook-export duplicate); the second pass recomputes identical values.
new_exp_gene_list = []
for i in exp_gene_list:
    if i in valid_gene_list:
        new_exp_gene_list.append(i)
exp_stdev = np.std(exp_df.values, axis=0)
exp_perc = np.percentile(exp_stdev,10)
filtered_exp_gene_list = np.asarray(exp_gene_list)[exp_stdev > exp_perc]
mut_sum = np.sum(mutation_df.values,axis=0)
filtered_mut_gene_list = np.asarray(mutation_gene_list)[mut_sum > 5]
new_exp_df = exp_df.loc[ :, list(filtered_exp_gene_list) ]
new_mutation_df = mutation_df.loc[ :, list(filtered_mut_gene_list) ]
new_data_file = ''
exp_stdev = np.std(exp_df.values, axis=0)
exp_perc = np.percentile(exp_stdev,10)
filtered_exp_gene_list = np.asarray(exp_gene_list)[exp_stdev > exp_perc]
mut_sum = np.sum(mutation_df.values,axis=0)
filtered_mut_gene_list = np.asarray(mutation_gene_list)[mut_sum > 5]
new_exp_df = exp_df.loc[ :, list(filtered_exp_gene_list) ]
new_mutation_df = mutation_df.loc[ :, list(filtered_mut_gene_list) ]
# For each selected drug, assemble a per-tissue feature matrix
# (expression + mutation columns restricted to the drug's gene
# neighborhood) and an LN_IC50 label vector, then save them as .npy files
# under /data/merged/drug_feature/<drug>/. Only the "PDTC" tissue is
# exported here (the inner `if tissue=="PDTC"` gate).
rename_selected_drug_list = []
for drug in selected_drug_list:
    print(drug)
    if drug not in drug2id_mapping:
        print('drug name wrong', drug)
    else:
        cell_line_drug_matrix = drugs.loc[drugs['DRUG_ID'] == drug2id_mapping[drug]]
        feature_exp_gene_list = list( set(drug_neighbor_map[drug]) & set(filtered_exp_gene_list) )
        feature_mut_gene_list = list( set(drug_neighbor_map[drug]) & set(filtered_mut_gene_list) )
        print(len(feature_exp_gene_list) + len(feature_mut_gene_list))
        # Skip drugs whose neighborhood survives neither filter.
        if len(feature_exp_gene_list) + len(feature_mut_gene_list) == 0:
            continue
        feature_description = []
        drug_tissue_map = {}
        # Directory-safe drug name (spaces -> underscores).
        drug = drug.replace(' ','_')
        rename_selected_drug_list.append(drug)
        # print drug
        if drug == 'Nutlin-3a_(-)':
            drug = 'Nutlin-3a'
        drug_folder = '/data/merged/drug_feature/' + drug + '/'
        if not os.path.exists(drug_folder):
            os.makedirs(drug_folder)
        # print 'Generate features', drug
        for tissue, tissue_cell_line_list in tissue_map.items():
            if tissue=="PDTC":
                drug_specific_cell_line = set( cell_line_drug_matrix.index ) & set( tissue_cell_line_list )
                drug_specific_cell_line = list(drug_specific_cell_line)
                drug_tissue_map[tissue] = drug_specific_cell_line
                feature_list = []
                if len(feature_exp_gene_list) != 0:
                    feature_list.append( new_exp_df.loc[ drug_specific_cell_line, feature_exp_gene_list ].values )
                    for gene in feature_exp_gene_list:
                        feature_description.append(gene+'_expression')
                if len(feature_mut_gene_list) != 0:
                    feature_list.append( mutation_df.loc[ drug_specific_cell_line, feature_mut_gene_list ].values )
                    for gene in feature_mut_gene_list:
                        feature_description.append(gene+'_mutation')
                feature = np.concatenate(feature_list, axis=1)
                label = cell_line_drug_matrix.loc[ drug_specific_cell_line,'LN_IC50'].values
                #label = new_crispr_df.loc[ tissue_cell_line_list, label_gene ].values
                # print feature.shape, label.shape
                np.save(drug_folder + tissue + '_' + drug + '_feature.npy', feature )
                np.save(drug_folder + tissue + '_' + drug + '_label.npy', label)
                np.save(drug_folder + tissue + '_feature_description.npy', np.asarray(feature_description))
        # Persist the tissue -> cell-line split used for this drug.
        file_handle = open("/data/merged/" + drug+'_tissue_cell_line_list.pkl',"wb")
        pickle.dump(drug_tissue_map,file_handle)
        file_handle.close()
# Record the (renamed) drugs that actually produced features.
file_handle = open('rename_selected_drug_list', 'w')
for drug in rename_selected_drug_list:
    file_handle.writelines(drug+ '\n')
file_handle.close()
| StarcoderdataPython |
147201 | """
BlackHole.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon Jul 8 09:56:38 MDT 2013
Description:
"""
import numpy as np
from .Star import _Planck
from .Source import Source
from types import FunctionType
from scipy.integrate import quad
from ..util.ReadData import read_lit
from ..util.SetDefaultParameterValues import BlackHoleParameters
from ..physics.CrossSections import PhotoIonizationCrossSection as sigma_E
from ..physics.Constants import s_per_myr, G, g_per_msun, c, t_edd, m_p, \
sigma_T, sigma_SB
# Spectral models handled natively by BlackHole; any other value of
# 'source_sed' is treated as a user-supplied function or a literature
# module resolved through read_lit.
sptypes = ['pl', 'mcd', 'simpl']
class BlackHole(Source):
    """Accreting black hole radiation source.

    The hole grows exponentially on the Eddington/Salpeter timescale
    (see Mass) and radiates through one of several spectral models:
    power-law ('pl'), multi-color disk ('mcd'), Comptonized disk
    ('simpl'), or a user-defined / literature spectrum.
    """
    def __init__(self, **kwargs):
        """
        Initialize a black hole object.
        Parameters
        ----------
        pf: dict
            Full parameter file.
        src_pars: dict
            Contains source-specific parameters.
        spec_pars: dict
            Contains spectrum-specific parameters.
        """
        self.pf = BlackHoleParameters()
        self.pf.update(kwargs)
        Source.__init__(self)
        self._name = 'bh'
        # Initial mass [Msun] and radiative efficiency.
        self.M0 = self.pf['source_mass']
        self.epsilon = self.pf['source_eta']
        # Duty cycle parameters
        self.tau = self.pf['source_lifetime'] * s_per_myr
        self.fduty = self.pf['source_fduty']
        self.variable = self.fduty < 1
        #if self.src_pars['fduty'] == 1:
        #    self.variable = self.tau < self.pf['stop_time']
        # Off time per cycle, chosen so on-fraction equals fduty.
        self.toff = self.tau * (self.fduty**-1. - 1.)
        # Disk properties
        self.last_renormalized = 0.0
        self.r_in = self._DiskInnermostRadius(self.M0)
        self.r_out = self.pf['source_rmax'] * self._GravitationalRadius(self.M0)
        self.T_in = self._DiskInnermostTemperature(self.M0)
        self.T_out = self._DiskTemperature(self.M0, self.r_out)
        self.Lbol = self.Luminosity(0.0)
        self.disk_history = {}
        #if 'mcd' in self.spec_pars['type']:
        #    self.fcol = self.spec_pars['fcol'][self.spec_pars['type'].index('mcd')]
        #if 'simpl' in self.spec_pars['type']:
        #    self.fcol = self.spec_pars['fcol'][self.spec_pars['type'].index('simpl')]
        #if 'zebra' in self.pf['source_sed']:
        #    self.T = self.src_pars['temperature']#[self.spec_pars['type'].index('zebra')]
        # Resolve the spectral model: native name, callable, or a module
        # looked up in the literature database.
        if self.pf['source_sed'] in sptypes:
            pass
        elif type(self.pf['source_sed']) is FunctionType:
            self._UserDefined = self.pf['source_sed']
        else:
            from_lit = read_lit(self.pf['source_sed'])
            src = from_lit.Source()
            self._UserDefined = src.Spectrum
        # Convert spectral types to strings
        #self.N = len(self.spec_pars['type'])
        #self.type_by_num = []
        #self.type_by_name = []
        #for i, sptype in enumerate(self.spec_pars['type']):
        #    if type(sptype) != int:
        #
        #        if sptype in sptypes:
        #            self.type_by_name.append(sptype)
        #            self.type_by_num.append(sptypes[sptype])
        #        elif type(sptype) is FunctionType:
        #            self._UserDefined = sptype
        #        else:
        #            from_lit = read_lit(sptype)
        #            self._UserDefined = from_lit.Spectrum
        #
        #        continue
        #
        #    self.type_by_num.append(sptype)
        #    self.type_by_name.append(sptypes.keys()[sptypes.values().index(sptype)])
    def _SchwartzchildRadius(self, M):
        """Schwarzschild radius [cm] for mass M [Msun]."""
        return 2. * self._GravitationalRadius(M)
    def _GravitationalRadius(self, M):
        """ Half the Schwartzchild radius. """
        return G * M * g_per_msun / c**2
    def _MassAccretionRate(self, M=None):
        """Accretion rate [g/s] implied by the current luminosity and
        radiative efficiency."""
        return self.Luminosity(0, M=M) / self.epsilon / c**2
    def _DiskInnermostRadius(self, M):
        """
        Inner radius of disk.  Unless SourceISCO > 0, will be set to the
        inner-most stable circular orbit for a BH of mass M.
        """
        return self.pf['source_isco'] * self._GravitationalRadius(M)
    def _DiskInnermostTemperature(self, M):
        """
        Temperature (in Kelvin) at inner edge of the disk.
        """
        return (3. * G * M * g_per_msun * self._MassAccretionRate(M) / \
            8. / np.pi / self._DiskInnermostRadius(M)**3 / sigma_SB)**0.25
    def _DiskTemperature(self, M, r):
        """Disk temperature [K] at radius r [cm] for BH mass M [Msun]."""
        return ((3. * G * M * g_per_msun * self._MassAccretionRate(M) / \
            8. / np.pi / r**3 / sigma_SB) * \
            (1. - (self._DiskInnermostRadius(M) / r)**0.5))**0.25
    def _PowerLaw(self, E, t=0.0):
        """
        A simple power law X-ray spectrum - this is proportional to the
        *energy* emitted at E, not the number of photons.
        """
        return E**self.pf['source_alpha']
    def _SIMPL(self, E, t=0.0):
        """
        Purpose:
        --------
        Convolve an input spectrum with a Comptonization kernel.
        Inputs:
        -------
        Gamma - Power-law index, LE ~ E**(-Gamma)
        fsc - Fraction of seed photons that get scattered
            (assumes all input photons have same probability of being scattered
            and that scattering is energy-independent)
        fref - Of the photons that impact the disk after a scattering, this is the
            fraction that reflect back off the disk to the observer instead of
            being absorbed and thermalized (default 1)
        uponly - False: SIMPL-2, non-rel Comptonization, up- and down-scattering
                 True: SIMPL-1, relativistic Comptoniztion, up-scattering only
        Outputs: (dictionary)
        --------
        LE - Absorbed power-law luminosity array [keV s^-1]
        E - Energy array [keV]
        dE - Differential energy array [keV]
        References
        ----------
        Steiner et al. (2009). Thanks <NAME> for the code!
        """
        # Input photon distribution
        if self.pf['source_sed'] == 'zebra':
            nin = lambda E0: _Planck(E0, self.T) / E0
        else:
            nin = lambda E0: self._MultiColorDisk(E0, t) / E0
        fsc = self.pf['source_fsc']
        # Output photon distribution - integrate in log-space
        integrand = lambda E0: nin(10**E0) \
            * self._GreensFunctionSIMPL(10**E0, E) * 10**E0
        nout = (1.0 - fsc) * nin(E) + fsc \
            * quad(integrand, np.log10(self.Emin),
            np.log10(self.Emax))[0] * np.log(10.)
        # Output spectrum
        return nout * E
    def _GreensFunctionSIMPL(self, Ein, Eout):
        """
        Must perform integral transform to compute output photon distribution.
        """
        # Careful with Gamma...
        # In Steiner et al. 2009, Gamma is n(E) ~ E**(-Gamma),
        # but n(E) and L(E) are different by a factor of E (see below)
        Gamma = -self.pf['source_alpha'] + 1.0
        if self.pf['source_uponly']:
            if Eout >= Ein:
                return (Gamma - 1.0) * (Eout / Ein)**(-1.0 * Gamma) / Ein
            else:
                return 0.0
        else:
            if Eout >= Ein:
                return (Gamma - 1.0) * (Gamma + 2.0) / (1.0 + 2.0 * Gamma) * \
                    (Eout / Ein)**(-1.0 * Gamma) / Ein
            else:
                return (Gamma - 1.0) * (Gamma + 2.0) / (1.0 + 2.0 * Gamma) * \
                    (Eout / Ein)**(Gamma + 1.0) / Ein
    def _MultiColorDisk(self, E, t=0.0):
        """
        Soft component of accretion disk spectra.
        References
        ----------
        Mitsuda et al. 1984, PASJ, 36, 741.
        """
        # If t > 0, re-compute mass, inner radius, and inner temperature
        if t > 0 and self.pf['source_evolving'] \
            and t != self.last_renormalized:
            self.M = self.Mass(t)
            self.r_in = self._DiskInnermostRadius(self.M)
            self.r_out = self.pf['source_rmax'] * self._GravitationalRadius(self.M)
            self.T_in = self._DiskInnermostTemperature(self.M)
            self.T_out = self._DiskTemperature(self.M, self.r_out)
        integrand = lambda T: (T / self.T_in)**(-11. / 3.) \
            * _Planck(E, T) / self.T_in
        return quad(integrand, self.T_out, self.T_in)[0]
    def SourceOn(self, t):
        """ See if source is on. Provide t in code units. """
        if not self.variable:
            return True
        if t < self.tau:
            return True
        if self.fduty == 1:
            return False
        # Position within the current on/off cycle decides the state.
        nacc = t / (self.tau + self.toff)
        if nacc % 1 < self.fduty:
            return True
        else:
            return False
    def _Intensity(self, E, t=0, absorb=True):
        """
        Return quantity *proportional* to fraction of bolometric luminosity
        emitted at photon energy E. Normalization handled separately.
        """
        if self.pf['source_sed'] == 'pl':
            Lnu = self._PowerLaw(E, t)
        elif self.pf['source_sed'] == 'mcd':
            Lnu = self._MultiColorDisk(E, t)
        elif self.pf['source_sed'] == 'sazonov2004':
            Lnu = self._UserDefined(E, t)
        elif self.pf['source_sed'] == 'simpl':
            Lnu = self._SIMPL(E, t)
        elif self.pf['source_sed'] == 'zebra':
            Lnu = self._SIMPL(E, t)
        else:
            Lnu = 0.0
        # Optional attenuation by a neutral absorbing column.
        if self.pf['source_logN'] > 0 and absorb:
            Lnu *= self._hardening_factor(E)
        return Lnu
    #def _NormalizeSpectrum(self, t=0.):
    #    Lbol = self.Luminosity()
    #    # Treat simPL spectrum special
    #    if self.pf['source_sed'] == 'simpl':
    #        integral, err = quad(self._MultiColorDisk,
    #            self.EminNorm, self.EmaxNorm, args=(t, False))
    #    else:
    #        integral, err = quad(self._Intensity,
    #            self.EminNorm, self.EmaxNorm, args=(t, False))
    #
    #    norms = Lbol / integral
    #
    #    return norms
    def Luminosity(self, t=0.0, M=None):
        """
        Returns the bolometric luminosity of a source in units of erg/s.
        For accreting black holes, the bolometric luminosity will increase
        with time, hence the optional 't' and 'M' arguments.
        """
        if not self.SourceOn(t):
            return 0.0
        Mnow = self.Mass(t)
        if M is not None:
            Mnow = M
        # Eddington luminosity scaled by the radiative efficiency.
        return self.epsilon * 4.0 * np.pi * G * Mnow * g_per_msun * m_p \
            * c / sigma_T
    def Mass(self, t):
        """
        Compute black hole mass after t (seconds) have elapsed.  Relies on
        initial mass self.M, and (constant) radiaitive efficiency self.epsilon.
        """
        if self.variable:
            # Only count elapsed time spent in the accreting (on) phase.
            nlifetimes = int(t / (self.tau + self.toff))
            dtacc = nlifetimes * self.tau
            M0 = self.M0 * np.exp(((1.0 - self.epsilon) / self.epsilon) * dtacc / t_edd)
            dt = t - nlifetimes * (self.tau + self.toff)
        else:
            M0 = self.M0
            dt = t
        return M0 * np.exp(((1.0 - self.epsilon) / self.epsilon) * dt / t_edd)
    def Age(self, M):
        """
        Compute age of black hole based on current time, current mass, and initial mass.
        """
        return np.log(M / self.M0) * (self.epsilon / (1. - self.epsilon)) * t_edd
| StarcoderdataPython |
182089 | <reponame>coogger/coogger<filename>apps/cooggerapp/models/userextra.py
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext as _
from apps.cooggerapp.choices import FOLLOW, TITLES, make_choices
class OtherAddressesOfUsers(models.Model):
    """External address/username a user links on their profile.

    Original author note: "maybe ManyToManyField in UserProfile" --
    UserProfile.address does reference this model via ManyToManyField.
    """
    # Which site/service the address belongs to (choices from FOLLOW).
    choices = models.CharField(
        blank=True,
        null=True,
        max_length=15,
        choices=make_choices(FOLLOW),
        verbose_name=_("website"),
    )
    # The address or username on that site.
    address = models.CharField(
        blank=True,
        null=True,
        max_length=50,
        verbose_name=_("write address / username"),
    )
    def __str__(self):
        return f"{self.choices} - {self.address}"
class UserProfile(models.Model):
    """Extra per-user profile data attached 1:1 to the auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Long-form "about" page content shown at /u/@username/about/.
    about = models.TextField(
        help_text=_(
            "Write a long article about yourself, see; /u/@your_username/about/"
        ),
        verbose_name=_("About Yourself"),
        blank=True,
        null=True,
    )
    # Short biography displayed on the profile page.
    bio = models.CharField(
        help_text=_(
            "Write something short about yourself, this will appear in your profile."
        ),
        max_length=260,
        blank=True,
        null=True,
    )
    # Linked external addresses/usernames.
    address = models.ManyToManyField(OtherAddressesOfUsers, blank=True)
    # Opt-in flag for email notifications.
    email_permission = models.BooleanField(
        help_text=_("Allow email notifications."), default=True
    )
    # Site role/title (choices from TITLES); defaults to plain "user".
    title = models.CharField(
        max_length=30,
        default="user",
        choices=make_choices(TITLES),
        verbose_name=_("title"),
        help_text=_("Title"),
    )
    # company = TODO
    def __str__(self):
        return str(self.user)
| StarcoderdataPython |
1692338 | <filename>python/batch-compute-with-step-functions/workshop/construct/cicdpipeline/cicd_web.py<gh_stars>0
from aws_cdk import (
core,
aws_iam as _iam,
aws_codepipeline as _codepipeline,
aws_codepipeline_actions as _codepipeline_actions,
aws_codecommit as _codecommit,
aws_codebuild as _codebuild
)
class CICDWeb(core.Construct):
    """CDK construct wiring a CodeCommit -> CodeBuild -> ECS(Fargate)
    deployment pipeline for the workshop web application.

    Parameters (beyond the usual construct scope/id):
      UserName   -- suffix appended to every resource name for per-user isolation.
      Repo       -- object exposing getRepositoriesList()/getRepositories();
                    presumably wraps ECR repositories (grant_pull_push) -- confirm.
      WebService -- object exposing getFargateService(); supplies the ECS
                    service targeted by the deploy stage.
    """
    def __init__(self, scope: core.Construct, id: str,UserName="default",Repo="default",WebService="default",**kwargs):
        super().__init__(scope, id, **kwargs)
        # Role assumed by CodeBuild (and EC2) during image builds.
        self.My_CodeBuild_Role = _iam.Role(self, 'CodeBuildRole-Web-' + UserName,
            assumed_by=_iam.CompositePrincipal(
                _iam.ServicePrincipal('ec2.amazonaws.com'),
                _iam.ServicePrincipal('codebuild.amazonaws.com')
            )
        )
        # Allow the build role to pull/push every workshop repository.
        for repo in Repo.getRepositoriesList():
            Repo.getRepositories(repo).grant_pull_push(self.My_CodeBuild_Role)
        # Source repository for the web project.
        self.My_CodeCommit_Web = _codecommit.Repository(self,
            "CodeCommit-Web-" + UserName,
            repository_name="Workshop-Web-" + UserName,
            description="CodeCommit for Web Project,Owner:" + UserName
        )
        # Privileged build environment (needed for docker-in-docker builds).
        self.My_CodeBuild_Web = _codebuild.PipelineProject(self,
            "CodeBuild-Web-" + UserName,
            project_name="CodeBuild-Web" + UserName,
            role=self.My_CodeBuild_Role,
            environment=_codebuild.BuildEnvironment(
                build_image=_codebuild.LinuxBuildImage.STANDARD_2_0,
                privileged=True
            )
        )
        # Pipeline artifacts passed between stages.
        self.CodeCommit_Web_Source = _codepipeline.Artifact("CodeCommit_Web_Source-" + UserName)
        self.EcsImage_Web_Source = _codepipeline.Artifact('EcsImage_Web_Source-' + UserName)
        self.FargateImage_Web_Source = _codepipeline.Artifact('FargateImage_Web_Source-' + UserName)
        # Three-stage pipeline: Source (CodeCommit master) -> Build -> Deploy.
        self.My_CodePipeline_Web = _codepipeline.Pipeline(self,
            "CodePipeline-Web-" + UserName,
            stages=[
                _codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        _codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit_Web_Source",
                            repository=self.My_CodeCommit_Web,
                            branch="master",
                            output=self.CodeCommit_Web_Source
                        )
                    ]
                ),
                _codepipeline.StageProps(
                    stage_name="Build",
                    actions=[
                        _codepipeline_actions.CodeBuildAction(
                            action_name="CodeCommit_Web_Build",
                            project=self.My_CodeBuild_Web,
                            input=self.CodeCommit_Web_Source,
                            outputs=[self.FargateImage_Web_Source]
                        )
                    ]
                ),
                _codepipeline.StageProps(
                    stage_name="Deploy",
                    actions = [
                        _codepipeline_actions.EcsDeployAction(
                            action_name='CodeDeploy_Web_Deploy',
                            service=WebService.getFargateService("WebApplicationService"),
                            input=self.FargateImage_Web_Source
                        )
                    ]
                )
            ]
        )
        # Surface the clone URL in the stack outputs.
        core.CfnOutput(self,
            "CodeCommit For WebApplication",
            value = self.My_CodeCommit_Web.repository_clone_url_http
) | StarcoderdataPython |
30074 | <reponame>mariocesar/boot.py<filename>setup.py
#!/usr/bin/env python3
"""Packaging script for boot.py (setuptools)."""
import sys

from setuptools import find_packages, setup

if sys.version_info < (3, 6):
    sys.exit('Python 3.6 is the minimum required version')

# Read the README once, with the handle properly closed afterwards (the
# original left the file open). The first paragraph becomes the short
# description; the remainder is the long description.
with open('README.rst', 'rt') as readme:
    description, long_description = readme.read().split('\n\n', 1)

setup(
    name='boot.py',
    author='<NAME>',
    author_email='<EMAIL>',
    version='0.16',
    url='https://github.com/mariocesar/boot.py',
    description=description,
    long_description=f'\n{long_description}',
    package_dir={'': 'src'},
    packages=find_packages('src'),
    python_requires='>=3.6',
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-cov'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| StarcoderdataPython |
193455 | <gh_stars>1-10
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from django.urls import reverse # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from starlingx_dashboard import api as stx_api
LOG = logging.getLogger(__name__)
class EditDevice(tables.LinkAction):
    """Row action opening the edit-device modal for a host's PCI device."""
    name = "update"
    verbose_name = _("Edit Device")
    url = "horizon:admin:inventory:editdevice"
    classes = ("ajax-modal", "btn-edit")
    def get_link_url(self, device=None):
        """Build the edit URL from the table's host id and the device uuid."""
        host_id = self.table.kwargs['host_id']
        return reverse(self.url, args=(host_id, device.uuid))
    def allowed(self, request, datum):
        """Editable only when the host is locked and has the worker subfunction."""
        host = self.table.kwargs['host']
        return (host._administrative == 'locked' and
                stx_api.sysinv.SUBFUNCTIONS_WORKER in host.subfunctions)
def get_viewdevice_link_url(device):
    """Build the URL for the detail view of *device* on its host."""
    view = "horizon:admin:inventory:viewdevice"
    return reverse(view, args=(device.host_id, device.uuid))
class DevicesTable(tables.DataTable):
    """Devices Table per host under Host Tab"""
    # Device name links to the per-device detail view.
    name = tables.Column('name',
                         verbose_name=_('Name'),
                         link=get_viewdevice_link_url)
    address = tables.Column('pciaddr',
                            verbose_name=_('Address'))
    device_id = tables.Column('pdevice_id',
                              verbose_name=_('Device Id'))
    device_name = tables.Column('pdevice',
                                verbose_name=_('Device Name'))
    numa_node = tables.Column('numa_node',
                              verbose_name=_('Numa Node'))
    enabled = tables.Column('enabled',
                            verbose_name=_('Enabled'))
    def get_object_id(self, datum):
        """Rows are keyed by the device uuid."""
        return str(datum.uuid)
    def get_object_display(self, datum):
        """Human-readable row label."""
        return datum.name
    class Meta(object):
        name = "devices"
        verbose_name = _("Devices")
        multi_select = False
        row_actions = (EditDevice,)
class UsageTable(tables.DataTable):
    """Detail usage table for a device under Device Usage tab"""
    host = tables.Column('host',
                         verbose_name=_('Host'))
    pci_pfs_configured = tables.Column('pci_pfs_configured',
                                       verbose_name=_('PFs configured'))
    # BUG FIX: previously sourced 'pci_pfs_configured', so the "PFs used"
    # column displayed the configured count. DeviceUsageTable sources the
    # same datum attribute as 'pci_pfs_used'.
    pci_pfs_used = tables.Column('pci_pfs_used',
                                 verbose_name=_('PFs used'))
    pci_vfs_configured = tables.Column('pci_vfs_configured',
                                       verbose_name=_('VFs configured'))
    pci_vfs_used = tables.Column('pci_vfs_used',
                                 verbose_name=_('VFs used'))
    def get_object_id(self, datum):
        """Rows are keyed by the usage record id."""
        return str(datum.id)
    def get_object_display(self, datum):
        """Human-readable row label (the host name)."""
        return datum.host
    class Meta(object):
        name = "usage"
        verbose_name = _("Usage")
        multi_select = False
def get_viewusage_link_url(usage):
    """Build the URL for the usage-detail view of a device."""
    view = "horizon:admin:inventory:viewusage"
    return reverse(view, args=(usage.device_id,))
class DeviceUsageTable(tables.DataTable):
    """Device Usage table for all devices (i.e Device Usage tab)"""
    # PCI alias links to the per-device usage detail view.
    device_name = tables.Column('device_name',
                                link=get_viewusage_link_url,
                                verbose_name=_('PCI Alias'))
    description = tables.Column('description', verbose_name=_('Description'))
    device_id = tables.Column('device_id',
                              verbose_name=_('Device Id'))
    vendor_id = tables.Column('vendor_id',
                              verbose_name=_('Vendor Id'))
    class_id = tables.Column('class_id',
                             verbose_name=_('Class Id'))
    pci_pfs_configured = tables.Column('pci_pfs_configured',
                                       verbose_name=_("PFs configured"))
    pci_pfs_used = tables.Column('pci_pfs_used',
                                 verbose_name=_("PFs used"))
    pci_vfs_configured = tables.Column('pci_vfs_configured',
                                       verbose_name=_("VFs configured"))
    pci_vfs_used = tables.Column('pci_vfs_used',
                                 verbose_name=_("VFs used"))
    def get_object_id(self, datum):
        """Rows are keyed by the device id."""
        return str(datum.device_id)
    def get_object_display(self, datum):
        """Human-readable row label."""
        return datum.device_name
    class Meta(object):
        name = "deviceusage"
        verbose_name = _("Device Usage")
| StarcoderdataPython |
3216177 | # -*- coding:utf-8 -*-
import copy
import json
import logging
import os
import sys
from io import open
logger = logging.getLogger(__name__)
# Canonical file name written by save_pretrained / read by from_pretrained.
CONFIG_NAME = "config.json"
class PretrainedConfig(object):
    """Base class for configuration containers.

    Holds arbitrary configuration attributes and provides JSON
    (de)serialization plus save/load helpers shared by concrete
    configuration classes such as ``BertConfig``.
    """

    # Optional shortcut-name -> config-location mapping; subclasses override.
    pretrained_config_archive_map = {}

    def __init__(self, **kwargs):
        # The base class stores nothing itself; subclasses set attributes.
        pass

    def save_pretrained(self, save_directory):
        """Write this configuration as ``config.json`` inside
        *save_directory* so ``from_pretrained`` can re-load it."""
        assert os.path.isdir(
            save_directory), "Saving path should be a directory where the model and configuration can be saved"
        # Using the predefined file name keeps save/load symmetric.
        target = os.path.join(save_directory, CONFIG_NAME)
        self.to_json_file(target)
        logger.info("Configuration saved in {}".format(target))

    @classmethod
    def from_pretrained(cls, pretrained_path, **kwargs):
        """Load a configuration from the JSON file at *pretrained_path*,
        then override attributes with any extra keyword arguments."""
        loaded = cls.from_json_file(os.path.join(pretrained_path))
        for name in kwargs:
            setattr(loaded, name, kwargs[name])
        logger.info("Model config %s", loaded)
        return loaded

    @classmethod
    def from_dict(cls, json_object):
        """Build a configuration whose attributes mirror *json_object*."""
        instance = cls(vocab_size_or_config_json_file=-1)
        for attr, val in json_object.items():
            setattr(instance, attr, val)
        return instance

    @classmethod
    def from_json_file(cls, json_file):
        """Build a configuration from a JSON file on disk."""
        with open(json_file, "r", encoding='utf-8') as fh:
            return cls.from_dict(json.loads(fh.read()))

    def __eq__(self, other):
        # Equal when every stored attribute matches.
        return other.__dict__ == self.__dict__

    def __repr__(self):
        # to_json_string already returns str; show the printable JSON form.
        return self.to_json_string()

    def to_dict(self):
        """Return a deep copy of the attribute dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serialize to a sorted, indented JSON string (trailing newline)."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Write the JSON form of this configuration to *json_file_path*."""
        with open(json_file_path, "w", encoding='utf-8') as fh:
            fh.write(self.to_json_string())
class BertConfig(PretrainedConfig):
    r"""
        :class:`~transformers.BertConfig` is the configuration class to store the configuration of a
        `BertModel`.
        Arguments:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The sttdev of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
    """
    def __init__(self,
                 vocab_size_or_config_json_file=21128,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super(BertConfig, self).__init__(**kwargs)
        # First positional argument doubles as either a vocabulary size or
        # a path to a JSON config file.
        # NOTE(review): the `unicode` branch is a Python 2 relic; under
        # Python 3 the version check short-circuits so `unicode` is never
        # evaluated -- safe but dead code on py3.
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            # Path form: load every key/value from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # Int form: interpret as vocab size and take remaining
            # hyperparameters from the keyword arguments/defaults.
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
class DistillBertConfig(PretrainedConfig):
    r"""Configuration for a DistilBERT-style model.

    Arguments:
        vocab_size_or_config_json_file: vocabulary size of ``inputs_ids``, or
            the path of a JSON file from which the whole configuration is read.
        hidden_size: size of the encoder layers and the pooler layer.
        num_hidden_layers: number of hidden layers in the Transformer encoder.
        num_attention_heads: number of attention heads per attention layer.
        intermediate_size: size of the feed-forward layer in the encoder.
        hidden_act: non-linear activation in encoder and pooler; as a string,
            "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: dropout probability for all fully connected layers
            in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: dropout ratio for attention probabilities.
        max_position_embeddings: maximum sequence length the model supports.
        initializer_range: stddev of the truncated-normal weight initializer.
        layer_norm_eps: epsilon used by LayerNorm.
        sequence_classif_dropout_prob: dropout probability stored for the
            sequence-classification head.
    """
    def __init__(self,
                 vocab_size_or_config_json_file=21128,
                 hidden_size=768,
                 num_hidden_layers=6,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 sequence_classif_dropout_prob=0.2,
                 **kwargs):
        super(DistillBertConfig, self).__init__(**kwargs)
        if isinstance(vocab_size_or_config_json_file, int):
            # Explicit hyper-parameters: copy every argument onto the instance.
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
            self.sequence_classif_dropout_prob = sequence_classif_dropout_prob
        elif isinstance(vocab_size_or_config_json_file, str) or (
                sys.version_info[0] == 2
                and isinstance(vocab_size_or_config_json_file, unicode)):
            # A (unicode) string names a JSON file; load every key from it.
            # The version check keeps ``unicode`` unevaluated on Python 3.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                loaded = json.loads(reader.read())
            for key, value in loaded.items():
                self.__dict__[key] = value
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
class ALBertConfig(PretrainedConfig):
    r"""Constructs AlbertConfig.
    Args:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`,
            or the path of a JSON file from which the whole configuration is loaded.
        embedding_size: Size of the (factorized) token embeddings.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_hidden_groups: Number of group for the hidden layers, parameters in
            the same group are shared.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        inner_group_num: int, number of inner repetition of attention and ffn.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `BertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
    """
    def __init__(self,
                 vocab_size_or_config_json_file=21128,
                 embedding_size=128,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_hidden_groups=1,
                 num_attention_heads=64,
                 intermediate_size=16384,
                 inner_group_num=1,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super(ALBertConfig, self).__init__(**kwargs)
        # A str argument (or Py2 ``unicode``; the version check keeps that name
        # unevaluated on Python 3) is the path of a JSON config file.
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            # Copy every key from the file verbatim onto this instance.
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # An int argument is the vocabulary size; remaining keyword
            # defaults become the configuration.
            self.vocab_size = vocab_size_or_config_json_file
            self.embedding_size = embedding_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_hidden_groups = num_hidden_groups
            self.num_attention_heads = num_attention_heads
            self.inner_group_num = inner_group_num
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
| StarcoderdataPython |
1782012 | <filename>py/invoke/gen-py/map_service/MapService.py<gh_stars>0
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
class Iface(object):
    """Service interface for MapService (Thrift-generated stub).

    Concrete handlers implement these methods; the generated ``Client`` and
    ``Processor`` classes speak the wire protocol on their behalf. Per the
    file header this code is autogenerated -- change the .thrift IDL instead.
    """
    def ping(self):
        # Liveness probe; the result struct carries an i32 status code.
        pass
    def pointToPointRoute(self, request):
        """
        Parameters:
         - request: a PointToPointRequest struct.
        """
        pass
    def batchPointToPointRoute(self, requests):
        """
        Parameters:
         - requests: a list of PointToPointRequest structs.
        """
        pass
class Client(Iface):
    """Thrift-generated synchronous client for MapService.

    Each RPC ``foo`` is implemented as ``send_foo`` (serialize the call onto
    the output protocol) followed by ``recv_foo`` (block and decode the reply).
    Autogenerated code -- do not edit; change the .thrift IDL instead.
    """
    def __init__(self, iprot, oprot=None):
        # With a single protocol argument the same transport is used for both
        # directions; an explicit ``oprot`` splits input and output.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0
    def ping(self):
        self.send_ping()
        return self.recv_ping()
    def send_ping(self):
        self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
        args = ping_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_ping(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            # Server-side failure travels back as a TApplicationException.
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = ping_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "ping failed: unknown result")
    def pointToPointRoute(self, request):
        """
        Parameters:
         - request
        """
        self.send_pointToPointRoute(request)
        return self.recv_pointToPointRoute()
    def send_pointToPointRoute(self, request):
        self._oprot.writeMessageBegin('pointToPointRoute', TMessageType.CALL, self._seqid)
        args = pointToPointRoute_args()
        args.request = request
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_pointToPointRoute(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = pointToPointRoute_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "pointToPointRoute failed: unknown result")
    def batchPointToPointRoute(self, requests):
        """
        Parameters:
         - requests
        """
        self.send_batchPointToPointRoute(requests)
        return self.recv_batchPointToPointRoute()
    def send_batchPointToPointRoute(self, requests):
        self._oprot.writeMessageBegin('batchPointToPointRoute', TMessageType.CALL, self._seqid)
        args = batchPointToPointRoute_args()
        args.requests = requests
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_batchPointToPointRoute(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = batchPointToPointRoute_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "batchPointToPointRoute failed: unknown result")
class Processor(Iface, TProcessor):
    """Thrift-generated server-side dispatcher for MapService.

    ``process`` reads one message from ``iprot``, looks up the matching
    ``process_*`` method, invokes the user handler, and writes the reply (or a
    TApplicationException) to ``oprot``. Autogenerated -- edit the IDL instead.
    """
    def __init__(self, handler):
        self._handler = handler
        self._processMap = {}
        self._processMap["ping"] = Processor.process_ping
        self._processMap["pointToPointRoute"] = Processor.process_pointToPointRoute
        self._processMap["batchPointToPointRoute"] = Processor.process_batchPointToPointRoute
    def process(self, iprot, oprot):
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            # Unknown method: drain the arguments and report UNKNOWN_METHOD.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True
    def process_ping(self, seqid, iprot, oprot):
        args = ping_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = ping_result()
        try:
            result.success = self._handler.ping()
            msg_type = TMessageType.REPLY
        except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
            # Transport errors and interpreter shutdown propagate unchanged.
            raise
        except Exception as ex:
            # Any handler failure is logged and mapped to INTERNAL_ERROR.
            msg_type = TMessageType.EXCEPTION
            logging.exception(ex)
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("ping", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_pointToPointRoute(self, seqid, iprot, oprot):
        args = pointToPointRoute_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = pointToPointRoute_result()
        try:
            result.success = self._handler.pointToPointRoute(args.request)
            msg_type = TMessageType.REPLY
        except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
            raise
        except Exception as ex:
            msg_type = TMessageType.EXCEPTION
            logging.exception(ex)
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("pointToPointRoute", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_batchPointToPointRoute(self, seqid, iprot, oprot):
        args = batchPointToPointRoute_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = batchPointToPointRoute_result()
        try:
            result.success = self._handler.batchPointToPointRoute(args.requests)
            msg_type = TMessageType.REPLY
        except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
            raise
        except Exception as ex:
            msg_type = TMessageType.EXCEPTION
            logging.exception(ex)
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("batchPointToPointRoute", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class ping_args(object):
    # Thrift-generated argument struct for ``ping`` (no fields).
    thrift_spec = (
    )
    def read(self, iprot):
        # Fast path: delegate to the C accelerator when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ping_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class ping_result(object):
    """Thrift-generated result struct for ``ping``.

    Attributes:
     - success: i32 status code returned by the handler.
    """
    thrift_spec = (
        (0, TType.I32, 'success', None, None, ),  # 0
    )
    def __init__(self, success=None,):
        self.success = success
    def read(self, iprot):
        # Fast path: delegate to the C accelerator when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ping_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class pointToPointRoute_args(object):
    """Thrift-generated argument struct for ``pointToPointRoute``.

    Attributes:
     - request: PointToPointRequest struct (field id 1).
    """
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'request', (PointToPointRequest, PointToPointRequest.thrift_spec), None, ),  # 1
    )
    def __init__(self, request=None,):
        self.request = request
    def read(self, iprot):
        # Fast path: delegate to the C accelerator when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.request = PointToPointRequest()
                    self.request.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('pointToPointRoute_args')
        if self.request is not None:
            oprot.writeFieldBegin('request', TType.STRUCT, 1)
            self.request.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class pointToPointRoute_result(object):
    """Thrift-generated result struct for ``pointToPointRoute``.

    Attributes:
     - success: PointToPointResponse struct (field id 0).
    """
    thrift_spec = (
        (0, TType.STRUCT, 'success', (PointToPointResponse, PointToPointResponse.thrift_spec), None, ),  # 0
    )
    def __init__(self, success=None,):
        self.success = success
    def read(self, iprot):
        # Fast path: delegate to the C accelerator when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = PointToPointResponse()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('pointToPointRoute_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class batchPointToPointRoute_args(object):
    """Thrift-generated argument struct for ``batchPointToPointRoute``.

    Attributes:
     - requests: list of PointToPointRequest structs (field id 1).
    """
    thrift_spec = (
        None,  # 0
        (1, TType.LIST, 'requests', (TType.STRUCT, (PointToPointRequest, PointToPointRequest.thrift_spec), False), None, ),  # 1
    )
    def __init__(self, requests=None,):
        self.requests = requests
    def read(self, iprot):
        # Fast path: delegate to the C accelerator when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    self.requests = []
                    (_etype17, _size14) = iprot.readListBegin()
                    for _i18 in range(_size14):
                        _elem19 = PointToPointRequest()
                        _elem19.read(iprot)
                        self.requests.append(_elem19)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('batchPointToPointRoute_args')
        if self.requests is not None:
            oprot.writeFieldBegin('requests', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.requests))
            for iter20 in self.requests:
                iter20.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class batchPointToPointRoute_result(object):
    """Thrift-generated result struct for ``batchPointToPointRoute``.

    Attributes:
     - success: list of PointToPointResponse structs (field id 0).
    """
    thrift_spec = (
        (0, TType.LIST, 'success', (TType.STRUCT, (PointToPointResponse, PointToPointResponse.thrift_spec), False), None, ),  # 0
    )
    def __init__(self, success=None,):
        self.success = success
    def read(self, iprot):
        # Fast path: delegate to the C accelerator when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype24, _size21) = iprot.readListBegin()
                    for _i25 in range(_size21):
                        _elem26 = PointToPointResponse()
                        _elem26.read(iprot)
                        self.success.append(_elem26)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('batchPointToPointRoute_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter27 in self.success:
                iter27.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
| StarcoderdataPython |
1795423 | <filename>xcv/WIP/hud.py
"""This is for drawing game/debug info on the OpenCV output frame.
See gui.py for displaying information within the GUI Window.
"""
from collections import namedtuple
import cv2
# ========================================
# Color Stuff
# NOTE(review): the tuples below are used with OpenCV, whose channel order is
# BGR, so the namedtuple field names ('r', 'g', 'b') appear reversed -- e.g.
# RED = (0, 0, 255) means blue=0, green=0, red=255. Confirm before relying on
# the field names; as plain tuples the values draw the advertised colors.
Color = namedtuple('Color', ['r', 'g', 'b'])
GREEN = Color(0, 255, 0)
RED = Color(0, 0, 255)
BLUE = Color(255, 75, 0)
WHITE = Color(255, 255, 255)
BLACK = Color(0, 0, 0)
YELLOW = Color(0, 255, 255)
TEAL = Color(255, 255, 0)
PINK = Color(255, 0, 255)
ORANGE = Color(0, 130, 255)
GRAY1 = Color(20, 20, 20)
GRAY2 = Color(50, 50, 50)
GRAY4 = Color(200, 200, 200)
# Shared font for every HUD label.
font = cv2.FONT_HERSHEY_SIMPLEX
# Placeholder elapsed-time value displayed by the helpers below.
elapsedTime = 666
# HUD functions
def draw_HUD_FPS(frame, fps: int = 0) -> None:
    """Draw the frames-per-second readout in the top-left corner of *frame*.

    Args:
        frame: OpenCV image (BGR) to draw on; modified in place.
        fps: current frames per second; nothing is drawn when it is 0.
    """
    # Bug fix: the original tested ``fps is not 0`` -- identity, not equality,
    # which only works for small interned CPython ints by accident.
    if fps != 0:
        cv2.putText(frame, "FPS", (5, 15), font, 0.25, GRAY4, 1)
        cv2.putText(frame, str(fps), (25, 15), font, 0.5, GRAY4, 1)
def draw_HUD_elapsedTime(frame) -> None:
    """Draw the module-level ``elapsedTime`` value in the top-right corner.

    Args:
        frame: OpenCV image (BGR) to draw on; modified in place.
    """
    # Bug fix: the original tested ``elapsedTime is not 0`` -- identity, not
    # equality, which is correct for small interned ints only by accident.
    if elapsedTime != 0:
        cv2.putText(frame, "Elapsed", (530, 20), font, 0.25, GRAY2, 1)
        cv2.putText(frame, str(elapsedTime), (530, 35), font, 0.5, GRAY2, 1)
def draw_HUD_elapsedGameTime(frame) -> None:
    """Draw the in-game elapsed time label near the top of *frame*.

    Args:
        frame: OpenCV image (BGR) to draw on; modified in place.
    """
    # Bug fix: ``elapsedTime is not 0`` replaced with a real equality test
    # (identity comparison with an int literal is unreliable).
    # NOTE(review): this reads the same module-level ``elapsedTime`` as
    # draw_HUD_elapsedTime -- presumably a dedicated game timer was intended.
    if elapsedTime != 0:
        cv2.putText(frame, "Game", (410, 20), font, 0.25, GRAY2, 1)
        cv2.putText(frame, str(elapsedTime), (410, 35), font, 0.5, GREEN, 1)
def draw_HUD_HomeAway(frame) -> None:
    """Draw "Home" or "Away" depending on the detected game state.

    NOTE(review): ``FifaFlags`` is not defined or imported anywhere in this
    module, so calling this raises NameError as written -- confirm where
    FifaFlags is supposed to come from before using this helper.
    """
    if FifaFlags.HomeAway == 1:
        cv2.putText(frame, "Home", (275, 25), font, 0.5, GREEN, 1)
    elif FifaFlags.HomeAway == 2:
        cv2.putText(frame, "Away", (275, 25), font, 0.5, GREEN, 1)
def draw_HUD_DefendingSide(frame):
    """Placeholder: intended to show game state and defending side.

    The whole body is currently disabled (commented out) -- it depends on a
    ``FifaFlags`` object that is not defined in this module -- so the function
    is a no-op for now.
    """
    # # Display the detected game state
    # cv2.putText(frame, "Game State", (10, 435), font, 0.5, GRAY2, 1)
    # cv2.putText(frame, FifaFlags.gameStates[FifaFlags.State], (10, 470), font, 1, TEAL, 2)
    # # Defense
    # if FifaFlags.Defending == 1:
    #     cv2.putText(frame, "Defend Left", (275, 50), font, 0.5, GREEN, 1)
    # elif FifaFlags.Defending == 2:
    #     cv2.putText(frame, "Defend Right", (275, 50), font, 0.5, GREEN, 1)
    return
# # ===========================================================================
# # Controller
# # ===========================================================================
def draw_HUD_controller(frame, press: str = None) -> None:
    """Render the Xbox controller overlay (face buttons, d-pad, analogue
    sticks, Start/Select) onto *frame*, highlighting the pressed input.

    Args:
        frame: OpenCV image (BGR) to draw on; modified in place.
        press: key character for the currently pressed input, or None.
    """
    # Face buttons: (key, label, text position, circle centre, pressed
    # colour, pressed ring thickness). Y intentionally keeps its thinner
    # 1px pressed ring, matching the previous rendering exactly.
    face_buttons = (
        ('a', "A", (480, 470), (485, 465), GREEN, 2),
        ('b', "B", (495, 455), (500, 450), RED, 2),
        ('x', "X", (465, 455), (470, 450), BLUE, 2),
        ('y', "Y", (480, 440), (485, 435), YELLOW, 1),
    )
    for key, label, text_pos, centre, colour, ring in face_buttons:
        if press == key:
            cv2.putText(frame, label, text_pos, font, 0.5, colour, 2)
            cv2.circle(frame, centre, 9, colour, ring)
        else:
            cv2.putText(frame, label, text_pos, font, 0.5, GRAY2, 2)
            cv2.circle(frame, centre, 9, GRAY2, 1)
    cv2.putText(frame, "Xbox", (270, 435), font, 0.5, GRAY2, 1)
    # D-pad arms: (key, first rectangle corner, opposite corner),
    # in up / left / down / right order.
    dpad = (
        ('8', (390, 440), (380, 450)),
        ('4', (370, 450), (380, 460)),
        ('2', (390, 460), (380, 470)),
        ('6', (390, 450), (400, 460)),
    )
    for key, corner_a, corner_b in dpad:
        colour = YELLOW if press == key else GRAY2
        cv2.rectangle(frame, corner_a, corner_b, colour, 1)
    # Analogue sticks (left then right): a 1px centre dot plus a gray ring.
    for stick_centre in ((350, 440), (440, 460)):
        cv2.circle(frame, stick_centre, 1, YELLOW, 1)
        cv2.circle(frame, stick_centre, 15, GRAY2, 1)
    cv2.putText(frame, "Select", (270, 475), font, 0.5, GRAY2, 1)
    start_colour = YELLOW if press == '3' else GRAY2
    cv2.putText(frame, "Start", (270, 455), font, 0.5, start_colour, 1)
| StarcoderdataPython |
1757664 | #!/usr/bin/env python
# Standard Python libraries.
# Third party Python libraries.
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Custom Python libraries.
from . import logger
# Disable warning: "InsecureRequestWarning: Unverified HTTPS request is being made.
# Adding certificate verification is strongly advised"
# https://stackoverflow.com/questions/27981545/suppress-insecurerequestwarning-unverified-https-request-is-being-made-in-pytho
# NOTE(review): this silences the TLS-verification warning process-wide
# because every request below uses ``verify=False``; confirm that skipping
# certificate verification against the master is intentional.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def check_for_scan_jobs(config_data):
    """Check for new scan jobs through the API.

    Args:
        config_data: dict with "master_address", "master_port", "api_token",
            and "scan_agent" keys.

    Returns:
        The decoded JSON payload of scheduled scans, or None on any failure
        (non-200 response, network error, or invalid JSON).
    """
    # Build URL to pull new scan jobs. Server determines jobs based off agent (user) making request.
    master_address = config_data["master_address"]
    master_port = config_data["master_port"]
    api_token = config_data["api_token"]
    url = f"{master_address}:{master_port}/api/scheduled_scans"
    logger.ROOT_LOGGER.info(f"check_for_scans URL: {url}")
    # Update User-Agent and add API token.
    # fmt:off
    headers = {
        "user-agent": config_data["scan_agent"],
        "Authorization": f"Token {api_token}",
    }
    # fmt:on
    try:
        # Make the HTTP GET request. Certificate verification is disabled
        # deliberately (see module header); the timeout guards against hangs.
        response = requests.get(url, headers=headers, verify=False, timeout=15)
        # Return response as JSON if request is successful.
        if response.status_code == 200:
            return response.json()
        logger.ROOT_LOGGER.error(f"Could not access {url}. HTTP status code: {response.status_code}")
        return None
    # Narrowed from a bare ``except Exception`` so programming errors surface;
    # ValueError covers a 200 response whose body is not valid JSON.
    except (requests.exceptions.RequestException, ValueError) as e:
        logger.ROOT_LOGGER.error(f"api.check_for_scan_jobs function exception: {e}")
        return None
def update_scan_information(config_data, scan_job, update_info):
    """Update scan information using a PATCH API request.

    Args:
        config_data: dict with "master_address", "master_port", "api_token",
            and "scan_agent" keys.
        scan_job: dict describing the scan job; only its "id" key is used.
        update_info: JSON-serializable payload sent as the PATCH body.

    Returns:
        None in all cases; success or failure is reported through logging.
    """
    master_address = config_data["master_address"]
    master_port = config_data["master_port"]
    api_token = config_data["api_token"]
    scan_agent = config_data["scan_agent"]
    scan_job_id = scan_job["id"]
    # Build URL to update scan job.
    url = f"{master_address}:{master_port}/api/scheduled_scans/{scan_job_id}"
    logger.ROOT_LOGGER.info(f"update_scan_information URL: {url}")
    # Update the User-Agent, API token, and Content-Type.
    # fmt:off
    headers = {
        "user-agent": scan_agent,
        "Authorization": f"Token {api_token}",
        "Content-Type": "application/json",
    }
    # fmt:on
    try:
        # Make the HTTP PATCH request; wrapped for consistency with
        # check_for_scan_jobs so network failures are logged, not raised.
        response = requests.patch(url, headers=headers, verify=False, timeout=15, json=update_info)
    except requests.exceptions.RequestException as e:
        logger.ROOT_LOGGER.error(f"api.update_scan_information function exception: {e}")
        return None
    if response.status_code == 200:
        logger.ROOT_LOGGER.info(
            f"Successfully updated scan information for scan ID {scan_job_id} with data {update_info}"
        )
        return None
    logger.ROOT_LOGGER.error(
        f"Could not access {url} or failed to update scan ID {scan_job_id}. HTTP status code: {response.status_code}"
    )
    # Bug fix: the original called ``.format()`` on an already-interpolated
    # f-string -- a no-op left over from a str.format() conversion.
    logger.ROOT_LOGGER.error(f"Response content: {response.content}")
    return None
| StarcoderdataPython |
15908 | r"""
This is the base module for all other objects of the package.
+ `LaTeX` returns a LaTeX string out of an `Irene` object.
+ `base` is the parent of all `Irene` objects.
"""
def LaTeX(obj):
    r"""
    Returns LaTeX representation of Irene's objects.
    """
    # Imports are deferred so importing this module stays cheap.
    from sympy.core.core import all_classes
    from Irene import SDPRelaxations, SDRelaxSol, Mom
    # Irene's own objects know how to render themselves.
    if isinstance(obj, (SDPRelaxations, SDRelaxSol, Mom)):
        return obj.__latex__()
    # Fall back to sympy's generic printer for sympy expressions;
    # any other object yields None, as before.
    if isinstance(obj, tuple(all_classes)):
        from sympy import latex
        return latex(obj)
class base(object):
    r"""
    Common functionality shared by all `Irene` objects, such as locating
    the external SDP solver executables available on this machine.
    """

    def __init__(self):
        from sys import platform
        self.os = platform
        if self.os == 'win32':
            import os
            # On Windows the bundled solver binaries live next to this file.
            BASE = os.sep.join(
                os.path.dirname(os.path.realpath(__file__)).split(os.sep)) + os.sep
            self.Path = dict(csdp=BASE + "csdp.exe", sdpa=BASE + "sdpa.exe")
        else:
            # Elsewhere the solvers are expected to be found on the PATH.
            self.Path = dict(csdp="csdp", sdpa="sdpa")

    def which(self, program):
        r"""
        Return the full path of `program` if it exists system-wide and is
        executable; return `None` otherwise.
        """
        import os

        def runnable(candidate):
            return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

        directory, _ = os.path.split(program)
        if directory:
            # An explicit path: accept it only if it is itself executable.
            if runnable(program):
                return program
        else:
            # A bare name: search every directory on the PATH.
            for entry in os.environ["PATH"].split(os.pathsep):
                candidate = os.path.join(entry.strip('"'), program)
                if runnable(candidate):
                    return candidate
        return None

    def AvailableSDPSolvers(self):
        r"""
        Return the list of SDP solvers found on this machine.
        """
        available = []
        # CVXOPT is a Python package, so a plain import test suffices.
        try:
            import cvxopt
            available.append('CVXOPT')
        except ImportError:
            pass
        if self.os == 'win32':
            from os.path import isfile
            if ('dsdp' in self.Path) and isfile(self.Path['dsdp']):
                available.append('DSDP')
            if ('sdpa' in self.Path) and isfile(self.Path['sdpa']):
                available.append('SDPA')
            if ('csdp' in self.Path) and isfile(self.Path['csdp']):
                available.append('CSDP')
        else:
            if self.which('dsdp5') is not None:
                available.append('DSDP')
            if self.which('sdpa') is not None:
                available.append('SDPA')
            if self.which('csdp') is not None:
                available.append('CSDP')
        return available
| StarcoderdataPython |
3379252 | <gh_stars>0
import unittest
from models import Newssources
class SourcesTest(unittest.TestCase):
    '''
    Test class to test behaviour of the news source class.
    '''
    def setUp(self):
        '''
        Set up method that will run before every test.
        '''
        # Bug fix: the original instantiated the undefined name ``Newssource``
        # (NameError at runtime); the class imported from ``models`` is
        # ``Newssources``.
        # NOTE(review): argument order/count is assumed to match the model's
        # __init__ (source, author, title, description, url, image_url,
        # published_at) -- confirm against models.Newssources.
        self.new_source = Newssources(
            'The Wall Street Journal',
            '<NAME>',
            'Trump to Promote U.S. as Open for Business in Davos Speech',
            '"<NAME> is expected to promote the U.S. as “open for business,” while highlighting the nation’s commitment to global trade in an address to foreign leaders and business executives at the World Economic Forum.',
            'https://www.wsj.com/articles/trump-to-promote-u-s-as-open-for-business-in-davos-speech-1516962420',
            'https://si.wsj.net/public/resources/images/BN-XE643_3nQuF_TOP_20180126053039.jpg',
            '2018-01-26T11:04:48Z')

    def test_instance(self):
        # Fixed to check against the imported class name.
        self.assertTrue(isinstance(self.new_source, Newssources))
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
132119 | <filename>holoviews/plotting/mpl/graphs.py<gh_stars>0
import param
import numpy as np
from matplotlib.collections import LineCollection, PolyCollection
from ...core.data import Dataset
from ...core.options import Cycle
from ...core.util import basestring, unique_array, search_indices, max_range
from ..util import process_cmap
from .element import ColorbarPlot
class GraphPlot(ColorbarPlot):
    """
    Matplotlib plot for holoviews Graph elements: nodes are rendered as a
    scatter, edges as a LineCollection (or PolyCollection when ``filled``).
    """

    color_index = param.ClassSelector(default=None, class_=(basestring, int),
                                      allow_None=True, doc="""
        Index of the dimension from which the color will the drawn""")

    edge_color_index = param.ClassSelector(default=None, class_=(basestring, int),
                                           allow_None=True, doc="""
        Index of the dimension from which the color will the drawn""")

    style_opts = ['edge_alpha', 'edge_color', 'edge_linestyle', 'edge_linewidth',
                  'node_alpha', 'node_color', 'node_edgecolors', 'node_facecolors',
                  'node_linewidth', 'node_marker', 'node_size', 'visible', 'cmap',
                  'edge_cmap']

    # Prefixes used to split plot_kwargs between the node and edge artists.
    _style_groups = ['node', 'edge']

    # When True, edges are drawn as filled polygons (see TriMeshPlot).
    filled = False

    def _compute_styles(self, element, ranges, style):
        """
        Resolve node and edge colors from ``color_index`` and
        ``edge_color_index``, mutating ``style`` in place and returning it.
        """
        elstyle = self.lookup_options(element, 'style')
        color = elstyle.kwargs.get('node_color')
        cdim = element.nodes.get_dimension(self.color_index)
        cmap = elstyle.kwargs.get('cmap', 'tab20')
        if cdim:
            cs = element.nodes.dimension_values(self.color_index)
            # Check if numeric otherwise treat as categorical
            if cs.dtype.kind == 'f':
                style['c'] = cs
            else:
                factors = unique_array(cs)
                cmap = color if isinstance(color, Cycle) else cmap
                if isinstance(cmap, dict):
                    # Explicit factor->color mapping; fall back to the 'NaN' entry.
                    colors = [cmap.get(f, cmap.get('NaN', {'color': self._default_nan})['color'])
                              for f in factors]
                else:
                    colors = process_cmap(cmap, len(factors))
                cs = search_indices(cs, factors)
                style['node_facecolors'] = [colors[v%len(colors)] for v in cs]
                style.pop('node_color', None)
            if 'c' in style:
                self._norm_kwargs(element.nodes, ranges, style, cdim)
        elif color:
            style['c'] = style.pop('node_color')
        style['node_edgecolors'] = style.pop('node_edgecolors', 'none')
        edge_cdim = element.get_dimension(self.edge_color_index)
        if not edge_cdim:
            return style
        elstyle = self.lookup_options(element, 'style')
        cycle = elstyle.kwargs.get('edge_color')
        idx = element.get_dimension_index(edge_cdim)
        cvals = element.dimension_values(edge_cdim)
        if idx in [0, 1]:
            # Coloring by source/target column: factor over the node index dimension.
            factors = element.nodes.dimension_values(2, expanded=False)
        elif idx == 2 and cvals.dtype.kind in 'uif':
            # Numeric edge values: colormapped directly rather than factored.
            factors = None
        else:
            factors = unique_array(cvals)
        if factors is None or (factors.dtype.kind == 'f' and idx not in [0, 1]):
            style['edge_array'] = cvals
        else:
            cvals = search_indices(cvals, factors)
            factors = list(factors)
            cmap = elstyle.kwargs.get('edge_cmap', 'tab20')
            cmap = cycle if isinstance(cycle, Cycle) else cmap
            if isinstance(cmap, dict):
                colors = [cmap.get(f, cmap.get('NaN', {'color': self._default_nan})['color'])
                          for f in factors]
            else:
                colors = process_cmap(cmap, len(factors))
            style['edge_colors'] = [colors[v%len(colors)] for v in cvals]
            style.pop('edge_color', None)
        if 'edge_array' in style:
            self._norm_kwargs(element, ranges, style, edge_cdim, 'edge_')
        else:
            style.pop('edge_cmap', None)
        if 'edge_vmin' in style:
            style['edge_clim'] = (style.pop('edge_vmin'), style.pop('edge_vmax'))
        return style

    def get_data(self, element, ranges, style):
        """
        Return plot data as ``{'nodes': (xs, ys), 'edges': [paths]}``
        together with the resolved style and axis keyword arguments.
        """
        xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
        pxs, pys = (element.nodes.dimension_values(i) for i in range(2))
        dims = element.nodes.dimensions()
        self._compute_styles(element, ranges, style)
        paths = element._split_edgepaths.split(datatype='array', dimensions=element.edgepaths.kdims)
        if self.invert_axes:
            # Swap x/y columns of every edge path for inverted axes.
            paths = [p[:, ::-1] for p in paths]
        return {'nodes': (pxs, pys), 'edges': paths}, style, {'dimensions': dims}

    def get_extents(self, element, ranges, range_type='combined'):
        """Compute extents from the node positions rather than the edges."""
        return super(GraphPlot, self).get_extents(element.nodes, ranges, range_type)

    def init_artists(self, ax, plot_args, plot_kwargs):
        """Create the edge collection and node scatter artists on ``ax``."""
        # Draw edges
        color_opts = ['c', 'cmap', 'vmin', 'vmax', 'norm']
        groups = [g for g in self._style_groups if g != 'edge']
        # Keep only edge-prefixed (or unprefixed, non-color) options, stripping
        # the 'edge_' prefix so matplotlib recognises them.
        edge_opts = {k[5:] if 'edge_' in k else k: v
                     for k, v in plot_kwargs.items()
                     if not any(k.startswith(p) for p in groups)
                     and k not in color_opts}
        paths = plot_args['edges']
        if self.filled:
            coll = PolyCollection
            if 'colors' in edge_opts:
                edge_opts['facecolors'] = edge_opts.pop('colors')
        else:
            coll = LineCollection
        edges = coll(paths, **edge_opts)
        ax.add_collection(edges)
        # Draw nodes
        xs, ys = plot_args['nodes']
        groups = [g for g in self._style_groups if g != 'node']
        node_opts = {k[5:] if 'node_' in k else k: v
                     for k, v in plot_kwargs.items()
                     if not any(k.startswith(p) for p in groups)}
        # scatter sizes are in points^2, so square the diameter-style 'size'.
        if 'size' in node_opts: node_opts['s'] = node_opts.pop('size')**2
        nodes = ax.scatter(xs, ys, **node_opts)
        return {'nodes': nodes, 'edges': edges}

    def _update_nodes(self, element, data, style):
        """Update node positions and color mapping on the existing scatter."""
        nodes = self.handles['nodes']
        xs, ys = data['nodes']
        nodes.set_offsets(np.column_stack([xs, ys]))
        cdim = element.nodes.get_dimension(self.color_index)
        if cdim and 'c' in style:
            nodes.set_clim((style['vmin'], style['vmax']))
            nodes.set_array(style['c'])
            if 'norm' in style:
                nodes.norm = style['norm']

    def _update_edges(self, element, data, style):
        """Update edge paths, visibility and colors on the edge collection."""
        edges = self.handles['edges']
        paths = data['edges']
        edges.set_paths(paths)
        edges.set_visible(style.get('visible', True))
        cdim = element.get_dimension(self.edge_color_index)
        if cdim:
            if 'edge_array' in style:
                edges.set_clim(style['edge_clim'])
                edges.set_array(style['edge_array'])
                if 'norm' in style:
                    edges.norm = style['edge_norm']
            elif 'edge_colors' in style:
                if self.filled:
                    edges.set_facecolors(style['edge_colors'])
                else:
                    edges.set_edgecolors(style['edge_colors'])

    def update_handles(self, key, axis, element, ranges, style):
        """Refresh node and edge artists for a new frame/key."""
        data, style, axis_kwargs = self.get_data(element, ranges, style)
        self._update_nodes(element, data, style)
        self._update_edges(element, data, style)
        return axis_kwargs
class TriMeshPlot(GraphPlot):
    """
    GraphPlot subclass for TriMesh elements, optionally drawing the
    triangles as filled polygons.
    """

    filled = param.Boolean(default=False, doc="""
        Whether the triangles should be drawn as filled.""")

    style_opts = GraphPlot.style_opts + ['edge_facecolors']

    def get_data(self, element, ranges, style):
        """
        Prepare TriMesh data for plotting; if coloring by a vertex dimension
        (rather than a simplex dimension), average the vertex values over
        each triangle and attach them as a new value dimension.
        """
        simplex_dim = element.get_dimension(self.edge_color_index)
        vertex_dim = element.nodes.get_dimension(self.edge_color_index)
        if not isinstance(self.edge_color_index, int) and vertex_dim and not simplex_dim:
            simplices = element.array([0, 1, 2])
            z = element.nodes.dimension_values(vertex_dim)
            # Per-simplex value = mean of its three vertex values.
            z = z[simplices].mean(axis=1)
            element = element.add_dimension(vertex_dim, len(element.vdims), z, vdim=True)
        # Ensure the edgepaths for the triangles are generated before plotting
        element.edgepaths
        return super(TriMeshPlot, self).get_data(element, ranges, style)
class ChordPlot(GraphPlot):
    """
    GraphPlot subclass for Chord elements: nodes on a unit circle, arcs
    drawn along the circle between node angles, and optional node labels.
    """

    label_index = param.ClassSelector(default=None, class_=(basestring, int),
                                      allow_None=True, doc="""
      Index of the dimension from which the node labels will be drawn""")

    style_opts = GraphPlot.style_opts + ['text_font_size', 'label_offset']

    # Arcs form a third artist group in addition to nodes and edges.
    _style_groups = ['edge', 'node', 'arc']

    def get_extents(self, element, ranges, range_type='combined'):
        """
        A Chord plot is always drawn on a unit circle.
        """
        xdim, ydim = element.nodes.kdims[:2]
        if range_type not in ('combined', 'data'):
            return xdim.range[0], ydim.range[0], xdim.range[1], ydim.range[1]
        # Leave extra margin (1.4 vs 1.1) when labels are drawn outside the circle.
        rng = 1.1 if element.nodes.get_dimension(self.label_index) is None else 1.4
        x0, x1 = max_range([xdim.range, (-rng, rng)])
        y0, y1 = max_range([ydim.range, (-rng, rng)])
        return (x0, y0, x1, y1)

    def get_data(self, element, ranges, style):
        """
        Extend GraphPlot.get_data with arc paths along the circle and label
        text entries (position, text, rotation) when ``label_index`` is set.
        """
        data, style, plot_kwargs = super(ChordPlot, self).get_data(element, ranges, style)
        if isinstance(style.get('node_facecolors'), list):
            angles = element._angles
            paths = []
            # Sample each node's angular span to build its arc path.
            for i in range(len(element.nodes)):
                start, end = angles[i:i+2]
                vals = np.linspace(start, end, 20)
                paths.append(np.column_stack([np.cos(vals), np.sin(vals)]))
            data['arcs'] = paths
            style['arc_colors'] = style['node_facecolors']
            style['arc_linewidth'] = 10
        lidx = element.nodes.get_dimension(self.label_index)
        if lidx is None:
            if self.label_index is not None:
                dims = element.nodes.dimensions()[2:]
                self.warning("label_index supplied to Chord not found, "
                             "expected one of %s, got %s." %
                             (dims, self.label_index))
            return data, style, plot_kwargs
        nodes = element.nodes
        if element.vdims:
            values = element.dimension_values(element.vdims[0])
            if values.dtype.kind in 'uif':
                # Only label nodes that participate in at least one positive-value edge.
                edges = Dataset(element)[values>0]
                nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
                nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
        # Push labels slightly outside the circle.
        offset = style.get('label_offset', 1.05)
        xs, ys = (nodes.dimension_values(i)*offset for i in range(2))
        labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]
        # Rotate each label to align with its radial direction.
        angles = np.rad2deg(np.arctan2(ys, xs))
        data['text'] = (xs, ys, labels, angles)
        return data, style, plot_kwargs

    def init_artists(self, ax, plot_args, plot_kwargs):
        """Create arc, node/edge and label artists on ``ax``."""
        artists = {}
        if 'arcs' in plot_args:
            color_opts = ['c', 'cmap', 'vmin', 'vmax', 'norm']
            groups = [g for g in self._style_groups if g != 'arc']
            # Keep only arc-prefixed options, stripping the 'arc_' prefix.
            edge_opts = {k[4:] if 'arc_' in k else k: v
                         for k, v in plot_kwargs.items()
                         if not any(k.startswith(p) for p in groups)
                         and k not in color_opts}
            paths = plot_args['arcs']
            edges = LineCollection(paths, **edge_opts)
            ax.add_collection(edges)
            artists['arcs'] = edges
        artists.update(super(ChordPlot, self).init_artists(ax, plot_args, plot_kwargs))
        if 'text' in plot_args:
            fontsize = plot_kwargs.get('text_font_size', 8)
            labels = []
            for (x, y, l, a) in zip(*plot_args['text']):
                label = ax.annotate(l, xy=(x, y), xycoords='data', rotation=a,
                                    horizontalalignment='left', fontsize=fontsize,
                                    verticalalignment='center', rotation_mode='anchor')
                labels.append(label)
            artists['labels'] = labels
        return artists

    def _update_arcs(self, element, data, style):
        """Update arc paths and visibility on the existing arc collection."""
        edges = self.handles['arcs']
        paths = data['arcs']
        edges.set_paths(paths)
        edges.set_visible(style.get('visible', True))

    def _update_labels(self, ax, element, data, style):
        """Remove the previous frame's labels and draw the current ones."""
        labels = self.handles.get('labels', [])
        for label in labels:
            try:
                label.remove()
            except:
                pass
        if 'text' not in data:
            self.handles['labels'] = []
            return
        labels = []
        fontsize = style.get('text_font_size', 8)
        for (x, y, l, a) in zip(*data['text']):
            label = ax.annotate(l, xy=(x, y), xycoords='data', rotation=a,
                                horizontalalignment='left', fontsize=fontsize,
                                verticalalignment='center', rotation_mode='anchor')
            labels.append(label)
        self.handles['labels'] = labels

    def update_handles(self, key, axis, element, ranges, style):
        """Refresh nodes, edges, arcs and labels for a new frame/key."""
        data, style, axis_kwargs = self.get_data(element, ranges, style)
        self._update_nodes(element, data, style)
        self._update_edges(element, data, style)
        self._update_arcs(element, data, style)
        self._update_labels(axis, element, data, style)
        return axis_kwargs
| StarcoderdataPython |
1681897 | import os
import json
import six
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.upload import Upload
from girder.models.user import User
from tests import base
def setUpModule():
    """Start the Girder test server with the dicom_viewer plugin enabled."""
    base.enabledPlugins.append('dicom_viewer')
    base.startServer()
    # Import plugin internals only after the server has loaded the plugin,
    # and expose them as module globals for the tests below.
    global _removeUniqueMetadata
    global _extractFileData
    from girder.plugins.dicom_viewer import _removeUniqueMetadata, _extractFileData
def tearDownModule():
    """Stop the test server started in setUpModule."""
    base.stopServer()
class DicomViewerTest(base.TestCase):
    """
    Integration tests for the dicom_viewer Girder plugin: metadata
    extraction, item processing on upload, the parseDicom endpoint,
    and DICOM-aware resource search.
    """

    def setUp(self):
        """Create two test users (index 0 acts as admin in the tests)."""
        base.TestCase.setUp(self)
        self.dataDir = os.path.join(
            os.environ['GIRDER_TEST_DATA_PREFIX'], 'plugins', 'dicom_viewer')
        self.users = [User().createUser(
            'usr%s' % num, 'passwd', 'tst', 'usr', '<EMAIL>' % num)
            for num in [0, 1]]

    def testRemoveUniqueMetadata(self):
        """_removeUniqueMetadata should keep only key/value pairs common to both dicts."""
        dicomMeta = {
            'key1': 'value1',
            'key2': 'value2',
            'key3': 'value3',
            'key4': 35,
            'key5': 54,
            'key6': 'commonVal',
            'uniqueKey1': 'commonVal'
        }
        additionalMeta = {
            'key1': 'value1',
            'key2': 'value2',
            'key3': 'value3',
            'key4': 35,
            'key5': 54,
            'key6': 'uniqueVal',
            'uniqueKey2': 'commonVal',
        }
        commonMeta = {
            'key1': 'value1',
            'key2': 'value2',
            'key3': 'value3',
            'key4': 35,
            'key5': 54
        }
        self.assertEqual(_removeUniqueMetadata(dicomMeta, additionalMeta), commonMeta)

    def testExtractFileData(self):
        """_extractFileData should keep only _id, name and the sort-relevant DICOM tags."""
        dicomFile = {
            '_id': '599c4cf3c9c5cb11f1ff5d97',
            'assetstoreId': '599c4a19c9c5cb11f1ff5d32',
            'creatorId': '5984b9fec9c5cb370447068c',
            'exts': ['dcm'],
            'itemId': '599c4cf3c9c5cb11f1ff5d96',
            'mimeType': 'application/dicom',
            'name': '000000.dcm',
            'size': 133356
        }
        dicomMeta = {
            'SeriesNumber': 1,
            'InstanceNumber': 1,
            'SliceLocation': 0
        }
        result = {
            '_id': '599c4cf3c9c5cb11f1ff5d97',
            'name': '000000.dcm',
            'dicom': {
                'SeriesNumber': 1,
                'InstanceNumber': 1,
                'SliceLocation': 0
            }
        }
        self.assertEqual(_extractFileData(dicomFile, dicomMeta), result)

    def testFileProcessHandler(self):
        """Uploading DICOM files should attach sorted 'dicom' data to the item."""
        admin, user = self.users
        # Create a collection, folder, and item
        collection = Collection().createCollection('collection1', admin, public=True)
        folder = Folder().createFolder(collection, 'folder1', parentType='collection', public=True)
        item = Item().createItem('item1', admin, folder)
        # Upload non-DICOM files
        self._uploadNonDicomFiles(item, admin)
        nonDicomItem = Item().load(item['_id'], force=True)
        self.assertIsNone(nonDicomItem.get('dicom'))
        # Upload DICOM files
        self._uploadDicomFiles(item, admin)
        # Check if the 'dicomItem' is well processed
        dicomItem = Item().load(item['_id'], force=True)
        self.assertIn('dicom', dicomItem)
        self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
        # Check if the files list contain the good keys and all the file are well sorted
        for i in range(0, 4):
            self.assertTrue('_id' in dicomItem['dicom']['files'][i])
            self.assertTrue('name' in dicomItem['dicom']['files'][i])
            self.assertEqual(dicomItem['dicom']['files'][i]['name'], 'dicomFile{}.dcm'.format(i))
            self.assertTrue('SeriesNumber' in dicomItem['dicom']['files'][i]['dicom'])
            self.assertTrue('InstanceNumber' in dicomItem['dicom']['files'][i]['dicom'])
            self.assertTrue('SliceLocation' in dicomItem['dicom']['files'][i]['dicom'])
        # Check the common metadata
        self.assertIsNotNone(dicomItem['dicom']['meta'])

    def testMakeDicomItem(self):
        """The parseDicom endpoint should rebuild dicom data (admin only)."""
        admin, user = self.users
        # create a collection, folder, and item
        collection = Collection().createCollection('collection2', admin, public=True)
        folder = Folder().createFolder(collection, 'folder2', parentType='collection', public=True)
        item = Item().createItem('item2', admin, folder)
        # Upload files
        self._uploadDicomFiles(item, admin)
        # Check the endpoint 'parseDicom' for an admin user
        dicomItem = Item().load(item['_id'], force=True)
        dicomItem = self._purgeDicomItem(dicomItem)
        path = '/item/%s/parseDicom' % dicomItem.get('_id')
        resp = self.request(path=path, method='POST', user=admin)
        self.assertStatusOk(resp)
        dicomItem = Item().load(item['_id'], force=True)
        self.assertIn('dicom', dicomItem)
        self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
        # Check the endpoint 'parseDicom' for an non admin user
        dicomItem = Item().load(item['_id'], force=True)
        dicomItem = self._purgeDicomItem(dicomItem)
        path = '/item/%s/parseDicom' % dicomItem.get('_id')
        resp = self.request(path=path, method='POST', user=user)
        self.assertStatus(resp, 403)

    def _uploadNonDicomFiles(self, item, user):
        """Upload a plain-text file that the plugin should ignore."""
        # Upload a fake file to check that the item is not traited
        nonDicomContent = b'hello world\n'
        ndcmFile = Upload().uploadFromFile(
            obj=six.BytesIO(nonDicomContent),
            size=len(nonDicomContent),
            name='nonDicom.txt',
            parentType='item',
            parent=item,
            mimeType='text/plain',
            user=user
        )
        self.assertIsNotNone(ndcmFile)

    def _uploadDicomFiles(self, item, user):
        """Upload the four sample DICOM files, waiting for the plugin handler each time."""
        from girder.plugins.dicom_viewer.event_helper import _EventHelper
        # Upload the files in the reverse order to check if they're well sorted
        for i in [1, 3, 0, 2]:
            file = os.path.join(self.dataDir, '00000%i.dcm' % i)
            with open(file, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
                dcmFile = Upload().uploadFromFile(
                    obj=fp,
                    size=os.path.getsize(file),
                    name='dicomFile{}.dcm'.format(i),
                    parentType='item',
                    parent=item,
                    mimeType='application/dicom',
                    user=user
                )
                self.assertIsNotNone(dcmFile)
                # Wait for handler success event
                handled = helper.wait()
                self.assertTrue(handled)

    def _purgeDicomItem(self, item):
        """Strip the plugin-generated 'dicom' key so parsing can be re-tested."""
        item.pop('dicom')
        return item

    def testSearchForDicomItem(self):
        """DICOM-mode resource search should match items by common metadata values."""
        admin, user = self.users
        # Create a collection, folder, and item
        collection = Collection().createCollection('collection3', admin, public=True)
        folder = Folder().createFolder(collection, 'folder3', parentType='collection', public=True)
        item = Item().createItem('item3', admin, folder)
        # Upload files
        self._uploadDicomFiles(item, admin)
        # Search for DICOM item with 'brain research' as common key/value
        resp = self.request(path='/resource/search', params={
            'q': 'brain research',
            'mode': 'dicom',
            'types': json.dumps(["item"])
        })
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json['item']), 1)
        self.assertEqual(resp.json['item'][0]['name'], 'item3')
        # Search for DICOM item with substring 'in resea' as common key/value
        resp = self.request(path='/resource/search', params={
            'q': 'in resea',
            'mode': 'dicom',
            'types': json.dumps(["item"])
        })
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json['item']), 1)
        self.assertEqual(resp.json['item'][0]['name'], 'item3')
        # TODO: Add test to search for a private DICOM item with an other user
        # this test should not found anything

    def testDicomWithIOError(self):
        """Files whose metadata parsing raises IOError should still be imported."""
        import pydicom
        from girder.plugins.dicom_viewer.event_helper import _EventHelper
        # One of the test files in the pydicom module will throw an IOError
        # when parsing metadata. We should work around that and still be able
        # to import the file
        samplePath = os.path.join(os.path.dirname(os.path.abspath(
            pydicom.__file__)), 'data', 'test_files', 'CT_small.dcm')
        admin, user = self.users
        # Create a collection, folder, and item
        collection = Collection().createCollection('collection4', admin, public=True)
        folder = Folder().createFolder(collection, 'folder4', parentType='collection', public=True)
        item = Item().createItem('item4', admin, folder)
        # Upload this dicom file
        with open(samplePath, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
            dcmFile = Upload().uploadFromFile(
                obj=fp,
                size=os.path.getsize(samplePath),
                name=os.path.basename(samplePath),
                parentType='item',
                parent=item,
                mimeType='application/dicom',
                user=user
            )
            self.assertIsNotNone(dcmFile)
            # Wait for handler success event
            handled = helper.wait()
            self.assertTrue(handled)
        # Check if the 'dicomItem' is well processed
        dicomItem = Item().load(item['_id'], force=True)
        self.assertIn('dicom', dicomItem)
        self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])

    def testDicomWithBinaryValues(self):
        """Files containing binary tag values should still be imported."""
        import pydicom
        from girder.plugins.dicom_viewer.event_helper import _EventHelper
        # One of the test files in the pydicom module will throw an IOError
        # when parsing metadata. We should work around that and still be able
        # to import the file
        samplePath = os.path.join(os.path.dirname(os.path.abspath(
            pydicom.__file__)), 'data', 'test_files', 'OBXXXX1A.dcm')
        admin, user = self.users
        # Create a collection, folder, and item
        collection = Collection().createCollection('collection5', admin, public=True)
        folder = Folder().createFolder(collection, 'folder5', parentType='collection', public=True)
        item = Item().createItem('item5', admin, folder)
        # Upload this dicom file
        with open(samplePath, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
            dcmFile = Upload().uploadFromFile(
                obj=fp,
                size=os.path.getsize(samplePath),
                name=os.path.basename(samplePath),
                parentType='item',
                parent=item,
                mimeType='application/dicom',
                user=user
            )
            self.assertIsNotNone(dcmFile)
            # Wait for handler success event
            handled = helper.wait()
            self.assertTrue(handled)
        # Check if the 'dicomItem' is well processed
        dicomItem = Item().load(item['_id'], force=True)
        self.assertIn('dicom', dicomItem)
        self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
| StarcoderdataPython |
3207184 | <reponame>bhatiadivij/kgtk<filename>examples/obtain_stats.py
import kgtk.gt.io_utils as gtio
import kgtk.gt.analysis_utils as gtanalysis
# Input/output locations for the Mowgli knowledge-graph dumps.
datadir='data/'
mowgli_nodes=f'{datadir}nodes_v002.csv'
mowgli_edges=f'{datadir}edges_v002.csv'
output_gml=f'{datadir}graph.graphml'
# NOTE(review): mowgli_nodes/mowgli_edges are defined but never used below --
# confirm whether they were meant to be loaded here.
# Load the pre-converted binary (.gt) form of the graph.
g=gtio.load_gt_graph(output_gml.replace(".graphml", '.gt'))
print('graph loaded. now computing centrality.')
# Compute pagerank, HITS and betweenness centrality over the loaded graph.
node_pagerank=gtanalysis.compute_pagerank(g)
print('pagerank computed')
hits=gtanalysis.compute_hits(g)
print('hits computed')
bt=gtanalysis.compute_betweenness(g)
print('bt computed')
| StarcoderdataPython |
40576 | from django.http import HttpResponseRedirect
from thedaily.models import OAuthState
from thedaily.views import get_or_create_user_profile
def get_phone_number(backend, uid, user=None, social=None, *args, **kwargs):
    """
    Social-auth pipeline step: when the user's profile lacks a phone number,
    persist the OAuth ``state`` for that user and redirect to the phone
    registration page; otherwise let the pipeline continue.
    """
    profile = get_or_create_user_profile(user)
    if profile.phone:
        # Nothing to collect -- fall through to the next pipeline step.
        return None
    state = kwargs['request'].GET['state']
    oas, created = OAuthState.objects.get_or_create(
        user=user,
        defaults={'state': state, 'fullname': kwargs['details'].get('fullname')},
    )
    if not created:
        # Refresh the stored state for a returning user.
        oas.state = state
        oas.save()
    query_params = '?is_new=1' if kwargs.get('is_new') else ''
    return HttpResponseRedirect('/usuarios/registrate/google/' + query_params)
| StarcoderdataPython |
1633821 | from typing import Optional
from .package_metadata import \
DamlModelInfo, \
IntegrationTypeFieldInfo, \
IntegrationTypeInfo, \
CatalogInfo, \
PackageMetadata, \
DABL_META_NAME, \
DIT_META_NAME, \
DIT_META_NAMES, \
DIT_META_KEY_NAME, \
TAG_EXPERIMENTAL, \
normalize_catalog, \
normalize_package_metadata, \
getIntegrationLogger
from .integration_runtime_spec import \
METADATA_COMMON_RUN_AS_PARTY, \
METADATA_TRIGGER_NAME, \
METADATA_INTEGRATION_ID, \
METADATA_INTEGRATION_TYPE_ID, \
METADATA_INTEGRATION_COMMENT, \
METADATA_INTEGRATION_ENABLED, \
METADATA_INTEGRATION_RUN_AS_PARTY, \
METADATA_INTEGRATION_RUNTIME, \
IntegrationRuntimeSpec
| StarcoderdataPython |
162011 | from __future__ import print_function
import sys
from PyQt4 import QtCore
from PyQt4 import QtGui
from startup_dialog_ui import Ui_startupDialog
from colorimeter import constants
from colorimeter.gui.basic import startBasicMainWindow
from colorimeter.gui.plot import startPlotMainWindow
from colorimeter.gui.measure import startMeasureMainWindow
class StartupDialog(QtGui.QDialog, Ui_startupDialog):
    """
    Modal start-up dialog letting the user choose which colorimeter
    application (basic, plot or measure) to launch.
    """

    def __init__(self, parent=None):
        super(StartupDialog, self).__init__(parent)
        self.setupUi(self)
        self.connectActions()
        self.initialize()
        self.setAppSize()

    def connectActions(self):
        """Wire each chooser button to its click callback."""
        self.basicPushButton.clicked.connect(self.basicPushButtonClicked_Callback)
        self.plotPushButton.clicked.connect(self.plotPushButtonClicked_Callback)
        self.measurePushButton.clicked.connect(self.measurePushButtonClicked_Callback)

    def initialize(self):
        # No launcher selected until a button is pressed.
        self.program = None

    def basicPushButtonClicked_Callback(self):
        self.program = startBasicMainWindow
        self.close()

    def plotPushButtonClicked_Callback(self):
        self.program = startPlotMainWindow
        self.close()

    def measurePushButtonClicked_Callback(self):
        self.program = startMeasureMainWindow
        self.close()

    def setAppSize(self):
        """Clamp the dialog to at most 90% of the available desktop area."""
        avail = QtGui.QApplication.desktop().availableGeometry()
        x, y = constants.START_POS_X, constants.START_POS_Y
        width = min(0.9 * (avail.width() - x), self.geometry().width())
        height = min(0.9 * (avail.height() - y), self.geometry().height())
        self.setGeometry(x, y, width, height)

    def run(self):
        """Show the dialog modally and return the chosen launcher (or None)."""
        self.show()
        self.raise_()
        self.exec_()
        return self.program
def startColorimeterApp():
    """Run the start-up dialog, then launch whichever GUI was selected."""
    app = QtGui.QApplication(sys.argv)
    chosen = StartupDialog().run()
    if chosen is not None:
        chosen(app)

# ---------------------------------------------------------------------------------
if __name__ == '__main__':
    startColorimeterApp()
| StarcoderdataPython |
1729247 | # Generated by Django 3.0.8 on 2020-09-03 00:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """
    Initial schema for the product app: Brand, Catagory, SubCatagory,
    category-specific attribute models (Clothing, Cosmetic, Cultural,
    Digital, HomeApplience), Product, Imageproduct and Comment.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Brand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='content/images')),
            ],
        ),
        migrations.CreateModel(
            name='Catagory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='content/images')),
            ],
        ),
        migrations.CreateModel(
            name='Clothing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size', models.CharField(blank=True, max_length=200)),
                # NOTE(review): BooleanField with string choices and max_length looks
                # wrong -- this was likely meant to be a CharField; confirm against models.py.
                ('gender', models.BooleanField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=5, null=True)),
                ('color', models.CharField(max_length=10)),
                ('country', models.CharField(blank=True, max_length=20, null=True)),
                ('material', models.CharField(blank=True, max_length=100, null=True)),
                ('weight', models.IntegerField(blank=True, null=True)),
                ('year', models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Cosmetic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('color', models.CharField(max_length=10)),
                # NOTE(review): same BooleanField/choices/max_length concern as Clothing.gender.
                ('gender', models.BooleanField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True)),
                ('smell', models.CharField(blank=True, max_length=300, null=True)),
                ('country', models.CharField(blank=True, max_length=20, null=True)),
                ('volume', models.IntegerField(blank=True, null=True)),
                ('year', models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Cultural',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creator', models.CharField(blank=True, max_length=50, null=True)),
                ('translator', models.CharField(blank=True, max_length=50, null=True)),
                ('publisher', models.CharField(blank=True, max_length=50, null=True)),
                ('gener', models.CharField(blank=True, max_length=50, null=True)),
                ('duration', models.DurationField()),
                ('year', models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Digital',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('screen_size', models.FloatField(blank=True, max_length=5, null=True)),
                ('color', models.CharField(blank=True, max_length=20, null=True)),
                ('ram', models.IntegerField(blank=True, null=True)),
                ('harddisck', models.IntegerField(blank=True, null=True)),
                ('screen_resolution', models.IntegerField(blank=True, null=True)),
                ('os', models.CharField(blank=True, choices=[('android', 'Android'), ('windows', 'Windows'), ('windows phone', 'Windows Phone'), ('linux', 'Linux'), ('ios', 'IOS')], max_length=15, null=True)),
                ('cpu', models.CharField(blank=True, max_length=20, null=True)),
                # NOTE(review): max_length is not a valid BooleanField option -- confirm.
                ('touch', models.BooleanField(blank=True, max_length=1, null=True)),
                ('weight', models.IntegerField(blank=True, null=True)),
                ('year', models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='HomeApplience',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('volume', models.CharField(blank=True, max_length=10, null=True)),
                ('color', models.CharField(blank=True, max_length=10, null=True)),
                ('weight', models.IntegerField(blank=True, null=True)),
                ('size', models.CharField(blank=True, max_length=30, null=True)),
                ('capacity', models.CharField(blank=True, max_length=100, null=True)),
                ('material', models.CharField(blank=True, max_length=100, null=True)),
                ('year', models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='SubCatagory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('catogory_name', models.CharField(max_length=100)),
                ('catagory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subcategory', to='product.Catagory')),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('number', models.CharField(max_length=100)),
                ('price', models.CharField(default=None, max_length=100)),
                ('descriptions', models.TextField(blank=True, max_length=2000, null=True)),
                ('add_time', models.DateField(auto_now_add=True, null=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='content/images')),
                ('brands', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='product.Brand')),
                ('category', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='product.Catagory')),
                ('clothing', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_clothing', to='product.Clothing')),
                ('cosmetic', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_cosmetic', to='product.Cosmetic')),
                ('cultural', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_cultural', to='product.Cultural')),
                ('digital', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_digital', to='product.Digital')),
                ('homeapplience', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_home', to='product.HomeApplience')),
                ('sub_catagory', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='product', to='product.SubCatagory')),
            ],
            options={
                'ordering': ['add_time'],
            },
        ),
        migrations.CreateModel(
            name='Imageproduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, null=True, upload_to='static/img')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Product')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(blank=True, max_length=1000, null=True)),
                ('product', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='product.Product')),
            ],
        ),
    ]
| StarcoderdataPython |
1796721 | """
Your chance to explore Loops and Turtles!
Authors: <NAME>, <NAME>, <NAME>, <NAME>,
their colleagues and <NAME>.
"""
###############################################################################
# DONE: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
###############################################################################
###############################################################################
# Done: 2.
# You should have RUN the m4e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
###############################################################################
import rosegraphics as rg
window = rg.TurtleWindow()
window.delay(10)
joye = rg.SimpleTurtle('classic')
oliver = rg.SimpleTurtle('turtle')
joye.pen = rg.Pen('black', 5)
oliver.pen = rg.Pen('red', 5)
joye.speed = 20
joye.pen_up()
joye.forward(150)
joye.left(90)
joye.pen_down()
oliver.speed = 15
oliver.pen_up()
oliver.forward(100)
oliver.left(90)
oliver.backward(50)
oliver.pen_down()
for i in range(3):
joye.draw_circle(50)
joye.pen_up()
joye.right(90)
joye.backward(100)
joye.left(90)
joye.pen_down()
for k in range(2):
oliver.draw_circle(50)
oliver.pen_up()
oliver.left(90)
oliver.forward(100)
oliver.right(90)
oliver.pen_down()
window.close_on_mouse_click()
| StarcoderdataPython |
89379 | <gh_stars>0
"""Provides a facade-like interface for easy access to ``tesliper``'s functionality.
There are some conventions that are important to note:
- ``tesliper`` stores multiple data entries of various types for each conformer. To
prevent confusion with Python's data ``type`` and with data itself, ``tesliper``
refers to specific kinds of data as "genres". Genres in code are represented by
specific strings, used as identifiers. To learn about data genres known to
``tesliper``, see documentation for
:class:`.GaussianParser`, which lists them.
- ``tesliper`` identifies conformers using stem of an extracted file (i.e. its filename
without extension). When files with identical names are extracted in course of
subsequent :meth:`.Tesliper.extract` calls or in recursive extraction using
``tesliper_object.extract(recursive=True)``, they are treated as data for one
conformer. This enables to join data from subsequent calculations steps, e.g. geometry
optimization, vibrational spectra simulation, and electronic spectra simulation.
Please note that if specific data genre is available from more than one calculation
job, only recently extracted values will be stored.
- ``tesliper`` was designed to deal with multiple conformers of single molecule and may
not work properly when used to process data concerning different molecules (i.e.
having different number of atoms, different number of degrees of freedom, etc.). If
you want to use it for such purpose anyway, you may set
:attr:`Tesliper.conformers.allow_data_inconsistency
< .Conformers.allow_data_inconsistency>` to ``True``.
``tesliper`` will then stop complaining and try to do its best.
"""
# IMPORTS
import logging as lgg
import os
from pathlib import Path
from typing import (
Callable,
Dict,
Generator,
Iterable,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from tesliper.glassware.spectra import SingleSpectrum
from . import datawork as dw
from . import extraction as ex
from . import glassware as gw
from . import writing as wr
from .datawork.spectra import FittingFunctionType, Number
_DEVELOPMENT = "ENV" in os.environ and os.environ["ENV"] == "prod"
# LOGGER
logger = lgg.getLogger(__name__)
mainhandler = lgg.StreamHandler()
mainhandler.setLevel(lgg.DEBUG)
mainhandler.setFormatter(
lgg.Formatter("%(levelname)s:%(name)s:%(funcName)s - %(message)s")
)
logger.setLevel(lgg.DEBUG if _DEVELOPMENT else lgg.WARNING)
logger.addHandler(mainhandler)
_activities_types = Union[
gw.VibrationalActivities,
gw.ScatteringActivities,
gw.ElectronicActivities,
]
# CLASSES
class Tesliper:
"""This class is a main access point to ``tesliper``'s functionality. It allows you
to extract data from specified files, provides a proxy to the trimming
functionality, gives access to data in form of specialized arrays, enables you
to calculate and average desired spectra, and provides an easy way to export data.
Most basic use might look like this:
>>> tslr = Tesliper()
>>> tslr.extract()
>>> tslr.calculate_spectra()
>>> tslr.average_spectra()
>>> tslr.export_averaged()
This extracts data from files in the current working directory, calculates
available spectra using standard parameters, averages them using available energy
values, and exports to current working directory in .txt format.
You can customize this process by specifying call parameters for used methods
and modifying :class:`Tesliper`'s configuration attributes:
- to change source directory or location of exported files instantiate
:class:`Tesliper` object with :attr:`input_dir` and :attr:`output_dir` parameters
specified, respectively. You can also set appropriate attributes on the instance
directly.
- To extract only selected files in :attr:`input_dir` use :attr:`wanted_files` init
parameter. It should be given an iterable of filenames you want to parse. Again,
you can also directly set an identically named attribute.
- To change parameters used for calculation of spectra, modify appropriate entries
of :attr:`parameters` attribute.
- Use other export methods to export more data and specify ``fmt`` parameter in
method's call to export to other file formats.
>>> tslr = Tesliper(input_dir="./myjob/optimization/", output_dir="./myjob/output/")
>>> tslr.wanted_files = ["one", "two", "three"] # only files with this names
>>> tslr.extract() # use tslr.input_dir as source
>>> tslr.extract(path="./myjob/vcd_sim/") # use other input_dir
>>> tslr.conformers.trim_not_optimized() # trimming out unwanted conformers
>>> tslr.parameters["vcd"].update({"start": 500, "stop": 2500, "width": 2})
>>> tslr.calculate_spectra(genres=["vcd"]) # we want only VCD spectrum
>>> tslr.average_spectra()
>>> tslr.export_averaged(mode="w") # overwrite previously exported files
>>> tslr.export_activities(fmt="csv") # save activities for analysis elsewhere
>>> tslr.output_dir = "./myjob/ecd_sim/"
>>> tslr.export_job_file( # prepare files for next step of calculations
... route="# td=(singlets,nstates=80) B3LYP/Def2TZVP"
... )
When modifying :attr:`Tesliper.parameters` be careful to not delete any of the
parameters. If you need to revert to standard parameters values, you can find them
in :attr:`Tesliper.standard_parameters`.
>>> tslr.parameters["ir"] = {
... "start": 500, "stop": 2500, "width": 2
... } # this will cause problems!
>>> tslr.parameters = tslr.standard_parameters # revert to default values
Trimming functionality, used in previous example in
``tslr.conformers.trim_not_optimized()``, allows you to filter out conformers that
shouldn't be used in further processing and analysis. You can trim off conformers
that were not optimized, contain imaginary frequencies, or have other unwanted
qualities. Conformers with similar geometry may be discarded using an RMSD sieve.
For more information about trimming, please refer to the documentation
of :class:`.Conformers` class.
For more exploratory analysis, :class:`Tesliper` provides an easy way to access
desired data as an instance of specialized
:class:`.DataArray` class. Those objects implement a
number of convenience methods for dealing with specific data genres. A more detailed
information on :class:`.DataArray` see
:mod:`.arrays` module documentation. To get data in this form use
``array = tslr["genre"]`` were ``"genre"`` is string with the name of desired data
genre. For more control over instantiation of
:class:`.DataArray` you may use
:meth:`Tesliper.conformers.arrayed <.Conformers.arrayed>` factory method.
>>> energies = tslr["gib"]
>>> energies.values
array([-304.17061762, -304.17232455, -304.17186735])
>>> energies.populations
array([0.0921304 , 0.56174031, 0.3461293 ])
>>> energies.full_name
'Thermal Free Energy'
Please note that if some conformers do not provide values for a specific data
genre, they will be ignored when retrieving data for
:class:`.DataArray` instantiation, regardless of whether they were
trimmed off or not.
>>> tslr = Tesliper()
>>> tslr.conformers.update([
>>> ... ('one', {'gib': -304.17061762}),
>>> ... ('two', {'gib': -304.17232455}),
>>> ... ('three', {'gib': -304.17186735}),
>>> ... ('four', {})
>>> ... ])
>>> tslr.conformers.kept
[True, True, True, True]
>>> energies = tslr["gib"]
>>> energies.filenames
array(['one', 'two', 'three'], dtype='<U5')
Attributes
----------
conformers : Conformers
Container for data extracted from Gaussian output files. It provides trimming
functionality, enabling to filter out conformers of unwanted qualities.
spectra : dict of str: Spectra
Spectra calculated so far, using :meth:`.calculate_spectra` method.
Possible keys are spectra genres: "ir", "vcd", "uv", "ecd", "raman", and "roa".
Values are :class:`.Spectra` instances with lastly
calculated spectra of this genre.
averaged : dict of str: (dict of str: float or callable)
Spectra averaged using available energies genres, calculated with last call
to :meth:`.average_spectra` method. Keys are tuples of two strings: averaged
spectra genre and energies genre used for averaging.
experimental : dict of str: Spectra
Experimental spectra loaded from disk.
Possible keys are spectra genres: "ir", "vcd", "uv", "ecd", "raman", and "roa".
Values are :class:`.Spectra` instances with experimental spectra of this genre.
quantum_software : str
A name, lower case, of the quantum chemical computations software used to obtain
data. Used by ``tesliper`` to figure out, which parser to use to extract data,
if custom parsers are available. Only "gaussian" is supported out-of-the-box.
parameters : dict of str: (dict of str: float or callable)
Parameters for calculation of each spectra genres: "ir", "vcd", "uv", "ecd",
"raman", and "roa". Avaliable parameters are:
- "start": float or int, the beginning of the spectral range,
- "stop": float or int, the end of the spectral range,
- "step": float or int, step of the abscissa,
- "width": float or int, width of the peak,
- "fitting": callable, function used to simulate peaks as curves, preferably
one of :func:`datawork.gaussian <.gaussian>` or :func:`datawork.lorentzian
<.lorentzian>`.
"start", "stop", and "step" expect its values to by in cm^-1 units for
vibrational and scattering spectra, and nm units for electronic spectra.
"width" expects its value to be in cm^-1 units for vibrational and scattering
spectra, and eV units for electronic spectra.
"""
# TODO?: add proxy for trimming ?
# TODO?: make it inherit mapping ?
# Default peak-broadening parameters per spectra genre. Per the class
# docstring: "start"/"stop"/"step" are cm^-1 for vibrational/scattering
# genres and nm for electronic genres; "width" is cm^-1 or eV respectively.
_standard_parameters = {
    "ir": {
        "width": 6,
        "start": 800,
        "stop": 2900,
        "step": 2,
        "fitting": dw.lorentzian,
    },
    "uv": {
        "width": 0.35,
        "start": 150,
        "stop": 800,
        "step": 1,
        "fitting": dw.gaussian,
    },
}
# Vibrational/scattering genres share the "ir" defaults and electronic
# genres share the "uv" defaults; .copy() keeps each entry independent,
# so mutating one genre's defaults does not leak into the others.
_standard_parameters["vcd"] = _standard_parameters["ir"].copy()
_standard_parameters["raman"] = _standard_parameters["ir"].copy()
_standard_parameters["roa"] = _standard_parameters["ir"].copy()
_standard_parameters["ecd"] = _standard_parameters["uv"].copy()
# TODO: introduce more sophisticated parameters proxy that enables using
# same or different params for genres of same type (e.g. "vibrational")
def __init__(
    self,
    input_dir: str = ".",
    output_dir: str = ".",
    wanted_files: Optional[Iterable[Union[str, Path]]] = None,
    quantum_software: str = "gaussian",
):
    """Set up an empty Tesliper session.

    Parameters
    ----------
    input_dir : str or path-like object, optional
        Directory containing files for extraction; defaults to the current
        working directory.
    output_dir : str or path-like object, optional
        Directory for generated output files; defaults to the current
        working directory.
    wanted_files : list of str or list of Path, optional
        Files (or bare filenames, extensions ignored) that should be
        extracted. When omitted, every file is considered wanted.
    quantum_software : str
        Name of the quantum chemistry software that produced the data;
        determines which parser is used during extraction.
    """
    # Empty containers for extracted data and calculation results.
    self.conformers = gw.Conformers()
    self.spectra = {}
    self.averaged = {}
    self.experimental = {}
    # Properties with validating setters.
    self.wanted_files = wanted_files
    self.input_dir = input_dir
    self.output_dir = output_dir
    # Start from a private copy of the default broadening parameters.
    self.parameters = self.standard_parameters
    self.quantum_software = quantum_software.lower()
    # Warn early when no parser is registered for the requested software.
    if self.quantum_software not in ex.parser_base._PARSERS:
        logger.warning(
            f"Unsupported quantum chemistry software: {quantum_software}. "
            "Automatic data extraction will not be available."
        )
def __getitem__(self, item: str) -> gw.conformers.AnyArray:
    """Return the data array for genre *item*; raise KeyError when unknown."""
    try:
        array = self.conformers.arrayed(item)
    except ValueError:
        # Translate Conformers' ValueError into the mapping-protocol error.
        raise KeyError(f"Unknown genre '{item}'.")
    return array
def clear(self):
    """Discard every piece of stored data, resetting the instance to defaults."""
    self.conformers.clear()
    self.wanted_files = []
    self.input_dir = ""
    self.output_dir = ""
    self.spectra = {}
    self.averaged = {}
    self.experimental = {}
    self.parameters = self.standard_parameters
@property
def temperature(self) -> float:
    """Temperature of the system expressed in Kelvin units.

    Value of this parameter is passed to :term:`data array`\\s created with the
    :meth:`.Conformers.arrayed` method, provided that the target data array class
    supports a parameter named *t* in it's constructor.

    .. versionadded:: 0.9.1

    Raises
    ------
    ValueError
        if set to a value lower than zero.

    Notes
    -----
    It's actually just a proxy to :meth:`self.conformers.temperature
    <.Conformers.temperature>`.
    """
    return self.conformers.temperature

@temperature.setter
def temperature(self, value):
    # Validation (value > 0) is delegated to Conformers.temperature.
    self.conformers.temperature = value
@property
def energies(self) -> Dict[str, gw.Energies]:
    """Data for each energies' genre as an :class:`.Energies` data array.

    Returns a dictionary of form ``{"genre": Energies}`` for each genre
    associated with the :class:`.Energies` class ("scf", "zpe", "ten",
    "ent", and "gib"). Genres with no available values yield an empty
    :class:`.Energies` array as the corresponding dictionary value.

    Returns
    -------
    dict
        Genre names mapped to :class:`.Energies` data arrays.
    """
    collected = {}
    for genre in gw.Energies.associated_genres:
        collected[genre] = self[genre]
    return collected
@property
def activities(self) -> Dict[str, _activities_types]:
    """Default spectral activities as appropriate
    :class:`.SpectralActivities` subclass instances.

    Returns a dictionary of form ``{"genre": SpectralActivities}`` for each
    of the default activities genres: "dip", "rot", "vosc", "vrot",
    "raman1", and "roa1". Genres with no available values yield an empty
    data array as the corresponding dictionary value.

    Returns
    -------
    dict
        Genre names mapped to :class:`.SpectralActivities` data arrays.
    """
    collected = {}
    for genre in dw.DEFAULT_ACTIVITIES.values():
        collected[genre] = self[genre]
    return collected
@property
def wanted_files(self) -> Optional[Set[str]]:
    """Set of files desired for data extraction, stored as filename stems
    (no directory, no extension). Any iterable of strings or Path objects
    assigned to this attribute is normalized to that form.

    >>> tslr = Tesliper()
    >>> tslr.wanted_files = [Path("./dir/file_one.out"), Path("./dir/file_two.out")]
    >>> tslr.wanted_files
    {"file_one", "file_two"}

    Assigning ``None`` or any other "falsy" value disables the filter.
    """
    return self._wanted_files

@wanted_files.setter
def wanted_files(self, files: Optional[Iterable[Union[str, Path]]]):
    if not files:
        # Falsy assignment means "no filter".
        self._wanted_files = None
    else:
        self._wanted_files = {Path(entry).stem for entry in files}
@property
def standard_parameters(self) -> Dict[str, Dict[str, Union[int, float, Callable]]]:
    """Default spectra-calculation parameters for each spectra genre
    (ir, vcd, uv, ecd, raman, roa). A fresh copy is returned on every
    access, so this is effectively a read-only convenience attribute:
    mutating the returned dictionary has no persistent effect.
    """
    defaults = {}
    for genre, params in self._standard_parameters.items():
        defaults[genre] = params.copy()
    return defaults
def update(self, other: Optional[Dict[str, dict]] = None, **kwargs):
    """Update stored conformers with given data.

    Works like ``dict.update``, but if key is already present, it updates
    dictionary associated with given key rather than assigning new value.
    Keys of dictionary passed as positional parameter (or additional keyword
    arguments given) should be conformers' identifiers and its values should be
    dictionaries of ``{"genre": values}`` for those conformers.

    Please note, that values of status genres like 'optimization_completed'
    and 'normal_termination' will be updated as well for such key,
    if are present in given new values.

    >>> tslr.conformers
    Conformers([('one', {'scf': -100, 'stoichiometry': 'CH4'})])
    >>> tslr.update(
    ...     {'one': {'scf': 97}, 'two': {'scf': 82, 'stoichiometry': 'CH4'}}
    ... )
    >>> tslr.conformers
    Conformers([
        ('one', {'scf': 97, 'stoichiometry': 'CH4'}),
        ('two', {'scf': 82, 'stoichiometry': 'CH4'}),
    ])
    """
    # Pure delegation: the per-key merge semantics live in Conformers.update.
    self.conformers.update(other, **kwargs)
@property
def input_dir(self) -> Path:
    """Directory, from which files should be read."""
    return self.__input_dir

@input_dir.setter
def input_dir(self, path: Union[Path, str] = "."):
    resolved = Path(path).resolve()
    if resolved.is_dir():
        logger.info("Current working directory is: {}".format(resolved))
        self.__input_dir = resolved
    else:
        # Reject paths that do not point at an existing directory.
        raise FileNotFoundError(
            "Invalid path or directory not found: {}".format(resolved)
        )
@property
def output_dir(self) -> Path:
    """Directory, to which generated files should be written."""
    return self.__output_dir

@output_dir.setter
def output_dir(self, path: Union[Path, str] = "."):
    resolved = Path(path).resolve()
    # Unlike input_dir, the output directory is created when missing.
    resolved.mkdir(exist_ok=True)
    logger.info("Current output directory is: {}".format(resolved))
    self.__output_dir = resolved
def extract_iterate(
    self,
    path: Optional[Union[str, Path]] = None,
    wanted_files: Optional[Iterable[str]] = None,
    extension: Optional[str] = None,
    recursive: bool = False,
) -> Generator[Tuple[str, dict], None, None]:
    """Extracts data from chosen Gaussian output files present in given directory
    and yields data for each conformer found. Extracted data is also stored in
    :attr:`Tesliper.conformers` as a side effect.

    Uses :attr:`Tesliper.input_dir` as source directory and
    :attr:`Tesliper.wanted_files` list of chosen files if these are not explicitly
    given as 'path' and 'wanted_files' parameters.

    Parameters
    ----------
    path : str or pathlib.Path, optional
        Path to directory, from which Gaussian files should be read.
        If not given or is ``None``, :attr:`Tesliper.input_dir` will be used.
    wanted_files : list of str, optional
        Filenames (without a file extension) of conformers that should be extracted.
        If not given or is ``None``, :attr:`Tesliper.wanted_files` will be used. If
        :attr:`Tesliper.wanted_files` is also ``None``, all found Gaussian output
        files will be parsed.
    extension : str, optional
        Only files with given extension will be parsed. If omitted, Tesliper will
        try to guess the extension from contents of input directory.
    recursive : bool
        If ``True``, also subdirectories are searched for files to parse, otherwise
        subdirectories are ignored. Defaults to ``False``.

    Yields
    ------
    tuple
        Two item tuple with name of parsed file as first and extracted
        data as second item, for each Gaussian output file parsed.
    """
    soxhlet = ex.Soxhlet(
        path=path or self.input_dir,
        purpose=self.quantum_software,
        wanted_files=wanted_files or self.wanted_files,
        extension=extension,
        recursive=recursive,
    )
    for file, data in soxhlet.extract_iter():
        # Merge into stored conformers before yielding, so data is kept
        # even if the caller abandons the generator mid-iteration.
        self.update(((file, data),))
        yield file, data
def extract(
    self,
    path: Optional[Union[str, Path]] = None,
    wanted_files: Optional[Iterable[str]] = None,
    extension: Optional[str] = None,
    recursive: bool = False,
):
    """Extracts data from chosen Gaussian output files present in given directory.
    Extracted data is stored in :attr:`Tesliper.conformers`.

    Uses :attr:`Tesliper.input_dir` as source directory and
    :attr:`Tesliper.wanted_files` list of chosen files if these are not explicitly
    given as *path* and *wanted_files* parameters.

    Parameters
    ----------
    path : str or pathlib.Path, optional
        Path to directory, from which Gaussian files should be read.
        If not given or is ``None``, :attr:`Tesliper.input_dir` will be used.
    wanted_files : list of str, optional
        Filenames (without a file extension) of conformers that should be extracted.
        If not given or is ``None``, :attr:`Tesliper.wanted_files` will be used.
    extension : str, optional
        Only files with given extension will be parsed. If omitted, Tesliper will
        try to guess the extension from contents of input directory.
    recursive : bool
        If ``True``, also subdirectories are searched for files to parse, otherwise
        subdirectories are ignored. Defaults to ``False``.
    """
    # Drain the generator for its side effect: extract_iterate already
    # stores each parsed file's data in self.conformers.
    for _ in self.extract_iterate(path, wanted_files, extension, recursive):
        pass
def load_parameters(
    self,
    path: Union[str, Path],
    spectra_genre: Optional[str] = None,
) -> dict:
    """Load calculation parameters from a file.

    Parameters
    ----------
    path : str or pathlib.Path, optional
        Path to the file with desired parameters specification.
    spectra_genre : str, optional
        Genre of spectra that loaded parameters concern. When given (one of
        "ir", "vcd", "uv", "ecd", "raman", or "roa"), stored parameters for
        that genre are updated with the loaded values; otherwise the parsed
        data is only returned.

    Returns
    -------
    dict
        Parameters read from the file.

    Notes
    -----
    For information on the supported format of a parameters configuration
    file, please refer to :class:`.ParametersParser` documentation.
    """
    reader = ex.Soxhlet(self.input_dir, purpose="parameters")
    loaded = reader.parse_one(path)
    if spectra_genre is not None:
        self.parameters[spectra_genre].update(loaded)
    return loaded
def load_experimental(
    self,
    path: Union[str, Path],
    spectrum_genre: str,
) -> SingleSpectrum:
    """Load an experimental spectrum from a file and store it as a
    :class:`.SingleSpectrum` in :attr:`.Tesliper.experimental` under the
    *spectrum_genre* key.

    Parameters
    ----------
    path : str or pathlib.Path
        Path to the file with experimental spectrum.
    spectrum_genre : str
        Genre of the loaded experimental spectrum. Should be one of
        "ir", "vcd", "uv", "ecd", "raman", or "roa".

    Returns
    -------
    SingleSpectrum
        Experimental spectrum loaded from the file.
    """
    reader = ex.Soxhlet(self.input_dir, purpose="spectra")
    parsed = reader.parse_one(path)
    # parse_one yields the abscissa as the first row and values as the second.
    spectrum = gw.SingleSpectrum(
        genre=spectrum_genre, values=parsed[1], abscissa=parsed[0]
    )
    self.experimental[spectrum_genre] = spectrum
    return spectrum
def calculate_single_spectrum(
    self,
    genre: str,
    conformer: Union[str, int],
    start: Number = None,
    stop: Number = None,
    step: Number = None,
    width: Number = None,
    fitting: FittingFunctionType = None,
) -> gw.SingleSpectrum:
    """Calculate a spectrum for a single requested conformer.

    Explicitly given *start*, *stop*, *step*, *width*, and *fitting* values
    override the corresponding entries stored in :attr:`Tesliper.parameters`.
    *start*, *stop*, and *step* are interpreted as cm^-1 for vibrational or
    scattering spectra/activities and as nm for electronic ones; *width* as
    cm^-1 or eV, respectively.

    Parameters
    ----------
    genre : str
        Spectra genre (or related spectral activities genre) to calculate.
        Given an activities genre, that genre is used instead of the default
        activities.
    conformer : str or int
        Identifier or index of the conformer to calculate the spectrum for.
    start : int or float, optional
        Beginning of the spectral range.
    stop : int or float, optional
        End of the spectral range.
    step : int or float, optional
        Step of the abscissa.
    width : int or float, optional
        Half width of maximum peak height.
    fitting : function, optional
        Function taking spectral data, frequencies, abscissa, and width,
        returning a numpy.array of non-corrected spectrum points; typically
        :func:`datawork.gaussian <.gaussian>` or
        :func:`datawork.lorentzian <.lorentzian>`.

    Returns
    -------
    SingleSpectrum
        Calculated spectrum.
    """
    # Map a spectra genre to its default activities genre; an activities
    # genre passed directly is used as-is.
    if genre in dw.DEFAULT_ACTIVITIES:
        activities_genre = dw.DEFAULT_ACTIVITIES[genre]
    else:
        activities_genre = genre
    with self.conformers.trimmed_to([conformer]):
        activities = self[activities_genre]
    # Stored parameters first, then explicit (non-None) overrides on top.
    settings = self.parameters[activities.spectra_name].copy()
    overrides = zip(
        ("start", "stop", "step", "width", "fitting"),
        (start, stop, step, width, fitting),
    )
    settings.update({name: value for name, value in overrides if value is not None})
    spectra = activities.calculate_spectra(**settings)
    # TODO: maybe Spectra class should provide such conversion ?
    return gw.SingleSpectrum(
        spectra.genre,
        spectra.values[0],
        spectra.abscissa,
        spectra.width,
        spectra.fitting,
        scaling=spectra.scaling,
        offset=spectra.offset,
        filenames=spectra.filenames,
    )
def calculate_spectra(self, genres: Iterable[str] = ()) -> Dict[str, gw.Spectra]:
    """Calculate spectra for each requested genre using parameters stored
    in :attr:`Tesliper.parameters`.

    Parameters
    ----------
    genres : iterable of str
        Spectra genres (or related spectral activities genres) to calculate.
        A given activities genre is used instead of the default activities.
        An empty sequence (default) calculates all available spectra from
        the default activities.

    Returns
    -------
    dict of str: Spectra
        Calculated spectra genres mapped to :class:`.Spectra` objects.
    """
    if genres:
        # Accept a whitespace-separated string as well as an iterable;
        # translate spectra genres to activities genres and de-duplicate.
        requested = genres.split() if isinstance(genres, str) else genres
        mapping = dw.DEFAULT_ACTIVITIES
        resolved = {mapping[g] if g in mapping else g for g in requested}
        arrays = [self[g] for g in resolved]
    else:
        # Default activities, skipping empty arrays.
        arrays = [a for a in self.activities.values() if a]
    calculated = {}
    for array in arrays:
        spectra = array.calculate_spectra(**self.parameters[array.spectra_name])
        if not spectra:
            # should empty spectra be included in output?
            logger.warning(
                f"No data for {array.spectra_name} calculation; "
                f"appropriate data is not available or was trimmed off."
            )
            continue
        calculated[array.spectra_name] = spectra
    self.spectra.update(calculated)
    return calculated
def get_averaged_spectrum(
    self, spectrum: str, energy: str, temperature: Optional[float] = None
) -> gw.SingleSpectrum:
    """Average previously calculated spectra using populations derived from
    the specified energies genre.

    .. versionadded:: 0.9.1
        The optional *temperature* parameter.
    .. versionchanged:: 0.9.1
        If the spectra needed for averaging were not calculated so far,
        they are calculated on the fly instead of raising a KeyError.

    Parameters
    ----------
    spectrum : str
        Genre of spectrum to average; normally calculated beforehand with
        :meth:`.calculate_spectra`.
    energy : str
        Genre of energies used to derive conformer populations, which serve
        as averaging weights.
    temperature : float, optional
        Temperature for the Boltzmann distribution used in averaging.
        Defaults to :meth:`Tesliper.temperature` when omitted.

    Returns
    -------
    SingleSpectrum
        Calculated averaged spectrum.

    Raises
    ------
    ValueError
        If no data for calculation of the requested spectrum is available.
    """
    try:
        spectra = self.spectra[spectrum]
    except KeyError:
        # Not calculated yet -- do it now from the default activities.
        activities = self[dw.DEFAULT_ACTIVITIES[spectrum]]
        spectra = activities.calculate_spectra(**self.parameters[spectrum])
    if not spectra:
        raise ValueError(
            f"No data for {spectrum} calculation; "
            f"appropriate data is not available or was trimmed off."
        )
    # Average only over conformers that actually contributed to the spectra.
    with self.conformers.trimmed_to(spectra.filenames):
        if temperature is None:
            energies = self[energy]
        else:
            energies = self.conformers.arrayed(genre=energy, t=temperature)
        averaged = spectra.average(energies)
    return averaged
def average_spectra(self) -> Dict[Tuple[str, str], gw.SingleSpectrum]:
    """Average each previously calculated spectra (stored in
    :attr:`Tesliper.spectra`) with populations derived from every available
    energies genre.

    Returns
    -------
    dict
        Averaged spectra keyed by ``("spectra_genre", "energies_genre")``
        tuples, with :class:`.SingleSpectrum` instances as values.
    """
    for spectra_genre, spectra in self.spectra.items():
        # Restrict populations to conformers present in these spectra.
        with self.conformers.trimmed_to(spectra.filenames):
            for energies in self.energies.values():
                if not energies:
                    continue
                key = (spectra_genre, energies.genre)
                self.averaged[key] = spectra.average(energies)
    return self.averaged
def export_data(self, genres: Sequence[str], fmt: str = "txt", mode: str = "x"):
    """Save specified data genres to disk in the given file format.

    Default formats are "txt", "csv", "xlsx", and "gjf". Not every format
    supports every genre (e.g. only :class:`.Geometry`-related genres can be
    written to .gjf); unsupported genres are ignored by the writer. Files go
    to :attr:`Tesliper.output_dir` with names generated from the genre and
    conformer identifiers; "xlsx" produces a single file with one sheet per
    genre. Genres with no values produce no files.

    Parameters
    ----------
    genres : list of str
        Genre names to save to disk.
    fmt : str
        File format of output files, defaults to "txt".
    mode : str
        How writing to file is handled: "a" (append), "x" (write only if
        the file doesn't exist yet), or "w" (overwrite). Defaults to "x".
    """
    writer = wr.writer(fmt=fmt, destination=self.output_dir, mode=mode)
    arrays = [arr for arr in (self[g] for g in genres) if arr]
    # Band data is required alongside vibrational/electronic genres.
    if any(isinstance(arr, gw.arrays._VibData) for arr in arrays):
        arrays += [self["freq"]]
    if any(isinstance(arr, (gw.ElectronicData, gw.Transitions)) for arr in arrays):
        arrays += [self["wavelen"]]
    writer.write(arrays)
def export_energies(self, fmt: str = "txt", mode: str = "x"):
    """Save energies and population data to disk in the given file format.

    Default formats are "txt", "csv", and "xlsx". Files go to
    :attr:`Tesliper.output_dir` with names generated from the genre and
    conformer identifiers; "xlsx" produces a single file with one sheet per
    genre.

    Parameters
    ----------
    fmt : str
        File format of output files, defaults to "txt".
    mode : str
        How writing to file is handled: "a" (append), "x" (write only if
        the file doesn't exist yet), or "w" (overwrite). Defaults to "x".
    """
    writer = wr.writer(fmt=fmt, destination=self.output_dir, mode=mode)
    energies = [en for en in self.energies.values() if en]
    to_write = list(energies)
    to_write.append(self["freq"])
    to_write.append(self["stoichiometry"])
    # Energy corrections exist for every genre except "scf".
    for en in energies:
        if en.genre != "scf":
            to_write.append(self[f"{en.genre}corr"])
    writer.write(data=to_write)
def export_spectral_data(self, fmt: str = "txt", mode: str = "x"):
    """Save unprocessed spectral data to disk in the given file format.

    Default formats are "txt", "csv", and "xlsx". Files go to
    :attr:`Tesliper.output_dir` with names generated from the genre and
    conformer identifiers; "xlsx" produces a single file with one sheet per
    genre.

    Parameters
    ----------
    fmt : str
        File format of output files, defaults to "txt".
    mode : str
        How writing to file is handled: "a" (append), "x" (write only if
        the file doesn't exist yet), or "w" (overwrite). Defaults to "x".
    """
    writer = wr.writer(fmt=fmt, destination=self.output_dir, mode=mode)
    genres = (
        *gw.VibrationalData.associated_genres,
        *gw.ElectronicData.associated_genres,
        *gw.ScatteringData.associated_genres,
    )
    # Skip empty DataArrays; append whichever band arrays are available.
    to_write = [arr for arr in (self[g] for g in genres) if arr]
    to_write += [band for band in (self["freq"], self["wavelen"]) if band]
    writer.write(to_write)
def export_activities(self, fmt: str = "txt", mode: str = "x"):
    """Saves unprocessed spectral activities to disk in given file format.

    File formats available by default are: "txt", "csv", "xlsx". Files produced are
    written to :attr:`Tesliper.output_dir` directory with filenames automatically
    generated using adequate genre's name and conformers' identifiers. In case of
    "xlsx" format only one file is produced and different data genres are written to
    separate sheets.

    Parameters
    ----------
    fmt : str
        File format of output files, defaults to "txt".
    mode : str
        Specifies how writing to file should be handled. May be one of:
        "a" (append to existing file), "x" (only write if file doesn't exist yet),
        "w" (overwrite file if it already exists). Defaults to "x".
    """
    wrt = wr.writer(fmt=fmt, destination=self.output_dir, mode=mode)
    # Abscissa arrays matching vibrational ("freq") and electronic ("wavelen") data.
    bands = [self["freq"], self["wavelen"]]
    # Every activity genre known to the three activities classes.
    genres = (
        *gw.VibrationalActivities.associated_genres,
        *gw.ElectronicActivities.associated_genres,
        *gw.ScatteringActivities.associated_genres,
    )
    data = (self[g] for g in genres)
    data = [d for d in data if d]  # ignore empty DataArrays
    data += [b for b in bands if b]  # likewise skip empty band arrays
    wrt.write(data)
def export_spectra(self, fmt: str = "txt", mode: str = "x"):
"""Saves spectra calculated previously to disk in given file format.
File formats available by default are: "txt", "csv", "xlsx". Files produced are
written to :attr:`Tesliper.output_dir` directory with filenames automatically
generated using adequate genre's name and conformers' identifiers. In case of
"xlsx" format only one file is produced and different data genres are written to
separate sheets.
Parameters
----------
fmt : str
File format of output files, defaults to "txt".
mode : str
Specifies how writing to file should be handled. May be one of:
"a" (append to existing file), "x" (only write if file doesn't exist yet),
"w" (overwrite file if it already exists). Defaults to "x".
"""
wrt = wr.writer(fmt=fmt, destination=self.output_dir, mode=mode)
data = [s for s in self.spectra.values() if s]
wrt.write(data)
def export_averaged(self, fmt: str = "txt", mode: str = "x"):
"""Saves spectra calculated and averaged previously to disk
in given file format.
File formats available by default are: "txt", "csv", "xlsx". Files produced are
written to :attr:`Tesliper.output_dir` directory with filenames automatically
generated using adequate genre's name and conformers' identifiers. In case of
"xlsx" format only one file is produced and different data genres are written to
separate sheets.
Parameters
----------
fmt : str
File format of output files, defaults to "txt".
mode : str
Specifies how writing to file should be handled. May be one of:
"a" (append to existing file), "x" (only write if file doesn't exist yet),
"w" (overwrite file if it already exists). Defaults to "x".
"""
wrt = wr.writer(fmt=fmt, destination=self.output_dir, mode=mode)
data = [s for s in self.averaged.values() if s]
wrt.write(data)
def export_job_file(
    self,
    fmt: str = "gjf",
    mode: str = "x",
    geometry_genre: str = "last_read_geom",
    **kwargs,
):
    """Saves conformers to disk as job files for quantum chemistry software
    in given file format.

    Currently only "gjf" format is provided, used by Gaussian software. Files
    produced are written to :attr:`Tesliper.output_dir` directory with filenames
    automatically generated using conformers' identifiers.

    Parameters
    ----------
    fmt : str
        File format of output files, defaults to "gjf".
    mode : str
        Specifies how writing to file should be handled. May be one of:
        "a" (append to existing file), "x" (only write if file doesn't exist yet),
        "w" (overwrite file if it already exists). Defaults to "x".
    geometry_genre : str
        Name of the data genre representing conformers' geometry that should be used
        as input geometry. Please note that the default value "last_read_geom" is
        not necessarily an optimized geometry. Use "optimized_geom" if this is what
        you need.
    kwargs
        Any additional keyword parameters are passed to the writer object, relevant
        to the *fmt* requested. Keyword supported by the default
        :class:`"gjf"-format writer <.GjfWriter>` are as follows:

        route
            A calculations route: keywords specifying calculations directives
            for quantum chemical calculations software.
        link0
            Dictionary with "link zero" commands, where each key is command's
            name and each value is this command's parameter.
        comment
            Contents of title section, i.e. a comment about the calculations.
        post_spec
            Anything that should be placed after conformer's geometry
            specification. Will be written to the file as given.
    """
    # Writer-specific options (route, link0, ...) are forwarded at construction time.
    wrt = wr.writer(
        fmt=fmt,
        destination=self.output_dir,
        mode=mode,
        **kwargs,
    )
    # Geometry plus per-conformer charge/multiplicity form a complete job input.
    wrt.geometry(
        geometry=self[geometry_genre],
        multiplicity=self["multiplicity"],
        charge=self["charge"],
    )
def serialize(self, filename: str = ".tslr", mode: str = "x") -> None:
    """Dump this :class:`Tesliper` instance to *filename* inside
    :attr:`.output_dir`.

    Parameters
    ----------
    filename: str
        Target file name, ".tslr" by default.
    mode: str
        Either "x" (fail if the file already exists, default) or
        "w" (overwrite).

    Raises
    ------
    ValueError
        When *mode* is anything other than "x" or "w".

    Notes
    -----
    When :attr:`.output_dir` is ``None``, the current working directory
    is used.
    """
    # Validate the mode up front, before touching the filesystem.
    if mode not in ("x", "w"):
        raise ValueError(
            f"'{mode}' is not a valid mode for serializing Tesliper object. "
            f"It should be 'x' or 'w'."
        )
    target = self.output_dir / filename
    archive = wr.ArchiveWriter(destination=target, mode=mode)
    archive.write(self)
@classmethod
def load(cls, source: Union[Path, str]) -> "Tesliper":
    """Recreate a :class:`Tesliper` instance from a file produced by
    :meth:`.serialize`.

    Parameters
    ----------
    source: pathlib.Path or str
        Location of the serialized Tesliper object.

    Returns
    -------
    Tesliper
        A fresh instance populated with the archived data.
    """
    archive = wr.ArchiveLoader(source=Path(source))
    return archive.load()
| StarcoderdataPython |
11534 | <reponame>hehaoqian/romt<filename>src/romt/manifest.py
#!/usr/bin/env python3
# coding=utf-8
import copy
from pathlib import Path
from typing import (
Any,
Generator,
Iterable,
List,
MutableMapping,
Optional,
)
import toml
from romt import error
def target_matches_any(target: str, expected_targets: Iterable[str]) -> bool:
    """Return ``True`` when *target* matches any entry of *expected_targets*.

    The wildcard ``"*"`` matches everything, whether it appears as *target*
    itself or as one of the expected targets.
    """
    if target == "*":
        return True
    return any(expected == target or expected == "*" for expected in expected_targets)
class Package:
    """A single (name, target) entry of a Rust release manifest.

    Wraps the raw per-target details dict, exposing availability and the
    artifact's path relative to the ``/dist/`` root of the download server.
    """

    def __init__(
        self, name: str, target: str, details: MutableMapping[str, Any]
    ):
        self.name = name
        self.target = target
        self.available = details["available"]
        # Manifest entries without an artifact simply omit "xz_url".
        self.xz_url = details.get("xz_url", "")

    @property
    def has_rel_path(self) -> bool:
        """Whether this entry carries a downloadable ``xz_url``."""
        return bool(self.xz_url)

    @property
    def rel_path(self) -> str:
        """Artifact path relative to ``/dist/``; raises if no URL is known."""
        if not self.has_rel_path:
            raise ValueError(
                "Package {}/{} missing xz_url".format(self.name, self.target)
            )
        marker = "/dist/"
        # str.index raises ValueError when "/dist/" is absent, matching the
        # original behavior for malformed URLs.
        start = self.xz_url.index(marker) + len(marker)
        return self.xz_url[start:]
class Manifest:
    """Read-only view over a parsed Rust channel manifest (TOML)."""

    def __init__(self, raw_dict: MutableMapping[str, Any]):
        self._dict = raw_dict

    @staticmethod
    def from_toml_path(toml_path: Path) -> "Manifest":
        """Parse *toml_path* and wrap it in a Manifest."""
        return Manifest(toml.load(toml_path))

    def clone(self) -> "Manifest":
        """Return an independent deep copy of this manifest."""
        return Manifest(copy.deepcopy(self._dict))

    @property
    def _rust_src_version(self) -> str:
        # The "rust-src" package exists for every channel, so its version
        # string is used as the authoritative release descriptor.
        version = self._dict["pkg"]["rust-src"]["version"]
        # Sample version lines found below [pkg.rust-src]:
        #   version = "1.43.0-beta.5 (934ae7739 2020-04-06)"
        #   version = "1.44.0-nightly (42abbd887 2020-04-07)"
        #   version = "1.42.0 (b8cedc004 2020-03-09)"
        return version

    @property
    def channel(self) -> str:
        """Channel name ("stable", "beta" or "nightly") inferred from the version."""
        version = self._rust_src_version
        if "-beta" in version:
            channel = "beta"
        elif "-nightly" in version:
            channel = "nightly"
        else:
            channel = "stable"
        return channel

    @property
    def version(self) -> str:
        """Bare semantic version, e.g. "1.42.0" (channel suffix and hash stripped)."""
        version = self._rust_src_version
        # version = "1.44.0-nightly (42abbd887 2020-04-07)"
        # version = "1.42.0 (b8cedc004 2020-03-09)"
        return version.split("-")[0].split()[0]

    @property
    def date(self) -> str:
        """Release date recorded in the manifest (ISO string)."""
        return self._dict["date"]

    @property
    def spec(self) -> str:
        """Channel-date spec, e.g. "nightly-2020-04-07"."""
        return "{}-{}".format(self.channel, self.date)

    @property
    def ident(self) -> str:
        """Human-readable identifier, e.g. "nightly-2020-04-07(1.44.0)"."""
        return "{}({})".format(self.spec, self.version)

    def set_package_available(
        self, package_name: str, target: str, available: bool = True
    ) -> None:
        """Mark a (package, target) entry available/unavailable in place."""
        details = self._dict["pkg"][package_name]["target"][target]
        # An entry cannot be "available" without a download URL to back it.
        if available and "xz_url" not in details:
            raise error.AbortError(
                "package {}/{} set available but missing xz_url".format(
                    package_name, target
                )
            )
        details["available"] = available

    def get_package(self, package_name: str, target: str) -> Package:
        """Return the Package wrapper for one (name, target) entry."""
        details = self._dict["pkg"][package_name]["target"][target]
        return Package(package_name, target, details)

    def gen_packages(self) -> Generator[Package, None, None]:
        """Generate Package for all (name, target) in manifest."""
        for name, package_dict in self._dict["pkg"].items():
            for target in package_dict["target"].keys():
                yield self.get_package(name, target)

    def gen_available_packages(
        self, *, targets: Optional[Iterable[str]] = None
    ) -> Generator[Package, None, None]:
        """gen_packages() for available packages matching targets."""
        for package in self.gen_packages():
            if package.available:
                # targets=None means "no filtering": yield every available package.
                if targets is None or target_matches_any(
                    package.target, targets
                ):
                    yield package

    def available_packages(self) -> List[Package]:
        """List form of :meth:`gen_available_packages` with no target filter."""
        return list(self.gen_available_packages())

    def _targets_from_packages(self, packages: Iterable[Package]) -> List[str]:
        # The wildcard pseudo-target "*" is not a real target; drop it.
        targets = set(p.target for p in packages)
        targets.discard("*")
        return sorted(targets)

    def all_targets(self) -> List[str]:
        """Sorted list of every concrete target named in the manifest."""
        return self._targets_from_packages(self.gen_packages())

    def available_targets(self) -> List[str]:
        """Sorted list of targets that have at least one available package."""
        return self._targets_from_packages(self.gen_available_packages())
| StarcoderdataPython |
152764 | ####
# This sample uses the PyPDF2 library for combining pdfs together to get the full pdf for all the views in a
# workbook.
#
# You will need to do `pip install PyPDF2` to use this sample.
#
# To run the script, you must have installed Python 3.5 or later.
####
import argparse
import getpass
import logging
import tempfile
import shutil
import functools
import os.path
import tableauserverclient as TSC
try:
import PyPDF2
except ImportError:
print('Please `pip install PyPDF2` to use this sample')
import sys
sys.exit(1)
def get_views_for_workbook(server, workbook_id):  # -> Iterable of views
    """Fetch the workbook identified by *workbook_id* and return its views.

    *server* is a signed-in ``tableauserverclient`` Server instance; the views
    list is populated in place on the workbook before being returned.
    """
    workbook = server.workbooks.get_by_id(workbook_id)
    server.workbooks.populate_views(workbook)
    return workbook.views
def download_pdf(server, tempdir, view):  # -> Filename to downloaded pdf
    """Export *view* as PDF into *tempdir* and return the file's path.

    The file is named after the view's LUID. *server* must be a signed-in
    ``tableauserverclient`` Server instance.
    """
    logging.info("Exporting {}".format(view.id))
    destination_filename = os.path.join(tempdir, view.id)
    server.views.populate_pdf(view)
    # Bug fix: the original called the Python 2 `file()` builtin, which no
    # longer exists in Python 3; open() with a context manager also guarantees
    # the handle is closed.
    with open(destination_filename, 'wb') as f:
        f.write(view.pdf)
    return destination_filename
def combine_into(dest_pdf, filename):  # -> None
    """Append *filename* to the PDF merger and return the merger.

    Returning *dest_pdf* (despite the legacy "-> None" note above) lets this
    function serve as the accumulator step of a reduce() over downloaded files.
    """
    dest_pdf.append(filename)
    return dest_pdf
def cleanup(tempdir):
    """Recursively delete the temporary download directory."""
    shutil.rmtree(tempdir)
def main():
    """Sign in to Tableau Server, export every view of a workbook to PDF,
    and merge them into a single output file.

    Bug fixes vs. the original: `reduce` is not a builtin in Python 3
    (now `functools.reduce`), and the Python 2 `file()` builtin is replaced
    with `open()`.
    """
    parser = argparse.ArgumentParser(description='Export to PDF all of the views in a workbook.')
    parser.add_argument('--server', '-s', required=True, help='server address')
    parser.add_argument('--site', '-S', default=None, help='Site to log into, do not specify for default site')
    parser.add_argument('--username', '-u', required=True, help='username to sign into server')
    parser.add_argument('--password', '-p', default=None, help='<PASSWORD>')
    parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
                        help='desired logging level (set to error by default)')
    parser.add_argument('--file', '-f', default='out.pdf', help='filename to store the exported data')
    parser.add_argument('resource_id', help='LUID for the workbook')
    args = parser.parse_args()

    # Prompt interactively only when no password was supplied on the command line.
    if args.password is None:
        password = getpass.getpass("Password: ")
    else:
        password = args.password

    # Set logging level based on user input, or error by default
    logging_level = getattr(logging, args.logging_level.upper())
    logging.basicConfig(level=logging_level)

    tempdir = tempfile.mkdtemp('tsc')
    logging.debug("Saving to tempdir: %s", tempdir)

    tableau_auth = TSC.TableauAuth(args.username, password, args.site)
    server = TSC.Server(args.server, use_server_version=True)
    try:
        with server.auth.sign_in(tableau_auth):
            get_list = functools.partial(get_views_for_workbook, server)
            download = functools.partial(download_pdf, server, tempdir)

            downloaded = (download(x) for x in get_list(args.resource_id))
            # functools.reduce: the bare `reduce` builtin was removed in Python 3.
            output = functools.reduce(combine_into, downloaded, PyPDF2.PdfFileMerger())
            # open() replaces the removed Python 2 `file()` builtin.
            with open(args.file, 'wb') as f:
                output.write(f)
    finally:
        # Always remove the temp directory, even when sign-in or export fails.
        cleanup(tempdir)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
190375 | <filename>paprika/threads/SecondTimer.py
import threading
class SecondTimer(object):
    """Tick counter that runs registered executors every *seconds* ticks.

    Callers invoke :meth:`on_time` once per tick; whenever the elapsed tick
    count is a multiple of the configured interval, every registered
    executor's ``execute`` method is launched on its own daemon thread.
    """

    def __init__(self, seconds):
        object.__init__(self)
        self.__executors = []
        self.__seconds = seconds
        self.__elapsed = 0

    def get_elapsed(self):
        """Number of ticks counted so far."""
        return self.__elapsed

    def set_elapsed(self, elapsed):
        self.__elapsed = elapsed

    def get_seconds(self):
        """Interval, in ticks, between executor runs."""
        return self.__seconds

    def set_seconds(self, seconds):
        self.__seconds = seconds

    def get_executors(self):
        """The mutable list of registered executors."""
        return self.__executors

    def on_time(self):
        """Advance the tick count; fire executors on interval boundaries."""
        self.set_elapsed(self.get_elapsed() + 1)
        if self.get_elapsed() % self.get_seconds() != 0:
            return
        for executor in self.get_executors():
            worker = threading.Thread(target=executor.execute)
            worker.setDaemon(True)
            worker.start()

    def register(self, executor):
        """Add *executor* (any object with an ``execute()`` method)."""
        self.get_executors().append(executor)
| StarcoderdataPython |
1677592 | """A RedirectionProvider Service Provider."""
from config import session
from masonite.drivers import SessionCookieDriver, SessionMemoryDriver
from masonite.managers import SessionManager
from masonite.provider import ServiceProvider
from masonite.view import View
from masonite.request import Request
from masonite import Session
class SessionProvider(ServiceProvider):
    """Masonite service provider wiring session drivers into the IoC container."""

    def register(self):
        # Bind config and both concrete drivers first; SessionManager resolves
        # them from the container, so these bindings must precede its creation.
        self.app.bind('SessionConfig', session)
        self.app.bind('SessionMemoryDriver', SessionMemoryDriver)
        self.app.bind('SessionCookieDriver', SessionCookieDriver)
        self.app.bind('SessionManager', SessionManager(self.app))

    def boot(self, request: Request, view: View, session: SessionManager):
        # Resolve the driver named in config and expose it both under the
        # 'Session' key and as the Session class for annotation-based injection.
        self.app.bind('Session', session.driver(self.app.make('SessionConfig').DRIVER))
        self.app.swap(Session, session.driver(self.app.make('SessionConfig').DRIVER))
        request.session = self.app.make('Session')
        # Make the session helper callable available inside all templates.
        view.share({
            'session': self.app.make('Session').helper
        })
| StarcoderdataPython |
91706 | <reponame>desafinadude/municipal-data
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-10-06 17:45
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.11 series, 2020-10-06).

    Adds municipality profile tables, the rebuild/upload audit tables, and an
    explicit surrogate key plus uniqueness constraint on staff contacts.
    Do not edit operations by hand; generate a follow-up migration instead.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('municipal_finance', '0010_auto_20170301_1256'),
    ]

    operations = [
        # JSON blob of computed profile data, keyed by demarcation code.
        migrations.CreateModel(
            name='MunicipalityProfile',
            fields=[
                ('demarcation_code', models.CharField(
                    max_length=10, primary_key=True, serialize=False)),
                ('data', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
            options={
                'db_table': 'municipality_profile',
            },
        ),
        # Audit record: who triggered a profiles rebuild and when.
        migrations.CreateModel(
            name='MunicipalityProfilesRebuild',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('datetime', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'municipality_profiles_rebuild',
            },
        ),
        # Audit record for uploaded staff-contact spreadsheets.
        migrations.CreateModel(
            name='MunicipalityStaffContactsUpload',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('datetime', models.DateTimeField(auto_now_add=True)),
                ('file', models.FileField(upload_to='uploads/contacts/')),
                ('user', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'municipality_staff_contacts_uploads',
            },
        ),
        migrations.AddField(
            model_name='municipalitystaffcontacts',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterUniqueTogether(
            name='municipalitystaffcontacts',
            unique_together=set([('demarcation_code', 'role')]),
        ),
    ]
| StarcoderdataPython |
3204285 | from argparse import Namespace
from src.learner import Learner
# Hyper-parameter and path bundle for the surname-classification CNN run;
# Namespace is used as a lightweight attribute-style configuration object
# consumed by Learner.learner_from_args below.
args = Namespace(
    # Data and Path information
    surname_csv="data/surnames/surnames_with_splits.csv",
    vectorizer_file="vectorizer.json",
    model_state_file="model.pth",
    save_dir="model_storage/ch4/cnn",
    # Model hyper parameters
    hidden_dim=100,
    num_channels=256,
    # Training hyper parameters
    seed=1337,
    learning_rate=0.001,
    batch_size=128,
    num_epochs=100,
    early_stopping_criteria=5,
    dropout_p=0.1,
    # Runtime options
    cuda=False,
    reload_from_files=False,
    expand_filepaths_to_save_dir=True,
    catch_keyboard_interrupt=True
)
learner=Learner.learner_from_args(args)
learner.train(learning_rate=1) | StarcoderdataPython |
161639 | <gh_stars>1-10
from PIL import Image
from cStringIO import StringIO
def inline(image, size):
    """Render ``size``×``size`` pixels pulled from the *image* iterator into an
    in-memory PNG and return it base64-encoded (as an inline data string).

    NOTE(review): Python 2 only — relies on ``cStringIO`` and
    ``str.encode('base64')``, both removed in Python 3; a port would use
    ``io.BytesIO`` and ``base64.b64encode``. Assumes *image* yields at least
    size*size RGB tuples — TODO confirm against callers.
    """
    tmp = Image.new('RGB', (size, size), None)
    buf = tmp.load()
    # Fill row-major: v iterates rows, u iterates columns within a row.
    for v in xrange(size):
        for u in xrange(size):
            buf[u, v] = next(image)
    out = StringIO()
    tmp.save(out, 'PNG')
    result = out.getvalue().encode('base64')
    out.close()
    return result
| StarcoderdataPython |
3205393 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from .building_type import BuildingType
class ProfitableBuilding(BuildingType):
    """ A Massilian building that generates income for the state. """
    # Income in talents; max_digits=4 / decimal_places=2 bounds values to < 100.
    building_income = models.DecimalField(_('Income'), max_digits=4, decimal_places=2)
    settings = models.ForeignKey('MassiliaSettings', on_delete=models.CASCADE)

    def __str__(self):
        # e.g. "3 Harbor +1.50 talents"; number_built/name come from BuildingType.
        return f'{self.number_built} {self.name.title()} +{self.building_income} talents'
| StarcoderdataPython |
1731124 | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import SlickGalleryPlugin
from django.utils.translation import ugettext as _
class SlickGalleryPluginBase(CMSPluginBase):
    """django-CMS plugin exposing a Slick carousel gallery placeholder."""
    name = _('Slick gallery')
    model = SlickGalleryPlugin
    render_template = "cmsplugin_slick_gallery/_slick_gallery_plugin.html"
    allow_children = False

    def render(self, context, instance, placeholder):
        # Expose the plugin instance to the template; everything else comes
        # from the inherited rendering context.
        context['instance'] = instance
        return context
plugin_pool.register_plugin(SlickGalleryPluginBase)
| StarcoderdataPython |
21898 | <filename>scripts/pa-loaddata.py<gh_stars>0
#! /usr/bin/python
import argparse
import os
from biokbase.probabilistic_annotation.DataParser import DataParser
from biokbase.probabilistic_annotation.Helpers import get_config
from biokbase import log
desc1 = '''
NAME
pa-loaddata -- load static database of gene annotations
SYNOPSIS
'''
desc2 = '''
DESCRIPTION
Load the static database of high-quality gene annotations along with
files containing intermediate data. The files are then available for
a probabilistic annotation server on this system. Since downloading
from Shock can take a long time, run this command to load the static
database files before the server is started. The configFilePath argument
specifies the path to the configuration file for the service.
Note that a probabilistic annotation server is unable to service client
requests for the annotate() and calculate() methods while this command is
running and must be restarted to use the new files.
'''
desc3 = '''
EXAMPLES
Load static database files:
> pa-loaddata loaddata.cfg
SEE ALSO
pa-gendata
pa-savedata
AUTHORS
<NAME>, <NAME>
'''
# Main script function
# Main script function
if __name__ == "__main__":
    # Parse arguments.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, prog='pa-loaddata', epilog=desc3)
    parser.add_argument('configFilePath', help='path to configuration file', action='store', default=None)
    usage = parser.format_usage()
    # Splice the generated usage string into the man-page style description.
    parser.description = desc1 + ' ' + usage + desc2
    parser.usage = argparse.SUPPRESS
    args = parser.parse_args()

    # Create a log object.
    submod = os.environ.get('KB_SERVICE_NAME', 'probabilistic_annotation')
    mylog = log.log(submod, ip_address=True, authuser=True, module=True, method=True,
                    call_id=True, config=args.configFilePath)

    # Get the probabilistic_annotation section from the configuration file.
    config = get_config(args.configFilePath)

    # Create a DataParser object for working with the static database files (the
    # data folder is created if it does not exist).
    dataParser = DataParser(config)

    # Get the static database files. If the files do not exist and they are downloaded
    # from Shock, the command may run for a long time.
    testDataPath = os.path.join(os.environ['KB_TOP'], 'services', submod, 'testdata')
    dataOption = dataParser.getDatabaseFiles(mylog, testDataPath)

    exit(0)
| StarcoderdataPython |
1660157 | <filename>tests/epyccel/modules/types.py
# pylint: disable=missing-function-docstring, missing-module-docstring/
# Identity functions exercising epyccel's type translation: each accepts one
# argument of the annotated type and returns it unchanged. The string
# annotations ('int64', 'float32', ...) are pyccel type markers and must be
# kept exactly as written.
def test_int_default(x : 'int'):
    return x

def test_int64(x : 'int64'):
    return x

def test_int32(x : 'int32'):
    return x

def test_int16(x : 'int16'):
    return x

def test_int8(x : 'int8'):
    return x

def test_real_default(x : 'float'):
    return x

def test_float32(x : 'float32'):
    return x

def test_float64(x : 'float64'):
    return x

def test_complex_default(x : 'complex'):
    return x

def test_complex64(x : 'complex64'):
    return x

def test_complex128(x : 'complex128'):
    return x

def test_bool(x : 'bool'):
    return x
3221233 | <gh_stars>1-10
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def show_graph(g) :
    """Draw graph *g* with labeled, bold nodes and block until the window closes."""
    nx.draw(g,with_labels=True, font_weight='bold')
    plt.show()
# Demo fixtures: the same 3-cycle a-b-c built once undirected (U) and once
# directed (D).
U=nx.Graph()
U.add_edge("a","b")
U.add_edge("b","c")
U.add_edge("a","c")

D=nx.DiGraph()
D.add_edge("a","b")
D.add_edge("b","c")
D.add_edge("a","c")
class MyGraph:
    """Thin wrapper pairing a networkx graph with a one-call renderer."""

    def __init__(self, arg, directed=False):
        # Choose the graph class up front, then build from the edge data.
        graph_cls = nx.DiGraph if directed else nx.Graph
        self.g = graph_cls(arg)

    def show(self):
        """Render this graph via the module-level show_graph helper."""
        show_graph(self.g)
# Build and display a 3-cycle through the wrapper class (opens a window).
G=MyGraph([(1,2),(2,3),(3,1)],directed=False)
G.show()
#show(D)
def plot1() :
    """Show the Petersen graph twice side by side: default layout vs. shell layout."""
    G = nx.petersen_graph()
    plt.subplot(121)
    nx.draw(G, with_labels=True, font_weight='bold')
    plt.subplot(122)
    # Shell layout with outer ring 5-9 and inner ring 0-4.
    nx.draw_shell(G,
                  nlist=[range(5, 10),
                         range(5)],
                  with_labels=True,
                  font_weight='bold')
    plt.show()
def plot2() :
    """Show the Petersen graph with the default spring layout."""
    G = nx.petersen_graph()
    nx.draw(G, with_labels=True, font_weight='bold')
    plt.show()
#plot1()
| StarcoderdataPython |
3297868 | from elasticsearch import Elasticsearch
import os
import zipfile
import shutil
import urllib.request
import logging
import lzma
import json
import tarfile
import hashlib
logger = logging.getLogger(__name__)
# index settings with analyzer to automatically remove stop words
# index settings with analyzer to automatically remove stop words
# Applied to both the opinion text and case name fields at index-creation time.
index_settings = {
    "settings": {
        "analysis": {
            "analyzer": {
                "stop_analyzer": {
                    "type": "standard",
                    "stopwords": "_english_"
                }
            }
        }
    },
    "mappings": {
        "properties": {
            # Nested path into case.law bulk documents.
            "casebody.data.opinions.text": {
                "type": "text",
                "analyzer": "stop_analyzer"
            },
            "name": {
                "type": "text",
                "analyzer": "stop_analyzer"
            }
        }
    }
}
def create_index_from_json(index_name, file_path, max_docs=None):
    """Create an Elasticsearch index from a case.law bulk file.

    Supported inputs:
      * ``.jsonl.xz`` — parsed line by line, each line one JSON document;
      * ``.json`` — a file containing a JSON list of documents.
    The source file is deleted after indexing.

    Arguments:
        index_name {str} -- name of the Elasticsearch index to create/fill
        file_path {str} -- path to case.law bulk file

    Keyword Arguments:
        max_docs {int} -- maximum number of documents to index; set to None
            to use the entire data file.

    Bug fixes vs. the original:
      * ``max_docs=None`` crashed on the ``i > max_docs`` int/None comparison,
        even though the docstring documented None as supported;
      * an off-by-one indexed ``max_docs + 1`` documents;
      * the ``.json`` branch promised by the docstring was missing.
    """
    es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
    es.indices.create(index=index_name, body=index_settings, ignore=400)
    extension = os.path.splitext(file_path)[1]
    logger.info(">> Creating index using file " + file_path)

    def _index_all(documents):
        # Index documents with sequential ids, stopping at max_docs when set.
        count = 0
        for doc in documents:
            if max_docs is not None and count >= max_docs:
                break
            count += 1
            try:
                es.index(index=index_name, id=count, body=doc)
            except Exception as e:
                logger.info(
                    "An error has occurred while creating index " + str(e))
                break

    if extension == ".xz":
        with lzma.open(file_path) as f:
            _index_all(json.loads(str(line, 'utf8')) for line in f)
    elif extension == ".json":
        with open(file_path, encoding="utf-8") as f:
            _index_all(json.load(f))  # the file must contain a JSON list

    logger.info(">> Creating index complete, delete data file .. ")
    os.remove(file_path)
def import_scotus_files(max_docs=2000):
    """Download the CourtListener SCOTUS opinion bulk archive and index the
    plain-text opinions into the "supremecourt" Elasticsearch index.

    NOTE(review): tarfile.extractall on a downloaded archive is vulnerable to
    path traversal for untrusted sources — confirm the bulk URL is trusted.
    NOTE(review): the `i > max_docs` check runs after indexing, so up to
    max_docs + 1 documents are indexed.
    """
    scotus_url = "https://www.courtlistener.com/api/bulk-data/opinions/scotus.tar.gz"
    scotus_dir = "scotusdata"
    index_name = "supremecourt"
    # Skip the (slow) download entirely when a previous run left the data dir.
    if (not os.path.exists(scotus_dir)):
        os.makedirs(scotus_dir, exist_ok=True)
        logger.info(">>> Downloading supreme court case data")
        ftpstream = urllib.request.urlopen(scotus_url)
        thetarfile = tarfile.open(fileobj=ftpstream, mode="r|gz")
        thetarfile.extractall(path=scotus_dir)
        logger.info(">>> Download completed ")

    logger.info(">> Creating %s index using %s documents",
                index_name, str(max_docs))
    scotus_files = os.listdir(scotus_dir)
    es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
    es.indices.create(
        index=index_name, body=index_settings, ignore=400)
    i = 0
    for file_path in (scotus_files):
        with open("scotusdata/" + file_path) as json_file:
            scotus_case = json.load(json_file)
            case = {"author": scotus_case["author"],
                    "casebody": scotus_case["plain_text"]}
            # Opinions without extracted plain text are skipped entirely.
            if (scotus_case["plain_text"] != ""):
                try:
                    index_status = es.index(
                        index=index_name, id=scotus_case["id"], body=case)
                except Exception as e:
                    logger.info(
                        "An error has occurred while creating index " + str(e))
                    break
                i += 1
                if (i > max_docs):
                    break
    logger.info(">> Index creation complete.")
def download_data(data_url, source_name):
    """Fetch a case.law bulk ZIP, unpack it, and stage its jsonl.xz payload.

    Arguments:
        data_url {str} -- url path dataset
        source_name {str} -- name for dataset

    Returns the path of the staged file inside the ``data`` directory.
    """
    # Ensure the staging target exists before any download starts.
    os.makedirs("data", exist_ok=True)

    archive_path = source_name + ".zip"
    logger.info(">> Downloading data file for " + source_name)
    urllib.request.urlretrieve(data_url, archive_path)
    logger.info(">> Downloaded data file " + archive_path)

    staging_dir = "temp" + source_name
    with zipfile.ZipFile(archive_path, 'r') as archive:
        archive.extractall(staging_dir)

    # The archive unpacks into a single top-level folder holding data/data.jsonl.xz.
    inner_dir = os.listdir(staging_dir)[0]
    data_file = os.path.join(staging_dir, inner_dir, "data", "data.jsonl.xz")
    final_file_path = os.path.join("data", source_name + "jsonl.xz")
    shutil.copyfile(data_file, final_file_path)
    logger.info(">> Extracted and moved jsonl file to data folder")

    # Clean up the temporary extraction dir and the downloaded archive.
    shutil.rmtree(staging_dir)
    os.remove(archive_path)
    return final_file_path
def import_sample_data(max_docs=2000):
    """This method downloads several datasets and builds an
    elasticsearch index using the downloaded data.

    Currently pulls the New Mexico case.law bulk export and indexes it into
    the "cases" index; the SCOTUS/medical imports remain disabled below.

    Args:
        max_docs (int, optional): cap on documents indexed per source.
            Defaults to 2000.
    """
    caselaw_data_paths = [
        ["https://api.case.law/v1/bulk/22411/download/", "newmexico"]
    ]
    for data_path in caselaw_data_paths:
        file_path = download_data(data_path[0], data_path[1])
        create_index_from_json("cases", file_path, max_docs=max_docs)

    # import_scotus_files(max_docs=max_docs)
    # import_medical_data(max_docs=max_docs)
# import_medical_data(max_docs=max_docs)
def parse_field_content(field_name, content):
    """Resolve *field_name* inside *content*, following dot notation.

    A plain name is looked up directly. A dotted name such as
    ``casebody.data.opinions.text`` is resolved one segment at a time; when an
    intermediate value is a list, only its first element is followed (so
    ``opinions`` being a list yields ``opinions[0].text``). The final value is
    returned as-is — lists are only unwrapped at intermediate levels.

    Args:
        field_name (str): plain or dot-separated field path.
        content (dict): document to resolve the path against.

    Returns:
        The value found at the end of the path.
    """
    if "." not in field_name:
        return content[field_name]

    node = content
    for segment in field_name.split("."):
        node = node[segment]
        if isinstance(node, list):
            node = node[0]
    return node
| StarcoderdataPython |
3325010 | # Python Program to find Largest of Two Numbers
# Read two numbers from the user and report which one is larger.
a = float(input(" Please Enter the First number : "))
b = float(input(" Please Enter the Second number : "))

# Compare and report; fixes the misspelled "lergest" in the original messages.
if a > b:
    print('first number is largest ')
elif b > a:
    print('second number is largest')
else:
    print("Both are Equal")
3330449 |
import json
import itertools
from os import environ, path, makedirs
import logging
import logging.config
from dotenv import load_dotenv
load_dotenv()
# pipenv run python generate_dqm_json_test_set.py
# Take large dqm json data and generate a smaller subset to test with, with data from beginning, middle, and end of data array
# This takes about 90 seconds to run
log_file_path = path.join(path.dirname(path.abspath(__file__)), '../logging.conf')
logging.config.fileConfig(log_file_path)
logger = logging.getLogger('literature logger')
# base_path = '/home/azurebrd/git/agr_literature_service_demo/src/xml_processing/'
base_path = environ.get('XML_PATH')
sample_path = base_path + 'dqm_sample/'
def generate_dqm_json_test_set():
    """Write reduced REFERENCE_<MOD>.json sample files into ``sample_path``.

    For each mod's dqm_data file, takes ``sample_amount`` references from the
    beginning, middle, and end of the ``data`` array; files with fewer than
    ``3 * sample_amount`` references are copied through unchanged.

    Fixes vs. the original: the input file handle was opened with a bare
    ``open()`` and never closed (now a context manager), a stray debug
    ``print`` of the input path now goes through the logger, and the
    redundant ``close()`` inside the output ``with`` block is gone.
    """
    if not path.exists(sample_path):
        makedirs(sample_path)
    sample_amount = 10
    mods = ['SGD', 'RGD', 'FB', 'WB', 'MGI', 'ZFIN']
    for mod in mods:
        logger.info("generating sample set for %s", mod)
        input_filename = base_path + 'dqm_data/REFERENCE_' + mod + '.json'
        logger.info("reading %s", input_filename)
        with open(input_filename) as f:
            dqm_data = json.load(f)
        reference_amount = len(dqm_data['data'])
        # Only subsample when there is enough data for three distinct slices.
        if reference_amount > 3 * sample_amount:
            sample1 = dqm_data['data'][:sample_amount]
            start = int(reference_amount / 2) - 1
            sample2 = dqm_data['data'][start:start + sample_amount]
            sample3 = dqm_data['data'][-sample_amount:]
            dqm_data['data'] = list(itertools.chain(sample1, sample2, sample3))
        output_json_file = sample_path + 'REFERENCE_' + mod + '.json'
        with open(output_json_file, "w") as json_file:
            json_file.write(json.dumps(dqm_data, indent=4, sort_keys=True))
# Entry point: log start/end around the sample-set generation.
if __name__ == "__main__":
    """
    call main start function
    """
    logger.info("starting generate_dqm_json_test_set.py")
    generate_dqm_json_test_set()
    logger.info("ending generate_dqm_json_test_set.py")
| StarcoderdataPython |
37453 | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.modules.xlinebase import XLineBase
from txircd.utils import durationToSeconds, ircLower, now
from zope.interface import implements
from fnmatch import fnmatchcase
class GLine(ModuleData, XLineBase):
    """txircd module implementing g:lines — network-wide bans matched on
    ident@host (displayed host, real host, or IP)."""
    implements(IPlugin, IModuleData)

    name = "GLine"
    core = True
    lineType = "G"  # letter identifying this x:line type in line storage

    def actions(self):
        # Hooks: check bans on connect and on ident/host changes; restrict the
        # GLINE command to opers; expose stats; burst lines to linking servers.
        return [ ("register", 10, self.checkLines),
                 ("changeident", 10, self.checkIdentChange),
                 ("changehost", 10, self.checkHostChange),
                 ("commandpermission-GLINE", 10, self.restrictToOper),
                 ("statsruntype-glines", 10, self.generateInfo),
                 ("burst", 10, self.burstLines) ]

    def userCommands(self):
        return [ ("GLINE", 1, UserGLine(self)) ]

    def serverCommands(self):
        return [ ("ADDLINE", 1, ServerAddGLine(self)),
                 ("DELLINE", 1, ServerDelGLine(self)) ]

    def load(self):
        self.initializeLineStorage()

    def verifyConfig(self, config):
        if "client_ban_msg" in config and not isinstance(config["client_ban_msg"], basestring):
            raise ConfigValidationError("client_ban_msg", "value must be a string")

    def checkUserMatch(self, user, mask, data):
        """Return True when the ban *mask* matches any of the user's
        ident@displayed-host, ident@real-host, or ident@ip forms."""
        # normalizeMask is provided by XLineBase — presumably lowercases and
        # canonicalizes the mask; verify in the base class.
        banMask = self.normalizeMask(mask)
        userMask = ircLower("{}@{}".format(user.ident, user.host()))
        if fnmatchcase(userMask, banMask):
            return True
        userMask = ircLower("{}@{}".format(user.ident, user.realHost))
        if fnmatchcase(userMask, banMask):
            return True
        userMask = ircLower("{}@{}".format(user.ident, user.ip))
        if fnmatchcase(userMask, banMask):
            return True
        return False

    def killUser(self, user, reason):
        """Notify and disconnect a g:lined user."""
        self.ircd.log.info("Matched user {user.uuid} ({user.ident}@{user.host()}) against a g:line: {reason}", user=user, reason=reason)
        user.sendMessage(irc.ERR_YOUREBANNEDCREEP, self.ircd.config.get("client_ban_msg", "You're banned! Email <EMAIL> for assistance."))
        user.disconnect("G:Lined: {}".format(reason))

    def checkLines(self, user):
        """Kill *user* if a g:line matches; returning False aborts registration."""
        banReason = self.matchUser(user)
        if banReason is not None:
            self.killUser(user, banReason)
            return False
        return True

    def checkIdentChange(self, user, oldIdent, fromServer):
        self.checkLines(user)

    def checkHostChange(self, user, hostType, oldHost, fromServer):
        # Only re-check users local to this server on host changes.
        if user.uuid[:3] == self.ircd.serverID:
            self.checkLines(user)

    def restrictToOper(self, user, data):
        """Deny the GLINE command to users lacking the command-gline oper permission."""
        if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-gline", users=[user]):
            user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
            return False
        return None
class UserGLine(Command):
	"""/GLINE oper command: add (mask duration reason...) or remove (mask) a g:line."""
	implements(ICommand)
	def __init__(self, module):
		self.module = module
	def parseParams(self, user, params, prefix, tags):
		# One parameter removes a line; three or more add one.  Exactly two
		# (mask + duration but no reason) is rejected as incomplete.
		if len(params) < 1 or len(params) == 2:
			user.sendSingleError("GLineParams", irc.ERR_NEEDMOREPARAMS, "GLINE", "Not enough parameters")
			return None
		banmask = params[0]
		if banmask in self.module.ircd.userNicks:
			# A connected nick was given; target that user's ident@realhost.
			targetUser = self.module.ircd.users[self.module.ircd.userNicks[banmask]]
			banmask = "{}@{}".format(targetUser.ident, targetUser.realHost)
		else:
			if "@" not in banmask:
				# A bare host mask applies to any ident.
				banmask = "*@{}".format(banmask)
		if len(params) == 1:
			return {
				"mask": banmask
			}
		return {
			"mask": banmask,
			"duration": durationToSeconds(params[1]),
			"reason": " ".join(params[2:])
		}
	def execute(self, user, data):
		banmask = data["mask"]
		if "reason" in data:
			# Adding a new line; a duration of 0 means permanent.
			if not self.module.addLine(banmask, now(), data["duration"], user.hostmask(), data["reason"]):
				user.sendMessage("NOTICE", "*** G:Line for {} is already set.".format(banmask))
				return True
			# Collect matches first so ircd.users isn't mutated while iterating.
			badUsers = []
			for checkUser in self.module.ircd.users.itervalues():
				reason = self.module.matchUser(checkUser)
				if reason is not None:
					badUsers.append((checkUser, reason))
			for badUser in badUsers:
				self.module.killUser(*badUser)
			if data["duration"] > 0:
				user.sendMessage("NOTICE", "*** Timed g:line for {} has been set, to expire in {} seconds.".format(banmask, data["duration"]))
			else:
				user.sendMessage("NOTICE", "*** Permanent g:line for {} has been set.".format(banmask))
			return True
		if not self.module.delLine(banmask):
			user.sendMessage("NOTICE", "*** G:Line for {} doesn't exist.".format(banmask))
			return True
		user.sendMessage("NOTICE", "*** G:Line for {} has been removed.".format(banmask))
		return True
class ServerAddGLine(Command):
	"""Server-to-server ADDLINE handler: store the propagated g:line, then
	disconnect any connected users it matches."""
	implements(ICommand)
	def __init__(self, module):
		self.module = module
	def parseParams(self, server, params, prefix, tags):
		return self.module.handleServerAddParams(server, params, prefix, tags)
	def execute(self, server, data):
		if self.module.executeServerAddCommand(server, data):
			# Snapshot matches before killing so ircd.users isn't mutated
			# while iterating.
			badUsers = []
			for user in self.module.ircd.users.itervalues():
				reason = self.module.matchUser(user)
				if reason is not None:
					badUsers.append((user, reason))
			for user in badUsers:
				self.module.killUser(*user)
			return True
		return None
class ServerDelGLine(Command):
	"""Server-to-server DELLINE handler: remove a propagated g:line."""
	implements(ICommand)
	def __init__(self, module):
		self.module = module
	def parseParams(self, server, params, prefix, tags):
		return self.module.handleServerDelParams(server, params, prefix, tags)
	def execute(self, server, data):
		return self.module.executeServerDelCommand(server, data)
# Module-level instance picked up by the plugin loader.
glineModule = GLine()
17925 | <gh_stars>0
from nanome._internal._util._serializers import _StringSerializer
from nanome._internal._util._serializers import _TypeSerializer
class _OpenURL(_TypeSerializer):
    """Serializer for the OpenURL packet: the payload is a single URL string."""
    def __init__(self):
        self.string = _StringSerializer()
    def version(self):
        # Wire-format version of this packet type.
        return 0
    def name(self):
        return "OpenURL"
    def serialize(self, version, value, context):
        # The whole payload is just the URL written as a string.
        context.write_using_serializer(self.string, value)
    def deserialize(self, version, context):
        # Outbound-only packet; deserialization is intentionally unsupported.
        raise NotImplementedError
| StarcoderdataPython |
3241821 | import logging
from django.contrib.auth.mixins import UserPassesTestMixin
from django.shortcuts import render
logger = logging.getLogger(__name__)
def handler500(request):
    """Custom 500 handler: render the application-error page with a 500 status."""
    return render(request, 'errors/application-error.html', status=500)
def index(request):
    """Render the site landing page."""
    return render(request, 'index.html')
class _CustomUserTest(UserPassesTestMixin):
    """A helper to ensure that the current user is only requesting a view their own data."""
    def test_func(self):
        # Grant access only when the requesting user created the object being
        # viewed; the consuming view must provide get_object().
        object = self.get_object()
        return self.request.user == object.creator
| StarcoderdataPython |
3305476 | import numpy
from grunnur import dtypes, Program, Queue, Array
def check_struct_fill(context, dtype):
    """
    Fill every field of the given ``dtype`` with its number and check the results.
    This helps detect issues with offsets in the struct.
    """
    struct = dtypes.ctype_struct(dtype)
    # The kernel writes each flattened field's index into that field and also
    # reports the device-side sizeof() for comparison with dtype.itemsize.
    program = Program(
        context.device,
        """
        KERNEL void test(GLOBAL_MEM ${struct} *dest, GLOBAL_MEM int *itemsizes)
        {
            const SIZE_T i = get_global_id(0);
            ${struct} res;
            %for i, field_info in enumerate(dtypes.flatten_dtype(dtype)):
            res.${dtypes.c_path(field_info[0])} = ${i};
            %endfor
            dest[i] = res;
            itemsizes[i] = sizeof(${struct});
        }
        """,
        render_globals=dict(
            struct=struct,
            dtypes=dtypes,
            dtype=dtype))
    test = program.kernel.test
    queue = Queue(context.device)
    # 128 independent work items all write the same pattern.
    a_dev = Array.empty(context.device, 128, dtype)
    itemsizes_dev = Array.empty(context.device, 128, numpy.int32)
    test(queue, 128, None, a_dev, itemsizes_dev)
    a = a_dev.get(queue)
    itemsizes = itemsizes_dev.get(queue)
    # Every field must hold its flattened index, proving host and device agree
    # on each field's offset; sizeof must match the host-side itemsize.
    for i, field_info in enumerate(dtypes.flatten_dtype(dtype)):
        path, _ = field_info
        assert (dtypes.extract_field(a, path) == i).all()
    assert (itemsizes == dtype.itemsize).all()
def test_struct_offsets(context):
    """
    Test the correctness of alignment for an explicit set of field offsets.
    """
    # Nested struct padded to 8 bytes: int32 at offset 0, int8 at 4, tail padding.
    dtype_nested = numpy.dtype(dict(
        names=['val1', 'pad'],
        formats=[numpy.int32, numpy.int8],
        offsets=[0, 4],
        itemsize=8,
        aligned=True))
    # Outer struct with explicit offsets and an over-sized itemsize (32 bytes).
    dtype = numpy.dtype(dict(
        names=['val1', 'val2', 'nested'],
        formats=[numpy.int32, numpy.int16, dtype_nested],
        offsets=[0, 4, 8],
        itemsize=32,
        aligned=True))
    check_struct_fill(context, dtype)
def test_struct_offsets_array(context):
    """
    Test the correctness of alignment when fields are arrays:
    an array of nested structs and an array of scalars.
    """
    dtype_nested = numpy.dtype(dict(
        names=['val1', 'pad'],
        formats=[numpy.int8, numpy.int8]))

    dtype = numpy.dtype(dict(
        names=['pad', 'struct_arr', 'regular_arr'],
        formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))]))

    # Note: the original version also built a `dtype_ref` reference dtype that
    # was never compared against anything; that dead code has been removed.
    # `dtype` carries no explicit offsets, so align() derives them.
    dtype_aligned = dtypes.align(dtype)

    check_struct_fill(context, dtype_aligned)
def test_struct_offsets_field_alignments(context):
    """Check align() on a dtype whose explicit offsets leave gaps between fields."""
    dtype = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 4, 16],
        itemsize=32))
    dtype_aligned = dtypes.align(dtype)
    check_struct_fill(context, dtype_aligned)
| StarcoderdataPython |
4827142 | import time
import joblib
import os
import os.path as osp
import tensorflow as tf
import torch
import gym
from spinup import EpochLogger
from spinup.utils.logx import restore_tf_graph
def load_policy_and_env(fpath, itr='last', deterministic=False):
    """
    Load a policy from save, whether it's TF or PyTorch, along with RL env.
    Not exceptionally future-proof, but it will suffice for basic uses of the
    Spinning Up implementations.
    Checks to see if there's a tf1_save folder. If yes, assumes the model
    is tensorflow and loads it that way. Otherwise, loads as if there's a
    PyTorch save.
    """
    # determine if tf save or pytorch save
    if any(['tf1_save' in x for x in os.listdir(fpath)]):
        backend = 'tf1'
    else:
        backend = 'pytorch'
    # handle which epoch to load from
    if itr=='last':
        # check filenames for epoch (AKA iteration) numbers, find maximum value
        if backend == 'tf1':
            # Folders are named 'tf1_saveXX'; bare 'tf1_save' (len==8) has no number.
            saves = [int(x[8:]) for x in os.listdir(fpath) if 'tf1_save' in x and len(x)>8]
        elif backend == 'pytorch':
            pytsave_path = osp.join(fpath, 'pyt_save')
            # Each file in this folder has naming convention 'modelXX.pt', where
            # 'XX' is either an integer or empty string. Empty string case
            # corresponds to len(x)==8, hence that case is excluded.
            saves = [int(x.split('.')[0][5:]) for x in os.listdir(pytsave_path) if len(x)>8 and 'model' in x]
        # Empty itr string selects the unnumbered (latest) save.
        itr = '%d'%max(saves) if len(saves) > 0 else ''
    else:
        assert isinstance(itr, int), \
            "Bad value provided for itr (needs to be int or 'last')."
        itr = '%d'%itr
    # load the get_action function
    if backend == 'tf1':
        get_action = load_tf_policy(fpath, itr, deterministic)
    else:
        get_action = load_pytorch_policy(fpath, itr, deterministic)
    # try to load environment from save
    # (sometimes this will fail because the environment could not be pickled)
    try:
        state = joblib.load(osp.join(fpath, 'vars'+itr+'.pkl'))
        env = state['env']
    except:
        env = None
    return env, get_action
def load_tf_policy(fpath, itr, deterministic=False):
    """ Load a tensorflow policy saved with Spinning Up Logger."""
    fname = osp.join(fpath, 'tf1_save'+itr)
    print('\n\nLoading from %s.\n\n'%fname)
    # load the things!
    sess = tf.Session()
    model = restore_tf_graph(sess, fname)
    # get the correct op for executing actions
    if deterministic and 'mu' in model.keys():
        # 'deterministic' is only a valid option for SAC policies
        print('Using deterministic action op.')
        action_op = model['mu']
    else:
        print('Using default action op.')
        action_op = model['pi']
    # make function for producing an action given a single state
    # (x[None,:] adds the batch dimension; [0] strips it from the result)
    get_action = lambda x : sess.run(action_op, feed_dict={model['x']: x[None,:]})[0]
    return get_action
def load_pytorch_policy(fpath, itr, deterministic=False):
    """ Load a pytorch policy saved with Spinning Up Logger."""
    # NOTE(review): `deterministic` is unused here; it is kept only for
    # signature parity with load_tf_policy.
    fname = osp.join(fpath, 'pyt_save', 'model'+itr+'.pt')
    print('\n\nLoading from %s.\n\n'%fname)
    model = torch.load(fname)
    # make function for producing an action given a single state
    def get_action(x):
        with torch.no_grad():
            x = torch.as_tensor(x, dtype=torch.float32)
            action = model.act(x)
        return action
    return get_action
def run_policy(env, get_action, max_ep_len=None, num_episodes=100, render=True):
    """Roll out *get_action* in *env* for *num_episodes* episodes, logging
    per-episode return (EpRet) and length (EpLen) via EpochLogger."""
    assert env is not None, \
        "Environment not found!\n\n It looks like the environment wasn't saved, " + \
        "and we can't run the agent in it. :( \n\n Check out the readthedocs " + \
        "page on Experiment Outputs for how to handle this situation."
    logger = EpochLogger()
    if "BulletEnv" in env.spec.id:
        # Recreate pybullet envs from the registry (presumably the unpickled
        # instance can't render correctly — confirm).
        env = gym.make(env.spec.id)
        if render:
            # pybullet envs have to initialize rendering before calling env.reset
            env.render()
    o, r, d, ep_ret, ep_len, n = env.reset(), 0, False, 0, 0, 0
    while n < num_episodes:
        if render:
            env.render()
            time.sleep(1e-3)
        a = get_action(o)
        o, r, d, _ = env.step(a)
        ep_ret += r
        ep_len += 1
        # An episode ends when the env signals done or max_ep_len is reached.
        if d or (ep_len == max_ep_len):
            logger.store(EpRet=ep_ret, EpLen=ep_len)
            print('Episode %d \t EpRet %.3f \t EpLen %d'%(n, ep_ret, ep_len))
            o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
            n += 1
    logger.log_tabular('EpRet', with_min_and_max=True)
    logger.log_tabular('EpLen', average_only=True)
    logger.dump_tabular()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('fpath', type=str)
    parser.add_argument('--len', '-l', type=int, default=0)
    parser.add_argument('--episodes', '-n', type=int, default=100)
    parser.add_argument('--norender', '-nr', action='store_true')
    parser.add_argument('--itr', '-i', type=int, default=-1)
    parser.add_argument('--deterministic', '-d', action='store_true')
    args = parser.parse_args()
    # A negative --itr means "use the most recent checkpoint".
    env, get_action = load_policy_and_env(args.fpath,
                                          args.itr if args.itr >=0 else 'last',
                                          args.deterministic)
    run_policy(env, get_action, args.len, args.episodes, not(args.norender))
| StarcoderdataPython |
1651435 | <filename>src/whoosh/util/times.py
# Copyright 2010 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import calendar
import copy
from datetime import date, datetime, timedelta
from whoosh.compat import iteritems
class TimeError(Exception):
    """Raised when a date/time unit receives an out-of-range or invalid value."""
    pass
def relative_days(current_wday, wday, dir):
    """Return the signed number of days to the "next" or "last" weekday.

    ``current_wday`` and ``wday`` are weekday numbers (0 = monday,
    1 = tuesday, etc.).  ``dir`` is 1 for the next (future) occurrence and
    -1 for the last (past) one.  When both weekdays are equal the result is
    a full week in the requested direction.

    >>> # Get the number of days to the next tuesday, if today is Sunday
    >>> relative_days(6, 1, 1)
    2
    """
    if current_wday == wday:
        return 7 * dir
    # Forward distance from today to the target weekday, in 1..6.
    delta = (wday - current_wday) % 7
    if dir == 1:
        return delta
    # Backward distance is the (negative) complement of the forward one.
    return delta - 7
def timedelta_to_usecs(td):
    """Return the total number of microseconds spanned by the timedelta *td*."""
    whole_seconds = td.days * 24 * 3600 + td.seconds
    return whole_seconds * 1000000 + td.microseconds
def datetime_to_long(dt):
    """Converts a datetime object to a long integer representing the number
    of microseconds since ``datetime.min``.
    """
    # Drop any tzinfo so the subtraction against the naive datetime.min works,
    # then flatten the resulting timedelta into microseconds.
    delta = dt.replace(tzinfo=None) - dt.min
    return (delta.days * 86400 + delta.seconds) * 1000000 + delta.microseconds
def long_to_datetime(x):
    """Converts a long integer representing the number of microseconds since
    ``datetime.min`` to a datetime object.
    """
    days, remainder = divmod(x, 86400000000)  # Microseconds in a day
    seconds, microseconds = divmod(remainder, 1000000)  # Microseconds in a second
    return datetime.min + timedelta(days=days, seconds=seconds,
                                    microseconds=microseconds)
# Ambiguous datetime object
class adatetime(object):
    """An "ambiguous" datetime object. This object acts like a
    ``datetime.datetime`` object but can have any of its attributes set to
    None, meaning unspecified.
    """
    # All unit attribute names; None in any of them means "unspecified".
    units = frozenset(("year", "month", "day", "hour", "minute", "second",
                       "microsecond"))
    def __init__(self, year=None, month=None, day=None, hour=None, minute=None,
                 second=None, microsecond=None):
        if isinstance(year, datetime):
            # Copy constructor: a datetime passed as the first argument.
            dt = year
            self.year, self.month, self.day = dt.year, dt.month, dt.day
            self.hour, self.minute, self.second = dt.hour, dt.minute, dt.second
            self.microsecond = dt.microsecond
        else:
            # Validate each specified unit; None (unspecified) is allowed.
            if month is not None and (month < 1 or month > 12):
                raise TimeError("month must be in 1..12")
            if day is not None and day < 1:
                raise TimeError("day must be greater than 1")
            if (year is not None and month is not None and day is not None
                    and day > calendar.monthrange(year, month)[1]):
                raise TimeError("day is out of range for month")
            if hour is not None and (hour < 0 or hour > 23):
                raise TimeError("hour must be in 0..23")
            if minute is not None and (minute < 0 or minute > 59):
                raise TimeError("minute must be in 0..59")
            if second is not None and (second < 0 or second > 59):
                raise TimeError("second must be in 0..59")
            if microsecond is not None and (microsecond < 0
                                            or microsecond > 999999):
                raise TimeError("microsecond must be in 0..999999")
            self.year, self.month, self.day = year, month, day
            self.hour, self.minute, self.second = hour, minute, second
            self.microsecond = microsecond
    def __eq__(self, other):
        if not other.__class__ is self.__class__:
            # A fully-specified adatetime may compare equal to a datetime.
            if not is_ambiguous(self) and isinstance(other, datetime):
                return fix(self) == other
            else:
                return False
        return all(getattr(self, unit) == getattr(other, unit)
                   for unit in self.units)
    def __repr__(self):
        return "%s%r" % (self.__class__.__name__, self.tuple())
    def tuple(self):
        """Returns the attributes of the ``adatetime`` object as a tuple of
        ``(year, month, day, hour, minute, second, microsecond)``.
        """
        return (self.year, self.month, self.day, self.hour, self.minute,
                self.second, self.microsecond)
    def date(self):
        # Raises TypeError if year/month/day are unspecified (None).
        return date(self.year, self.month, self.day)
    def copy(self):
        return adatetime(year=self.year, month=self.month, day=self.day,
                         hour=self.hour, minute=self.minute, second=self.second,
                         microsecond=self.microsecond)
    def replace(self, **kwargs):
        """Returns a copy of this object with the attributes given as keyword
        arguments replaced.
        >>> adt = adatetime(year=2009, month=10, day=31)
        >>> adt.replace(year=2010)
        (2010, 10, 31, None, None, None, None)
        """
        newadatetime = self.copy()
        for key, value in iteritems(kwargs):
            if key in self.units:
                setattr(newadatetime, key, value)
            else:
                raise KeyError("Unknown argument %r" % key)
        return newadatetime
    def floor(self):
        """Returns a ``datetime`` version of this object with all unspecified
        (None) attributes replaced by their lowest values.
        This method raises an error if the ``adatetime`` object has no year.
        >>> adt = adatetime(year=2009, month=5)
        >>> adt.floor()
        datetime.datetime(2009, 5, 1, 0, 0, 0, 0)
        """
        y, m, d, h, mn, s, ms = (self.year, self.month, self.day, self.hour,
                                 self.minute, self.second, self.microsecond)
        if y is None:
            raise ValueError("Date has no year")
        if m is None:
            m = 1
        if d is None:
            d = 1
        if h is None:
            h = 0
        if mn is None:
            mn = 0
        if s is None:
            s = 0
        if ms is None:
            ms = 0
        return datetime(y, m, d, h, mn, s, ms)
    def ceil(self):
        """Returns a ``datetime`` version of this object with all unspecified
        (None) attributes replaced by their highest values.
        This method raises an error if the ``adatetime`` object has no year.
        >>> adt = adatetime(year=2009, month=5)
        >>> adt.ceil()
        datetime.datetime(2009, 5, 31, 23, 59, 59, 999999)
        """
        y, m, d, h, mn, s, ms = (self.year, self.month, self.day, self.hour,
                                 self.minute, self.second, self.microsecond)
        if y is None:
            raise ValueError("Date has no year")
        if m is None:
            m = 12
        if d is None:
            # Last day of the (possibly defaulted) month.
            d = calendar.monthrange(y, m)[1]
        if h is None:
            h = 23
        if mn is None:
            mn = 59
        if s is None:
            s = 59
        if ms is None:
            ms = 999999
        return datetime(y, m, d, h, mn, s, ms)
    def disambiguated(self, basedate):
        """Returns either a ``datetime`` or unambiguous ``timespan`` version
        of this object.
        Unless this ``adatetime`` object is full specified down to the
        microsecond, this method will return a timespan built from the "floor"
        and "ceil" of this object.
        This method raises an error if the ``adatetime`` object has no year.
        >>> adt = adatetime(year=2009, month=10, day=31)
        >>> adt.disambiguated(basedate)
        timespan(datetime(2009, 10, 31, 0, 0, 0, 0), datetime(2009, 10, 31, 23, 59 ,59, 999999)
        """
        dt = self
        if not is_ambiguous(dt):
            return fix(dt)
        return timespan(dt, dt).disambiguated(basedate)
# Time span class
class timespan(object):
    """A span of time between two ``datetime`` or ``adatetime`` objects.
    """
    def __init__(self, start, end):
        """
        :param start: a ``datetime`` or ``adatetime`` object representing the
            start of the time span.
        :param end: a ``datetime`` or ``adatetime`` object representing the
            end of the time span.
        """
        if not isinstance(start, (datetime, adatetime)):
            raise TimeError("%r is not a datetime object" % start)
        if not isinstance(end, (datetime, adatetime)):
            raise TimeError("%r is not a datetime object" % end)
        # Shallow copies so later disambiguation can't mutate the caller's objects.
        self.start = copy.copy(start)
        self.end = copy.copy(end)
    def __eq__(self, other):
        if not other.__class__ is self.__class__:
            return False
        return self.start == other.start and self.end == other.end
    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__, self.start, self.end)
    def disambiguated(self, basedate, debug=0):
        """Returns an unambiguous version of this object.
        >>> start = adatetime(year=2009, month=2)
        >>> end = adatetime(year=2009, month=10)
        >>> ts = timespan(start, end)
        >>> ts
        timespan(adatetime(2009, 2, None, None, None, None, None), adatetime(2009, 10, None, None, None, None, None))
        >>> ts.disambiguated(datetime.now())
        timespan(datetime(2009, 2, 28, 0, 0, 0, 0), datetime(2009, 10, 31, 23, 59 ,59, 999999)
        """
        #- If year is in start but not end, use basedate.year for end
        #-- If year is in start but not end, but startdate is > basedate,
        #   use "next <monthname>" to get end month/year
        #- If year is in end but not start, copy year from end to start
        #- Support "next february", "last april", etc.
        start, end = copy.copy(self.start), copy.copy(self.end)
        start_year_was_amb = start.year is None
        end_year_was_amb = end.year is None
        if has_no_date(start) and has_no_date(end):
            # The start and end points are just times, so use the basedate
            # for the date information.
            by, bm, bd = basedate.year, basedate.month, basedate.day
            start = start.replace(year=by, month=bm, day=bd)
            end = end.replace(year=by, month=bm, day=bd)
        else:
            # If one side has a year and the other doesn't, the decision
            # of what year to assign to the ambiguous side is kind of
            # arbitrary. I've used a heuristic here based on how the range
            # "reads", but it may only be reasonable in English. And maybe
            # even just to me.
            if start.year is None and end.year is None:
                # No year on either side, use the basedate
                start.year = end.year = basedate.year
            elif start.year is None:
                # No year in the start, use the year from the end
                start.year = end.year
            elif end.year is None:
                end.year = max(start.year, basedate.year)
        if start.year == end.year:
            # Once again, if one side has a month and day but the other side
            # doesn't, the disambiguation is arbitrary. Does "3 am to 5 am
            # tomorrow" mean 3 AM today to 5 AM tomorrow, or 3am tomorrow to
            # 5 am tomorrow? What I picked is similar to the year: if the
            # end has a month+day and the start doesn't, copy the month+day
            # from the end to the start UNLESS that would make the end come
            # before the start on that day, in which case use the basedate
            # instead. If the start has a month+day and the end doesn't, use
            # the basedate.
            start_dm = not (start.month is None and start.day is None)
            end_dm = not (end.month is None and end.day is None)
            if end_dm and not start_dm:
                if start.floor().time() > end.ceil().time():
                    start.month = basedate.month
                    start.day = basedate.day
                else:
                    start.month = end.month
                    start.day = end.day
            elif start_dm and not end_dm:
                end.month = basedate.month
                end.day = basedate.day
        if floor(start).date() > ceil(end).date():
            # If the disambiguated dates are out of order:
            # - If no start year was given, reduce the start year to put the
            #   start before the end
            # - If no end year was given, increase the end year to put the end
            #   after the start
            # - If a year was specified for both, just swap the start and end
            if start_year_was_amb:
                start.year = end.year - 1
            elif end_year_was_amb:
                end.year = start.year + 1
            else:
                start, end = end, start
        start = floor(start)
        end = ceil(end)
        if start.date() == end.date() and start.time() > end.time():
            # If the start and end are on the same day, but the start time
            # is after the end time, move the end time to the next day
            end += timedelta(days=1)
        return timespan(start, end)
# Functions for working with datetime/adatetime objects
def floor(at):
    """Collapse *at* to a plain ``datetime``: datetimes pass through untouched,
    anything else (an ``adatetime``) is asked for its lowest concrete value."""
    return at if isinstance(at, datetime) else at.floor()
def ceil(at):
    """Collapse *at* to a plain ``datetime``: datetimes pass through untouched,
    anything else (an ``adatetime``) is asked for its highest concrete value."""
    return at if isinstance(at, datetime) else at.ceil()
def fill_in(at, basedate, units=adatetime.units):
    """Returns a copy of ``at`` with any unspecified (None) units filled in
    with values from ``basedate``.
    """
    if isinstance(at, datetime):
        return at
    args = {}
    for unit in units:
        v = getattr(at, unit)
        if v is None:
            v = getattr(basedate, unit)
        args[unit] = v
    # fix() collapses the result to a plain datetime once fully specified.
    return fix(adatetime(**args))
def has_no_date(at):
    """Returns True if the given object is an ``adatetime`` where ``year``,
    ``month``, and ``day`` are all None.
    """
    if isinstance(at, datetime):
        return False
    return all(getattr(at, unit) is None for unit in ("year", "month", "day"))
def has_no_time(at):
    """Returns True if the given object is an ``adatetime`` where ``hour``,
    ``minute``, ``second`` and ``microsecond`` are all None.
    """
    if isinstance(at, datetime):
        return False
    return all(getattr(at, unit) is None
               for unit in ("hour", "minute", "second", "microsecond"))
def is_ambiguous(at):
    """Returns True if the given object is an ``adatetime`` with any of its
    attributes equal to None.
    """
    if isinstance(at, datetime):
        return False
    for attr in adatetime.units:
        if getattr(at, attr) is None:
            return True
    return False
def is_void(at):
    """Returns True if the given object is an ``adatetime`` with all of its
    attributes equal to None.
    """
    if isinstance(at, datetime):
        return False
    for attr in adatetime.units:
        if getattr(at, attr) is not None:
            return False
    return True
def fix(at):
    """If the given object is an ``adatetime`` that is unambiguous (because
    all its attributes are specified, that is, not equal to None), returns a
    ``datetime`` version of it. Otherwise returns the ``adatetime`` object
    unchanged.
    """
    if is_ambiguous(at) or isinstance(at, datetime):
        return at
    # All seven units are specified; promote to a real datetime.
    return datetime(year=at.year, month=at.month, day=at.day, hour=at.hour,
                    minute=at.minute, second=at.second,
                    microsecond=at.microsecond)
| StarcoderdataPython |
73995 | <gh_stars>0
def do_stuff(fn, lhs, rhs):
    """Apply the binary function *fn* to *lhs* and *rhs* and return the result."""
    result = fn(lhs, rhs)
    return result
def add(lhs, rhs):
    """Return the sum of *lhs* and *rhs*."""
    total = lhs + rhs
    return total
def multiply(lhs, rhs):
    """Return the product of *lhs* and *rhs*."""
    product = lhs * rhs
    return product
def exponent(lhs, rhs):
    """Return *lhs* raised to the power *rhs*."""
    power = lhs ** rhs
    return power
# Demonstrate passing each operation to do_stuff with the operands 2 and 3.
print(do_stuff(add, 2, 3))  # 5
print(do_stuff(multiply, 2, 3))  # 6
print(do_stuff(exponent, 2, 3))  # 8
| StarcoderdataPython |
116695 | from dataset import Dataset
from util import Util
class Feature:
    """Builds and post-processes the feature table for Tokyo, years 2008-2019."""

    def __init__(self, use_features):
        """Load the 2008-2019 Tokyo data for the selected feature columns."""
        self.dataset = Dataset(use_features)
        years = list(range(2008, 2020))
        self.data = self.dataset.get_data(years, "tokyo")

    def get_dataset(self):
        """Return a defensive copy of the feature table."""
        return self.data.copy()

    def register_feature(self, feature, feature_name):
        """Persist *feature* under *feature_name* via the Util feature store."""
        Util.dump_feature(feature, feature_name)

    def standarlization(self):
        # (Method name keeps the original spelling for caller compatibility.)
        """Z-score-normalize every numeric column of ``self.data`` in place.

        Bug fix: the original guard ``self.data[name][0] is int`` compared a
        *value* against the ``int`` type object with identity, which is always
        False, so no column was ever standardized.  We now check the column's
        dtype kind instead, which also handles numpy scalar types correctly.
        """
        for name in self.data.columns:
            # dtype.kind: 'i'/'u' = (un)signed int, 'f' = float;
            # excludes bool ('b') and object/string ('O') columns.
            if self.data[name].dtype.kind in "iuf":
                self.data[name] = (
                    self.data[name] - self.data[name].mean()
                ) / self.data[name].std(ddof=0)
| StarcoderdataPython |
3364353 | """
Generate `pyi` from corresponding `rst` docs.
"""
import rst
from class_ import Class
from rst2pyi import RST2PyI
__author__ = rst.__author__
__copyright__ = rst.__copyright__
__license__ = rst.__license__
__version__ = "7.2.0" # Version set by https://github.com/hlovatt/tag2ver
def pyb(shed: RST2PyI) -> None:
    """Drive conversion of the whole ``pyb`` RST documentation into a ``.pyi``.

    Each helper consumes one class's RST and returns the name of the next RST
    file to process, which is threaded through as ``nxt``.
    """
    _pyb(shed)
    nxt = _accel(shed)
    nxt = _adc(nxt, shed)
    nxt = _can(nxt, shed)
    nxt = _dac(nxt, shed)
    nxt = _ext_int(nxt, shed)
    nxt = _flash(nxt, shed)
    nxt = _i2c(nxt, shed)
    nxt = _lcd(nxt, shed)
    nxt = _led(nxt, shed)
    nxt = _pin(nxt, shed)
    nxt = _rtc(nxt, shed)
    nxt = _servo(nxt, shed)
    nxt = _spi(nxt, shed)
    nxt = _switch(nxt, shed)
    nxt = _timer(nxt, shed)
    nxt = _uart(nxt, shed)
    nxt = _usb_hid(nxt, shed)
    _usb_vcp(nxt, shed)
    shed.write()
def _usb_vcp(this: str, shed: RST2PyI) -> None:
    """Convert the USB_VCP class RST docs (the last file; returns nothing)."""
    shed.class_from_file(
        pre_str="# noinspection PyPep8Naming", old=this,
    )
    shed.def_(
        old=r".. class:: pyb.USB_VCP(id=0)", new="def __init__(self, id: int = 0, /)",
    )
    shed.def_(
        old=r".. method:: USB_VCP.init(*, flow=-1)",
        new="def init(self, *, flow: int = - 1) -> int",
    )
    shed.def_(
        old=r".. method:: USB_VCP.setinterrupt(chr)",
        new="def setinterrupt(self, chr: int, /) -> None",
    )
    shed.def_(
        old=r".. method:: USB_VCP.isconnected()", new="def isconnected(self) -> bool",
    )
    shed.def_(
        old=r".. method:: USB_VCP.any()", new="def any(self) -> bool",
    )
    shed.def_(
        old=r".. method:: USB_VCP.close()", new="def close(self) -> None",
    )
    shed.def_(
        old=r".. method:: USB_VCP.read([nbytes])",
        new=[
            "def read(self) -> bytes | None",
            "def read(self, nbytes, /) -> bytes | None",
        ],
    )
    shed.def_(
        old=r".. method:: USB_VCP.readinto(buf, [maxlen])",
        new=[
            "def readinto(self, buf: AnyWritableBuf, /) -> int | None",
            "def readinto(self, buf: AnyWritableBuf, maxlen: int, /) -> int | None",
        ],
    )
    shed.def_(
        old=r".. method:: USB_VCP.readline()", new="def readline(self) -> bytes | None",
    )
    shed.def_(
        old=r".. method:: USB_VCP.readlines()",
        new="def readlines(self) -> list[bytes] | None",
    )
    shed.def_(
        old=r".. method:: USB_VCP.write(buf)",
        new="def write(self, buf: AnyReadableBuf, /) -> int",
    )
    shed.def_(
        old=r".. method:: USB_VCP.recv(data, *, timeout=5000)",
        new=[
            "def recv(self, data: int, /, *, timeout: int = 5000) -> bytes | None",
            "def recv(self, data: AnyWritableBuf, /, *, timeout: int = 5000) -> int | None",
        ],
    )
    shed.def_(
        old=r".. method:: USB_VCP.send(data, *, timeout=5000)",
        new="def send(self, buf: AnyWritableBuf | bytes | int, /, *, timeout: int = 5000) -> int",
    )
    shed.vars(
        old=[".. data:: USB_VCP.RTS", "USB_VCP.CTS"], end=None,
    )
def _usb_hid(this: str, shed: RST2PyI) -> str:
    """Convert the USB_HID class RST docs; returns the next RST filename."""
    shed.class_from_file(
        pre_str="# noinspection PyPep8Naming", old=this,
    )
    shed.def_(
        old=r".. class:: pyb.USB_HID()", new="def __init__(self)",
    )
    shed.def_(
        old=r".. method:: USB_HID.recv(data, *, timeout=5000)",
        new=[
            "def recv(self, data: int, /, *, timeout: int = 5000) -> bytes",
            "def recv(self, data: AnyWritableBuf, /, *, timeout: int = 5000) -> int",
        ],
    )
    nxt = "pyb.USB_VCP.rst"
    shed.def_(
        old=r".. method:: USB_HID.send(data)",
        new="def send(self, data: Sequence[int]) -> None",
        end=nxt,
    )
    return nxt
def _uart(this: str, shed: RST2PyI) -> str:
    """Convert the UART class RST docs; returns the next RST filename."""
    shed.class_from_file(
        pre_str="# noinspection PyShadowingNames", old=this,
    )
    shed.def_(
        old=r".. class:: pyb.UART(bus, ...)",
        new=[
            """
            def __init__(
                self,
                bus: int | str,
                /
            )
            """,
            """
            def __init__(
                self,
                bus: int | str,
                baudrate: int,
                /,
                bits: int = 8,
                parity: int | None = None,
                stop: int = 1,
                *,
                timeout: int = 0,
                flow: int = 0,
                timeout_char: int = 0,
                read_buf_len: int = 64
            )
            """,
        ],
    )
    shed.def_(
        old=(
            r".. method:: UART.init(baudrate, bits=8, parity=None, stop=1, *, "
            r"timeout=0, flow=0, timeout_char=0, read_buf_len=64)"
        ),
        new="""
        def init(
            self,
            baudrate: int,
            /,
            bits: int = 8,
            parity: int | None = None,
            stop: int = 1,
            *,
            timeout: int = 0,
            flow: int = 0,
            timeout_char: int = 0,
            read_buf_len: int = 64
        )
        """,
    )
    shed.def_(
        old=r".. method:: UART.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=r".. method:: UART.any()", new="def any(self) -> int",
    )
    shed.def_(
        old=r".. method:: UART.read([nbytes])",
        new=[
            "def read(self) -> bytes | None",
            "def read(self, nbytes: int, /) -> bytes | None",
        ],
    )
    shed.def_(
        old=r".. method:: UART.readchar()", new="def readchar(self) -> int",
    )
    shed.def_(
        old=r".. method:: UART.readinto(buf[, nbytes])",
        new=[
            "def readinto(self, buf: AnyWritableBuf, /) -> int | None",
            "def readinto(self, buf: AnyWritableBuf, nbytes: int, /) -> int | None",
        ],
    )
    shed.def_(
        old=r".. method:: UART.readline()", new="def readline(self) -> str | None",
    )
    shed.def_(
        old=r".. method:: UART.write(buf)",
        new="def write(self, buf: AnyWritableBuf, /) -> int | None",
    )
    shed.def_(
        old=r".. method:: UART.writechar(char)",
        new="def writechar(self, char: int, /) -> None",
    )
    shed.def_(
        old=r".. method:: UART.sendbreak()", new="def sendbreak(self) -> None",
    )
    shed.vars(
        old=[".. data:: UART.RTS", "UART.CTS"], end="Flow Control",
    )
    nxt = "pyb.USB_HID.rst"
    # Remaining prose (flow-control notes) becomes module-level doc text.
    shed.pyi.doc.extend(shed.extra_notes(end=nxt))
    return nxt
def _timer_channel(*, old: str, end: str, shed: RST2PyI) -> None:
    """Manufacture the abstract ``TimerChannel`` class from its free-form RST
    section (which has no ``.. class::`` directive of its own)."""
    shed.consume_containing_line(old)
    shed.consume_equals_underline_line()
    shed.consume_blank_line()
    methods = "Methods"
    doc = []
    # Everything up to the "Methods" heading becomes the class docstring.
    for doc_line in shed.rst:
        if doc_line.startswith(methods):
            shed.consume_minuses_underline_line()
            shed.consume_blank_line()
            break
        doc.append(f"   {doc_line}\n")
    else:
        assert False, f"Did not find: `{methods}`"
    new_class = Class()
    new_class.class_def = f"class TimerChannel(ABC):"
    new_class.doc = doc
    shed.pyi.classes.append(new_class)
    shed.def_(
        old=".. method:: timerchannel.callback(fun)",
        new="""
        @abstractmethod
        def callback(self, fun: Callable[[Timer], None] | None, /) -> None
        """,
    )
    shed.def_(
        old=".. method:: timerchannel.capture([value])",
        new=[
            """
            @abstractmethod
            def capture(self) -> int
            """,
            """
            @abstractmethod
            def capture(self, value: int, /) -> None
            """,
        ],
    )
    shed.def_(
        old=".. method:: timerchannel.compare([value])",
        new=[
            """
            @abstractmethod
            def compare(self) -> int
            """,
            """
            @abstractmethod
            def compare(self, value: int, /) -> None
            """,
        ],
    )
    shed.def_(
        old=".. method:: timerchannel.pulse_width([value])",
        new=[
            """
            @abstractmethod
            def pulse_width(self) -> int
            """,
            """
            @abstractmethod
            def pulse_width(self, value: int, /) -> None
            """,
        ],
    )
    shed.def_(
        old=".. method:: timerchannel.pulse_width_percent([value])",
        new=[
            """
            @abstractmethod
            def pulse_width_percent(self) -> float
            """,
            """
            @abstractmethod
            def pulse_width_percent(self, value: int | float, /) -> None
            """,
        ],
        end=end,
    )
def _timer(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.Timer.rst`` into the ``Timer`` stub class, then hand the
    trailing "TimerChannel" section to ``_timer_channel``.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.UART.rst").
    """
    # The class-level constants are not individually marked up in the RST,
    # so they are emitted wholesale via ``post_doc``.
    shed.class_from_file(
        pre_str="# noinspection PyShadowingNames,PyUnresolvedReferences",
        old=this,
        post_doc='''
    UP: ClassVar[int] = ...
    """
    configures the timer to count from 0 to ARR (default).
    """
    DOWN: ClassVar[int] = ...
    """
    configures the timer to count from ARR down to 0.
    """
    CENTER: ClassVar[int] = ...
    """
    configures the timer to count from 0 to ARR and then back down to 0.
    """
    PWM: ClassVar[int] = ...
    """
    configure the timer in PWM mode (active high).
    """
    PWM_INVERTED: ClassVar[int] = ...
    """
    configure the timer in PWM mode (active low).
    """
    OC_TIMING: ClassVar[int] = ...
    """
    indicates that no pin is driven.
    """
    OC_ACTIVE: ClassVar[int] = ...
    """
    the pin will be made active when a compare match occurs (active is determined by polarity).
    """
    OC_INACTIVE: ClassVar[int] = ...
    """
    the pin will be made inactive when a compare match occurs.
    """
    OC_TOGGLE: ClassVar[int] = ...
    """
    the pin will be toggled when an compare match occurs.
    """
    OC_FORCED_ACTIVE: ClassVar[int] = ...
    """
    the pin is forced active (compare match is ignored).
    """
    OC_FORCED_INACTIVE: ClassVar[int] = ...
    """
    the pin is forced inactive (compare match is ignored).
    """
    IC: ClassVar[int] = ...
    """
    configure the timer in Input Capture mode.
    """
    ENC_A: ClassVar[int] = ...
    """
    configure the timer in Encoder mode. The counter only changes when CH1 changes.
    """
    ENC_B: ClassVar[int] = ...
    """
    configure the timer in Encoder mode. The counter only changes when CH2 changes.
    """
    ENC_AB: ClassVar[int] = ...
    """
    configure the timer in Encoder mode. The counter changes when CH1 or CH2 changes.
    """
    HIGH: ClassVar[int] = ...
    """
    output is active high.
    """
    LOW: ClassVar[int] = ...
    """
    output is active low.
    """
    RISING: ClassVar[int] = ...
    """
    captures on rising edge.
    """
    FALLING: ClassVar[int] = ...
    """
    captures on falling edge.
    """
    BOTH: ClassVar[int] = ...
    """
    captures on both edges.
    """
''',
    )
    shed.def_(
        old=r".. class:: pyb.Timer(id, ...)",
        new=[
            """
def __init__(
    self,
    id: int,
    /
)
""",
            """
def __init__(
    self,
    id: int,
    /,
    *,
    freq: int,
    mode: int = UP,
    div: int = 1,
    callback: Callable[[Timer], None] | None = None,
    deadtime: int = 0
)
""",
            """
def __init__(
    self,
    id: int,
    /,
    *,
    prescaler: int,
    period: int,
    mode: int = UP,
    div: int = 1,
    callback: Callable[[Timer], None] | None = None,
    deadtime: int = 0
)
""",
        ],
    )
    shed.def_(
        old=r".. method:: Timer.init(*, freq, prescaler, period, mode=Timer.UP, div=1, callback=None, deadtime=0)",
        new=[
            """
def init(
    self,
    *,
    freq: int,
    mode: int = UP,
    div: int = 1,
    callback: Callable[[Timer], None] | None = None,
    deadtime: int = 0
) -> None
""",
            """
def init(
    self,
    *,
    prescaler: int,
    period: int,
    mode: int = UP,
    div: int = 1,
    callback: Callable[[Timer], None] | None = None,
    deadtime: int = 0
) -> None
""",
        ],
    )
    shed.def_(
        old=r".. method:: Timer.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=r".. method:: Timer.callback(fun)",
        new="def callback(self, fun: Callable[[Timer], None] | None, /) -> None",
    )
    # ``channel`` has one overload per distinct keyword combination in the docs.
    shed.def_(
        old=r".. method:: Timer.channel(channel, mode, ...)",
        new=[
            """
def channel(
    self,
    channel: int,
    /
) -> "TimerChannel" | None
""",
            """
def channel(
    self,
    channel: int,
    /,
    mode: int,
    *,
    callback: Callable[[Timer], None] | None = None,
    pin: Pin | None = None,
    pulse_width: int,
) -> "TimerChannel"
""",
            """
def channel(
    self,
    channel: int,
    /,
    mode: int,
    *,
    callback: Callable[[Timer], None] | None = None,
    pin: Pin | None = None,
    pulse_width_percent: int | float,
) -> "TimerChannel"
""",
            """
def channel(
    self,
    channel: int,
    /,
    mode: int,
    *,
    callback: Callable[[Timer], None] | None = None,
    pin: Pin | None = None,
    compare: int,
    polarity: int,
) -> "TimerChannel"
""",
            """
def channel(
    self,
    channel: int,
    /,
    mode: int,
    *,
    callback: Callable[[Timer], None] | None = None,
    pin: Pin | None = None,
    polarity: int,
) -> "TimerChannel"
""",
            """
def channel(
    self,
    channel: int,
    /,
    mode: int,
    *,
    callback: Callable[[Timer], None] | None = None,
    pin: Pin | None = None,
) -> "TimerChannel"
""",
        ],
    )
    shed.def_(
        old=".. method:: Timer.counter([value])",
        new=["def counter(self) -> int", "def counter(self, value: int, /) -> None"],
    )
    shed.def_(
        old=".. method:: Timer.freq([value])",
        new=["def freq(self) -> int", "def freq(self, value: int, /) -> None"],
    )
    shed.def_(
        old=".. method:: Timer.period([value])",
        new=["def period(self) -> int", "def period(self, value: int, /) -> None"],
    )
    shed.def_(
        old=".. method:: Timer.prescaler([value])",
        new=[
            "def prescaler(self) -> int",
            "def prescaler(self, value: int, /) -> None",
        ],
    )
    timer_channel = "class TimerChannel --- setup a channel for a timer"
    shed.def_(
        old=r".. method:: Timer.source_freq()",
        new="def source_freq(self) -> int",
        end=timer_channel,
    )
    nxt = "pyb.UART.rst"
    # The rest of the file documents the TimerChannel helper class.
    _timer_channel(old=timer_channel, end=nxt, shed=shed)
    return nxt
def _switch(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.Switch.rst`` into the ``Switch`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.Timer.rst").
    """
    shed.class_from_file(old=this)
    # The first three members share one simple translation pattern.
    simple_members = (
        (r".. class:: pyb.Switch()", "def __init__(self)"),
        (r".. method:: Switch.__call__()", "def __call__(self) -> bool"),
        (r".. method:: Switch.value()", "def value(self) -> bool"),
    )
    for rst_decl, stub_decl in simple_members:
        shed.def_(old=rst_decl, new=stub_decl)
    next_file = "pyb.Timer.rst"
    shed.def_(
        old=r".. method:: Switch.callback(fun)",
        new="def callback(self, fun: Callable[[], None] | None) -> None",
        end=next_file,
    )
    return next_file
def _spi(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.SPI.rst`` into the ``SPI`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.Switch.rst").
    """
    shed.class_from_file(old=this)
    # Constructor: bare-bus form plus the baudrate and prescaler variants.
    shed.def_(
        old=".. class:: pyb.SPI(bus, ...)",
        new=[
            """
def __init__(self, bus: int, /)
""",
            """
def __init__(
    self,
    bus: int,
    /,
    mode: int = CONTROLLER,
    baudrate: int = 328125,
    *,
    polarity: int = 1,
    phase: int = 0,
    bits: int = 8,
    firstbit: int = MSB,
    ti: bool = False,
    crc: int | None = None
)
""",
            """
def __init__(
    self,
    bus: int,
    /,
    mode: int = CONTROLLER,
    *,
    prescaler: int = 256,
    polarity: int = 1,
    phase: int = 0,
    bits: int = 8,
    firstbit: int = MSB,
    ti: bool = False,
    crc: int | None = None
)
""",
        ],
    )
    shed.def_(
        old=r".. method:: SPI.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=(
            r".. method:: SPI.init(mode, baudrate=328125, *, prescaler, "
            r"polarity=1, phase=0, bits=8, firstbit=SPI.MSB, ti=False, crc=None)"
        ),
        new=[
            """
def init(
    self,
    mode: int = CONTROLLER,
    baudrate: int = 328125,
    *,
    polarity: int = 1,
    phase: int = 0,
    bits: int = 8,
    firstbit: int = MSB,
    ti: bool = False,
    crc: int | None = None
)
""",
            """
def init(
    self,
    mode: int = CONTROLLER,
    *,
    prescaler: int = 256,
    polarity: int = 1,
    phase: int = 0,
    bits: int = 8,
    firstbit: int = MSB,
    ti: bool = False,
    crc: int | None = None
)
""",
        ],
    )
    shed.def_(
        old=r".. method:: SPI.recv(recv, *, timeout=5000)",
        new="def recv(self, recv: int | AnyWritableBuf, /, *, timeout: int = 5000) -> AnyWritableBuf",
    )
    shed.def_(
        old=r".. method:: SPI.send(send, *, timeout=5000)",
        new="def send(self, send: int | AnyWritableBuf | bytes, /, *, timeout: int = 5000) -> None",
    )
    shed.def_(
        old=r".. method:: SPI.send_recv(send, recv=None, *, timeout=5000)",
        new="""
def send_recv(
    self,
    send: int | bytearray | array | bytes,
    recv: AnyWritableBuf | None = None,
    /,
    *,
    timeout: int = 5000
) -> AnyWritableBuf
""",
    )
    shed.vars(old=[".. data:: SPI.CONTROLLER", ".. data:: SPI.PERIPHERAL"],)
    nxt = "pyb.Switch.rst"
    shed.vars(
        old=[".. data:: SPI.LSB", ".. data:: SPI.MSB"], end=nxt,
    )
    return nxt
def _servo(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.Servo.rst`` into the ``Servo`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.SPI.rst").

    Fix: the ``Servo.pulse_width`` overloads were previously emitted under
    the name ``speed`` (copy-paste error), producing duplicate ``speed``
    stubs and no ``pulse_width`` stub at all.
    """
    shed.class_from_file(old=this)
    shed.def_(
        old=".. class:: pyb.Servo(id)", new="def __init__(self, id: int, /)",
    )
    shed.def_(
        old=".. method:: Servo.angle([angle, time=0])",
        new=[
            "def angle(self) -> int",
            "def angle(self, angle: int, time: int = 0, /) -> None",
        ],
    )
    shed.def_(
        old=".. method:: Servo.speed([speed, time=0])",
        new=[
            "def speed(self) -> int",
            "def speed(self, speed: int, time: int = 0, /) -> None",
        ],
    )
    shed.def_(
        old=".. method:: Servo.pulse_width([value])",
        # Was mistakenly `def speed(...)` here; `pulse_width` follows the
        # same getter/setter overload pattern as the other optional-argument
        # methods in this file.
        new=[
            "def pulse_width(self) -> int",
            "def pulse_width(self, value: int, /) -> None",
        ],
    )
    nxt = "pyb.SPI.rst"
    shed.def_(
        old=".. method:: Servo.calibration([pulse_min, pulse_max, pulse_centre, [pulse_angle_90, pulse_speed_100]])",
        new=[
            """
def calibration(
    self
) -> tuple[int, int, int, int, int]
""",
            """
def calibration(
    self,
    pulse_min: int,
    pulse_max: int,
    pulse_centre: int,
    /
) -> None
""",
            """
def calibration(
    self,
    pulse_min: int,
    pulse_max: int,
    pulse_centre: int,
    pulse_angle_90: int,
    pulse_speed_100: int,
    /
) -> None
""",
        ],
        end=nxt,
    )
    return nxt
def _rtc(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.RTC.rst`` into the ``RTC`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.Servo.rst").

    Fix: ``RTC.datetime([datetimetuple])`` takes an *optional* argument —
    called with no argument it returns the current 8-tuple — so a
    getter/setter overload pair is emitted (matching the pattern used for
    ``RTC.calibration`` below) instead of a setter-only stub.
    """
    shed.class_from_file(old=this)
    shed.def_(
        old=".. class:: pyb.RTC()", new="def __init__(self)",
    )
    shed.def_(
        old=".. method:: RTC.datetime([datetimetuple])",
        new=[
            "def datetime(self) -> tuple[int, int, int, int, int, int, int, int]",
            "def datetime(self, datetimetuple: tuple[int, int, int, int, int, int, int, int], /) -> None",
        ],
    )
    shed.def_(
        old=".. method:: RTC.wakeup(timeout, callback=None)",
        new="def wakeup(self, timeout: int, callback: Callable[[RTC], None] | None = None, /) -> None",
    )
    shed.def_(
        old=".. method:: RTC.info()", new="def info(self) -> int",
    )
    nxt = "pyb.Servo.rst"
    shed.def_(
        old=".. method:: RTC.calibration(cal)",
        new=[
            "def calibration(self) -> int",
            "def calibration(self, cal: int, /) -> None",
        ],
        end=nxt,
    )
    return nxt
def _pin_af(*, end: str, shed: RST2PyI) -> None:
    """
    Translate the "PinAF" section of ``pyb.Pin.rst`` into an abstract
    ``PinAF`` stub class appended to ``shed.pyi``.

    :param end: the text that terminates the final method's description.
    :param shed: the project RST-to-PyI translator.
    """
    shed.consume_containing_line("class PinAF -- Pin Alternate Functions")
    shed.consume_equals_underline_line()
    shed.consume_blank_line()
    doc = []
    # Free text before the "Methods" sub-heading becomes the class docstring.
    for doc_line in shed.rst:
        if doc_line.startswith("Methods"):
            shed.consume_minuses_underline_line()
            shed.consume_blank_line()
            break
        doc.append(f" {doc_line}\n")
    else:
        assert False, f"Expected `{end}`, but did not find it!"
    new_class = Class()
    shed.pyi.classes.append(new_class)
    new_class.class_def = "class PinAF(ABC):"
    new_class.doc = doc
    new_class.imports_vars.append(" __slots__ = ()")
    shed.def_(
        old=".. method:: pinaf.__str__()",
        new="""
@abstractmethod
def __str__(self) -> str
""",
    )
    shed.def_(
        old=".. method:: pinaf.index()",
        new="""
@abstractmethod
def index(self) -> int
""",
    )
    shed.def_(
        old=".. method:: pinaf.name()",
        new="""
@abstractmethod
def name(self) -> str
""",
    )
    shed.def_(
        old=".. method:: pinaf.reg()",
        new="""
@abstractmethod
def reg(self) -> int
""",
        end=end,
    )
def _pin(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.Pin.rst`` into the ``Pin`` stub class (with its nested
    ``board`` and ``cpu`` pin namespaces), then hand the trailing "PinAF"
    section to ``_pin_af``.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.RTC.rst").
    """
    # The alternate-function constants and the board/cpu pin namespaces are
    # not individually marked up in the RST, so they are emitted wholesale
    # via ``post_doc``.
    shed.class_from_file(
        pre_str="# noinspection PyNestedDecorators",
        old=this,
        post_doc='''
    AF1_TIM1: ClassVar[PinAF] = ...
    """
    Alternate def_ 1, timer 1.
    """
    AF1_TIM2: ClassVar[PinAF] = ...
    """
    Alternate def_ 1, timer 2.
    """
    AF2_TIM3: ClassVar[PinAF] = ...
    """
    Alternate def_ 2, timer 3.
    """
    AF2_TIM4: ClassVar[PinAF] = ...
    """
    Alternate def_ 2, timer 4.
    """
    AF2_TIM5: ClassVar[PinAF] = ...
    """
    Alternate def_ 2, timer 5.
    """
    AF3_TIM10: ClassVar[PinAF] = ...
    """
    Alternate def_ 3, timer 10.
    """
    AF3_TIM11: ClassVar[PinAF] = ...
    """
    Alternate def_ 3, timer 11.
    """
    AF3_TIM8: ClassVar[PinAF] = ...
    """
    Alternate def_ 3, timer 8.
    """
    AF3_TIM9: ClassVar[PinAF] = ...
    """
    Alternate def_ 3, timer 9.
    """
    AF4_I2C1: ClassVar[PinAF] = ...
    """
    Alternate def_ 4, I2C 1.
    """
    AF4_I2C2: ClassVar[PinAF] = ...
    """
    Alternate def_ 4, I2C 2.
    """
    AF5_SPI1: ClassVar[PinAF] = ...
    """
    Alternate def_ 5, SPI 1.
    """
    AF5_SPI2: ClassVar[PinAF] = ...
    """
    Alternate def_ 5, SPI 2.
    """
    AF7_USART1: ClassVar[PinAF] = ...
    """
    Alternate def_ 7, USART 1.
    """
    AF7_USART2: ClassVar[PinAF] = ...
    """
    Alternate def_ 7, USART 2.
    """
    AF7_USART3: ClassVar[PinAF] = ...
    """
    Alternate def_ 7, USART 3.
    """
    AF8_UART4: ClassVar[PinAF] = ...
    """
    Alternate def_ 8, USART 4.
    """
    AF8_USART6: ClassVar[PinAF] = ...
    """
    Alternate def_ 8, USART 6.
    """
    AF9_CAN1: ClassVar[PinAF] = ...
    """
    Alternate def_ 9, CAN 1.
    """
    AF9_CAN2: ClassVar[PinAF] = ...
    """
    Alternate def_ 9, CAN 2.
    """
    AF9_TIM12: ClassVar[PinAF] = ...
    """
    Alternate def_ 9, timer 12.
    """
    AF9_TIM13: ClassVar[PinAF] = ...
    """
    Alternate def_ 9, timer 13.
    """
    AF9_TIM14: ClassVar[PinAF] = ...
    """
    Alternate def_ 9, timer 14.
    """
    ALT: ClassVar[int] = ...
    """
    Initialise the pin to alternate-def_ mode with a push-pull drive (same as `AF_PP`).
    """
    ALT_OPEN_DRAIN: ClassVar[int] = ...
    """
    Initialise the pin to alternate-def_ mode with an open-drain drive (same as `AF_OD`).
    """
    IRQ_FALLING: ClassVar[int] = ...
    """
    Initialise the pin to generate an interrupt on a falling edge.
    """
    IRQ_RISING: ClassVar[int] = ...
    """
    Initialise the pin to generate an interrupt on a rising edge.
    """
    OPEN_DRAIN: ClassVar[int] = ...
    """
    Initialise the pin to output mode with an open-drain drive (same as `OUT_OD`).
    """
    # noinspection PyPep8Naming
    class board:
        """
        The board pins (board nomenclature, e.g. `X1`) that are bought out onto pads on a PyBoard.
        """
        LED_BLUE: ClassVar[Pin] = ...
        """
        The blue LED.
        """
        LED_GREEN: ClassVar[Pin] = ...
        """
        The green LED.
        """
        LED_RED: ClassVar[Pin] = ...
        """
        The red LED.
        """
        LED_YELLOW: ClassVar[Pin] = ...
        """
        The yellow LED.
        """
        MMA_AVDD: ClassVar[Pin] = ...
        """
        Accelerometer (MMA7660) analogue power (AVDD) pin.
        """
        MMA_INT: ClassVar[Pin] = ...
        """
        Accelerometer (MMA7660) interrupt (\\INT) pin.
        """
        SD: ClassVar[Pin] = ...
        """
        SD card present switch (0 for card inserted, 1 for no card) (same as SD_SW).
        """
        SD_CK: ClassVar[Pin] = ...
        """
        SD card clock.
        """
        SD_CMD: ClassVar[Pin] = ...
        """
        SD card command.
        """
        SD_D0: ClassVar[Pin] = ...
        """
        SD card serial data 0.
        """
        SD_D1: ClassVar[Pin] = ...
        """
        SD card serial data 1.
        """
        SD_D2: ClassVar[Pin] = ...
        """
        SD card serial data 2.
        """
        SD_D3: ClassVar[Pin] = ...
        """
        SD card serial data 3.
        """
        SD_SW: ClassVar[Pin] = ...
        """
        SD card present switch (0 for card inserted, 1 for no card) (same as SD).
        """
        SW: ClassVar[Pin] = ...
        """
        Usr switch (0 = pressed, 1 = not pressed).
        """
        USB_DM: ClassVar[Pin] = ...
        """
        USB data -.
        """
        USB_DP: ClassVar[Pin] = ...
        """
        USB data +.
        """
        USB_ID: ClassVar[Pin] = ...
        """
        USB OTG (on-the-go) ID.
        """
        USB_VBUS: ClassVar[Pin] = ...
        """
        USB VBUS (power) monitoring pin.
        """
        X1: ClassVar[Pin] = ...
        """
        X1 pin.
        """
        X10: ClassVar[Pin] = ...
        """
        X10 pin.
        """
        X11: ClassVar[Pin] = ...
        """
        X11 pin.
        """
        X12: ClassVar[Pin] = ...
        """
        X12 pin.
        """
        X17: ClassVar[Pin] = ...
        """
        X17 pin.
        """
        X18: ClassVar[Pin] = ...
        """
        X18 pin.
        """
        X19: ClassVar[Pin] = ...
        """
        X19 pin.
        """
        X2: ClassVar[Pin] = ...
        """
        X2 pin.
        """
        X20: ClassVar[Pin] = ...
        """
        X20 pin.
        """
        X21: ClassVar[Pin] = ...
        """
        X21 pin.
        """
        X22: ClassVar[Pin] = ...
        """
        X22 pin.
        """
        X3: ClassVar[Pin] = ...
        """
        X3 pin.
        """
        X4: ClassVar[Pin] = ...
        """
        X4 pin.
        """
        X5: ClassVar[Pin] = ...
        """
        X5 pin.
        """
        X6: ClassVar[Pin] = ...
        """
        X6 pin.
        """
        X7: ClassVar[Pin] = ...
        """
        X7 pin.
        """
        X8: ClassVar[Pin] = ...
        """
        X8 pin.
        """
        X9: ClassVar[Pin] = ...
        """
        X9 pin.
        """
        Y1: ClassVar[Pin] = ...
        """
        Y1 pin.
        """
        Y10: ClassVar[Pin] = ...
        """
        Y10 pin.
        """
        Y11: ClassVar[Pin] = ...
        """
        Y11 pin.
        """
        Y12: ClassVar[Pin] = ...
        """
        Y12 pin.
        """
        Y2: ClassVar[Pin] = ...
        """
        Y2 pin.
        """
        Y3: ClassVar[Pin] = ...
        """
        Y3 pin.
        """
        Y4: ClassVar[Pin] = ...
        """
        Y4 pin.
        """
        Y5: ClassVar[Pin] = ...
        """
        Y5 pin.
        """
        Y6: ClassVar[Pin] = ...
        """
        Y6 pin.
        """
        Y7: ClassVar[Pin] = ...
        """
        Y7 pin.
        """
        Y8: ClassVar[Pin] = ...
        """
        Y8 pin.
        """
        Y9: ClassVar[Pin] = ...
        """
        Y9 pin.
        """
    # noinspection PyPep8Naming
    class cpu:
        """
        The CPU pins (CPU nomenclature, e.g. `A0`) that are bought out onto pads on a PyBoard.
        """
        A0: ClassVar[Pin] = ...
        """
        A0 pin.
        """
        A1: ClassVar[Pin] = ...
        """
        A1 pin.
        """
        A10: ClassVar[Pin] = ...
        """
        A10 pin.
        """
        A11: ClassVar[Pin] = ...
        """
        A11 pin.
        """
        A12: ClassVar[Pin] = ...
        """
        A12 pin.
        """
        A13: ClassVar[Pin] = ...
        """
        A13 pin.
        """
        A14: ClassVar[Pin] = ...
        """
        A14 pin.
        """
        A15: ClassVar[Pin] = ...
        """
        A15 pin.
        """
        A2: ClassVar[Pin] = ...
        """
        A2 pin.
        """
        A3: ClassVar[Pin] = ...
        """
        A3 pin.
        """
        A4: ClassVar[Pin] = ...
        """
        A4 pin.
        """
        A5: ClassVar[Pin] = ...
        """
        A5 pin.
        """
        A6: ClassVar[Pin] = ...
        """
        A6 pin.
        """
        A7: ClassVar[Pin] = ...
        """
        A7 pin.
        """
        A8: ClassVar[Pin] = ...
        """
        A8 pin.
        """
        A9: ClassVar[Pin] = ...
        """
        A9 pin.
        """
        B0: ClassVar[Pin] = ...
        """
        B0 pin.
        """
        B1: ClassVar[Pin] = ...
        """
        B1 pin.
        """
        B10: ClassVar[Pin] = ...
        """
        B10 pin.
        """
        B11: ClassVar[Pin] = ...
        """
        B11 pin.
        """
        B12: ClassVar[Pin] = ...
        """
        B12 pin.
        """
        B13: ClassVar[Pin] = ...
        """
        B13 pin.
        """
        B14: ClassVar[Pin] = ...
        """
        B14 pin.
        """
        B15: ClassVar[Pin] = ...
        """
        B15 pin.
        """
        B2: ClassVar[Pin] = ...
        """
        B2 pin.
        """
        B3: ClassVar[Pin] = ...
        """
        B3 pin.
        """
        B4: ClassVar[Pin] = ...
        """
        B4 pin.
        """
        B5: ClassVar[Pin] = ...
        """
        B5 pin.
        """
        B6: ClassVar[Pin] = ...
        """
        B6 pin.
        """
        B7: ClassVar[Pin] = ...
        """
        B7 pin.
        """
        B8: ClassVar[Pin] = ...
        """
        B8 pin.
        """
        B9: ClassVar[Pin] = ...
        """
        B9 pin.
        """
        C0: ClassVar[Pin] = ...
        """
        C0 pin.
        """
        C1: ClassVar[Pin] = ...
        """
        C1 pin.
        """
        C10: ClassVar[Pin] = ...
        """
        C10 pin.
        """
        C11: ClassVar[Pin] = ...
        """
        C11 pin.
        """
        C12: ClassVar[Pin] = ...
        """
        C12 pin.
        """
        C13: ClassVar[Pin] = ...
        """
        C13 pin.
        """
        C2: ClassVar[Pin] = ...
        """
        C2 pin.
        """
        C3: ClassVar[Pin] = ...
        """
        C3 pin.
        """
        C4: ClassVar[Pin] = ...
        """
        C4 pin.
        """
        C5: ClassVar[Pin] = ...
        """
        C5 pin.
        """
        C6: ClassVar[Pin] = ...
        """
        C6 pin.
        """
        C7: ClassVar[Pin] = ...
        """
        C7 pin.
        """
        C8: ClassVar[Pin] = ...
        """
        C8 pin.
        """
        C9: ClassVar[Pin] = ...
        """
        C9 pin.
        """
        D2: ClassVar[Pin] = ...
        """
        D2 pin.
        """
''',
    )
    shed.def_(
        old=".. class:: pyb.Pin(id, ...)",
        new="""
def __init__(
    self,
    id: Pin | str,
    /,
    mode: int = IN,
    pull: int = PULL_NONE,
    *,
    value: Any = None,
    alt: str | int = -1,
)
""",
    )
    # The RST marks these as classmethods; they are emitted as staticmethod
    # getter/setter overload pairs.
    shed.def_(
        old=".. classmethod:: Pin.debug([state])",
        new=[
            """
@staticmethod
def debug() -> bool
""",
            """
@staticmethod
def debug(state: bool, /) -> None
""",
        ],
    )
    shed.def_(
        old=".. classmethod:: Pin.dict([dict])",
        new=[
            """
@staticmethod
def dict() -> Dict[str, Pin]
""",
            """
@staticmethod
def dict(dict: Dict[str, Pin], /) -> None
""",
        ],
    )
    shed.def_(
        old=".. classmethod:: Pin.mapper([fun])",
        new=[
            """
@staticmethod
def mapper() -> Callable[[str], Pin]
""",
            """
@staticmethod
def mapper(fun: Callable[[str], Pin], /) -> None
""",
        ],
    )
    shed.def_(
        old=r".. method:: Pin.init(mode, pull=Pin.PULL_NONE, \*, value=None, alt=-1)",
        new="""
def init(
    self,
    mode: int = IN,
    pull: int = PULL_NONE,
    *,
    value: Any = None,
    alt: str | int = -1,
) -> None
""",
    )
    shed.def_(
        old=".. method:: Pin.value([value])",
        new=["def value(self) -> int", "def value(self, value: Any, /) -> None"],
    )
    shed.def_(
        old=".. method:: Pin.__str__()", new="def __str__(self) -> str",
    )
    shed.def_(
        old=".. method:: Pin.af()", new="def af(self) -> int",
    )
    shed.def_(
        old=".. method:: Pin.af_list()", new="def af_list(self) -> list[PinAF]",
    )
    shed.def_(
        old=".. method:: Pin.gpio()", new="def gpio(self) -> int",
    )
    shed.def_(
        old=".. method:: Pin.mode()", new="def mode(self) -> int",
    )
    shed.def_(
        old=".. method:: Pin.name()", new="def name(self) -> str",
    )
    shed.def_(
        old=".. method:: Pin.names()", new="def names(self) -> list[str]",
    )
    shed.def_(
        old=".. method:: Pin.pin()", new="def pin(self) -> int",
    )
    shed.def_(
        old=".. method:: Pin.port()", new="def port(self) -> int",
    )
    shed.def_(
        old=".. method:: Pin.pull()", new="def pull(self) -> int",
    )
    shed.vars(old=".. data:: Pin.AF_OD")
    shed.vars(old=".. data:: Pin.AF_PP")
    shed.vars(old=".. data:: Pin.ANALOG")
    shed.vars(old=".. data:: Pin.IN")
    shed.vars(old=".. data:: Pin.OUT_OD")
    shed.vars(old=".. data:: Pin.OUT_PP")
    shed.vars(old=".. data:: Pin.PULL_DOWN")
    shed.vars(old=".. data:: Pin.PULL_NONE")
    shed.vars(
        old=".. data:: Pin.PULL_UP", end="class PinAF -- Pin Alternate Functions",
    )
    nxt = "pyb.RTC.rst"
    # The rest of the file documents the PinAF helper class.
    _pin_af(end=nxt, shed=shed)
    return nxt
def _led(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.LED.rst`` into the ``LED`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.Pin.rst").
    """
    shed.class_from_file(old=this)
    # Translation table: RST declaration -> stub declaration(s).
    translations = (
        (".. class:: pyb.LED(id)", "def __init__(self, id: int, /)"),
        (
            ".. method:: LED.intensity([value])",
            [
                "def intensity(self) -> int",
                "def intensity(self, value: int, /) -> None",
            ],
        ),
        (".. method:: LED.off()", "def off(self) -> None"),
        (".. method:: LED.on()", "def on(self) -> None"),
    )
    for rst_decl, stub_decl in translations:
        shed.def_(old=rst_decl, new=stub_decl)
    next_file = "pyb.Pin.rst"
    shed.def_(
        old=".. method:: LED.toggle()", new="def toggle(self) -> None", end=next_file,
    )
    return next_file
def _lcd(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.LCD.rst`` into the ``LCD`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.LED.rst").
    """
    shed.class_from_file(old=this)
    # Translation table: RST declaration -> stub declaration, applied in
    # document order.
    translations = (
        (
            ".. class:: pyb.LCD(skin_position)",
            "def __init__(self, skin_position: str, /)",
        ),
        (
            ".. method:: LCD.command(instr_data, buf)",
            "def command(self, inst_data: int, buf: bytes, /) -> None",
        ),
        (
            ".. method:: LCD.contrast(value)",
            "def contrast(self, value: int, /) -> None",
        ),
        (".. method:: LCD.fill(colour)", "def fill(self, colour: int, /) -> None"),
        (".. method:: LCD.get(x, y)", "def get(self, x: int, y: int, /) -> int"),
        (
            ".. method:: LCD.light(value)",
            "def light(self, value: bool | int, /) -> None",
        ),
        (
            ".. method:: LCD.pixel(x, y, colour)",
            "def pixel(self, x: int, y: int, colour: int, /) -> None",
        ),
        (".. method:: LCD.show()", "def show(self) -> None"),
        (
            ".. method:: LCD.text(str, x, y, colour)",
            "def text(self, str: str, x: int, y: int, colour: int, /) -> None",
        ),
    )
    for rst_decl, stub_decl in translations:
        shed.def_(old=rst_decl, new=stub_decl)
    next_file = "pyb.LED.rst"
    shed.def_(
        old=".. method:: LCD.write(str)",
        new="def write(self, str: str, /) -> None",
        end=next_file,
    )
    return next_file
def _i2c(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.I2C.rst`` into the ``I2C`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.LCD.rst").

    NOTE(review): only the members up to and including ``mem_read`` are
    translated here, and the final ``shed.def_`` carries no ``end=``
    marker (unlike the sibling helpers) — confirm the remaining I2C
    methods are consumed elsewhere or intentionally omitted.
    """
    shed.class_from_file(old=this,)
    shed.def_(
        old=r".. class:: pyb.I2C(bus, ...)",
        new="""
def __init__(
    self,
    bus: int | str,
    mode: str,
    /,
    *,
    addr: int = 0x12,
    baudrate: int = 400_000,
    gencall: bool = False,
    dma: bool = False
)
""",
    )
    shed.def_(
        old=r".. method:: I2C.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=r".. method:: I2C.init(mode, *, addr=0x12, baudrate=400000, gencall=False, dma=False)",
        new="""
def init(
    self,
    bus: int | str,
    mode: str,
    /,
    *,
    addr: int = 0x12,
    baudrate: int = 400_000,
    gencall: bool = False,
    dma: bool = False
) -> None
""",
    )
    shed.def_(
        old=r".. method:: I2C.is_ready(addr)",
        new="def is_ready(self, addr: int, /) -> bool",
    )
    shed.def_(
        old=r".. method:: I2C.mem_read(data, addr, memaddr, *, timeout=5000, addr_size=8)",
        new="""
def mem_read(
    self,
    data: int | AnyWritableBuf,
    addr: int,
    memaddr: int,
    /,
    *,
    timeout: int = 5000,
    addr_size: int = 8,
) -> bytes
""",
    )
    return "pyb.LCD.rst"
def _flash(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.Flash.rst`` into the ``Flash`` stub class (which extends
    ``AbstractBlockDev``).

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.I2C.rst").
    """
    shed.class_from_file(old=this, super_class="AbstractBlockDev")
    shed.def_(
        old=".. class:: pyb.Flash()",
        new="""
@overload
def __init__(self)
""",
    )
    shed.def_(
        old=r".. class:: pyb.Flash(*, start=-1, len=-1)",
        new="""
@overload
def __init__(self, *, start: int = -1, len: int = -1)
""",
    )
    # The block-device methods share one description in the RST; the
    # empty-string values presumably fold the explicit-`offset` variants into
    # the preceding default-argument stub — confirm against
    # `defs_with_common_description`.
    shed.defs_with_common_description(
        cmd=".. method:: Flash.",  # Needs `.` at end!
        old2new={
            "readblocks(block_num, buf)": "def readblocks(self, blocknum: int, buf: bytes, offset: int = 0, /) -> None",
            "readblocks(block_num, buf, offset)": "",
            "writeblocks(block_num, buf)": "def writeblocks(self, blocknum: int, buf: bytes, offset: int = 0, /) -> None",
            "writeblocks(block_num, buf, offset)": "",
            "ioctl(cmd, arg)": "def ioctl(self, op: int, arg: int) -> int | None",
        },
        end="Hardware Note",
    )
    nxt = "pyb.I2C.rst"
    # Trailing hardware notes are appended to the module-level docstring.
    shed.pyi.doc.extend(shed.extra_notes(end=nxt))
    return nxt
def _ext_int(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.ExtInt.rst`` into the ``ExtInt`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.Flash.rst").
    """
    shed.class_from_file(old=this)
    shed.def_(
        old=".. class:: pyb.ExtInt(pin, mode, pull, callback)",
        new="def __init__(self, pin: int | str | Pin, mode: int, pull: int, callback: Callable[[int], None])",
    )
    shed.def_(
        old=".. classmethod:: ExtInt.regs()",
        new="""
@staticmethod
def regs() -> None
""",
    )
    # The remaining instance methods all follow the same no-argument pattern.
    for rst_decl, stub_decl in (
        (".. method:: ExtInt.disable()", "def disable(self) -> None"),
        (".. method:: ExtInt.enable()", "def enable(self) -> None"),
        (".. method:: ExtInt.line()", "def line(self) -> int"),
        (".. method:: ExtInt.swint()", "def swint(self) -> None"),
    ):
        shed.def_(old=rst_decl, new=stub_decl)
    for var_decl in (".. data:: ExtInt.IRQ_FALLING", ".. data:: ExtInt.IRQ_RISING"):
        shed.vars(old=var_decl)
    next_file = "pyb.Flash.rst"
    shed.vars(old=".. data:: ExtInt.IRQ_RISING_FALLING", end=next_file)
    return next_file
def _dac(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.DAC.rst`` into the ``DAC`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.ExtInt.rst").
    """
    # The two mode constants are not individually marked up in the RST, so
    # they are emitted wholesale via ``post_doc``.
    shed.class_from_file(
        pre_str="# noinspection PyShadowingNames",
        old=this,
        post_doc='''
    NORMAL: ClassVar[int] = ...
    """
    Normal mode (output buffer once) for `mode` argument of `write_timed`.
    """
    CIRCULAR: ClassVar[int] = ...
    """
    Circular mode (output buffer continuously) for `mode` argument of `write_timed`.
    """
''',
    )
    shed.def_(
        old=r".. class:: pyb.DAC(port, bits=8, *, buffering=None)",
        new="def __init__(self, port: int | Pin, /, bits: int = 8, *, buffering: bool | None = None)",
    )
    shed.def_(
        old=r".. method:: DAC.init(bits=8, *, buffering=None)",
        new="def init(self, bits: int = 8, *, buffering: bool | None = None) -> None",
    )
    shed.def_(
        old=".. method:: DAC.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=".. method:: DAC.noise(freq)", new="def noise(self, freq: int, /) -> None",
    )
    shed.def_(
        old=".. method:: DAC.triangle(freq)",
        new="def triangle(self, freq: int, /) -> None",
    )
    shed.def_(
        old=".. method:: DAC.write(value)",
        new="def write(self, value: int, /) -> None",
    )
    nxt = "pyb.ExtInt.rst"
    shed.def_(
        old=r".. method:: DAC.write_timed(data, freq, *, mode=DAC.NORMAL)",
        new="def write_timed(self, data: AnyWritableBuf, freq: int | Timer, /, *, mode: int = NORMAL) -> None",
        end=nxt,
    )
    return nxt
def _can(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.CAN.rst`` into the ``CAN`` stub class.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.DAC.rst").
    """
    shed.class_from_file(old=this,)
    shed.def_(
        old=".. class:: pyb.CAN(bus, ...)",
        new="""
def __init__(
    self,
    bus: int | str,
    mode: int,
    /,
    extframe: bool = False,
    prescaler: int = 100,
    *,
    sjw: int = 1,
    bs1: int = 6,
    bs2: int = 8,
    auto_restart: bool = False
)
""",
    )
    shed.def_(
        old=".. classmethod:: CAN.initfilterbanks(nr)",
        new="""
@staticmethod
def initfilterbanks(nr: int, /) -> None
""",
    )
    shed.def_(
        old=(
            r".. method:: CAN.init(mode, extframe=False, prescaler=100, *, sjw=1, bs1=6, "
            r"bs2=8, auto_restart=False, baudrate=0, sample_point=75)"
        ),
        new="""
def init(
    self,
    mode: int,
    /,
    extframe: bool = False ,
    prescaler: int = 100,
    *,
    sjw: int = 1,
    bs1: int = 6,
    bs2: int = 8,
    auto_restart: bool = False,
    baudrate: int = 0,
    sample_point: int = 75
) -> None
""",
    )
    shed.def_(
        old=".. method:: CAN.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=".. method:: CAN.restart()", new="def restart(self) -> None",
    )
    shed.def_(
        old=".. method:: CAN.state()", new="def state(self) -> int",
    )
    shed.def_(
        old=".. method:: CAN.info([list])",
        new=[
            "def info(self) -> list[int]",
            "def info(self, list: list[int], /) -> list[int]",
        ],
    )
    shed.def_(
        old=r".. method:: CAN.setfilter(bank, mode, fifo, params, *, rtr)",
        new=[
            """
def setfilter(self, bank: int, mode: int, fifo: int, params: Sequence[int], /) -> None
""",
            """
def setfilter(
    self,
    bank: int,
    mode: int,
    fifo: int,
    params: Sequence[int],
    /,
    *,
    rtr: Sequence[bool]
) -> None
""",
        ],
    )
    shed.def_(
        old=".. method:: CAN.clearfilter(bank)",
        new="def clearfilter(self, bank: int, /) -> None",
    )
    shed.def_(
        old=".. method:: CAN.any(fifo)", new="def any(self, fifo: int, /) -> bool",
    )
    shed.def_(
        old=r".. method:: CAN.recv(fifo, list=None, *, timeout=5000)",
        new=[
            "def recv(self, fifo: int, /, *, timeout: int = 5000) -> tuple[int, bool, int, memoryview]",
            "def recv(self, fifo: int, list: None, /, *, timeout: int = 5000) -> tuple[int, bool, int, memoryview]",
            "def recv(self, fifo: int, list: list[int | bool | memoryview], /, *, timeout: int = 5000) -> None",
        ],
    )
    shed.def_(
        old=r".. method:: CAN.send(data, id, *, timeout=0, rtr=False)",
        new="""
def send(self, data: int | AnyWritableBuf, id: int, /, *, timeout: int = 0, rtr: bool = False) -> None
""",
    )
    shed.def_(
        old=".. method:: CAN.rxcallback(fifo, fun)",
        new="def rxcallback(self, fifo: int, fun: Callable[[CAN], None], /) -> None",
    )
    # Constant groups: bus modes, bus states, and filter-bank layouts.
    shed.vars(
        old=[
            ".. data:: CAN.NORMAL",
            "CAN.LOOPBACK",
            "CAN.SILENT",
            "CAN.SILENT_LOOPBACK",
        ],
    )
    shed.vars(
        old=[
            ".. data:: CAN.STOPPED",
            "CAN.ERROR_ACTIVE",
            "CAN.ERROR_WARNING",
            "CAN.ERROR_PASSIVE",
            "CAN.BUS_OFF",
        ],
    )
    nxt = "pyb.DAC.rst"
    shed.vars(
        old=[".. data:: CAN.LIST16", "CAN.MASK16", "CAN.LIST32", "CAN.MASK32"], end=nxt,
    )
    return nxt
def _adc_all(*, this: str, end: str, shed: RST2PyI) -> None:
    """
    Translate the "ADCAll" section of ``pyb.ADC.rst`` into an ``ADCAll``
    stub class appended to ``shed.pyi``.

    The RST has no per-method markup for ADCAll, so the method stubs
    (with their docstrings) are emitted wholesale from the literal below;
    the section's free text becomes the class docstring.

    :param this: the section heading line to consume.
    :param end: the line that terminates the section.
    :param shed: the project RST-to-PyI translator.
    """
    shed.consume_containing_line(this)
    shed.consume_minuses_underline_line()
    shed.consume_blank_line()
    doc = []
    for doc_line in shed.rst:
        if doc_line.lstrip().startswith(end):
            # Leave the terminating line for the caller's subsequent parsing.
            shed.rst.push_line(doc_line)
            break
        doc.append(f" {doc_line}\n")
    else:
        assert False, f"Did not find: {end}"
    new_class = Class()
    shed.pyi.classes.append(new_class)
    new_class.class_def = "class ADCAll:"
    new_class.doc = doc
    new_class.defs.append(
        f'''
    def __init__(self, resolution: int, mask: int = 0xffffffff, /):
        """
        Create a multi-channel ADC instance.
        ``resolution`` is the number of bits for all the ADCs (even those not enabled); one of:
        14, 12, 10, or 8 bits.
        To avoid unwanted activation of analog inputs (channel 0..15) a second parameter, ``mask``,
        can be specified.
        This parameter is a binary pattern where each requested analog input has the corresponding bit set.
        The default value is 0xffffffff which means all analog inputs are active. If just the internal
        channels (16..18) are required, the mask value should be 0x70000.
        """
    def read_channel(self, channel: int, /) -> int:
        """
        Read the given channel.
        """
    def read_core_temp(self) -> float:
        """
        Read MCU temperature (centigrade).
        """
    def read_core_vbat(self) -> float:
        """
        Read MCU VBAT (volts).
        """
    def read_core_vref(self) -> float:
        """
        Read MCU VREF (volts).
        """
    def read_vref(self) -> float:
        """
        Read MCU supply voltage (volts).
        """
'''
    )
def _adc(this: str, shed: RST2PyI) -> str:
    """
    Convert ``pyb.ADC.rst`` into the ``ADC`` stub class, then hand the
    trailing "ADCAll" section to ``_adc_all``.

    :param this: the RST file to translate.
    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.CAN.rst").
    """
    shed.class_from_file(old=this)
    # Straightforward one-to-one translations.
    for rst_decl, stub_decl in (
        (".. class:: pyb.ADC(pin)", "def __init__(self, pin: int | Pin, /)"),
        (".. method:: ADC.read()", "def read(self) -> int"),
        (
            ".. method:: ADC.read_timed(buf, timer)",
            "def read_timed(self, buf: AnyWritableBuf, timer: Timer | int, /) -> None",
        ),
    ):
        shed.def_(old=rst_decl, new=stub_decl)
    adc_all_heading = "The ADCAll Object"
    shed.def_(
        old=".. method:: ADC.read_timed_multi((adcx, adcy, ...), (bufx, bufy, ...), timer)",
        new="""
@staticmethod
def read_timed_multi(
    adcs: tuple[ADC, ...],
    bufs: tuple[AnyWritableBuf, ...],
    timer: Timer,
    /
) -> bool
""",
        end=adc_all_heading,
    )
    next_file = "pyb.CAN.rst"
    _adc_all(this=adc_all_heading, end=next_file, shed=shed)
    return next_file
def _accel(shed: RST2PyI) -> str:
    """
    Convert ``pyb.Accel.rst`` into the ``Accel`` stub class.

    :param shed: the project RST-to-PyI translator.
    :return: the next RST file to process ("pyb.ADC.rst").
    """
    shed.class_from_file(old="pyb.Accel.rst")
    # Simple no-argument members share one translation pattern.
    for rst_decl, stub_decl in (
        (".. class:: pyb.Accel()", "def __init__(self)"),
        (
            ".. method:: Accel.filtered_xyz()",
            "def filtered_xyz(self) -> tuple[int, int, int]",
        ),
        (".. method:: Accel.tilt()", "def tilt(self) -> int"),
        (".. method:: Accel.x()", "def x(self) -> int"),
        (".. method:: Accel.y()", "def y(self) -> int"),
    ):
        shed.def_(old=rst_decl, new=stub_decl)
    shed.def_(old=".. method:: Accel.z()", new="def z(self) -> int", end="Hardware Note")
    next_file = "pyb.ADC.rst"
    # Trailing hardware notes are appended to the module-level docstring.
    shed.pyi.doc.extend(shed.extra_notes(end=next_file))
    return next_file
def _pyb(shed: RST2PyI) -> None:
    """Translate the top-level ``pyb`` module documentation into stubs.

    Emits the module header (helper ``Protocol``s used by ``mount``, the
    ``hid_*`` constants and the ``country`` overloads), then one typed
    def per documented module-level function.
    """
    shed.module(
        name="pyb",
        old="functions related to the board",
        post_doc=f'''
from abc import ABC, abstractmethod
from typing import NoReturn, overload, Sequence, runtime_checkable, Protocol
from typing import Callable, Dict, Any, ClassVar, Final

from uarray import array
from uio import AnyReadableBuf, AnyWritableBuf
from uos import AbstractBlockDev


@runtime_checkable
class _OldAbstractReadOnlyBlockDev(Protocol):
    """
    A `Protocol` (structurally typed) with the defs needed by
    `mount` argument `device` for read-only devices.
    """

    __slots__ = ()

    @abstractmethod
    def readblocks(self, blocknum: int, buf: bytearray, /) -> None: ...

    @abstractmethod
    def count(self) -> int: ...


@runtime_checkable
class _OldAbstractBlockDev(_OldAbstractReadOnlyBlockDev, Protocol):
    """
    A `Protocol` (structurally typed) with the defs needed by
    `mount` argument `device` for read-write devices.
    """

    __slots__ = ()

    @abstractmethod
    def writeblocks(self, blocknum: int, buf: bytes | bytearray, /) -> None: ...

    @abstractmethod
    def sync(self) -> None: ...


hid_mouse: Final[tuple[int, int, int, int, bytes]] = ...
"""
Mouse human interface device (hid), see `hid` argument of `usb_mode`.
"""

hid_keyboard: Final[tuple[int, int, int, int, bytes]] = ...
"""
Keyboard human interface device (hid), see `hid` argument of `usb_mode`.
"""


@overload
def country() -> str:
    """Return the current ISO 3166-1, Alpha-2, country code, eg US, GB, DE, AU."""


@overload
def country(alpha_2_code: str) -> None:
    """Set the ISO 3166-1, Alpha-2, country code, eg US, GB, DE, AU."""
''',
        end=r"..",
    )
    # --- timing helpers ---
    shed.def_(
        old=".. function:: delay(ms)", new="def delay(ms: int, /) -> None", indent=0,
    )
    shed.def_(
        old=".. function:: udelay(us)", new="def udelay(us: int, /) -> None", indent=0,
    )
    shed.def_(
        old=".. function:: millis()", new="def millis() -> int", indent=0,
    )
    shed.def_(
        old=".. function:: micros()", new="def micros() -> int", indent=0,
    )
    shed.def_(
        old=".. function:: elapsed_millis(start)",
        new="def elapsed_millis(start: int, /) -> int",
        indent=0,
    )
    shed.def_(
        old=".. function:: elapsed_micros(start)",
        new="def elapsed_micros(start: int, /) -> int",
        indent=0,
    )
    # --- reset / interrupt control ---
    shed.def_(
        old=".. function:: hard_reset()", new="def hard_reset() -> NoReturn", indent=0,
    )
    shed.def_(
        old=".. function:: bootloader()", new="def bootloader() -> NoReturn", indent=0,
    )
    shed.def_(
        old=".. function:: fault_debug(value)",
        new="def fault_debug(value: bool = False) -> None",
        indent=0,
    )
    shed.def_(
        old=".. function:: disable_irq()", new="def disable_irq() -> bool", indent=0,
    )
    shed.def_(
        old=".. function:: enable_irq(state=True)",
        new="def enable_irq(state: bool = True, /) -> None",
        indent=0,
    )
    # ``freq`` is documented once but exposed as a set of overloads, one
    # per optional positional argument.
    shed.def_(
        old=".. function:: freq([sysclk[, hclk[, pclk1[, pclk2]]]])",
        new=[
            "def freq() -> tuple[int, int, int, int]",
            "def freq(sysclk: int, /) -> None",
            "def freq(sysclk: int, hclk: int, /) -> None",
            "def freq(sysclk: int, hclk: int, pclk1: int, /) -> None",
            "def freq(sysclk: int, hclk: int, pclk1: int, pclk2: int, /) -> None",
        ],
        indent=0,
    )
    shed.def_(
        old=".. function:: wfi()", new="def wfi() -> None", indent=0,
    )
    shed.def_(
        old=".. function:: stop()", new="def stop() -> None", indent=0,
    )
    shed.def_(
        old=".. function:: standby()", new="def standby() -> None", indent=0,
    )
    # --- USB / misc helpers ---
    shed.def_(
        old=".. function:: have_cdc()", new="def have_cdc() -> bool", indent=0,
    )
    shed.def_(
        old=".. function:: hid((buttons, x, y, z))",
        new=[
            "def hid(data: tuple[int, int, int, int], /) -> None",
            "def hid(data: Sequence[int], /) -> None",
        ],
        indent=0,
    )
    shed.def_(
        old=".. function:: info([dump_alloc_table])",
        new=["def info() -> None", "def info(dump_alloc_table: bytes, /) -> None"],
        indent=0,
    )
    shed.def_(
        old=".. function:: main(filename)",
        new="def main(filename: str, /) -> None",
        indent=0,
    )
    # ``mount`` gets two overloads, keyed on the Protocols declared in the
    # module header above (read-only vs read-write block devices).
    shed.def_(
        old=r".. function:: mount(device, mountpoint, *, readonly=False, mkfs=False)",
        new=[
            """
def mount(
    device: _OldAbstractReadOnlyBlockDev,
    mountpoint: str,
    /,
    *,
    readonly: bool = False,
    mkfs: bool = False
) -> None
""",
            """
def mount(
    device: _OldAbstractBlockDev,
    mountpoint: str,
    /,
    *,
    readonly: bool = False,
    mkfs: bool = False
) -> None
""",
        ],
        indent=0,
    )
    shed.def_(
        old=".. function:: repl_uart(uart)",
        new=["def repl_uart() -> UART | None", "def repl_uart(uart: UART, /) -> None"],
        indent=0,
    )
    shed.def_(
        old=".. function:: rng()", new="def rng() -> int", indent=0,
    )
    shed.def_(
        old=".. function:: sync()", new="def sync() -> None", indent=0,
    )
    shed.def_(
        old=".. function:: unique_id()", new="def unique_id() -> bytes", indent=0,
    )
    shed.def_(
        pre_str="# noinspection PyShadowingNames",
        old=(
            ".. function:: usb_mode("
            "[modestr], port=-1, vid=0xf055, pid=-1, msc=(), hid=pyb.hid_mouse, high_speed=False)"
        ),
        new=[
            """
def usb_mode() -> str
""",
            """
def usb_mode(
    modestr: str,
    /,
    *,
    port: int = -1,
    vid: int = 0xf055,
    pid: int = -1,
    msc: Sequence[AbstractBlockDev] = (),
    hid: tuple[int, int, int, int, bytes] = hid_mouse,
    high_speed: bool = False
) -> None
""",
        ],
        indent=0,
        end="Classes",
    )
| StarcoderdataPython |
1727121 | <filename>src/pose3d_utils/mat4.py
import numpy as np
def identity():
    """Return a fresh 4x4 identity matrix (float64)."""
    return np.identity(4, dtype=np.float64)
def affine(A=None, t=None):
    """Build a 4x4 homogeneous transform.

    *A* is an optional 3x3 linear part and *t* an optional translation
    vector; omitted parts default to the identity / zero translation.
    """
    aff = np.eye(4, dtype=np.float64)
    if A is not None:
        aff[:3, :3] = np.array(A, dtype=aff.dtype)
    if t is not None:
        aff[:3, 3] = np.array(t, dtype=aff.dtype)
    return aff
def flip_x():
    """Return a 4x4 transform that flips horizontally (negates x)."""
    mat = np.eye(4, dtype=np.float64)
    mat[0, 0] = -1.0
    return mat
def rotate(axis, theta):
    """Return a 4x4 homogeneous rotation of *theta* radians about *axis*.

    Generalized: *axis* may be any 3-element sequence (list, tuple or
    ndarray) — the original required an ndarray because it asserted on
    ``axis.shape`` before converting.  The axis is normalised, then the
    rotation block is built with Rodrigues' rotation formula.
    """
    axis = np.asarray(axis, dtype=np.float64)
    assert axis.shape == (3,), 'rotation axis must be a 3D vector'
    axis = axis / np.linalg.norm(axis, 2)
    ux, uy, uz = axis
    cos = np.cos(theta)
    sin = np.sin(theta)
    mat = np.eye(4, dtype=np.float64)
    mat[0:3, 0:3] = [
        [cos + ux*ux*(1 - cos), ux*uy*(1-cos) - uz*sin, ux*uz*(1-cos) + uy*sin],
        [uy*ux*(1-cos) + uz*sin, cos + uy*uy*(1-cos), uy*uz*(1-cos) - ux*sin],
        [uz*ux*(1-cos) - uy*sin, uz*uy*(1-cos) + ux*sin, cos + uz*uz*(1-cos)],
    ]
    return mat
| StarcoderdataPython |
1787340 | <reponame>seanrcollings/arc<gh_stars>1-10
import functools
import io
import re
import sys
import time
from types import MethodType
import typing as t
import os
from arc import logging, typing as at
from arc.color import fg, effects, colorize
# Module-level logger for these utility helpers.
logger = logging.getArcLogger("util")

# Regex fragment matching a single identifier token (letters, digits, - and _).
IDENT = r"[a-zA-Z-_0-9]+"
def indent(string: str, distance="\t", split="\n"):
    """Prefix every *split*-separated line of *string* with *distance*."""
    return split.join(distance + part for part in string.split(split))
def header(contents: str):
    """Log *contents* centered in a 35-char field, underlined/bold/blue, at DEBUG."""
    logger.debug(colorize(f"{contents:^35}", effects.UNDERLINE, effects.BOLD, fg.BLUE))
# Matches ANSI / CSI escape sequences (e.g. terminal color codes).
ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
@functools.cache
def ansi_clean(string: str):
    """Return *string* with all ANSI escape sequences stripped."""
    return re.sub(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]", "", string)
@functools.cache
def ansi_len(string: str):
    """Visible length of *string*, i.e. excluding ANSI escape sequences."""
    return len(ansi_clean(string))
# TypeVar that preserves the decorated callable's signature through decorators.
FuncT = t.TypeVar("FuncT", bound=t.Callable[..., t.Any])
def timer(name):
    """Decorator factory that logs how long the wrapped function took.

    The elapsed time is logged at INFO level (via ``finally``) even when
    the wrapped function raises.

    Fixes: the original had a dead ``except BaseException as e: raise``
    clause (the bound name was unused and the bare re-raise is what
    propagation does anyway), and its docstring claimed a ``config.debug``
    gate that the code never checked.
    """
    def wrapper(func: FuncT) -> FuncT:
        @functools.wraps(func)
        def decorator(*args, **kwargs):
            start_time = time.time()
            try:
                return func(*args, **kwargs)
            finally:
                end_time = time.time()
                logger.info(
                    "%sCompleted %s in %ss%s",
                    fg.GREEN,
                    name,
                    round(end_time - start_time, 5),
                    effects.CLEAR,
                )

        return t.cast(FuncT, decorator)

    return wrapper
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
def levenshtein(s1: str, s2: str):
    """Return the Levenshtein edit distance between *s1* and *s2*.

    Classic two-row dynamic-programming formulation: memory use is
    O(min(len(s1), len(s2))).
    """
    # Keep the shorter string second so the DP rows stay small.
    if len(s1) < len(s2):
        s1, s2 = s2, s1

    if not s2:
        return len(s1)

    previous = list(range(len(s2) + 1))
    for i, ch1 in enumerate(s1):
        current = [i + 1]
        for j, ch2 in enumerate(s2):
            current.append(
                min(
                    previous[j + 1] + 1,          # insertion
                    current[j] + 1,               # deletion
                    previous[j] + (ch1 != ch2),   # substitution
                )
            )
        previous = current

    return previous[-1]
def dispatch_args(func: t.Callable, *args):
    """Call *func* with the longest prefix of *args* it can accept.

    Handles both plain functions and bound methods (for which ``self``
    is already supplied and therefore excluded from the arg budget).

    For example:
    ```py
    def foo(bar, baz):  # only accepts 2 args
        print(bar, baz)

    # Calls foo with the first two arguments:
    dispatch_args(foo, 1, 2, 3, 4)
    # 1 2
    ```

    Fixes: the original sliced ``args[0:arg_count - 1]`` for plain
    functions too, passing one argument fewer than the function accepts
    (its own docstring example raised TypeError).
    """
    if isinstance(func, MethodType):
        # Bound methods carry ``self`` implicitly, so one positional
        # slot of co_argcount is already consumed.
        arg_count = func.__func__.__code__.co_argcount - 1
    else:
        arg_count = func.__code__.co_argcount

    return func(*args[:arg_count])
def cmp(a, b) -> at.CompareReturn:
    """Three-way comparison of *a* and *b*.

    Args:
        a (Any): First value
        b (Any): Second value

    Returns:
        - ``-1`` when ``a < b``
        - ``0`` when ``a == b``
        - ``1`` when ``a > b``
    """
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
def partition(item: t.Any, n: int):
    """Break *item* (any sliceable sequence) into chunks of length *n*.

    The final chunk is shorter when ``len(item)`` is not a multiple of *n*.
    """
    chunks = []
    start = 0
    while start < len(item):
        chunks.append(item[start : start + n])
        start += n
    return chunks
def discover_name():
    """Return the basename of the script used to invoke this process."""
    return os.path.basename(sys.argv[0])
class IoWrapper(io.StringIO):
    """Wraps an IO object to handle colored text.

    If the output looks to be a terminal, ansi-escape sequences will be allowed.
    If it does not look like a terminal, ansi-escape sequences will be removed.
    """

    # NOTE(review): this subclasses io.StringIO but never calls
    # super().__init__ and delegates all writes to ``wrapped`` — confirm the
    # base class is only used to satisfy a TextIO-shaped interface.
    def __init__(self, wrapped: t.TextIO):
        self.wrapped = wrapped

    def write(self, message: str):
        # Strip color codes when not writing to a real terminal
        # (e.g. when output is piped or redirected).
        if not self.wrapped.isatty():
            message = ansi_clean(message)

        self.wrapped.write(message)
| StarcoderdataPython |
178145 | <gh_stars>0
import utilities
import database
import model
from discord.ext import commands
import discord
import random
class Management(commands.Cog):
    """Here lie commands for managing guild-specific settings."""

    def __init__(self, bot: model.Bakerbot) -> None:
        self.bot = bot

    async def cog_check(self, ctx: commands.Context) -> bool:
        """Ensure commands are being executed in a guild context."""
        return ctx.guild is not None

    # Fix: discord.py's keyword is ``invoke_without_command``; the original
    # ``invoke_without_subcommands=True`` was an unknown kwarg, so the group
    # callback never ran when invoked bare.
    @commands.group(invoke_without_command=True)
    async def guild(self, ctx: commands.Context) -> None:
        """The parent command for guild management."""
        summary = ("You've encountered Bakerbot's guild-specific management system! "
                   "See `$help management` for a full list of available subcommands.")

        await utilities.Commands.group(ctx, summary)

    @guild.command()
    # Fix: ctx was annotated ``commands.Command`` — it is a Context.
    async def ignore(self, ctx: commands.Context, channels: commands.Greedy[discord.TextChannel]) -> None:
        """Make Bakerbot ignore/respond to messages from certain channels."""
        config = await database.GuildConfiguration.ensure(ctx.guild.id)

        # Get the set of channels that are either already ignored or in the set of
        # to-be ignored channels, but not in both. Implements channel toggling.
        symmetric_difference = set(config.ignored_channels) ^ set(c.id for c in channels)
        config.ignored_channels = list(symmetric_difference)
        await config.write()

        if len(config.ignored_channels) > 0:
            list_modified = "Bakerbot will no longer respond to messages in these channels:\n"
            unchanged = "Bakerbot currently ignores messages in these channels:\n"
            text = list_modified if len(channels) > 0 else unchanged

            # Fix: the None-guard previously tested the stored channel id
            # rather than get_channel()'s result, so a deleted channel would
            # crash on ``None.mention``.
            resolved = (self.bot.get_channel(c) for c in config.ignored_channels)
            text += "\n".join(f" • {channel.mention}" for channel in resolved if channel is not None)
            await ctx.reply(text)
        elif len(channels) > 0:
            await ctx.reply("Bakerbot will no longer ignore messages from any channels in this guild.")
        else:
            await ctx.reply("Bakerbot does not currently ignore messages from any channels in this guild.")

    @guild.command()
    async def nodelete(self, ctx: commands.Context, toggle: bool | None) -> None:
        """Query the status of/enable/disable the message persistence system."""
        config = await database.GuildConfiguration.ensure(ctx.guild.id)

        if toggle is None:
            status = "enabled" if config.message_resender_enabled else "disabled"
            return await ctx.reply(f"The message persistence system is currently {status}.")

        config.message_resender_enabled = toggle
        word_to_use = "enabled" if toggle else "disabled"

        await config.write()
        await ctx.reply(f"The message persistence system has been {word_to_use}.")

    @guild.command()
    async def whoasked(self, ctx: commands.Context, toggle: bool | None) -> None:
        """Query the status of/enable/disable the message autoreply system."""
        config = await database.GuildConfiguration.ensure(ctx.guild.id)

        if toggle is None:
            status = "enabled" if config.who_asked_enabled else "disabled"
            # Fix: this previously said "message persistence system"
            # (copy-pasted from ``nodelete``) while reporting who_asked state.
            return await ctx.reply(f"The message autoreply system is currently {status}.")

        config.who_asked_enabled = toggle
        word_to_use = "enabled" if toggle else "disabled"

        await config.write()
        await ctx.reply(f"The message autoreply system has been {word_to_use}.")

    async def who_asked(self, message: discord.Message) -> None:
        """Handle the "ok but who asked?" reply feature."""
        if message.author.id != self.bot.user.id and message.guild is not None:
            if (config := await database.GuildConfiguration.get(message.guild.id)) is not None:
                # Roughly a 1-in-1001 chance per eligible message.
                if config.who_asked_enabled and random.randint(0, 1000) == 0:
                    await message.reply("ok but who asked?")

    async def message_resender(self, message: discord.Message) -> None:
        """Handle the message resending feature."""
        if message.author.id != self.bot.user.id and message.guild is not None:
            if (config := await database.GuildConfiguration.get(message.guild.id)) is not None:
                if config.message_resender_enabled:
                    embed = utilities.Embeds.package(message)
                    await message.reply(embed=embed)

    @commands.Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        """Call relevant subroutines when a message is received."""
        await self.who_asked(message)

    @commands.Cog.listener()
    async def on_message_delete(self, message: discord.Message) -> None:
        """Call relevant subroutines when a message is deleted."""
        # Resending deleted messages is what "message persistence" means here.
        await self.message_resender(message)
def setup(bot: model.Bakerbot) -> None:
    """Register the Management cog and install a guild-aware message handler."""
    cog = Management(bot)
    bot.add_cog(cog)

    async def on_message(message: discord.Message) -> None:
        """Bot-wide message handler to enforce guild-ignored channels."""
        if message.guild is not None:
            config = await database.GuildConfiguration.get(message.guild.id)
            # NOTE(review): guilds with no stored configuration are dropped
            # entirely here (config is None) — confirm that is intended.
            if config is None or message.channel.id in config.ignored_channels:
                return

        await bot.process_commands(message)

    # Monkey-patch the bot so ignored channels are filtered out before
    # any command processing happens.
    bot.on_message = on_message
| StarcoderdataPython |
3228662 | <reponame>Cosmo-Tech/cosmotech-api-python-client<filename>test/test_scenariorun_api.py
"""
    Cosmo Tech Platform API
Cosmo Tech Platform API # noqa: E501
The version of the OpenAPI document: 0.0.11-SNAPSHOT
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import unittest
import cosmotech_api
from cosmotech_api.api.scenariorun_api import ScenariorunApi # noqa: E501
class TestScenariorunApi(unittest.TestCase):
    """ScenariorunApi unit test stubs"""

    # Generated by openapi-generator: every test below is an empty
    # placeholder waiting for a real implementation.

    def setUp(self):
        # Instantiating the client with no configuration exercises the
        # default constructor path.
        self.api = ScenariorunApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_delete_scenario_run(self):
        """Test case for delete_scenario_run

        Delete a scenariorun  # noqa: E501
        """
        pass

    def test_find_scenario_run_by_id(self):
        """Test case for find_scenario_run_by_id

        Get the details of a scenariorun  # noqa: E501
        """
        pass

    def test_get_scenario_run_cumulated_logs(self):
        """Test case for get_scenario_run_cumulated_logs

        Get the cumulated logs of a scenariorun  # noqa: E501
        """
        pass

    def test_get_scenario_run_logs(self):
        """Test case for get_scenario_run_logs

        get the logs for the ScenarioRun  # noqa: E501
        """
        pass

    def test_get_scenario_run_status(self):
        """Test case for get_scenario_run_status

        get the status for the ScenarioRun  # noqa: E501
        """
        pass

    def test_get_scenario_runs(self):
        """Test case for get_scenario_runs

        get the list of ScenarioRuns for the Scenario  # noqa: E501
        """
        pass

    def test_get_workspace_scenario_runs(self):
        """Test case for get_workspace_scenario_runs

        get the list of ScenarioRuns for the Workspace  # noqa: E501
        """
        pass

    def test_run_scenario(self):
        """Test case for run_scenario

        run a ScenarioRun for the Scenario  # noqa: E501
        """
        pass

    def test_search_scenario_runs(self):
        """Test case for search_scenario_runs

        Search ScenarioRuns  # noqa: E501
        """
        pass

    def test_start_scenario_run_containers(self):
        """Test case for start_scenario_run_containers

        Start a new scenariorun with raw containers definition  # noqa: E501
        """
        pass
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
22895 | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import queue
from multiprocessing import Queue, Process
import sys
import os
from mc_memory_nodes import InstSegNode, PropSegNode
from heuristic_perception import all_nearby_objects
from shapes import get_bounds
# Directory containing this file; used to build the import paths below.
VISION_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(VISION_DIR, "../")
SEMSEG_DIR = os.path.join(VISION_DIR, "semantic_segmentation/")

# Make the craftassist root and the semantic-segmentation package
# importable without installation; these appends must precede the imports.
sys.path.append(CRAFTASSIST_DIR)
sys.path.append(SEMSEG_DIR)

import build_utils as bu
from semseg_models import SemSegWrapper
# TODO all "subcomponent" operations are replaced with InstSeg
class SubcomponentClassifierWrapper:
    """Agent perception component that labels nearby block objects.

    Feeds block objects to a background ``SubComponentClassifier`` process
    and, when predictions come back, records them in memory as instance
    segmentation nodes and announces the labels in chat.
    """

    def __init__(self, agent, model_path, vocab_path, perceive_freq=0):
        self.agent = agent
        self.memory = self.agent.memory
        # perceive() is a no-op unless forced when perceive_freq == 0.
        self.perceive_freq = perceive_freq
        self.true_temp = 1
        if model_path is not None:
            self.subcomponent_classifier = SubComponentClassifier(
                voxel_model_path=model_path, vocab_path=vocab_path,
            )
            self.subcomponent_classifier.start()
        else:
            self.subcomponent_classifier = None

    def perceive(self, force=False):
        """Queue nearby block objects for labelling and consume any results."""
        if self.perceive_freq == 0 and not force:
            return
        if self.perceive_freq > 0 and self.agent.count % self.perceive_freq != 0 and not force:
            return
        if self.subcomponent_classifier is None:
            return
        # TODO don't all_nearby_objects again, search in memory instead
        to_label = []
        # add all blocks in marked areas
        for pos, radius in self.agent.areas_to_perceive:
            for obj in all_nearby_objects(self.agent.get_blocks, pos, radius):
                to_label.append(obj)
        # add all blocks near the agent
        for obj in all_nearby_objects(self.agent.get_blocks, self.agent.pos):
            to_label.append(obj)
        for obj in to_label:
            self.subcomponent_classifier.block_objs_q.put(obj)

        # everytime we try to retrieve as many recognition results as possible
        while not self.subcomponent_classifier.loc2labels_q.empty():
            loc2labels, obj = self.subcomponent_classifier.loc2labels_q.get()
            loc2ids = dict(obj)
            label2blocks = {}

            def contaminated(blocks):
                """
                Check if blocks are still consistent with the current world
                """
                mx, Mx, my, My, mz, Mz = get_bounds(blocks)
                yzxb = self.agent.get_blocks(mx, Mx, my, My, mz, Mz)
                for b, _ in blocks:
                    x, y, z = b
                    if loc2ids[b][0] != yzxb[y - my, z - mz, x - mx, 0]:
                        return True
                return False

            # Invert the loc -> labels map into label -> list of blocks.
            for loc, labels in loc2labels.items():
                b = (loc, loc2ids[loc])
                for l in labels:
                    if l in label2blocks:
                        label2blocks[l].append(b)
                    else:
                        label2blocks[l] = [b]

            labels_str = " ".join(list(label2blocks.keys()))
            # Fix: singular/plural phrasing previously keyed on the length of
            # the joined STRING (len(labels_str) == 1), which only matched a
            # single one-character label; key on the number of labels instead.
            if len(label2blocks) == 1:
                self.agent.send_chat(
                    "I found this in the scene: " + labels_str
                )
            elif len(label2blocks) > 1:
                self.agent.send_chat(
                    "I found these in the scene: " + labels_str
                )

            for l, blocks in label2blocks.items():
                ## if the blocks are contaminated we just ignore
                if not contaminated(blocks):
                    InstSegNode.create(
                        self.memory, blocks, [l, 'semseg'])

    def update(self, label, blocks, house):
        # Deliberately a no-op: pushing corrections back to the classifier
        # process is currently disabled.
        pass
class SubComponentClassifier(Process):
    """
    A classifier class that calls a voxel model to output object tags.

    Runs as a daemon process: block objects arrive on ``block_objs_q`` and
    per-location label dicts are published on ``loc2labels_q``.
    """

    def __init__(self, voxel_model_path=None, vocab_path=None, true_temp=1):
        super().__init__()

        if voxel_model_path is not None:
            logging.info(
                "SubComponentClassifier using voxel_model_path={}".format(voxel_model_path)
            )
            self.model = SemSegWrapper(voxel_model_path, vocab_path)
        else:
            raise Exception("specify a segmentation model")

        self.block_objs_q = Queue()  # store block objects to be recognized
        self.loc2labels_q = Queue()  # store loc2labels dicts to be retrieved by the agent
        #self.to_update_q = Queue()
        # Daemon so this worker dies with the parent agent process.
        self.daemon = True

    def run(self):
        """
        The main recognition loop of the classifier
        """
        while True:  # run forever
            #for _ in range(100):
            #    print("If I print here, it solves the bug ¯\_(ツ)_/¯, priority thing?")
            # Blocks until the agent queues a block object.
            tb = self.block_objs_q.get(block=True, timeout=None)
            loc2labels = self._watch_single_object(tb)
            # Every recognized object also gets the catch-all "house" tag.
            for k in loc2labels.keys():
                loc2labels[k].append("house")
            self.loc2labels_q.put((loc2labels, tb))
            #try:
            #    label, blocks, house = self.to_update_q.get_nowait()
            #    self.update(label, blocks, house)
            #except queue.Empty:
            #    pass

    def _watch_single_object(self, tuple_blocks, t=1):
        """
        Input: a list of tuples, where each tuple is ((x, y, z), [bid, mid]). This list
               represents a block object.
        Output: a dict of (loc, [tag1, tag2, ..]) pairs for all non-air blocks.
        """

        def get_tags(p):
            """
            convert a list of tag indices to a list of tags
            """
            return [self.model.tags[i][0] for i in p]

        def apply_offsets(cube_loc, offsets):
            """
            Convert the cube location back to world location
            """
            return (cube_loc[0] + offsets[0], cube_loc[1] + offsets[1], cube_loc[2] + offsets[2])

        np_blocks, offsets = bu.blocks_list_to_npy(blocks=tuple_blocks, xyz=True)
        # ``t`` is the softmax temperature passed through to the model.
        pred = self.model.segment_object(np_blocks, T=t)

        # convert prediction results to string tags
        return dict([(apply_offsets(loc, offsets), get_tags([p])) for loc, p in pred.items()])

    def recognize(self, list_of_tuple_blocks):
        """
        Multiple calls to _watch_single_object
        """
        tags = dict()
        for tb in list_of_tuple_blocks:
            tags.update(self._watch_single_object(tb))
        return tags

    def update(self, label, blocks, house):
        # changes can come in from adds or removals, if add, update house
        logging.info("Updated label {}".format(label))
        if blocks[0][0][0] > 0:
            house += blocks
            blocks = [(xyz, (1, 0)) for xyz, _ in blocks]
        np_house, offsets = bu.blocks_list_to_npy(blocks=house, xyz=True)
        np_blocks, _ = bu.blocks_list_to_npy(
            blocks=blocks, xyz=False, offsets=offsets, shape=np_house.shape)  # shape is still xyz bc of shape arg
        self.model.update(label, np_blocks, np_house)
| StarcoderdataPython |
3311478 | class Parent:
parentattr=100
def __init__(self):
print "Calling Parent Constructer "
def Parentattr(self,attr):
Parent.attr = attr
def Parentmethod(self):
print "Calling Parent Method "
def Getattrr(self):
print "Get Attr= ",parent.attr
class Child(Parent):
    """Example subclass (Python 2 syntax).

    Note: defining __init__ here means Parent's constructor is NOT
    invoked automatically.
    """

    def __init__(self):
        print"Calling Child Constucter"

    def Childhood(self):
        print "Calling Childhood Method"
# Demonstration: a Child instance inherits Parent's methods.
c=Child()
c.Childhood()
c.Parentmethod()
c.Parentattr(200)
# NOTE(review): Parent defines ``Getattrr`` (double r) — this call only
# works if a ``Getattr`` alias exists on Parent; confirm intended spelling.
c.Getattr()
| StarcoderdataPython |
1651437 | <reponame>anthon-alindada/sanic_messaging<filename>app/domain/messaging/stores/message_store.py
# -*- coding: utf-8
# Core
from .base_store import BaseStore
# Model
from ..models import Message
class MessageStore(BaseStore):
    """
    Message stores

    Persistence helpers for the Message model.
    """

    async def create(self, content, author_id, channel_id):
        # Persist and return a freshly created message row.
        message = Message(
            content=content, author_id=author_id, channel_id=channel_id)
        message = await message.create()
        return message

    async def set_content(self, message, content):
        # Mutate the model instance, then stage an UPDATE query.
        # NOTE(review): the query is stored on ``self._update_query`` but is
        # not awaited here — presumably BaseStore executes it later; confirm.
        message.content = content
        self._update_query = message.update(content=message.content)
        return message
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.