id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
3577846 | <gh_stars>0
# Generated by Django 3.1.2 on 2020-10-13 16:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine ``Activity.description`` as a
    TextField with Portuguese help text and verbose name."""

    # Must run after the previous auto-generated migration of the core app.
    dependencies = [("core", "0002_auto_20201012_1804")]

    operations = [
        migrations.AlterField(
            model_name="activity",
            name="description",
            field=models.TextField(
                help_text="Descrição sobre o que deve ser feito na atividade.",
                max_length=4000,
                verbose_name="descrição",
            ),
        )
    ]
| StarcoderdataPython |
8014728 | from abc import abstractmethod, ABCMeta
from uuid import UUID
from opyoid import Injector, Module
from erica.infrastructure.InfrastructureModule import InfrastructureModule
from erica.infrastructure.rq.RqModule import RqModule
from erica.infrastructure.sqlalchemy.repositories.EricaAuftragRepository import EricaAuftragRepository
# Module-level injector used to resolve default dependencies for the
# service classes defined below.
injector = Injector([InfrastructureModule(), RqModule()])
class EricaAuftragServiceInterface(metaclass=ABCMeta):
    """Abstract interface for services that query Erica job (Auftrag) state.

    BUG FIX: the class previously set the Python 2 attribute
    ``__metaclass__ = ABCMeta``, which has no effect in Python 3, so
    ``@abstractmethod`` was never enforced and the "interface" was freely
    instantiable. Declaring the metaclass with Python 3 syntax restores
    abstract-method enforcement.
    """

    @abstractmethod
    def get_status(self, auftrag_id: UUID):
        """Return the persisted state of the job identified by *auftrag_id*."""
        pass
class EricaAuftragService(EricaAuftragServiceInterface):
    """Default implementation backed by the SQLAlchemy Auftrag repository."""

    # Repository used for all status queries; assigned in __init__.
    erica_auftrag_repository: EricaAuftragRepository

    def __init__(self, repository: EricaAuftragRepository = None) -> None:
        """Create the service.

        :param repository: optional repository override; when omitted, one is
            resolved from the module-level injector.

        BUG FIX: the repository was previously injected in the argument
        default (``= injector.inject(...)``), which is evaluated once at
        import time — a classic mutable/computed-default pitfall. It is now
        resolved lazily per instance.
        """
        super().__init__()
        if repository is None:
            repository = injector.inject(EricaAuftragRepository)
        self.erica_auftrag_repository = repository

    def get_status(self, auftrag_id: UUID):
        """Return the stored Auftrag entity for *auftrag_id*."""
        return self.erica_auftrag_repository.get_by_id(auftrag_id)
class EricaAuftragServiceModule(Module):
    # opyoid DI module: binds the service interface to its default
    # implementation so injector.inject(EricaAuftragServiceInterface)
    # yields an EricaAuftragService.
    def configure(self) -> None:
        self.bind(EricaAuftragServiceInterface, to_class=EricaAuftragService)
| StarcoderdataPython |
9700040 | #!/usr/bin/env python
import psycopg2
# Name of the PostgreSQL database holding the articles/authors/log tables.
DBNAME = "news"

# Top three articles by successful ('200 OK') article-page views.
query1 = """ SELECT '"' || articles.title || '"' ,
COUNT(log.id) AS views FROM articles
CROSS JOIN log WHERE log.path = '/article/' || articles.slug
AND log.status = '200 OK'
GROUP BY articles.id
ORDER BY views DESC LIMIT 3; """

# Authors ranked by total successful views of all their articles.
query2 = """ SELECT authors.name, COUNT(log.id) AS views FROM authors
JOIN articles ON authors.id = articles.author
CROSS JOIN log WHERE log.path = '/article/' || articles.slug
AND log.status = '200 OK'
GROUP BY authors.name
ORDER BY views DESC; """

# The day on which more than 1% of requests were 404s, with the error rate.
query3 = """ SELECT q1.day, ROUND(q2.views * 100.0 / q1.views, 2) AS error
FROM (SELECT TRIM(trailing ' ' FROM TO_CHAR(DATE(time),
'Month')) || ' ' ||
TO_CHAR(DATE(time), 'DD') || ', ' ||
TO_CHAR(DATE(time), 'YYYY') AS day,
COUNT(id) AS views
FROM log
GROUP BY day)
AS q1
JOIN (SELECT TRIM(trailing ' ' FROM TO_CHAR(DATE(time),
'Month')) || ' ' ||
TO_CHAR(DATE(time), 'DD') || ', ' ||
TO_CHAR(DATE(time), 'YYYY') AS day,
COUNT(id) AS views
FROM log
WHERE status = '404 NOT FOUND'
GROUP BY day)
AS q2 ON q1.day = q2.day
WHERE ROUND(q2.views * 100.0 / q1.views, 2) > 1.00
ORDER BY error DESC
LIMIT 1; """
def execute_query(query):
    """Run *query* against the news database and return all result rows.

    :param query: SQL string to execute.
    :returns: list of row tuples, or the error-message string
        "Unable to connect to the database" when the database is
        unreachable or the query fails.
    """
    db = None
    try:
        db = psycopg2.connect(dbname=DBNAME)
        c = db.cursor()
        c.execute(query)
        return c.fetchall()
    # BUG FIX: the previous `except BaseException` also swallowed
    # KeyboardInterrupt/SystemExit; catch only database errors.
    except psycopg2.Error:
        return "Unable to connect to the database"
    finally:
        # BUG FIX: the connection previously leaked when execute/fetch
        # raised; always close it.
        if db is not None:
            db.close()
def get_popular_articles():
    """Print the three most viewed articles, most popular first."""
    rows = execute_query(query1)
    for row in rows:
        print("{} -- {} views".format(row[0], row[1]))
def get_popular_authors():
    """Print every author ranked by total article views, most popular first."""
    rows = execute_query(query2)
    for row in rows:
        print("{} -- {} views".format(row[0], row[1]))
def get_error_requests():
    """Print the days on which more than 1% of requests resulted in errors."""
    rows = execute_query(query3)
    for row in rows:
        print("{} -- {} % errors".format(row[0], row[1]))
if __name__ == "__main__":
    # Run the three reports in sequence when executed as a script.
    print("**** Report Results ****\n(1)\tThe most popular three articles\n")
    get_popular_articles()
    print("\n-----------------------\n\n")
    print("(2) The most popular article authors\n")
    get_popular_authors()
    print("\n-----------------------\n\n")
    print("(3) Days with more than 1% of its requests lead to errors\n")
    get_error_requests()
| StarcoderdataPython |
3300717 | import requests
class Pnr(object):
    """Fetch Indian Railways PNR status through the railwayapi.com REST API."""

    def __init__(self):
        # Symbol -> id cache; currently unused (leftover from a previous
        # coinmarketcap-based implementation).
        self.ids = {}

    def get_pnr(self, pnrno):
        """Return a markdown-formatted status message for PNR *pnrno*.

        Falls back to a validation message when the API response lacks the
        expected keys (e.g. for an invalid PNR number).
        """
        try:
            # NOTE(review): the API key is hard-coded in the URL; consider
            # moving it to configuration or an environment variable.
            json = requests.get(
                "https://api.railwayapi.com/v2/pnr-status/pnr/" + pnrno
                + "/apikey/qyynupcty2/").json()
            passenger = "\n"
            # FIX: iterate the list directly instead of
            # range(0, json["passengers"].__len__()) with manual indexing.
            for entry in json["passengers"]:
                passenger += ("**" + str(entry["no"]) + "\t"
                              + entry["current_status"] + "\t"
                              + entry["booking_status"] + "**\n")
            message = "PNR Number : **{}**\n From : **{}** - **{}**\n To : **{}** - **{}**\nTrain Name :**{}**\nTrain Number :**{}**\nPassengers:{}".format(
                pnrno,
                json["from_station"]["code"], json["from_station"]["name"],
                json["reservation_upto"]["code"], json["reservation_upto"]["name"],
                json["train"]["name"], json["train"]["number"],
                passenger)
            return message
        except KeyError:
            message = "Enter a valid PNR number"
            return message
4929108 | <reponame>alexfikl/python-doi
import re
import logging
__version__ = '0.1.1'
logger = logging.getLogger("doi")
def pdf_to_doi(filepath, maxlines=float('inf')):
    """Scan a PDF file's raw bytes for a DOI and return the first match.

    The binary content is decoded as best-effort ASCII line by line; the
    first DOI-looking string wins, on the assumption that the document's
    own DOI appears near the top.

    :param filepath: Path to the pdf file
    :type filepath: str
    :param maxlines: Maximum number of lines to inspect. DOIs buried deep
        in a document are rarely the document's own DOI, and scanning the
        whole file can be slow.
    :type maxlines: int
    :returns: DOI or None
    :rtype: str or None
    """
    with open(filepath, 'rb') as handle:
        for line_number, raw_line in enumerate(handle):
            candidate = find_doi_in_text(raw_line.decode('ascii', errors='ignore'))
            if candidate:
                return candidate
            if line_number > maxlines:
                return None
    return None
def validate_doi(doi):
    """We check that the DOI can be resolved by official means. If so, we
    return the resolved URL, otherwise, we return None (which means the DOI is
    invalid).

    http://www.doi.org/factsheets/DOIProxy.html

    :param doi: Doi identificator
    :type doi: str
    :returns: It returns the url assigned to the doi if everything went right
    :rtype: str
    :raises ValueError: Whenever the doi is not valid
    """
    # Imports kept function-local so the module stays importable without
    # network-related machinery loaded.
    from urllib.error import HTTPError, URLError
    import urllib.request
    import urllib.parse
    import json
    url = "https://doi.org/api/handles/{doi}".format(doi=doi)
    logger.debug('handle url %s' % url)
    request = urllib.request.Request(url)
    try:
        result = json.loads(urllib.request.urlopen(request).read().decode())
        # When the handle record lists URL values we return the first one
        # immediately; the responseCode checks below are then skipped.
        if 'values' in result:
            url = [v['data']['value']
                   for v in result['values'] if v.get('type') == 'URL']
            return url[0] if url else None
    except HTTPError:
        # NOTE(review): every HTTP error status is reported as 404 here,
        # even if the server answered with something else — confirm intended.
        raise ValueError('HTTP 404: DOI not found')
    except URLError as e:
        raise ValueError(e)
    # Only reached when the record carried no 'values' key: fall back to the
    # proxy's responseCode. NOTE(review): the success branches fall through
    # and implicitly return None.
    response_code = int(result["responseCode"])
    if response_code in [1, 200]:
        # HTTP 200 all ok
        logger.debug('HTTP 200: valid doi')
    elif response_code == 2:
        raise ValueError('HTTP 500: Interal DOI server error')
    elif response_code == 100:
        raise ValueError('HTTP 404: DOI not found')
    else:
        raise ValueError('Something unexpected happened')
def get_clean_doi(doi):
    """Strip URL-encoding and PDF-link artifacts from a DOI-like string.

    :param doi: String containing a doi
    :returns: The pure doi
    """
    # (pattern, replacement) pairs applied in order: undo the percent-encoded
    # slash first, then remove the trailing junk that PDF link annotations
    # and publisher URLs leave around an embedded DOI.
    substitutions = (
        (r'%2F', '/'),
        (r'\)>', ' '),
        (r'\)/S/URI', ' '),
        (r'(/abstract)', ''),
        (r'\)$', ''),
    )
    for pattern, replacement in substitutions:
        doi = re.sub(pattern, replacement, doi)
    return doi
def find_doi_in_text(text):
    """
    Try to find a doi in a text
    """
    text = get_clean_doi(text)
    forbidden_doi_characters = r'"\s%$^\'<>@,;:#?&'
    # Sometimes it is in the javascript defined
    var_doi = re.compile(
        r'doi(.org)?'
        r'\s*(=|:|/|\()\s*'
        r'("|\')?'
        r'(?P<doi>[^{fc}]+)'
        r'("|\'|\))?'
        .format(
            fc=forbidden_doi_characters
        ), re.I
    )
    # The first match wins; search() is equivalent to taking the first
    # element of finditer().
    match = var_doi.search(text)
    if match is None:
        return None
    return get_clean_doi(match.group('doi'))
def get_real_url_from_doi(doi):
    """Resolve *doi* to its publisher URL, rewriting Elsevier "linkinghub"
    redirect URLs to the direct ScienceDirect article page.

    :param doi: DOI identifier string
    :returns: the resolved URL, or None when the DOI does not resolve
    :raises ValueError: propagated from validate_doi for invalid DOIs
    """
    url = validate_doi(doi)
    if not url:
        return url
    # BUG FIX: use a raw string — '\.' inside a normal string literal is an
    # invalid escape sequence (DeprecationWarning; SyntaxWarning on 3.12+).
    m = re.match(r'.*linkinghub\.elsevier.*/pii/([A-Z0-9]+).*', url, re.I)
    if m:
        return ('https://www.sciencedirect.com/science/article/abs/pii/{pii}'
                .format(pii=m.group(1)))
    return url
| StarcoderdataPython |
3387447 | import warnings
from contextlib import contextmanager
from decimal import Decimal
import webcolors
from . import BaseRenderer, renders
from ..operations import (BaseList, Bold, BulletList, CodeBlock, Footnote,
Format, Group, Heading, HyperLink, Image, InlineCode,
Italic, LineBreak, ListElement, NumberedList,
Paragraph, Span, Style, Table, TableCell, TableRow,
Text, UnderLine)
# CSS color names whose Word highlight constant (WdColorIndex) is not simply
# "wd" + capitalized name; consumed by
# WordFormatter.style_to_highlight_wdcolor.
WORD_WDCOLORINDEX_MAPPING = {
    'lightgreen': 'wdBrightGreen',
    'darkblue': 'wdDarkBlue',
    'darkred': 'wdDarkRed',
    'grey': 'wdGray50',
    'silver': 'wdGray25',
}
class WordFormatter(object):
    """Static helpers translating CSS-ish style values into Word constants."""

    @staticmethod
    def style_to_highlight_wdcolor(value, constants):
        """Map a CSS color (hex string or name) to a WdColorIndex highlight
        constant from *constants*, or None when no match exists."""
        try:
            name = webcolors.hex_to_name(value).lower() if value.startswith("#") else value.lower()
            if name in WORD_WDCOLORINDEX_MAPPING:
                return getattr(constants, WORD_WDCOLORINDEX_MAPPING[name])
            # Try and get the color from the wdColors enumeration
            return getattr(constants, "wd" + name.capitalize())
        except (AttributeError, ValueError):
            return None

    @staticmethod
    def rgbstring_to_hex(value):
        """
        Transform a string like rgb(199,12,15) into a hex color string
        :param value: A string like rgb(int,int,int)
        :return: A '#rrggbb' hex representation
        """
        left, right = value.find("("), value.find(")")
        values = value[left + 1:right].split(",")
        rgblist = [int(v.strip()) for v in values]
        return webcolors.rgb_to_hex(rgblist)

    @staticmethod
    def hex_to_wdcolor(value):
        """
        Receive a HEX color attribute string like '#9bbb59' (or '9bbb59') and transform it to a numeric constant
        in order to use it as a Selection.Font.Color attribute (as an item of WdColor enumeration)
        :param value: A HEX color attribute
        :return: A numeric WDCOLOR value (0x00BBGGRR byte order)
        """
        rgbstrlst = webcolors.hex_to_rgb(value)
        return int(rgbstrlst[0]) + 0x100 * int(rgbstrlst[1]) + 0x10000 * int(rgbstrlst[2])

    @staticmethod
    def style_to_wdcolor(value):
        """Convert a CSS color (rgb(...), name, or hex) to a WdColor int,
        or None for 'none'/unparsable values."""
        if value == 'none':
            return None
        try:
            if value.startswith('rgb('):
                value = WordFormatter.rgbstring_to_hex(value)
            elif value in webcolors.css3_names_to_hex:
                value = webcolors.css3_names_to_hex[value]
            return WordFormatter.hex_to_wdcolor(value)
        except Exception:
            return None

    @staticmethod
    def size_to_points(css_value):
        """
        Transform a pixel/point size into points (used by Word).
        :param css_value: number, or string optionally ending in px/pt
        :return: a point size (float), or None if the value cannot be parsed
        """
        # BUG FIX: is_pt was previously assigned only inside the isinstance
        # branch, so non-string input (e.g. an int) raised NameError below.
        is_pt = False
        if isinstance(css_value, str):
            is_pt = css_value.endswith('pt')
            if css_value.endswith("px") or is_pt:
                css_value = css_value[:-2]
        try:
            css_value = float(css_value)
        except (ValueError, TypeError):
            return None
        if is_pt:
            # Return pt sizes directly, no rounding or multiplication.
            return css_value
        # px -> pt: Word uses 72 pt per inch vs CSS 96 px per inch (x 0.75).
        return round(css_value) * 0.75
class COMRenderer(BaseRenderer):
    """Renderer that drives a live Microsoft Word instance over COM,
    typing the operation tree at the document's current selection."""

    def __init__(self, document, constants, range=None, debug=False, hooks=None):
        # NOTE(review): `range` shadows the builtin; kept as-is for caller
        # compatibility.
        self.word = document.Application
        self.document = document
        self.constants = constants
        # Nested stack of (Format, operation, Range) tuples collected during
        # render() and applied afterwards by apply_recursive_formatting().
        self._format_stack = None
        if range is not None:
            range.Select()
        super().__init__(debug, hooks)

    @property
    def selection(self):
        # Current selection of the active window; renderers type at it.
        return self.document.ActiveWindow.Selection
def range(self, start=None, end=None):
    """Return a document Range spanning *start*..*end*; when only one bound
    is given, the range is collapsed to that position.

    :raises RuntimeError: when neither bound is supplied.

    BUG FIX: the previous guard ``not (start or end)`` used truthiness, so a
    legitimate position 0 was treated as missing (e.g. ``range(start=0)``
    raised). Test for None explicitly instead.
    """
    if start is None and end is None:
        raise RuntimeError("Start and End are both None!")
    if start is None:
        start = end
    elif end is None:
        end = start
    return self.document.Range(Start=start, End=end)
@contextmanager
def get_range(self):
    """Yield a duplicate of the current range; on exit, stretch it to the
    current selection end so it covers everything rendered inside the
    with-block."""
    rng = self.selection.Range.Duplicate
    try:
        yield rng
    finally:
        rng.SetRange(rng.Start, self.selection.End)
@renders(Footnote)
def footnote(self, op: Footnote):
    # Insert a Word footnote whose body text comes from the element's
    # data-content attribute.
    rng = self.selection.Range
    content = op.attributes['data-content']
    footnote = self.document.Footnotes.Add(rng)
    footnote.Range.Text = content
    # Move the cursor past the inserted footnote reference mark.
    new_range = self.range(rng.Start + 1, rng.End + 1)
    # NOTE(review): lowercase .select() while every other call site uses
    # .Select() — confirm the COM binding resolves this case-insensitively.
    new_range.select()
@renders(Span)
def span(self, op: Span):
    # Spans carry no direct formatting here (handled via Format collection);
    # just render the children.
    yield
@renders(Heading)
def heading(self, op: Heading):
    # Delegate to the style renderer using Word's built-in "Heading N" style.
    with self.style(Style(name="Heading {0}".format(op.level), attributes=op.original_attributes)):
        yield self.new_operations(op.children)
@renders(Style)
def style(self, op: Style):
    """Apply a named Word style to the children's range, end the paragraph,
    and optionally bookmark the range under the element's id."""
    # old_style = self.selection.Style
    self.selection.Style = self.document.Styles(op.name)
    with self.get_range() as rng:
        yield
    self.selection.TypeParagraph()
    if op.id:
        # Insert a bookmark
        self.document.Bookmarks.Add(str(op.id), rng)
@renders(Bold)
def bold(self, op: Bold):
    # Toggle bold on, render children, toggle it back off.
    self.selection.BoldRun()
    yield
    self.selection.BoldRun()

@renders(Italic)
def italic(self, op: Italic):
    # Toggle italic around the children.
    self.selection.ItalicRun()
    yield
    self.selection.ItalicRun()

@renders(UnderLine)
def underline(self, op: UnderLine):
    # Underline has no toggle API: set single underline, then clear it.
    self.selection.Font.Underline = self.constants.wdUnderlineSingle
    yield
    self.selection.Font.Underline = self.constants.wdUnderlineNone
@renders(Text)
def text(self, op: Text):
    # Type the literal text at the current selection.
    self.selection.TypeText(op.text)

@renders(LineBreak)
def linebreak(self, op: LineBreak):
    # A CSS page-break-after wins over a plain paragraph break.
    if op.format.page_break_after == "always":
        self.selection.InsertBreak(self.constants.wdPageBreak)
    # NOTE(review): `and` binds tighter than `or`, so is_root_group only
    # constrains the Group case — confirm that is intended.
    elif isinstance(op.parent, Paragraph) or isinstance(op.parent, Group) and op.parent.is_root_group:
        self.selection.TypeParagraph()
@renders(Paragraph)
def paragraph(self, op: Paragraph):
    """Render a paragraph, temporarily switching to "No Spacing" when it
    contains explicit line breaks, and ending with a paragraph mark unless
    the context makes that redundant."""
    previous_style = None
    if op.has_child(LineBreak):
        previous_style = self.selection.Style
        self.selection.Style = self.document.Styles("No Spacing")
    yield
    if previous_style is not None:
        self.selection.Style = previous_style
    should_do_newline = True
    if op.has_children and isinstance(op[-1], (BaseList, Image, Table)):
        should_do_newline = False
    if isinstance(op.parent, (ListElement, TableCell)) and op.parent[-1] is op:
        # If our parent is a ListElement and our operation is the last one of it's children then we don't need to
        # add a newline.
        should_do_newline = False
    if should_do_newline:
        self.selection.TypeParagraph()
@renders(InlineCode)
def inline_code(self, op: InlineCode):
    """Render inline code in a fixed-width font, restoring the previous
    style and font name afterwards."""
    previous_style = self.selection.Style
    # FIX: only the font name is changed below, so only it needs capturing;
    # the previous code also read Font.Size into a variable it never used
    # or restored.
    previous_font_name = self.selection.Font.Name
    self.selection.Font.Name = "Courier New"
    yield
    self.selection.Style = previous_style
    self.selection.Font.Name = previous_font_name
@renders(CodeBlock)
def code_block(self, op: CodeBlock):
    """Render a code block in Courier New with no spacing, optionally via
    syntax-highlighted sub-operations, and disable proofing on the range."""
    self.selection.Style = self.document.Styles("No Spacing")
    self.selection.Font.Name = "Courier New"
    new_operations = op.highlighted_operations() if op.highlight else None
    with self.get_range() as rng:
        if new_operations:
            yield self.new_operations(new_operations)
        else:
            yield
    # Suppress spell/grammar checking inside the code block.
    rng.NoProofing = True
    self.selection.ParagraphFormat.SpaceAfter = 8
    self.selection.TypeParagraph()
    self.selection.ClearFormatting()
@renders(Image)
def image(self, op: Image):
    """Insert an inline picture (falling back to a 404 placeholder when the
    file cannot be added), scale it, and add an optional caption."""
    location, height, width = op.get_image_path_and_dimensions()
    rng = self.selection
    try:
        image = rng.InlineShapes.AddPicture(FileName=location, SaveWithDocument=True)
    except Exception:
        # Could not load the real image: insert the placeholder instead.
        location, height, width = op.get_404_image_and_dimensions()
        image = rng.InlineShapes.AddPicture(FileName=location, SaveWithDocument=True)
    # 0.75 converts CSS pixels (96/inch) to Word points (72/inch).
    if height:
        image.Height = height * 0.75
    if width:
        image.Width = width * 0.75
    if op.caption:
        self.selection.TypeParagraph()
        self.selection.Range.Style = self.document.Styles("caption")
        self.selection.TypeText(op.caption)
    # Expose the shape for later formatting (e.g. borders in handle_format).
    op.render.image = image
    if not isinstance(op.parent, TableCell):
        self.selection.TypeParagraph()
@renders(HyperLink)
def hyperlink(self, op: HyperLink):
    """Render children then wrap them in a hyperlink, bookmark reference, or
    whitelisted Word field depending on the location prefix (#/!/@)."""
    with self.get_range() as rng:
        yield
    # Inserting a hyperlink that contains different styles can reset the style. IE:
    # Link<Bold<Text>> Text
    # Bold will turn bold off, but link will reset it meaning the second Text is bold.
    # Here we just reset the style after making the hyperlink.
    style = self.selection.Style
    if op.location.startswith('#'):
        # In-document link to a bookmark created by the style renderer.
        self.document.Hyperlinks.Add(Anchor=rng, TextToDisplay="", SubAddress=op.location.replace('#', '', 1))
    elif op.location.startswith('!') or op.location.startswith('@'):
        if op.location.startswith('!'):
            text = "REF {} \h \* charformat".format(op.location.replace('!', '', 1))
        else:
            text = op.location.replace('@', '', 1)
        code = text.split(' ')[0]
        # Whitelist field codes
        if code not in {'FILENAME', 'STYLEREF'}:
            return
        field = self.document.Fields.Add(
            Range=rng,
            Type=self.constants.wdFieldEmpty,
            Text=text,
            PreserveFormatting=False
        )
        # When inserting fields, the cursor stays at the beginning, select it and move the cursor to escape from it
        field.Result.Select()
        self.selection.MoveRight()
    else:
        self.document.Hyperlinks.Add(Anchor=rng, Address=op.location[:2048])  # Prevent out of memory error
    self.selection.Collapse(Direction=self.constants.wdCollapseEnd)
    self.selection.Style = style
def _get_constants_for_list(self, op: BaseList):
    """Map a list operation to its (gallery, list-type) Word constants."""
    if isinstance(op, NumberedList):
        return self.constants.wdNumberGallery, self.constants.wdListSimpleNumbering
    if isinstance(op, BulletList):
        return self.constants.wdBulletGallery, self.constants.wdListBullet
    raise RuntimeError("Unknown list type {0}".format(op.__class__.__name__))
@renders(BulletList, NumberedList)
def render_list(self, op):
    """Render a (possibly nested) bullet or numbered list by applying a
    Word list template at depth 1 and indenting/outdenting for deeper
    levels."""
    list_level = op.depth + 1
    first_list = list_level == 1
    gallery_type, list_types = self._get_constants_for_list(op)
    gallery = self.word.ListGalleries(gallery_type)
    template = gallery.ListTemplates(1)
    if op.type:
        # Look up an alternate numbering style (e.g. roman numerals).
        style_values = {
            'roman-lowercase': self.constants.wdListNumberStyleLowercaseRoman,
            'roman-uppercase': self.constants.wdListNumberStyleUppercaseRoman
        }
        if op.type in style_values:
            value = style_values[op.type]
            for list_template in gallery.ListTemplates:
                if list_template.ListLevels(1).NumberStyle == value:
                    template = list_template
                    break
            else:
                warnings.warn('Unable to locate list style for {0}, using default'.format(op.type))
    if first_list:
        self.selection.Range.ListFormat.ApplyListTemplateWithLevel(
            ListTemplate=template,
            ContinuePreviousList=False,
            DefaultListBehavior=self.constants.wdWord10ListBehavior
        )
        if op.format.style:
            self._apply_style_to_range(op.format)
    else:
        # Nested list: just indent one level within the active template.
        self.selection.Range.ListFormat.ListIndent()
    yield
    # Re-apply the template if rendering the children changed the list type.
    if self.selection.Range.ListFormat.ListType != list_types:
        self.selection.Range.ListFormat.ApplyListTemplateWithLevel(
            ListTemplate=template,
            ContinuePreviousList=True,
            DefaultListBehavior=self.constants.wdWord10ListBehavior,
            ApplyLevel=list_level,
            ApplyTo=self.constants.wdListApplyToWholeList
        )
    if first_list:
        # Leaving the outermost list: clear numbering and reset the style.
        self.selection.Range.ListFormat.RemoveNumbers(NumberType=self.constants.wdNumberParagraph)
        self.selection.Style = self.constants.wdStyleNormal
    else:
        self.selection.Range.ListFormat.ListOutdent()
@renders(ListElement)
def list_element(self, op: ListElement):
    # Render the item's children, then terminate the list paragraph.
    yield
    self.selection.TypeParagraph()
@renders(Table)
def table(self, op: Table):
    """Insert a Word table of the operation's dimensions, resolve merged
    cells (colspan/rowspan), then render children into the cells."""
    table_range = self.selection.Range
    self.selection.TypeParagraph()
    if isinstance(op.parent, TableCell):
        # There appears to be a bug in Word. If you are in a table cell and add a new table
        # then it appears to only add one row. We get around this by adding a new paragraph, then using that
        # range to insert. This ends up with an unrequested space/margin, but it's better than nothing.
        self.selection.Range.Select()
        table_range = self.selection.Range
        self.selection.TypeParagraph()
    end_range = self.selection.Range
    rows, columns = op.dimensions
    table = self.selection.Tables.Add(
        table_range,
        NumRows=rows,
        NumColumns=columns,
        AutoFitBehavior=self.constants.wdAutoFitFixed
    )
    table.Style = "Table Grid"
    table.AllowAutoFit = True
    table.Borders.Enable = 0 if op.border == '0' else 1
    # This code is super super slow, running list() on a Cells collection takes >15 seconds.
    # https://github.com/enthought/comtypes/issues/107
    # cell_mapping = [
    #     list(row.Cells) for row in table.Rows
    # ]
    # This code is faster, but not as nice :(
    cell_mapping = [
        [row.Cells(i + 1) for i in range(len(row.Cells))]
        for row in list(table.Rows)
    ]
    processed_cells = set()
    has_rowspan = False
    # Handling merged cells is a bitch. We do it by finding the max dimensions of the table (the max sum of all
    # colspans in a row) then creating a table with those dimensions.
    # We then enumerate through each cell in each row, and find the corresponding word cell (the actual table cell)
    for row_index, row in enumerate(op):
        # Store the row object for later use. This cannot be used if the table has vertically merged cells
        row.render.row_object = table.Rows(row_index+1) if not has_rowspan else None
        # Loop through each row and extract the corresponding Row object from Word
        row_cells = cell_mapping[row_index]
        for column_index, cell in enumerate(row):
            # For each cell/column in our row extract the table cell from Word
            word_cell = row_cells[column_index]
            if word_cell is None or word_cell in processed_cells:
                # Skip forward and find the next unprocessed cell
                for possible_cell in row_cells[column_index:]:
                    if possible_cell is not None and possible_cell not in processed_cells:
                        word_cell = possible_cell
                        column_index = row_cells.index(word_cell)
                        break
            if cell.colspan > 1:
                # If the cell has a colspan of more than 1 we need to get the 0-indexed
                # column index (colspans are 1-indexed)
                colspan_index = cell.colspan - 1
                # If we want to merge from column 0 to column 3, we take the current column index and add the
                # 0-indexed colspan index then merge all the cells up to that point.
                word_cell.Merge(MergeTo=row_cells[column_index + colspan_index])
                # We need to clear any 'dead' cells from our array. We delete all of the cells we have merged
                # leaving the first one ('cell'), which is our merged cell.
                del row_cells[column_index + 1:column_index + cell.colspan]
            if cell.rowspan > 1:
                # If the cell has a rowspan things get tricky.
                has_rowspan = True
                if cell.colspan > 1:
                    # If it's got a colspan we need to go down the rows below it and merge those cells into
                    # a single cell, pretty much the same as above.
                    for idx in range(row_index + 1, row_index + cell.rowspan):
                        colspan_cell = cell_mapping[idx][column_index]
                        next_cell = cell_mapping[idx][column_index + (cell.colspan - 1)]
                        colspan_cell.Merge(MergeTo=next_cell)
                # We merge the multi-cells together
                word_cell.Merge(MergeTo=cell_mapping[row_index + cell.rowspan - 1][column_index])
                # And go down and delete all merged cells below. We set them to None so the size of the rows
                # is still uniform.
                for idx in range(row_index + 1, row_index + cell.rowspan):
                    slice_length = len(cell_mapping[idx][column_index:column_index + cell.colspan or 0])
                    cell_mapping[idx][column_index:column_index + cell.colspan or 0] = (None for _ in
                                                                                       range(slice_length))
            # Store the cell object for later use
            cell.render.cell_object = word_cell
            processed_cells.add(word_cell)
    # Store the table object for later use
    op.render.table = table
    table_width, unit = op.width
    # NOTE(review): cell widths are applied only when the table itself has a
    # width (width_type_map is scoped to this branch) — confirm intended.
    if table_width:
        width_type_map = {
            '%': self.constants.wdPreferredWidthPercent,
            'pt': self.constants.wdPreferredWidthPoints,
        }
        if unit == '%':
            table_width = max(0, min(table_width, 100))
        table.PreferredWidthType = width_type_map[unit]
        table.PreferredWidth = table_width
        for row_child in op.children:
            for cell_child in row_child.children:
                cell_width, unit = cell_child.width
                if cell_width is not None:
                    cell_o = cell_child.render.cell_object
                    cell_o.PreferredWidthType = width_type_map[unit]
                    cell_o.PreferredWidth = cell_width
        table.AllowAutoFit = False
    table.Select()
    yield
    end_range.Select()
@renders(TableRow)
def table_row(self, op):
    # Rows have no direct rendering; the table renderer places the cells.
    yield

@renders(TableCell)
def table_cell(self, op: TableCell):
    """Move the cursor into the pre-created cell, render its children, then
    optionally rotate the cell text."""
    rng = op.render.cell_object.Range
    rng.Collapse()
    rng.Select()
    yield
    if op.orientation:
        # ToDo: Move this to the Format handling. It is specific to a table cell though
        mapping = {
            'sideways-lr': 'wdTextOrientationUpward',
            'sideways-rl': 'wdTextOrientationDownward',
        }
        if op.orientation in mapping:
            op.render.cell_object.Range.Orientation = getattr(self.constants, mapping[op.orientation])
def _apply_style_to_range(self, op, rng=None):
    """Apply every style name from *op* to *rng* (defaults to the current
    selection's range), warning rather than failing on unknown names."""
    rng = rng or self.selection.Range
    style_names = op.style or []
    for style_name in style_names:
        try:
            rng.Style = style_name
        except Exception:
            warnings.warn("Unable to apply style name '{0}'".format(style_name))
def render_operation(self, operation, *args, **kwargs):
    """Render one operation, collecting its Format (if any) into the
    current format list and threading a fresh child list through the
    children so nested formats end up grouped by depth."""
    if operation.format is not None \
            and operation.format.has_format() \
            and operation.format.__class__ in self.render_methods:
        format_func = self.collect_format_data
    else:
        format_func = self.ignored_element
    if 'format_list' in kwargs:
        format_list = kwargs.pop('format_list')
    else:
        format_list = self._format_stack
    child_format_list = []
    with format_func(operation.format, operation, format_list):
        super().render_operation(operation, *args, format_list=child_format_list, **kwargs)
    if child_format_list:
        format_list.append(child_format_list)
def render(self, *args, **kwargs):
    """Render the whole document, then apply all collected formats."""
    self._format_stack = []
    super().render(*args, **kwargs)
    self.apply_recursive_formatting(self._format_stack)
    self._format_stack = None

@renders(Format)
def collect_format_data(self, op, parent_operation, format_stack):
    # Record the range covered by this format; the formatting itself is
    # applied later by apply_recursive_formatting/handle_format.
    with self.get_range() as rng:
        yield
    if not op.has_style:
        return
    format_stack.append((op, parent_operation, rng))
def apply_recursive_formatting(self, stack):
    """Depth-first walk of the nested format stack: recurse into sub-lists
    and apply each (op, parent, range) tuple via handle_format."""
    for entry in stack:
        if not isinstance(entry, tuple):
            self.apply_recursive_formatting(entry)
            continue
        with self.with_hooks(*entry):
            self.handle_format(*entry)
def handle_format(self, op, parent_operation, element_range):
    """Apply one collected Format to its range, dispatching on both the
    CSS-ish properties present and the type of the parent operation
    (tables, cells and images each need special handling)."""
    # should_type_x = op.should_use_x_hack
    # Why TypeText('X')? Styles seem to overrun their containers (especially when they span an entire line). This
    # adds a buffer to the end of the element, which is removed at the end. This is the least horrible way to do
    # this, trust us.
    # if should_type_x:
    #     self.selection.TypeText("X")
    if isinstance(parent_operation, TableCell):
        element_range = parent_operation.render.cell_object.Range
    if op.style and not isinstance(parent_operation, BaseList):
        self._apply_style_to_range(op, element_range)
    if op.font_size:
        size = WordFormatter.size_to_points(op.font_size)
        if size:
            element_range.Font.Size = size
    if op.color:
        col = WordFormatter.style_to_wdcolor(op.color)
        if col:
            element_range.Font.Color = col
    if op.text_decoration == "underline":
        element_range.Font.UnderlineColor = self.constants.wdColorAutomatic
        element_range.Font.Underline = self.constants.wdUnderlineSingle
    if op.margin:
        # margin: auto (both sides) approximates horizontal centering.
        if op.margin["left"] == "auto" and op.margin["right"] == "auto":
            if not isinstance(parent_operation, Table):
                # We don't want to center a table.
                element_range.ParagraphFormat.Alignment = self.constants.wdAlignParagraphCenter
        if op.margin["left"] != 'auto':
            if isinstance(parent_operation, Table):
                parent_operation.render.table.Rows.LeftIndent = WordFormatter.size_to_points(op.margin["left"])
    if op.background:
        background = op.background.split(" ")[0]
        # This needs refactoring :/
        if isinstance(parent_operation, Table):
            bg_color = WordFormatter.style_to_wdcolor(background)
            if bg_color:
                parent_operation.render.table.Shading.BackgroundPatternColor = bg_color
        elif isinstance(parent_operation, TableCell):
            bg_color = WordFormatter.style_to_wdcolor(background)
            if bg_color:
                parent_operation.render.cell_object.Shading.BackgroundPatternColor = bg_color
        else:
            if op.display == 'block':
                # If it's a block element with a background then we set the Shading.BackgroundPatternColor
                bg_color = WordFormatter.style_to_wdcolor(background)
                if bg_color:
                    element_range.Shading.BackgroundPatternColor = bg_color
            else:
                # Inline background maps to a highlight color index.
                bg_color = WordFormatter.style_to_highlight_wdcolor(background, self.constants)
                if bg_color:
                    element_range.HighlightColorIndex = bg_color
    if op.vertical_align:
        if isinstance(parent_operation, TableCell):
            alignment = {
                'top': self.constants.wdCellAlignVerticalTop,
                'middle': self.constants.wdCellAlignVerticalCenter,
                'bottom': self.constants.wdCellAlignVerticalBottom
            }
            if op.vertical_align in alignment:
                parent_operation.render.cell_object.VerticalAlignment = alignment[op.vertical_align]
    if op.text_align:
        alignment = {
            'center': self.constants.wdAlignParagraphCenter,
            'left': self.constants.wdAlignParagraphLeft,
            'right': self.constants.wdAlignParagraphRight
        }
        if op.text_align in alignment:
            element_range.ParagraphFormat.Alignment = alignment[op.text_align]
    if op.writing_mode:
        orientations = {"vertical-lr": 1, "sideways-lr": 2}
        if isinstance(parent_operation, TableCell):
            orientation = orientations.get(op.writing_mode)
            if orientation is not None:
                parent_operation.render.cell_object.Range.Orientation = orientation
    if op.border:
        if isinstance(parent_operation, Image):
            # Borders on images become shape line formatting.
            img = parent_operation.render.image
            img.Line.Visible = True
            if op.border["style"]:
                style = op.border["style"]
                constants = {
                    "solid": self.constants.msoLineSolid,
                }
                if style in constants:
                    img.Line.DashStyle = constants[style]
            if op.border["width"]:
                width = WordFormatter.size_to_points(op.border["width"])
                if width:
                    img.Line.Weight = width
            if op.border["color"]:
                color = WordFormatter.style_to_wdcolor(op.border["color"])
                if color:
                    img.Line.ForeColor.RGB = color
        if isinstance(parent_operation, (Table, TableRow, TableCell)):
            edges = {
                "bottom": self.constants.wdBorderBottom,
                "top": self.constants.wdBorderTop,
                "left": self.constants.wdBorderLeft,
                "right": self.constants.wdBorderRight,
            }
            # TODO: Support individual border-left, border-right, border-top and border-bottom properties
            borders = {edge: element_range.Borders(constant) for edge, constant in edges.items()}
            if op.border["style"]:
                style = op.border["style"]
                constants = {
                    "none": self.constants.wdLineStyleNone,
                    "solid": self.constants.wdLineStyleSingle,
                    "dotted": self.constants.wdLineStyleDot,
                    "dashed": self.constants.wdLineStyleDashSmallGap,
                    "double": self.constants.wdLineStyleDouble,
                    "inset": self.constants.wdLineStyleInset,
                    "outset": self.constants.wdLineStyleOutset,
                    "initial": self.word.Options.DefaultBorderLineStyle,
                }
                if style in constants:
                    for border in borders.values():
                        border.LineStyle = constants[style]
            if op.border["width"]:
                width = WordFormatter.size_to_points(op.border["width"])
                # Numbers? Where we are going we don't need numbers
                constants = {
                    0.25: self.constants.wdLineWidth025pt,
                    0.5: self.constants.wdLineWidth050pt,
                    0.75: self.constants.wdLineWidth075pt,
                    1: self.constants.wdLineWidth100pt,
                    1.5: self.constants.wdLineWidth150pt,
                    2.25: self.constants.wdLineWidth225pt,
                    3: self.constants.wdLineWidth300pt,
                    4.5: self.constants.wdLineWidth450pt,
                    6: self.constants.wdLineWidth600pt,
                }
                if width in constants:
                    for border in borders.values():
                        border.LineWidth = constants[width]
            if op.border["color"]:
                color = WordFormatter.style_to_wdcolor(op.border["color"])
                if color:
                    for border in borders.values():
                        border.Color = color
    if op.padding:
        # Padding maps to table/cell padding, or paragraph spacing otherwise.
        if op.padding['top']:
            px = WordFormatter.size_to_points(op.padding['top'])
            if px is not None:
                if isinstance(parent_operation, Table):
                    parent_operation.render.table.TopPadding = px
                elif isinstance(parent_operation, TableCell):
                    parent_operation.render.cell_object.TopPadding = px
                else:
                    element_range.ParagraphFormat.SpaceBefore = px
        if op.padding['bottom']:
            px = WordFormatter.size_to_points(op.padding['bottom'])
            if px is not None:
                if isinstance(parent_operation, Table):
                    parent_operation.render.table.BottomPadding = px
                elif isinstance(parent_operation, TableCell):
                    parent_operation.render.cell_object.BottomPadding = px
                else:
                    element_range.ParagraphFormat.SpaceAfter = px
        if op.padding['left']:
            px = WordFormatter.size_to_points(op.padding['left'])
            if px is not None:
                if isinstance(parent_operation, Table):
                    parent_operation.render.table.LeftPadding = px
                elif isinstance(parent_operation, TableCell):
                    parent_operation.render.cell_object.LeftPadding = px
        if op.padding['right']:
            px = WordFormatter.size_to_points(op.padding['right'])
            if px is not None:
                if isinstance(parent_operation, Table):
                    parent_operation.render.table.RightPadding = px
                elif isinstance(parent_operation, TableCell):
                    parent_operation.render.cell_object.RightPadding = px
    if op.line_height:
        # Bare numbers and percentages are line multiples; anything else is
        # treated as a px/pt size. NOTE(review): size_to_points can return
        # None for unparsable values — confirm LineSpacing accepts that.
        if op.line_height.isdecimal():
            points = self.word.LinesToPoints(Decimal(op.line_height))
        elif op.line_height.strip().endswith('%'):
            points = self.word.LinesToPoints(Decimal(op.line_height.split('%')[0]) / 100)
        else:
            points = WordFormatter.size_to_points(op.line_height)
        element_range.ParagraphFormat.LineSpacing = points
| StarcoderdataPython |
11302953 | # -*- coding: utf-8; -*-
#
# @file actioncontroller.py
# @brief collgate
# @author <NAME> (INRA UMR1095)
# @date 2018-01-05
# @copyright Copyright (c) 2018 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
from django.contrib.contenttypes.models import ContentType
from django.db import transaction, IntegrityError
from django.utils.translation import ugettext_lazy as _
from accession.models import Layout, ActionType, ActionToEntity, ActionData, ActionDataType
from accession.namebuilder import NameBuilderManager
from accession.actions.actionstepformat import ActionStepFormatManager, ActionStepFormat, ActionError
from accession.models import Action, Batch
from accession.namebuilder import NameBuilderManager
class ActionController(object):
    """
    Action controller. Permit to setup and later update the state of an action instance.

    An action is a sequence of steps described by its action type format; each step
    advances through the states INIT -> SETUP -> PROCESS -> DONE.
    """

    STEP_INIT = 0
    STEP_SETUP = 1
    STEP_PROCESS = 2
    STEP_DONE = 3

    def __init__(self, action_type_or_action, user=None):
        """
        :param action_type_or_action: Either an ActionType (to later create a new
            action with create()) or an existing Action instance (to continue it).
        :param user: Owner user, only meaningful together with an ActionType.
        :raise ActionError: If the first argument is neither type.
        """
        if type(action_type_or_action) is ActionType:
            self.action_type = action_type_or_action
            self.user = user
            self.action = None
        elif type(action_type_or_action) is Action:
            self.action_type = action_type_or_action.action_type
            self.user = action_type_or_action.user
            self.action = action_type_or_action
        else:
            # fail fast instead of leaving the controller half initialized
            # (previously the attributes were simply left unset)
            raise ActionError("action_type_or_action must be an ActionType or an Action")

        self.accession_content_type_id = ContentType.objects.get_by_natural_key('accession', 'accession').id
        self.batch_content_type_id = ContentType.objects.get_by_natural_key('accession', 'batch').id
        self.descriptor_content_type_id = ContentType.objects.get_by_natural_key('descriptor', 'descriptor').id

    def action_content_type(self, step_format_type):
        """
        Convert the entity type from the action step format IO_* to its content type id.
        """
        if step_format_type == ActionStepFormat.IO_ACCESSION_ID:
            return self.accession_content_type_id
        elif step_format_type == ActionStepFormat.IO_BATCH_ID:
            return self.batch_content_type_id
        elif step_format_type == ActionStepFormat.IO_DESCRIPTOR:
            return self.descriptor_content_type_id
        else:
            raise ActionError("Unsupported action IO entity type")

    def create(self, name, description):
        """
        Create a new action using a name.

        :param name: Informative name str
        :param description: Description str
        :return: A newly created action instance
        :raise ActionError: If an action already exists on this controller or the
            action type / user are missing.
        """
        if self.action is not None:
            raise ActionError("Action already exists")

        if self.action_type is None:
            raise ActionError("Action type must be non null")

        if self.user is None:
            raise ActionError("User must be non null")

        # create the action and set it empty
        action = Action()
        action.name = name  # informative name
        action.user = self.user
        action.description = description
        action.action_type = self.action_type

        # initial data structure
        action.data = {'steps': []}

        self.action = action

        # and init the first step
        if self.has_more_steps:
            self.add_step_data()

        action.save()
        return action

    @property
    def name_builder(self):
        """
        Get the related name builder. Default to the global one's.
        """
        return NameBuilderManager.get(NameBuilderManager.GLOBAL_BATCH)

    def batch_layout(self, accession):
        """
        Return the batch layout model from the given accession layout parameters,
        or None when the accession layout does not reference one.
        """
        data = accession.layout.parameters.get('data')
        if not data:
            return None

        batch_layouts = data.get('batch_layouts')
        if not batch_layouts:
            return None

        # only the first referenced batch layout is used
        batch_layout_id = batch_layouts[0]

        try:
            batch_layout = Layout.objects.get(pk=batch_layout_id)
        except Layout.DoesNotExist:
            return None

        return batch_layout

    @property
    def is_current_step_valid(self):
        """True when the action has at least one initialized step."""
        action_steps = self.action.data.get('steps')
        if not action_steps:
            return False

        # then at least one element
        step_index = len(action_steps) - 1
        action_step = action_steps[step_index]

        return action_step is not None

    @property
    def is_current_step_done(self):
        """True when there is no step yet or the last step reached the DONE state."""
        action_steps = self.action.data.get('steps')
        if not action_steps:
            return True

        # then at least one element
        step_index = len(action_steps) - 1
        action_step = action_steps[step_index]

        return action_step.get('state', ActionController.STEP_INIT) == ActionController.STEP_DONE

    @property
    def has_more_steps(self):
        """True when the action type declares more steps than the action has begun."""
        action_type_steps = self.action_type.format['steps']
        action_steps = self.action.data['steps']

        return len(action_type_steps) > len(action_steps)

    def add_step_data(self):
        """
        Append the data structure for the next step, in the INIT state.

        :return: The newly appended step data dict.
        :raise ActionError: If the current step is not done or there is no next step.
        """
        # check if the last step is done
        if not self.is_current_step_done:
            raise ActionError(_("Current action step if not done"))

        action_type_steps = self.action_type.format['steps']
        action_steps = self.action.data['steps']

        # no more steps
        if len(action_type_steps) == len(action_steps):
            raise ActionError(_("No more action steps"))

        step_data = {
            'state': ActionController.STEP_INIT,   # state of the step (init, setup, process, done)
            'index': len(self.action.data['steps']),   # index of the step [0..n]
            'options': None,   # user defined step options (not for all formats)
            'progression': [0, 0]   # some formats proceed data one by one, indicate the progression
        }

        self.action.data['steps'].append(step_data)
        return step_data

    def setup_data(self, input_data, input_columns):
        """
        Validate and store the user input for the current step and move it to SETUP.

        :param input_data: User provided data array, or None for step formats that
            do not accept user data.
        :param input_columns: Description of the input data columns.
        :raise ActionError: On invalid step, state or data.
        """
        if not self.is_current_step_valid:
            raise ActionError("Invalid current action step")

        if self.is_current_step_done:
            raise ActionError("Current action step is already done")

        # step format
        action_type_steps = self.action_type.format['steps']
        action_steps = self.action.data['steps']

        step_index = len(action_steps) - 1
        step_format = action_type_steps[step_index]

        action_step_format = ActionStepFormatManager.get(step_format['type'])

        if not action_step_format.accept_user_data and input_data is not None:
            raise ActionError("Current step does not accept data from user")

        action_step = action_steps[step_index]

        # check step state
        action_step_state = action_step.get('state', ActionController.STEP_INIT)
        if action_step_state != ActionController.STEP_INIT and action_step_state != ActionController.STEP_SETUP:
            raise ActionError("Current action step state must be initial or setup")

        # validate data according to step format
        validated_data = action_step_format.validate(step_format, input_data, input_columns)

        action_step['state'] = ActionController.STEP_SETUP

        action_data = ActionData()
        action_data.action = self.action
        action_data.data_type = ActionDataType.INPUT.value
        action_data.step_index = step_index
        action_data.data = validated_data or []  # never null

        try:
            # finally save
            with transaction.atomic():
                self.action.save()
                action_data.save()
        except IntegrityError as e:
            raise ActionError(e)

    def process_current_step_once(self, data):
        """
        Process one more element of the current step in case of progressive step.

        :param data: Array of elements to process, can be a singleton or many elements
        :raise ActionError: On invalid step, state or elements not part of the input set.
        """
        if not self.is_current_step_valid:
            raise ActionError("Invalid current action step")

        if self.is_current_step_done:
            raise ActionError("Current action step is already done")

        # step format
        action_type_steps = self.action_type.format['steps']
        action_steps = self.action.data['steps']

        step_index = len(action_steps) - 1
        step_format = action_type_steps[step_index]

        action_step_format = ActionStepFormatManager.get(step_format['type'])
        action_step = action_steps[step_index]

        # check step state.
        # fixed: the original test used "!= SETUP or != PROCESS" which is always
        # true, so this method unconditionally raised
        action_step_state = action_step.get('state', ActionController.STEP_INIT)
        if action_step_state not in (ActionController.STEP_SETUP, ActionController.STEP_PROCESS):
            raise ActionError("Current action step state must be setup or process")

        # current step data set (filter with .value for consistency with the writes)
        try:
            input_data_array = ActionData.objects.get(
                action=self.action,
                step_index=step_index,
                data_type=ActionDataType.INPUT.value).data
        except ActionData.DoesNotExist:
            input_data_array = None

        if step_index > 0:
            # output of the previous state if not initial step
            try:
                prev_output_array = ActionData.objects.get(
                    action=self.action,
                    step_index=step_index-1,
                    data_type=ActionDataType.OUTPUT.value).data
            except ActionData.DoesNotExist:
                prev_output_array = None
        else:
            prev_output_array = None

        # check if element(s) to process are included into the input data array
        for elt in data:
            if elt not in input_data_array:
                raise ActionError("Element(s) not allowed into the step")

        # @todo could need to be adapted
        to_process_data_array = data

        # process, save and make associations
        try:
            with transaction.atomic():
                output_data = action_step_format.process_iteration(
                    self,
                    self.action,
                    step_format,
                    action_step,
                    prev_output_array,
                    to_process_data_array)

                # first time create the action data
                try:
                    action_data = ActionData.objects.get(
                        action=self.action,
                        step_index=step_index,
                        data_type=ActionDataType.OUTPUT.value)
                except ActionData.DoesNotExist:
                    action_data = ActionData()
                    action_data.action = self.action
                    action_data.data_type = ActionDataType.OUTPUT.value
                    action_data.step_index = step_index

                # aggregate the results
                action_data.data += output_data

                # and one more element
                # NOTE(review): advances by the number of accepted formats, not by
                # len(to_process_data_array) -- confirm this is the intended unit
                action_step['progression'][0] += len(action_step_format.accept_format)

                self.action.save()
                action_data.save()

                # and add the related refs
                self.update_related_entities(step_index, output_data)
        except IntegrityError as e:
            raise ActionError(e)

    def process_current_step(self):
        """
        Run the current step. Iterative/user step formats are only prepared and
        left in the PROCESS state; other formats are processed and auto finalized.

        :raise ActionError: On invalid step or state.
        """
        if not self.is_current_step_valid:
            raise ActionError("Invalid current action step")

        if self.is_current_step_done:
            raise ActionError("Current action step is already done")

        # step format
        action_type_steps = self.action_type.format['steps']
        action_steps = self.action.data['steps']

        step_index = len(action_steps) - 1
        step_format = action_type_steps[step_index]

        action_step_format = ActionStepFormatManager.get(step_format['type'])
        action_step = action_steps[step_index]

        # check step state
        action_step_state = action_step.get('state', ActionController.STEP_INIT)
        if action_step_state != ActionController.STEP_SETUP:
            raise ActionError("Current action step state must be setup")

        # current step data set (filter with .value for consistency with the writes)
        try:
            data_array = ActionData.objects.get(
                action=self.action,
                step_index=step_index,
                data_type=ActionDataType.INPUT.value).data
        except ActionData.DoesNotExist:
            data_array = None

        if step_index > 0:
            # output of the previous state if not initial step
            prev_output_array = ActionData.objects.get(
                action=self.action,
                step_index=step_index-1,
                data_type=ActionDataType.OUTPUT.value).data
        else:
            prev_output_array = None

        # for iterative or user type step set the flag to process and returns
        if (action_step_format.type == ActionStepFormat.TYPE_ITERATIVE or
                action_step_format.type == ActionStepFormat.TYPE_USER):
            # process, save and make associations
            try:
                with transaction.atomic():
                    action_step_format.prepare_iterative_process(
                        self,
                        step_format,
                        action_step,
                        prev_output_array,
                        data_array)

                    action_data = ActionData()
                    action_data.action = self.action
                    action_data.data_type = ActionDataType.OUTPUT.value
                    action_data.step_index = step_index
                    action_data.data = []  # empty array initially

                    # wait for iterative processing and a finalization
                    action_step['state'] = ActionController.STEP_PROCESS

                    self.action.save()
                    action_data.save()
            except IntegrityError as e:
                raise ActionError(e)
        else:
            # process, save and make associations
            try:
                with transaction.atomic():
                    action_step['state'] = ActionController.STEP_PROCESS

                    output_data = action_step_format.process(
                        self,
                        step_format,
                        action_step,
                        prev_output_array,
                        data_array)

                    action_data = ActionData()
                    action_data.action = self.action
                    action_data.data_type = ActionDataType.OUTPUT.value
                    action_data.step_index = step_index
                    action_data.data = output_data

                    # auto finalize
                    action_step['state'] = ActionController.STEP_DONE

                    # and init the next one
                    if self.has_more_steps:
                        self.add_step_data()
                    else:
                        self.action.completed = True

                    self.action.save()
                    action_data.save()

                    # and add the related refs
                    self.update_related_entities(step_index, output_data)
            except IntegrityError as e:
                raise ActionError(e)

    def finalize_current_step(self):
        """
        Terminate an iterative step currently in the PROCESS state and advance to
        the next step (or complete the action).

        :raise ActionError: On invalid step or state.
        """
        if not self.is_current_step_valid:
            raise ActionError("Invalid current action step")

        # step format
        action_type_steps = self.action_type.format['steps']
        action_steps = self.action.data['steps']

        step_index = len(action_steps) - 1
        step_format = action_type_steps[step_index]

        action_step_format = ActionStepFormatManager.get(step_format['type'])
        action_step = action_steps[step_index]

        # check step state
        action_step_state = action_step.get('state', ActionController.STEP_INIT)
        if action_step_state != ActionController.STEP_PROCESS:
            raise ActionError("Current action step state must be process")

        # done and save
        try:
            with transaction.atomic():
                action_step_format.terminate_iterative_process(
                    self,
                    step_format,
                    action_step)

                action_step['state'] = ActionController.STEP_DONE

                # and init the next one
                if self.has_more_steps:
                    self.add_step_data()
                else:
                    self.action.completed = True

                self.action.save()
        except IntegrityError as e:
            raise ActionError(e)

    def reset_current_step(self):
        """
        Discard the input data of a step in the SETUP state and put it back to INIT.

        :raise ActionError: On invalid step, state or missing data.
        """
        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        action_steps = self.action.data['steps']

        step_index = len(action_steps) - 1
        action_step = action_steps[step_index]

        # check step state
        action_step_state = action_step.get('state', ActionController.STEP_INIT)
        if action_step_state != ActionController.STEP_SETUP:
            raise ActionError("Current action step state must be setup")

        # step action data
        try:
            action_data = ActionData.objects.get(
                action=self.action,
                step_index=step_index,
                data_type=ActionDataType.INPUT.value)
        except ActionData.DoesNotExist:
            raise ActionError("Missing action data")

        step_data = {
            'state': ActionController.STEP_INIT,
            'index': step_index,
            'options': None,
            'progression': [0, 0]   # some formats proceed data one by one, indicate the progression
        }

        action_steps[step_index] = step_data

        try:
            with transaction.atomic():
                self.action.save()
                action_data.delete()
        except IntegrityError as e:
            raise ActionError(e)

    def update_related_entities(self, step_index, data_array):
        """
        After processing a step, the related table of entities must be updated to easily lookup for which entities
        an action is related to.
        """
        if not data_array:
            return

        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        action_steps = self.action.data['steps']

        if step_index >= len(action_steps):
            raise ActionError("Action step index out of range")

        action_step = action_steps[step_index]

        # check step state
        action_step_state = action_step.get('state', ActionController.STEP_INIT)
        if action_step_state != ActionController.STEP_DONE:
            raise ActionError("Current action step state must be done")

        # step format
        action_type_steps = self.action_type.format['steps']
        step_format = action_type_steps[step_index]

        action_step_format = ActionStepFormatManager.get(step_format['type'])

        missing = []
        self.get_missing_entities(data_array, action_step_format.data_format, missing)

        # now for missing entities bulk create them
        ActionToEntity.objects.bulk_create(missing)

    def get_missing_entities(self, array, array_format, results):
        """
        Compute the ActionToEntity instances (not yet saved) missing for the given
        entity id array, appending them to *results*.

        :param array: Array of entity ids produced by a step.
        :param array_format: IO_* entity types declared by the step data format.
        :param results: Output list receiving unsaved ActionToEntity instances.
        """
        # initiates the types
        entity_ids_by_type = [[] for x in range(0, ActionStepFormat.NUM_IO_TYPES)]
        entity_exists_by_type = [set() for x in range(0, ActionStepFormat.NUM_IO_TYPES)]

        # for each non referenced entity input add it. first split objects by type and look
        # NOTE(review): every id is registered under every declared type of the
        # format; confirm ids really may belong to any of these types
        for e_id in array:
            for ei in array_format:
                entity_ids_by_type[ei].append(e_id)

        for ei in array_format:
            # fixed: lookup existing associations by entity_id (the column written
            # below), not by the ActionToEntity primary key
            qs = ActionToEntity.objects.filter(
                action_id=self.action.id,
                entity_id__in=entity_ids_by_type[ei],
                entity_type=self.action_content_type(ei)).values_list('entity_id', flat=True)

            for e in qs:
                entity_exists_by_type[ei].add(e)

        # compute the diff
        for ei in array_format:
            local_set = entity_exists_by_type[ei]
            content_type_id = self.action_content_type(ei)

            for e_id in entity_ids_by_type[ei]:
                if e_id not in local_set:
                    results.append(ActionToEntity(
                        action_id=self.action.id,
                        entity_id=e_id,
                        entity_type_id=content_type_id,
                    ))

    def has_step_data(self, step_index):
        """
        Has data for a particular step index.
        """
        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        action_steps = self.action.data['steps']

        if step_index >= len(action_steps):
            raise ActionError("Action step index out of range")

        action_step = action_steps[step_index]

        # a step still in INIT cannot have output data
        action_step_state = action_step.get('state', ActionController.STEP_INIT)
        if action_step_state == ActionController.STEP_INIT:
            return False

        return ActionData.objects.filter(
            action=self.action,
            step_index=step_index,
            data_type=ActionDataType.OUTPUT.value).exists()

    def get_step_data(self, step_index):
        """
        Get the data for a particular step index.
        """
        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        action_steps = self.action.data['steps']

        if step_index >= len(action_steps):
            raise ActionError("Action step index out of range")

        # no state check here: partial output of a PROCESS step is also readable
        try:
            action_data = ActionData.objects.get(
                action=self.action,
                step_index=step_index,
                data_type=ActionDataType.OUTPUT.value)
        except ActionData.DoesNotExist:
            raise ActionError("Action data does not exists")

        return action_data.data

    def get_step_data_format(self, step_index):
        """
        Get the columns format for the data of a particular step index.
        """
        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        action_steps = self.action.data['steps']

        if step_index >= len(action_steps):
            raise ActionError("Action step index out of range")

        # step format
        action_type_steps = self.action_type.format['steps']
        step_format = action_type_steps[step_index]

        action_step_format = ActionStepFormatManager.get(step_format['type'])
        return action_step_format.data_format

    @property
    def has_iterative_processing(self):
        """
        Is the current step wait for iterative processing.

        NOTE(review): only TYPE_ITERATIVE is tested here, whereas
        process_current_step also routes TYPE_USER to the iterative path --
        confirm whether TYPE_USER should be included.
        """
        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        step_index = len(action_steps) - 1

        # step format
        action_type_steps = self.action_type.format['steps']
        step_format = action_type_steps[step_index]

        action_step_format = ActionStepFormatManager.get(step_format['type'])
        return action_step_format.type == ActionStepFormat.TYPE_ITERATIVE

    @property
    def current_step_index(self):
        """
        Get the current step integer index
        """
        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        return len(action_steps) - 1

    def done_panel_id_and_type(self):
        """
        For current action step get the working done panel id
        """
        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        step_index = len(action_steps) - 1

        panels = action_steps[step_index].get('panels')
        if not panels:
            raise ActionError("Missing action panels")

        done_panel = panels.get('done')
        # fixed: previously re-tested 'panels' instead of the fetched panel
        if not done_panel:
            raise ActionError("Missing action done panel")

        return done_panel['id'], done_panel['type']

    def todo_panel_id_and_type(self):
        """
        For current action step get the working to do panel id
        """
        action_steps = self.action.data.get('steps')
        if not action_steps:
            raise ActionError("Empty action steps")

        step_index = len(action_steps) - 1

        panels = action_steps[step_index].get('panels')
        if not panels:
            raise ActionError("Missing action panels")

        todo_panel = panels.get('todo')
        # fixed: previously re-tested 'panels' instead of the fetched panel
        if not todo_panel:
            raise ActionError("Missing action todo panel")

        return todo_panel['id'], todo_panel['type']
| StarcoderdataPython |
5164911 | from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsSuperUser(BasePermission):
    """ Allow access only to superusers. """
    message = 'Allow access only to superusers.'

    def has_permission(self, request, view):
        # read-only requests are always allowed
        if request.method in SAFE_METHODS:
            return True
        user = request.user
        # writes require an authenticated staff superuser
        return bool(user and user.is_authenticated and user.is_staff and user.is_superuser)
class IsAuthor(BasePermission):
    """Check that the logged-in user is the author of the article."""
    message = 'You must be the author of this article.'

    def has_object_permission(self, request, view, obj):
        # object-level check: only the article's author may act on it
        return request.user == obj.author
class IsAuthorOrReadOnly(BasePermission):
    """Allow read access to anyone; writes only to the article's author or a superuser."""
    safe_method = ['GET', 'PUT', 'DELETE', 'OPTIONS']
    message = 'You must be the author of this article.'

    # has_permission runs first; has_object_permission is only consulted when it
    # returns True (False short-circuits the check).
    # View-level check of the request method (GET, PUT, DELETE, OPTIONS) --
    # currently disabled:
    # def has_permission(self, request, view):
    #     if request.method in self.safe_method:
    #         return True
    #
    #     self.message = 'This request method is not allow.'
    #     return False

    # Object-level check: decide whether the user may access this object.
    def has_object_permission(self, request, view, obj):
        if request.method in ('GET',):  # SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')
            # Check permissions for read-only request
            return True

        # Check permissions for write request
        # other methods (e.g. PUT update, DELETE) require author or superuser
        return (request.user == obj.author) or request.user.is_superuser
| StarcoderdataPython |
6705380 | <filename>models/team.py
import logging
import re
from google.appengine.ext import ndb
from helpers.champ_split_helper import ChampSplitHelper
from models.location import Location
class Team(ndb.Model):
    """
    Teams represent FIRST Robotics Competition teams.
    key_name is like 'frc177'
    """
    team_number = ndb.IntegerProperty(required=True)
    name = ndb.TextProperty(indexed=False)
    nickname = ndb.StringProperty(indexed=False)
    # city, state_prov, country, and postalcode are from FIRST
    city = ndb.StringProperty()  # Equivalent to locality. From FRCAPI
    state_prov = ndb.StringProperty()  # Equivalent to region. From FRCAPI
    country = ndb.StringProperty()  # From FRCAPI
    postalcode = ndb.StringProperty()  # From ElasticSearch only. String because it can be like "95126-1215"
    # Normalized address from the Google Maps API, constructed using the above
    normalized_location = ndb.StructuredProperty(Location)

    website = ndb.StringProperty(indexed=False)
    first_tpid = ndb.IntegerProperty()  # from USFIRST. FIRST team ID number. -greg 5/20/2010
    first_tpid_year = ndb.IntegerProperty()  # from USFIRST. Year tpid is applicable for. -greg 9 Jan 2011
    rookie_year = ndb.IntegerProperty()
    motto = ndb.StringProperty(indexed=False)

    created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
    updated = ndb.DateTimeProperty(auto_now=True, indexed=False)

    def __init__(self, *args, **kw):
        # store set of affected references referenced keys for cache clearing
        # keys must be model properties
        self._affected_references = {
            'key': set(),
        }
        # lazily built display strings, see the properties below
        self._location = None
        self._city_state_country = None
        super(Team, self).__init__(*args, **kw)

    @property
    def championship_location(self):
        """Championship the team is assigned to, per ChampSplitHelper."""
        return ChampSplitHelper.get_champ(self)

    @property
    def location(self):
        """Human readable 'City, State ZIP, Country' string (parts may be missing)."""
        if self._location is None:
            split_location = []
            if self.city:
                split_location.append(self.city)
            if self.state_prov:
                if self.postalcode:
                    split_location.append(self.state_prov + ' ' + self.postalcode)
                else:
                    split_location.append(self.state_prov)
            if self.country:
                split_location.append(self.country)
            self._location = ', '.join(split_location)
        return self._location

    @property
    def city_state_country(self):
        """'City, State, Country' string, preferring the normalized location."""
        if not self._city_state_country and self.nl:
            self._city_state_country = self.nl.city_state_country

        # fall back on the raw FIRST-provided fields
        if not self._city_state_country:
            location_parts = []
            if self.city:
                location_parts.append(self.city)
            if self.state_prov:
                location_parts.append(self.state_prov)
            if self.country:
                country = self.country
                if self.country == 'US':
                    country = 'USA'
                location_parts.append(country)

            self._city_state_country = ', '.join(location_parts)
        return self._city_state_country

    @property
    def nl(self):
        """Shorthand for the normalized (Google Maps) location."""
        return self.normalized_location

    @property
    def details_url(self):
        return "/team/%s" % self.team_number

    @property
    def key_name(self):
        return "frc%s" % self.team_number

    @classmethod
    def validate_key_name(cls, team_key):
        """Return True when *team_key* matches the 'frc<number>' key format."""
        # NOTE(review): the original pattern was corrupted in the source
        # (r'^<KEY>'); reconstructed from the key_name format documented on this
        # model -- confirm against callers.
        key_name_regex = re.compile(r'^frc\d+$')
        match = re.match(key_name_regex, team_key)
        return True if match else False

    @property
    def motto_without_quotes(self):
        """The motto with a single pair of matching surrounding quotes removed."""
        if not self.motto:
            # guard: avoid IndexError on a missing or empty motto
            return self.motto
        if (self.motto[0] == self.motto[-1]) and self.motto.startswith(("'", '"')):
            return self.motto[1:-1]
        return self.motto
9746537 | from pypair.association import continuous_continuous
from pypair.continuous import Continuous
# Two identical continuous variables: every association measure is evaluated
# first through the functional API, then through the Continuous object.
xs = list(range(10))
ys = list(range(10))

for measure in Continuous.measures():
    score = continuous_continuous(xs, ys, measure)
    print(f'{score}: {measure}')

print('-' * 15)

pair = Continuous(xs, ys)
for measure in pair.measures():
    score = pair.get(measure)
    print(f'{score}: {measure}')
| StarcoderdataPython |
1876012 | import pprint
import re
import requests
import upnpclient
from philips_hue.models import Bridge
def discover_hue(**kwargs):
    """
    Discover Philips Hue bridges and persist any new ones as Bridge rows.

    Keyword arguments:
        cloud (bool): query the meethue cloud discovery endpoint (default False).
        upnp (bool): scan the local network via UPnP (default True).

    :return: A fresh select of all known bridges, pre-existing and newly created.
    """
    cloud = kwargs.get("cloud", False)
    upnp = kwargs.get("upnp", True)

    # start from the already stored bridges so duplicates are never inserted
    bridge_addresses = [bridge.ip for bridge in Bridge.select()]

    if cloud:
        devices_resp = requests.get("https://discovery.meethue.com/")
        if devices_resp.status_code == 200:
            for device in devices_resp.json():
                ip_address = device.get("internalipaddress")
                urlbase = "http://%s:80" % ip_address
                debug_address = "%s/debug/clip.html" % urlbase
                if ip_address not in bridge_addresses:
                    Bridge.create(
                        name="Philips Hue Bridge",
                        ip=ip_address,
                        serial_number=device.get("id"),
                        url=urlbase,
                        debug=debug_address,
                        device_id=device.get("id"),
                    )
                    bridge_addresses.append(ip_address)

    if upnp:
        for device in upnpclient.discover():
            if "Philips hue" in device.friendly_name:
                # device.location points at .../description.xml on the bridge
                urlbase = device.location.replace("/description.xml", "")
                ip_address = re.search(
                    r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", urlbase
                ).group(1)
                debug_address = "%s/debug/clip.html" % urlbase
                if ip_address not in bridge_addresses:
                    Bridge.create(
                        name=device.friendly_name,
                        ip=ip_address,
                        serial_number=device.serial_number,
                        url=urlbase,
                        debug=debug_address,
                        device_id=device.serial_number,
                    )
                    bridge_addresses.append(ip_address)

    # re-query so the caller sees every bridge, including the ones just created
    return Bridge.select()
# Discover bridges through the meethue cloud endpoint only (UPnP disabled).
bridges = discover_hue(cloud=True, upnp=False)

for bridge in bridges:
    # "test" is presumably the app/user name registered on the bridge -- confirm
    bridge.connect("test")
    for light in bridge.lights():
        if light.name == "Floor lamp":
            print(light.name)
            light.toggle()
            if light.is_on:
                # ramp brightness upward; range(0, 255) yields 0..254
                for x in range(0, 255):
                    light.brightness(x)
6528238 | <gh_stars>1-10
import os, gzip, math
import numpy as np
import scipy.misc
import imageio
import matplotlib.pyplot as plt
from matplotlib import cm
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
def load_dataset(dataset, batch_size = 64, for_tanh = True):
    """
    Dispatch to the loader matching *dataset* (case-insensitive) and return the
    (trainloader, testloader) pair.
    """
    loaders = {'mnist': load_dataset_MNIST,
               'mnist_sup': load_dataset_MNIST,
               'small-mnist': load_dataset_MNIST_small,
               }
    loader = loaders[dataset.lower()]
    return loader(batch_size, for_tanh = for_tanh)
def load_dataset_MNIST_small(batch_size, for_tanh):
    """Load a tiny MNIST subset (60 train / 10 test samples) for quick runs."""
    loaders = load_dataset_MNIST(batch_size = batch_size, num_train = 60, num_test = 10, for_tanh = for_tanh)
    print('MNIST small version loaded in normalized range ' + ('[-1, 1]' if for_tanh else '[0,1]'))
    return loaders
def load_dataset_MNIST(batch_size = 64, download=True, num_train = 60000, num_test = 10000, for_tanh = True):
    """
    Load the MNIST train/test sets as DataLoaders.

    The output of torchvision datasets are PILImage images of range [0, 1];
    when *for_tanh* is set they are normalized to the range [-1, 1].
    """
    steps = [transforms.ToTensor()]
    if for_tanh:
        # NOTE(review): 3-channel mean/std applied to single-channel MNIST --
        # relies on the installed torchvision tolerating this; confirm version.
        steps.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
    transform = transforms.Compose(steps)

    trainset = torchvision.datasets.MNIST(root='./data/mnist', train=True,
                                          download=download, transform=transform)
    testset = torchvision.datasets.MNIST(root='./data/mnist', train=False,
                                         download=download, transform=transform)

    trainloader, testloader = get_data_loader(batch_size, trainset, testset, num_train, num_test)
    print('MNIST Data loaded in normalized range ' + ('[-1, 1]' if for_tanh else '[0,1]') )
    return trainloader, testloader
def get_data_loader(batch_size, trainset, testset, num_train=0, num_test=0):
    """
    Wrap the datasets into a shuffled train loader and an ordered test loader.

    When *both* num_train and num_test are non-zero the datasets are truncated
    in place to those sizes (a single non-zero value is ignored).
    """
    if num_train != 0 and num_test != 0:
        # NOTE(review): mutates torchvision's train_data/test_data attributes,
        # which are deprecated in newer torchvision -- confirm installed version
        trainset.train_data = trainset.train_data[:num_train, :, :]
        trainset.train_labels = trainset.train_labels[:num_train]
        testset.test_data = testset.test_data[:num_test, :, :]
        testset.test_labels = testset.test_labels[:num_test]

    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True, num_workers=2)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                             shuffle=False, num_workers=2)
    return trainloader, testloader
def long_tensor_to_onehot(idx, max_idx):
    """Convert a 1-D long tensor of class indices to a (N, max_idx) one-hot long tensor."""
    onehot = torch.zeros(idx.size(0), max_idx)
    onehot.scatter_(1, idx.view(-1, 1), 1)
    return onehot.long()
def gen_random_labels(num_instance, max_idx):
    """Draw *num_instance* class labels uniformly from [0, max_idx)."""
    weights = torch.ones(max_idx)
    return torch.multinomial(weights, num_instance, replacement = True)
def gen_noise_Uniform(num_instance, n_dim=2, lower = 0, upper = 1):
    """generate n-dim uniform random noise in [lower, upper)"""
    base = torch.rand(num_instance, n_dim)
    return lower + base * (upper - lower)
def gen_noise_Gaussian(num_instance, n_dim=2):
    """generate n-dim standard Gaussian random noise"""
    return torch.randn((num_instance, n_dim))
def print_network(net):
    """Print the module structure followed by its total parameter count."""
    total = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
def print_network_num_parameters(net):
    """Print only the total number of parameters of *net*."""
    total = sum(p.numel() for p in net.parameters())
    print('Total number of parameters: %d' % total)
def initialize_weights(net):
    """
    Initialize conv / deconv / linear layers of *net* in place.

    Weights are drawn from N(0, 0.02) and biases, when present, are zeroed.
    Fixed: the original unconditionally accessed m.bias.data on conv layers,
    which raised AttributeError for layers built with bias=False.
    """
    for m in net.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
            m.weight.data.normal_(0, 0.02)
            if m.bias is not None:
                m.bias.data.zero_()
    print('Weight initialized')
| StarcoderdataPython |
5069993 | import requests
from pyprintplus import Log
class Flaschentaschen():
    """Minimal client for the Noisebridge Flaschen Taschen LED display."""

    def __init__(self, show_log=True):
        """
        :param show_log: When True, log lines are also echoed via pyprintplus.
        """
        self.logs = ['self.__init__']
        self.show_log = show_log
        self.url = 'http://pegasus.noise:4444/api'
        self.help = 'https://www.noisebridge.net/Flaschen_Taschen'

    def log(self, text):
        """Record *text* in self.logs and optionally print it."""
        self.logs.append(text)
        if self.show_log:
            Log().print('{}'.format(text))

    def showText(self, text):
        """POST *text* to the display; network failures are logged, not raised."""
        try:
            self.log('showText()')
            requests.post(self.url+'/text', {'text': text})
        except requests.RequestException:
            # narrowed from a bare ``except`` so programming errors and
            # KeyboardInterrupt are no longer silently swallowed
            self.log('-> ERROR: Couldnt talk to Flaschentaschen. Make sure to deactivate your VPN connection and be in the local Noisebridge network.')
| StarcoderdataPython |
5094227 | """Version tests."""
from sphinxcontrib.towncrier import __version__
def test_version():
    """Test that version has at least 3 dot-separated parts."""
    parts = __version__.split('.')
    assert len(parts) >= 3
| StarcoderdataPython |
8146523 | import os, stat, hashlib, collections
from filekeep import logger, xml
def sha1_file(path, logger=None):
    """
    Return the hex SHA-1 digest of the file at *path*, reading in 64 KiB chunks.
    When *logger* is given, its progress() is fed the size of each chunk read.
    """
    digest = hashlib.sha1()
    with open(path, "rb", buffering=0) as f:
        for chunk in iter(lambda: f.read(65536), b""):
            digest.update(chunk)
            if logger:
                logger.progress(len(chunk))
    return digest.hexdigest()
def compare_times(a, b, flexible):
    """
    Compare two nanosecond timestamps; with *flexible* they only need to fall
    in the same whole second, otherwise they must match exactly.
    """
    if not flexible:
        return a == b
    return (a // 1000000000) == (b // 1000000000)
class File:
    """A regular file entry: name, size, mtime (ns), mode bits and SHA-1."""

    @staticmethod
    def from_file(path, calculate_sha1=False):
        """Build a File from the filesystem entry at *path* (stat, optional hash)."""
        info = os.lstat(path)
        entry = File(os.path.basename(path), info.st_size, info.st_mtime_ns, stat.S_IMODE(info.st_mode))
        entry.path = path
        if calculate_sha1:
            entry.sha1 = sha1_file(path)
        return entry

    @staticmethod
    def from_xml(el):
        """Build a File from a <file> XML element."""
        entry = File(el.get("name"), int(el.get("size")), int(el.get("mtime")), int(el.get("mode") or 0))
        entry.sha1 = el.get("sha1")
        return entry

    def __init__(self, name, size, mtime, mode):
        self.path = None   # absolute/relative path, set by from_file only
        self.name = name
        self.size = size
        self.mtime = mtime   # nanoseconds
        self.mode = mode     # permission bits
        self.sha1 = ""

    def to_xml(self):
        """Serialize this entry to a <file> XML element."""
        node = xml.ET.Element("file")
        node.set("name", self.name)
        node.set("mtime", str(self.mtime))
        node.set("mode", str(self.mode))
        node.set("size", str(self.size))
        node.set("sha1", self.sha1)
        return node

    def print_sha1sum(self, rel):
        """Print a 'sha1sum -b'-style line, prefixing the name with *rel*."""
        prefix = rel + "/" if rel else rel
        print(self.sha1 + " *" + prefix + self.name)
class Directory:
    """A directory entry holding an ordered mapping of child File/Directory entries."""

    @staticmethod
    def from_file(path):
        """Build a Directory (without children) from the filesystem entry at *path*."""
        info = os.lstat(path)
        entry = Directory(os.path.basename(path), info.st_mtime_ns, stat.S_IMODE(info.st_mode))
        entry.path = path
        return entry

    @staticmethod
    def from_xml(el):
        """Build a Directory (recursively, with children) from a <directory> element."""
        entry = Directory(el.get("name"), int(el.get("mtime") or 0), int(el.get("mode") or 0))
        for child in el:
            if child.tag == "directory":
                sub = Directory.from_xml(child)
                entry.entries[sub.name] = sub
            elif child.tag == "file":
                sub = File.from_xml(child)
                entry.entries[sub.name] = sub
        return entry

    def __init__(self, name=None, mtime=0, mode=509):
        self.path = None   # set by from_file only
        self.name = name
        self.mtime = mtime   # nanoseconds
        self.mode = mode     # permission bits (509 == 0o775)
        self.entries = collections.OrderedDict()

    def to_xml(self):
        """Serialize this directory and all children to a <directory> element."""
        node = xml.ET.Element("directory")
        if self.name != None:
            node.set("name", self.name)
        node.set("mtime", str(self.mtime))
        node.set("mode", str(self.mode))
        for child in self.entries.values():
            node.append(child.to_xml())
        return node

    def size(self):
        """Total size in bytes of all files under this directory, recursively."""
        total = 0
        for child in self.entries.values():
            total += child.size if isinstance(child, File) else child.size()
        return total

    def print_sha1sum(self, rel):
        """Recursively print sha1sum lines for all children, prefixed by *rel*/name."""
        prefix = rel + "/" if rel else rel
        if self.name:
            prefix += self.name
        for child in self.entries.values():
            child.print_sha1sum(prefix)
class Collection:
    """A tracked directory tree plus its persisted metadata file (filekeep.xml)."""

    def __init__(self, path):
        # Metadata lives at <path>/filekeep.xml for '.', otherwise next to the
        # tree as '<path>.filekeep.xml'. Load it if present, else start fresh.
        self.path = path
        if self.path == '.':
            self.path_xml = os.path.join(self.path, 'filekeep.xml')
        else:
            self.path_xml = self.path + '.filekeep.xml'
        if os.path.isfile(self.path_xml):
            root = xml.read(self.path_xml)
            self.name = root.find("name").text
            self.directory = Directory.from_xml(root.find("directory"))
            self.exists = True
        else:
            self.name = os.path.abspath(self.path) if self.path == '.' else self.path
            st = os.lstat(path)
            self.directory = Directory(os.path.basename(path), st.st_mtime_ns, stat.S_IMODE(st.st_mode))
            self.exists = False
        # Progress logger sized by the total byte count of the tree.
        self.logger = logger.create(self.size())

    def write_data(self):
        """Serialise the collection metadata back to its XML file."""
        root = xml.ET.Element("collection")
        name = xml.ET.Element("name")
        name.text = self.name
        root.append(name)
        root.append(self.directory.to_xml())
        xml.write(self.path_xml, root)

    def set_name(self, name):
        self.name = name

    def size(self):
        """Total size in bytes of all files recorded in the collection."""
        return self.directory.size()

    def create_from_path(self, quiet=False):
        """Walk the tree and record every directory and file (with SHA-1 hashes).

        Returns a (directory_count, file_count) tuple; the root counts as one
        directory.
        """
        dirs = {
            self.path: self.directory
        }
        cd = 1
        cf = 0
        for dirpath, dirnames, filenames in os.walk(self.path):
            d = dirs[dirpath]
            for dirname in dirnames:
                path = os.path.join(dirpath, dirname)
                d.entries[dirname] = dirs[path] = Directory.from_file(path)
                cd += 1
            if not quiet:
                print(dirpath)
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                if not quiet:
                    print(" " + filename)
                d.entries[filename] = File.from_file(path, True)
                cf += 1
        return (cd, cf)

    def verify(self, fast=False, touch=False, flexible_times=False):
        """Check the tree on disk against the recorded metadata.

        fast: skip the SHA-1 check when the size matches (unless a touch is
            pending, which forces a hash check before repairing timestamps).
        touch: repair mtime/mode mismatches (recorded values win) instead of
            treating them as errors; only applied when nothing else failed.
        flexible_times: compare mtimes at second rather than ns precision.
        Returns True when the tree matches the metadata.
        """
        paths_to_touch = []
        # directory entries referenced by relative path
        dirs = {
            self.path: self.directory
        }
        # function return value
        result = True
        for dirpath, dirnames, filenames in os.walk(self.path):
            if not dirpath in dirs:
                continue
            found_error = False
            d = dirs[dirpath]
            entries = d.entries.copy()
            # process directories
            for dirname in dirnames:
                path = os.path.join(dirpath, dirname)
                if dirname in entries and isinstance(entries[dirname], Directory):
                    dirs[path] = entries[dirname]
                    del entries[dirname]
                else:
                    self.logger.error("extra directory '" + path + "'")
                    found_error = True
            # process files
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                if filename in entries and isinstance(entries[filename], File):
                    st = os.lstat(path)
                    needs_touch = False
                    if not compare_times(entries[filename].mtime, st.st_mtime_ns, flexible_times):
                        self.logger.error("'" + path + "' different mtime")
                        if touch:
                            needs_touch = True
                        else:
                            found_error = True
                    # mode 0 means "not recorded"; skip the mode check then
                    if entries[filename].mode != 0 and entries[filename].mode != stat.S_IMODE(st.st_mode):
                        self.logger.error("'{}' different mode ({} != {})".format(path, str(stat.S_IMODE(st.st_mode)), str(entries[filename].mode)))
                        if touch:
                            needs_touch = True
                        else:
                            found_error = True
                    if entries[filename].size != st.st_size:
                        self.logger.error("'" + path + "' different size")
                        self.logger.progress(entries[filename].size)
                        found_error = True
                    elif (not fast or needs_touch) and entries[filename].sha1 != sha1_file(path, self.logger):
                        self.logger.error("'" + path + "' different sha1")
                        found_error = True
                    elif needs_touch:
                        # content verified identical; safe to restore metadata
                        paths_to_touch.append((path, entries[filename]))
                    elif fast:
                        self.logger.progress(entries[filename].size)
                    del entries[filename]
                elif path != "./filekeep.xml":
                    self.logger.error("extra file '" + path + "'")
                    found_error = True
            # handle missing entries
            for e in entries.values():
                found_error = True
                path = os.path.join(dirpath, e.name)
                if isinstance(e, Directory):
                    self.logger.error("missing directory '" + path + "'")
                    self.logger.progress(e.size())
                else:
                    self.logger.error("missing file '" + path + "'")
                    self.logger.progress(e.size)
            # process directory
            if dirpath != '.':
                st = os.lstat(dirpath)
                if not compare_times(d.mtime, st.st_mtime_ns, flexible_times):
                    self.logger.error("'" + dirpath + "' (directory) different mtime")
                    if touch and not found_error:
                        paths_to_touch.append((dirpath, d))
                    else:
                        result = False
                if d.mode != 0 and d.mode != stat.S_IMODE(st.st_mode):
                    self.logger.error("'{}' (directory) different mode ({} != {})".format(dirpath, str(stat.S_IMODE(st.st_mode)), str(d.mode)))
                    if touch and not found_error:
                        paths_to_touch.append((dirpath, d))
                    else:
                        result = False
            if found_error:
                result = False
        if touch:
            if paths_to_touch:
                self.logger.print("touching")
                for (path, entry) in paths_to_touch:
                    os.utime(path, ns=(entry.mtime, entry.mtime))
                    if entry.mode != 0:
                        os.chmod(path, entry.mode)
            else:
                self.logger.print("nothing to touch")
        return result

    def all_files(self):
        """Yield (relative_path, File) pairs for every file in the collection."""
        def func(d, path=''):
            if d.name != None:
                path += d.name + '/'
            for entry in d.entries.values():
                if isinstance(entry, Directory):
                    for a, b in func(entry, path):
                        yield a, b
                else:
                    yield path + entry.name, entry
        return func(self.directory)

    def all_files_by_sha1(self):
        """Group file paths by SHA-1; returns {sha1: [path, ...]}."""
        ret = {}
        for path, entry in self.all_files():
            if entry.sha1 in ret:
                ret[entry.sha1].append(path)
            else:
                ret[entry.sha1] = [path]
        return ret

    def find_duplicates(self):
        """Return {sha1: [paths]} restricted to hashes with more than one path."""
        dups = {}
        for sha1, paths in self.all_files_by_sha1().items():
            if len(paths) > 1:
                dups[sha1] = paths
        return dups

    def print_sha1sum(self):
        """Print a sha1sum(1)-compatible listing of the whole collection."""
        self.directory.print_sha1sum("")
| StarcoderdataPython |
5127891 | <filename>tests/ssl_api.py
from tests.test import WebTest
from models import Test
from database import db_session
from datetime import datetime
import requests
import time
class SSLAPITest(WebTest):
    """Scan test that grades a site's TLS configuration via the Qualys SSL Labs v2 API."""

    API_ENDPOINT = "https://api.ssllabs.com/api/v2/"

    def __init__(self, scan):
        self.scan = scan
        # Record the test row as started (status 1 = running).
        self.test = Test(scan_id=self.scan.id, name="SSLAPI", status=1, started_date=datetime.utcnow())
        db_session.add(self.test)
        db_session.commit()

    def run(self):
        """Start a fresh assessment, poll until READY/ERROR, store per-endpoint grades."""
        if not self.api_check():
            return self.finish(status=3)
        options = {'host': self.scan.website.get_url(), 'publish': 'off', 'startNew': 'on', 'fromCache': 'off', 'all': 'done', 'ignoreMismatch': 'on'}
        results = self.api_request(options)
        if results == False:
            return self.finish(status=3)
        # Subsequent polls must not carry 'startNew' or every poll would
        # restart the assessment from scratch.
        options.pop('startNew')
        while results == False or (results['status'] != 'READY' and results['status'] != 'ERROR'):
            time.sleep(10)
            results = self.api_request(options)
        endpoints = results['endpoints']
        self.add_test_data(key="SSL_ENDPOINT_COUNT", value=len(endpoints))
        for i, endpoint in enumerate(endpoints, start=1):
            # NOTE(review): endpoints whose assessment failed may lack a
            # 'grade' key in the SSL Labs response — confirm before relying
            # on it here.
            self.add_test_data(key="SSL_ENDPOINT_" + str(i) + "_GRADE", value=endpoint["grade"])
            self.add_test_data(key="SSL_ENDPOINT_" + str(i) + "_IP", value=endpoint["ipAddress"])
        self.finish(status=2)

    def api_request(self, options):
        """GET /analyze with *options*; return decoded JSON, or False on network error."""
        url = self.API_ENDPOINT + "analyze"
        try:
            response = requests.get(url, params=options)
        except requests.exceptions.RequestException:
            # BUG FIX: was 'requests.exception.RequestException' (no such
            # attribute), which raised AttributeError instead of catching
            # network failures.
            return False
        data = response.json()
        return data

    def api_check(self):
        """Return True if the SSL Labs API is reachable and has assessment slots free."""
        url = self.API_ENDPOINT + "info"
        try:
            response = requests.get(url)
        except requests.exceptions.RequestException:
            # BUG FIX: same 'requests.exception' -> 'requests.exceptions' typo.
            return False
        if response.status_code != requests.codes.ok:
            return False
        info = response.json()
        if info.get('maxAssessments') <= 0:
            return False
        return True

    def finish(self, status):
        """Persist the final status; return False for failure status 3, True otherwise."""
        self.test.finished_date = datetime.utcnow()
        self.test.status = status
        db_session.commit()
        if status == 3:
            return False
        return True
1785792 | from pathlib import Path
from ombpdf.semdom import to_dom
from .snapshot import assert_snapshot_matches
MY_DIR = Path(__file__).parent
def assert_dom_xml_snapshot_matches(doc, force_overwrite=False):
    """Render *doc* to pretty-printed DOM XML and compare it to the stored snapshot."""
    pretty = to_dom(doc).toprettyxml(indent=' ')
    stem = Path(doc.filename).stem
    snapshot_path = MY_DIR / f'test_semdom.snapshot.{stem}.xml'
    assert_snapshot_matches(snapshot_path, pretty, 'DOM XML',
                            force_overwrite=force_overwrite)
def test_m_14_10_import(m_14_10_doc):
    # Snapshot-compare the DOM generated from the OMB M-14-10 fixture.
    assert_dom_xml_snapshot_matches(m_14_10_doc)

def test_m_16_19_import(m_16_19_doc):
    # Snapshot-compare the DOM generated from the OMB M-16-19 fixture.
    assert_dom_xml_snapshot_matches(m_16_19_doc)
| StarcoderdataPython |
5077908 | from command.public import SourceCommand
from remote_execution.public import RemoteHostExecutor
class DeviceModifyingCommand(SourceCommand):
    """Utility base for commands that iterate over the mapped source/target devices."""

    def _execute_on_every_device(self, executable_for_disks, executable_for_partitions=None, include_swap=False):
        """Run callables over every mapped device (and optionally each partition).

        executable_for_disks(remote_executor, (source_id, source_device),
        (target_id, target_device)) is invoked per disk;
        executable_for_partitions additionally receives the source and target
        partition tuples. Swap filesystems are skipped unless *include_swap*.
        Returns the RemoteHostExecutor used, for further use by the caller.
        """
        remote_executor = RemoteHostExecutor(self._target.remote_host)
        source_devices = self._source.remote_host.system_info['block_devices']
        for source_id, target_device in self._target.device_mapping.items():
            source_device = source_devices[source_id]
            # 'include_swap or fs != swap' is the simplified, equivalent form
            # of 'include_swap or (not include_swap and fs != swap)'.
            if executable_for_disks and (include_swap or source_device['fs'] != 'swap'):
                executable_for_disks(
                    remote_executor,
                    (source_id, source_device),
                    (target_device['id'], target_device),
                )
            if not executable_for_partitions:
                continue
            for partition_id, target_partition in target_device['children'].items():
                source_partition = source_device['children'][partition_id]
                if include_swap or source_partition['fs'] != 'swap':
                    executable_for_partitions(
                        remote_executor,
                        (source_id, source_device),
                        (target_device['id'], target_device),
                        (partition_id, source_partition),
                        (target_partition['id'], target_partition),
                    )
        return remote_executor

    def _execute_on_every_partition(self, executable):
        """Run *executable* on every mapped partition (disk-level callable omitted)."""
        return self._execute_on_every_device(None, executable)
| StarcoderdataPython |
11222916 | import json
import logging
import copy
from lib.rdcl_graph import RdclGraph
from lib.nemo.nemo_external_parser import Nemo_Intent, Nemo_Nodemodel
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('NemoRdclGraph')
class NemoRdclGraph(RdclGraph):
    """Builds the GUI graph representation (vertices/edges) for a NEMO project."""

    def __init__(self):
        pass

    def build_graph_from_project(self, json_project, model=None):
        """Create a single graph object covering every intent and nodemodel.

        BUG FIX: *model* previously defaulted to a mutable ``{}`` shared
        across calls (classic Python pitfall); it now defaults to a fresh
        dict per call, which is backward-compatible for all callers.
        """
        if model is None:
            model = {}
        graph_object = {
            'vertices': [],
            'edges': [],
            'graph_parameters': {},
            'model': model
        }
        try:
            # 'positions' is optional layout data; False signals "no layout".
            positions = json_project['positions'] if 'positions' in json_project else False
            log.debug('build graph from project json')
            if 'intent' in json_project:
                for intent in json_project['intent']:
                    self.create_views_for_intent(Nemo_Intent(json_project['intent'][intent]).to_dict(), intent, positions, graph_object)
            if 'nodemodel' in json_project:
                for nodemodel in json_project['nodemodel']:
                    self.create_views_for_nodemodel(Nemo_Nodemodel(json_project['nodemodel'][nodemodel]).to_dict(), nodemodel, positions, graph_object)
        except Exception:
            log.exception('Exception in build_graph_from_project')
            raise
        return graph_object

    def create_views_for_intent(self, intent, name, positions, graph_object):
        """Add the intent node, one node per member nodemodel, and the intent's links."""
        self.add_node(name, 'intent', name, positions, graph_object)
        for node in intent['nodes']:
            self.add_node(node["name"], 'nodemodel', name, positions, graph_object)
        for link in intent['connections']:
            self.add_link(link['endpoints'][0], link['endpoints'][1], 'intent', name, graph_object)

    def create_views_for_nodemodel(self, nodemodel, name, positions, graph_object):
        """Add the nodemodel node plus one node per subnode and per property."""
        self.add_node(name, 'nodemodel', name, positions, graph_object)
        for node in nodemodel['subnodes']:
            self.add_node(node, 'subnode', name, positions, graph_object)
        for nemo_property in nodemodel['properties']:
            self.add_node(nemo_property, 'nemo_property', name, positions, graph_object)
| StarcoderdataPython |
8119016 | #!/usr/bin/env python3
#
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
import logging
from homekit.controller import Controller
from homekit.log_support import setup_logging, add_log_arguments
def setup_args_parser():
    """Build and parse the command-line arguments for the get_events tool.

    Required: -f pairing file, -a pairing alias, -c aid.iid (repeatable).
    Optional: -e max events, -s max seconds (-1 = unlimited), --adapter.
    """
    parser = argparse.ArgumentParser(description='HomeKit get_events app - listens to events from accessories')
    parser.add_argument('-f', action='store', required=True, dest='file', help='File with the pairing data')
    parser.add_argument('-a', action='store', required=True, dest='alias', help='alias for the pairing')
    parser.add_argument('-c', action='append', required=True, dest='characteristics',
                        help='Use aid.iid value to change the value. Repeat to change multiple characteristics.')
    parser.add_argument('-e', action='store', required=False, dest='eventCount', help='max number of events before end',
                        default=-1, type=int)
    parser.add_argument('-s', action='store', required=False, dest='secondsCount', default=-1, type=int,
                        help='max number of seconds before end')
    parser.add_argument('--adapter', action='store', dest='adapter', default='hci0',
                        help='the bluetooth adapter to be used (defaults to hci0)')
    add_log_arguments(parser)
    return parser.parse_args()
def func(events):
    """Callback for pairing.get_events: print one line per received event."""
    for event in events:
        aid, iid, value = event[0], event[1], event[2]
        print('event for {aid}.{iid}: {event}'.format(aid=aid, iid=iid, event=value))
if __name__ == '__main__':
    args = setup_args_parser()
    setup_logging(args.loglevel)
    controller = Controller(args.adapter)
    # Load the pairing data file; abort on any failure.
    try:
        controller.load_data(args.file)
    except Exception as e:
        print(e)
        logging.debug(e, exc_info=True)
        sys.exit(-1)
    if args.alias not in controller.get_pairings():
        print('"{a}" is no known alias'.format(a=args.alias))
        sys.exit(-1)
    try:
        pairing = controller.get_pairings()[args.alias]
        # Each '-c' option is an 'aid.iid' pair identifying one characteristic.
        characteristics = [(int(c.split('.')[0]), int(c.split('.')[1])) for c in args.characteristics]
        # Blocks until max_events/max_seconds is reached; 'func' prints events.
        results = pairing.get_events(characteristics, func, max_events=args.eventCount, max_seconds=args.secondsCount)
    except KeyboardInterrupt:
        sys.exit(-1)
    except Exception as e:
        print(e)
        logging.debug(e, exc_info=True)
        sys.exit(-1)
    # Report any characteristics whose subscription failed (negative status).
    for key, value in results.items():
        aid = key[0]
        iid = key[1]
        status = value['status']
        desc = value['description']
        if status < 0:
            print('put_characteristics failed on {aid}.{iid} because: {reason} ({code})'.format(aid=aid, iid=iid,
                                                                                               reason=desc,
                                                                                               code=status))
| StarcoderdataPython |
8091825 | <filename>cogs/commands/admin.py
import discord
from discord.ext import commands
class Admin(commands.Cog):
    """Admin-only commands, restricted to holders of a specific role in the home guild."""

    def __init__(self, bot):
        self.bot = bot

    async def cog_check(self, ctx):
        # Gate every command in this cog: the invoker must be a member of the
        # home guild (hard-coded id) AND hold the admin role (hard-coded id).
        guild = self.bot.get_guild(700880842309894175)
        if ctx.author in guild.members:
            user = guild.get_member(ctx.author.id)
            role = discord.utils.get(guild.roles, id=700890355259670599)
            if role in user.roles:
                return True
            return False
        return False

    @commands.command(name="reload", hidden=True)
    async def _reload(self, ctx, cog_name):
        # Reload 'cogs.<cog_name>' in place. On failure reply with a Japanese
        # "the specified cog was not found" message; on success react with a
        # check mark.
        try:
            self.bot.reload_extension(f'cogs.{cog_name}')
        except commands.ExtensionNotFound:
            await ctx.send('指定されたcogが見つかりませんでした')
        except commands.ExtensionNotLoaded:
            await ctx.send('指定されたcogが見つかりませんでした')
        else:
            await ctx.message.add_reaction('\U00002705')
def setup(bot):
    # discord.py extension entry point: registers the Admin cog on load_extension.
    bot.add_cog(Admin(bot))
| StarcoderdataPython |
9779347 | from functools import reduce
import numpy as np
def parse_data():
    """Read the day-3 puzzle input: one comma-separated wire path per line."""
    with open('2019/03/input.txt') as f:
        raw = f.read()
    return [line.split(",") for line in raw.splitlines()]
def wire_circuit(data):
    """Trace each wire and return (per-wire location lists, set of crossing points).

    Each wire is a list of moves like 'R8'; locations include the origin at
    index 0, so a location's list index equals its step count from the origin.
    The origin itself is removed from the crossing set.
    """
    step_vectors = {
        "U": np.array([0, 1]),
        "D": np.array([0, -1]),
        "L": np.array([-1, 0]),
        "R": np.array([1, 0]),
    }
    wire_locations = []
    for wire in data:
        position = (0, 0)
        trail = [position]
        for instruction in wire:
            vector = step_vectors[instruction[0]]
            distance = int(instruction[1:])
            trail.extend(tuple(position + vector * step) for step in range(1, distance + 1))
            position = position + vector * distance
        wire_locations.append(trail)
    intersections = reduce(lambda first, second: set(first) & set(second), wire_locations)
    intersections.remove((0, 0))
    return wire_locations, intersections

def part_one(data):
    """Manhattan distance from the origin to the closest wire crossing."""
    _, intersections = wire_circuit(data)
    return min(abs(x) + abs(y) for x, y in intersections)

def part_two(data):
    """Fewest combined steps along both wires to reach any crossing."""
    wire_locations, intersections = wire_circuit(data)
    return min(
        sum(trail.index(crossing) for trail in wire_locations)
        for crossing in intersections
    )
def main():
    """Solve both parts of AoC 2019 day 3 and print the answers."""
    wires = parse_data()
    print(f'Day 03 Part 01: {part_one(wires)}')
    print(f'Day 03 Part 02: {part_two(wires)}')
| StarcoderdataPython |
8124981 | <gh_stars>1000+
#!/usr/bin/env python
import asyncio
import logging
import time
import aiohttp
from hummingbot.connector.exchange.wazirx import wazirx_constants as CONSTANTS
from typing import Optional, List, Dict, AsyncIterable, Any
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.logger import HummingbotLogger
from . import wazirx_utils
from .wazirx_active_order_tracker import WazirxActiveOrderTracker
from .wazirx_order_book import WazirxOrderBook
from .wazirx_utils import ms_timestamp_to_s
class WazirxAPIOrderBookDataSource(OrderBookTrackerDataSource):
    """Order-book data source for WazirX: REST snapshots plus websocket trade/depth streams."""

    # Websocket receive timeout and ping interval, in seconds.
    MESSAGE_TIMEOUT = 30.0
    PING_TIMEOUT = 10.0

    _logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily-initialised, class-wide logger.
        if cls._logger is None:
            cls._logger = logging.getLogger(__name__)
        return cls._logger

    def __init__(
        self,
        trading_pairs: List[str] = None,
        throttler: Optional[AsyncThrottler] = None,
        shared_client: Optional[aiohttp.ClientSession] = None,
    ):
        super().__init__(trading_pairs)
        self._trading_pairs: List[str] = trading_pairs
        self._snapshot_msg: Dict[str, any] = {}
        # Fall back to fresh session/throttler instances when none are injected.
        self._shared_client = shared_client or self._get_session_instance()
        self._throttler = throttler or self._get_throttler_instance()

    @classmethod
    def _get_session_instance(cls) -> aiohttp.ClientSession:
        session = aiohttp.ClientSession()
        return session

    @classmethod
    def _get_throttler_instance(cls) -> AsyncThrottler:
        throttler = AsyncThrottler(CONSTANTS.RATE_LIMITS)
        return throttler

    @classmethod
    async def get_last_traded_prices(
        cls,
        trading_pairs: List[str],
        throttler: Optional[AsyncThrottler] = None,
        shared_client: Optional[aiohttp.ClientSession] = None
    ) -> Dict[str, float]:
        """Return the last traded price per requested pair from the 24h ticker endpoint.

        Pairs with no matching ticker entry are simply omitted from the result.
        """
        shared_client = shared_client or cls._get_session_instance()
        result = {}
        throttler = throttler or cls._get_throttler_instance()
        async with throttler.execute_task(CONSTANTS.GET_TICKER_24H):
            async with shared_client.get(f"{CONSTANTS.WAZIRX_API_BASE}/{CONSTANTS.GET_TICKER_24H}") as resp:
                resp_json = await resp.json()
                for t_pair in trading_pairs:
                    last_trade = [float(o["lastPrice"]) for o in resp_json if o["symbol"] == wazirx_utils.convert_to_exchange_trading_pair(t_pair)]
                    if last_trade and last_trade[0] is not None:
                        result[t_pair] = last_trade[0]
        return result

    @staticmethod
    async def fetch_trading_pairs(throttler: Optional[AsyncThrottler] = None) -> List[str]:
        """Return all spot-tradable pairs as 'BASE-QUOTE' strings (empty list on failure)."""
        async with aiohttp.ClientSession() as client:
            throttler = throttler or WazirxAPIOrderBookDataSource._get_throttler_instance()
            async with throttler.execute_task(CONSTANTS.GET_EXCHANGE_INFO):
                async with client.get(f"{CONSTANTS.WAZIRX_API_BASE}/{CONSTANTS.GET_EXCHANGE_INFO}", timeout=10) as response:
                    if response.status == 200:
                        try:
                            data: Dict[str, Any] = await response.json()
                            return [str(item["baseAsset"]).upper() + '-' + str(item["quoteAsset"]).upper()
                                    for item in data["symbols"]
                                    if item["isSpotTradingAllowed"] is True]
                        except Exception:
                            pass
        # Do nothing if the request fails -- there will be no autocomplete for kucoin trading pairs
        return []

    async def get_order_book_data(self, trading_pair: str, throttler: Optional[AsyncThrottler] = None) -> Dict[str, any]:
        """
        Get whole orderbook
        """
        throttler = throttler or self._get_throttler_instance()
        async with throttler.execute_task(CONSTANTS.GET_ORDERBOOK):
            async with self._shared_client.get(
                f"{CONSTANTS.WAZIRX_API_BASE}/{CONSTANTS.GET_ORDERBOOK}?limit=100&symbol="
                f"{wazirx_utils.convert_to_exchange_trading_pair(trading_pair)}"
            ) as orderbook_response:
                if orderbook_response.status != 200:
                    raise IOError(
                        f"Error fetching OrderBook for {trading_pair} at {CONSTANTS.EXCHANGE_NAME}. "
                        f"HTTP status is {orderbook_response.status}."
                    )
                orderbook_data: List[Dict[str, Any]] = await safe_gather(orderbook_response.json())
                return orderbook_data[0]

    async def get_new_order_book(self, trading_pair: str) -> OrderBook:
        """Build a fresh OrderBook instance from a REST snapshot."""
        snapshot: Dict[str, Any] = await self.get_order_book_data(trading_pair)
        snapshot_timestamp: float = time.time()
        snapshot_msg: OrderBookMessage = WazirxOrderBook.snapshot_message_from_exchange(
            snapshot,
            snapshot_timestamp,
            metadata={"trading_pair": trading_pair}
        )
        order_book = self.order_book_create_function()
        active_order_tracker: WazirxActiveOrderTracker = WazirxActiveOrderTracker()
        bids, asks = active_order_tracker.convert_snapshot_message_to_order_book_row(snapshot_msg)
        order_book.apply_snapshot(bids, asks, snapshot_msg.update_id)
        return order_book

    async def _create_websocket_connection(self) -> aiohttp.ClientWebSocketResponse:
        """
        Initialize WebSocket client for APIOrderBookDataSource
        """
        try:
            return await aiohttp.ClientSession().ws_connect(url=CONSTANTS.WSS_URL)
        except asyncio.CancelledError:
            raise
        except Exception as e:
            self.logger().network(f"Unexpected error occured when connecting to WebSocket server. "
                                  f"Error: {e}")
            raise

    async def _iter_messages(self,
                             ws: aiohttp.ClientWebSocketResponse) -> AsyncIterable[Any]:
        # Yield decoded JSON payloads until cancelled or an error occurs;
        # the websocket is always closed on exit.
        try:
            while True:
                yield await ws.receive_json()
        except asyncio.CancelledError:
            raise
        except Exception as e:
            self.logger().network(f"Unexpected error occured when parsing websocket payload. "
                                  f"Error: {e}")
            raise
        finally:
            await ws.close()

    async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Subscribe to <symbol>@trades streams and push trade messages into *output*.

        Reconnects forever; the finally block closes the socket and waits 30s
        before each new connection attempt.
        """
        ws = None
        while True:
            try:
                ws = await self._create_websocket_connection()
                streams = [wazirx_utils.convert_to_exchange_trading_pair(pair) + "@trades" for pair in self._trading_pairs]
                subscribe_request: Dict[str, Any] = {
                    "event": "subscribe",
                    "streams": streams
                }
                await ws.send_json(subscribe_request)
                async for json_msg in self._iter_messages(ws):
                    if "stream" in json_msg:
                        if "@trades" in json_msg["stream"]:
                            for trade in json_msg["data"]["trades"]:
                                trade: Dict[Any] = trade
                                trade_timestamp: int = ms_timestamp_to_s(trade["E"])
                                trade_msg: OrderBookMessage = WazirxOrderBook.trade_message_from_exchange(
                                    trade,
                                    trade_timestamp,
                                    metadata={"trading_pair": wazirx_utils.convert_from_exchange_trading_pair(trade["s"])}
                                )
                                output.put_nowait(trade_msg)
            except asyncio.CancelledError:
                raise
            except Exception:
                self.logger().error("Unexpected error with WebSocket connection. Retrying after 30 seconds...",
                                    exc_info=True)
            finally:
                ws and await ws.close()
                await self._sleep(30.0)

    async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """
        WazirX doesn't provide order book diff update at this moment.
        """
        pass

    async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """
        Listen for orderbook snapshots by fetching orderbook
        """
        ws = None
        while True:
            try:
                ws = await self._create_websocket_connection()
                streams = [wazirx_utils.convert_to_exchange_trading_pair(pair) + "@depth" for pair in self._trading_pairs]
                subscribe_request: Dict[str, Any] = {
                    "event": "subscribe",
                    "streams": streams
                }
                await ws.send_json(subscribe_request)
                async for json_msg in self._iter_messages(ws):
                    if "stream" in json_msg:
                        if "@depth" in json_msg["stream"]:
                            data = json_msg["data"]
                            snapshot_timestamp: int = ms_timestamp_to_s(data["E"])
                            # 'a'/'b' are [price, qty] string pairs; convert to floats.
                            _msg = {
                                'asks': [list(map(float, item)) for item in data['a']],
                                'bids': [list(map(float, item)) for item in data['b']],
                            }
                            snapshot_msg: OrderBookMessage = WazirxOrderBook.snapshot_message_from_exchange(
                                _msg,
                                snapshot_timestamp,
                                {"trading_pair": wazirx_utils.convert_from_exchange_trading_pair(data["s"])}
                            )
                            output.put_nowait(snapshot_msg)
            except asyncio.CancelledError:
                raise
            except Exception:
                self.logger().error("Unexpected error with WebSocket connection. Retrying after 30 seconds...",
                                    exc_info=True)
            finally:
                ws and await ws.close()
                await self._sleep(30.0)
| StarcoderdataPython |
5156645 | <reponame>LSSTDESC/firecrown
import numpy as np
from scipy.interpolate import Akima1DInterpolator
import sacc
import pyccl as ccl
from ..cluster_count import ClusterCountStatistic
class DummySource(object):
    """Empty namespace object; the test attaches source attributes ad hoc."""
def test_cluster_count_sacc(tmpdir):
    """End-to-end check of ClusterCountStatistic against a direct CCL halo-model count."""
    # NOTE(review): the tmpdir fixture appears unused in this test body.
    sacc_data = sacc.Sacc()
    params = dict(
        Omega_c=0.27,
        Omega_b=0.045,
        Omega_k=0.0,
        w0=-1.0,
        wa=0.0,
        sigma8=0.8,
        n_s=0.96,
        h=0.67)
    cosmo = ccl.Cosmology(**params)
    # Gaussian dn/dz centred at z=0.5 with width 0.25, normalised to peak 1.
    mn = 0.5
    z = np.linspace(0, 2, 50)
    dndz = np.exp(-0.5 * (z - mn)**2 / 0.25 / 0.25)
    nrm = np.max(dndz)
    dndz /= nrm
    sacc_data.add_tracer(
        'NZ', 'trc1', z, dndz,
        metadata={
            "lnlam_min": 14,
            "lnlam_max": 16,
            "area_sd": 15.1 * (180.0/np.pi)**2
        }
    )
    intp = Akima1DInterpolator(z, dndz)

    def _sel(m, a):
        # Selection function on a (mass, scale-factor) grid: the interpolated
        # redshift weight times a top-hat in log10(mass) over [14, 16).
        a = np.atleast_1d(a)
        m = np.atleast_1d(m)
        z = 1.0 / a - 1.0
        logm = np.log10(m)
        zsel = intp(z)
        # Zero out redshifts outside the interpolator's support (NaNs).
        msk = ~np.isfinite(zsel)
        zsel[msk] = 0.0
        vals = np.zeros((m.shape[0], a.shape[0]))
        vals[:] = zsel
        msk = (logm >= 14) & (logm < 16)
        vals[~msk, :] = 0
        return vals
    mdef = ccl.halos.MassDef(200, 'matter')
    hmf = ccl.halos.MassFuncTinker10(cosmo, mdef,
                                     mass_def_strict=False)
    hbf = ccl.halos.HaloBiasTinker10(cosmo, mass_def=mdef,
                                     mass_def_strict=False)
    hmc = ccl.halos.HMCalculator(
        cosmo, hmf, hbf, mdef,
        integration_method_M='spline',
        nlog10M=256
    )
    # Reference count: halo-model number counts scaled by the 15.1 sr area.
    true_cnts = hmc.number_counts(cosmo, _sel, amin=0.333333, amax=1, na=256)
    true_cnts *= 15.1
    # Deliberately store a scaled-down data point to distinguish measured
    # from predicted values below.
    sacc_data.add_data_point(
        "count",
        ("trc1",),
        true_cnts/10,
    )
    assert true_cnts > 0

    def _src_sel(lnmass, a):
        # The statistic passes ln(mass); adapt to the mass-based selector.
        return _sel(np.exp(lnmass), a)
    source = DummySource()
    source.sacc_tracer = "trc1"
    source.selfunc_ = _src_sel
    source.area_sr_ = 15.1
    source.z_ = z
    sources = {"trc11": source}
    stat = ClusterCountStatistic(
        ["trc11"],
        mass_def=[200, "matter"],
        mass_func="Tinker10",
        halo_bias="Tinker10",
        systematics=None,
        na=256,
        nlog10M=256,
    )
    stat.read(sacc_data, sources)
    stat.compute(cosmo, {}, sources)
    # Predicted counts should reproduce the direct CCL calculation; measured
    # is the (scaled) sacc data point.
    assert np.allclose(stat.predicted_statistic_, true_cnts)
    assert np.allclose(stat.measured_statistic_, true_cnts/10)
assert np.allclose(stat.measured_statistic_, true_cnts/10)
| StarcoderdataPython |
1806962 | <filename>wagtail/admin/tests/test_account_management.py
import unittest
import pytz
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth import views as auth_views
from django.contrib.auth.models import Group, Permission
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from wagtail.admin.localization import (
WAGTAILADMIN_PROVIDED_LANGUAGES, get_available_admin_languages, get_available_admin_time_zones)
from wagtail.admin.views.account import account, profile_tab
from wagtail.images.tests.utils import get_test_image_file
from wagtail.tests.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestAuthentication(TestCase, WagtailTestUtils):
"""
This tests that users can login and logout of the admin interface
"""
def test_login_view(self):
"""
This tests that the login view responds with a login page
"""
# Get login page
response = self.client.get(reverse('wagtailadmin_login'))
# Check that the user received a login page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/login.html')
def test_login_view_post(self):
"""
This posts user credentials to the login view and checks that
the user was logged in successfully
"""
# Create user to log in with
self.create_superuser(username='test', email='<EMAIL>', password='password')
# Post credentials to the login page
response = self.client.post(reverse('wagtailadmin_login'), {
'username': '<EMAIL>' if settings.AUTH_USER_MODEL == 'emailuser.EmailUser' else 'test',
'password': 'password',
# NOTE: This is set using a hidden field in reality
'next': reverse('wagtailadmin_home'),
})
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Check that the user was logged in
self.assertTrue('_auth_user_id' in self.client.session)
self.assertEqual(
str(self.client.session['_auth_user_id']),
str(get_user_model().objects.get(email='<EMAIL>').pk)
)
def test_already_logged_in_redirect(self):
"""
This tests that a user who is already logged in is automatically
redirected to the admin dashboard if they try to access the login
page
"""
# Login
self.login()
# Get login page
response = self.client.get(reverse('wagtailadmin_login'))
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
def test_logged_in_as_non_privileged_user_doesnt_redirect(self):
"""
This tests that if the user is logged in but hasn't got permission
to access the admin, they are not redirected to the admin
This tests issue #431
"""
# Login as unprivileged user
self.create_user(username='unprivileged', password='<PASSWORD>')
self.login(username='unprivileged', password='<PASSWORD>')
# Get login page
response = self.client.get(reverse('wagtailadmin_login'))
# Check that the user received a login page and was not redirected
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/login.html')
def test_logout(self):
"""
This tests that the user can logout
"""
# Login
self.login()
# Get logout page
response = self.client.get(reverse('wagtailadmin_logout'))
# Check that the user was redirected to the login page
self.assertRedirects(response, reverse('wagtailadmin_login'))
# Check that the user was logged out
self.assertFalse('_auth_user_id' in self.client.session)
def test_not_logged_in_redirect(self):
"""
This tests that a not logged in user is redirected to the
login page
"""
# Get dashboard
response = self.client.get(reverse('wagtailadmin_home'))
# Check that the user was redirected to the login page and that next was set correctly
self.assertRedirects(response, reverse('wagtailadmin_login') + '?next=' + reverse('wagtailadmin_home'))
def test_not_logged_in_gives_403_to_ajax_requests(self):
    """Anonymous AJAX requests get a plain 403 instead of a login redirect."""
    ajax_response = self.client.get(
        reverse('wagtailadmin_home'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
    )

    # A redirect would be useless to an XHR caller, so a 403 is returned.
    self.assertEqual(ajax_response.status_code, 403)
def test_not_logged_in_redirect_default_settings(self):
    """Same as the plain redirect test, but with Django's default
    ``LOGIN_URL`` value, to prove it resolves to the same URL as
    ``wagtailadmin_login``.
    """
    with self.settings(LOGIN_URL='django.contrib.auth.views.login'):
        dashboard_response = self.client.get(reverse('wagtailadmin_home'))

        # 'django.contrib.auth.views.login' must resolve to the same URL
        # as 'wagtailadmin_login' for this redirect to succeed.
        self.assertEqual(dashboard_response.status_code, 302)
        expected_url = reverse('wagtailadmin_login') + '?next=' + reverse('wagtailadmin_home')
        self.assertRedirects(dashboard_response, expected_url)
def test_logged_in_no_permission_redirect(self):
    """A logged-in user lacking admin access is sent back to the login page
    with an explanatory error message.
    """
    # Authenticate as a user with no admin permission.
    self.create_user(username='unprivileged', password='<PASSWORD>')
    self.login(username='unprivileged', password='<PASSWORD>')

    # Follow the redirect chain so the rendered message can be inspected.
    dashboard_response = self.client.get(reverse('wagtailadmin_home'), follow=True)

    expected_url = reverse('wagtailadmin_login') + '?next=' + reverse('wagtailadmin_home')
    self.assertRedirects(dashboard_response, expected_url)
    self.assertContains(dashboard_response, 'You do not have permission to access the admin')
def test_logged_in_no_permission_gives_403_to_ajax_requests(self):
    """AJAX requests from a logged-in user without admin access get a 403,
    never a redirect.
    """
    # Authenticate as a user with no admin permission.
    self.create_user(username='unprivileged', password='<PASSWORD>')
    self.login(username='unprivileged', password='<PASSWORD>')

    ajax_response = self.client.get(
        reverse('wagtailadmin_home'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
    )

    self.assertEqual(ajax_response.status_code, 403)
class TestAccountSectionUtilsMixin:
    """Shared helpers for tests of the admin account section."""

    def assertPanelActive(self, response, name):
        """Assert that a panel called *name* appears in any tab of *response*."""
        active = {p.name for tab_panels in response.context['panels_by_tab'].values() for p in tab_panels}
        self.assertIn(name, active, "Panel %s not active in response" % name)

    def assertPanelNotActive(self, response, name):
        """Assert that no panel called *name* appears in any tab of *response*."""
        active = {p.name for tab_panels in response.context['panels_by_tab'].values() for p in tab_panels}
        self.assertNotIn(name, active, "Panel %s active in response" % name)

    def post_form(self, extra_post_data):
        """POST the account form with sensible defaults, overridden/extended
        by *extra_post_data*, and return the response.
        """
        payload = {
            'name_email-first_name': 'Test',
            'name_email-last_name': 'User',
            'name_email-email': self.user.email,
            'notifications-submitted_notifications': 'false',
            'notifications-approved_notifications': 'false',
            'notifications-rejected_notifications': 'true',
            'notifications-updated_comments_notifications': 'true',
            'locale-preferred_language': 'es',
            'locale-current_time_zone': 'Europe/London',
            **extra_post_data,
        }
        return self.client.post(reverse('wagtailadmin_account'), payload)
class TestAccountSection(TestCase, WagtailTestUtils, TestAccountSectionUtilsMixin):
    """
    This tests that the accounts section is working
    """
    def setUp(self):
        # Log in and keep the user around for per-test assertions.
        self.user = self.login()

    def test_account_view(self):
        """
        This tests that the accounts view responds with an index page
        """
        # Get account page
        response = self.client.get(reverse('wagtailadmin_account'))
        # Check that the user received an account page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/account/account.html')
        # All four standard panels should be active for a superuser.
        self.assertPanelActive(response, 'name_email')
        self.assertPanelActive(response, 'notifications')
        self.assertPanelActive(response, 'locale')
        self.assertPanelActive(response, 'password')
        # These fields may hide themselves
        self.assertContains(response, "Email:")
        self.assertContains(response, "Preferred language:")
        if settings.USE_TZ:
            self.assertContains(response, "Current time zone:")
        else:
            self.assertNotContains(response, "Current time zone:")
        # Form media should be included on the page
        self.assertContains(response, 'vendor/colorpicker.js')

    def test_change_name_post(self):
        """Posting new first/last names updates the user record."""
        response = self.post_form({
            'name_email-first_name': 'Fox',
            'name_email-last_name': 'Mulder',
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        # Check that the name was changed
        self.user.refresh_from_db()
        self.assertEqual(self.user.first_name, 'Fox')
        self.assertEqual(self.user.last_name, 'Mulder')

    def test_change_email_post(self):
        """Posting a valid email address updates the user record."""
        response = self.post_form({
            'name_email-email': '<EMAIL>',
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        # Check that the email was changed
        self.user.refresh_from_db()
        self.assertEqual(self.user.email, '<EMAIL>')

    def test_change_email_not_valid(self):
        """Posting an invalid email address is rejected with a form error."""
        response = self.post_form({
            'name_email-email': 'test@email',
        })
        # Check that the user wasn't redirected
        self.assertEqual(response.status_code, 200)
        # Check that a validation error was raised
        self.assertTrue('email' in response.context['panels_by_tab'][profile_tab][0].get_form().errors.keys())
        # Check that the email was not changed
        self.user.refresh_from_db()
        self.assertNotEqual(self.user.email, 'test@email')

    @override_settings(WAGTAIL_EMAIL_MANAGEMENT_ENABLED=False)
    def test_with_email_management_disabled(self):
        """With email management disabled, the email field is not rendered."""
        # Get account page
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/account/account.html')
        self.assertNotContains(response, "Email:")

    @override_settings(WAGTAIL_PASSWORD_MANAGEMENT_ENABLED=False)
    def test_account_view_with_password_management_disabled(self):
        """With password management disabled, no change-password option is shown."""
        # Get account page
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/account/account.html')
        # Page should NOT contain a 'Change password' option
        self.assertNotContains(response, "Change password")

    @override_settings(WAGTAIL_PASSWORD_MANAGEMENT_ENABLED=False)
    def test_change_password_view_disabled(self):
        """With password management disabled, the password panel is inactive."""
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertPanelNotActive(response, 'password')

    def test_change_password(self):
        """Posting matching old/new passwords changes the password."""
        response = self.post_form({
            'password-old_password': 'password',
            'password-<PASSWORD>1': '<PASSWORD>',
            'password-new_password2': '<PASSWORD>',
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        # Check that the password was changed
        self.user.refresh_from_db()
        self.assertTrue(self.user.check_password('<PASSWORD>'))

    def test_change_password_post_password_mismatch(self):
        """Mismatched new passwords produce a form error and leave the
        password unchanged."""
        response = self.post_form({
            'password-old_password': 'password',
            'password-new_password1': '<PASSWORD>',
            'password-new_password2': '<PASSWORD>',
        })
        # Check that the user wasn't redirected
        self.assertEqual(response.status_code, 200)
        # Find password panel through context
        password_panel = None
        for panelset in response.context['panels_by_tab'].values():
            for panel in panelset:
                if panel.name == 'password':
                    password_panel = panel
                    break
        # Check that a validation error was raised
        password_form = password_panel.get_form()
        self.assertTrue('new_password2' in password_form.errors.keys())
        self.assertTrue("The two password fields didn’t match." in password_form.errors['new_password2'])
        # Check that the password was not changed
        self.user.refresh_from_db()
        self.assertTrue(self.user.check_password('password'))

    def test_change_notifications(self):
        """Posting the account form stores the notification preferences.

        NOTE(review): the keys posted here lack the ``notifications-``
        prefix used by post_form's defaults, so the asserted values come
        from post_form's own defaults rather than from these overrides —
        confirm this is intentional.
        """
        response = self.post_form({
            'submitted_notifications': 'false',
            'approved_notifications': 'false',
            'rejected_notifications': 'true',
            'updated_comments_notifications': 'true',
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        profile = UserProfile.get_for_user(get_user_model().objects.get(pk=self.user.pk))
        # Check that the notification preferences are as submitted
        self.assertFalse(profile.submitted_notifications)
        self.assertFalse(profile.approved_notifications)
        self.assertTrue(profile.rejected_notifications)
        self.assertTrue(profile.updated_comments_notifications)

    def test_change_language_preferences(self):
        """Posting a preferred language stores it and affects rendered HTML."""
        response = self.post_form({
            'locale-preferred_language': 'es',
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        profile = UserProfile.get_for_user(self.user)
        profile.refresh_from_db()
        # Check that the language preferences are stored
        self.assertEqual(profile.preferred_language, 'es')
        # check that the updated language preference is now indicated in HTML header
        response = self.client.get(reverse('wagtailadmin_home'))
        self.assertContains(response, '<html class="no-js" lang="es" dir="ltr">')

    def test_unset_language_preferences(self):
        """Clearing the preferred language falls back to English."""
        profile = UserProfile.get_for_user(self.user)
        profile.preferred_language = 'en'
        profile.save()
        response = self.post_form({
            'locale-preferred_language': '',
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        # Check that the language preferences are stored
        profile.refresh_from_db()
        self.assertEqual(profile.preferred_language, '')
        # Check that the current language is assumed as English
        self.assertEqual(profile.get_preferred_language(), "en")

    @override_settings(WAGTAILADMIN_PERMITTED_LANGUAGES=[('en', 'English'), ('es', 'Spanish')])
    def test_available_admin_languages_with_permitted_languages(self):
        """The permitted-languages setting restricts the available choices."""
        self.assertListEqual(get_available_admin_languages(), [('en', 'English'), ('es', 'Spanish')])

    def test_available_admin_languages_by_default(self):
        """Without a setting, all provided languages are available."""
        self.assertListEqual(get_available_admin_languages(), WAGTAILADMIN_PROVIDED_LANGUAGES)

    @override_settings(WAGTAILADMIN_PERMITTED_LANGUAGES=[('en', 'English')])
    def test_not_show_options_if_only_one_language_is_permitted(self):
        """With a single permitted language, the selector is hidden."""
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertNotContains(response, "Preferred language:")

    @unittest.skipUnless(settings.USE_TZ, "Timezone support is disabled")
    def test_change_current_time_zone(self):
        """Posting a time zone stores it on the user profile."""
        response = self.post_form({
            'locale-current_time_zone': 'Pacific/Fiji',
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        profile = UserProfile.get_for_user(self.user)
        profile.refresh_from_db()
        # Check that the current time zone is stored
        self.assertEqual(profile.current_time_zone, 'Pacific/Fiji')

    @unittest.skipUnless(settings.USE_TZ, "Timezone support is disabled")
    def test_unset_current_time_zone(self):
        """Posting an empty time zone clears the stored value."""
        response = self.post_form({
            'locale-current_time_zone': '',
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        profile = UserProfile.get_for_user(self.user)
        profile.refresh_from_db()
        # Check that the current time zone are stored
        self.assertEqual(profile.current_time_zone, '')

    @unittest.skipUnless(settings.USE_TZ, "Timezone support is disabled")
    @override_settings(WAGTAIL_USER_TIME_ZONES=['Africa/Addis_Ababa', 'America/Argentina/Buenos_Aires'])
    def test_available_admin_time_zones_with_permitted_time_zones(self):
        """The user-time-zones setting restricts the available choices."""
        self.assertListEqual(get_available_admin_time_zones(),
                             ['Africa/Addis_Ababa', 'America/Argentina/Buenos_Aires'])

    @unittest.skipUnless(settings.USE_TZ, "Timezone support is disabled")
    def test_available_admin_time_zones_by_default(self):
        """Without a setting, pytz's common time zones are available."""
        self.assertListEqual(get_available_admin_time_zones(), pytz.common_timezones)

    @unittest.skipUnless(settings.USE_TZ, "Timezone support is disabled")
    @override_settings(WAGTAIL_USER_TIME_ZONES=['Europe/London'])
    def test_not_show_options_if_only_one_time_zone_is_permitted(self):
        """With a single permitted time zone, the selector is hidden."""
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertNotContains(response, "Current time zone:")

    @unittest.skipIf(settings.USE_TZ, "Timezone support is enabled")
    def test_not_show_options_if_timezone_support_disabled(self):
        """With USE_TZ off, the time zone selector is hidden."""
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertNotContains(response, "Current time zone:")

    @unittest.skipUnless(settings.USE_TZ, "Timezone support is disabled")
    @override_settings(
        WAGTAIL_USER_TIME_ZONES=['Europe/London'],
        WAGTAILADMIN_PERMITTED_LANGUAGES=[('en', 'English')]
    )
    def test_doesnt_render_locale_panel_when_only_one_timezone_and_one_locale_permitted(self):
        """With one language and one time zone, the locale panel vanishes."""
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertPanelNotActive(response, 'locale')

    def test_sensitive_post_parameters(self):
        """The account view marks all POST parameters as sensitive so they
        are redacted from error reports."""
        request = RequestFactory().post('wagtailadmin_account', data={})
        request.user = self.user
        account(request)
        self.assertTrue(hasattr(request, 'sensitive_post_parameters'))
        self.assertEqual(request.sensitive_post_parameters, '__ALL__')
class TestAccountUploadAvatar(TestCase, WagtailTestUtils, TestAccountSectionUtilsMixin):
    """Tests for the avatar upload panel of the account section."""

    def setUp(self):
        self.user = self.login()
        # Two distinct in-memory test images to upload/swap.
        self.avatar = get_test_image_file()
        self.other_avatar = get_test_image_file()

    def test_account_view(self):
        """
        This tests that the account view renders a "Upload a profile picture:" field
        """
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Upload a profile picture:")

    def test_set_custom_avatar_stores_and_get_custom_avatar(self):
        """Uploading an avatar stores it on the user profile."""
        response = self.post_form({
            'avatar-avatar': SimpleUploadedFile('other.png', self.other_avatar.file.getvalue())
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        profile = UserProfile.get_for_user(self.user)
        profile.refresh_from_db()
        self.assertIn('other.png', profile.avatar.url)

    def test_user_upload_another_image_removes_previous_one(self):
        """Uploading a replacement avatar deletes the old file from disk."""
        profile = UserProfile.get_for_user(self.user)
        profile.avatar = self.avatar
        profile.save()
        # Remember where the first avatar lives so we can check it is removed.
        old_avatar_path = profile.avatar.path
        # Upload a new avatar
        response = self.post_form({
            'avatar-avatar': SimpleUploadedFile('other.png', self.other_avatar.file.getvalue())
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        # Check the avatar was changed
        profile.refresh_from_db()
        self.assertIn('other.png', profile.avatar.url)
        # Check old avatar doesn't exist anymore in filesystem
        with self.assertRaises(FileNotFoundError):
            open(old_avatar_path)

    def test_no_value_preserves_current_avatar(self):
        """
        Tests that submitting a blank value for avatar doesn't remove it.
        """
        profile = UserProfile.get_for_user(self.user)
        profile.avatar = self.avatar
        profile.save()
        # Post the form with no avatar field at all.
        response = self.post_form({})
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        # Check the avatar was NOT changed
        profile.refresh_from_db()
        self.assertIn('test.png', profile.avatar.url)

    def test_clear_removes_current_avatar(self):
        """Posts the 'clear' checkbox for the avatar field.

        NOTE(review): despite the test name, the assertion below checks the
        avatar is *unchanged* ('test.png' still present) — confirm whether
        the intended behavior is removal and the assertion is stale.
        """
        profile = UserProfile.get_for_user(self.user)
        profile.avatar = self.avatar
        profile.save()
        # Post the form with the clear checkbox ticked.
        response = self.post_form({
            'avatar-clear': 'on'
        })
        # Check that the user was redirected to the account page
        self.assertRedirects(response, reverse('wagtailadmin_account'))
        profile.refresh_from_db()
        self.assertIn('test.png', profile.avatar.url)
class TestAccountManagementForNonModerator(TestCase, WagtailTestUtils):
    """
    Tests of reduced-functionality for editors
    """

    def setUp(self):
        # A user in the 'Editors' group: can edit, cannot publish.
        self.submitter = self.create_user('submitter', '<EMAIL>', 'password')
        self.submitter.groups.add(Group.objects.get(name='Editors'))
        self.login(username='submitter', password='password')

    def test_notification_preferences_panel_reduced_for_non_moderators(self):
        """A user without publish permission must not be offered the
        'submitted' notification preference.
        """
        response = self.client.get(reverse('wagtailadmin_account'))

        # Locate the notifications panel among all tabs in the context.
        notifications_panel = next(
            (panel
             for panelset in response.context['panels_by_tab'].values()
             for panel in panelset
             if panel.name == 'notifications'),
            None,
        )

        form_fields = notifications_panel.get_form().fields
        self.assertIn('approved_notifications', form_fields.keys())
        self.assertIn('rejected_notifications', form_fields.keys())
        self.assertNotIn('submitted_notifications', form_fields.keys())
        self.assertIn('updated_comments_notifications', form_fields.keys())
class TestAccountManagementForAdminOnlyUser(TestCase, WagtailTestUtils, TestAccountSectionUtilsMixin):
    """
    Tests for users with no edit/publish permissions at all
    """

    def setUp(self):
        # Build a group whose only permission is admin access, and a user in it.
        admin_only_group = Group.objects.create(name='Admin Only')
        admin_only_group.permissions.add(Permission.objects.get(codename='access_admin'))

        self.admin_only_user = self.create_user(
            'admin_only_user',
            '<EMAIL>',
            'password'
        )
        self.admin_only_user.groups.add(admin_only_group)
        self.login(username='admin_only_user', password='password')

    def test_notification_preferences_not_rendered_for_admin_only_users(self):
        """A user with no edit/publish rights sees no notifications panel."""
        response = self.client.get(reverse('wagtailadmin_account'))
        self.assertPanelNotActive(response, 'notifications')
class TestPasswordReset(TestCase, WagtailTestUtils):
    """
    This tests that the password reset is working
    """
    def setUp(self):
        # Create a user
        self.create_superuser(username='test', email='<EMAIL>', password='password')

    def test_password_reset_view(self):
        """
        This tests that the password reset view returns a password reset page
        """
        # Get password reset page
        response = self.client.get(reverse('wagtailadmin_password_reset'))
        # Check that the user received a password reset page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/form.html')

    def test_password_reset_view_post(self):
        """
        This posts an email address to the password reset view and
        checks that a password reset email was sent
        """
        # Post email address to password reset view
        post_data = {
            'email': '<EMAIL>',
        }
        response = self.client.post(reverse('wagtailadmin_password_reset'), post_data)
        # Check that the user was redirected to the done page
        self.assertRedirects(response, reverse('wagtailadmin_password_reset_done'))
        # Check that a password reset email was sent to the user
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['<EMAIL>'])
        self.assertEqual(mail.outbox[0].subject, "Password reset")

    def test_password_reset_view_post_unknown_email(self):
        """
        This posts an unknown email address to the password reset view and
        checks that the password reset form raises a validation error
        """
        post_data = {
            'email': '<EMAIL>',
        }
        response = self.client.post(reverse('wagtailadmin_password_reset'), post_data)
        # Check that the user was redirected to the done page
        # (unknown addresses are not revealed to the requester).
        self.assertRedirects(response, reverse('wagtailadmin_password_reset_done'))
        # Check that an email was not sent
        self.assertEqual(len(mail.outbox), 0)

    def test_password_reset_view_post_invalid_email(self):
        """
        This posts an invalid email address to the password reset view and
        checks that the password reset form raises a validation error
        """
        post_data = {
            'email': 'Hello world!',
        }
        response = self.client.post(reverse('wagtailadmin_password_reset'), post_data)
        # Check that the user wasn't redirected
        self.assertEqual(response.status_code, 200)
        # Check that a validation error was raised
        self.assertTrue('email' in response.context['form'].errors.keys())
        self.assertTrue("Enter a valid email address." in response.context['form'].errors['email'])
        # Check that an email was not sent
        self.assertEqual(len(mail.outbox), 0)

    def setup_password_reset_confirm_tests(self):
        """Prepare a valid uid/token pair and prime the session, mirroring
        the state Django's PasswordResetConfirmView expects."""
        from django.utils.encoding import force_bytes, force_str
        from django.utils.http import urlsafe_base64_encode
        # Get user
        self.user = get_user_model().objects.get(email='<EMAIL>')
        # Generate a password reset token
        self.password_reset_token = PasswordResetTokenGenerator().make_token(self.user)
        # Generate a password reset uid
        self.password_reset_uid = force_str(urlsafe_base64_encode(force_bytes(self.user.pk)))
        # Create url_args
        token = auth_views.PasswordResetConfirmView.reset_url_token
        self.url_kwargs = dict(uidb64=self.password_reset_uid, token=token)
        # Add token to session object
        s = self.client.session
        s.update({
            auth_views.INTERNAL_RESET_SESSION_TOKEN: self.password_reset_token,
        })
        s.save()

    def test_password_reset_confirm_view_invalid_link(self):
        """
        This tests that the password reset view shows an error message if the link is invalid
        """
        self.setup_password_reset_confirm_tests()
        # Create invalid url_args
        self.url_kwargs = dict(uidb64=self.password_reset_uid, token="invalid-token")
        # Get password reset confirm page
        response = self.client.get(reverse('wagtailadmin_password_reset_confirm', kwargs=self.url_kwargs))
        # Check that the user received a password confirm done page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/confirm.html')
        self.assertFalse(response.context['validlink'])
        self.assertContains(response, 'The password reset link was invalid, possibly because it has already been used.')
        self.assertContains(response, 'Request a new password reset')

    def test_password_reset_confirm_view(self):
        """
        This tests that the password reset confirm view returns a password reset confirm page
        """
        self.setup_password_reset_confirm_tests()
        # Get password reset confirm page
        response = self.client.get(reverse('wagtailadmin_password_reset_confirm', kwargs=self.url_kwargs))
        # Check that the user received a password confirm done page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/confirm.html')

    def test_password_reset_confirm_view_post(self):
        """
        This posts a new password to the password reset confirm view and checks
        that the users password was changed
        """
        self.setup_password_reset_confirm_tests()
        # Post new password to change password page
        post_data = {
            'new_password1': '<PASSWORD>',
            'new_password2': '<PASSWORD>',
        }
        response = self.client.post(reverse('wagtailadmin_password_reset_confirm', kwargs=self.url_kwargs), post_data)
        # Check that the user was redirected to the complete page
        self.assertRedirects(response, reverse('wagtailadmin_password_reset_complete'))
        # Check that the password was changed
        self.assertTrue(get_user_model().objects.get(email='<EMAIL>').check_password('<PASSWORD>'))

    def test_password_reset_confirm_view_post_password_mismatch(self):
        """
        This posts a two passwords that don't match to the password reset
        confirm view and checks that a validation error was raised
        """
        self.setup_password_reset_confirm_tests()
        # Post new password to change password page
        post_data = {
            'new_password1': '<PASSWORD>',
            'new_password2': '<PASSWORD>',
        }
        response = self.client.post(reverse('wagtailadmin_password_reset_confirm', kwargs=self.url_kwargs), post_data)
        # Check that the user wasn't redirected
        self.assertEqual(response.status_code, 200)
        # Check that a validation error was raised
        self.assertTrue('new_password2' in response.context['form'].errors.keys())
        self.assertTrue("The two password fields didn’t match." in response.context['form'].errors['new_password2'])
        # Check that the password was not changed
        self.assertTrue(get_user_model().objects.get(email='<EMAIL>').check_password('password'))

    def test_password_reset_done_view(self):
        """
        This tests that the password reset done view returns a password reset done page
        """
        # Get password reset done page
        response = self.client.get(reverse('wagtailadmin_password_reset_done'))
        # Check that the user received a password reset done page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/done.html')

    def test_password_reset_complete_view(self):
        """
        This tests that the password reset complete view returns a password reset complete page
        """
        # Get password reset complete page
        response = self.client.get(reverse('wagtailadmin_password_reset_complete'))
        # Check that the user received a password reset complete page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/account/password_reset/complete.html')
| StarcoderdataPython |
11245329 | # -*- coding: UTF-8 -*-
from django import forms
from apps.registro.models import AnexoAutoridad
class AnexoAutoridadFormFilters(forms.Form):
    """Filter form for AnexoAutoridad listings, optionally scoped to a single
    anexo via the ``anexo_id`` keyword argument."""

    anexo_id = None

    def __init__(self, *args, **kwargs):
        # Pull out the optional ``anexo_id`` before delegating to forms.Form,
        # which would otherwise reject the unexpected keyword.
        self.anexo_id = kwargs.pop('anexo_id', None)
        super(AnexoAutoridadFormFilters, self).__init__(*args, **kwargs)

    def buildQuery(self, q=None):
        """
        Create or refine a search queryset.
        """
        queryset = AnexoAutoridad.objects.all() if q is None else q
        if self.anexo_id is not None:
            queryset = queryset.filter(anexo__id=self.anexo_id)
        return queryset.order_by('apellido', 'nombre')
| StarcoderdataPython |
8091572 | <reponame>Mirantis/ceph-monitoring<filename>ceph_report/service.py<gh_stars>1-10
import sys
import site
import time
import argparse
import subprocess
from typing import List, Any, cast
import configparser
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Dict
import logging.config
from . import get_file, setup_logging
from .utils import CLUSTER_NAME_RE, CLIENT_NAME_RE, re_checker
# Module-level logger for the reporter service.
logger = logging.getLogger("service")

# Sentinel values written into the generated config when the operator
# disables uploading / provides no HTTP credentials.
DONT_UPLOAD = 'dont_upload'
NO_CREDS = 'no_creds'
@dataclass
class ReporterConfig:
    """Parsed contents of the reporter's INI configuration file.

    Populated exclusively by :func:`get_config`; field names mirror the
    option names in the config file sections (common/service/collect/
    upload/historic).
    """
    cfg_file: Path            # path the configuration was loaded from
    root: Path                # installation root ([common] root)
    agent: Path               # agent path ([common] agent)
    report_every: int         # seconds between collection runs
    report_timeout: int       # timeout for one collection run, seconds
    upload_timeout: int       # timeout for one upload, seconds
    inventory: Path           # inventory file path
    ceph_master: str          # node used for whole-cluster collection
    storage: Path             # directory where reports are stored
    log_level: str
    persistent_log: bool
    cluster: str              # cluster name (see CLUSTER_NAME_RE)
    customer: str             # customer name (see CLIENT_NAME_RE)
    prometeus_url: Optional[str]        # spelling follows the config option
    prometheus_interval: Optional[int]  # hours of prometheus data to pull
    url: Optional[str]        # upload URL, or the DONT_UPLOAD sentinel
    http_creds: Optional[str]  # upload credentials, or the NO_CREDS sentinel
    size: int                 # [historic] size
    duration: int             # [historic] duration
    min_duration: int         # [historic] min_duration
def get_config(path: Optional[Path]) -> ReporterConfig:
    """Load the reporter configuration.

    :param path: config file location; when None, falls back to the packaged
        ``config.cfg`` resolved via :func:`get_file`.
    :raises RuntimeError: if the config file does not exist.
    :return: a fully populated :class:`ReporterConfig`.
    """
    cfg = configparser.ConfigParser()
    if not path:
        path = get_file('config.cfg')

    if not path.exists():
        raise RuntimeError(f"Can't find config file at {path}")

    # BUG FIX: ConfigParser.read() expects *filenames*. Passing an open file
    # object made it iterate the file's lines and treat each line as a
    # filename (all silently skipped), so nothing was ever parsed and the
    # cfg['common'] access below failed. read_file() is the file-object API.
    with path.open() as fp:
        cfg.read_file(fp)

    common = cfg['common']
    if common['root'] == 'AUTO':
        # Resolve the installation root relative to this source file.
        path_formatters: Dict[str, str] = {'root': str(Path(__file__).parent.parent)}
    else:
        path_formatters = {'root': common['root']}
    path_formatters['agent'] = common['agent']

    def mkpath(val: str) -> Path:
        # Expand {root}/{agent} placeholders used in path-valued options.
        return Path(val.format(**path_formatters))

    return ReporterConfig(
        cfg_file=path,
        root=Path(common['root']),
        agent=Path(common['agent']),
        report_every=cfg.getint('service', 'report_every'),
        report_timeout=cfg.getint('service', 'report_timeout'),
        upload_timeout=cfg.getint('service', 'upload_timeout'),
        inventory=mkpath(cfg['collect']['inventory']),
        ceph_master=cfg['collect']['ceph_master'],
        storage=mkpath(cfg['collect']['storage']),
        log_level=common['log_level'],
        persistent_log=cfg.getboolean('common', 'persistent_log', fallback=False),
        cluster=cfg['collect']['cluster'],
        customer=cfg['collect']['customer'],
        url=cfg.get('upload', 'url', fallback=None),  # type: ignore
        http_creds=cfg.get('upload', 'http_creds', fallback=None),  # type: ignore
        prometeus_url=cfg.get('collect', 'prometeus_url', fallback=None),  # type: ignore
        prometheus_interval=cfg.getint('collect', 'prometheus_interval', fallback=None),  # type: ignore
        duration=cfg.getint('historic', 'duration', fallback=None),  # type: ignore
        size=cfg.getint('historic', 'size', fallback=None),  # type: ignore
        min_duration=cfg.getint('historic', 'min_duration', fallback=None),  # type: ignore
    )
def configure(opts: Any) -> None:
    """Render ``config_templ.cfg`` into ``config.cfg`` from CLI options,
    create the storage directory, and drop a .pth file into site-packages
    so the package is importable.

    :param opts: parsed arguments of the ``configure`` subcommand.
    :raises RuntimeError: if no site-packages directory can be found.
    """
    params = {
        "CEPH_MASTER": opts.ceph_master,
        "UPLOAD_URL": opts.upload_url,
        "HTTP_CREDS": opts.http_creds,
        "CLUSTER": opts.cluster,
        "CUSTOMER": opts.customer,
        # BUG FIX: both prometheus placeholders were copy-pasted from the
        # CUSTOMER line and filled with opts.customer. Use the actual
        # options (interval is an int, so stringify for str.replace below).
        "PROMETEUS_URL": opts.prometeus_url,
        "PROMETEUS_INTERVAL": str(opts.prometeus_interval),
    }

    config_file_content = get_file('config_templ.cfg').open().read()
    for name, val in params.items():
        config_file_content = config_file_content.replace("{" + name + "}", val)
    get_file("config.cfg").open("w").write(config_file_content)

    cfg = get_config(None)
    # BUG FIX: the 'configure' subcommand defines no --storage option, so
    # opts.storage raised AttributeError. Take the storage path from the
    # freshly written configuration instead.
    cfg.storage.mkdir(parents=True, exist_ok=True)

    for site_dir in map(Path, site.getsitepackages()):
        if site_dir.is_dir():
            (site_dir / 'ceph_report.pth').open("w").write(f"{cfg.root}\n{cfg.root / 'libs'}\n")
            break
    else:
        raise RuntimeError("Can't install pth file - no site folder found")
def install_service(svc_name: str, svc_target: Path, opts: Any) -> None:
    """Render the systemd unit template, install it at *svc_target*, reload
    systemd, and optionally enable/start the unit.

    :param svc_name: name of the unit template file (and of the unit).
    :param svc_target: destination path for the rendered unit file.
    :param opts: parsed arguments of the ``install_service`` subcommand
        (``dont_enable``/``dont_start`` flags).
    """
    # BUG FIX: get_config() takes an Optional[Path] config-file path, but the
    # whole argparse Namespace was passed, which crashed on path.exists().
    # The install_service subcommand defines no --config option, so fall back
    # to the default config location (getattr keeps this working if a
    # --config option is added later).
    cfg = get_config(getattr(opts, 'config', None))
    params = {"INSTALL_FOLDER": str(cfg.root), "CONF_FILE": str(cfg.cfg_file)}

    service_file_content = get_file(svc_name).open().read()
    for name, val in params.items():
        service_file_content = service_file_content.replace("{" + name + "}", val)

    if not svc_target.parent.exists():
        svc_target.parent.mkdir(parents=True)
    svc_target.open("w").write(service_file_content)

    # Make systemd pick up the new unit before enabling/starting it.
    subprocess.run(["systemctl", "daemon-reload"]).check_returncode()
    if not opts.dont_enable:
        subprocess.run(["systemctl", "enable", svc_name]).check_returncode()
    if not opts.dont_start:
        subprocess.run(["systemctl", "start", svc_name]).check_returncode()
def unistall_service(svc_name: str, svc_target: Path) -> None:
    """Stop and disable the systemd unit (if it is active), remove its unit
    file, and reload systemd.

    NOTE: the historical 'unistall' spelling is kept because main() calls
    the function by this name.
    """
    # Only disable/stop when `systemctl status` reports the unit as active.
    if subprocess.run(["systemctl", "status", svc_name], stdout=subprocess.DEVNULL).returncode == 0:
        subprocess.run(["systemctl", "disable", svc_name]).check_returncode()
        subprocess.run(["systemctl", "stop", svc_name]).check_returncode()
    # Robustness: don't crash if the unit file was already removed.
    if svc_target.exists():
        svc_target.unlink()
    subprocess.run(["systemctl", "daemon-reload"]).check_returncode()
def parse_args(argv: List[str]) -> Any:
    """Build the CLI parser and parse *argv* (``argv[0]`` is the program
    name and is skipped). Returns the argparse namespace; the selected
    subcommand is available as ``subparser_name``."""

    class WideFormatter(argparse.ArgumentDefaultsHelpFormatter):
        # Widen the generated --help output to 120 columns.
        def __init__(self, *args, **kwargs) -> None:
            kwargs['width'] = 120
            argparse.ArgumentDefaultsHelpFormatter.__init__(self, *args, **kwargs)

    parser = argparse.ArgumentParser(formatter_class=WideFormatter)
    subcommands = parser.add_subparsers(dest='subparser_name')

    # --- configure ---------------------------------------------------------
    configure_parser = subcommands.add_parser("configure")
    configure_parser.add_argument("--config", default=None, help="Config file")
    configure_parser.add_argument(
        "--ceph-master", default='-',
        help="Node to run whole-cluster collection, first inventory node by default")
    configure_parser.add_argument(
        "--http-creds", default=NO_CREDS,
        help="Upload HTTP credentials, must be provided if --upload-url is provided")
    configure_parser.add_argument(
        "--upload-url", default=DONT_UPLOAD,
        help="Report upload URL, if not provided - reports would not be uploaded")
    configure_parser.add_argument(
        "--cluster", required=True, type=re_checker(CLUSTER_NAME_RE),
        help=f"Cluster name. Should create unique pair with --customer and match {CLUSTER_NAME_RE}")
    configure_parser.add_argument(
        "--customer", required=True, type=re_checker(CLIENT_NAME_RE),
        help=f"Customer name, should be unique and match {CLIENT_NAME_RE}")
    configure_parser.add_argument(
        "--no-service", action='store_true', default=False, help="Don't install service")
    configure_parser.add_argument(
        "--prometeus-url", default="", help="Prometheus URL to pull data")
    configure_parser.add_argument(
        "--prometeus-interval", default=0, type=int, metavar="HOURS",
        help="Prometheus data pull hor how many hours")

    # --- install_service / uninstall_service -------------------------------
    install_parser = subcommands.add_parser("install_service")
    install_parser.add_argument(
        "--dont-start", action='store_true', default=False, help="Don't start service")
    install_parser.add_argument(
        "--dont-enable", action='store_true', default=False,
        help="Don't enable auto start on boot")
    subcommands.add_parser("uninstall_service")

    # --- run ----------------------------------------------------------------
    run_parser = subcommands.add_parser("run")
    run_parser.add_argument("--config", default=None, help="Config file path")

    return parser.parse_args(argv[1:])
def main(argv: List[str]) -> int:
    """Entry point: dispatch the selected sub-command.

    Returns a process exit code (0 on success, 1 for an unknown command).
    The 'run' sub-command never returns under normal operation: it loops
    forever, collecting a report every ``cfg.report_every`` seconds and
    optionally uploading it.
    """
    opts = parse_args(argv)
    svc_name = 'mirantis_ceph_reporter.service'
    svc_target = Path("/lib/systemd/system") / svc_name
    if opts.subparser_name == 'configure':
        configure(opts)
    elif opts.subparser_name == 'install_service':
        install_service(svc_name, svc_target, opts)
    elif opts.subparser_name == 'uninstall_service':
        unistall_service(svc_name, svc_target)
    elif opts.subparser_name == 'run':
        cfg = get_config(opts.config)
        setup_logging(get_file("logging.json"), cfg.log_level, None, cfg.persistent_log)
        logger.info(f"Started with {argv}")
        assert cfg.url
        assert cfg.http_creds
        upload_args: List[str] = ["--url", cfg.url, "--http-creds", cfg.http_creds]  # type: ignore
        next_time = time.time()
        while True:
            # Fixed-rate scheduling: advance the deadline by report_every so a
            # slow collection does not delay all subsequent runs.
            sleep_time = next_time - time.time()
            time.sleep(sleep_time if sleep_time > 0 else 0)
            next_time += cfg.report_every
            try:
                cmd = [sys.executable, '-m', 'ceph_report.collect_info', 'collect', '--config', str(cfg.cfg_file)]
                logger.info(f"Started collecting with {cmd}, timeout={cfg.report_timeout}")
                res = subprocess.run(cmd, timeout=cfg.report_timeout, stdout=subprocess.PIPE)
                res.check_returncode()

                # The collector prints the report path right after this marker.
                marker = "Will store results into"
                stdout = res.stdout.decode()
                if marker not in stdout:
                    continue
                report = stdout[stdout.index(marker) + len(marker):].strip().split("\n")[0]
                report_path = Path(report)
                logger.info(f"Get new report {report_path}")

                # Track not-yet-uploaded reports via symlinks; the link is
                # removed only after a successful upload.
                slink_path = report_path.parent / 'not_uploaded' / report_path.name
                if not slink_path.parent.exists():
                    slink_path.parent.mkdir(parents=True)
                slink_path.symlink_to(report_path)

                if cfg.url != DONT_UPLOAD:
                    cmd = [sys.executable, "-m", "ceph_report.collect_info", "upload", *upload_args, str(report_path)]
                    # BUG FIX: the old message logged the report path as the
                    # timeout value; log the actual upload timeout instead.
                    logger.info(f"Start upload with {cmd}, timeout={cfg.upload_timeout}")
                    subprocess.run(cmd, timeout=cfg.upload_timeout).check_returncode()
                    logger.info("Upload successful")
                    slink_path.unlink()
                else:
                    logger.info("Skipping uploading, as it's disabled")
            except subprocess.TimeoutExpired:
                logger.error("Timeout expired")
            except subprocess.CalledProcessError as exc:
                logger.error(f"Child process failed with code {exc.returncode}")
    else:
        print(f"Unknown cmd {opts.subparser_name}")
        return 1
    return 0
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status; the old
    # code discarded the return code and always exited 0.
    sys.exit(main(sys.argv))
| StarcoderdataPython |
3296665 | <filename>tests/unit/sqlite3_to_mysql_test.py
import logging
import re
from random import choice
import mysql.connector
import pytest
from mysql.connector import errorcode
from sqlalchemy import create_engine, inspect
from sqlalchemy.dialects.sqlite import __all__ as sqlite_column_types
from sqlite3_to_mysql import SQLite3toMySQL
@pytest.mark.usefixtures("sqlite_database", "mysql_instance")
class TestSQLite3toMySQL:
    """Unit tests for SQLite3toMySQL type translation and error propagation."""

    @staticmethod
    def _make_proc(sqlite_database, mysql_credentials, **kwargs):
        """Build a transfer instance wired to the test fixtures.

        NOTE(review): the checked-in source contained the redaction token
        ``<PASSWORD>`` for the password keyword; ``mysql_credentials.password``
        matches the pattern of every other keyword and is restored here.
        """
        return SQLite3toMySQL(
            sqlite_file=sqlite_database,
            mysql_user=mysql_credentials.user,
            mysql_password=mysql_credentials.password,
            mysql_host=mysql_credentials.host,
            mysql_port=mysql_credentials.port,
            mysql_database=mysql_credentials.database,
            **kwargs,
        )

    class _FailingCursor:
        """Stand-in cursor whose execute() always raises CR_UNKNOWN_ERROR."""

        def execute(self, statement):
            raise mysql.connector.Error(
                msg="Unknown MySQL error", errno=errorcode.CR_UNKNOWN_ERROR
            )

    @staticmethod
    def _assert_unknown_error(excinfo, caplog):
        """The error code must surface both in the exception and in the log."""
        assert str(errorcode.CR_UNKNOWN_ERROR) in str(excinfo.value)
        assert any(
            str(errorcode.CR_UNKNOWN_ERROR) in message for message in caplog.messages
        )

    def test_translate_type_from_sqlite_to_mysql_invalid_column_type(
        self, sqlite_database, mysql_database, mysql_credentials, mocker
    ):
        proc = self._make_proc(sqlite_database, mysql_credentials)
        with pytest.raises(ValueError) as excinfo:
            mocker.patch.object(proc, "_valid_column_type", return_value=False)
            proc._translate_type_from_sqlite_to_mysql("text")
        assert "Invalid column_type!" in str(excinfo.value)

    @pytest.mark.parametrize(
        "mysql_integer_type, mysql_string_type",
        [
            ("INT(11)", "VARCHAR(300)"),
            ("BIGINT(19)", "TEXT"),
            ("BIGINT(20) UNSIGNED", "CHAR(100)"),
        ],
    )
    def test_translate_type_from_sqlite_to_mysql_all_valid_columns(
        self,
        sqlite_database,
        mysql_database,
        mysql_credentials,
        faker,
        mysql_integer_type,
        mysql_string_type,
    ):
        proc = self._make_proc(
            sqlite_database,
            mysql_credentials,
            mysql_integer_type=mysql_integer_type,
            mysql_string_type=mysql_string_type,
        )
        # Every SQLAlchemy-exported SQLite column type must round-trip to a
        # sensible MySQL type.
        for column in sqlite_column_types + ("INT64",):
            if column == "dialect":
                continue
            elif column == "VARCHAR":
                assert (
                    proc._translate_type_from_sqlite_to_mysql(column)
                    == proc._mysql_string_type
                )
            elif column in {"INTEGER", "INT"}:
                assert (
                    proc._translate_type_from_sqlite_to_mysql(column)
                    == proc._mysql_integer_type
                )
            elif column in {"INT64", "NUMERIC"}:
                assert proc._translate_type_from_sqlite_to_mysql(column) == "BIGINT(19)"
            else:
                assert proc._translate_type_from_sqlite_to_mysql(column) == column
        assert proc._translate_type_from_sqlite_to_mysql("TEXT") == "TEXT"
        assert proc._translate_type_from_sqlite_to_mysql("CLOB") == "TEXT"
        assert proc._translate_type_from_sqlite_to_mysql("CHARACTER") == "CHAR"
        length = faker.pyint(min_value=1, max_value=99)
        assert proc._translate_type_from_sqlite_to_mysql(
            "CHARACTER({})".format(length)
        ) == "CHAR({})".format(length)
        assert proc._translate_type_from_sqlite_to_mysql("NCHAR") == "CHAR"
        length = faker.pyint(min_value=1, max_value=99)
        assert proc._translate_type_from_sqlite_to_mysql(
            "NCHAR({})".format(length)
        ) == "CHAR({})".format(length)
        assert proc._translate_type_from_sqlite_to_mysql("NATIVE CHARACTER") == "CHAR"
        length = faker.pyint(min_value=1, max_value=99)
        assert proc._translate_type_from_sqlite_to_mysql(
            "NATIVE CHARACTER({})".format(length)
        ) == "CHAR({})".format(length)
        assert (
            proc._translate_type_from_sqlite_to_mysql("VARCHAR")
            == proc._mysql_string_type
        )
        length = faker.pyint(min_value=1, max_value=255)
        assert proc._translate_type_from_sqlite_to_mysql(
            "VARCHAR({})".format(length)
        ) == re.sub(r"\d+", str(length), proc._mysql_string_type)
        assert proc._translate_type_from_sqlite_to_mysql("DOUBLE PRECISION") == "DOUBLE"
        assert (
            proc._translate_type_from_sqlite_to_mysql("UNSIGNED BIG INT")
            == "BIGINT UNSIGNED"
        )
        length = faker.pyint(min_value=1000000000, max_value=99999999999999999999)
        assert proc._translate_type_from_sqlite_to_mysql(
            "UNSIGNED BIG INT({})".format(length)
        ) == "BIGINT({}) UNSIGNED".format(length)
        assert (
            proc._translate_type_from_sqlite_to_mysql("INT1")
            == proc._mysql_integer_type
        )
        assert (
            proc._translate_type_from_sqlite_to_mysql("INT2")
            == proc._mysql_integer_type
        )
        length = faker.pyint(min_value=1, max_value=11)
        assert proc._translate_type_from_sqlite_to_mysql(
            "INT({})".format(length)
        ) == re.sub(r"\d+", str(length), proc._mysql_integer_type)

    def test_create_database_connection_error(
        self, sqlite_database, mysql_database, mysql_credentials, mocker, faker, caplog
    ):
        proc = self._make_proc(sqlite_database, mysql_credentials)
        mocker.patch.object(proc, "_mysql_cur", self._FailingCursor())
        with pytest.raises(mysql.connector.Error) as excinfo:
            caplog.set_level(logging.DEBUG)
            proc._create_database()
        self._assert_unknown_error(excinfo, caplog)

    def test_create_table_cursor_error(
        self, sqlite_database, mysql_database, mysql_credentials, mocker, faker, caplog
    ):
        proc = self._make_proc(sqlite_database, mysql_credentials)
        mocker.patch.object(proc, "_mysql_cur", self._FailingCursor())
        sqlite_engine = create_engine(
            "sqlite:///{database}".format(database=sqlite_database)
        )
        sqlite_tables = inspect(sqlite_engine).get_table_names()
        with pytest.raises(mysql.connector.Error) as excinfo:
            caplog.set_level(logging.DEBUG)
            proc._create_table(choice(sqlite_tables))
        self._assert_unknown_error(excinfo, caplog)

    def test_process_cursor_error(
        self, sqlite_database, mysql_database, mysql_credentials, mocker, faker, caplog
    ):
        proc = self._make_proc(sqlite_database, mysql_credentials)

        def fake_transfer_table_data(sql, total_records=0):
            raise mysql.connector.Error(
                msg="Unknown MySQL error", errno=errorcode.CR_UNKNOWN_ERROR
            )

        mocker.patch.object(proc, "_transfer_table_data", fake_transfer_table_data)
        with pytest.raises(mysql.connector.Error) as excinfo:
            caplog.set_level(logging.DEBUG)
            proc.transfer()
        self._assert_unknown_error(excinfo, caplog)

    def test_add_indices_error(
        self, sqlite_database, mysql_database, mysql_credentials, mocker, faker, caplog
    ):
        proc = self._make_proc(sqlite_database, mysql_credentials)
        sqlite_engine = create_engine(
            "sqlite:///{database}".format(database=sqlite_database)
        )
        sqlite_inspect = inspect(sqlite_engine)
        # Only tables that actually declare indices are interesting here.
        tables_with_indices = [
            table
            for table in sqlite_inspect.get_table_names()
            if sqlite_inspect.get_indexes(table)
        ]
        table_name = choice(tables_with_indices)
        proc._create_table(table_name)
        mocker.patch.object(proc, "_mysql_cur", self._FailingCursor())
        with pytest.raises(mysql.connector.Error) as excinfo:
            caplog.set_level(logging.DEBUG)
            proc._add_indices(table_name)
        self._assert_unknown_error(excinfo, caplog)

    def test_add_foreign_keys_error(
        self, sqlite_database, mysql_database, mysql_credentials, mocker, faker, caplog
    ):
        proc = self._make_proc(sqlite_database, mysql_credentials)
        sqlite_engine = create_engine(
            "sqlite:///{database}".format(database=sqlite_database)
        )
        # Only tables that declare foreign keys are interesting here.
        tables_with_foreign_keys = []
        for table in inspect(sqlite_engine).get_table_names():
            sqlite_fk_stmt = 'PRAGMA foreign_key_list("{table}")'.format(table=table)
            sqlite_fk_result = sqlite_engine.execute(sqlite_fk_stmt)
            if sqlite_fk_result.returns_rows:
                for _ in sqlite_fk_result:
                    tables_with_foreign_keys.append(table)
                    break
        table_name = choice(tables_with_foreign_keys)
        proc._create_table(table_name)
        mocker.patch.object(proc, "_mysql_cur", self._FailingCursor())
        with pytest.raises(mysql.connector.Error) as excinfo:
            caplog.set_level(logging.DEBUG)
            proc._add_foreign_keys(table_name)
        self._assert_unknown_error(excinfo, caplog)
| StarcoderdataPython |
8118666 | import torch
from torch import nn
class HexaConv2d(nn.Conv2d):
    """2D convolution restricted to a hexagonal neighbourhood.

    Weights outside the hexagonal footprint are zeroed at construction time
    and their gradients are masked on every backward pass, so they stay zero
    throughout training.
    """

    def __init__(self, *args, **kargs):
        super(HexaConv2d, self).__init__(*args, **kargs)
        self.mask = nn.Parameter(self.get_mask(), requires_grad=False)
        # Zero the masked weights once, then keep them zero by masking the
        # gradient flowing into the weight tensor.
        self.weight = nn.Parameter(self.weight.clone().detach() * self.mask)
        self.weight.register_hook(lambda grad: grad * self.mask)

    def get_mask(self):
        """Return a 0/1 tensor shaped like the weights selecting the hexagon."""
        _, _, height, width = self.weight.shape
        if height < 3 or width < 3:
            raise Exception("A hexaconv must have a size a height and a width >= 3x3")
        if height % 2 != 1 or width % 2 != 1:
            raise Exception("A hexaconv must have a kernel where its width and height are odds")
        if height != width:
            raise Exception("A hexaconv must have a square kernel")
        size = height
        middle = (size - 1) / 2
        mask = torch.ones_like(self.weight, requires_grad=False)
        for row in range(size):
            # Column where the hexagon boundary cuts this row.
            cut = ((size + 1) // 2 + row) % size
            if row < middle:
                mask[:, :, row, cut:] = 0
            elif row > middle:
                mask[:, :, row, :cut] = 0
        return mask
class HexaConv3d(nn.Conv3d):
    """3D convolution whose spatial (height/width) dimensions are restricted
    to a hexagonal neighbourhood; the depth dimension is left untouched.
    """

    def __init__(self, *args, **kargs):
        super(HexaConv3d, self).__init__(*args, **kargs)
        self.mask = nn.Parameter(self.get_mask(), requires_grad=False)
        # Zero the masked weights once, then keep them zero by masking the
        # gradient flowing into the weight tensor.
        self.weight = nn.Parameter(self.weight.clone().detach() * self.mask)
        self.weight.register_hook(lambda grad: grad * self.mask)

    def get_mask(self):
        """Return a 0/1 tensor shaped like the weights selecting the hexagon."""
        _, _, depth, height, width = self.weight.shape
        if height < 3 or width < 3:
            raise Exception("A hexaconv must have a size a height and a width >= 3x3")
        if height % 2 != 1 or width % 2 != 1:
            raise Exception("A hexaconv must have a kernel where its width and height are odds")
        if height != width:
            raise Exception("A hexaconv must have a square kernel")
        size = height
        middle = (size - 1) / 2
        mask = torch.ones_like(self.weight, requires_grad=False)
        for row in range(size):
            # Column where the hexagon boundary cuts this row.
            cut = ((size + 1) // 2 + row) % size
            if row < middle:
                mask[:, :, :, row, cut:] = 0
            elif row > middle:
                mask[:, :, :, row, :cut] = 0
        return mask
| StarcoderdataPython |
396560 | # Sequence Reconstruction
# Check whether the original sequence org can be uniquely reconstructed from the sequences in seqs.
# The orginal sequence is a permutation of the integers from 1 to n, with 1 ≤ n ≤ 104.
# Reconstruction means building a shortest common supersequence of the sequences in seqs
# (i.e., a shortest sequence so that all sequences in seqs are subsequences of it).
# Determine whether there is only one sequence that can be reconstructed from seqs and it is the org sequence.
class Solution(object):
    def sequenceReconstruction(self, org, seqs):
        """
        :type org: List[int]
        :type seqs: List[List[int]]
        :rtype: bool
        """
        # No non-empty sequence at all: nothing can be reconstructed.
        if not any(seqs):
            return False
        n = len(org)
        # Position of each value inside org.
        pos = {}
        for i, value in enumerate(org):
            pos[value] = i
        # confirmed[v] becomes True once some seq shows v immediately before
        # its successor in org; every value except the last must be confirmed
        # for the reconstruction to be unique.
        confirmed = [False] * (n + 1)
        remaining = n - 1
        for seq in seqs:
            previous = None
            for value in seq:
                # Value outside the permitted 1..n range.
                if not 1 <= value <= n:
                    return False
                if previous is not None:
                    # The pair must respect org's order.
                    if pos[previous] >= pos[value]:
                        return False
                    if pos[previous] + 1 == pos[value] and not confirmed[previous]:
                        confirmed[previous] = True
                        remaining -= 1
                previous = value
        return remaining == 0
# O(S) time, O(N) space, S being total length of seqs, N being range of org
# test cases:
# org: [1,2,3], seqs: [[1,2],[1,3]] => False
# org: [1,2,3], seqs: [[1,2],[1,3],[2,3]] => True
# org: [4,1,5,2,6,3], seqs: [[5,2,6,3],[4,1,5,2]] => True
# topological sort version
# 'abc': a -> b -> c
class Solution(object):
    def sequenceReconstruction(self, org, seqs):
        """Topological-sort variant: org must be the unique topological order
        of the graph induced by consecutive pairs in seqs.

        :type org: List[int]
        :type seqs: List[List[int]]
        :rtype: bool
        """
        # BUG FIX: the snippet used `collections.deque` without importing
        # `collections` anywhere, raising NameError at runtime. A local
        # import keeps this LeetCode-style class self-contained.
        from collections import deque

        n = len(org)
        graph = {i: set() for i in range(1, n + 1)}    # node -> direct successors
        in_degrees = {i: 0 for i in range(1, n + 1)}   # node -> number of predecessors
        nodes = set()
        for seq in seqs:
            for i, node in enumerate(seq):
                # Value out of the permitted 1..n range.
                if node < 1 or node > n:
                    return False
                nodes.add(node)
                # Avoid counting duplicate edges twice.
                if i > 0 and seq[i] not in graph[seq[i - 1]]:
                    graph[seq[i - 1]].add(seq[i])
                    in_degrees[seq[i]] += 1
        # All numbers from 1 to n must appear at least once.
        if len(nodes) < n:
            return False
        queue = deque(i for i in in_degrees if in_degrees[i] == 0)
        cursor = 0
        while queue:
            # More than one candidate means the order is not unique.
            if len(queue) > 1:
                return False
            # Extra nodes remain, or the forced order diverges from org.
            if cursor == n or queue[0] != org[cursor]:
                return False
            cursor += 1
            curr = queue.popleft()
            for child in graph[curr]:
                in_degrees[child] -= 1
                if in_degrees[child] == 0:
                    queue.append(child)
        return cursor == n
# O(S) time, O(N) space, S being total length of seqs, N being range of org
| StarcoderdataPython |
6678138 | #!/usr/bin/env python
#
# Creates a csv file relating voltage shifts to amino acid index
#
from __future__ import print_function
import base
import numpy as np
def tasks():
    """
    Returns a list of the tasks in this file.
    """
    # One instance per task class; consumed by the TaskRunner below.
    return [
        VoltageShiftIndices(),
    ]
class VoltageShiftIndices(base.Task):
    """Export per-mutation voltage shifts (activation/inactivation) to CSV."""

    def __init__(self):
        super(VoltageShiftIndices, self).__init__('voltage_shift_indices')
        # Output files are written below the shared "papergp" data directory.
        self._set_data_subdir('papergp')

    def _run(self):
        """Query the epdata table and write voltage-shift-indices.csv."""
        with base.connect() as con:
            c = con.cursor()
            print('Loading voltage shift data')
            mutations = []
            for row in c.execute('select * from epdata'):
                idx = row['idx']
                dva = row['dva']  # activation voltage shift (may be NULL)
                dvi = row['dvi']  # inactivation voltage shift (may be NULL)
                # Normalise to float, preserving None for missing values.
                dva = float(dva) if dva is not None else dva
                dvi = float(dvi) if dvi is not None else dvi
                mutations.append([idx, dva, dvi])
        filename = self.data_out('voltage-shift-indices.csv')
        print('Writing ' + filename)
        with open(filename, 'w') as f:
            w = self.csv_writer(f)
            w.writerow([
                'idx',
                'dva',
                'dvi',
            ])
            for mutation in mutations:
                w.writerow(mutation)
        print('Done')
if __name__ == '__main__':
    # Register and execute every task defined in this module.
    runner = base.TaskRunner()
    runner.add_tasks(tasks())
    runner.run()
| StarcoderdataPython |
5060716 | <reponame>pecimuth/synthia<filename>backend/web/__init__.py<gh_stars>0
from flask import Flask
from flasgger import Swagger
from flask_cors import CORS
from . import service
from . import controller
import os
def create_app(**kwargs) -> Flask:
    """Create, configure and return a Flask app.

    Keyword arguments may override any configuration parameter.
    """
    app = Flask(
        __name__,
        instance_relative_config=True
    )
    app.config.from_mapping(
        SWAGGER={
            'title': 'Synthia',
            'securityDefinitions': {
                'APIKeyHeader': {
                    'type': 'apiKey',
                    'name': 'Authorization',
                    'in': 'header'
                }
            }
        },
        SECRET_KEY=os.environ.get('SECRET_KEY'),
        PROJECT_STORAGE=os.path.join(app.instance_path, 'project'),
        DATABASE_DRIVER='postgresql',
        DATABASE_USER=os.environ.get('POSTGRES_USER'),
        DATABASE_PASSWORD=os.environ.get('POSTGRES_PASSWORD'),
        DATABASE_DB=os.environ.get('POSTGRES_DB'),
        DATABASE_HOST=os.environ.get('DATABASE_HOST'),
        DATABASE_PORT=os.environ.get('DATABASE_PORT'),
        ORIGIN=os.environ.get('ORIGIN')
    )
    # Explicit keyword overrides take precedence over the defaults above.
    app.config.from_mapping(**kwargs)

    Swagger(app)
    CORS(app, origins=app.config['ORIGIN'], supports_credentials=True)
    controller.init_app(app)
    service.init_app(app)

    # BUG FIX: the original wrapped both makedirs calls in a single
    # try/except OSError, so when the instance directory already existed the
    # exception skipped creation of PROJECT_STORAGE entirely. exist_ok=True
    # creates each directory independently and only ignores "already exists".
    os.makedirs(app.instance_path, exist_ok=True)
    os.makedirs(app.config['PROJECT_STORAGE'], exist_ok=True)
    return app
| StarcoderdataPython |
334578 | <reponame>dozymoe/django-carbondesign<filename>carbondesign/tags/inline_loading.py
"""
Inline Loading
==============
See: https://www.carbondesignsystem.com/components/inline-loading/usage/
The inline loading component provides visual feedback that data is being
processed.
Overview
--------
Inline loading spinners are used when performing actions. They notify to
the user that their request is being processed. Although they do not provide
details about what is occurring on the back-end, they reassure the user that
their action is being processed.
Common actions that benefit from inline loading include any create, update,
or delete actions that may have a lot of data to process. It can be used in
a table, after a primary or secondary button click, or even in a modal.
""" # pylint:disable=line-too-long
# pylint:disable=too-many-lines
from django.utils.translation import gettext as _
#-
from .base import Node
class InlineLoading(Node):
    """Inline Loading component.
    """
    def prepare(self, values, context):
        """Prepare values for rendering the templates.
        """
        # Status texts for the three loading states, marked for translation.
        values['txt_loading'] = _("Loading data...")
        values['txt_loaded'] = _("Data loaded.")
        values['txt_failed'] = _("Loading data failed.")

    def render_default(self, values, context):
        """Output html of the component.
        """
        # Markup follows the Carbon Design System inline-loading structure:
        # a spinner, a success checkmark and an error icon, plus one status
        # paragraph per state; Carbon's JS toggles the `hidden` attributes.
        template = """
<div data-inline-loading class="bx--inline-loading" role="alert"
    aria-live="assertive">
  <div class="bx--inline-loading__animation">
    <div data-inline-loading-spinner class="bx--loading bx--loading--small">
      <svg class="bx--loading__svg" viewBox="0 0 100 100">
        <circle class="bx--loading__background" cx="50%" cy="50%" r="42" />
        <circle class="bx--loading__stroke" cx="50%" cy="50%" r="42" />
      </svg>
    </div>
    <svg focusable="false" preserveAspectRatio="xMidYMid meet"
        xmlns="http://www.w3.org/2000/svg" fill="currentColor"
        class="bx--inline-loading__checkmark-container" hidden=""
        data-inline-loading-finished="" width="16" height="16"
        viewBox="0 0 16 16" aria-hidden="true">
      <path d="M8,1C4.1,1,1,4.1,1,8c0,3.9,3.1,7,7,7s7-3.1,7-7C15,4.1,11.9,1,8,1z M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z"></path>
      <path d="M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z" data-icon-path="inner-path" opacity="0"></path>
    </svg>
    <svg focusable="false" preserveAspectRatio="xMidYMid meet"
        xmlns="http://www.w3.org/2000/svg" fill="currentColor"
        class="bx--inline-loading--error" hidden="" data-inline-loading-error=""
        width="20" height="20" viewBox="0 0 32 32" aria-hidden="true">
      <path d="M2,16H2A14,14,0,1,0,16,2,14,14,0,0,0,2,16Zm23.15,7.75L8.25,6.85a12,12,0,0,1,16.9,16.9ZM8.24,25.16A12,12,0,0,1,6.84,8.27L23.73,25.16a12,12,0,0,1-15.49,0Z"></path>
    </svg>
  </div>
  <p data-inline-loading-text-active class="bx--inline-loading__text">
    {txt_loading}
  </p>
  <p data-inline-loading-text-finished hidden class="bx--inline-loading__text">
    {txt_loaded}
  </p>
  <p data-inline-loading-text-error hidden class="bx--inline-loading__text">
    {txt_failed}
  </p>
</div>
"""
        return self.format(template, values)
# Registry mapping template-tag names to their component classes.
components = {
    'InlineLoading': InlineLoading,
}
| StarcoderdataPython |
1885062 | from typing import List, Iterable, Dict
from keras_preprocessing.text import Tokenizer as KTokenizer
from headliner.preprocessing.tokenizer import Tokenizer
class KerasTokenizer(Tokenizer):
    """Adapter exposing a Keras Tokenizer through the headliner Tokenizer API."""

    def __init__(self, **kwargs):
        # kwargs are forwarded verbatim to keras_preprocessing.text.Tokenizer
        # (e.g. filters, lower, oov_token).
        self._keras_tokenizer = KTokenizer(**kwargs)

    def encode(self, text: str) -> List[int]:
        """Convert a text string into a sequence of token indices."""
        return self._keras_tokenizer.texts_to_sequences([text])[0]

    def decode(self, sequence: List[int]) -> str:
        """Convert a sequence of token indices back into a text string."""
        return self._keras_tokenizer.sequences_to_texts([sequence])[0]

    @property
    def vocab_size(self) -> int:
        """Number of distinct tokens seen during fit()."""
        return len(self._keras_tokenizer.word_index)

    def fit(self, texts: Iterable[str]):
        """Build the vocabulary from an iterable of texts."""
        self._keras_tokenizer.fit_on_texts(texts)

    @property
    def token_index(self) -> Dict[str, int]:
        """Mapping from token string to integer index."""
        return self._keras_tokenizer.word_index
| StarcoderdataPython |
11395411 | import abc
from typing import List
from signalflowgrapher.common.observable import ValueObservable
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
class Command(abc.ABC):
    """Command for undo and redo operation."""

    @abc.abstractmethod
    def redo(self):
        """Apply (or re-apply) the command's effect."""
        pass

    @abc.abstractmethod
    def undo(self):
        """Revert the command's effect."""
        pass
class MergeableCommand(Command):
    """Command that can be merged with another unrelated to its order."""

    @abc.abstractmethod
    def merge(self, cmd):
        """Return new command that merges the given command with this."""
        pass

    @abc.abstractmethod
    def get_ressource(self):
        """Get the ressource this command is applied to."""
        pass
class ScriptCommand(Command):
    """Script command for undo and redo of several commands."""

    def __init__(self, commands: List[Command]):
        """Build a composite command, merging the given commands when possible.

        If every command is a MergeableCommand, commands of the same type
        acting on the same ressource are collapsed into one merged command;
        otherwise the raw command list is kept unchanged.
        """
        abort_merge = False
        # Group type of command
        type_cmd_map = defaultdict(list)
        i = 0
        while i < len(commands) and not abort_merge:
            cmd = commands[i]
            # Abort if at least one cmd can't be merged
            if not isinstance(cmd, MergeableCommand):
                abort_merge = True
            key = type(cmd).__name__
            type_cmd_map[key].append(cmd)
            i += 1
        if not abort_merge:
            # Second level of grouping: by the ressource the command targets.
            type_ressource_cmd_map = defaultdict(lambda: defaultdict(list))
            for k1, cmds in type_cmd_map.items():
                for cmd in cmds:
                    k2 = cmd.get_ressource()
                    type_ressource_cmd_map[k1][k2].append(cmd)
            self.commands = list()
            # Merge commands with same type and ressource
            for k1 in type_ressource_cmd_map:
                cmds_by_type = type_ressource_cmd_map[k1]
                for k2 in cmds_by_type:
                    cmds_by_ressource = cmds_by_type[k2]
                    # Fold the group into one merged command, left to right.
                    cmds = iter(cmds_by_ressource)
                    merged = next(cmds, None)
                    if merged is not None:
                        current = next(cmds, None)
                        while current is not None:
                            merged = merged.merge(current)
                            current = next(cmds, None)
                        self.commands.append(merged)
            logger.debug(
                "Script command merge successful - reduced from " +
                str(len(commands)) + " to " + str(len(self.commands)))
        else:
            logger.debug(
                "Script command merge unsuccessful - using raw commands")
            self.commands = commands

    def undo(self):
        # Undo in reverse order of execution.
        for cmd in reversed(self.commands):
            cmd.undo()

    def redo(self):
        for cmd in self.commands:
            cmd.redo()
class CommandHandler(object):
    """Tracks executed commands and exposes undo/redo with script batching."""

    def __init__(self):
        self.__undo_stack: List[Command] = []
        self.__redo_stack: List[Command] = []
        # While a script is recording, commands are collected here instead
        # of going straight onto the undo stack.
        self.__script_commands = None
        self.can_undo = ValueObservable(False)
        self.can_redo = ValueObservable(False)

    def add_command(self, command: Command):
        """Add a new command to the stack."""
        if self.__script_commands is not None:
            # A script is recording: defer the command until end_script().
            self.__script_commands.append(command)
            return
        self.__undo_stack.append(command)
        self.__redo_stack.clear()
        self.__update_can_undo_redo()

    def reset(self):
        """Clear both the undo and the redo stack."""
        self.__undo_stack.clear()
        self.__redo_stack.clear()
        self.__update_can_undo_redo()

    def undo(self):
        """Undo the most recent command, if any."""
        if not self.__undo_stack:
            return
        if self.__script_commands is not None:
            raise ValueError("Undo not allowed when script is running")
        command = self.__undo_stack.pop()
        command.undo()
        self.__redo_stack.append(command)
        self.__update_can_undo_redo()

    def redo(self):
        """Redo the most recently undone command, if any."""
        if not self.__redo_stack:
            return
        if self.__script_commands is not None:
            raise ValueError("Redo not allowed when script is running")
        command = self.__redo_stack.pop()
        command.redo()
        self.__undo_stack.append(command)
        self.__update_can_undo_redo()

    def __update_can_undo_redo(self):
        # Observers receive the stack lengths (truthy when non-empty).
        self.can_undo.set(len(self.__undo_stack))
        self.can_redo.set(len(self.__redo_stack))

    def start_script(self):
        """Start recording a command script.

        Raises ValueError if a script is already running.
        """
        logger.debug("Start script")
        if self.__script_commands is not None:
            raise ValueError("Script already running")
        self.__script_commands = []

    def end_script(self):
        """Stop recording and commit the recorded commands as one script.

        Raises ValueError if no script is running.
        """
        logger.debug("End script")
        if self.__script_commands is None:
            raise ValueError("No script running")
        # Clear the recording flag *before* add_command so the script lands
        # on the undo stack instead of back inside itself.
        recorded = self.__script_commands
        self.__script_commands = None
        if len(recorded) > 0:
            self.add_command(ScriptCommand(recorded))
3208309 | <reponame>ytoyama/yans_chainer_hackathon
import math
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
class Linear(function.Function):

    """Linear function (a.k.a. fully-connected layer or affine transformation).

    This function holds a weight matrix ``W`` and a bias vector ``b``.

    The weight matrix ``W`` has shape ``(out_size, in_size)``.
    This matrix is initialized with i.i.d. Gaussian samples, each of which has
    zero mean and deviation :math:`\sqrt{1/\\text{in_size}}`.
    The deviation is scaled by factor ``wscale`` if specified.

    The bias vector ``b`` is of size ``out_size``.
    Each element is initialized with the ``bias`` value.
    If ``nobias`` argument is set to True, then this function does not hold a
    bias vector.

    Let :math:`X` be an input matrix, and :math:`W, b` the weight matrix and
    the bias vector, respectively.
    Then, the output matrix :math:`Y` is computed by :math:`Y = XW^\\top + b`,
    where the addition by :math:`b` is broadcasted across the minibatch.

    Args:
        in_size (int): Dimension of input vectors.
        out_size (int): Dimension of output vectors.
        wscale (float): Scaling factor of the weight matrix.
        bias (float): Initial bias value.
        nobias (bool): If True, then this function does not use the bias.
        initialW (2-D array): Initial weight value. If ``None``, then this
            function uses to initialize ``wscale``.
        initial_bias (1-D array): Initial bias value. If ``None``, then this
            function uses to initialize ``bias``.

    .. note::

       This function accepts an input variable of a non-matrix array.
       In this case, the leading dimension is treated as the batch dimension,
       and the other dimensions are reduced to one dimension.

    """

    def __init__(self, in_size, out_size, wscale=1, bias=0, nobias=False,
                 initialW=None, initial_bias=None):
        self.W = None
        self.gW = None
        self.b = None
        self.gb = None
        if initialW is not None:
            assert initialW.shape == (out_size, in_size)
            self.W = initialW
        else:
            # Default init: Gaussian with deviation wscale * sqrt(1/in_size).
            self.W = numpy.random.normal(
                0, wscale * math.sqrt(1. / in_size),
                (out_size, in_size)).astype(numpy.float32)
        xp = cuda.get_array_module(self.W)
        # Gradient buffer starts as NaN so uninitialized use is detectable;
        # zero_grads() must be called before accumulation.
        self.gW = xp.full_like(self.W, numpy.nan)
        if initial_bias is not None:
            assert initial_bias.shape == (out_size,)
            self.b = initial_bias
        elif not nobias:
            self.b = numpy.repeat(numpy.float32(bias), out_size)
        if self.b is not None:
            self.gb = xp.full_like(self.b, numpy.nan)

    @property
    def parameter_names(self):
        # Bias is reported only when it exists.
        if self.b is None:
            return 'W',
        return 'W', 'b'

    @property
    def gradient_names(self):
        if self.gb is None:
            return 'gW',
        return 'gW', 'gb'

    def check_type_forward(self, in_types):
        """Validate the input: float32 with trailing dims matching in_size."""
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim >= 2,
            (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==
             type_check.Variable(self.W.shape[1], 'W.shape[1]')),
        )

    def zero_grads(self):
        """Reset the accumulated gradients to zero."""
        self.gW.fill(0)
        if self.gb is not None:
            self.gb.fill(0)

    def forward(self, x):
        # Flatten trailing dimensions, then apply the affine map Y = XW^T + b.
        x = _as_mat(x[0])
        Wx = x.dot(self.W.T)
        if self.b is not None:
            Wx += self.b
        return Wx,

    def backward(self, x, gy):
        # Accumulate parameter gradients and return the input gradient,
        # reshaped back to the original input shape.
        _x = _as_mat(x[0])
        self.gW += gy[0].T.dot(_x)
        if self.gb is not None:
            self.gb += gy[0].sum(0)
        return gy[0].dot(self.W).reshape(x[0].shape),
class NonparameterizedLinear(function.Function):

    """Nonparameterized linear class.

    .. seealso:: :class:`Linear`

    """

    def check_type_forward(self, in_types):
        """Validate inputs: x, W and an optional bias b of matching shapes."""
        type_check.expect(
            2 <= in_types.size(),
            in_types.size() <= 3,
        )
        x_type = in_types[0]
        w_type = in_types[1]

        prod = type_check.Variable(numpy.prod, 'prod')
        type_check.expect(
            x_type.dtype == numpy.float32,
            w_type.dtype == numpy.float32,
            x_type.ndim >= 2,
            w_type.ndim == 2,
            prod(x_type.shape[1:]) == w_type.shape[1],
        )
        if in_types.size().eval() == 3:
            b_type = in_types[2]
            type_check.expect(
                b_type.ndim == 1,
                b_type.shape[0] == w_type.shape[0],
            )

    def forward(self, x):
        # Wrap the given weight (and optional bias) in a Linear function so
        # its forward/backward implementations can be reused; the instance is
        # kept on self for backward().
        W = x[1]
        out_size, in_size = W.shape
        if len(x) == 3:
            func = Linear(
                in_size, out_size, initialW=W, initial_bias=x[2])
        else:
            func = Linear(
                in_size, out_size, initialW=W, nobias=True)
        self.func = func
        if any(isinstance(i, cuda.ndarray) for i in x):
            func.to_gpu()
        return func.forward(x[:1])

    def backward(self, x, gy):
        # Delegate to the wrapped Linear and return the gradients for
        # (x, W[, b]) in input order.
        func = self.func
        func.zero_grads()
        gx = func.backward(x[:1], gy)
        if func.gb is None:
            return (gx[0], func.gW)
        return (gx[0], func.gW, func.gb)
def linear(x, W, b=None):
    """Nonparameterized linear function.

    Args:
        x (~chainer.Variable): Input variable.
        W (~chainer.Variable): Weight variable.
        b (~chainer.Variable): Bias variable (optional).

    Returns:
        ~chainer.Variable: Output variable.

    .. seealso:: :class:`Linear`

    """
    inputs = (x, W) if b is None else (x, W, b)
    return NonparameterizedLinear()(*inputs)
| StarcoderdataPython |
1782787 | ##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides tools to configure external softwares.
"""
# System import
import os
import re
import subprocess
def environment(sh_file=None, env=None):
    """ Function that return a dictionary containing the environment
    needed by a program (for instance FSL or FreeSurfer).

    In the configuration file, the variable are expected to be defined
    as 'VARIABLE_NAME=value'.

    Parameters
    ----------
    sh_file: str (mandatory)
        The path to the sh script used to set up the environment.
    env: dict (optional, default None)
        The default environment used to parse the configuration sh file.
        Defaults to an empty environment so only the program's own
        variables are captured.

    Returns
    -------
    environment: dict
        A dict containing the program configuration.
    """
    # Fixed mutable default argument: a dict default is shared across calls.
    if env is None:
        env = {}
    # Source the script, then dump the resulting environment.
    command = ["bash", "-c", ". '{0}' ; /usr/bin/printenv".format(sh_file)]
    process = subprocess.Popen(command, env=env,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    stdout = stdout.decode("utf8")
    stderr = stderr.decode("utf8")
    if process.returncode != 0:
        raise Exception(
            "Could not parse 'sh_file' {0}. Maybe you should check if all "
            "the dependencies are installed".format(stderr))
    # Parse the output: each line should be of the form 'VARIABLE_NAME=value'.
    # (Renamed from 'environment', which shadowed this function's own name.)
    parsed = {}
    for line in stdout.split(os.linesep):
        if line.startswith("export"):
            line = line.replace("export ", "")
            line = line.replace("'", "")
        match = re.match(r"^(\w+)=(\S*)$", line)
        if match:
            name, value = match.groups()
            # PWD is process state, not program configuration.
            if name != "PWD":
                parsed[name] = value
    return parsed
def concat_environment(env1, env2):
    """ Concatenate two environments without mutating either input.

    1 - Check duplicated keys and concatenate their values.
    2 - Update the concatenated environment.

    Parameters
    ----------
    env1: dict (mandatory)
        First environment.
    env2: dict (mandatory)
        Second environment.

    Returns
    -------
    concat_env: dict
        New environment where the duplicated keys values are concatenated
        with ':' (env1's value first).
    """
    # Work on a copy: the previous implementation mutated env1 in place,
    # which surprised callers that reused the original dict afterwards.
    concat_env = dict(env1)
    for key, value in env2.items():
        if key in concat_env and concat_env[key] != value:
            concat_env[key] += ":" + value
        else:
            concat_env[key] = value
    return concat_env
| StarcoderdataPython |
297818 | <reponame>AliYoussef96/dinuq
from Bio import SeqIO
from Bio.Seq import Seq
###########################################################
################### RDA ###################
###########################################################
#non-informative dinucleotide positions that will be excluded
noninfo = ['CpCpos1', 'CpApos1', 'GpCpos1', 'GpGpos1', 'GpUpos1', 'GpApos1', 'UpGpos1', 'UpApos1', 'ApCpos1', 'ApUpos1', 'ApApos1']
#dinucl should be a list like: ['CpC', 'CpG', 'CpU', 'CpA', 'GpC', 'GpG', 'GpU', 'GpA', 'UpC', 'UpG', 'UpU', 'UpA', 'ApC', 'ApG', 'ApU', 'ApA']
#position should also be a list: ['pos1', 'pos2', 'bridge', 'all']
def dinucleotide_motif(dinuc):
    """Map an 'XpY' RNA-style dinucleotide name to its DNA motif.

    e.g. 'CpG' -> 'CG', 'UpA' -> 'TA'.  Replaces the two ~70-line
    duplicated if-chains of the original implementation.
    """
    return dinuc.replace("p", "").replace("U", "T")


def relative_abundance(pairs, background, motif):
    """Relative dinucleotide abundance: f(XY) / (f(X) * f(Y)).

    Parameters
    ----------
    pairs: list of str
        The dinucleotides observed at the positions of interest.
    background: str
        The nucleotide string used for the mononucleotide frequencies
        (the whole CDS for 'all', the concatenated pairs otherwise).
    motif: str
        Two-letter DNA dinucleotide, e.g. 'CG'.
    """
    freq_first = background.count(motif[0]) / len(background)
    freq_second = background.count(motif[1]) / len(background)
    freq_pair = pairs.count(motif) / len(pairs)
    return freq_pair / (freq_first * freq_second)


# dinucl should be a list like: ['CpC', 'CpG', ..., 'ApA']
# position should also be a list: ['pos1', 'pos2', 'bridge', 'all']
def RDA(fasta_file, dinucl, position=('all',)):
    """Compute relative dinucleotide abundances per sequence of a FASTA file.

    Returns {record_id: {feature_name: rda_value}} where feature_name is the
    dinucleotide (for 'all') or dinucleotide+position otherwise.
    Non-informative pos1 combinations listed in `noninfo` are skipped.
    """
    all_results = {}
    for rec in SeqIO.parse(fasta_file, "fasta"):
        results = {}
        # Remove both gap characters.  (Bug fix: the original discarded the
        # result of ungap("-") and only applied ungap("~").)
        recungap = rec.seq.ungap("-").ungap("~")
        seq = str(recungap).upper()
        # Sanity checks for coding sequences; warnings only, as before.
        if len(seq) % 3 != 0:
            print(str('\n\nSequence ' + rec.id + ' has length not a multiple of 3...\n\n'))
        aa = str(recungap.translate())
        # Drop a trailing stop codon from both protein and nucleotide views.
        if aa[-1] == '*':
            aa = aa[:-1]
            seq = seq[:-3]
        if '*' in aa:
            print(str('\n\nSequence ' + rec.id + ' has internal stop codons...\n\n'))
        if 'all' in position:
            # One value per dinucleotide over every overlapping pair, with
            # mononucleotide frequencies taken from the whole sequence.
            pairs = [seq[d] + seq[d + 1] for d in range(0, len(seq) - 1)]
            for dinuc in dinucl:
                results[str(dinuc)] = relative_abundance(
                    pairs, seq, dinucleotide_motif(dinuc))
        else:
            for dinuc in dinucl:
                motif = dinucleotide_motif(dinuc)
                for pos in position:
                    if pos == 'bridge':
                        # Codon-bridging pairs: positions 3-1 of consecutive codons.
                        pairs = [seq[d] + seq[d + 1]
                                 for d in range(2, len(seq) - 3, 3)]
                        results[str(dinuc) + str(pos)] = relative_abundance(
                            pairs, "".join(pairs), motif)
                    if pos in ('pos1', 'pos2'):
                        name = str(dinuc) + str(pos)
                        # Skip dinucleotide/position pairs fixed by the code.
                        if name not in noninfo:
                            start = 0 if pos == 'pos1' else 1
                            pairs = [seq[d] + seq[d + 1]
                                     for d in range(start, len(seq), 3)]
                            results[name] = relative_abundance(
                                pairs, "".join(pairs), motif)
        # Every record gets an entry, even when dinucl is empty.
        all_results[rec.id] = results
    return all_results
#### TABLE ####
def RDA_to_tsv(rda_dic, output_name):
    """Write an RDA result dict ({acc: {feature: value}}) as a TSV file.

    The header columns come from the first accession's feature dict; each
    row keeps its own dict order.  The table is also echoed to stdout.
    (Rewrite: removed shadowing of the builtin `dict`, the repeated
    `list(d)[i]` lookups, and the manual file close.)
    """
    first_acc = next(iter(rda_dic), None)
    # NOTE(review): rows follow each accession's own key order — assumes all
    # inner dicts share the header's key order; confirm upstream.
    columns = rda_dic[first_acc] if first_acc is not None else {}
    lines = ["acc\t" + "".join(str(col) + "\t" for col in columns)]
    for acc, values in rda_dic.items():
        lines.append(str(acc) + "\t" + "".join(str(v) + "\t" for v in values.values()))
    table_out = "\n".join(lines)
    with open(output_name, "w+") as out:
        out.write(table_out)
    return print(table_out)
##########################
| StarcoderdataPython |
8049614 | #!/bin/bash
# -------------------------
# Filename: Address_Initialization.py
# Revision: 1.0
# Date: long ago
# Author: <NAME>
# Description: Generate code that prints out variable names and their addresses, so we can bind each dynamic address to its IR form.
# Process:
# 1. Based on initialization of global variable and socket fields, use regular expression to filter variable name
# 2. Add instrumentation to print variable and their address
# -------------------------
# Address_Initialization.py
from sets import Set
#import sys
#file1 = sys.argv[1]
filted_var = ["_sk_userlocks", "_do_early_retrans", "_frto", "_is_cwnd_limited", "_nonagle", "_repair", "_thin_dupack", "_skc_prot"]
def SYMBOLIZE_INIT(f_in, f_out, count):
    """Scan generated *_Initialize() C functions in f_in and, for each field
    assignment found, write KLEE symbolization stubs to f_out.

    count is the running index used to name symbols (symbol_a<N>); the
    updated value is returned so callers can chain invocations.
    """
    #f_in = file(file1, "r")
    #f_out = file(file2, "a")
    filter_mark = False
    Inside_Target_Function = False
    # Only lines between a recognised "*_Initialize(){" header and the next
    # line containing '}' are processed.
    for line in f_in:
        if line.startswith("void Model0_Listen_Server_Initialize(){") or line.startswith("void Model0_Server_A_Initialize(){") or line.startswith("void Model0_Server_Initialize(){") or line.startswith("void Model1_Listen_Server_Initialize(){") or line.startswith("void Model1_Server_A_Initialize(){") or line.startswith("void Model1_Server_Initialize(){"):
            print "Matched"
            print line
            Inside_Target_Function = True
            continue
        if Inside_Target_Function == False:
            continue
        elif line.find("}") != -1:
            Inside_Target_Function = False;
        # Keep statements only; strip trailing // comments and everything
        # from '=' onwards, leaving just the assignment target expression.
        line = line.strip()
        if line.find(";") == -1:
            continue
        pos = line.find("//")
        if (pos != -1):
            line = line[:pos]
        pos = line.find("=")
        if pos == -1:
            continue
        line = line[:pos]
        if (line.find("*((long long *)") > 1): # meanings they are stored in other heap, i.e., skc_prot-> .., skc_net->...
            print line
            continue
        '''
        for var in filted_var:
            if line.find(var) != -1:
                filter_mark = True
                break
        if filter_mark:
            filter_mark = False
            continue
        '''
        target = line
        # The field's C type sits between "unsigned" and the first '*'.
        pos0 = target.find("unsigned")
        pos1 = target[pos0:].find("*")
        if pos1 == -1: #skc_prot case, since it doesn't use offset to locate its field
            continue
        if line.find("+48)") != -1: #skc_net case //Be careful!! Solver is hard to resolve address, so don't symbolize address, such as skc_prot and skc_net
            continue
        cur_type = target[pos0:pos0+pos1]
        # Emit three lines: declaration, klee_make_symbolic(), and the
        # assignment back into the original target expression.
        count = count+1
        line0 = "\t"+cur_type+" symbol_a"+str(count)+";\n"
        line1 = "\tklee_make_symbolic(&symbol_a"+str(count)+", sizeof(symbol_a"+str(count)+"), \""+ target+"\");\n"
        line2 = "\t"+target+"=symbol_a"+str(count)+";\n"
        f_out.write(line0)
        f_out.write(line1)
        f_out.write(line2)
        #TODO: because Model0_Req_A has different struct (out_of_memory issue), we will find its fields specifically
        '''
        target1 = target.replace("Model0_Server_L", "Model0_Req_A")
        count = count+1
        line0 = "\t"+cur_type+" symbol_a"+str(count)+";\n"
        line1 = "\tklee_make_symbolic(&symbol_a"+str(count)+", sizeof(symbol_a"+str(count)+"), \""+ target1+"\");\n"
        line2 = "\t"+target1+"=symbol_a"+str(count)+";\n"
        f_out.write(line0)
        f_out.write(line1)
        f_out.write(line2)
        '''
        '''
        target2 = target.replace("Model0_Server_L", "Model0_Server_A")
        count = count+1
        line0 = "\t"+cur_type+" symbol_a"+str(count)+";\n"
        line1 = "\tklee_make_symbolic(&symbol_a"+str(count)+", sizeof(symbol_a"+str(count)+"), \""+ target2+"\");\n"
        line2 = "\t"+target2+"=symbol_a"+str(count)+";\n"
        f_out.write(line0)
        f_out.write(line1)
        f_out.write(line2)
        '''
    #f_in.close()
    #f_out.close()
    return count
| StarcoderdataPython |
1946150 | # Consensus and Profile
# rosalind.info/problems/cons/
import sys
class cons:
    """Rosalind CONS: consensus string and profile matrix for FASTA records."""

    def main(self, dna_file):
        """Print the consensus sequence followed by the A/C/G/T profile rows.

        dna_file is any iterable of FASTA-formatted lines.
        """
        if not dna_file:
            raise Exception('ERROR: File is empty.')
        # Keep only sequence lines (skip FASTA headers), one char per cell.
        stripped = [line.strip() for line in dna_file]
        rows = [list(entry) for entry in stripped if str(entry[0]) != '>']
        # Transpose so each inner list is one alignment column.
        columns = [list(col) for col in zip(*rows)]
        profile = []
        for column in columns:
            counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
            for base in column:
                counts[base] += 1
            profile.append(counts)
        # Ties resolve to the earliest base in A, C, G, T order.
        consensus = ''.join(
            max('ACGT', key=lambda base: col[base]) for col in profile)
        print(consensus)
        for base in 'ACGT':
            counts_row = ''.join(str(col[base]) + ' ' for col in profile)
            print('{}: {}'.format(base, counts_row))
filename = sys.argv[1]
if not filename:
raise Exception('ERROR: File name should not be empty!')
with open(filename, 'r') as seq_file:
cons().main(seq_file) | StarcoderdataPython |
365334 | """
.. module:: location.text
:synopsis: Django location application text module.
Django location application text module.
"""
from django.utils.translation import ugettext_lazy as _
# flake8: noqa
# required because of pep8 regression in ignoring disable of E123
# Verbose-name labels for address fields, keyed by model field name.
address_labels = {
    "country": _("Country"),
    "city": _("City"),
    "extended_address": _("Extended street address"),
    # NOTE(review): key spelled "lcoation" here and in the help-text dict —
    # presumably mirrors a misspelled model field name; confirm before fixing.
    "geographic_lcoation": _("Geographic location"),
    "label": _("Label"),
    "postal_code": _("Postal code"),
    "post_office_box": _("Post office box"),
    "province": _("Province"),
    "state": _("State"),
    "street_address": _("Street address"),
    "timezone": _("Timezone"),
}
# Help texts for address fields.
address_help_texts = {
    "country": _("Country."),
    "city": _("City."),
    # NOTE(review): labels use "extended_address" but help texts use
    # "extended_street_address" — confirm which matches the model field.
    "extended_street_address": _("Extended street address."),
    "geographic_lcoation": _("Geographic location."),
    "label": _("Label."),
    "postal_code": _("Postal code."),
    "post_office_box": _("Post office box."),
    "province": _("Province."),
    "state": _("State."),
    "street_address": _("Street address."),
    "timezone": _("Timezone."),
}
city_labels = {
    "province": _("Province"),
    "state": _("State"),
}
city_help_texts = {
    "province": _("Province."),
    "state": _("State."),
}
country_labels = {
    "iso_code": _("ISO code"),
}
country_help_texts = {
    "iso_code": _("ISO 3166-1 alpha-2."),
}
geographic_location_labels = {
    "latitude": _("Latitude"),
    "longitude": _("Longitude"),
    "range": _("Range"),
    "range_unit": _("Unit"),
}
geographic_location_help_texts = {
    "latitude": _("Latitude."),
    "longitude": _("Longitude."),
    "range": _("Range in unit."),
    "range_unit": _("Distance unit."),
}
language_labels = {
    "iso_code": _("ISO code"),
}
language_help_texts = {
    "iso_code": _("ISO code."),
}
region_labels = {
    "country": _("Country"),
    "iso_code": _("ISO code"),
}
region_help_texts = {
    "country": _("Country."),
    "iso_code": _("ISO code."),
}
timezone_labels = {
    "timezone": _("Timezone"),
}
timezone_help_texts = {
    "timezone": _("Timezone."),
}
| StarcoderdataPython |
6469037 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: html.py
# Date: Tue May 20 18:01:39 2014 +0800
# Author: <NAME> <<EMAIL>>
from . import api_method, request
from ukdbconn import get_mongo
# api: /html?pid=2&page=0,1,3,5
# 0 is the html framework
@api_method('/html')
def html():
    """Return a dict of {pagenum: 'html'} for the requested paper pages.

    Request params: pid (paper id), page (comma-separated page indices;
    page 0 is the html framework).
    """
    # NOTE(review): relies on Python 2 semantics — `long`, and list-returning
    # `map`; under Python 3, `pages` would be an iterator exhausted by max().
    try:
        pid = long(request.values.get('pid'))
        page_str = request.values.get('page')
        pages = map(int, page_str.split(','))
    except Exception:
        return {'status': 'error',
                'reason': 'invalid request'}
    db = get_mongo('paper')
    # Fetch only the page count and html fields for this paper.
    doc = db.find_one({'_id': pid}, {'page': 1, 'html': 1})
    if max(pages) > doc['page'] or min(pages) < 0:
        return {'status': 'error',
                'reason': 'invalid page index'}
    res = {}
    for p in pages:
        res[p] = doc['html'][p]
    return {'status': 'ok',
            'htmls': res }
| StarcoderdataPython |
1840381 | import gws.tools.net
import gws.tools.xml2
from . import error
_ows_error_strings = '<ServiceException', '<ServerException', '<ows:ExceptionReport'
def raw_get(url, **kwargs):
    """HTTP GET a URL, raising :class:`error.Error` on OWS or HTTP failures."""
    # lax=True so the server's error text is available even when status != 200.
    kwargs['lax'] = True
    try:
        resp = gws.tools.net.http_request(url, **kwargs)
    except gws.tools.net.Error as exc:
        raise error.Error('http error') from exc
    status = resp.status_code
    # An OWS error document can arrive under any status code.  Responses may
    # be large (e.g. images), so only the first kilobyte is sniffed.
    if resp.content.startswith(b'<') or 'xml' in resp.content_type:
        head = str(resp.content[:1024], encoding='utf8', errors='ignore').lower()
        for marker in _ows_error_strings:
            if marker.lower() in head:
                raise error.Error(resp.text[:1024])
    if status != 200:
        raise error.Error(f'HTTP error: {resp.status_code!r}')
    return resp
def get(url, service, request, **kwargs):
    """Send an OWS request (SERVICE + REQUEST params) and return the raw response."""
    params = kwargs.get('params') or {}
    params['SERVICE'] = service.upper()
    params['REQUEST'] = request
    # Some servers only accept uppercase parameter names.
    kwargs['params'] = {key.upper(): val for key, val in params.items()}
    return raw_get(url, **kwargs)
def get_text(url, service, request, **kwargs):
    """Like :func:`get`, but return the response body decoded as text."""
    return get(url, service, request, **kwargs).text
| StarcoderdataPython |
5033906 | <gh_stars>0
import argparse
from collections import defaultdict

import numpy

from db.sqlite import get_sqlite_twint

# Script: pull posts from the configured sources, keep only each author's
# top-engagement content, normalise it, and write one post per line.
parser = argparse.ArgumentParser(
    description="Fetches data from a number of sources and compiles a training set"
)
parser.add_argument(
    "--sqlite_twint", action="store", type=str, help="Where the bird site is stored"
)
args = parser.parse_args()

source_data = []
if args.sqlite_twint:
    source_data.extend(get_sqlite_twint(args.sqlite_twint))

print("Compiling engagement data for each source user")
# Group every twitter post's score under a "source&author" key.
# (defaultdict replaces the manual get/append bookkeeping.)
level_finder = defaultdict(list)
for item in source_data:
    if item["source"] == "twitter":
        level_finder[item["source"] + "&" + item["author"]].append(item["score"])

print("Computing engagement levels")
# Per-author bar: the 85th percentile of that author's scores.
bars = {key: numpy.percentile(scores, 85) for key, scores in level_finder.items()}

print("Filtering based on engagement level")
print("Pre: " + str(len(source_data)))
filtered_data = []
for item in source_data:
    if item["source"] == "twitter":
        bar = bars[item["source"] + "&" + item["author"]]
        if item["score"] >= bar:
            item["bar"] = bar
            filtered_data.append(item)
del source_data
print("Post: " + str(len(filtered_data)))

print("Removing cruft")
raw_content = [item["content"] for item in filtered_data]
del filtered_data

print("Processing content")
print("Pre: " + str(len(raw_content)))
processed_content = []
for content in raw_content:
    # if adding reddit, this also needs to convert to all-text
    tokens = content.lower().strip().split()
    result_tokens = []
    seen_word = False
    for token in tokens:
        # Drop leading @mentions (only before the first non-mention token).
        if not seen_word:
            if token.startswith("@"):
                continue
            seen_word = True
        # Drop links wherever they appear.
        if token.startswith(("http://", "https://")):
            continue
        result_tokens.append(token)
    # Keep only posts with more than three usable tokens.
    if len(result_tokens) > 3:
        processed_content.append(" ".join(result_tokens))
print("Post: " + str(len(processed_content)))

with open("model.txt", "w") as file_obj:
    for line in processed_content:
        file_obj.write(line + "\r\n")
| StarcoderdataPython |
3566282 | def ignore_msg(msg):
return 'fuzzy' in msg.flags or \
msg.obsolete or \
msg.msgstr == msg.msgid or \
not msg.msgstr
| StarcoderdataPython |
8050222 | <filename>model_compiler/src/model_compiler/compilers/saved_model_file_to_openvino_model.py
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from tempfile import TemporaryDirectory
from . import repository
from ..models.sources.saved_model_file import SavedModelFile
from ..models.targets.openvino_model import OpenvinoModel
from ..openvino_util import Config, execute_optimize_action
def _get_optimize_params(input_model, output_dir, config):
params = {'model_name': 'model',
'saved_model_dir': input_model,
'output_dir': output_dir}
# if enble_nhwc_to_nchw=None or False set --disable_nhwc_to_nchw
if not config.enable_nhwc_to_nchw:
params['disable_nhwc_to_nchw'] = None
if config.input_names is not None:
params['input'] = ','.join(config.input_names)
if config.input_shapes is not None:
params['input_shape'] = ','.join(str(shape) for shape in config.input_shapes)
if config.max_batch_size is not None:
params['batch'] = str(config.max_batch_size)
if config.output_names is not None:
params['output'] = ','.join(config.output_names)
if config.saved_model_tags is None:
params['saved_model_tags'] = 'serve'
else:
params['saved_model_tags'] = ','.join(config.saved_model_tags)
if config.data_type is not None:
params['data_type'] = config.data_type
return params
@repository.REPOSITORY.register(source_type=SavedModelFile, target_type=OpenvinoModel, config_type=Config)
def compile_source(source: SavedModelFile, config: Config) -> object:
    """Convert a TensorFlow SavedModel into an OpenVINO model.

    Runs the OpenVINO optimizer into a temporary directory and wraps the
    result in an OpenvinoModel.
    """
    temp_path = TemporaryDirectory()
    optimize_params = _get_optimize_params(source.model_path, temp_path.name, config)
    execute_optimize_action(optimize_params)
    # NOTE(review): the TemporaryDirectory object itself (not .name) is passed
    # on — presumably so the directory stays alive as long as the model does;
    # confirm OpenvinoModel.from_directory expects this.
    return OpenvinoModel.from_directory(temp_path)
| StarcoderdataPython |
3567972 | from substance.monads import *
from substance.logs import *
from substance import (Command, Engine)
from tabulate import tabulate
class Recreate(Command):
    """CLI command that recreates all (or the named) engine containers."""

    def getShellOptions(self, optparser):
        """Register the -t/--time grace-period option and return the parser."""
        optparser.add_option(
            "-t", "--time", dest="time",
            help="Seconds to wait before sending SIGKILL", default=10)
        return optparser

    def getUsage(self):
        return "substance recreate [options] [CONTAINER...]"

    def getHelpTitle(self):
        return "Recreate all or specified container(s)"

    def main(self):
        """Load the current engine and recreate its containers."""
        engine_name = self.parent.getOption('engine')
        grace_period = self.getOption('time')
        return (self.core.loadCurrentEngine(name=engine_name)
                .bind(Engine.loadConfigFile)
                .bind(Engine.envRecreate, containers=self.args, time=grace_period)
                .catch(self.exitError))
| StarcoderdataPython |
class A:
    # Class body binds a plain int to the name __class__, shadowing the
    # attribute normally provided by object.
    __class__ = 15
a = A()
# NOTE(review): this looks like an IDE/type-checker test fixture (see the
# "# <ref>" marker below) — keep the snippet as-is.
print(a.__class__)
# <ref> | StarcoderdataPython |
4922552 | <filename>com_detection.py
'''
This file includes the implementation of community detection module.
'''
import networkx as nx
import numpy as np
import community
from basic_test import compute_p, GAW
from scipy.stats import norm
from utils import to_undirected_graph, augmentation, percentile
def get_partition(graph):
    """Split *graph* into Louvain communities, each as a directed subgraph.

    Returns a dict mapping community id -> nx.DiGraph containing the
    community's nodes and both directed edges (with weights) between them.
    (Cleanup: removed the unused `num_communities` local, a leftover debug
    comment, and the redundant re-assignment of `communities[value]` inside
    the edge loop.)
    """
    # Louvain is computed on the undirected projection of the graph.
    partition = community.best_partition(to_undirected_graph(graph))
    communities = {}
    for node, com_id in partition.items():
        sub_g = communities.get(com_id)
        if sub_g is None:
            # First member of this community: open a fresh subgraph.
            sub_g = nx.DiGraph()
            sub_g.add_node(node)
            communities[com_id] = sub_g
            continue
        # Copy both directed edges between the new node and every member
        # already in the subgraph, preserving the original weights.
        sub_g.add_node(node)
        for other in list(sub_g.nodes()):
            if other == node:
                continue
            for u, v in ((node, other), (other, node)):
                edge = graph.get_edge_data(u, v)
                if edge is not None:
                    sub_g.add_edge(u, v, weight=edge['weight'])
    return communities
def compute_first_density(graph, communities):
    """Store each community's density relative to the whole graph on its nodes."""
    overall = nx.density(graph)
    for sub_g in communities.values():
        relative = nx.density(sub_g) / overall
        for node in sub_g.nodes():
            graph.node[node]['first_density'] = relative
    return graph
def compute_second_density(graph, communities):
    """Size-normalised density feature: first_density / |community|.

    Requires compute_first_density to have run first.
    (Cleanup: removed the unreachable commented-out alternative after the
    return statement.)
    """
    for sub_g in communities.values():
        size = sub_g.number_of_nodes()
        for node in sub_g.nodes():
            graph.node[node]['second_density'] = graph.node[node]['first_density'] / size
    return graph
def get_null_distribution(graph, null_samples):
    """Return (mean, std) of the densities of the null-model communities.

    Note: *graph* is unused but kept for interface compatibility with callers.
    """
    densities = []
    for comm in null_samples:
        try:
            densities.append(nx.density(comm))
        except Exception:
            # Emit a diagnostic before aborting, as before; the previous bare
            # `except:` also swallowed SystemExit/KeyboardInterrupt.
            print(type(comm))
            exit()
    return np.mean(densities), np.std(densities)
def compute_third_density(graph, communities, null_samples):
    """Attach a clipped z-like density score against the null-model distribution."""
    mean, std = get_null_distribution(graph, null_samples)
    for sub_g in communities.values():
        p_value = compute_p(nx.density(sub_g), mean, std)
        if p_value >= 0.5:
            score = 0
        else:
            # Inverse-normal transform of the tail probability, clipped to +/-8.
            score = np.clip(norm.ppf(1 - p_value), a_min=-8, a_max=8)
        for node in sub_g.nodes():
            graph.node[node]['third_density'] = score
    return graph
def small_community_feature(graph, communities, criterion):
    """Flag (0/1) the nodes of every community with at most *criterion* members."""
    for node in graph.nodes():
        graph.node[node]['small_community'] = 0
    for sub_g in communities.values():
        if sub_g.number_of_nodes() <= criterion:
            for node in sub_g.nodes():
                graph.node[node]['small_community'] = 1
    return graph
def compute_first_strength(graph, communities):
    """Attach each community's GAW of edge weights relative to the network GAW."""
    network_gaw = GAW(list(nx.get_edge_attributes(graph, 'weight').values()),
                      mode='simple')
    for sub_g in communities.values():
        com_gaw = GAW(list(nx.get_edge_attributes(sub_g, 'weight').values()),
                      mode='simple')
        ratio = com_gaw / network_gaw
        for node in sub_g.nodes():
            graph.node[node]['first_strength'] = ratio
    return graph
def compute_second_strength(graph, communities):
    """Size-normalised strength feature: first_strength / |community|."""
    for sub_g in communities.values():
        size = sub_g.number_of_nodes()
        for node in sub_g.nodes():
            graph.node[node]['second_strength'] = graph.node[node]['first_strength'] / size
    return graph
def community_detection(graph, null_samples, num_samples=20, small_criterion=4):
    """Run the full community-feature pipeline, mutating and returning *graph*."""
    communities = get_partition(augmentation(graph))
    graph = compute_first_density(graph, communities)
    graph = compute_second_density(graph, communities)
    graph = compute_third_density(graph, communities, null_samples[:num_samples])
    graph = small_community_feature(graph, communities, small_criterion)
    graph = compute_first_strength(graph, communities)
    graph = compute_second_strength(graph, communities)
    return graph
# from generator import ER_generator, draw_anomalies
# from utils import generate_null_models
# graph = ER_generator(n=500, p=0.02, seed=None)
# graph = draw_anomalies(graph)
# _, null_samples = generate_null_models(graph, num_models=4, min_size=10)
# graph = community_detection(graph, null_samples, num_samples=4)
# print(graph.nodes(data=True))
# print('FINISH!')
| StarcoderdataPython |
9716390 | <reponame>pulumi/pulumi-f5bigip<filename>sdk/python/pulumi_f5bigip/big_iq_as3.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['BigIqAs3Args', 'BigIqAs3']
@pulumi.input_type
class BigIqAs3Args:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen) — see the file
    # header; prefer regenerating over hand-editing.
    def __init__(__self__, *,
                 as3_json: pulumi.Input[str],
                 bigiq_address: pulumi.Input[str],
                 bigiq_password: pulumi.Input[str],
                 bigiq_user: pulumi.Input[str],
                 bigiq_login_ref: Optional[pulumi.Input[str]] = None,
                 bigiq_port: Optional[pulumi.Input[str]] = None,
                 bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
                 tenant_list: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a BigIqAs3 resource.
        :param pulumi.Input[str] as3_json: Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        :param pulumi.Input[str] bigiq_address: Address of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_password: Password of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_user: User name of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_login_ref: Login reference for token authentication (see BIG-IQ REST docs for details)
        :param pulumi.Input[str] bigiq_port: The registration key pool to use
        :param pulumi.Input[bool] bigiq_token_auth: Enable to use an external authentication source (LDAP, TACACS, etc)
        :param pulumi.Input[str] tenant_list: Name of Tenant
        """
        pulumi.set(__self__, "as3_json", as3_json)
        pulumi.set(__self__, "bigiq_address", bigiq_address)
        pulumi.set(__self__, "bigiq_password", bigiq_password)
        pulumi.set(__self__, "bigiq_user", bigiq_user)
        # Optional inputs are only set when provided, so pulumi can
        # distinguish "unset" from an explicit None.
        if bigiq_login_ref is not None:
            pulumi.set(__self__, "bigiq_login_ref", bigiq_login_ref)
        if bigiq_port is not None:
            pulumi.set(__self__, "bigiq_port", bigiq_port)
        if bigiq_token_auth is not None:
            pulumi.set(__self__, "bigiq_token_auth", bigiq_token_auth)
        if tenant_list is not None:
            pulumi.set(__self__, "tenant_list", tenant_list)

    @property
    @pulumi.getter(name="as3Json")
    def as3_json(self) -> pulumi.Input[str]:
        """
        Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        """
        return pulumi.get(self, "as3_json")

    @as3_json.setter
    def as3_json(self, value: pulumi.Input[str]):
        pulumi.set(self, "as3_json", value)

    @property
    @pulumi.getter(name="bigiqAddress")
    def bigiq_address(self) -> pulumi.Input[str]:
        """
        Address of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_address")

    @bigiq_address.setter
    def bigiq_address(self, value: pulumi.Input[str]):
        pulumi.set(self, "bigiq_address", value)

    @property
    @pulumi.getter(name="bigiqPassword")
    def bigiq_password(self) -> pulumi.Input[str]:
        """
        Password of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_password")

    @bigiq_password.setter
    def bigiq_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "bigiq_password", value)

    @property
    @pulumi.getter(name="bigiqUser")
    def bigiq_user(self) -> pulumi.Input[str]:
        """
        User name of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_user")

    @bigiq_user.setter
    def bigiq_user(self, value: pulumi.Input[str]):
        pulumi.set(self, "bigiq_user", value)

    @property
    @pulumi.getter(name="bigiqLoginRef")
    def bigiq_login_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Login reference for token authentication (see BIG-IQ REST docs for details)
        """
        return pulumi.get(self, "bigiq_login_ref")

    @bigiq_login_ref.setter
    def bigiq_login_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_login_ref", value)

    @property
    @pulumi.getter(name="bigiqPort")
    def bigiq_port(self) -> Optional[pulumi.Input[str]]:
        """
        The registration key pool to use
        """
        return pulumi.get(self, "bigiq_port")

    @bigiq_port.setter
    def bigiq_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_port", value)

    @property
    @pulumi.getter(name="bigiqTokenAuth")
    def bigiq_token_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable to use an external authentication source (LDAP, TACACS, etc)
        """
        return pulumi.get(self, "bigiq_token_auth")

    @bigiq_token_auth.setter
    def bigiq_token_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bigiq_token_auth", value)

    @property
    @pulumi.getter(name="tenantList")
    def tenant_list(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Tenant
        """
        return pulumi.get(self, "tenant_list")

    @tenant_list.setter
    def tenant_list(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_list", value)
@pulumi.input_type
class _BigIqAs3State:
    # State-lookup bag for BigIqAs3. Every field is optional because this
    # class is used when reading back / filtering existing resources.
    # NOTE: @pulumi.input_type introspects __init__ and the decorated
    # getters/setters, so the structure below must stay as generated.
    def __init__(__self__, *,
                 as3_json: Optional[pulumi.Input[str]] = None,
                 bigiq_address: Optional[pulumi.Input[str]] = None,
                 bigiq_login_ref: Optional[pulumi.Input[str]] = None,
                 bigiq_password: Optional[pulumi.Input[str]] = None,
                 bigiq_port: Optional[pulumi.Input[str]] = None,
                 bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
                 bigiq_user: Optional[pulumi.Input[str]] = None,
                 tenant_list: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering BigIqAs3 resources.
        :param pulumi.Input[str] as3_json: Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        :param pulumi.Input[str] bigiq_address: Address of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_login_ref: Login reference for token authentication (see BIG-IQ REST docs for details)
        :param pulumi.Input[str] bigiq_password: Password of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_port: The registration key pool to use
        :param pulumi.Input[bool] bigiq_token_auth: Enable to use an external authentication source (LDAP, TACACS, etc)
        :param pulumi.Input[str] bigiq_user: User name of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] tenant_list: Name of Tenant
        """
        # Only populate keys that were actually supplied so partial state
        # lookups do not overwrite unspecified fields.
        if as3_json is not None:
            pulumi.set(__self__, "as3_json", as3_json)
        if bigiq_address is not None:
            pulumi.set(__self__, "bigiq_address", bigiq_address)
        if bigiq_login_ref is not None:
            pulumi.set(__self__, "bigiq_login_ref", bigiq_login_ref)
        if bigiq_password is not None:
            pulumi.set(__self__, "bigiq_password", bigiq_password)
        if bigiq_port is not None:
            pulumi.set(__self__, "bigiq_port", bigiq_port)
        if bigiq_token_auth is not None:
            pulumi.set(__self__, "bigiq_token_auth", bigiq_token_auth)
        if bigiq_user is not None:
            pulumi.set(__self__, "bigiq_user", bigiq_user)
        if tenant_list is not None:
            pulumi.set(__self__, "tenant_list", tenant_list)

    @property
    @pulumi.getter(name="as3Json")
    def as3_json(self) -> Optional[pulumi.Input[str]]:
        """
        Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        """
        return pulumi.get(self, "as3_json")

    @as3_json.setter
    def as3_json(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "as3_json", value)

    @property
    @pulumi.getter(name="bigiqAddress")
    def bigiq_address(self) -> Optional[pulumi.Input[str]]:
        """
        Address of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_address")

    @bigiq_address.setter
    def bigiq_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_address", value)

    @property
    @pulumi.getter(name="bigiqLoginRef")
    def bigiq_login_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Login reference for token authentication (see BIG-IQ REST docs for details)
        """
        return pulumi.get(self, "bigiq_login_ref")

    @bigiq_login_ref.setter
    def bigiq_login_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_login_ref", value)

    @property
    @pulumi.getter(name="bigiqPassword")
    def bigiq_password(self) -> Optional[pulumi.Input[str]]:
        """
        Password of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_password")

    @bigiq_password.setter
    def bigiq_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_password", value)

    @property
    @pulumi.getter(name="bigiqPort")
    def bigiq_port(self) -> Optional[pulumi.Input[str]]:
        """
        The registration key pool to use
        """
        return pulumi.get(self, "bigiq_port")

    @bigiq_port.setter
    def bigiq_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_port", value)

    @property
    @pulumi.getter(name="bigiqTokenAuth")
    def bigiq_token_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable to use an external authentication source (LDAP, TACACS, etc)
        """
        return pulumi.get(self, "bigiq_token_auth")

    @bigiq_token_auth.setter
    def bigiq_token_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bigiq_token_auth", value)

    @property
    @pulumi.getter(name="bigiqUser")
    def bigiq_user(self) -> Optional[pulumi.Input[str]]:
        """
        User name of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_user")

    @bigiq_user.setter
    def bigiq_user(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_user", value)

    @property
    @pulumi.getter(name="tenantList")
    def tenant_list(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Tenant
        """
        return pulumi.get(self, "tenant_list")

    @tenant_list.setter
    def tenant_list(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_list", value)
class BigIqAs3(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 as3_json: Optional[pulumi.Input[str]] = None,
                 bigiq_address: Optional[pulumi.Input[str]] = None,
                 bigiq_login_ref: Optional[pulumi.Input[str]] = None,
                 bigiq_password: Optional[pulumi.Input[str]] = None,
                 bigiq_port: Optional[pulumi.Input[str]] = None,
                 bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
                 bigiq_user: Optional[pulumi.Input[str]] = None,
                 tenant_list: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        `BigIqAs3` provides details about bigiq as3 resource
        This resource is helpful to configure as3 declarative JSON on BIG-IP through BIG-IQ.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_f5bigip as f5bigip
        # Example Usage for json file
        exampletask = f5bigip.BigIqAs3("exampletask",
            as3_json=(lambda path: open(path).read())("bigiq_example.json"),
            bigiq_address="xx.xx.xxx.xx",
            bigiq_password="<PASSWORD>",
            bigiq_user="xxxxx")
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] as3_json: Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        :param pulumi.Input[str] bigiq_address: Address of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_login_ref: Login reference for token authentication (see BIG-IQ REST docs for details)
        :param pulumi.Input[str] bigiq_password: Password of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_port: The registration key pool to use
        :param pulumi.Input[bool] bigiq_token_auth: Enable to use an external authentication source (LDAP, TACACS, etc)
        :param pulumi.Input[str] bigiq_user: User name of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] tenant_list: Name of Tenant
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BigIqAs3Args,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        `BigIqAs3` provides details about bigiq as3 resource
        This resource is helpful to configure as3 declarative JSON on BIG-IP through BIG-IQ.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_f5bigip as f5bigip
        # Example Usage for json file
        exampletask = f5bigip.BigIqAs3("exampletask",
            as3_json=(lambda path: open(path).read())("bigiq_example.json"),
            bigiq_address="xx.xx.xxx.xx",
            bigiq_password="<PASSWORD>",
            bigiq_user="xxxxx")
        ```
        :param str resource_name: The name of the resource.
        :param BigIqAs3Args args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an args object or
        # individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(BigIqAs3Args, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       as3_json: Optional[pulumi.Input[str]] = None,
                       bigiq_address: Optional[pulumi.Input[str]] = None,
                       bigiq_login_ref: Optional[pulumi.Input[str]] = None,
                       bigiq_password: Optional[pulumi.Input[str]] = None,
                       bigiq_port: Optional[pulumi.Input[str]] = None,
                       bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
                       bigiq_user: Optional[pulumi.Input[str]] = None,
                       tenant_list: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = BigIqAs3Args.__new__(BigIqAs3Args)
            # Required properties may only be omitted when looking up an
            # existing resource by URN.
            if as3_json is None and not opts.urn:
                raise TypeError("Missing required property 'as3_json'")
            __props__.__dict__["as3_json"] = as3_json
            if bigiq_address is None and not opts.urn:
                raise TypeError("Missing required property 'bigiq_address'")
            __props__.__dict__["bigiq_address"] = bigiq_address
            __props__.__dict__["bigiq_login_ref"] = bigiq_login_ref
            if bigiq_password is None and not opts.urn:
                raise TypeError("Missing required property 'bigiq_password'")
            # Bug fix: this assignment had been corrupted to the placeholder
            # token `<PASSWORD>` (a syntax error); restore the parameter.
            __props__.__dict__["bigiq_password"] = bigiq_password
            __props__.__dict__["bigiq_port"] = bigiq_port
            __props__.__dict__["bigiq_token_auth"] = bigiq_token_auth
            if bigiq_user is None and not opts.urn:
                raise TypeError("Missing required property 'bigiq_user'")
            __props__.__dict__["bigiq_user"] = bigiq_user
            __props__.__dict__["tenant_list"] = tenant_list
        super(BigIqAs3, __self__).__init__(
            'f5bigip:index/bigIqAs3:BigIqAs3',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            as3_json: Optional[pulumi.Input[str]] = None,
            bigiq_address: Optional[pulumi.Input[str]] = None,
            bigiq_login_ref: Optional[pulumi.Input[str]] = None,
            bigiq_password: Optional[pulumi.Input[str]] = None,
            bigiq_port: Optional[pulumi.Input[str]] = None,
            bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
            bigiq_user: Optional[pulumi.Input[str]] = None,
            tenant_list: Optional[pulumi.Input[str]] = None) -> 'BigIqAs3':
        """
        Get an existing BigIqAs3 resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] as3_json: Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        :param pulumi.Input[str] bigiq_address: Address of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_login_ref: Login reference for token authentication (see BIG-IQ REST docs for details)
        :param pulumi.Input[str] bigiq_password: Password of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_port: The registration key pool to use
        :param pulumi.Input[bool] bigiq_token_auth: Enable to use an external authentication source (LDAP, TACACS, etc)
        :param pulumi.Input[str] bigiq_user: User name of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] tenant_list: Name of Tenant
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _BigIqAs3State.__new__(_BigIqAs3State)
        __props__.__dict__["as3_json"] = as3_json
        __props__.__dict__["bigiq_address"] = bigiq_address
        __props__.__dict__["bigiq_login_ref"] = bigiq_login_ref
        # Bug fix: same `<PASSWORD>` corruption as in _internal_init.
        __props__.__dict__["bigiq_password"] = bigiq_password
        __props__.__dict__["bigiq_port"] = bigiq_port
        __props__.__dict__["bigiq_token_auth"] = bigiq_token_auth
        __props__.__dict__["bigiq_user"] = bigiq_user
        __props__.__dict__["tenant_list"] = tenant_list
        return BigIqAs3(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="as3Json")
    def as3_json(self) -> pulumi.Output[str]:
        """
        Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        """
        return pulumi.get(self, "as3_json")

    @property
    @pulumi.getter(name="bigiqAddress")
    def bigiq_address(self) -> pulumi.Output[str]:
        """
        Address of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_address")

    @property
    @pulumi.getter(name="bigiqLoginRef")
    def bigiq_login_ref(self) -> pulumi.Output[Optional[str]]:
        """
        Login reference for token authentication (see BIG-IQ REST docs for details)
        """
        return pulumi.get(self, "bigiq_login_ref")

    @property
    @pulumi.getter(name="bigiqPassword")
    def bigiq_password(self) -> pulumi.Output[str]:
        """
        Password of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_password")

    @property
    @pulumi.getter(name="bigiqPort")
    def bigiq_port(self) -> pulumi.Output[Optional[str]]:
        """
        The registration key pool to use
        """
        return pulumi.get(self, "bigiq_port")

    @property
    @pulumi.getter(name="bigiqTokenAuth")
    def bigiq_token_auth(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable to use an external authentication source (LDAP, TACACS, etc)
        """
        return pulumi.get(self, "bigiq_token_auth")

    @property
    @pulumi.getter(name="bigiqUser")
    def bigiq_user(self) -> pulumi.Output[str]:
        """
        User name of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_user")

    @property
    @pulumi.getter(name="tenantList")
    def tenant_list(self) -> pulumi.Output[str]:
        """
        Name of Tenant
        """
        return pulumi.get(self, "tenant_list")
| StarcoderdataPython |
8012341 | <filename>lab01/redis-chat/client.py
import redis
# create the redis connection
r = redis.Redis()
# class representative of the system's users
class Person():
def __init__(self, username, name):
self.username = username
self.name = name
def __str__(self):
return self.username
def main():
    """Run the interactive Redis chat client.

    Registers the user under the ``RedisMS:<username>`` hash and then loops
    over a text menu: send messages, read messages from followed users,
    review own sent messages, and follow/unfollow other users.  All state
    lives in Redis (lists ``RedisMessages:*``, sets ``RedisFollowing:*``).
    """
    print("Welcome to Redis Message Server")
    username = input("Insert your username: ")
    name = input("Insert your name (first and last): ")
    person = Person(username, name)
    # Register (or refresh) this user so other clients can list/follow them.
    r.hset(f"RedisMS:{person.username}", "name", person.name)
    while True:
        options = "\n1) Send message\n2) Check messages from subscriptions\n3) Check messages sent\n4) Follow users\n5) Unfollow users\n6) Exit\n>>> "
        op = input(options)
        if (op == "6"):
            print("Goodbye! :)")
            break
        elif (op == "1"):
            message = input("Message to send: ")
            r.lpush(f"RedisMessages:{person.username}", message)
        elif (op == "2"):
            following = r.smembers(f"RedisFollowing:{person.username}")
            if (following == set()):
                print("You don't follow anyone (there are no messages).")
            else:
                for user in following:
                    user = str(user, 'utf-8')
                    print(f"-- Messages from User: {user} --")
                    message_list = r.lrange(f"RedisMessages:{user}", 0, -1)
                    for msg in message_list:
                        print(f" {str(msg, 'utf-8')}")
        elif (op == "3"):
            messages_sent = r.lrange(f"RedisMessages:{person.username}", 0, -1)
            if (messages_sent == []):
                print("You didn't sent any messages!")
            else:
                print(f"--- Messages You Sent ---")
                for msg in messages_sent:
                    print(f" {str(msg, 'utf-8')}")
        elif (op == "4"):
            users = r.keys("RedisMS:*")
            print("----- Users List -----")
            # Strip the "RedisMS:" key prefix (8 chars) to recover usernames.
            users_list = [str(user, "utf-8")[8:] for user in users]
            users_list.remove(person.username)
            for user in users_list:
                print(f" {user}")
            user_to_follow = input("\nUser to follow (username): ")
            if (user_to_follow == person.username):
                print("You can't follow yourself!")
            elif (user_to_follow not in users_list):
                print("The specified user does not exist!")
            else:
                r.sadd(f"RedisFollowing:{person.username}", user_to_follow)
        elif (op == "5"):
            following = [str(user, 'utf-8')
                         for user in r.smembers(f"RedisFollowing:{person.username}")]
            if (following == []):
                print("You don't follow anyone.")
            else:
                print("----- Following ------")
                for user in following:
                    print(f" {user}")
                user_to_unfollow = input("\nUser to unfollow (username): ")
                if user_to_unfollow not in following:
                    print("You don't follow the specified user!")
                else:
                    # Bug fix: the original issued SREM unconditionally, even
                    # after printing the "not followed" error message.
                    r.srem(f"RedisFollowing:{person.username}", user_to_unfollow)
        else:
            print("Input error! Please try again.")
if __name__ == "__main__":
main()
| StarcoderdataPython |
9661929 | from tornado.web import RequestHandler
import json
class MainHandler(RequestHandler):
    """
    Render the frontend of the system
    """
    async def get(self):
        # Serve the single-page frontend; tornado resolves "index.html"
        # against the application's configured template path.
        self.render("index.html")
| StarcoderdataPython |
6578979 | from typing import Any, Dict, Optional
from broadcaster import Event as BroadcasterEvent
class Event(BroadcasterEvent):
    """Broadcaster event that additionally carries a context mapping."""

    def __init__(
        self,
        channel: str,
        message: Any,
        context: Optional[Dict[str, Any]] = None,
    ):
        super().__init__(channel, message)
        # Default to a fresh dict per instance — never a shared mutable default.
        self.context = {} if context is None else context
| StarcoderdataPython |
1797257 | """
WSGI config for pbs project.
It exposes the WSGI callable as a module-level variable named ``application``
"""
import confy
import os
from pathlib2 import Path

# Load environment variables from a `.env` file in the project root
# (one directory above this file), if one exists.
d = Path(__file__).resolve().parents[1]
dot_env = os.path.join(str(d), '.env')
if os.path.exists(dot_env):
    confy.read_environment_file(dot_env)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pbs_project.settings')

from django.core.wsgi import get_wsgi_application
from dj_static import Cling, MediaCling

# Wrap the Django WSGI app so static and media files are served by
# dj-static (Cling/MediaCling) without a separate web server.
application = Cling(MediaCling(get_wsgi_application()))
| StarcoderdataPython |
1873330 | import socket
import threading
import argparse
def serveClient(clientToServeSocket, clientIPAddress, portNumber):
    """Handle one connected client: read a request, reply, close.

    :param clientToServeSocket: connected client socket
    :param clientIPAddress: client IP address (string), for logging
    :param portNumber: client port (int), for logging
    """
    clientRequest = clientToServeSocket.recv(4096)
    # Bug fixes: "dara" -> "data", and %-format arguments must be a single
    # tuple (the original passed three separate positional args to print()).
    print('[!] Received data from the client (%s:%d) : %s'
          % (clientIPAddress, portNumber, clientRequest.decode(errors='replace')))
    # Reply back to client — sockets require bytes, not str, on Python 3.
    clientToServeSocket.send(b'I am a server!')
    # Close socket
    clientToServeSocket.close()
def startServer(portNumber):
    """Listen on all interfaces at ``portNumber`` and serve clients forever.

    Each accepted connection is handed to :func:`serveClient` on its own
    thread.  This call never returns.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without "address already in use" errors.
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('0.0.0.0', portNumber))
    server.listen(10)
    print('[!] Listening locally on port %d ...' % portNumber)
    while True:
        client, address = server.accept()
        print('[+] Connected with the client: %s:%d' % (address[0], address[1]))
        # Handle clients
        # Bug fix: thread arguments must be passed via the ``args=`` keyword;
        # the original ``args(client, ...)`` was an invalid expression.
        serveClientThread = threading.Thread(target=serveClient,
                                             args=(client, address[0], address[1]))
        serveClientThread.daemon = True  # don't block interpreter exit
        serveClientThread.start()
def main():
    """Parse the command line and launch the TCP server."""
    argument_parser = argparse.ArgumentParser('TCP server')
    argument_parser.add_argument('-p', '--port', type=int, help='The port number')
    parsed = argument_parser.parse_args()
    # Hand the requested port over to the (blocking) server loop.
    startServer(parsed.port)
if __name__ == "__main__":
main()
| StarcoderdataPython |
11311997 | <filename>pitch-predictor/answers/components/collectStats/collect_stats_dataflow.py<gh_stars>1-10
# libraries
from __future__ import print_function
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
import apache_beam as beam
import argparse
import datetime
import logging
import os
import uuid
# parallel functions
from ParDoFns.gameday import GameDay
from ParDoFns.collectGames import collectGames
from ParDoFns.collectPitchers import collectPitchers
from ParDoFns.collectStats import collectStats
# environment vars
# os.environ['GOOGLE_APPLICATION_CREDENTIALS']='{{ GCP_PROJECT }}-da165d24798a.json'
# create a list of days to collect
# NOTE(review): this module-level ``pcol`` is rebuilt from the ``--year``
# argument inside run() below, so the list computed here is never used by
# the pipeline — it only runs as an import-time side effect.
today = datetime.datetime.now().date()
start_date = datetime.datetime.strptime(
    "20190801", "%Y%m%d"
).date()  # opening day 2019 :: 20190328
pcol = []
while start_date < today:
    day = str(start_date.day)
    month = str(start_date.month)
    year = str(start_date.year)
    gday = GameDay(day, month, year)
    pcol.append(gday)
    start_date += datetime.timedelta(days=1)
def run(argv=None):
    """Build and launch the Apache Beam pipeline that scrapes MLB pitch
    stats (pitchers -> games -> stats) and writes them to BigQuery.

    :param argv: optional argument list; defaults to sys.argv via
        ``parse_known_args``.  Unknown args are forwarded to Beam.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--runner",
        dest="runner",
        default="DataflowRunner",
        help="Select between DirectRunner vs DataflowRunner",
    )
    parser.add_argument(
        "--project",
        dest="project",
        default="{{ GCP_PROJECT }}",
        help="Select the gcp project to run this job",
    )
    parser.add_argument(
        "--region",
        dest="region",
        default="us-central1",
        help="Select the gcp project to run this job",
    )
    parser.add_argument(
        "--staging_location",
        dest="staging_location",
        default=f"gs://{{ GCP_PROJECT }}-dataflow/dataflow_stage/",
        help="Select the staging location for this job",
    )
    parser.add_argument(
        "--temp_location",
        dest="temp_location",
        default=f"gs://{{ GCP_PROJECT }}-dataflow/dataflow_tmp/",
        help="Select the temp location for this job",
    )
    parser.add_argument(
        "--setup_file",
        dest="setup_file",
        default="./setup.py",
        help="Config options for the pipeline",
    )
    # NOTE(review): with action="append" and a non-empty default, any
    # user-supplied --year values are appended *after* "2019" rather than
    # replacing it — a well-known argparse gotcha; confirm this is intended.
    parser.add_argument(
        "--year",
        dest="year",
        action="append",
        default=["2019"],
        help="Calendar years to pull data",
    )
    known_args, pipeline_args = parser.parse_known_args(argv)
    # Re-encode the parsed options as Beam pipeline flags, with a unique
    # job name suffix so reruns do not collide.
    pipeline_args.extend(
        [
            "--runner=" + known_args.runner,
            "--project=" + known_args.project,
            "--region=" + known_args.region,
            "--staging_location=" + known_args.staging_location,
            "--temp_location=" + known_args.temp_location,
            "--job_name=mlb-collect-games-{}".format((str(uuid.uuid4()))[0:6]),
            "--setup_file=" + known_args.setup_file,
        ]
    )
    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = True
    logging.info(pipeline_args)
    # define schema for BigQuery sink
    schema = "dateStamp:DATE,park_sv_id:STRING,play_guid:STRING,ab_total:FLOAT64,ab_count:FLOAT64,pitcher_id:STRING,batter_id:STRING,ab_id:FLOAT64,des:STRING,type:STRING,id:STRING,sz_top:FLOAT64,sz_bot:FLOAT64,pfx_xDataFile:FLOAT64,pfx_zDataFile:FLOAT64,mlbam_pitch_name:STRING,zone_location:FLOAT64,pitch_con:FLOAT64,stand:STRING,strikes:FLOAT64,balls:FLOAT64,p_throws:STRING,gid:STRING,pdes:STRING,spin:FLOAT64,norm_ht:FLOAT64,inning:FLOAT64,pitcher_team:STRING,tstart:FLOAT64,vystart:FLOAT64,ftime:FLOAT64,pfx_x:FLOAT64,pfx_z:FLOAT64,uncorrected_pfx_x:FLOAT64,uncorrected_pfx_z:FLOAT64,x0:FLOAT64,y0:FLOAT64,z0:FLOAT64,vx0:FLOAT64,vy0:FLOAT64,vz0:FLOAT64,ax:FLOAT64,ay:FLOAT64,az:FLOAT64,start_speed:FLOAT64,px:FLOAT64,pz:FLOAT64,pxold:FLOAT64,pzold:FLOAT64,tm_spin:FLOAT64,sb:FLOAT64"
    # Init day for each year
    # One GameDay seed (Jan 1) per requested year; downstream ParDos expand
    # it into the individual games/pitches.
    pcol = []
    for year in known_args.year:
        day = "1"
        month = "1"
        year = str(year)
        gday = GameDay(day, month, year)
        pcol.append(gday)
    # begin pipeline
    with beam.Pipeline(options=pipeline_options) as p:
        years = p | "Create Date Objects" >> beam.Create(pcol)
        pitchers = years | "Collect Pitchers" >> beam.ParDo(collectPitchers())
        games = pitchers | "Collect Games" >> beam.ParDo(collectGames())
        stats = games | "Collect Stats" >> beam.ParDo(collectStats())
        stats | "Write to BigQuery" >> beam.io.WriteToBigQuery(
            table="raw_games",
            dataset="baseball",
            project="{{ GCP_PROJECT }}",
            create_disposition="CREATE_IF_NEEDED",
            write_disposition="WRITE_APPEND",
            schema=schema,
        )


if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    run()
| StarcoderdataPython |
9633207 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import misc.utils as utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
class CiderPredictor(nn.Module):
    """Attention-based regressor that scores a caption against image regions.

    The caption is embedded and encoded with a bidirectional LSTM; the final
    hidden states form a query that attends over the region features, and
    the fused representation is mapped to a single non-negative score.
    """

    def __init__(self, embed_size=16304, embed_dimension=512, lstm_dimension=512, image_feature_dimension=2048, pretrained=None):
        super(CiderPredictor, self).__init__()
        self.image_feature_dimension = image_feature_dimension
        # Token embeddings; optionally replaced by pre-trained vectors.
        self.embeddings = nn.Embedding(embed_size, embed_dimension)
        if pretrained is not None:
            self.embeddings = nn.Embedding.from_pretrained(pretrained)
        self.bilstm = nn.LSTM(lstm_dimension, lstm_dimension, batch_first=True, bidirectional=True)
        self.attention_linear = nn.Linear(2 * embed_dimension, image_feature_dimension)
        self.attention_layer_2 = nn.Linear(image_feature_dimension, 2 * embed_dimension)
        self.final_layer = nn.Linear(2 * embed_dimension, 1)

    def forward(self, image_regions, captions):  # N * 36 * 2048, N * 14
        token_vectors = self.embeddings(captions)
        n_samples = captions.shape[0]
        # Keep only the final hidden state of each LSTM direction and
        # concatenate the two directions into one vector per sample.
        _, (final_hidden, _) = self.bilstm(token_vectors)
        caption_code = final_hidden.permute(1, 0, 2).reshape((n_samples, -1))
        # Project the caption code into region-feature space to build the
        # attention query.
        query = F.relu(self.attention_linear(caption_code)).unsqueeze(-1)  # N * 2048 * 1
        region_scores = torch.bmm(image_regions, query).squeeze(-1)  # N * 36
        weights = F.softmax(region_scores, dim=1).unsqueeze(-1)  # N * 36 * 1
        attended = torch.sum(image_regions * weights, axis=1)  # N * 2048
        attended = F.relu(self.attention_layer_2(attended))  # N * (2*embed_dimension)
        # Element-wise fusion of visual and textual codes, then regression.
        fused = attended * caption_code
        return F.relu(self.final_layer(fused)).squeeze(-1)  # N
| StarcoderdataPython |
9684288 | import os
import numpy as np
import gzip
import pickle
from pathlib import Path
from proseco.core import StarsTable
import pytest
from proseco import get_aca_catalog
from proseco.characteristics import aca_t_ccd_penalty_limit, MonFunc, MonCoord
import agasc
from Quaternion import Quat
import Ska.Sun
from proseco.tests.test_common import DARK40, mod_std_info
from .. import ACAReviewTable, run_aca_review
# Do not use the AGASC supplement in testing by default since mags can change
os.environ[agasc.SUPPLEMENT_ENABLED_ENV] = 'False'

# Baseline catalog request for obsid 48464 (an engineering request, ER:
# n_fid=0), reused by most tests below as a known-good set of inputs.
KWARGS_48464 = {'att': [-0.51759295, -0.30129397, 0.27093045, 0.75360213],
                'date': '2019:031:13:25:30.000',
                'detector': 'ACIS-S',
                'dither_acq': (7.9992, 7.9992),
                'dither_guide': (7.9992, 7.9992),
                'man_angle': 67.859,
                'n_acq': 8,
                'n_fid': 0,
                'n_guide': 8,
                'obsid': 48464,
                'sim_offset': -3520.0,
                'focus_offset': 0,
                't_ccd_acq': -9.943,
                't_ccd_guide': -9.938}
def test_t_ccd_effective_message():
    """Test printing a message about effective guide and/or acq CCD temperature
    when it is different from the predicted temperature."""
    kwargs = KWARGS_48464.copy()
    # Push both temperatures above the penalty limit so the "effective"
    # annotation is triggered.
    kwargs['t_ccd_guide'] = aca_t_ccd_penalty_limit + 0.75
    kwargs['t_ccd_acq'] = aca_t_ccd_penalty_limit + 0.5
    aca = get_aca_catalog(**kwargs)
    acar = aca.get_review_table()
    acar.run_aca_review()
    # Pre-formatted text that gets put into HTML report
    text = acar.get_text_pre()
    # Mirrors the penalty formula used by the review code:
    # effective = t_ccd + 1 + (t_ccd - penalty_limit).
    eff_guide = kwargs['t_ccd_guide'] + 1 + (kwargs['t_ccd_guide'] - aca_t_ccd_penalty_limit)
    eff_acq = kwargs['t_ccd_acq'] + 1 + (kwargs['t_ccd_acq'] - aca_t_ccd_penalty_limit)
    assert (f'Predicted Guide CCD temperature (max): {kwargs["t_ccd_guide"]:.1f} '
            f'<span class="caution">(Effective : {eff_guide:.1f})</span>') in text
    assert (f'Predicted Acq CCD temperature (init) : {kwargs["t_ccd_acq"]:.1f} '
            f'<span class="caution">(Effective : {eff_acq:.1f})</span>') in text
def test_review_catalog(tmpdir):
    """Run a full review of the baseline obsid 48464 catalog and check the
    emitted messages, roll options and HTML report outputs."""
    aca = get_aca_catalog(**KWARGS_48464)
    acar = aca.get_review_table()
    acar.run_aca_review()
    # Regression check on the exact review messages for this catalog.
    assert acar.messages == [
        {'text': 'Guide star imposter offset 2.6, limit 2.5 arcsec', 'category': 'warning',
         'idx': 4},
        {'text': 'P2: 3.33 less than 4.0 for ER', 'category': 'warning'},
        {'text': 'ER count of 9th (8.9 for -9.9C) mag guide stars 1.91 < 3.0',
         'category': 'critical'},
        {'text': 'ER with 6 guides but 8 were requested', 'category': 'caution'}]
    assert acar.roll_options is None
    # Filtering with >= keeps only messages at or above the given severity.
    msgs = (acar.messages >= 'critical')
    assert msgs == [
        {'text': 'ER count of 9th (8.9 for -9.9C) mag guide stars 1.91 < 3.0',
         'category': 'critical'}]
    assert acar.review_status() == -1
    # Run the review but without making the HTML and ensure review messages
    # are available on roll options.
    acar.run_aca_review(roll_level='critical', roll_args={'method': 'uniq_ids'})
    assert len(acar.roll_options) > 1
    assert acar.roll_options[0]['acar'].messages == acar.messages
    assert len(acar.roll_options[1]['acar'].messages) > 0
    # Check doing a full review for this obsid
    acar = aca.get_review_table()
    acar.run_aca_review(make_html=True, report_dir=tmpdir, report_level='critical',
                        roll_level='critical', roll_args={'method': 'uniq_ids'})
    path = Path(str(tmpdir))
    assert (path / 'index.html').exists()
    obspath = path / 'obs48464'
    assert (obspath / 'acq' / 'index.html').exists()
    assert (obspath / 'guide' / 'index.html').exists()
    assert (obspath / 'rolls' / 'index.html').exists()
def test_review_roll_options():
    """
    Test that the 'acar' key in the roll_option dict is an ACAReviewTable
    and that the first one has the same messages as the base (original roll)
    version
    :param tmpdir: temp dir supplied by pytest
    :return: None
    """
    # This is a catalog that has a critical message and one roll option
    kwargs = {'att': (160.9272490316051, 14.851572261604668, 99.996111473617802),
              'date': '2019:046:07:16:58.449',
              'detector': 'ACIS-S',
              'dither_acq': (7.9992, 7.9992),
              'dither_guide': (7.9992, 7.9992),
              'focus_offset': 0.0,
              'man_angle': 1.792525648258372,
              'n_acq': 8,
              'n_fid': 3,
              'n_guide': 5,
              'obsid': 21477,
              'sim_offset': 0.0,
              't_ccd_acq': -11.14616454993262,
              't_ccd_guide': -11.150381856818923}
    aca = get_aca_catalog(**kwargs)
    acar = aca.get_review_table()
    acar.run_aca_review(roll_level='critical')
    assert len(acar.roll_options) == 4
    # First roll_option is at the same attitude (and roll) as original. The check
    # code is run again independently but the outcome should be the same.
    assert acar.roll_options[0]['acar'].messages == acar.messages
    for opt in acar.roll_options:
        assert isinstance(opt['acar'], ACAReviewTable)
def test_probs_weak_reference():
    """
    Test issues related to the weak reference to self.acqs within the AcqProbs
    objects in cand_acqs.
    See comment in ACAReviewTable.__init__() for details.
    """
    aca = get_aca_catalog(**KWARGS_48464)
    # A pickle round-trip must produce an independent acqs table.
    aca2 = pickle.loads(pickle.dumps(aca))
    assert aca2.acqs is not aca.acqs
    # These fail. TODO: fix!
    # aca2 = aca.__class__(aca)  # default is copy=True
    # aca2 = deepcopy(aca)
    # Constructing a review table must also deep-copy the catalog tables.
    acar = ACAReviewTable(aca)
    assert aca.guides is not acar.guides
    assert aca.acqs is not acar.acqs
def test_roll_options_with_include_ids():
    """
    Test case from James that was breaking code due to a roll option that puts
    a force_include star outside the FOV.
    """
    kwargs = {'obsid': 48397.0,
              'att': [0.43437703, -0.47822201, -0.68470554, 0.33734053],
              'date': '2019:053:04:05:33.004', 'detector': 'ACIS-S',
              'dither_acq': (7.9992, 2.0016), 'dither_guide': (7.9992, 2.0016),
              'man_angle': 131.2011858838081, 'n_acq': 8, 'n_fid': 0, 'n_guide': 8,
              'sim_offset': 0.0, 'focus_offset': 0.0, 't_ccd_acq': -12.157792574498563,
              't_ccd_guide': -12.17,
              'include_ids_acq': np.array(  # Also tests passing float ids for include
                  [8.13042280e+08, 8.13040960e+08, 8.13044168e+08, 8.12911064e+08,
                   8.12920176e+08, 8.12913936e+08, 8.13043216e+08, 8.13045352e+08]),
              'include_halfws_acq': np.array(
                  [160., 160., 160., 160., 160., 160., 120., 60.])}
    aca = get_aca_catalog(**kwargs)
    acar = aca.get_review_table()
    # The review must complete without raising even when a forced-include
    # star falls outside the FOV at some candidate roll.
    acar.run_aca_review(roll_level='all', roll_args={'method': 'uniq_ids'})
    # As of the 2020-02 acq model update there is just one roll option
    # assert len(acar.roll_options) > 1
def test_roll_options_with_monitor_star():
    """
    Test that roll optimization succeeds in the case where a monitor star is
    specified in the catalog. This tests https://github.com/sot/proseco/issues/365
    which fixes https://github.com/sot/proseco/issues/364.
    """
    kwargs = {'att': [0.32916333, -0.50759709, 0.07481427, 0.79271655],
              'date': '2021:116:16:42:09.065', 'detector': 'ACIS-I',
              'dither_acq': (7.9992, 7.9992), 'dither_guide': (7.9992, 7.9992),
              'man_angle': 86.49934496445843, 'n_acq': 8, 'n_fid': 3, 'n_guide': 5,
              'obsid': 23050.0, 'sim_offset': 0.0, 'focus_offset': 0.0,
              't_ccd_acq': -9.067548914167258, 't_ccd_guide': -8.616156814261098,
              't_ccd_penalty_limit': -6.8,
              'monitors': [[335.516667, 58.675556, MonCoord.RADEC, 9.63, MonFunc.AUTO]]}
    aca = get_aca_catalog(**kwargs)
    acar = aca.get_review_table()
    acar.run_aca_review()
    # The catalog itself must be clean before exercising roll options.
    assert acar.messages == []
    # get_roll_options throws BadMonitorError before proseco PR#365
    acar.get_roll_options()
def test_uniform_roll_options():
    """Use obsid 22508 as a test case for failing to find a roll option using
    the 'uniq_ids' algorithm and falling through to a 'uniform' search.
    See https://github.com/sot/sparkles/issues/138 for context.
    """
    kwargs = {'att': [-0.25019352, -0.90540872, -0.21768747, 0.26504794],
              'date': '2020:045:18:19:50.234',
              'detector': 'ACIS-S',
              'n_guide': 5,
              'n_fid': 3,
              'dither': 8.0,
              'focus_offset': 0,
              'man_angle': 1.56,
              'obsid': 22508,
              'sim_offset': 0,
              't_ccd_acq': -9.8,
              't_ccd_guide': -9.8}
    aca = get_aca_catalog(**kwargs)
    acar = aca.get_review_table()
    acar.run_aca_review(roll_level='critical', roll_args={'max_roll_dev': 2.5,
                                                          'd_roll': 0.25})
    # Fell through to uniform roll search
    assert acar.roll_info['method'] == 'uniform'
    # Found at least one roll option with no critical messages
    assert any(len(roll_option['acar'].messages >= 'critical') == 0
               for roll_option in acar.roll_options)
    assert len(acar.roll_options) == 5
    # Now limit the number of roll options
    acar = aca.get_review_table()
    acar.run_aca_review(roll_level='critical',
                        roll_args={'max_roll_dev': 2.5, 'max_roll_options': 3,
                                   'd_roll': 0.25})
    assert len(acar.roll_options) == 3
def test_catch_exception_from_function():
    """With raise_exc=False, run_aca_review returns the traceback as a string."""
    exc = run_aca_review(raise_exc=False, load_name='non-existent load name fail fail')
    assert 'FileNotFoundError: no matching pickle file' in exc
    # Default behavior (raise_exc=True) propagates the exception instead.
    with pytest.raises(FileNotFoundError):
        exc = run_aca_review(load_name='non-existent load name fail fail')
def test_catch_exception_from_method():
    """Same raise_exc behavior as above, but for the ACAReviewTable method."""
    aca = get_aca_catalog(**KWARGS_48464)
    acar = aca.get_review_table()
    # An invalid roll_level is caught and returned as a traceback string.
    exc = acar.run_aca_review(raise_exc=False, roll_level='BAD VALUE')
    assert 'ValueError: tuple.index(x): x not in tuple' in exc
    # With the default raise_exc=True the ValueError propagates.
    with pytest.raises(ValueError):
        acar.run_aca_review(roll_level='BAD VALUE')
def test_run_aca_review_function(tmpdir):
    """Run the module-level run_aca_review with explicit acars and check
    messages and the HTML report output written to ``tmpdir``."""
    aca = get_aca_catalog(**KWARGS_48464)
    acar = aca.get_review_table()
    acars = [acar]
    exc = run_aca_review(load_name='test_load', report_dir=tmpdir, acars=acars)
    # No exception text means the review ran cleanly.
    assert exc is None
    # Regression-pinned review messages for the 48464 catalog.
    assert acar.messages == [
        {'text': 'Guide star imposter offset 2.6, limit 2.5 arcsec', 'category': 'warning',
         'idx': 4},
        {'text': 'P2: 3.33 less than 4.0 for ER', 'category': 'warning'},
        {'text': 'ER count of 9th (8.9 for -9.9C) mag guide stars 1.91 < 3.0',
         'category': 'critical'},
        {'text': 'ER with 6 guides but 8 were requested', 'category': 'caution'}]
    # Report artifacts: top-level index plus per-obsid star catalog image.
    path = Path(str(tmpdir))
    assert (path / 'index.html').exists()
    obspath = path / 'obs48464'
    assert (obspath / 'starcat48464.png').exists()
    assert 'TEST_LOAD sparkles review' in (path / 'index.html').read_text()
def test_roll_outside_range():
    """
    Run a test on obsid 48334 from ~MAR1119 that is at a pitch that has 0 roll_dev
    and is at a roll that is not exactly nominal roll for the attitude and date.
    The 'att' ends up with roll outside of the internally-computed roll_min / roll_max
    which caused an indexing exception in the roll-options code. Fixed by PR #91 which
    includes code to expand the roll_min / roll_max range to always include the roll
    of the originally supplied attitude.
    """
    kw = {'att': [-0.82389459, -0.1248412, 0.35722113, 0.42190692],
          'date': '2019:073:21:55:30.000',
          'detector': 'ACIS-S',
          'dither_acq': (7.9992, 7.9992),
          'dither_guide': (7.9992, 7.9992),
          'focus_offset': 0.0,
          'man_angle': 122.97035882921071,
          'n_acq': 8,
          'n_fid': 0,
          'n_guide': 8,
          'obsid': 48334.0,
          'sim_offset': 0.0,
          't_ccd_acq': -10.257559323423214,
          't_ccd_guide': -10.25810835536192}
    aca = get_aca_catalog(**kw)
    acar = aca.get_review_table()
    acar.get_roll_options()
    # The supplied attitude roll must be contained in the computed range.
    assert Quat(kw['att']).roll <= acar.roll_info['roll_max']
    assert Quat(kw['att']).roll >= acar.roll_info['roll_min']
def test_roll_options_dec89_9():
    """Test getting roll options for an OR and ER at very high declination
    where the difference between the ACA and target frames is large. Here
    the roll will differ by around 10 deg.
    """
    dec = 89.9
    date = '2019:006:12:00:00'
    roll = Ska.Sun.nominal_roll(0, dec, time=date)
    att = Quat([0, dec, roll])
    # Expected roll options. Note same basic outputs for add_ids and drop_ids but
    # different roll values.
    # NOTE ALSO: the P2 values are impacted by the bad region around row=-318,
    # col=-298. If handling of that bad region for acq changes then the P2
    # values may change.
    exp = {}
    exp[48000] = [' roll    P2  n_stars improvement roll_min roll_max  add_ids  drop_ids',
                  '------ ---- ------- ----------- -------- -------- --------- ---------',
                  '287.25 3.61    0.55        0.00   287.25   287.25        --        --',
                  '281.00 7.24    6.98        9.53   276.75   285.25 608567744        --',
                  '287.50 7.25    5.43        7.68   268.50   306.00        --        --',
                  '268.50 6.82    4.98        6.93   268.50   273.25 610927224 606601776',
                  '270.62 6.82    4.22        6.01   268.50   273.25 610927224        --']
    exp[18000] = [' roll    P2  n_stars improvement roll_min roll_max  add_ids  drop_ids',
                  '------ ---- ------- ----------- -------- -------- --------- ---------',
                  '276.94 3.61    7.54        0.00   276.94   276.94        --        --',
                  '277.07 7.25    8.00        1.89   258.19   295.69        --        --',
                  '270.57 7.16    8.00        1.84   266.19   274.94 608567744        --',
                  '258.19 6.82    8.00        1.68   258.19   262.69 610927224 606601776',
                  '259.69 6.82    8.00        1.68   258.19   262.69 610927224        --']
    # 48000 exercises the ER path, 18000 the OR path.
    for obsid in (48000, 18000):
        kwargs = mod_std_info(att=att, n_guide=8, n_fid=0, obsid=obsid, date=date)
        # Exclude a bunch of good stars to make the initial catalog lousy
        exclude_ids = [606470536, 606601760, 606732552, 606732584, 610926712, 611058024]
        kwargs['exclude_ids_acq'] = exclude_ids
        kwargs['exclude_ids_guide'] = exclude_ids
        aca = get_aca_catalog(**kwargs)
        acar = aca.get_review_table()
        acar.run_aca_review(roll_level='all', roll_args={'method': 'uniq_ids'}, make_html=False)
        tbl = acar.get_roll_options_table()
        out = tbl.pformat(max_lines=-1, max_width=-1)
        assert out == exp[obsid]
def test_calc_targ_from_aca():
    """
    Confirm _calc_targ_from_aca seems to do the right thing based on obsid
    This does a bit too much processing for what should be a lightweight test.
    """
    # Testing an ER where we expect the targ quaternion to be the same as the ACA
    # quaternion.
    aca = get_aca_catalog(**KWARGS_48464)
    acar = aca.get_review_table()
    q_targ = acar._calc_targ_from_aca(acar.att, 0, 0)
    # Identity (not just equality): ER path returns the input attitude object.
    assert q_targ is acar.att
    # Here we change the review object to represent an OR (by changing is_OR) and
    # confirm the targ quaternion is different from the ACA quaternion
    acar._is_OR = True
    q_targ = acar._calc_targ_from_aca(acar.att, 0, 0)
    # Specifically the targ quaternion should be off by ODB_SI_ALIGN which is about 70
    # arcsecs in yaw
    assert np.isclose(acar.att.dq(q_targ).yaw * 3600, 69.59, atol=0.01, rtol=0)
def test_get_roll_intervals():
    """
    Test that the behavior of get_roll_intervals is different for ORs and ERs with
    regard to use of offsets. They are set to arbitrary large values in the test.
    """
    # This uses the catalog at KWARGS_48464, but would really be better as a fully
    # synthetic test
    obs_kwargs = KWARGS_48464.copy()
    # Use these values to override the get_roll_intervals ranges to get more interesting
    # outputs.
    obs_kwargs['target_offset'] = (20 / 60., 30 / 60)  # deg
    aca_er = get_aca_catalog(**obs_kwargs)
    acar_er = aca_er.get_review_table()
    kw_or = obs_kwargs.copy()
    # Set this one to have an OR obsid (and not 0 which is special)
    kw_or['obsid'] = 1
    aca_or = get_aca_catalog(**kw_or)
    acar_or = aca_or.get_review_table()
    roll_dev = 5
    er_roll_intervs, er_info = acar_er.get_roll_intervals(
        acar_er.get_candidate_better_stars(),
        roll_dev=roll_dev)
    or_roll_intervs, or_info = acar_or.get_roll_intervals(
        acar_or.get_candidate_better_stars(),
        roll_dev=roll_dev)
    # The original attitude roll is always inside the returned range.
    assert acar_er.att.roll <= er_info['roll_max']
    assert acar_er.att.roll >= er_info['roll_min']
    # The roll ranges in ACA rolls should be different for the ER and the OR version
    assert or_info != er_info
    # Up to this point this is really a weak functional test.  The following asserts
    # are more regression tests for the attitude at obsid 48464
    or_rolls = [interv['roll'] for interv in or_roll_intervs]
    er_rolls = [interv['roll'] for interv in er_roll_intervs]
    assert or_rolls != er_rolls
    # Set a function to do some looping and isclose logic to compare
    # the actual vs expected intervals.
    def compare_intervs(intervs, exp_intervs):
        assert len(intervs) == len(exp_intervs)
        for interv, exp_interv in zip(intervs, exp_intervs):
            assert interv.keys() == exp_interv.keys()
            for key in interv.keys():
                # roll* values are floats -> compare with tolerance;
                # everything else (id sets) must match exactly.
                if key.startswith('roll'):
                    assert np.isclose(interv[key], exp_interv[key], atol=1e-6, rtol=0)
                else:
                    assert interv[key] == exp_interv[key]
    # For the OR we expect this
    or_exp_intervs = [{'add_ids': {84943288},
                       'drop_ids': {84937736},
                       'roll': 281.53501733258395,
                       'roll_max': 281.57597660655892,
                       'roll_min': 281.53501733258395},
                      {'add_ids': set(),
                       'drop_ids': set(),
                       'roll': 289.07597660655892,
                       'roll_max': 291.53501733258395,
                       'roll_min': 283.82597660655892},
                      {'add_ids': {84941648},
                       'drop_ids': set(),
                       'roll': 289.07597660655892,
                       'roll_max': 290.32597660655892,
                       'roll_min': 287.82597660655892},
                      {'add_ids': {85328120, 84941648},
                       'drop_ids': set(),
                       'roll': 289.82597660655892,
                       'roll_max': 290.32597660655892,
                       'roll_min': 289.32597660655892},
                      {'add_ids': {85328120},
                       'drop_ids': set(),
                       'roll': 291.53501733258395,
                       'roll_max': 291.53501733258395,
                       'roll_min': 289.32597660655892}]
    compare_intervs(or_roll_intervs, or_exp_intervs)
    # For the ER we expect these
    er_exp_intervs = [{'add_ids': set(),
                       'drop_ids': set(),
                       'roll': 290.80338289905592,
                       'roll_max': 291.63739755173594,
                       'roll_min': 285.17838289905592},
                      {'add_ids': {84943288},
                       'drop_ids': set(),
                       'roll': 291.63739755173594,
                       'roll_max': 291.63739755173594,
                       'roll_min': 289.67838289905592},
                      {'add_ids': {85328120, 84943288},
                       'drop_ids': set(),
                       'roll': 291.63739755173594,
                       'roll_max': 291.63739755173594,
                       'roll_min': 290.92838289905592}]
    compare_intervs(er_roll_intervs, er_exp_intervs)
def test_review_with_mon_star():
    """Test that requesting n_guide=5 with a monitor window produces no review
    messages (all OK), although this does result in aca.n_guide == 4."""
    # Fixed monitor window at yag/zag (0, 0), mag 7.5.
    monitors = [[0, 0, MonCoord.YAGZAG, 7.5, MonFunc.MON_FIXED]]
    stars = StarsTable.empty()
    stars.add_fake_constellation(n_stars=8, mag=8.5)
    aca = get_aca_catalog(**mod_std_info(n_fid=3, n_guide=5, obsid=5000),
                          monitors=monitors,
                          stars=stars, dark=DARK40,
                          raise_exc=True)
    acar = ACAReviewTable(aca)
    acar.run_aca_review()
    # The monitor window consumes one guide slot (5 requested -> 4 guides).
    assert aca.n_guide == 4
    assert len(aca.mons) == 1
    assert acar.messages == []
def test_review_from_pickle():
    """
    Test that run_aca_review works from a gzip pickle file.
    """
    # This is similar to the loading from core.get_acas_from_pickle
    # but that method does not allow full path specification.
    filename = Path(__file__).parent / 'data' / 'proseco_4.12.1_catalog.pkl.gz'
    with gzip.open(filename, 'rb') as fh:
        acas_dict = pickle.load(fh)
    # Pickle maps obsid -> ACA catalog; wrap each in a review table.
    acars = [ACAReviewTable(aca, obsid=obsid)
             for obsid, aca in acas_dict.items()]
    acars[0].run_aca_review()
    assert acars[0].messages == []
| StarcoderdataPython |
11241675 | <gh_stars>0
from django.test import TestCase
from django.core.management import call_command
from surveys18.models import Survey, NoSalaryHire, Month
class ModelTestCase(TestCase):
    """Exercise the Survey / NoSalaryHire relationship.

    Models under test: Survey, NoSalaryHire.
    Referenced models: WorkType, Month.
    Fixtures: survey.yaml, month.yaml, nosalaryhire.yaml.
    One survey (farmer) may have many NoSalaryHire records.
    """

    def setUp(self):
        # Load the fixtures every test in this case relies on.
        call_command('loaddata', 'test/survey.yaml', verbosity=0)
        call_command('loaddata', 'month.yaml', verbosity=0)
        call_command('loaddata', 'test/nosalaryhire.yaml', verbosity=0)

    def test_loaddata(self):
        """Fixtures yield 3 surveys and 5 no-salary hires."""
        # NOTE: assertEqual (not the deprecated assertEquals alias, removed
        # in Python 3.12) and .count() (avoids loading every row).
        self.assertEqual(Survey.objects.count(), 3)
        self.assertEqual(NoSalaryHire.objects.count(), 5)

    def test_create_population(self):
        """Creating a NoSalaryHire adds exactly one row."""
        survey = Survey.objects.get(id=3)
        month = Month.objects.get(id=12)
        count_before = NoSalaryHire.objects.count()
        # New value
        NoSalaryHire.objects.create(survey=survey, count=15, month=month)
        self.assertEqual(NoSalaryHire.objects.count(), count_before + 1)

    def test_survey_delete(self):
        """Deleting a survey cascades to its NoSalaryHire rows."""
        Survey.objects.filter(id=1).delete()
        self.assertEqual(NoSalaryHire.objects.filter(survey=1).count(), 0)

    def test_survey_delete_all(self):
        """Deleting all surveys removes all hires but leaves months intact."""
        months_before = Month.objects.count()
        Survey.objects.all().delete()
        self.assertEqual(NoSalaryHire.objects.count(), 0)
        self.assertEqual(Month.objects.count(), months_before)
| StarcoderdataPython |
3471530 | <filename>wellcad/com/_page.py<gh_stars>1-10
from ._dispatch_wrapper import DispatchWrapper
class Page(DispatchWrapper):
    """ The Page class manages properties for the document print out.

    All properties delegate directly to the wrapped COM dispatch object
    (``self._dispatch``); no state is kept on the Python side.

    Example
    -------
    >>> import wellcad.com
    >>> app = wellcad.com.Application()
    >>> app.new_borehole()
    <wellcad.com._borehole.Borehole object at 0x0000018B3DAF9D30>
    >>> borehole = app.get_active_borehole()
    >>> page = borehole.page
    """
    @property
    def depth_range(self):
        """int : Identify the depth range mode
        The available modes are the following:
        * 0 = depth range not defined (maximum depth range to be printed)
        * 1 = depth range defined by the user
        """
        return self._dispatch.DepthRange
    @depth_range.setter
    def depth_range(self, mode):
        # See the property docstring for valid mode values (0 or 1).
        self._dispatch.DepthRange = mode
    @property
    def document_height(self):
        """float: The document height in mm (read-only)."""
        return self._dispatch.DocumentHeight
    @property
    def document_width(self):
        """float: The document width in mm."""
        return self._dispatch.DocumentWidth
    @document_width.setter
    def document_width(self, value):
        self._dispatch.DocumentWidth = value
    @property
    def nb_of_depth_range(self):
        """The number of defined depth ranges (numeric; exact COM type
        not verified here)."""
        return self._dispatch.NbOfDepthRange
    @property
    def paper_mode(self):
        """int: 0 for page-by-page and 1 for fanfold."""
        return self._dispatch.PaperMode
    @paper_mode.setter
    def paper_mode(self, mode):
        self._dispatch.PaperMode = mode
    @property
    def print_titles_on_top(self):
        """bool: Show the log titles at the top of the printout."""
        return self._dispatch.PrintTitlesOnTop
    @print_titles_on_top.setter
    def print_titles_on_top(self, show):
        self._dispatch.PrintTitlesOnTop = show
    @property
    def print_titles_on_bottom(self):
        """bool: Show the log titles at the bottom of the printout."""
        return self._dispatch.PrintTitlesOnBottom
    @print_titles_on_bottom.setter
    def print_titles_on_bottom(self, show):
        self._dispatch.PrintTitlesOnBottom = show
    @property
    def print_titles_on_bottom_on_each_page(self):
        """bool: Repeat the log titles at the bottom of each printed page."""
        return self._dispatch.PrintTitlesOnBottomOnEachPage
    @print_titles_on_bottom_on_each_page.setter
    def print_titles_on_bottom_on_each_page(self, show):
        self._dispatch.PrintTitlesOnBottomOnEachPage = show
    @property
    def print_titles_on_top_on_each_page(self):
        """bool: Repeat the log titles at the top of each printed page."""
        return self._dispatch.PrintTitlesOnTopOnEachPage
    @print_titles_on_top_on_each_page.setter
    def print_titles_on_top_on_each_page(self, flag):
        self._dispatch.PrintTitlesOnTopOnEachPage = flag
    @property
    def top_margin(self):
        """int: The top margin of the page to print in mm."""
        return self._dispatch.TopMargin
    @top_margin.setter
    def top_margin(self, value):
        self._dispatch.TopMargin = value
    @property
    def bottom_margin(self):
        """int: The bottom margin of the page to print in mm."""
        return self._dispatch.BottomMargin
    @bottom_margin.setter
    def bottom_margin(self, value):
        self._dispatch.BottomMargin = value
    @property
    def left_margin(self):
        """int: The left margin of the page to print in mm."""
        return self._dispatch.LeftMargin
    @left_margin.setter
    def left_margin(self, value):
        self._dispatch.LeftMargin = value
    @property
    def right_margin(self):
        """int: The right margin of the page to print in mm."""
        return self._dispatch.RightMargin
    @right_margin.setter
    def right_margin(self, value):
        self._dispatch.RightMargin = value
    @property
    def numbering(self):
        """int: The page numbering mode
        The available modes are the following:
        * 0 = none
        * 1 = left
        * 2 = right
        * 3 = center
        * 4 = alternating
        """
        return self._dispatch.Numbering
    @numbering.setter
    def numbering(self, mode):
        # See the property docstring for valid mode values (0-4).
        self._dispatch.Numbering = mode
    @property
    def print_header(self):
        """bool: Option to print the document header or not."""
        return self._dispatch.PrintHeader
    @print_header.setter
    def print_header(self, flag):
        self._dispatch.PrintHeader = flag
    def add_depth_range(self, top, bottom):
        """Adds a new depth range to be printed in current master depth units.
        Parameters
        ----------
        top : float
            Top depth of the interval that will be added to the print list.
        bottom : float
            Bottom depth of the interval that will be added to the print list.
        """
        # Return value is whatever the COM call returns (not documented here).
        return self._dispatch.AddDepthRange(top, bottom)
    def remove_depth_range(self, index):
        """Remove an entry from the list of depth ranges.
        Parameters
        ----------
        index : int
            Zero based index of the entry to be removed from the list.
        """
        return self._dispatch.RemoveDepthRange(index)
| StarcoderdataPython |
369592 | <filename>o3/operators/row_count_operator.py
# -*- coding: utf-8 -*-
"""Custom operator for counting rows in a file."""
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from ..hooks.hdfs_hook import HDFSHook
class RowCountOperator(BaseOperator):
    """Count rows in one or more files, found locally or in HDFS.

    Only UTF-8 encoded text is supported.

    :param filepath: File path, list of paths, or callable that produces
        paths (the callable receives the task context as keyword arguments).
    :param str fs_type: ``'local'`` or ``'hdfs'``.
    :raises AirflowException: if ``fs_type`` is unsupported or ``filepath``
        is neither a path, a list of paths, nor a callable.
    """

    ui_color = '#ffefeb'

    @apply_defaults
    def __init__(self, filepath, fs_type: str = 'local', *args, **kwargs):
        super(RowCountOperator, self).__init__(*args, **kwargs)
        if fs_type not in ('local', 'hdfs'):
            raise AirflowException(f'Unsupported fs_type {fs_type!r}.')
        if isinstance(filepath, list):
            self.filepath_strs = filepath
            self.filepath_callable = None
        elif isinstance(filepath, str):
            self.filepath_strs = [filepath]
            self.filepath_callable = None
        elif callable(filepath):
            self.filepath_strs = None
            self.filepath_callable = filepath
        else:
            # Fail fast at DAG-definition time instead of surfacing a
            # confusing "'NoneType' object is not callable" at execution.
            raise AirflowException(
                f'filepath must be a str, list or callable, '
                f'got {type(filepath).__name__!r}.')
        self.fs_type = fs_type

    @staticmethod
    def _count_rows(text: str) -> str:
        """Return a human-readable row count message for *text*."""
        rows = len(text.splitlines())
        return f'found {rows} rows'

    def execute(self, context) -> list:
        """Count the rows of each resolved file and return one message per
        file.

        :raises AirflowException: if no files were processed.
        """
        # Explicit None check so an (unusual) empty list results in the
        # clear 'No rows counted.' error below rather than a TypeError.
        if self.filepath_strs is not None:
            filepaths = self.filepath_strs
        else:
            filepaths = self.filepath_callable(**context)
        row_counts = []
        if self.fs_type == 'local':
            for filepath in filepaths:
                self.log.debug(f'Reading local file {filepath}')
                with open(filepath, 'r', encoding='utf-8') as file_obj:
                    row_counts.append(self._count_rows(file_obj.read()))
                self.log.info(f'Processed local file {filepath}: '
                              f'{row_counts[-1]}')
        else:
            hdfs = HDFSHook().get_conn()
            for filepath in filepaths:
                self.log.debug(f'Reading HDFS file {filepath}')
                with hdfs.open(filepath, 'rb') as file_obj:
                    row_counts.append(
                        self._count_rows(file_obj.read().decode('utf-8')))
                self.log.info(f'Processed HDFS file {filepath}: '
                              f'{row_counts[-1]}')
        if not row_counts:
            raise AirflowException('No rows counted.')
        return row_counts
| StarcoderdataPython |
1628714 | # -*- coding: utf-8 -*-
from typing import List, Tuple
import collections.abc as abc
import random
import string
import click
# Length of the random suffix used in generated fake option names.
FAKE_OPT_NAME_LEN = 30
def get_callback_and_params(func) -> Tuple[abc.Callable, List[click.Option]]:
    """Returns callback function and its parameters list
    :param func: decorated function or click Command
    :return: (callback, params)
    """
    # A fully-built Command already exposes callback and params directly.
    if isinstance(func, click.Command):
        return func.callback, func.params
    # Otherwise it is a (possibly wrapped) decorated function: pull the
    # pending click params off it and unwrap to the underlying callable.
    params = getattr(func, '__click_params__', [])
    return resolve_wrappers(func), params
def get_fake_option_name(name_len: int = FAKE_OPT_NAME_LEN, prefix: str = 'fake'):
    """Build a random lowercase option name such as ``--fake-abc...``."""
    suffix = ''.join(random.choices(string.ascii_lowercase, k=name_len))
    return '--{}-{}'.format(prefix, suffix)
def raise_mixing_decorators_error(wrong_option: click.Option, callback: abc.Callable):
error_hint = wrong_option.opts or [wrong_option.name]
raise TypeError((
"Grouped options must not be mixed with regular parameters while adding by decorator. "
f"Check decorator position for {error_hint} option in '{callback.__name__}'."
))
def resolve_wrappers(f):
    """Get the underlying function behind any level of function wrappers."""
    # Iterative unwrap (equivalent to the recursive formulation).
    while hasattr(f, "__wrapped__"):
        f = f.__wrapped__
    return f
| StarcoderdataPython |
312219 | # -*- coding: utf-8 -*-
# Created by apple on 2017/1/30.
import os
import logging
from subprocess import Popen
class BaseConfig:
    """Shared configuration for the app-distribution server."""
    # server config
    host = '10.0.1.90'  # public address clients use to reach the server
    bing = '0.0.0.0'  # bind address (NOTE(review): name looks like a typo for "bind"; renaming would break callers)
    port = 8000  # bind port
    debug = False  # whether the server runs in debug/test mode
    url = None  # filled in below as https://{host}:{port}
    # Static URI prefixes served by the app.
    static_main = '/static'
    static_icon = '/icon'
    static_app = '/app'
    static_plist = '/plist'
    static_cer = '/cer/ca.cer'
    static_html = '/'
    # Pagination limits.
    apps_limit = 8  # apps per page
    app_versions_limit = 8  # app versions per page
    # Directories and file paths for static data.
    data_dir = 'data'
    app_dir = '{}/app'.format(data_dir)
    log_dir = 'log'
    icon_dir = '{}/icon'.format(data_dir)
    cer_dir = '{}/cer'.format(data_dir)
    db_dir = '{}/db'.format(data_dir)
    plist_dir = '{}/plist'.format(data_dir)
    html_dir = 'html'
    log_file = '{}/app.log'.format(log_dir)
    generate_certificate_file = './generate_certificate.sh'
    ca_file = '{}/ca.cer'.format(cer_dir)
    host_file = '{}/host'.format(cer_dir)
    server_key_file = '{}/server.key'.format(cer_dir)
    server_cer_file = '{}/server.cer'.format(cer_dir)
    # log config
    log_name = 'AppServer'
    log_level = logging.WARNING
    log_file_max_byte = 1024 * 1024 * 100  # 100M
    log_file_backup_count = 10
    # db
    db_name = 'app_server.db'
    db_url = 'sqlite:///{}/{}'.format(db_dir, db_name)
class DebugConfig(BaseConfig):
    """
    Configuration used for debugging / development.
    """
    debug = True
    log_level = logging.DEBUG
class ProductionConfig(BaseConfig):
    """
    Configuration used in production (inherits all BaseConfig defaults).
    """
    pass
# Active configuration; switch to ProductionConfig for deployment.
Config = DebugConfig
Config.url = 'https://{}:{}'.format(Config.host, Config.port)
# Create the required data/log directories if they do not exist yet.
paths = [Config.data_dir, Config.app_dir, Config.log_dir, Config.icon_dir, Config.db_dir, Config.plist_dir]
not_exists_dirs = [path for path in paths if not os.path.isdir(path)]
for directory in not_exists_dirs:
    os.mkdir(directory)
# (Re)generate the self-signed certificate when the host changed or no CA exists.
pre_host = None
if os.path.isfile(Config.host_file):
    with open(Config.host_file) as hf:
        pre_host = hf.read()
if pre_host != Config.host or not os.path.isfile(Config.ca_file):
    # NOTE(review): shell=True with interpolated config values; safe only
    # while host/cer_dir come from this file — revisit if they become user input.
    Popen('{} {} {}'.format(Config.generate_certificate_file, Config.host, Config.cer_dir), shell=True).wait()
    with open(Config.host_file, 'wb+') as f:
        f.write(Config.host.encode())
| StarcoderdataPython |
26045 | <filename>main.py
import pygame
import random
import math
import numpy as np
from pygame import mixer
# Training inputs: three (x, y) screen positions; targets: scores in [0, 100].
x = np.array(([723, 123.4000000000003], [121, 133.40000000000038], [586, 125.40000000000032]), dtype=float )
y = np.array(([99], [86], [89]), dtype=float )
# Scale inputs to [0, 1] per column (by column maximum) and targets by 100.
x = x / np.amax ( x, axis=0 )
y = y / 100
# maximum value
class NeuralNetwork:
    """Minimal 2-3-1 feed-forward network trained with plain backprop.

    feed_forward() caches the intermediate activations (self.z, self.z2,
    self.z3) that backward() reads, so train() must call them in order.
    """
    def __init__(self):
        # Layer sizes: 2 inputs -> 3 hidden -> 1 output.
        self.input_size = 2
        self.output_size = 1
        self.hidden_size = 3
        # Randomly initialized weight matrices (no bias terms).
        self.w1 = np.random.randn(self.input_size, self.hidden_size)
        self.w2 = np.random.randn(self.hidden_size, self.output_size)
    def feed_forward(self, x):
        # Forward propagation through the network; caches intermediates.
        self.z = np.dot(x, self.w1)        # hidden pre-activation
        self.z2 = self.sigmoid(self.z)     # hidden activation
        self.z3 = np.dot(self.z2, self.w2) # output pre-activation
        output = self.sigmoid(self.z3)
        return output
    def sigmoid(self, s, deriv = False):
        # Logistic function; with deriv=True, its derivative in terms of
        # the already-activated value s (i.e. s * (1 - s)).
        if deriv:
            return s*(1-s)
        return 1/(1+np.exp(-s))
    def backward(self, x, y, output):
        # Backward propagation through the network (uses cached self.z2).
        self.output_error = y - output
        self.output_delta = self.output_error * self.sigmoid(output, deriv = True)
        self.z2_error = self.output_delta.dot(self.w2.T)
        self.z2_delta = self.z2_error * self.sigmoid(self.z2, deriv = True)
        # Weight update (effective learning rate of 1, full batch).
        self.w1 += x.T.dot(self.z2_delta)
        self.w2 += self.z2.T.dot(self.output_delta)
    def train(self, x, y):
        # One forward + backward pass over the whole batch.
        output = self.feed_forward(x)
        self.backward(x, y, output)
# Train the aiming network up front; 500k full-batch iterations on 3 samples
# (this blocks game startup for a noticeable time).
NN = NeuralNetwork()
for i in range(500000):
    NN.train(x, y)
print("Predicted Output : " + str(NN.feed_forward(x)))
# Initialize the pygame
pygame.init()
# Create the 800x600 game window.
screen = pygame.display.set_mode((800, 600))
# Background
background = pygame.image.load("bg.png")
# Background music; -1 loops it forever.
mixer.music.load("DeathMatch.ogg")
mixer.music.play(-1)
# Title and the Icon of the game
pygame.display.set_caption("Space Shooter")
icon = pygame.image.load("ufoBlue.png")
pygame.display.set_icon(icon)
# Player sprite and starting position (X clamped to [0, 736] = 800 - sprite width).
playerImg = pygame.image.load("player.png")
X = 370
Y = 480
player_speed = 2
clock = pygame.time.Clock()
# Enemy sprites with randomized spawn positions near the top of the screen.
enemyImg = []
enemyX = []
enemyY = []
num_of_enemies = 3
enemy_speed = 1
for i in range(num_of_enemies):
    enemyImg.append(pygame.image.load("alien.png"))
    enemyX.append(random.randint(0, 736))
    enemyY.append(random.randint(0, 20))
# Bullet sprite; "ready" = off-screen, "fire" = in flight.
bullets = pygame.image.load("bullet_pl.png")
bulletX = 0
bulletY = 480
bullet_state = "ready"
bullet_x_change = 0
bullet_y_change = 8
# Score display state.
score_value = 0
font = pygame.font.Font('freesansbold.ttf', 32)
textX = 10
textY = 10
def show_score(x, y):
    """Render the current score in red at screen position (x, y)."""
    score = font.render("Score : " + str(score_value), True, (255, 0, 0))
    screen.blit(score, (x, y))
def player(x, y):
    """Draw the player sprite at (x, y)."""
    screen.blit(playerImg, (x, y))
def enemy(x, y, i):
    """Draw enemy sprite ``i`` at (x, y)."""
    screen.blit(enemyImg[i], (x, y))
def bullet(x, y):
    """Draw the bullet above the ship at x and mark it as in flight."""
    global bullet_state
    bullet_state = "fire"
    # +18.8 centers the bullet on the ship sprite; -5 starts it just above.
    screen.blit(bullets, (x+18.8, y-5))
def is_collision(enemy_x, enemy_y, bullet_x, bullet_y):
    """Return True if the enemy and bullet are within 27 px of each other.

    Uses math.hypot for the Euclidean distance and returns the comparison
    directly instead of an if/else over True/False literals.
    """
    return math.hypot(enemy_x - bullet_x, enemy_y - bullet_y) < 27
# Game Loop
def run_game():
    """Main game loop: draw, move enemies, auto-aim via the trained network,
    fire bullets and keep score, until the window is closed.

    Fixes vs. the original:
    * ``bullet_state is "fire"`` compared strings by identity (`is`), which
      is implementation-dependent and raises SyntaxWarning on CPython 3.8+;
      now uses ``==``.
    * The auto-aim stepped X toward the target in a busy ``while`` loop using
      ``player_speed``; after any KEYUP event ``player_speed`` is 0 and that
      loop never terminates.  The loop ran entirely within one frame anyway,
      so it is replaced by snapping X directly under the chosen enemy.
    """
    global playerImg
    global player_speed
    global bulletX
    global bulletY
    global score_value
    global bullet_state
    global X, Y
    global enemyX, enemyY
    left_move = False
    right_move = False
    blast = False
    action = True
    running = True
    while running:
        # Clear the frame, then draw the background image.
        screen.fill((0, 0, 0))
        screen.blit(background, (0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            # Any key release stops manual player movement.
            if event.type == pygame.KEYUP:
                player_speed = 0
        # Apply movement and clamp the ship to the window [0, 736].
        X = X + player_speed
        if X <= 0:
            X = 0
        elif X >= 736:
            X = 736
        player(X, Y)
        # Enemy movement: fall down; respawn at the top when off-screen
        # (costs a point) or when hit by the bullet (scores a point).
        for i in range(num_of_enemies):
            enemyY[i] = enemyY[i] + enemy_speed
            if enemyY[i] >= 600:
                enemyX[i] = random.randint(0, 736)
                enemyY[i] = random.randint(0, 50)
                score_value -= 1
            enemy(enemyX[i], enemyY[i], i)
            if is_collision(enemyX[i], enemyY[i], bulletX, bulletY):
                bulletY = 480
                bullet_state = "ready"
                score_value += 1
                print(score_value)
                enemyX[i] = random.randint(0, 736)
                enemyY[i] = random.randint(0, 50)
        # Ask the network which enemy position scores highest and aim there.
        x1 = ([enemyX[0], enemyY[0]], [enemyX[1], enemyY[1]], [enemyX[2], enemyY[2]])
        y1 = NN.feed_forward(x1)
        max_value_index = 0
        max_value = -1
        a = 0
        for i in y1:
            if i > max_value:
                max_value = i
                max_value_index = a
            a += 1
        # Snap the ship under the selected enemy (see docstring).
        X = enemyX[max_value_index]
        blast = True
        if action:
            # left_move / right_move are never set True here; kept for
            # compatibility with (removed) keyboard handling.
            if left_move:
                player_speed = -4
            if right_move:
                player_speed = 4
            if blast:
                bulletX = X
                bullet_sound = mixer.Sound("laser5.wav")
                bullet_sound.play()
                bullet(X, bulletY)
                blast = False
        # Bullet movement: reset when it leaves the top of the screen.
        if bulletY <= 0:
            bullet_state = "ready"
            bulletX = 0
            bulletY = 480
        if bullet_state == "fire":
            bullet(bulletX, bulletY)
            bulletY -= bullet_y_change
        show_score(textX, textY)
        pygame.display.update()
        clock.tick(90)
run_game()
| StarcoderdataPython |
6681439 | import argparse
# Command-line interface.  CUDA_VISIBLE_DEVICES must be exported before the
# GPU framework is imported below, which is why args are parsed first.
parser = argparse.ArgumentParser('Multimodal arbitrary style transfer')
parser.add_argument('input_path', type=str, help='path to a folder of input images')
parser.add_argument('style_path', type=str, help='path to a folder of style images')
parser.add_argument('weight_file', type=str, help='path to a trained weight file')
parser.add_argument('-n', '--n_styles', type=int, default=5, help='number of outputs per style')
# Fixed help-text typo: "gpu nummber" -> "gpu number".
parser.add_argument('--gpu', type=int, default=0, help='gpu number')
args = parser.parse_args()
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
from theano import tensor as T
import neuralnet as nn
import numpy as np
from scipy import misc
import time
from net import Encoder, Decoder
from data_loader import prep_image_test, get_weights
# Model/runtime configuration derived from CLI arguments.
input_size = (3, 256, 256)  # C, H, W of network inputs
vgg_param_file = 'vgg19_weights_normalized.h5'
vgg_info = [64, 64, 128, 128, 256, 256, 256, 256, 512]  # per-layer channel counts
num_styles = args.n_styles  # outputs generated per (style, input) pair
style_img_folder = args.style_path
input_img_folder = args.input_path
dec_param_file = args.weight_file
def test_random():
    """Stylize every input image with every style image, num_styles times
    each (random per-layer weights), saving results under ./outputs.
    """
    enc = Encoder((None,) + input_size, vgg_param_file)
    dec = Decoder(enc.output_shape, dec_param_file)
    X = T.tensor4('input')
    Y = T.tensor4('style')
    # One random weight vector per VGG layer controls the style mixing.
    weights = [T.vector('weights') for i in range(len(vgg_info))]
    nn.set_training_off()
    X_styled = dec(enc((X, Y), weights))
    test = nn.function([X, Y] + weights, X_styled, name='test generator')
    style_folder = os.listdir(style_img_folder)
    input_folder = os.listdir(input_img_folder)
    time_list = []
    if not os.path.exists('outputs'):
        os.mkdir('outputs')
    for style_file in style_folder:
        # NOTE(review): scipy.misc.imread/imsave were removed in SciPy >= 1.2;
        # this script requires an old SciPy (or porting to imageio) to run.
        sty_img = prep_image_test(misc.imread(style_img_folder + '/' + style_file))
        for input_file in input_folder:
            # Skip non-image files (prep_image_test raises ValueError).
            try:
                input_img = prep_image_test(misc.imread(input_img_folder + '/' + input_file))
            except ValueError:
                continue
            for i in range(num_styles):
                start = time.time()
                output = test(input_img, sty_img, *get_weights(vgg_info))
                time_list.append(time.time() - start)
                # CHW -> HWC for saving.
                output = np.transpose(output[0], (1, 2, 0))
                misc.imsave(os.path.join('outputs', input_file[:-4] + '_' + style_file[:-4] + '_%d.jpg' % i), output)
    print('Took %f s/image' % np.mean(time_list))
    print('Testing finished!')
if __name__ == '__main__':
    test_random()
| StarcoderdataPython |
3372499 | <gh_stars>10-100
"""Set up application's fonts"""
import dearpygui.dearpygui as dpg
import os
import pkg_resources
from ..items_ids import *
# Keys in ``items["fonts"]`` follow the pattern ``opensans_<style lowercased>``
# and the bundled files are named ``OpenSans-<Style>.ttf``, so the ten
# previously copy-pasted add_font calls can be generated from a single list.
_OPENSANS_STYLES = [
    "Bold", "BoldItalic", "ExtraBold", "ExtraBoldItalic", "Italic",
    "Light", "LightItalic", "Regular", "Semibold", "SemiboldItalic",
]
with dpg.font_registry() as font_registry:
    # Register every bundled OpenSans variant at size 16 under its item id.
    for style in _OPENSANS_STYLES:
        dpg.add_font(pkg_resources.resource_filename(
            'raviewer', '/fonts/OpenSans-{}.ttf'.format(style)),
                     16,
                     id=items["fonts"]["opensans_" + style.lower()])
| StarcoderdataPython |
9673948 | <gh_stars>100-1000
""" Logic to write ELF files.
"""
import io
import logging
from collections import defaultdict
from ...arch.arch_info import Endianness
from ... import ir
from .headers import ElfMachine
from .headers import SectionHeaderType, SectionHeaderFlag
from .headers import SymbolTableBinding, SymbolTableType
from .headers import ProgramHeaderType
from .file import ElfFile
from .string import StringTable, elf_hash
logger = logging.getLogger("elf")
def write_elf(obj, f, type="executable"):
    """ Save object as an ELF file.
    You can specify the type of ELF file with
    the type argument:
    - 'executable'
    - 'relocatable'
    - 'shared'
    Supported architectures: arm, microblaze, x86_64, xtensa, riscv
    (an unsupported arch or type raises KeyError).
    """
    # Per-architecture (bits, endianness) for the ELF identification.
    mapping = {
        "arm": (32, Endianness.LITTLE),
        "microblaze": (32, Endianness.BIG),
        "x86_64": (64, Endianness.LITTLE),
        "xtensa": (32, Endianness.LITTLE),
        "riscv": (32, Endianness.LITTLE),
    }
    bits, endianity = mapping[obj.arch.name]
    elf_file = ElfFile(bits=bits, endianness=endianity)
    etype_mapping = {
        "executable": ET_EXEC,
        "relocatable": ET_REL,
        "shared": ET_DYN,
    }
    e_type = etype_mapping[type]
    writer = ElfWriter(f, elf_file)
    writer.export_object(obj, e_type)
# Constant values below follow the System V ABI / TIS ELF specification (elf.h).
# Elf types (e_type field of the ELF header):
ET_NONE = 0
ET_REL = 1
ET_EXEC = 2
ET_DYN = 3
ET_CORE = 4
ET_NUM = 5
ET_LOOS = 0xFE00
ET_HIOS = 0xFEFF
ET_LOPROC = 0xFF00
ET_HIPROC = 0xFFFF
# Dynamic tags (d_tag values of .dynamic section entries):
DT_NULL = 0
DT_NEEDED = 1
DT_PLTRELSZ = 2
DT_PLTGOT = 3
DT_HASH = 4
DT_STRTAB = 5
DT_SYMTAB = 6
DT_RELA = 7
DT_RELASZ = 8
DT_RELAENT = 9
DT_STRSZ = 10
DT_SYMENT = 11
DT_INIT = 12
DT_FINI = 13
DT_SONAME = 14
DT_RPATH = 15
DT_SYMBOLIC = 16
DT_REL = 17
DT_RELSZ = 18
DT_RELENT = 19
DT_PLTREL = 20
DT_DEBUG = 21
DT_TEXTREL = 22
DT_JMPREL = 23
DT_LOPROC = 0x70000000
DT_HIPROC = 0x7FFFFFFF
# ppci arch name -> ELF e_machine value.
machine_map = {
    "arm": ElfMachine.ARM,
    "microblaze": ElfMachine.MICROBLAZE,
    "x86_64": ElfMachine.X86_64,
    "xtensa": ElfMachine.XTENSA,
    "riscv": ElfMachine.RISCV,
}
class ElfWriter:
""" ELF file creator.
"""
    def __init__(self, f, elf_file):
        """Bind the output stream *f* and the target ElfFile description."""
        self.f = f
        self.elf_file = elf_file
        # Struct definitions sized for the target's bits/endianness.
        self.header_types = elf_file.header_types
        self.obj = None
        self.elf_header = None
        # Fixed size of the e_ident prefix of the ELF header.
        self.e_ident_size = 16
def export_object(self, obj, e_type):
""" Main invocation point to generate an ELF file. """
logger.debug("Saving %s bits ELF file", self.header_types.bits)
self.obj = obj
self.e_type = e_type
self.write_identification()
self.elf_header = self.elf_file.header_types.ElfHeader()
self.program_headers = []
self.section_headers = []
self.string_table = StringTable()
self.section_numbers = {}
self.symbol_id_map = {}
# Skip over elf header, will come back to this.
self.f.seek(self.header_types.ElfHeader.size, io.SEEK_CUR)
self.page_size = 0x1000
if self.obj.images and self.e_type in [ET_EXEC, ET_DYN]:
self.write_images()
self.write_sections()
self.write_symbol_table()
if self.e_type == ET_REL:
self.write_rela_table()
elif self.e_type == ET_DYN:
self.write_dynamic_section()
self.write_string_table()
self.write_section_headers()
self.f.seek(self.e_ident_size)
self.write_elf_header()
self.write_program_headers()
def write_identification(self):
""" Write ELF identification magic. """
bits = self.header_types.bits
endianness = self.header_types.endianness
bit_map = {32: 1, 64: 2}
endianity_map = {Endianness.LITTLE: 1, Endianness.BIG: 2}
e_ident = bytearray([0x7F, ord("E"), ord("L"), ord("F")] + [0] * 12)
e_ident[4] = bit_map[bits] # 1=32 bit, 2=64 bit
e_ident[5] = endianity_map[endianness] # 1=little endian, 2=big endian
e_ident[6] = 1 # elf version = 1
e_ident[7] = 0 # os abi (3 =linux), 0=system V
assert len(e_ident) == self.e_ident_size
self.f.write(e_ident)
def write_elf_header(self):
""" Write ELF header.
"""
self.elf_header.e_type = self.e_type
self.elf_header.e_machine = machine_map[self.obj.arch.name]
self.elf_header.e_version = 1
if self.e_type == ET_EXEC:
if self.obj.entry_symbol_id is None:
logger.warning(
"ELF file without an entry symbol specified. This file might crash."
)
self.elf_header.e_entry = 0
else:
self.elf_header.e_entry = self.obj.get_symbol_id_value(
self.obj.entry_symbol_id
)
self.elf_header.e_flags = 0
# Size of elf header + identification:
self.elf_header.e_ehsize = (
self.e_ident_size + self.header_types.ElfHeader.size
)
# Index into table with strings:
self.elf_header.e_shstrndx = self.section_numbers[".strtab"]
# Write header:
self.elf_header.write(self.f)
def write_program_headers(self):
""" Write program headers """
assert self.elf_header.e_phnum == len(self.program_headers)
for program_header in self.program_headers:
program_header.write(self.f)
def write_images(self):
""" Write images (segments in ELF speak) to file. """
# Program header offset in file:
self.elf_header.e_phoff = self.f.tell()
# size of 1 program header:
self.elf_header.e_phentsize = self.header_types.ProgramHeader.size
# number of program headers:
self.elf_header.e_phnum = len(self.obj.images)
if self.e_type == ET_DYN:
# Add dynamic section:
self.elf_header.e_phnum += 1
# Skip over program headers, will come back to this:
self.f.seek(
self.elf_header.e_phnum * self.elf_header.e_phentsize, io.SEEK_CUR
)
# Write sections contained in images:
for image in self.obj.images:
self.align_to(self.page_size)
file_offset = self.f.tell()
for section in image.sections:
section_offset = section.address - image.address
section_file_offset = file_offset + section_offset
self.gen_section_header(section, section_file_offset)
self.f.write(image.data)
vaddr = image.address
size = image.size
if image.name == "code":
p_flags = 5
else:
p_flags = 6
# Create program header:
program_header = self.header_types.ProgramHeader()
program_header.p_type = ProgramHeaderType.LOAD
program_header.p_flags = p_flags
program_header.p_offset = file_offset
program_header.p_vaddr = vaddr
program_header.p_paddr = vaddr
program_header.p_filesz = size
program_header.p_memsz = size
program_header.p_align = self.page_size
self.program_headers.append(program_header)
def write_sections(self):
""" Write section which is not inside an image. """
for section in self.obj.sections:
if section.name not in self.section_numbers:
self.align_to(section.alignment)
file_offset = self.f.tell()
self.f.write(section.data)
self.gen_section_header(section, file_offset)
def gen_section_header(self, section, offset):
""" Create a section header for the given section.
This header will be written to the section header table
at the end of the file.
"""
section_header = self.header_types.SectionHeader()
section_header.sh_name = self.get_string(section.name)
section_header.sh_type = SectionHeaderType.PROGBITS.value
sh_flags = SectionHeaderFlag.ALLOC
if section.name == "data":
# Hmm, we should have an attribute on the section to
# determine the type of section...
sh_flags |= SectionHeaderFlag.WRITE
else:
sh_flags |= SectionHeaderFlag.EXECINSTR
section_header.sh_flags = sh_flags
section_header.sh_addr = section.address
section_header.sh_offset = offset # Offset in file
section_header.sh_size = section.size
section_header.sh_addralign = section.alignment
self.section_headers.append(section_header)
self.section_numbers[section.name] = len(self.section_headers)
def write_symbol_table(self):
""" Create symbol table. """
alignment = 8 if self.elf_file.bits == 64 else 4
self.align_to(alignment)
symtab_offset = self.f.tell()
symtab_entsize = self.header_types.SymbolTableEntry.size
symtab_size = symtab_entsize * (len(self.obj.symbols) + 1)
# Split symbols in local and global symbols:
local_symbols = []
global_symbols = []
for symbol in self.obj.symbols:
if symbol.binding == ir.Binding.GLOBAL:
global_symbols.append(symbol)
else:
local_symbols.append(symbol)
# Null symtab element (index 0):
self.f.write(bytes(symtab_entsize))
symbol_table_types = {
"func": SymbolTableType.FUNC,
"object": SymbolTableType.OBJECT,
}
for nr, symbol in enumerate(local_symbols + global_symbols, 1):
self.symbol_id_map[symbol.id] = nr
if symbol.binding == ir.Binding.GLOBAL:
st_bind = SymbolTableBinding.GLOBAL
else:
st_bind = SymbolTableBinding.LOCAL
st_type = symbol_table_types.get(
symbol.typ, SymbolTableType.NOTYPE
)
entry = self.header_types.SymbolTableEntry()
entry.st_name = self.get_string(symbol.name)
entry.st_info = (int(st_bind) << 4) | int(st_type)
if symbol.defined:
entry.st_shndx = self.section_numbers[symbol.section]
entry.st_value = (
symbol.value + self.obj.get_section(symbol.section).address
)
else:
entry.st_shndx = 0
entry.st_value = 0
entry.st_size = symbol.size
entry.write(self.f)
symbol_table_index_first_global = len(local_symbols) + 1
section_header = self.header_types.SectionHeader()
section_header.sh_name = self.get_string(".symtab")
section_header.sh_type = SectionHeaderType.SYMTAB.value
section_header.sh_flags = SectionHeaderFlag.ALLOC
section_header.sh_offset = symtab_offset
section_header.sh_size = symtab_size
section_header.sh_link = 0 # filled later
section_header.sh_info = symbol_table_index_first_global
section_header.sh_addralign = alignment
section_header.sh_entsize = symtab_entsize
self.section_headers.append(section_header)
self.section_numbers[".symtab"] = len(self.section_headers)
def write_rela_table(self):
""" Create relocation (rela) table.
Since a rela table is related to a single
other section, we might require several rela
tables, one per section.
"""
alignment = 8 if self.elf_file.bits == 64 else 4
sh_entsize = self.header_types.RelocationTableEntry.size
# Create a table per section:
reloc_groups = defaultdict(list)
for rel in self.obj.relocations:
reloc_groups[rel.section].append(rel)
for section_name in sorted(reloc_groups):
sh_size = sh_entsize * len(reloc_groups[section_name])
self.align_to(alignment)
rela_offset = self.f.tell()
for rel in reloc_groups[section_name]:
assert rel.section == section_name
r_sym = self.symbol_id_map[rel.symbol_id]
r_type = self.get_reloc_type(rel)
if self.elf_file.bits == 64:
r_info = (r_sym << 32) + r_type
else:
r_info = (r_sym << 8) + r_type
rela_entry = self.header_types.RelocationTableEntry()
rela_entry.r_offset = rel.offset
rela_entry.r_info = r_info
rela_entry.r_addend = rel.addend
rela_entry.write(self.f)
rela_name = ".rela" + section_name
section_header = self.header_types.SectionHeader()
section_header.sh_name = self.get_string(rela_name)
section_header.sh_type = SectionHeaderType.RELA.value
section_header.sh_flags = SectionHeaderFlag.INFO_LINK
section_header.sh_offset = rela_offset
section_header.sh_size = sh_size
section_header.sh_link = 0 # symtab, to be filled later
section_header.sh_info = self.section_numbers[section_name]
section_header.sh_addralign = alignment
section_header.sh_entsize = sh_entsize
self.section_headers.append(section_header)
def get_reloc_type(self, rel):
symbol = self.obj.symbols_by_id[rel.symbol_id]
r_type = self.obj.arch.get_reloc_type(rel.reloc_type, symbol)
return r_type
def write_string_table(self):
""" Write string table (last section) """
alignment = 1
self.align_to(alignment)
strtab_offset = self.f.tell()
sh_name = self.get_string(".strtab")
strtab_size = len(self.string_table.strtab)
self.f.write(self.string_table.strtab)
assert strtab_size == len(self.string_table.strtab)
section_header = self.header_types.SectionHeader()
section_header.sh_name = sh_name
section_header.sh_type = SectionHeaderType.STRTAB.value
section_header.sh_flags = SectionHeaderFlag.ALLOC
section_header.sh_offset = strtab_offset
section_header.sh_size = strtab_size
section_header.sh_addralign = alignment
self.section_headers.append(section_header)
self.section_numbers[".strtab"] = len(self.section_headers)
def write_section_headers(self):
""" Write section header table into file. """
self.align_to(8)
# section header offset:
self.elf_header.e_shoff = self.f.tell()
# size of a single section header:
self.elf_header.e_shentsize = self.header_types.SectionHeader.size
# Number of section headers:
self.elf_header.e_shnum = len(self.section_headers) + 1
# Null section all zeros (index 0):
self.f.write(bytes(self.elf_header.e_shentsize))
for section_header in self.section_headers:
# Patch in some forward links:
if section_header.sh_type == SectionHeaderType.SYMTAB.value:
section_header.sh_link = self.section_numbers[".strtab"]
elif section_header.sh_type == SectionHeaderType.DYNAMIC.value:
section_header.sh_link = self.section_numbers[".strtab"]
elif section_header.sh_type == SectionHeaderType.RELA.value:
section_header.sh_link = self.section_numbers[".symtab"]
section_header.write(self.f)
def write_dynamic_section(self):
""" Create dynamic instruction table.
The dynamic table includes instruction for
the runtime to execute.
"""
# Create dynamic contraption:
Entry = self.header_types.DynamicEntry
instructions = []
def emit(tag, val):
entry = Entry()
entry.d_tag = tag
entry.d_val = val
instructions.append(entry)
# DT_NEEDED libc.so.6
emit(DT_NEEDED, self.get_string("libc.so.6"))
emit(DT_NULL, 0)
# TODO: figure out how to generate this info properly
# DT_HASH
# DT_STRTAB
# DT_SYMTAB
# DT_STRSZ
# DT_SYMENT
# Write dynamic table to file:
alignment = 8 if self.elf_file.bits == 64 else 4
self.align_to(alignment)
dynamic_file_offset = self.f.tell()
for ins in instructions:
ins.write(self.f)
dynamic_size = len(instructions) * Entry.size
# Create program header:
p_flags = 6
# TODO: where to place the dynamic section in memory?
vaddr = 0x60000000
program_header = self.header_types.ProgramHeader()
program_header.p_type = ProgramHeaderType.DYNAMIC
program_header.p_flags = p_flags
program_header.p_offset = dynamic_file_offset
program_header.p_vaddr = vaddr
program_header.p_paddr = vaddr
program_header.p_filesz = dynamic_size
program_header.p_memsz = dynamic_size
program_header.p_align = alignment
self.program_headers.append(program_header)
# Create section header:
section_header = self.header_types.SectionHeader()
section_header.sh_name = self.get_string(".dynamic")
section_header.sh_type = SectionHeaderType.DYNAMIC
section_header.sh_addr = vaddr
section_header.sh_flags = SectionHeaderFlag.ALLOC
section_header.sh_offset = dynamic_file_offset
section_header.sh_size = dynamic_size
section_header.sh_link = 0 # filled later
section_header.sh_info = 0
section_header.sh_addralign = alignment
section_header.sh_entsize = Entry.size
self.section_headers.append(section_header)
self.section_numbers[".dynamic"] = len(self.section_headers)
def create_hash_table(self):
""" Create hash table for fast symbol lookup.
This is used by the dynamic loader when looking
up many symbols.
"""
# Same amount as symbol table
nchains = len(self.obj.symbols) + 1
nbuckets = 8
buckets = [0] * nbuckets
chain = [0] * nchains
for symbol in self.obj.symbols:
symbol_index = self.symbol_id_map[symbol.id]
hash_value = elf_hash(symbol.name)
bucket_index = hash_value % nbuckets
if buckets[bucket_index] == 0:
# empty bucket
buckets[bucket_index] = symbol_index
else:
# follow chain until empty slot.
chain_index = buckets[bucket_index]
while chain[chain_index] != 0:
chain_index = chain[chain_index]
chain[chain_index] = symbol_index
def align_to(self, alignment):
padding = (alignment - (self.f.tell() % alignment)) % alignment
self.f.write(bytes(padding))
assert self.f.tell() % alignment == 0
def get_string(self, txt: str) -> int:
""" Enter text in the string table and return the offset. """
return self.string_table.get_name(txt)
| StarcoderdataPython |
6531595 | from django.urls import path
from .views import MyObtainTokenPairView, RegisterView, UserView
from rest_framework_simplejwt.views import TokenRefreshView
# URL routes for the authentication app.
urlpatterns = [
    # Obtain a token pair (name suggests JWT access/refresh via
    # simplejwt -- confirm against MyObtainTokenPairView).
    path('login/', MyObtainTokenPairView.as_view(), name='token_obtain_pair'),
    # path('login/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    # Register a new user account.
    path('register/', RegisterView.as_view(), name='auth_register'),
    # Information about the current user.
    path('me/', UserView.as_view(), name='user_info')
] | StarcoderdataPython |
4952708 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this generated module (tfgen-generated; see file header).
__all__ = [
    'ClusterEndpointConfigArgs',
    'ClusterEndpointsArgs',
    'ClusterImagePolicyConfigArgs',
    'ClusterImagePolicyConfigKeyDetailArgs',
    'ClusterMetadataArgs',
    'ClusterOptionsArgs',
    'ClusterOptionsAddOnsArgs',
    'ClusterOptionsAdmissionControllerOptionsArgs',
    'ClusterOptionsKubernetesNetworkConfigArgs',
    'NodePoolInitialNodeLabelArgs',
    'NodePoolNodeArgs',
    'NodePoolNodeConfigDetailsArgs',
    'NodePoolNodeConfigDetailsPlacementConfigArgs',
    'NodePoolNodeErrorArgs',
    'NodePoolNodeShapeConfigArgs',
    'NodePoolNodeSourceArgs',
    'NodePoolNodeSourceDetailsArgs',
    'GetClustersFilterArgs',
    'GetNodePoolsFilterArgs',
    'GetWorkRequestErrorsFilterArgs',
    'GetWorkRequestLogEntriesFilterArgs',
    'GetWorkRequestsFilterArgs',
]
@pulumi.input_type
class ClusterEndpointConfigArgs:
    # NOTE: tfgen-generated code (see file header) -- regenerate via the
    # bridge rather than editing by hand; manual changes will be lost.
    def __init__(__self__, *,
                 subnet_id: pulumi.Input[str],
                 is_public_ip_enabled: Optional[pulumi.Input[bool]] = None,
                 nsg_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] subnet_id: The OCID of the regional subnet in which to place the Cluster endpoint.
        :param pulumi.Input[bool] is_public_ip_enabled: Whether the cluster should be assigned a public IP address. Defaults to false. If set to true on a private subnet, the cluster provisioning will fail.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] nsg_ids: A list of the OCIDs of the network security groups (NSGs) to apply to the cluster endpoint. For more information about NSGs, see [NetworkSecurityGroup](https://docs.cloud.oracle.com/iaas/api/#/en/iaas/20160918/NetworkSecurityGroup/).
        """
        pulumi.set(__self__, "subnet_id", subnet_id)
        # Optional inputs are only set when supplied, so unset keys stay absent:
        if is_public_ip_enabled is not None:
            pulumi.set(__self__, "is_public_ip_enabled", is_public_ip_enabled)
        if nsg_ids is not None:
            pulumi.set(__self__, "nsg_ids", nsg_ids)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> pulumi.Input[str]:
        """
        The OCID of the regional subnet in which to place the Cluster endpoint.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="isPublicIpEnabled")
    def is_public_ip_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the cluster should be assigned a public IP address. Defaults to false. If set to true on a private subnet, the cluster provisioning will fail.
        """
        return pulumi.get(self, "is_public_ip_enabled")

    @is_public_ip_enabled.setter
    def is_public_ip_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_public_ip_enabled", value)

    @property
    @pulumi.getter(name="nsgIds")
    def nsg_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of the OCIDs of the network security groups (NSGs) to apply to the cluster endpoint. For more information about NSGs, see [NetworkSecurityGroup](https://docs.cloud.oracle.com/iaas/api/#/en/iaas/20160918/NetworkSecurityGroup/).
        """
        return pulumi.get(self, "nsg_ids")

    @nsg_ids.setter
    def nsg_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "nsg_ids", value)
@pulumi.input_type
class ClusterEndpointsArgs:
    # NOTE: tfgen-generated code (see file header) -- regenerate via the
    # bridge rather than editing by hand; manual changes will be lost.
    def __init__(__self__, *,
                 kubernetes: Optional[pulumi.Input[str]] = None,
                 private_endpoint: Optional[pulumi.Input[str]] = None,
                 public_endpoint: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] kubernetes: The non-native networking Kubernetes API server endpoint.
        :param pulumi.Input[str] private_endpoint: The private native networking Kubernetes API server endpoint.
        :param pulumi.Input[str] public_endpoint: The public native networking Kubernetes API server endpoint, if one was requested.
        """
        # Optional inputs are only set when supplied, so unset keys stay absent:
        if kubernetes is not None:
            pulumi.set(__self__, "kubernetes", kubernetes)
        if private_endpoint is not None:
            pulumi.set(__self__, "private_endpoint", private_endpoint)
        if public_endpoint is not None:
            pulumi.set(__self__, "public_endpoint", public_endpoint)

    @property
    @pulumi.getter
    def kubernetes(self) -> Optional[pulumi.Input[str]]:
        """
        The non-native networking Kubernetes API server endpoint.
        """
        return pulumi.get(self, "kubernetes")

    @kubernetes.setter
    def kubernetes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kubernetes", value)

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The private native networking Kubernetes API server endpoint.
        """
        return pulumi.get(self, "private_endpoint")

    @private_endpoint.setter
    def private_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_endpoint", value)

    @property
    @pulumi.getter(name="publicEndpoint")
    def public_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The public native networking Kubernetes API server endpoint, if one was requested.
        """
        return pulumi.get(self, "public_endpoint")

    @public_endpoint.setter
    def public_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "public_endpoint", value)
@pulumi.input_type
class ClusterImagePolicyConfigArgs:
    # NOTE: tfgen-generated code (see file header) -- regenerate via the
    # bridge rather than editing by hand; manual changes will be lost.
    def __init__(__self__, *,
                 is_policy_enabled: Optional[pulumi.Input[bool]] = None,
                 key_details: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterImagePolicyConfigKeyDetailArgs']]]] = None):
        """
        :param pulumi.Input[bool] is_policy_enabled: (Updatable) Whether the image verification policy is enabled. Defaults to false. If set to true, the images will be verified against the policy at runtime.
        :param pulumi.Input[Sequence[pulumi.Input['ClusterImagePolicyConfigKeyDetailArgs']]] key_details: (Updatable) A list of KMS key details.
        """
        # Optional inputs are only set when supplied, so unset keys stay absent:
        if is_policy_enabled is not None:
            pulumi.set(__self__, "is_policy_enabled", is_policy_enabled)
        if key_details is not None:
            pulumi.set(__self__, "key_details", key_details)

    @property
    @pulumi.getter(name="isPolicyEnabled")
    def is_policy_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        (Updatable) Whether the image verification policy is enabled. Defaults to false. If set to true, the images will be verified against the policy at runtime.
        """
        return pulumi.get(self, "is_policy_enabled")

    @is_policy_enabled.setter
    def is_policy_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_policy_enabled", value)

    @property
    @pulumi.getter(name="keyDetails")
    def key_details(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterImagePolicyConfigKeyDetailArgs']]]]:
        """
        (Updatable) A list of KMS key details.
        """
        return pulumi.get(self, "key_details")

    @key_details.setter
    def key_details(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterImagePolicyConfigKeyDetailArgs']]]]):
        pulumi.set(self, "key_details", value)
@pulumi.input_type
class ClusterImagePolicyConfigKeyDetailArgs:
    # NOTE: tfgen-generated code (see file header) -- regenerate via the
    # bridge rather than editing by hand; manual changes will be lost.
    def __init__(__self__, *,
                 kms_key_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] kms_key_id: The OCID of the KMS key to be used as the master encryption key for Kubernetes secret encryption. When used, `kubernetesVersion` must be at least `v1.13.0`.
        """
        # Optional input is only set when supplied, so an unset key stays absent:
        if kms_key_id is not None:
            pulumi.set(__self__, "kms_key_id", kms_key_id)

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the KMS key to be used as the master encryption key for Kubernetes secret encryption. When used, `kubernetesVersion` must be at least `v1.13.0`.
        """
        return pulumi.get(self, "kms_key_id")

    @kms_key_id.setter
    def kms_key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_id", value)
@pulumi.input_type
class ClusterMetadataArgs:
    # NOTE: tfgen-generated code (see file header) -- regenerate via the
    # bridge rather than editing by hand; manual changes will be lost.
    def __init__(__self__, *,
                 created_by_user_id: Optional[pulumi.Input[str]] = None,
                 created_by_work_request_id: Optional[pulumi.Input[str]] = None,
                 deleted_by_user_id: Optional[pulumi.Input[str]] = None,
                 deleted_by_work_request_id: Optional[pulumi.Input[str]] = None,
                 time_created: Optional[pulumi.Input[str]] = None,
                 time_deleted: Optional[pulumi.Input[str]] = None,
                 time_updated: Optional[pulumi.Input[str]] = None,
                 updated_by_user_id: Optional[pulumi.Input[str]] = None,
                 updated_by_work_request_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] created_by_user_id: The user who created the cluster.
        :param pulumi.Input[str] created_by_work_request_id: The OCID of the work request which created the cluster.
        :param pulumi.Input[str] deleted_by_user_id: The user who deleted the cluster.
        :param pulumi.Input[str] deleted_by_work_request_id: The OCID of the work request which deleted the cluster.
        :param pulumi.Input[str] time_created: The time the cluster was created.
        :param pulumi.Input[str] time_deleted: The time the cluster was deleted.
        :param pulumi.Input[str] time_updated: The time the cluster was updated.
        :param pulumi.Input[str] updated_by_user_id: The user who updated the cluster.
        :param pulumi.Input[str] updated_by_work_request_id: The OCID of the work request which updated the cluster.
        """
        # Optional inputs are only set when supplied, so unset keys stay absent:
        if created_by_user_id is not None:
            pulumi.set(__self__, "created_by_user_id", created_by_user_id)
        if created_by_work_request_id is not None:
            pulumi.set(__self__, "created_by_work_request_id", created_by_work_request_id)
        if deleted_by_user_id is not None:
            pulumi.set(__self__, "deleted_by_user_id", deleted_by_user_id)
        if deleted_by_work_request_id is not None:
            pulumi.set(__self__, "deleted_by_work_request_id", deleted_by_work_request_id)
        if time_created is not None:
            pulumi.set(__self__, "time_created", time_created)
        if time_deleted is not None:
            pulumi.set(__self__, "time_deleted", time_deleted)
        if time_updated is not None:
            pulumi.set(__self__, "time_updated", time_updated)
        if updated_by_user_id is not None:
            pulumi.set(__self__, "updated_by_user_id", updated_by_user_id)
        if updated_by_work_request_id is not None:
            pulumi.set(__self__, "updated_by_work_request_id", updated_by_work_request_id)

    @property
    @pulumi.getter(name="createdByUserId")
    def created_by_user_id(self) -> Optional[pulumi.Input[str]]:
        """
        The user who created the cluster.
        """
        return pulumi.get(self, "created_by_user_id")

    @created_by_user_id.setter
    def created_by_user_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_by_user_id", value)

    @property
    @pulumi.getter(name="createdByWorkRequestId")
    def created_by_work_request_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the work request which created the cluster.
        """
        return pulumi.get(self, "created_by_work_request_id")

    @created_by_work_request_id.setter
    def created_by_work_request_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_by_work_request_id", value)

    @property
    @pulumi.getter(name="deletedByUserId")
    def deleted_by_user_id(self) -> Optional[pulumi.Input[str]]:
        """
        The user who deleted the cluster.
        """
        return pulumi.get(self, "deleted_by_user_id")

    @deleted_by_user_id.setter
    def deleted_by_user_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deleted_by_user_id", value)

    @property
    @pulumi.getter(name="deletedByWorkRequestId")
    def deleted_by_work_request_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the work request which deleted the cluster.
        """
        return pulumi.get(self, "deleted_by_work_request_id")

    @deleted_by_work_request_id.setter
    def deleted_by_work_request_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deleted_by_work_request_id", value)

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> Optional[pulumi.Input[str]]:
        """
        The time the cluster was created.
        """
        return pulumi.get(self, "time_created")

    @time_created.setter
    def time_created(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_created", value)

    @property
    @pulumi.getter(name="timeDeleted")
    def time_deleted(self) -> Optional[pulumi.Input[str]]:
        """
        The time the cluster was deleted.
        """
        return pulumi.get(self, "time_deleted")

    @time_deleted.setter
    def time_deleted(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_deleted", value)

    @property
    @pulumi.getter(name="timeUpdated")
    def time_updated(self) -> Optional[pulumi.Input[str]]:
        """
        The time the cluster was updated.
        """
        return pulumi.get(self, "time_updated")

    @time_updated.setter
    def time_updated(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_updated", value)

    @property
    @pulumi.getter(name="updatedByUserId")
    def updated_by_user_id(self) -> Optional[pulumi.Input[str]]:
        """
        The user who updated the cluster.
        """
        return pulumi.get(self, "updated_by_user_id")

    @updated_by_user_id.setter
    def updated_by_user_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "updated_by_user_id", value)

    @property
    @pulumi.getter(name="updatedByWorkRequestId")
    def updated_by_work_request_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the work request which updated the cluster.
        """
        return pulumi.get(self, "updated_by_work_request_id")

    @updated_by_work_request_id.setter
    def updated_by_work_request_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "updated_by_work_request_id", value)
@pulumi.input_type
class ClusterOptionsArgs:
    # NOTE: tfgen-generated code (see file header) -- regenerate via the
    # bridge rather than editing by hand; manual changes will be lost.
    def __init__(__self__, *,
                 add_ons: Optional[pulumi.Input['ClusterOptionsAddOnsArgs']] = None,
                 admission_controller_options: Optional[pulumi.Input['ClusterOptionsAdmissionControllerOptionsArgs']] = None,
                 kubernetes_network_config: Optional[pulumi.Input['ClusterOptionsKubernetesNetworkConfigArgs']] = None,
                 service_lb_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input['ClusterOptionsAddOnsArgs'] add_ons: Configurable cluster add-ons
        :param pulumi.Input['ClusterOptionsAdmissionControllerOptionsArgs'] admission_controller_options: (Updatable) Configurable cluster admission controllers
        :param pulumi.Input['ClusterOptionsKubernetesNetworkConfigArgs'] kubernetes_network_config: Network configuration for Kubernetes.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] service_lb_subnet_ids: The OCIDs of the subnets used for Kubernetes services load balancers.
        """
        # Optional inputs are only set when supplied, so unset keys stay absent:
        if add_ons is not None:
            pulumi.set(__self__, "add_ons", add_ons)
        if admission_controller_options is not None:
            pulumi.set(__self__, "admission_controller_options", admission_controller_options)
        if kubernetes_network_config is not None:
            pulumi.set(__self__, "kubernetes_network_config", kubernetes_network_config)
        if service_lb_subnet_ids is not None:
            pulumi.set(__self__, "service_lb_subnet_ids", service_lb_subnet_ids)

    @property
    @pulumi.getter(name="addOns")
    def add_ons(self) -> Optional[pulumi.Input['ClusterOptionsAddOnsArgs']]:
        """
        Configurable cluster add-ons
        """
        return pulumi.get(self, "add_ons")

    @add_ons.setter
    def add_ons(self, value: Optional[pulumi.Input['ClusterOptionsAddOnsArgs']]):
        pulumi.set(self, "add_ons", value)

    @property
    @pulumi.getter(name="admissionControllerOptions")
    def admission_controller_options(self) -> Optional[pulumi.Input['ClusterOptionsAdmissionControllerOptionsArgs']]:
        """
        (Updatable) Configurable cluster admission controllers
        """
        return pulumi.get(self, "admission_controller_options")

    @admission_controller_options.setter
    def admission_controller_options(self, value: Optional[pulumi.Input['ClusterOptionsAdmissionControllerOptionsArgs']]):
        pulumi.set(self, "admission_controller_options", value)

    @property
    @pulumi.getter(name="kubernetesNetworkConfig")
    def kubernetes_network_config(self) -> Optional[pulumi.Input['ClusterOptionsKubernetesNetworkConfigArgs']]:
        """
        Network configuration for Kubernetes.
        """
        return pulumi.get(self, "kubernetes_network_config")

    @kubernetes_network_config.setter
    def kubernetes_network_config(self, value: Optional[pulumi.Input['ClusterOptionsKubernetesNetworkConfigArgs']]):
        pulumi.set(self, "kubernetes_network_config", value)

    @property
    @pulumi.getter(name="serviceLbSubnetIds")
    def service_lb_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The OCIDs of the subnets used for Kubernetes services load balancers.
        """
        return pulumi.get(self, "service_lb_subnet_ids")

    @service_lb_subnet_ids.setter
    def service_lb_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "service_lb_subnet_ids", value)
@pulumi.input_type
class ClusterOptionsAddOnsArgs:
    # NOTE: tfgen-generated code (see file header) -- regenerate via the
    # bridge rather than editing by hand; manual changes will be lost.
    def __init__(__self__, *,
                 is_kubernetes_dashboard_enabled: Optional[pulumi.Input[bool]] = None,
                 is_tiller_enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[bool] is_kubernetes_dashboard_enabled: Whether or not to enable the Kubernetes Dashboard add-on.
        :param pulumi.Input[bool] is_tiller_enabled: Whether or not to enable the Tiller add-on.
        """
        # Optional inputs are only set when supplied, so unset keys stay absent:
        if is_kubernetes_dashboard_enabled is not None:
            pulumi.set(__self__, "is_kubernetes_dashboard_enabled", is_kubernetes_dashboard_enabled)
        if is_tiller_enabled is not None:
            pulumi.set(__self__, "is_tiller_enabled", is_tiller_enabled)

    @property
    @pulumi.getter(name="isKubernetesDashboardEnabled")
    def is_kubernetes_dashboard_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not to enable the Kubernetes Dashboard add-on.
        """
        return pulumi.get(self, "is_kubernetes_dashboard_enabled")

    @is_kubernetes_dashboard_enabled.setter
    def is_kubernetes_dashboard_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_kubernetes_dashboard_enabled", value)

    @property
    @pulumi.getter(name="isTillerEnabled")
    def is_tiller_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not to enable the Tiller add-on.
        """
        return pulumi.get(self, "is_tiller_enabled")

    @is_tiller_enabled.setter
    def is_tiller_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_tiller_enabled", value)
@pulumi.input_type
class ClusterOptionsAdmissionControllerOptionsArgs:
    # NOTE: tfgen-generated code (see file header) -- regenerate via the
    # bridge rather than editing by hand; manual changes will be lost.
    def __init__(__self__, *,
                 is_pod_security_policy_enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[bool] is_pod_security_policy_enabled: (Updatable) Whether or not to enable the Pod Security Policy admission controller.
        """
        # Optional input is only set when supplied, so an unset key stays absent:
        if is_pod_security_policy_enabled is not None:
            pulumi.set(__self__, "is_pod_security_policy_enabled", is_pod_security_policy_enabled)

    @property
    @pulumi.getter(name="isPodSecurityPolicyEnabled")
    def is_pod_security_policy_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        (Updatable) Whether or not to enable the Pod Security Policy admission controller.
        """
        return pulumi.get(self, "is_pod_security_policy_enabled")

    @is_pod_security_policy_enabled.setter
    def is_pod_security_policy_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_pod_security_policy_enabled", value)
@pulumi.input_type
class ClusterOptionsKubernetesNetworkConfigArgs:
    """Pulumi input type for a cluster's Kubernetes network CIDR configuration."""
    def __init__(__self__, *,
                 pods_cidr: Optional[pulumi.Input[str]] = None,
                 services_cidr: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] pods_cidr: The CIDR block for Kubernetes pods.
        :param pulumi.Input[str] services_cidr: The CIDR block for Kubernetes services.
        """
        if pods_cidr is not None:
            pulumi.set(__self__, "pods_cidr", pods_cidr)
        if services_cidr is not None:
            pulumi.set(__self__, "services_cidr", services_cidr)
    @property
    @pulumi.getter(name="podsCidr")
    def pods_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        The CIDR block for Kubernetes pods.
        """
        return pulumi.get(self, "pods_cidr")
    @pods_cidr.setter
    def pods_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pods_cidr", value)
    @property
    @pulumi.getter(name="servicesCidr")
    def services_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        The CIDR block for Kubernetes services.
        """
        return pulumi.get(self, "services_cidr")
    @services_cidr.setter
    def services_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "services_cidr", value)
@pulumi.input_type
class NodePoolInitialNodeLabelArgs:
    """Pulumi input type for a key/value label applied to node-pool nodes."""
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] key: (Updatable) The key of the pair.
        :param pulumi.Input[str] value: (Updatable) The value of the pair.
        """
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The key of the pair.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The value of the pair.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class NodePoolNodeArgs:
    """Pulumi input type describing a single compute node belonging to a node pool."""
    def __init__(__self__, *,
                 availability_domain: Optional[pulumi.Input[str]] = None,
                 error: Optional[pulumi.Input['NodePoolNodeErrorArgs']] = None,
                 fault_domain: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 kubernetes_version: Optional[pulumi.Input[str]] = None,
                 lifecycle_details: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_pool_id: Optional[pulumi.Input[str]] = None,
                 private_ip: Optional[pulumi.Input[str]] = None,
                 public_ip: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] availability_domain: (Updatable) The availability domain in which to place nodes. Example: `Uocm:PHX-AD-1`
        :param pulumi.Input['NodePoolNodeErrorArgs'] error: An error that may be associated with the node.
        :param pulumi.Input[str] fault_domain: The fault domain of this node.
        :param pulumi.Input[str] id: The OCID of the compute instance backing this node.
        :param pulumi.Input[str] kubernetes_version: (Updatable) The version of Kubernetes to install on the nodes in the node pool.
        :param pulumi.Input[str] lifecycle_details: Details about the state of the node.
        :param pulumi.Input[str] name: (Updatable) The name of the node pool. Avoid entering confidential information.
        :param pulumi.Input[str] node_pool_id: The OCID of the node pool to which this node belongs.
        :param pulumi.Input[str] private_ip: The private IP address of this node.
        :param pulumi.Input[str] public_ip: The public IP address of this node.
        :param pulumi.Input[str] state: The state of the node.
        :param pulumi.Input[str] subnet_id: (Updatable) The OCID of the subnet in which to place nodes.
        """
        if availability_domain is not None:
            pulumi.set(__self__, "availability_domain", availability_domain)
        if error is not None:
            pulumi.set(__self__, "error", error)
        if fault_domain is not None:
            pulumi.set(__self__, "fault_domain", fault_domain)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if kubernetes_version is not None:
            pulumi.set(__self__, "kubernetes_version", kubernetes_version)
        if lifecycle_details is not None:
            pulumi.set(__self__, "lifecycle_details", lifecycle_details)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if node_pool_id is not None:
            pulumi.set(__self__, "node_pool_id", node_pool_id)
        if private_ip is not None:
            pulumi.set(__self__, "private_ip", private_ip)
        if public_ip is not None:
            pulumi.set(__self__, "public_ip", public_ip)
        if state is not None:
            pulumi.set(__self__, "state", state)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
    @property
    @pulumi.getter(name="availabilityDomain")
    def availability_domain(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The availability domain in which to place nodes. Example: `Uocm:PHX-AD-1`
        """
        return pulumi.get(self, "availability_domain")
    @availability_domain.setter
    def availability_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "availability_domain", value)
    @property
    @pulumi.getter
    def error(self) -> Optional[pulumi.Input['NodePoolNodeErrorArgs']]:
        """
        An error that may be associated with the node.
        """
        return pulumi.get(self, "error")
    @error.setter
    def error(self, value: Optional[pulumi.Input['NodePoolNodeErrorArgs']]):
        pulumi.set(self, "error", value)
    @property
    @pulumi.getter(name="faultDomain")
    def fault_domain(self) -> Optional[pulumi.Input[str]]:
        """
        The fault domain of this node.
        """
        return pulumi.get(self, "fault_domain")
    @fault_domain.setter
    def fault_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fault_domain", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the compute instance backing this node.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="kubernetesVersion")
    def kubernetes_version(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The version of Kubernetes to install on the nodes in the node pool.
        """
        return pulumi.get(self, "kubernetes_version")
    @kubernetes_version.setter
    def kubernetes_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kubernetes_version", value)
    @property
    @pulumi.getter(name="lifecycleDetails")
    def lifecycle_details(self) -> Optional[pulumi.Input[str]]:
        """
        Details about the state of the node.
        """
        return pulumi.get(self, "lifecycle_details")
    @lifecycle_details.setter
    def lifecycle_details(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "lifecycle_details", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The name of the node pool. Avoid entering confidential information.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="nodePoolId")
    def node_pool_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the node pool to which this node belongs.
        """
        return pulumi.get(self, "node_pool_id")
    @node_pool_id.setter
    def node_pool_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_pool_id", value)
    @property
    @pulumi.getter(name="privateIp")
    def private_ip(self) -> Optional[pulumi.Input[str]]:
        """
        The private IP address of this node.
        """
        return pulumi.get(self, "private_ip")
    @private_ip.setter
    def private_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip", value)
    @property
    @pulumi.getter(name="publicIp")
    def public_ip(self) -> Optional[pulumi.Input[str]]:
        """
        The public IP address of this node.
        """
        return pulumi.get(self, "public_ip")
    @public_ip.setter
    def public_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "public_ip", value)
    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        The state of the node.
        """
        return pulumi.get(self, "state")
    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The OCID of the subnet in which to place nodes.
        """
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class NodePoolNodeConfigDetailsArgs:
    """Pulumi input type for node-pool sizing, placement and NSG configuration."""
    def __init__(__self__, *,
                 placement_configs: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigDetailsPlacementConfigArgs']]],
                 size: pulumi.Input[int],
                 nsg_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigDetailsPlacementConfigArgs']]] placement_configs: (Updatable) The placement configurations for the node pool. Provide one placement configuration for each availability domain in which you intend to launch a node.
        :param pulumi.Input[int] size: (Updatable) The number of nodes that should be in the node pool.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] nsg_ids: (Updatable) The OCIDs of the Network Security Group(s) to associate nodes for this node pool with. For more information about NSGs, see [NetworkSecurityGroup](https://docs.cloud.oracle.com/iaas/api/#/en/iaas/20160918/NetworkSecurityGroup/).
        """
        pulumi.set(__self__, "placement_configs", placement_configs)
        pulumi.set(__self__, "size", size)
        if nsg_ids is not None:
            pulumi.set(__self__, "nsg_ids", nsg_ids)
    @property
    @pulumi.getter(name="placementConfigs")
    def placement_configs(self) -> pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigDetailsPlacementConfigArgs']]]:
        """
        (Updatable) The placement configurations for the node pool. Provide one placement configuration for each availability domain in which you intend to launch a node.
        """
        return pulumi.get(self, "placement_configs")
    @placement_configs.setter
    def placement_configs(self, value: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigDetailsPlacementConfigArgs']]]):
        pulumi.set(self, "placement_configs", value)
    @property
    @pulumi.getter
    def size(self) -> pulumi.Input[int]:
        """
        (Updatable) The number of nodes that should be in the node pool.
        """
        return pulumi.get(self, "size")
    @size.setter
    def size(self, value: pulumi.Input[int]):
        pulumi.set(self, "size", value)
    @property
    @pulumi.getter(name="nsgIds")
    def nsg_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        (Updatable) The OCIDs of the Network Security Group(s) to associate nodes for this node pool with. For more information about NSGs, see [NetworkSecurityGroup](https://docs.cloud.oracle.com/iaas/api/#/en/iaas/20160918/NetworkSecurityGroup/).
        """
        return pulumi.get(self, "nsg_ids")
    @nsg_ids.setter
    def nsg_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "nsg_ids", value)
@pulumi.input_type
class NodePoolNodeConfigDetailsPlacementConfigArgs:
    """Pulumi input type pairing an availability domain with the subnet for its nodes."""
    def __init__(__self__, *,
                 availability_domain: pulumi.Input[str],
                 subnet_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] availability_domain: (Updatable) The availability domain in which to place nodes. Example: `Uocm:PHX-AD-1`
        :param pulumi.Input[str] subnet_id: (Updatable) The OCID of the subnet in which to place nodes.
        """
        pulumi.set(__self__, "availability_domain", availability_domain)
        pulumi.set(__self__, "subnet_id", subnet_id)
    @property
    @pulumi.getter(name="availabilityDomain")
    def availability_domain(self) -> pulumi.Input[str]:
        """
        (Updatable) The availability domain in which to place nodes. Example: `Uocm:PHX-AD-1`
        """
        return pulumi.get(self, "availability_domain")
    @availability_domain.setter
    def availability_domain(self, value: pulumi.Input[str]):
        pulumi.set(self, "availability_domain", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> pulumi.Input[str]:
        """
        (Updatable) The OCID of the subnet in which to place nodes.
        """
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class NodePoolNodeErrorArgs:
    """Pulumi input type for an upstream error attached to a node (code, message, HTTP status)."""
    def __init__(__self__, *,
                 code: Optional[pulumi.Input[str]] = None,
                 message: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] code: A short error code that defines the upstream error, meant for programmatic parsing. See [API Errors](https://docs.cloud.oracle.com/iaas/Content/API/References/apierrors.htm).
        :param pulumi.Input[str] message: A human-readable error string of the upstream error.
        :param pulumi.Input[str] status: The status of the HTTP response encountered in the upstream error.
        """
        if code is not None:
            pulumi.set(__self__, "code", code)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter
    def code(self) -> Optional[pulumi.Input[str]]:
        """
        A short error code that defines the upstream error, meant for programmatic parsing. See [API Errors](https://docs.cloud.oracle.com/iaas/Content/API/References/apierrors.htm).
        """
        return pulumi.get(self, "code")
    @code.setter
    def code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "code", value)
    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        """
        A human-readable error string of the upstream error.
        """
        return pulumi.get(self, "message")
    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the HTTP response encountered in the upstream error.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class NodePoolNodeShapeConfigArgs:
    """Pulumi input type for flexible-shape sizing (memory and OCPUs) of node-pool nodes."""
    def __init__(__self__, *,
                 memory_in_gbs: Optional[pulumi.Input[float]] = None,
                 ocpus: Optional[pulumi.Input[float]] = None):
        """
        :param pulumi.Input[float] memory_in_gbs: (Updatable) The total amount of memory available to each node, in gigabytes.
        :param pulumi.Input[float] ocpus: (Updatable) The total number of OCPUs available to each node in the node pool. See [here](https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Shape/) for details.
        """
        if memory_in_gbs is not None:
            pulumi.set(__self__, "memory_in_gbs", memory_in_gbs)
        if ocpus is not None:
            pulumi.set(__self__, "ocpus", ocpus)
    @property
    @pulumi.getter(name="memoryInGbs")
    def memory_in_gbs(self) -> Optional[pulumi.Input[float]]:
        """
        (Updatable) The total amount of memory available to each node, in gigabytes.
        """
        return pulumi.get(self, "memory_in_gbs")
    @memory_in_gbs.setter
    def memory_in_gbs(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "memory_in_gbs", value)
    @property
    @pulumi.getter
    def ocpus(self) -> Optional[pulumi.Input[float]]:
        """
        (Updatable) The total number of OCPUs available to each node in the node pool. See [here](https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Shape/) for details.
        """
        return pulumi.get(self, "ocpus")
    @ocpus.setter
    def ocpus(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "ocpus", value)
@pulumi.input_type
class NodePoolNodeSourceArgs:
    """Pulumi input type describing the boot source (image) of node-pool nodes."""
    def __init__(__self__, *,
                 image_id: Optional[pulumi.Input[str]] = None,
                 source_name: Optional[pulumi.Input[str]] = None,
                 source_type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] image_id: (Updatable) The OCID of the image used to boot the node.
        :param pulumi.Input[str] source_name: The user-friendly name of the entity corresponding to the OCID.
        :param pulumi.Input[str] source_type: (Updatable) The source type for the node. Use `IMAGE` when specifying an OCID of an image.
        """
        if image_id is not None:
            pulumi.set(__self__, "image_id", image_id)
        if source_name is not None:
            pulumi.set(__self__, "source_name", source_name)
        if source_type is not None:
            pulumi.set(__self__, "source_type", source_type)
    @property
    @pulumi.getter(name="imageId")
    def image_id(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The OCID of the image used to boot the node.
        """
        return pulumi.get(self, "image_id")
    @image_id.setter
    def image_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image_id", value)
    @property
    @pulumi.getter(name="sourceName")
    def source_name(self) -> Optional[pulumi.Input[str]]:
        """
        The user-friendly name of the entity corresponding to the OCID.
        """
        return pulumi.get(self, "source_name")
    @source_name.setter
    def source_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_name", value)
    @property
    @pulumi.getter(name="sourceType")
    def source_type(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The source type for the node. Use `IMAGE` when specifying an OCID of an image.
        """
        return pulumi.get(self, "source_type")
    @source_type.setter
    def source_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_type", value)
@pulumi.input_type
class NodePoolNodeSourceDetailsArgs:
    """Pulumi input type for a node pool's boot-source details (required image/type, optional boot volume size)."""
    def __init__(__self__, *,
                 image_id: pulumi.Input[str],
                 source_type: pulumi.Input[str],
                 boot_volume_size_in_gbs: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] image_id: (Updatable) The OCID of the image used to boot the node.
        :param pulumi.Input[str] source_type: (Updatable) The source type for the node. Use `IMAGE` when specifying an OCID of an image.
        :param pulumi.Input[str] boot_volume_size_in_gbs: (Updatable) The size of the boot volume in GBs. Minimum value is 50 GB. See [here](https://docs.cloud.oracle.com/en-us/iaas/Content/Block/Concepts/bootvolumes.htm) for max custom boot volume sizing and OS-specific requirements.
        """
        pulumi.set(__self__, "image_id", image_id)
        pulumi.set(__self__, "source_type", source_type)
        if boot_volume_size_in_gbs is not None:
            pulumi.set(__self__, "boot_volume_size_in_gbs", boot_volume_size_in_gbs)
    @property
    @pulumi.getter(name="imageId")
    def image_id(self) -> pulumi.Input[str]:
        """
        (Updatable) The OCID of the image used to boot the node.
        """
        return pulumi.get(self, "image_id")
    @image_id.setter
    def image_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "image_id", value)
    @property
    @pulumi.getter(name="sourceType")
    def source_type(self) -> pulumi.Input[str]:
        """
        (Updatable) The source type for the node. Use `IMAGE` when specifying an OCID of an image.
        """
        return pulumi.get(self, "source_type")
    @source_type.setter
    def source_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "source_type", value)
    @property
    @pulumi.getter(name="bootVolumeSizeInGbs")
    def boot_volume_size_in_gbs(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The size of the boot volume in GBs. Minimum value is 50 GB. See [here](https://docs.cloud.oracle.com/en-us/iaas/Content/Block/Concepts/bootvolumes.htm) for max custom boot volume sizing and OS-specific requirements.
        """
        return pulumi.get(self, "boot_volume_size_in_gbs")
    @boot_volume_size_in_gbs.setter
    def boot_volume_size_in_gbs(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "boot_volume_size_in_gbs", value)
@pulumi.input_type
class GetClustersFilterArgs:
    """Filter for the clusters data source: a field ``name``, candidate ``values``,
    and an optional ``regex`` flag (presumably enabling regular-expression
    matching of the values -- not documented here; confirm against provider docs)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: The name to filter on.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name to filter on.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: str):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")
    @values.setter
    def values(self, value: Sequence[str]):
        pulumi.set(self, "values", value)
    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
    @regex.setter
    def regex(self, value: Optional[bool]):
        pulumi.set(self, "regex", value)
@pulumi.input_type
class GetNodePoolsFilterArgs:
    """Filter for the node-pools data source: a field ``name``, candidate ``values``,
    and an optional ``regex`` flag (presumably enabling regular-expression
    matching -- confirm against provider docs)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: The name to filter on.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name to filter on.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: str):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")
    @values.setter
    def values(self, value: Sequence[str]):
        pulumi.set(self, "values", value)
    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
    @regex.setter
    def regex(self, value: Optional[bool]):
        pulumi.set(self, "regex", value)
@pulumi.input_type
class GetWorkRequestErrorsFilterArgs:
    """Filter for the work-request-errors data source: field ``name``,
    candidate ``values`` and an optional ``regex`` flag."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: str):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")
    @values.setter
    def values(self, value: Sequence[str]):
        pulumi.set(self, "values", value)
    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
    @regex.setter
    def regex(self, value: Optional[bool]):
        pulumi.set(self, "regex", value)
@pulumi.input_type
class GetWorkRequestLogEntriesFilterArgs:
    """Filter for the work-request-log-entries data source: field ``name``,
    candidate ``values`` and an optional ``regex`` flag."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: str):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")
    @values.setter
    def values(self, value: Sequence[str]):
        pulumi.set(self, "values", value)
    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
    @regex.setter
    def regex(self, value: Optional[bool]):
        pulumi.set(self, "regex", value)
@pulumi.input_type
class GetWorkRequestsFilterArgs:
    """Filter for the work-requests data source: field ``name``,
    candidate ``values`` and an optional ``regex`` flag."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: str):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")
    @values.setter
    def values(self, value: Sequence[str]):
        pulumi.set(self, "values", value)
    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
    @regex.setter
    def regex(self, value: Optional[bool]):
        pulumi.set(self, "regex", value)
| StarcoderdataPython |
6625008 | <filename>TkiWrapper/logger.py
from TkiWrapper.Settings import Settings
from Namespace.Namespace import Namespace
from datetime import datetime
class LogIssuer:
    """Mixin that tags an object with the identifying metadata printLog expects."""
    def setIssuerData(self):
        # Attach scope, class name and an uppercase hex object id; return self
        # so the call can be chained onto construction.
        ident = '%X' % id(self)
        self.__logIssuerData__ = Namespace(
            scope='tki',
            name=type(self).__name__,
            id=ident,
        )
        return self
def printLog(level, issuer, *message):
    """Print one timestamped log line for *issuer* at severity *level*.

    :param level: one of 'Debug', 'Info', 'Note', 'Warn', 'Error'
                  (raises ValueError for anything else via list.index).
    :param issuer: an object that has called LogIssuer.setIssuerData().
    :param message: arbitrary values forwarded to print().
    :raises AttributeError: if *issuer* carries no __logIssuerData__.
    """
    if not Settings.enableLogs: return
    timestamp = datetime.now().strftime('%I:%M:%S')
    levels = ['Debug', 'Info', 'Note', 'Warn', 'Error']
    levelNo = levels.index(level)
    # Drop messages below the configured threshold level.
    if levelNo < levels.index(Settings.logLevel): return
    # Visual severity marker: more '+' characters = more severe, padded to 4.
    lvlPrefix = '+'*levelNo + ' '*(4-levelNo)
    try:
        issuer = issuer.__logIssuerData__
    # Bug fix: was a bare `except:` which also swallowed KeyboardInterrupt /
    # SystemExit before re-raising; only a missing attribute is expected here.
    except AttributeError:
        print('LOG ISSUER NOT SPECIFIED')
        raise
    print(f'@{timestamp} [{lvlPrefix}] <{issuer.id} {issuer.scope}:{issuer.name}>', *message)
def Debug(issuer, *message):
    # Convenience wrapper: log *message* at 'Debug' severity (lowest).
    printLog('Debug', issuer, *message)
def Info(issuer, *message):
    # Convenience wrapper: log *message* at 'Info' severity.
    printLog('Info', issuer, *message)
def Note(issuer, *message):
    # Convenience wrapper: log *message* at 'Note' severity.
    printLog('Note', issuer, *message)
def Warn(issuer, *message):
    # Convenience wrapper: log *message* at 'Warn' severity.
    printLog('Warn', issuer, *message)
def Error(issuer, *message):
    # Convenience wrapper: log *message* at 'Error' severity (highest).
    printLog('Error', issuer, *message)
| StarcoderdataPython |
9754856 | <gh_stars>1-10
"""""
Old BRL UTIL code. Temporary Trash codes.
"""""
import sys
sys.path.insert(0,'/usr/local/lib/python2.7/site-packages')
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from scipy.stats import norm
import pdb
from matplotlib import cm
from operator import itemgetter
# ANSI SGR foreground color codes; the "highlight" (background) variant of a
# color is obtained by adding 10 to its code.
color2num = dict(
    gray=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    magenta=35,
    cyan=36,
    white=37,
    crimson=38
)
def colorize(string, color, bold=False, highlight = False):
    """Return *string* wrapped in ANSI escape codes for terminal coloring.

    :param string: the text to colorize.
    :param color: a key of ``color2num`` (raises KeyError otherwise).
    :param bold: if True, add the bold SGR attribute.
    :param highlight: if True, use the background variant of the color.
    """
    attr = []
    num = color2num[color]
    if highlight: num += 10
    # Bug fix: the original used unicode(num), a Python-2-only builtin that
    # raises NameError on Python 3; str() is equivalent on both.
    attr.append(str(num))
    if bold: attr.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
class feat_vec(object):
    """Callable producing a one-hot (indicator) feature vector of length ``snum``."""
    def __init__(self, snum):
        # Total number of discrete states, i.e. the vector length.
        self.snum = snum
    def __call__(self, state):
        """Return a float32 vector with 1.0 at index ``state`` and 0.0 elsewhere."""
        one_hot = np.zeros(self.snum, dtype=np.float32)
        one_hot[state] = 1.0
        return one_hot
def mean_max_graph(mu,test_var):
    """Plot the max-Gaussian mean surface over discount factors and variances.

    Figure 1: 3D surface/wireframe of the computed mean vs. (discount, variance),
    with a flat reference plane at ``mu``. Figure 2: a 2D slice at ``test_var``.

    :param mu: Gaussian mean used in the closed-form expression below.
    :param test_var: variance to slice on; must be one of the hard-coded grid
        values, otherwise ValueError is raised.
    """
    d = np.arange(0.1,1.0,0.1)
    var = np.array([0.1,1.0,10.0,20.0,30.0,40.0,50.0,80.0,100.0])
    # NOTE(review): membership and the == comparison below rely on exact float
    # equality with the hard-coded grid values.
    if not(test_var in var):
        raise ValueError('the input variance value does not exist')
    # Find the row index of test_var for the 2D slice plot.
    for (i,v) in enumerate(var):
        if test_var == v:
            idx = i
    r = len(var)
    c = len(d)
    d,var = np.meshgrid(d,var)
    # Closed-form "bar" parameters (same algebra as mean_max below).
    mu_bar = d*(1+d)/(1+d**2)*mu
    var_bar = d**2/(1+d**2)*var
    v = np.sqrt(var*d**2 + var_bar)
    mean = np.zeros(d.shape)
    for i in range(r):
        for j in range(c):
            mean[i,j] = mu_bar[i,j] + var_bar[i,j]*norm.pdf(mu_bar[i,j],d[i,j]*mu, v[i,j])/norm.cdf(mu_bar[i,j],d[i,j]*mu,v[i,j])
    fig = plt.figure(1)
    # NOTE(review): gca(projection='3d') is a removed API in matplotlib >= 3.6;
    # confirm the pinned matplotlib version supports it.
    ax = fig.gca(projection='3d')
    surf = ax.plot_surface(d, var, mu*np.ones(d.shape), color='r', linewidth=0, antialiased=False)
    ax.plot_wireframe(d, var, mean)#, rstride=10, cstride=10)
    plt.figure(2)
    plt.plot(d[idx,:],mean[idx,:])
    plt.plot(d[idx,:],mu*np.ones((d[idx,:]).shape),'r')
    plt.title("For Mean="+str(mu)+" and Variance="+str(test_var))
    plt.xlabel("discount factor")
    plt.ylabel("Mean Value")
    plt.show()
def mean_max(mu, var, d):
    """Closed-form mean of the max-Gaussian quantity for a single Gaussian
    with mean ``mu`` and variance ``var`` under discount factor ``d``.
    """
    # Combined ("bar") parameters of the discounted belief.
    denom = 1 + d**2
    mu_bar = d*(1+d)/denom*mu
    var_bar = d**2/denom*var
    # Sanity check from the original author (kept as a comment): integrating
    #   norm.pdf(x, mu_bar, sqrt(var_bar)) * norm.cdf(x, d*mu, d*sqrt(var))
    # over x must equal Z = norm.cdf(mu_bar, d*mu, combined_sd).
    combined_sd = np.sqrt(var*d**2 + var_bar)
    # Mean of the truncated/weighted Gaussian: mu_bar plus the hazard-ratio
    # correction term.
    correction = var_bar * norm.pdf(mu_bar, d*mu, combined_sd) / norm.cdf(mu_bar, d*mu, combined_sd)
    return mu_bar + correction
def maxGaussian(means, sds):
    """
    Numerically compute the mean and variance of the maximum of independent
    Gaussians, via grid integration of the max density
        p_max(x) = sum_i pdf_i(x) * prod_{j != i} cdf_j(x).
    INPUT:
        means: a numpy array of Gaussian mean values of (next state, action) pairs for all available actions.
        sds: a numpy array of Gaussian SD values of (next state,action) pairs for all available actions.
    obs:
        mean and variance of the distribution
    """
    # Fixed grid over +/- 6 SDs of the widest component.
    num_interval = 500
    interval = 12.0*max(sds)/float(num_interval)
    x = np.arange(min(means-6.0*sds),max(means+6.0*sds),interval)
    # Floor on the CDF values to avoid division by ~0 in the tails.
    eps = 1e-5*np.ones(x.shape) # 501X1
    max_p = np.zeros(x.shape) # 501X1
    cdfs = [np.maximum(eps,norm.cdf(x,means[i], sds[i])) for i in range(len(means))]
    # Accumulate pdf_i / cdf_i, then multiply by the product of all cdfs --
    # algebraically equal to sum_i pdf_i * prod_{j != i} cdf_j.
    for i in range(len(means)):
        max_p += norm.pdf(x,means[i],sds[i])/cdfs[i]
    max_p*=np.prod(np.array(cdfs),0)
    z = np.sum(max_p)*interval
    max_p = max_p/z # Normalization
    #plt.figure(1)
    #plt.plot(x,max_p,"bo")
    #plt.show()
    # First and second moments by Riemann sum on the grid.
    max_mean = np.inner(x,max_p)*interval
    return max_mean,np.inner(x**2,max_p)*interval- max_mean**2
def posterior_numeric_old(n_means, n_vars, c_mean, c_var, rew, dis, terminal, num_interval= 800, width = 16.0):
    """Older numeric ADF posterior update with adaptive grid shifting.

    Returns (mean, variance, (x, prob)) of the posterior. The terminal branch
    is a plain Gaussian product update with the reward observation.
    NOTE(review): REW_VAR is a module-level constant defined elsewhere in this
    file (not visible here) -- presumably the reward observation variance.
    """
    if terminal:
        new_var = 1.0/(1.0/c_var + 1.0/REW_VAR)
        new_sd = np.sqrt(new_var)
        new_mean = new_var*(c_mean/c_var + rew/REW_VAR)
        interval = width*new_sd/float(num_interval)
        x = np.arange(new_mean-0.5*width*new_sd,new_mean+0.5*width*new_sd, interval)
        return new_mean, new_var, (x, norm.pdf(x, new_mean, new_sd))
    """
    Designed for ADF approach
    INPUT:
        n_means: a numpy array of Gaussian mean values of (next state, action) pairs for all available actions.
        n_sds: a numpy array of Gaussian SD values of (next state,action) pairs for all available actions.
        c_mean: a mean value of the current state and action
        c_sd: a SD value of the current state and action
    obs:
        mean and variance of the joint distribution
    """
    # Grid-shift step used when probability mass leaks off either end.
    delta = 0.05
    # TD targets and the Gaussian-product ("bar") parameters per action.
    target_means = rew + dis*np.array(n_means, dtype=np.float32)
    target_vars = dis*dis*np.array(n_vars, dtype = np.float32)
    bar_vars = 1.0/(1.0/c_var + 1.0/target_vars)
    bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
    # Mixture log-weights; B switches between min- and max-shifting of the
    # exponent to keep np.exp from under/overflowing.
    c_tmp = -(c_mean-target_means)**2/2.0/(c_var + target_vars)
    c_min = np.amin(c_tmp, axis=0)
    c_max = np.amax(c_tmp, axis=0)
    B = float(c_max-c_min < 50.0)
    weights = np.exp(c_tmp - c_min*B - c_max*(1-B))/np.sqrt(c_var+target_vars)
    weights = weights/np.sum(weights, axis =0)
    # Initial integration grid spanning all component means +/- width/2 SDs.
    mean_range = np.concatenate((target_means,[c_mean]))
    sd_range = np.sqrt(np.concatenate((dis*dis*n_vars, [c_var])))
    interval = width*max(sd_range)/float(num_interval)
    x = np.arange(min(mean_range-0.5*width*sd_range), max(mean_range+0.5*width*sd_range), interval)
    count = 0
    done = False
    # Re-evaluate the density, shifting the grid left/right until no
    # significant mass sits at either boundary.
    while(not(done)):
        count += 1
        eps = 1e-5*np.ones(x.shape)
        prob = np.zeros(x.shape)
        cdfs = []
        for i in range(len(n_means)):
            cdfs.append(np.maximum(eps,norm.cdf(x, target_means[i], np.sqrt(target_vars[i]))))
            prob += weights[i] * norm.pdf(x,bar_means[i],np.sqrt(bar_vars[i])) / cdfs[i]
        prob*=np.prod(np.array(cdfs),axis=0)
        z = np.sum(prob)*interval
        prob = prob/z # Normalization
        # NOTE(review): leftover debugging -- after 50 failed shifts this
        # plots the density and drops into pdb.
        if count > 50:
            delta += 0.01
            plt.plot(x,prob,'bx'); plt.show();
            pdb.set_trace()
        if (prob[0] > 0.1):
            x = np.arange(x[0]-delta, x[-1]-delta, interval)
        elif (prob[-1]>0.1):
            x = np.arange(x[0]+delta, x[-1]+delta, interval)
        else:
            done = True
    # Moments of the normalized grid density.
    max_mean = np.inner(x,prob)*interval
    max_var = np.inner(x**2,prob)*interval- max_mean**2
    return max_mean ,max_var, (x,prob)
## Moved to here on April 27th.
def posterior_numeric(n_means, n_vars, c_mean, c_var, rew, dis, terminal, num_interval=500, width = 6.0):
    """ADFQ-Numeric posterior update (single transition; not for batches).

    Returns (mean, variance, (x, prob)) of the posterior belief over the
    Q-value. NOTE(review): REW_VAR and logsumexp are module-level names
    defined elsewhere in this file -- presumably the reward-observation
    variance and scipy's logsumexp; confirm.
    """
    if terminal:
        # Terminal transition: plain Gaussian product of the current belief
        # with the reward observation.
        new_var = 1.0/(1.0/c_var + 1.0/REW_VAR)
        new_sd = np.sqrt(new_var)
        new_mean = new_var*(c_mean/c_var + rew/REW_VAR)
        interval = width*new_sd/float(num_interval)
        x = np.arange(new_mean-0.5*width*new_sd,new_mean+0.5*width*new_sd, interval)
        return new_mean, new_var, (x, norm.pdf(x, new_mean, new_sd))
    # Bug fix: an unconditional pdb.set_trace() was left here, which dropped
    # every non-terminal call into the debugger. Removed.
    # TD targets and Gaussian-product ("bar") parameters per action.
    target_means = rew + dis*np.array(n_means, dtype=np.float32)
    target_vars = dis*dis*np.array(n_vars, dtype = np.float32)
    bar_vars = 1.0/(1.0/c_var + 1.0/target_vars)
    bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
    add_vars = c_var+target_vars
    # Integration grid spanning all component means +/- width/2 SDs.
    mean_range = np.concatenate((target_means,[c_mean]))
    sd_range = np.sqrt(np.concatenate((dis*dis*n_vars, [c_var])))
    interval = width*max(sd_range)/float(num_interval)
    x = np.arange(min(mean_range-0.5*width*sd_range), max(mean_range+0.5*width*sd_range), interval)
    eps = 1e-5*np.ones(x.shape)
    # Log-domain evaluation of the posterior density (log-sum-exp over the
    # per-action mixture components) for numerical stability.
    log_prob = np.sum([np.log(np.maximum(eps,norm.cdf(x,target_means[i], np.sqrt(target_vars[i])))) for i in range(len(n_means))], axis=0) \
        + logsumexp([-0.5*np.log(add_vars[i]) \
        -0.5*(c_mean-target_means[i])**2/add_vars[i]-0.5*np.log(bar_vars[i]) \
        -0.5*(x-bar_means[i])**2/bar_vars[i] - np.log(np.maximum(eps, norm.cdf(x,target_means[i],np.sqrt(target_vars[i])))) for i in range(len(n_means))], axis=0)
    prob = np.exp(log_prob-max(log_prob))
    prob = prob/(interval*np.sum(prob))
    # Moments of the normalized grid density.
    new_mean = interval*np.inner(x, prob)
    new_var = interval*np.inner((x-new_mean)**2, prob)
    return new_mean, new_var, (x, prob)
def posterior_approx(n_means, n_vars, c_mean, c_var, rew, dis, terminal, hard_approx = True, varTH = 1e-3, batch=False):
    """Moment-matched (ADF) posterior update for Gaussian Q-beliefs.

    NOTE(review): this definition is shadowed by a second `posterior_approx`
    later in this file; only the later one is live at runtime — confirm which
    version is intended. `varTH` is accepted but never used here.
    Returns (mean_new, var_new, (bar_means, bar_vars, weights)).
    """
    if batch:
        # Reshape the batch inputs to column vectors so they broadcast
        # against the (batch_size x anum) target arrays below.
        batch_size = len(n_means)
        c_mean = np.reshape(c_mean, (batch_size,1))
        c_var = np.reshape(c_var, (batch_size,1))
        rew = np.reshape(rew, (batch_size,1))
        terminal = np.reshape(terminal, (batch_size,1))
    target_means = rew + dis*np.array(n_means, dtype=np.float32)
    target_vars = dis*dis*np.array(n_vars, dtype = np.float32)
    # Product of the current belief with each per-action target Gaussian.
    bar_vars = 1.0/(1.0/c_var + 1.0/target_vars)
    bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
    add_vars = c_var+target_vars
    log_weights = -0.5*np.log(add_vars) -(c_mean-target_means)**2/2.0/add_vars #(batch_size X anum)
    # Normalise the mixture weights via a max-shifted exponentiation
    # (log-sum-exp trick) for numerical stability.
    weights = np.exp(log_weights - np.max(log_weights, axis=int(batch), keepdims=batch))
    weights = weights/np.sum(weights, axis=int(batch))
    mean_new = np.sum(weights*bar_means, axis = int(batch))
    if hard_approx :
        # Hard approximation: weighted average of component variances only.
        var_new = np.sum(weights*bar_vars, axis=int(batch))
    else:
        # Full moment match: mixture second moment minus squared mean.
        var_new = np.dot(weights,bar_vars+bar_means**2) - mean_new**2
    # Terminal transitions collapse to the product with the reward
    # likelihood N(rew, REW_VAR) instead of the bootstrap mixture.
    var_new = (1.-terminal)*var_new + terminal*1./(1./c_var + 1./REW_VAR)
    mean_new = (1.-terminal)*mean_new + terminal*var_new*(c_mean/c_var + rew/REW_VAR)
    if np.isnan(mean_new).any() or np.isnan(var_new).any():
        pdb.set_trace()  # debugging hook left in place: halts on NaN output
    return mean_new, var_new, (bar_means, bar_vars, weights)
def maxGaussian_adf(n_means, n_vars, c_mean, c_var, rew, dis, num_interval=300, width = 16.0):
    # Numerically evaluate the density of the max of Gaussians combined with
    # the current belief, adaptively shifting the grid until the density mass
    # lies inside it. Returns (max_mean, max_var, (x, prob)).
    c_sd = np.sqrt(c_var)
    n_sds = np.sqrt(n_vars)
    """
    Designed for ADF approach
    INPUT:
    n_means: a numpy array of Gaussian mean values of (next state, action) pairs for all available actions.
    n_sds: a numpy array of Gaussian SD values of (next state,action) pairs for all available actions.
    c_mean: a mean value of the current state and action
    c_sd: a SD value of the current state and action
    obs:
    mean and variance of the joint distribution
    """
    delta = 0.05  # grid shift step used when mass falls off an edge
    target_means = rew + dis*np.array(n_means, dtype=np.float32)
    target_vars = dis*dis*np.array(n_vars, dtype = np.float32)
    # Products of the current belief with each per-action target Gaussian.
    bar_vars = 1.0/(1.0/c_var + 1.0/target_vars)
    bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
    # Mixture weights, exponentiated around the anchor (min or max) that
    # keeps the exponent in a representable range.
    c_tmp = -(c_mean-target_means)**2/2.0/(c_var + target_vars)
    c_min = np.amin(c_tmp, axis=0)
    c_max = np.amax(c_tmp, axis=0)
    B = float(c_max-c_min < 50.0)
    weights = np.exp(c_tmp - c_min*B - c_max*(1-B))/np.sqrt(c_var+target_vars)
    weights = weights/np.sum(weights, axis =0)
    interval = width*max(np.sqrt(bar_vars))/float(num_interval)
    x = np.arange(min(bar_means-0.5*width*np.sqrt(bar_vars)),max(bar_means + 0.5*width*np.sqrt(bar_vars)),interval) # 501X1
    count = 0
    done = False
    while(not(done)):
        count += 1
        eps = 1e-5*np.ones(x.shape)  # floor for the cdf terms to avoid /0
        prob = np.zeros(x.shape)
        cdfs = []
        # Density of max-of-Gaussians: sum_i w_i * pdf_i * prod_{j!=i} cdf_j,
        # computed as (sum_i w_i pdf_i / cdf_i) * prod_j cdf_j below.
        for i in range(len(n_means)):
            cdfs.append(np.maximum(eps,norm.cdf(x, target_means[i], np.sqrt(target_vars[i]))))
            prob += weights[i] * norm.pdf(x,bar_means[i],np.sqrt(bar_vars[i])) / cdfs[i]
        prob*=np.prod(np.array(cdfs),axis=0)
        z = np.sum(prob)*interval
        prob = prob/z # Normalization
        if count > 50:
            # Not converging: widen the shift step and drop into an
            # interactive debugger (debugging hook left in place).
            delta += 0.01
            plt.plot(x,prob,'bx'); plt.show();
            pdb.set_trace()
        if (prob[0] > 0.1):
            # Mass leaks off the left edge: shift the grid left.
            x = np.arange(x[0]-delta, x[-1]-delta, interval)
        elif (prob[-1]>0.1):
            # Mass leaks off the right edge: shift the grid right.
            x = np.arange(x[0]+delta, x[-1]+delta, interval)
        else:
            done = True
    max_mean = np.inner(x,prob)*interval
    max_var = np.inner(x**2,prob)*interval- max_mean**2
    return max_mean ,max_var, (x,prob)
def maxGaussian_smVar(n_means, n_vars, c_mean, c_var, rew, dis, bias_rate = 0.0):
    # Small-variance analytic approximation of the max-Gaussian update.
    # Returns (mean_new, var_new) with var_new floored at 0.0001.
    #k = np.argmax(n_means)
    #max_mean, max_var, _ = prod_of2Gaussian(c_mean, c_var, rew+dis*n_means[k], dis*dis*n_vars[k]) dt
    anum = len(n_means)
    bars = []    # per-action (mean, bias-adjusted variance) of belief products
    w_vals = []  # per-action log-weight exponents
    for i in range(anum):
        tmp_mean = rew+dis*n_means[i]
        tmp_var = dis*dis*n_vars[i]
        m,v, _ = prod_of2Gaussian(c_mean, c_var, tmp_mean, tmp_var)
        #bias_rate = norm.cdf(c_mean,tmp_mean,np.sqrt(c_var+tmp_var))
        # Variance inflated by the bias term; with bias_rate=0 this is just v.
        bars.append( (m, 1/(1/v- 0.1*bias_rate/c_var) ) )
        w_vals.append(-((c_mean-tmp_mean)**2)/2/(c_var+tmp_var))
    min_val = min(w_vals)
    max_val = max(w_vals)
    # Anchor the exponentiation at min or max depending on spread, to keep
    # np.exp in a representable range.
    if max_val-min_val < 500:
        weights = [np.exp(w_vals[i]-min_val)/np.sqrt(c_var+dis*dis*n_vars[i]) for i in range(anum)]
    else:
        weights = [np.exp(w_vals[i]-max_val)/np.sqrt(c_var+dis*dis*n_vars[i]) for i in range(anum)]
    mean_new = sum([weights[i]*bars[i][0] for i in range(anum)])/float(sum(weights))
    if np.isnan(mean_new):
        pdb.set_trace()  # debugging hook left in place: halts on NaN mean
    return mean_new, max(0.0001,sum([weights[i]*bars[i][1] for i in range(anum)])/float(sum(weights)))
def posterior_approx(n_means, n_vars, c_mean, c_var, rew, dis, terminal, hard_approx = True, varTH = 1e-3, batch=False):
if batch:
batch_size = len(n_means)
c_mean = np.reshape(c_mean, (batch_size,1))
c_var = np.reshape(c_var, (batch_size,1))
rew = np.reshape(rew, (batch_size,1))
terminal = np.reshape(terminal, (batch_size,1))
target_means = rew + dis*np.array(n_means, dtype=np.float32)
target_vars = dis*dis*np.array(n_vars, dtype = np.float32)
bar_vars = 1.0/(1.0/c_var + 1.0/target_vars)
bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
add_vars = c_var+target_vars
c_tmp = -(c_mean-target_means)**2/2.0/add_vars
c_min = np.amin(c_tmp, axis=int(batch), keepdims = batch)
c_max = np.amax(c_tmp, axis=int(batch), keepdims = batch)
B = (c_max - c_min < 50.0).astype(np.float32) if batch else float(c_max-c_min < 50.0)
weights = np.exp(c_tmp - c_min*B - c_max*(1-B))/np.sqrt(add_vars)
weights = weights/np.sum(weights, axis = int(batch))
#Z = np.sum(weights, axis = int(batch))
log_weights = -0.5*np.log(add_vars) -(c_mean-target_means)**2/2.0/add_vars #(batch_size X anum)
weights02 = np.exp(log_weights - np.max(log_weights, axis=int(batch), keepdims=batch))
weights02 = weights/np.sum(weights, axis=int(batch))
if(weights.astype(np.float16) != weights02.astype(np.float16)).any():
print(weights)
print(weights02)
pdb.set_trace()
mean_new = np.sum(weights*bar_means, axis = int(batch))
if hard_approx :
var_new = np.sum(weights*bar_vars, axis=int(batch))
else:
var_new = np.dot(weights,bar_vars+bar_means**2) - mean_new**2
var_new = (1.-terminal)*var_new + terminal*1./(1./c_var + 1.)
mean_new = (1.-terminal)*mean_new + terminal*var_new*(c_mean/c_var + rew)
if np.isnan(mean_new).any() or np.isnan(var_new).any():
pdb.set_trace()
return mean_new, var_new
def maxGaussian_plot(mean_init, var_init, anum, rew, dis):
    """Plot a visual comparison of the max-Gaussian posterior approximations.

    Compares the numeric ADF density against the analytic approximations
    (argmax product, small-variance) for a hard-coded set of Q-beliefs.
    The `var_init`/`anum` randomised initialisations are left commented out.
    """
    mean = np.array([10,20,mean_init])
    var = np.array([0.01,0.01,0.01])#,0.01,0.01])
    #var = var_init*np.ones((1,anum+1))[0]
    #mean = mean_init*np.ones((1,anum+1))[0]
    #var = var_init*np.ones((1,anum+1))[0]
    #mean = mean_init+np.random.rand(1,anum+1)[0]
    #var = var_init+np.random.rand(1,anum+1)[0]
    mean_bar = []
    for i in range(anum):
        m,_, _ = prod_of2Gaussian(mean[-1], var[-1], rew+dis*mean[i], dis*dis*var[i])
        mean_bar.append(m)
    print("mean:",mean)
    print("var:",var)
    print("estimated means", [rew+dis*x for x in mean[:-1]])
    print("mean bars", mean_bar)
    norm_m, norm_v, (x,p) = maxGaussian_adf(mean[:-1], var[:-1], mean[-1], var[-1], rew, dis,width=100.0)
    sm_approx_m, sm_approx_v = maxGaussian_smVar(mean[:-1], var[:-1], mean[-1], var[-1], rew, dis)
    k = np.argmax(mean[:-1])
    approx_m, approx_v , _ = prod_of2Gaussian(mean[-1], var[-1], rew+ dis*mean[k], dis*dis*var[k])
    # FIX: was a Python 2 print statement (`print approx_m, approx_v`),
    # which is a syntax error under Python 3.
    print(approx_m, approx_v)
    f1, ax1 = plt.subplots()
    ax1.plot(x,p,'r2')
    ax1.plot(x,norm.pdf(x,norm_m, np.sqrt(norm_v)),'g1')
    ax1.plot(x,norm.pdf(x,approx_m, np.sqrt(approx_v)),'bx')
    ax1.plot(x,norm.pdf(x,sm_approx_m, np.sqrt(sm_approx_v)),'k3')
    ax1.legend(['true','numeric adf', 'approx adf','approx adf sm'])
    plt.show()
def prod_of2Gaussian(mu1, var1, mu2, var2):
    """Combine two Gaussians N(mu1, var1) and N(mu2, var2) by multiplication.

    Returns:
        (mu12, var12, C12): mean and variance of the (normalised) product
        density, plus C12 = (mu2 - mu1)^2 / (var1 + var2), the squared mean
        gap scaled by the summed variances.
    """
    v1, v2 = float(var1), float(var2)
    # Precisions add under multiplication of Gaussians.
    var12 = 1 / (1 / v1 + 1 / v2)
    mu12 = (mu1 / v1 + mu2 / v2) * var12
    gap = mu2 - mu1
    C12 = gap * gap / (v1 + v2)
    return mu12, var12, C12
def draw_maze(obj, s, rew):
    """Render one 6x7 maze state with matplotlib.

    The state index `s` encodes an 8-way flag value (s % 8) and the agent's
    cell position (s // 8, column-major over a 6-row grid).
    Cell codes: 10 = wall, 3 = landmark/goal, 6 = agent.
    """
    M = np.zeros((6,7))
    walls = [(0,1),(1,1),(0,4),(1,4),(3,0),(3,1),(3,5),(3,6),(5,6)]
    for (i,j) in walls:
        M[i][j] = 10
    M[0][2] = M[5][0] = M[4][6] = 3
    v_flag = obj.num2flag(s%8)
    # FIX: use floor division — true division (`/`) yields floats under
    # Python 3 and breaks array indexing below.
    pos = s//8
    r = pos%6
    M[r][(pos-r)//6] = 6
    imgplot = plt.imshow(M)
    plt.title(str(sum(v_flag))+' '+str(sum(rew)))
    plt.draw()
    plt.pause(0.005)  # brief pause so the frame actually renders
    M[r][(pos-r)//6] = 0  # clear the agent marker for the next frame
def plot_V_pi(obj, Q):
    """Plot the greedy value function and policy of a grid-world.

    Args:
        obj: environment object exposing snum, anum, dim, name, start_pos,
            goal_pos (and idx2cell/map_img for maze-type environments).
        Q: flat Q-table, reshaped to (snum, anum).
    """
    Q = Q.reshape(obj.snum, obj.anum)
    V = np.max(Q,1)
    pi = np.argmax(Q,1)
    # FIX: the original `obj.name == ('maze' or 'minimaze')` only ever
    # compared against 'maze' — `('maze' or 'minimaze')` evaluates to 'maze'.
    if obj.name in ('maze', 'minimaze'): # For Obstacles
        Pi = 4*np.ones(obj.dim)  # 4 = "no arrow" action for unreachable cells
        grid_map = obj.map_img
        for i in range(obj.snum):
            Pi[obj.idx2cell[i]] = pi[i]
            grid_map[obj.idx2cell[i]] = V[i]
    else:
        Pi = pi.reshape(obj.dim)
        grid_map = V.reshape(obj.dim)
    plt.figure(figsize=(obj.dim[1],obj.dim[0]))
    plt.imshow(grid_map, cmap='gray', interpolation='nearest')
    ax = plt.gca()
    ax.set_xticks(np.arange(obj.dim[1]) - .5)
    ax.set_yticks(np.arange(obj.dim[0]) - .5)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    #Y, X = np.mgrid[0:obj.dim[0], 0:obj.dim[1]]
    # Action -> (u, v) arrow direction in xy coordinates (not matrix order).
    a2uv = {0: (0,1), 1: (0,-1), 2: (-1, 0), 3: (1, 0), 4:(0, 0)} # xy coordinate not matrix
    for y in range(obj.dim[0]):
        for x in range(obj.dim[1]):
            a = Pi[y, x]
            u, v = a2uv[a]
            plt.arrow(x, y, u * .3, v * .3, color='m',
                      head_width=0.1, head_length=0.1)
    plt.text(obj.start_pos[1], obj.dim[0] - 1 - obj.start_pos[0], "S",
             color='g', size=12, verticalalignment='center',
             horizontalalignment='center', fontweight='bold')
    plt.text(obj.goal_pos[1], obj.dim[0] -1 - obj.goal_pos[0], "G",
             color='g', size=12, verticalalignment='center',
             horizontalalignment='center', fontweight='bold')
    plt.grid(color='b', lw=2, ls='-')
    plt.show()
def plot_to_save(T, ys, labels, save, x_name, y_name, shadow = True, legend=(True, (1,1)), pic_name = None, colors=None,):
    """Plot learning curves (optionally with interquartile shading) and show
    or save the figure.

    Args:
        T: horizon used to build the x-axis for 3-D `ys`.
        ys: either a 2-D array (one curve per row, plotted as-is) or a 3-D
            array (trials x curves x time, summarised via iqr()).
        labels: legend label per curve.
        save: if True, write the figure to `pic_name`; otherwise show it.
        x_name, y_name: axis labels.
        shadow: shade the 25th-75th percentile band (3-D input only).
        legend: (draw_legend, bbox_anchor) pair.
        colors: matplotlib style strings; a default palette is used if None.
    """
    if not(colors):
        colors = ['r','b','g','k','c','m','y','burlywood','chartreuse','0.8','--', '-.', ':']
    plot_err = []
    f1, ax1 = plt.subplots()
    if len(ys.shape)==2:
        for (i,y) in enumerate(ys):
            tmp, = ax1.plot(y, colors[i], label=labels[i], linewidth=2.0)
            plot_err.append(tmp)
    else:
        # FIX: use floor division — `range` requires an integer step under
        # Python 3 (`T/50` is a float).
        ts = range(0,T-1,T//50)
        for (i,y) in enumerate(ys):
            m, ids25, ids75 = iqr(y)
            tmp, = ax1.plot(ts[1:] ,m, colors[i], label=labels[i], linewidth=2.0)
            plot_err.append(tmp)
            if shadow:
                ax1.fill_between(ts[1:], ids75, ids25, facecolor=colors[i], alpha=0.15)
    if legend[0]:
        ax1.legend(plot_err, labels ,loc='lower left', bbox_to_anchor=legend[1],fontsize=25, shadow=True,
                   prop={'family':'Times New Roman'})
    ax1.tick_params(axis='both',which='major',labelsize=20)
    ax1.set_xlabel(x_name,fontsize=25, fontname="Times New Roman")
    ax1.set_ylabel(y_name,fontsize=25, fontname="Times New Roman")
    #ax1.set_ylim((0,1.2))
    if save:
        f1.savefig(pic_name)
    else:
        plt.show()
def iqr(x):
    """
    x has to be a 2D np array. The interquantiles are computed along with the axis 1
    """
    lo = int(0.25 * x.shape[0])
    hi = int(0.75 * x.shape[0])
    # Sort each column (one column per time step) once, then read off the
    # quartile entries and the mean.
    sorted_cols = [np.sort(col) for col in x.T]
    ids25 = [col[lo] for col in sorted_cols]
    ids75 = [col[hi] for col in sorted_cols]
    m = [np.mean(col, dtype=np.float32) for col in sorted_cols]
    return m, ids25, ids75
| StarcoderdataPython |
176324 | import pytest
from datetime import datetime, timedelta
from lt_booking_scraper.utils import extract_number, validate_date, generate_headers
def test_generate_headers_accept():
    """The generated request headers must include an Accept entry."""
    assert 'Accept' in generate_headers()
def test_generate_headers_user_agent():
    """The generated request headers must include a User-Agent entry."""
    assert 'User-Agent' in generate_headers()
@pytest.mark.parametrize("value, expected", [
("1.5 km from centre", 1.5),
("from centre 1.5 km", 1.5),
("from 1.5 km centre", 1.5),
("1.5km from centre", 1.5),
])
def test_extract_number(value, expected):
assert extract_number(value) == expected
def test_validate_date():
    """Today's date string round-trips through validate_date."""
    today = datetime.now()
    assert validate_date(today.strftime("%Y-%m-%d")) == today.date()
def test_validate_date_in_the_past():
    """A date strictly before today must be rejected with ValueError."""
    past = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d")
    with pytest.raises(ValueError):
        validate_date(past)
| StarcoderdataPython |
9666831 | <gh_stars>10-100
import copy
from ..fstrips import AddEffect, DelEffect, FunctionalEffect, UniversalEffect
from ..evaluators.simple import evaluate
from ..fstrips.representation import substitute_expression
from ..syntax.transform.substitutions import enumerate_substitutions
def is_applicable(model, operator):
    """Check whether a given (ground) operator can be applied in state `model`."""
    precondition = operator.precondition
    return evaluate(precondition, model)
def is_effect_applicable(model, effect):
    """Check whether the effect's condition holds in the given state `model`."""
    condition = effect.condition
    return evaluate(condition, model)
def apply_effect(model, effect):
    """ Apply the given effect to the given model. """
    # Conditional effects only fire when their condition holds in the model.
    if not is_effect_applicable(model, effect):
        return
    if isinstance(effect, AddEffect):
        model.add(effect.atom.predicate, *effect.atom.subterms)
    elif isinstance(effect, DelEffect):
        # discard (not remove): deleting an absent atom is a no-op.
        model.discard(effect.atom.predicate, *effect.atom.subterms)
    elif isinstance(effect, FunctionalEffect):
        # Evaluate the right-hand side in the *current* model before setting.
        model.set(effect.lhs, evaluate(effect.rhs, model))
    elif isinstance(effect, UniversalEffect):
        # Ground the universally quantified variables and apply each
        # instantiated sub-effect recursively.
        for subst in enumerate_substitutions(effect.variables):
            for eff in effect.effects:
                apply_effect(model, substitute_expression(eff, subst))
    else:
        raise RuntimeError(f'Don\'t know how to apply effect "{effect}"')
def progress(state, operator):
    """ Returns the progression of the given state along the effects of the given operator.
    Note that this method does not check that the operator is applicable.
    """
    # TODO This is unnecessarily expensive, but a simple copy wouldn't work either.
    # If/when we transition towards a C++-backed model implementation, this should be improved.
    successor = copy.deepcopy(state)
    # Apply delete effects first so that add-after-delete semantics hold
    # (sorted is stable, so relative order within each group is preserved).
    ordered = sorted(operator.effects, key=lambda eff: not isinstance(eff, DelEffect))
    for effect in ordered:
        apply_effect(successor, effect)
    return successor
| StarcoderdataPython |
89333 | <gh_stars>1-10
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSpatialpack(RPackage):
    """Tools to assess the association between two spatial processes."""

    # CRAN landing page and source tarball locations for SpatialPack.
    homepage = "https://cloud.r-project.org/package=SpatialPack"
    url = "https://cloud.r-project.org/src/contrib/SpatialPack_0.3-8.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/SpatialPack"

    # Known-good releases, pinned by sha256 of the source tarball.
    version('0.3-8', sha256='a0e54b5dee3cd30a634e2d30380fe163942b672073fd909be888803332ed5151')
    version('0.3', sha256='4c80fc1c77bc97fc678e6e201ecf7f0f89dcf3417b3b497a28a3639e9b30bd8a')

    # SpatialPack requires R >= 2.10 both at build time and at runtime.
    depends_on('r@2.10:', type=('build', 'run'))
| StarcoderdataPython |
4993069 | import zipfile
from urllib.request import urlretrieve
from tqdm import tqdm
class DLProgress(tqdm):
    """tqdm subclass whose `hook` method matches urlretrieve's reporthook
    signature, so download progress is rendered in the terminal.
    """

    last_block = 0  # index of the most recently reported block

    def hook(self, block_num=1, block_size=1, total_size=None):
        """Record progress reported by urlretrieve.

        :param block_num: index of the block just transferred
        :param block_size: size of each block, in bytes
        :param total_size: total download size in bytes, if known
        """
        self.total = total_size
        newly_transferred = (block_num - self.last_block) * block_size
        self.update(newly_transferred)  # advances the progress bar
        self.last_block = block_num
print('Downloading zip file ...')
with DLProgress(unit='B', unit_scale=True, miniters=1) as pbar:
    urlretrieve('https://s3.eu-central-1.amazonaws.com/avg-kitti/data_road.zip', 'data_road.zip', pbar.hook)

print('Extracting zip file...')
# FIX: use a context manager so the archive handle is closed even if
# extraction raises (the original left `zip_ref` open on error).
with zipfile.ZipFile('data_road.zip', 'r') as zip_ref:
    zip_ref.extractall()
| StarcoderdataPython |
3420209 | <reponame>cbonilla20/great_expectations
"""
Helper utilities for creating and testing benchmarks using NYC Taxi data (yellow_trip_data_sample_2019-01.csv)
found in the tests/test_sets/taxi_yellow_trip_data_samples directory, and used extensively in unittest and
integration tests for Great Expectations.
"""
import os
from typing import List, Optional
from great_expectations import DataContext
from great_expectations.checkpoint import SimpleCheckpoint
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import (
ConcurrencyConfig,
DataContextConfig,
InMemoryStoreBackendDefaults,
)
def create_checkpoint(
    number_of_tables: int, backend_api: str = "V3", html_dir: Optional[str] = None
) -> SimpleCheckpoint:
    """Create a checkpoint from scratch, including setting up data sources/etc.

    Args:
        number_of_tables: Number of tables validated in the checkpoint. The tables are assumed to be created by
            "setup_bigquery_tables_for_performance_test.sh", which creates 100 tables, so this number must be <= 100.
        backend_api: Either "V3" or "V2".
        html_dir: Directory path to write the HTML Data Docs to. If not specified, Data Docs are not written.

    Returns:
        Configured checkpoint ready to be run.
    """
    checkpoint_name = "my_checkpoint"
    datasource_name = "my_datasource"
    data_connector_name = "my_data_connector"

    # These tables are created by "setup_bigquery_tables_for_performance_test.sh", with numbering from 1 to 100.
    assert 1 <= number_of_tables <= 100
    # Each suite shares its name with the asset (table) it validates.
    suite_and_asset_names = [f"taxi_trips_{i}" for i in range(1, number_of_tables + 1)]

    context = _create_context(
        backend_api,
        datasource_name,
        data_connector_name,
        suite_and_asset_names,
        html_dir,
    )
    # Register one expectation suite per table; all suites carry the same
    # expectations (see _add_expectation_configuration).
    for suite_name in suite_and_asset_names:
        _add_expectation_configuration(context=context, suite_name=suite_name)

    # The checkpoint ties every suite to its corresponding BigQuery asset.
    return _add_checkpoint(
        context,
        backend_api,
        datasource_name,
        data_connector_name,
        checkpoint_name,
        suite_and_asset_names,
    )
def concurrency_config() -> ConcurrencyConfig:
    """Return a concurrency configuration with concurrent execution enabled."""
    enabled_config = ConcurrencyConfig(enabled=True)
    return enabled_config
def expected_validation_results() -> List[dict]:
    """Return the validation results expected from running the benchmark
    checkpoint against one taxi_trips table.

    One entry per expectation added by _add_expectation_configuration,
    in the same order; used by tests to assert benchmark correctness.
    """
    return [
        # expect_table_columns_to_match_set: full yellow-taxi schema.
        {
            "meta": {},
            "expectation_config": {
                "meta": {},
                "kwargs": {
                    "column_set": [
                        "vendor_id",
                        "pickup_datetime",
                        "dropoff_datetime",
                        "passenger_count",
                        "trip_distance",
                        "rate_code_id",
                        "store_and_fwd_flag",
                        "pickup_location_id",
                        "dropoff_location_id",
                        "payment_type",
                        "fare_amount",
                        "extra",
                        "mta_tax",
                        "tip_amount",
                        "tolls_amount",
                        "improvement_surcharge",
                        "total_amount",
                        "congestion_surcharge",
                    ]
                },
                "expectation_type": "expect_table_columns_to_match_set",
            },
            "result": {
                "observed_value": [
                    "vendor_id",
                    "pickup_datetime",
                    "dropoff_datetime",
                    "passenger_count",
                    "trip_distance",
                    "rate_code_id",
                    "store_and_fwd_flag",
                    "pickup_location_id",
                    "dropoff_location_id",
                    "payment_type",
                    "fare_amount",
                    "extra",
                    "mta_tax",
                    "tip_amount",
                    "tolls_amount",
                    "improvement_surcharge",
                    "total_amount",
                    "congestion_surcharge",
                ]
            },
            "exception_info": {
                "raised_exception": False,
                "exception_traceback": None,
                "exception_message": None,
            },
            "success": True,
        },
        # expect_column_values_to_not_be_null on vendor_id.
        {
            "meta": {},
            "expectation_config": {
                "meta": {},
                "kwargs": {"column": "vendor_id"},
                "expectation_type": "expect_column_values_to_not_be_null",
            },
            "result": {
                "element_count": 10000,
                "unexpected_count": 0,
                "unexpected_percent": 0.0,
                "partial_unexpected_list": [],
            },
            "exception_info": {
                "raised_exception": False,
                "exception_traceback": None,
                "exception_message": None,
            },
            "success": True,
        },
        # expect_column_values_to_be_of_type: vendor_id is an INTEGER.
        {
            "meta": {},
            "expectation_config": {
                "meta": {},
                "kwargs": {"column": "vendor_id", "type_": "INTEGER"},
                "expectation_type": "expect_column_values_to_be_of_type",
            },
            "result": {"observed_value": "Integer"},
            "exception_info": {
                "raised_exception": False,
                "exception_traceback": None,
                "exception_message": None,
            },
            "success": True,
        },
        # expect_column_values_to_be_of_type: pickup_datetime is a STRING.
        {
            "meta": {},
            "expectation_config": {
                "meta": {},
                "kwargs": {"column": "pickup_datetime", "type_": "STRING"},
                "expectation_type": "expect_column_values_to_be_of_type",
            },
            "result": {"observed_value": "String"},
            "exception_info": {
                "raised_exception": False,
                "exception_traceback": None,
                "exception_message": None,
            },
            "success": True,
        },
        # expect_column_values_to_be_in_set: valid TLC rate codes.
        {
            "meta": {},
            "expectation_config": {
                "meta": {},
                "kwargs": {
                    "column": "rate_code_id",
                    "value_set": [1, 2, 3, 4, 5, 6, 99],
                },
                "expectation_type": "expect_column_values_to_be_in_set",
            },
            "result": {
                "element_count": 10000,
                "unexpected_count": 0,
                "unexpected_percent": 0,
                "partial_unexpected_list": [],
                "missing_count": 0,
                "missing_percent": 0.0,
                "unexpected_percent_nonmissing": 0.0,
            },
            "exception_info": {
                "raised_exception": False,
                "exception_traceback": None,
                "exception_message": None,
            },
            "success": True,
        },
        # expect_column_values_to_be_between: plausible trip distances.
        {
            "meta": {},
            "expectation_config": {
                "meta": {},
                "kwargs": {
                    "column": "trip_distance",
                    "max_value": 1000.0,
                    "min_value": 0,
                },
                "expectation_type": "expect_column_values_to_be_between",
            },
            "result": {
                "element_count": 10000,
                "unexpected_count": 0,
                "unexpected_percent": 0.0,
                "partial_unexpected_list": [],
                "missing_count": 0,
                "missing_percent": 0.0,
                "unexpected_percent_nonmissing": 0.0,
            },
            "exception_info": {
                "raised_exception": False,
                "exception_traceback": None,
                "exception_message": None,
            },
            "success": True,
        },
    ]
def _create_context(
    backend_api: str,
    datasource_name: str,
    data_connector_name: str,
    asset_names: List[str],
    html_dir: Optional[str] = None,
) -> DataContext:
    """Build an in-memory DataContext wired to the BigQuery benchmark dataset.

    Args:
        backend_api: Either "V3" (new-style Datasource) or "V2"
            (legacy SqlAlchemyDatasource).
        datasource_name: Name under which the datasource is registered.
        data_connector_name: Name of the data connector (V3 only).
        asset_names: Table/asset names exposed by the connector (V3 only).
        html_dir: If given, a filesystem Data Docs site writes to this path.

    Raises:
        ValueError: If backend_api is neither "V3" nor "V2".
    """
    # Only configure a Data Docs site when an output directory was requested.
    data_docs_sites = (
        {
            "local_site": {
                "class_name": "SiteBuilder",
                "show_how_to_buttons": False,
                "store_backend": {
                    "class_name": "TupleFilesystemStoreBackend",
                    "base_directory": html_dir,
                },
            }
        }
        if html_dir
        else None
    )
    # GE_TEST_BIGQUERY_PROJECT is required; the dataset name falls back to
    # the CI default "performance_ci".
    bigquery_project = os.environ["GE_TEST_BIGQUERY_PROJECT"]
    bigquery_dataset = os.environ.get(
        "GE_TEST_BIGQUERY_PERFORMANCE_DATASET", "performance_ci"
    )
    data_context_config = DataContextConfig(
        store_backend_defaults=InMemoryStoreBackendDefaults(),
        data_docs_sites=data_docs_sites,
        anonymous_usage_statistics={"enabled": False},
        concurrency=concurrency_config(),
    )
    context = BaseDataContext(project_config=data_context_config)

    if backend_api == "V3":
        # V3: one ConfiguredAssetSqlDataConnector exposing each table asset.
        datasource_config = {
            "name": datasource_name,
            "class_name": "Datasource",
            "execution_engine": {
                "class_name": "SqlAlchemyExecutionEngine",
                "connection_string": f"bigquery://{bigquery_project}/{bigquery_dataset}",
            },
            "data_connectors": {
                data_connector_name: {
                    "class_name": "ConfiguredAssetSqlDataConnector",
                    "name": "whole_table",
                    "assets": {asset_name: {} for asset_name in asset_names},
                },
            },
        }
    elif backend_api == "V2":
        # V2: legacy SqlAlchemyDatasource; assets are addressed per-batch.
        datasource_config = {
            "name": datasource_name,
            "credentials": {
                "url": f"bigquery://{bigquery_project}/{bigquery_dataset}",
            },
            "class_name": "SqlAlchemyDatasource",
            "module_name": "great_expectations.datasource",
            "batch_kwargs_generators": {},
            "data_asset_type": {
                "module_name": "great_expectations.dataset",
                "class_name": "SqlAlchemyDataset",
            },
        }
    else:
        raise ValueError(f"Unsupported backend_api {backend_api}")

    context.add_datasource(**datasource_config)
    return context
def _add_checkpoint(
    context: BaseDataContext,
    backend_api: str,
    datasource_name: str,
    data_connector_name: str,
    checkpoint_name: str,
    suite_and_asset_names=(),
) -> SimpleCheckpoint:
    """Register a checkpoint validating each expectation suite against its
    same-named table/asset.

    Args:
        context: The data context to register the checkpoint with.
        backend_api: Either "V3" or "V2".
        datasource_name: Name of the datasource configured in the context.
        data_connector_name: Name of the data connector (V3 only).
        checkpoint_name: Name under which the checkpoint is registered.
        suite_and_asset_names: Names shared by each suite and its asset.
            FIX: default changed from the mutable `[]` to an immutable `()`;
            the argument is only iterated, so this is behavior-neutral.

    Raises:
        ValueError: If backend_api is neither "V3" nor "V2".
    """
    if backend_api == "V3":
        # V3: one validation per suite/asset pair on a SimpleCheckpoint.
        validations = [
            {
                "expectation_suite_name": suite_and_asset_name,
                "batch_request": {
                    "datasource_name": datasource_name,
                    "data_connector_name": data_connector_name,
                    "data_asset_name": suite_and_asset_name,
                    "batch_spec_passthrough": {"create_temp_table": False},
                },
            }
            for suite_and_asset_name in suite_and_asset_names
        ]
        return context.add_checkpoint(
            name=checkpoint_name,
            class_name="SimpleCheckpoint",
            validations=validations,
            run_name_template="my_run_name",
        )
    elif backend_api == "V2":
        # V2: one batch per suite/asset pair on a LegacyCheckpoint.
        batches = [
            {
                "expectation_suite_names": [suite_and_asset_name],
                "batch_kwargs": {
                    "datasource": datasource_name,
                    "data_asset_name": suite_and_asset_name,
                    "table": suite_and_asset_name,
                    "batch_spec_passthrough": {"create_temp_table": False},
                },
            }
            for suite_and_asset_name in suite_and_asset_names
        ]
        return context.add_checkpoint(
            name=checkpoint_name,
            class_name="LegacyCheckpoint",
            batches=batches,
        )
    else:
        raise ValueError(f"Unsupported backend_api {backend_api}")
def _add_expectation_configuration(context: BaseDataContext, suite_name: str):
    """Create an expectation suite named `suite_name` holding the benchmark's
    fixed set of expectations, and persist it in the context.

    The expectations mirror those asserted by expected_validation_results().
    """
    suite = context.create_expectation_suite(expectation_suite_name=suite_name)
    # Full yellow-taxi column schema.
    suite.add_expectation(
        expectation_configuration=ExpectationConfiguration(
            expectation_type="expect_table_columns_to_match_set",
            kwargs={
                "column_set": [
                    "vendor_id",
                    "pickup_datetime",
                    "dropoff_datetime",
                    "passenger_count",
                    "trip_distance",
                    "rate_code_id",
                    "store_and_fwd_flag",
                    "pickup_location_id",
                    "dropoff_location_id",
                    "payment_type",
                    "fare_amount",
                    "extra",
                    "mta_tax",
                    "tip_amount",
                    "tolls_amount",
                    "improvement_surcharge",
                    "total_amount",
                    "congestion_surcharge",
                ]
            },
        )
    )
    suite.add_expectation(
        expectation_configuration=ExpectationConfiguration(
            expectation_type="expect_column_values_to_not_be_null",
            kwargs={"column": "vendor_id"},
        )
    )
    suite.add_expectation(
        expectation_configuration=ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_of_type",
            kwargs={"column": "vendor_id", "type_": "INTEGER"},
        )
    )
    suite.add_expectation(
        expectation_configuration=ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_of_type",
            kwargs={"column": "pickup_datetime", "type_": "STRING"},
        )
    )
    suite.add_expectation(
        expectation_configuration=ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            # rate_code_id refers to the final rate code in effect at the end of the trip
            # (https://www1.nyc.gov/assets/tlc/downloads/pdf/data_dictionary_trip_recordsgyellow.pdf)
            # 1=Standard rate
            # 2=JFK
            # 3=Newark
            # 4=Nassau or Westchester
            # 5=Negotiated fare
            # 6=Group ride
            # 99=NA
            kwargs={"column": "rate_code_id", "value_set": [1, 2, 3, 4, 5, 6, 99]},
        )
    )
    suite.add_expectation(
        expectation_configuration=ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_between",
            kwargs={
                "column": "trip_distance",
                "min_value": 0,
                "max_value": 1000.0,
            },
        )
    )
    # Save the expectation suite or else it doesn't show up in the data docs.
    context.save_expectation_suite(
        expectation_suite=suite, expectation_suite_name=suite_name
    )
| StarcoderdataPython |
11241479 | <reponame>rainprob/GibsonEnv
#from realenv.client.client_actions import client_actions as actions
#from realenv.client.vnc_client import VNCClient as VNCClient
from gym.envs.registration import registry, register, make, spec
#===================== Full Environments =====================#
## Eventually we will package all environments for encapsulation
# An environment can be invoked by:
#   env = gym.make('HumanoidCamera-v0')    ## unconfigurable
##  env = HumanoidCamera(human=True)       ## configurable

# Humanoid robot observed through an RGB camera.
register(
    id='HumanoidCamera-v0',
    entry_point='realenv.envs.humanoid_env:HumanoidCameraEnv'
)

# Humanoid robot observed through low-dimensional sensor readings.
register(
    id='HumanoidSensor-v0',
    entry_point='realenv.envs.humanoid_env:HumanoidSensorEnv'
)

# Quadruped (Ant) locomotion environment.
register(
    id='AntWalkingEnv-v0',
    entry_point='realenv.envs.simple_env:AntWalkingEnv'
)

# Husky wheeled-robot locomotion environment.
register(
    id='HuskyWalkingEnv-v0',
    entry_point='realenv.envs.simple_env:HuskyWalkingEnv'
)
1772927 | import numpy as np
import random as rd
import torch
from log import Logger
import torch.nn as nn
from c4Grid import c4Grid
# --- Network dimensions -------------------------------------------------
IN_LEN = 43           # 6x7 board cells + 1 entry encoding the colour to move
OUT_LEN = 7           # one output probability per board column
NUM_IMG = 1
# FIX: both branches previously selected "cpu", making the conditional a
# no-op; the guard's existence implies the GPU branch was intended.
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
DTYPE = torch.float
LR = 1e-3             # Adam learning rate
NUM_ITERATIONS = 401  # default training iterations
INF = 1000000
# --- Piece / outcome encodings ------------------------------------------
RED = 2
YELLOW = 1
DRAW = -1
MAX_MOVES = 42        # a 6x7 board is full after 42 moves
# --- Reward constants (terminal = exact game end, rollout = simulation) --
FINAL_LOSS = -10000
TERMINAL_LOSS = -10000
ROLLOUT_LOSS = -10000
TERMINAL_WIN = 100
ROLLOUT_WIN = 10
TERMINAL_DRAW = 1
ROLLOUT_DRAW = 0
class NeuralNet(nn.Module):
    """Fully connected policy network: IN_LEN board features in,
    OUT_LEN per-column move probabilities out (softmax over columns)."""

    def __init__(self):
        super(NeuralNet, self).__init__()
        layers = [
            nn.Linear(IN_LEN, 100),
            nn.Tanh(),
            nn.Linear(100, 50),
            nn.Sigmoid(),
            nn.Linear(50, OUT_LEN),
            nn.Softmax(dim=1),
        ]
        self.denseN = nn.Sequential(*layers)

    def forward(self, x):
        """Run the dense stack on a batch of flattened board states."""
        return self.denseN(x)
class c4Agent:
def __init__(self, color):
self.color = color
self.nnObj = NeuralNet().to(device=DEVICE, dtype=DTYPE)
self.data_target_map={}
self.logger=Logger("c4Log.txt")
def addDataTarget(self, data, target):
data=tuple(data[0])
self.data_target_map[data]=target
def make_target(self, node):
arr=[]
for i in node.children:
if i!=None:
arr.append(i.n/node.n)
else:
arr.append(0)
return arr
def train(self, actions, root, iterations):
node=root
color=RED
target=[]
data=[]
for action in actions:
temp_target=self.make_target(node)
temp_data=self.gridToNNState(node.state, color).numpy()
"""
target.append(temp_target)
data.append(temp_data[0])
"""
self.addDataTarget(temp_data, temp_target)
if len(node.children) > 0:
node = node.children[action]
else:
node=None
color = self.switchColor(color)
"""
if not node: #check for when playing against human
prev_node.populateNode(color)
node = prev_node.children[action]
"""
for dataPoint in self.data_target_map.keys():
data.append(list(dataPoint))
target.append(self.data_target_map[dataPoint])
target=torch.tensor(target, device=DEVICE, dtype=DTYPE)
data=torch.tensor(data, device=DEVICE, dtype=DTYPE)
loss_fn=nn.MSELoss()
optimizer=torch.optim.Adam(self.nnObj.parameters(), lr=LR)
for i in range(iterations):
res=self.nnObj.forward(data)
loss=loss_fn(res, target)
if i%100==0:
print(res.shape, target.shape)
print("Loss at %d: %f"%(i, loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
def getReward(self, winColor):
if winColor == DRAW:
return ROLLOUT_DRAW
if self.color == winColor:
return ROLLOUT_WIN #for win
return ROLLOUT_LOSS #for loss
def gridToNNState(self, state, colorToMove):
nnState=[]
temp_inp=[]
dict1={0:0, 1:0.5, 2:1}
for i in range(6):
for j in range(7):
temp_inp.append(dict1[state[i][j]])
temp_inp.append(colorToMove)
nnState.append(temp_inp)
nnState=torch.tensor(nnState, device=DEVICE, dtype=DTYPE)
return nnState
def getArgMax(self, out, cols):
max_ind=-1
max_val=-10000
for i in range(7):
if cols[i]>=0 and out[i].item()>max_val:
max_val=out[i].item()
max_ind=i
return max_ind
def getNNOutput(self, state, color):
return self.nnObj.forward(self.gridToNNState(state, color))
def makeRandomVirtualMove(self, state, cols, color):
action = -1
ret=self.nnObj.forward(self.gridToNNState(state, color))
action=self.getArgMax(ret[0], cols)
state[cols[action]][action] = color
x = cols[action]
y = action
cols[action] -= 1
return state, cols, x, y
def switchColor(self, color):
if color == RED:
return YELLOW
return RED
def rollout(self, vgrid, vcols, moveCnt, colorToMove):
grid = c4Grid()
while True:
vgrid, vcols, x, y = self.makeRandomVirtualMove(vgrid, vcols, colorToMove)
moveCnt += 1
if moveCnt == 42:
return 0 #draw reward
if grid.checkWinVirtual(vgrid, x, y):
return self.getReward(colorToMove) #return win
colorToMove = self.switchColor(colorToMove)
def getRewardTerminal(self, winColor):
if winColor == DRAW:
return TERMINAL_DRAW
if self.color == winColor:
return TERMINAL_WIN #for win
return TERMINAL_LOSS #for loss
    def getBestMove(self, actions, n_iterations, root, grid):
        """Run n_iterations of MCTS from the tree node reached by replaying
        `actions` from `root`, then return the action with the highest UCB.

        actions      -- sequence of column indices played so far (path from root)
        n_iterations -- number of MCTS simulations to run
        root         -- root node of the search tree
        grid         -- current board; unused here, state is kept in the nodes
        """
        next_node = None
        action = 0
        count = 0
        node = root
        prev_node = root
        color = YELLOW
        # Walk down the tree along the already-played moves, expanding any
        # missing child (e.g. an opponent/human move search never explored).
        for action in actions:
            prev_node = node
            if len(node.children) > 0:
                node = node.children[action]
            else:
                node = None
            color = self.switchColor(color)
            if not node: #check for when playing against human
                prev_node.populateNode(color)
                node = prev_node.children[action]
        # Make sure the current position has children to select among.
        if node.checkLeaf():
            node.populateNode(self.color)
        curr = node
        change = False
        print(self.getNNOutput(node.state, self.color))
        # Main MCTS loop: selection / expansion / rollout / backpropagation.
        # `change` is True while descending the tree via max-UCB selection and
        # False once an iteration finished, which resets `curr` to the start node.
        while count < n_iterations:
            if not change: #to reset curr to the initial node
                #self.logger.log("LOG", "-------Running iteration %d-------"%(count+1))
                curr = node
            if curr.checkLeaf():
                if curr.n == 0:
                    # Unvisited leaf: use the terminal result directly, or
                    # estimate it with a random rollout.
                    if curr.isTerminal:
                        reward = self.getRewardTerminal(curr.winColor)
                        curr.backpropagate(reward)
                        #self.logger.log("LOG", "Got reward: %d, in leaf+terminal+unvisited node"%(reward))
                        count += 1
                        change = False
                        #self.logger.log("LOG", "-------Ending iteration %d-------"%(count+1))
                        continue
                    else:
                        # Copy the state so the rollout cannot mutate the node.
                        vgrid = curr.state.copy()
                        vcols = curr.cols.copy()
                        # Odd move count means it is YELLOW's turn to move.
                        colorToMove = YELLOW if curr.moveCnt%2 == 1 else RED
                        reward = self.rollout(vgrid, vcols, curr.moveCnt, colorToMove)
                        curr.backpropagate(reward)
                        #self.logger.log("LOG", "Got reward: %d, in leaf+unvisited node"%(reward))
                        count += 1
                        change = False
                        #self.logger.log("LOG", "-------Ending iteration %d-------"%(count))
                        continue
                else:
                    # Visited leaf: expand it, then rollout from its best child.
                    colorToMove = YELLOW if curr.moveCnt%2 == 1 else RED
                    if curr.isTerminal:
                        reward = self.getRewardTerminal(curr.winColor)
                        curr.backpropagate(reward)
                        #self.logger.log("LOG", "Got reward: %d, in leaf+terminal node"%(reward))
                        count += 1
                        change = False
                        #self.logger.log("LOG", "-------Ending iteration %d-------"%(count))
                        continue
                    curr.populateNode(colorToMove)
                    curr, _, _ = curr.getMaxUcbNode(root.n)
                    if curr.isTerminal:
                        reward = self.getRewardTerminal(curr.winColor)
                        curr.backpropagate(reward)
                        #self.logger.log("LOG", "Got reward: %d, in leaf_expanded+terminal node"%(reward))
                        count += 1
                        change = False
                        #self.logger.log("LOG", "-------Ending iteration %d-------"%(count))
                        continue
                    vgrid = curr.state.copy()
                    vcols = curr.cols.copy()
                    colorToMove = YELLOW if curr.moveCnt%2 == 1 else RED
                    reward = self.rollout(vgrid, vcols, curr.moveCnt, colorToMove)
                    curr.backpropagate(reward)
                    #self.logger.log("LOG", "Got reward: %d, in expanded+leaf node"%(reward))
                    count += 1
                    change = False
                    #self.logger.log("LOG", "-------Ending iteration %d-------"%(count))
                    continue
            else:
                # Interior node: keep descending along the max-UCB child.
                change = True
                #self.logger.log("LOG", "Already visited, choosing max_ucb node")
                curr, _ , _= curr.getMaxUcbNode(root.n)
        # Pick the final move from the start node's children by UCB score.
        next_node, action, ucbs = node.getMaxUcbNode(root.n)
        print(ucbs)
        return action
    def feedFinalReward(self, actions, root, res):
        """Replay `actions` from `root` to the node of the finished game and
        backpropagate the final game result through the search tree.

        actions -- sequence of column indices played during the whole game
        root    -- root node of the search tree
        res     -- result string: "LOSS", "WIN", or anything else for a draw
        """
        node = root
        prev_node = root
        color = YELLOW
        # Descend along the played moves, expanding any node that search never
        # visited (e.g. moves made by a human opponent).
        for action in actions:
            prev_node = node
            if len(node.children) > 0:
                node = node.children[action]
            else:
                node = None
            color = self.switchColor(color)
            if not node: #check for when playing against human
                prev_node.populateNode(color)
                node = prev_node.children[action]
        # Feed the final outcome back up the visited path.
        if res=="LOSS":
            node.backpropagate(FINAL_LOSS)
        elif res=="WIN":
            node.backpropagate(TERMINAL_WIN)
        else:
            node.backpropagate(TERMINAL_DRAW)
| StarcoderdataPython |
11251772 | from typing import List
# from dataclasses import dataclass, field
# @dataclass
# class Identifier:
# label: str
# allowed_values: List[str]
# @dataclass
# class Product:
# id: str
# name: str=None
# description: str=None
# image: str=None
# unit_of_measure: str=None
# unit_of_measure_quantity: int=None
# dimension_unit_of_measure: str=None
# weight_unit_of_measure: str=None
# length: float=None
# width: float=None
# height: float=None
# weight: float=None
# identifiers: List[Identifier]=None
class Identifier(object):
    """A product identifier scheme: a label plus the values it may take.

    Attributes:
        label          -- str, name of the identifier scheme
        allowed_values -- List[str], permitted values for this identifier
    """

    def __init__(self, label, allowed_values):
        self.label = label
        self.allowed_values = allowed_values
class Product(object):
    """Plain data container describing a catalog product.

    Only ``id`` is required; every other attribute defaults to None.
    ``identifiers`` is expected to be a List[Identifier] when supplied.
    """

    def __init__(self, id, name=None, description=None, image=None,
                 unit_of_measure=None, unit_of_measure_quantity=None,
                 dimension_unit_of_measure=None, weight_unit_of_measure=None,
                 length=None, width=None, height=None, weight=None,
                 identifiers=None):
        # Identity / descriptive fields
        self.id = id
        self.name = name
        self.description = description
        self.image = image
        # Unit-of-measure fields
        self.unit_of_measure = unit_of_measure
        self.unit_of_measure_quantity = unit_of_measure_quantity
        self.dimension_unit_of_measure = dimension_unit_of_measure
        self.weight_unit_of_measure = weight_unit_of_measure
        # Physical dimensions
        self.length = length
        self.width = width
        self.height = height
        self.weight = weight
        # Alternative identifier schemes for this product
        self.identifiers = identifiers
| StarcoderdataPython |
5153333 | # Import Standard Libraries
import logging
import scipy as np
# Import Local Libraries
from Utilities import *
#===========================================================================
# EC2 Equations - Material properties
#===========================================================================
def elastic_modulus(fck, units="MPa"):
    """Mean elastic modulus of concrete (EC2).

    Input:  fck   = characteristic compressive strength of concrete
            units = "MPa" or "psi" (default = "MPa")
    Output: Ec    = mean elastic modulus of concrete, in the given units
    """
    fck_mpa = convert_2_MPa(fck, units)
    fcm = fck_mpa + 8  # mean compressive strength, MPa
    modulus = 22000 * (fcm / 10) ** 0.3
    if units == "MPa":
        return modulus
    return convert_2_psi(modulus, "MPa")
def tensile_strength(fck, units="MPa"):
    """Mean axial tensile strength of concrete (EC2).

    Input:  fck   = characteristic compressive strength of concrete
            units = "MPa" or "psi" (default = "MPa")
    Output: fctm  = mean tensile strength of concrete, in the given units
    """
    fck_mpa = convert_2_MPa(fck, units)
    fcm = fck_mpa + 8  # mean compressive strength, MPa
    # Two branches of EC2 Table 3.1: power law up to C50, log law above.
    if fck_mpa <= 50:
        fctm = 0.3 * fck_mpa ** (2 / 3)
    else:
        fctm = 2.12 * np.log(1 + fcm / 10)
    if units == "MPa":
        return fctm
    return convert_2_psi(fctm, "MPa")
def flex_tensile_strength(fck, h, units="MPa"):
    """ Input: fck = char. comp. strength of concrete
               h = height of reinforced concrete beam
               units = "MPa" or "psi" (default = "MPa")
        Output: fctm,fl = mean tensile strength for flexure """
    fck = convert_2_MPa(fck, units)
    fctm = tensile_strength(fck)  # axial tensile strength, MPa
    h = convert_2_mm(h, units)
    # NOTE(review): EC2 3.1.8 defines fctm,fl = max((1.6 - h/1000)*fctm, fctm);
    # this code uses min(), which yields a value <= fctm (conservative for
    # minimum-reinforcement checks) -- confirm whether min() is intentional.
    fctm = min((1.6-h/1000)*fctm, fctm)
    return fctm if units == "MPa" else convert_2_psi(fctm, "MPa")
def ultimate_strain(fck, units="MPa"):
    """Ultimate compressive strain ecu3 of concrete (EC2).

    Input:  fck   = characteristic compressive strength of concrete
            units = "MPa" or "psi" (default = "MPa")
    Output: ecu3  = ultimate strain (dimensionless, e.g. 0.0035)
    """
    fck_mpa = convert_2_MPa(fck, units)
    # Value in per-mille; capped at 3.5 for normal-strength concrete.
    ecu3_permille = 2.6 + 35 * ((90 - fck_mpa) / 100) ** 4
    return min(ecu3_permille, 3.5) / 1000
#===========================================================================
# EC2 Equations - Parameters
#===========================================================================
def alpha_beta(fck, units="MPa"):
    """ Input: fck = char. comp. strength of concrete
               units = "MPa" or "psi" (default = "MPa")
        Output: alpha = factor for bilinear stress block
                beta = (dist. from comp. to Nc)/Xu """
    fck = convert_2_MPa(fck, units)
    # Quadratic fits in fck (MPa), rounded UP to 2 decimals; capped below at
    # the normal-strength values 0.75 / 0.39.
    alpha = np.ceil((9E-05*fck**2 - 0.0177*fck + 1.4032)*100)/100
    beta = np.ceil((4E-05*fck**2 - 0.0071*fck + 0.634)*100)/100
    return [min(alpha, 0.75), min(beta, 0.39)]
def lambda_eta(fck, units="MPa"):
    """Effective stress-block factors (EC2 3.1.7).

    Input:  fck   = characteristic compressive strength of concrete
            units = "MPa" or "psi" (default = "MPa")
    Output: [la, eta] where
            la  = (height of compressive zone)/Xu
            eta = effective strength factor for the "Whitney" stress block
    """
    fck_mpa = convert_2_MPa(fck, units)
    # Both factors shrink linearly above 50 MPa; capped at the
    # normal-strength values 0.8 and 1.0.
    la = min(0.8 - (fck_mpa - 50) / 400, 0.8)
    eta = min(1 - (fck_mpa - 50) / 200, 1.0)
    return [la, eta]
#===========================================================================
# EC2 Equations - Maximum reinforcement (Ductility)
#===========================================================================
def ductility_requirement(Xu, d, fck, fyd, units="MPa"):
    """ Input: Xu = dist. from comp. to neutral axis
               d = dist. from comp. to reinforcement
               fck = char. comp. strength of concrete
               fyd = design steel yield stress
               units = "MPa" or "psi" (default = "MPa")
        Output: Xu_max = Max. dist. to neutral axis """
    [fck, fyd] = convert_2_MPa(np.array([fck, fyd]), units)
    ecu = ultimate_strain(fck) # units="MPa"
    # Strain-compatibility limit on Xu/d: ecu/(ecu + steel strain), with the
    # steel strain taken as 7*fyd microstrain, capped at 0.535.
    # NOTE(review): the 7*fyd and 0.535 figures look like national-annex
    # practice rather than the EC2 base text -- confirm the source.
    Xu_max = min(ecu*10**6/(ecu*10**6+7*fyd), 0.535)*d
    # Log (do not raise) whether the supplied Xu satisfies the limit.
    if Xu < Xu_max:
        logging.info(
            "  Xu = {:6.2f} < Xu_max = {:6.2f}. OK".format(Xu, Xu_max))
    else:
        logging.info(
            "  Xu = {:6.2f} > Xu_max = {:6.2f}. Not OK".format(Xu, Xu_max))
    return Xu_max
#===========================================================================
# EC2 Equations - Minimum reinforcement (Md > Mcr)
#===========================================================================
def steel_ratio(As, fck, fyk, b, d, h, Xu, units="MPa"):
    """ Input: As = area of reinforcement steel
               fck = char. comp. strength of concrete
               fyk = char. yield stress of reinforcement
               b = width of beam portion in compression
               d = dist. from comp. to reinforcement
               h = height of reinforced concrete beam
               Xu = maximum dist. to neutral axis
               units = "MPa" or "psi" (default = "MPa")
        Output: A_min = minimum reinforcement area
                A_max = maximum reinforcement area """
    # Normalize all inputs to MPa / mm / mm^2 before applying the EC2 formulas.
    [fck, fyk] = convert_2_MPa(np.array([fck, fyk]), units)
    [b, d, h, Xu] = convert_2_mm(np.array([b, d, h, Xu]), units)
    As = convert_2_mm2(As, units)
    fctm = flex_tensile_strength(fck, h) # units="MPa"
    # EC2 9.1N: As,min = max(0.26*fctm/fyk, 0.0013) * b * d
    A_min = max((0.26*fctm/fyk),0.0013)* (b*d)
    fcd = fck/1.5   # design compressive strength (gamma_c = 1.5)
    fyd = fyk/1.15  # design yield strength (gamma_s = 1.15)
    alpha = alpha_beta(fck)[0] # units="MPa"
    # Upper bound from the concrete compression zone, capped at 0.4*b*d.
    A_max = min(alpha*(fcd/fyd)*b*Xu, 0.4*b*d)
    compare_steel_area(As, A_min, A_max)
    # Non-metric output converts mm^2 -> in^2 (25.4 mm per inch).
    return [A_min, A_max] if units == "MPa" else [A_min/(25.4**2), A_max/(25.4**2)]
| StarcoderdataPython |
9636819 | <reponame>ariadne-pereira/cev-python
from datetime import date
# Read the athlete's birth year and classify them into a swimming category
# based on the age they turn in the current year.
anoNasc = int(input('Digite o ano de nascimento do atleta: '))
idade = date.today().year - anoNasc  # age reached (or to be reached) this year
print('O atleta tem {} anos e sua categoria é: '.format(idade))
# Category bands: <=9 Mirim, <=14 Infantil, <=19 Junior, <=25 Sênior, else Master.
if idade <= 9:
    print('Mirim')
elif idade <= 14:
    print('Infantil')
elif idade <= 19:
    print('Junior')
elif idade <= 25:
    print('Sênior')
else:
    print('Master')
| StarcoderdataPython |
4975759 | from tester import *
from PIL import Image
from PIL import ImagePalette
ImagePalette = ImagePalette.ImagePalette
def test_sanity():
    """A 768-entry (256 per RGB band) palette builds; a short one raises."""
    assert_no_exception(lambda: ImagePalette("RGB", list(range(256))*3))
    assert_exception(ValueError, lambda: ImagePalette("RGB", list(range(256))*2))
def test_getcolor():
    """getcolor() allocates one slot per unique color, then errors when full."""
    palette = ImagePalette()
    map = {}
    # Fill all 256 palette slots with distinct grey values.
    for i in range(256):
        map[palette.getcolor((i, i, i))] = i
    assert_equal(len(map), 256)
    # The palette is full, so a new color can no longer be allocated.
    assert_exception(ValueError, lambda: palette.getcolor((1, 2, 3)))
def test_file():
    """Round-trip a palette through save()/load() via a .lut file."""
    palette = ImagePalette()
    file = tempfile("temp.lut")
    palette.save(file)
    from PIL.ImagePalette import load, raw
    p = load(file)
    # load returns raw palette information
    assert_equal(len(p[0]), 768)
    assert_equal(p[1], "RGB")
    # raw() wraps the raw data back into an ImagePalette instance.
    p = raw(p[1], p[0])
    assert_true(isinstance(p, ImagePalette))
| StarcoderdataPython |
5136175 | from django.apps import AppConfig
class BotReminderConfig(AppConfig):
    """Django application configuration for the bot reminder app."""
    # Dotted path to the app package.
    name = "bot.remind"
    # Unique app label (distinct from the package name to avoid clashes).
    label = "bot_reminder"
| StarcoderdataPython |
6491032 | <filename>smdebug/rules/rule_invoker.py
# First Party
from smdebug.core.logger import get_logger
from smdebug.exceptions import (
NoMoreProfilerData,
RuleEvaluationConditionMet,
StepUnavailable,
TensorUnavailable,
TensorUnavailableForStep,
)
logger = get_logger()
def invoke_rule(rule_obj, start_step=0, end_step=None, raise_eval_cond=False):
    """Invoke a smdebug rule step by step.

    rule_obj        -- the rule instance whose .invoke(step) is called
    start_step      -- first step to evaluate (None is treated as 0)
    end_step        -- stop before this step; None means run until data ends
    raise_eval_cond -- when True, re-raise RuleEvaluationConditionMet to the
                       caller instead of logging and continuing
    """
    step = start_step if start_step is not None else 0
    logger.info("Started execution of rule {} at step {}".format(type(rule_obj).__name__, step))
    while (end_step is None) or (step < end_step):
        try:
            rule_obj.invoke(step)
        except (TensorUnavailableForStep, StepUnavailable, TensorUnavailable) as e:
            # Missing data for this step is non-fatal; move on to the next step.
            logger.debug(str(e))
        except RuleEvaluationConditionMet as e:
            # If raise_eval_cond specified, pop up the exception.
            if raise_eval_cond:
                raise e
            else:
                logger.debug(str(e))
            # In case RuleEvaluationConditionMet indicated the end of the rule, break the execution loop.
            if e.end_of_rule:
                break
        except NoMoreProfilerData as e:
            # Profiler data exhausted: stop evaluating this rule entirely.
            logger.info(
                "No more profiler data for rule {} at timestamp {}".format(
                    type(rule_obj).__name__, e.timestamp
                )
            )
            break
        step += 1
    # decrementing because we increment step in the above line
    logger.info(
        "Ended execution of rule {} at end_step {}".format(type(rule_obj).__name__, step - 1)
    )
| StarcoderdataPython |
5134458 | from .photos import photos
from .sets import sets
__all__ = ['photos', 'sets'] | StarcoderdataPython |
1915290 | # Copyright 2020-2021 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ETOS Environment Provider webserver module."""
import os
import logging
import falcon
from etos_lib.etos import ETOS
from etos_lib.lib.database import Database
from etos_lib.logging.logger import FORMAT_CONFIG
from jsontas.jsontas import JsonTas
from environment_provider.middleware import RequireJSON, JSONTranslator
from environment_provider.lib.celery import APP
from environment_provider.lib.registry import ProviderRegistry
from environment_provider.backend.environment import (
check_environment_status,
get_environment_id,
get_release_id,
release_environment,
request_environment,
)
from environment_provider.backend.register import (
get_iut_provider,
get_execution_space_provider,
get_log_area_provider,
register,
)
from environment_provider.backend.configure import (
configure,
get_configuration,
get_dataset,
get_execution_space_provider_id,
get_iut_provider_id,
get_log_area_provider_id,
)
from environment_provider.backend.subsuite import get_sub_suite, get_id
from environment_provider.backend.common import get_suite_id
class Webserver:
    """Environment provider base endpoint.

    GET  looks up the status of an environment task or releases an environment.
    POST creates a new environment request, executed asynchronously by celery.
    """
    def __init__(self, database, celery_worker):
        """Init with a db class.
        :param database: database class.
        :type database: class
        :param celery_worker: The celery app to use.
        :type celery_worker: :obj:`celery.Celery`
        """
        # The database is stored as a class/factory (not an instance); a fresh
        # instance is created per request, see release().
        self.database = database
        self.celery_worker = celery_worker
    def release(self, response, task_id): # pylint:disable=too-many-locals
        """Release an environment.
        :param response: Response object to edit and return.
        :type response: :obj:`falcon.response`
        :param task_id: Task to release.
        :type task_id: str
        """
        etos = ETOS(
            "ETOS Environment Provider",
            os.getenv("HOSTNAME"),
            "Environment Provider",
        )
        jsontas = JsonTas()
        registry = ProviderRegistry(etos, jsontas, self.database())
        # Look up the celery task that checked out the environment.
        task_result = self.celery_worker.AsyncResult(task_id)
        success, message = release_environment(
            etos, jsontas, registry, task_result, task_id
        )
        if not success:
            # NOTE(review): no error HTTP status is set here -- the response
            # goes out with Falcon's default status and only the body signals
            # the failure.
            response.media = {
                "error": "Failed to release environment",
                "details": message,
                "status": task_result.status if task_result else "PENDING",
            }
            return
        response.status = falcon.HTTP_200
        response.media = {"status": task_result.status if task_result else "PENDING"}
    def on_get(self, request, response):
        """GET endpoint for environment provider API.
        Get environment task or release environment.
        :param request: Falcon request object.
        :type request: :obj:`falcon.request`
        :param response: Falcon response object.
        :type response: :obj:`falcon.response`
        """
        task_id = get_environment_id(request)
        release = get_release_id(request)
        # At least one of 'id' or 'release' must be supplied.
        if task_id is None and release is None:
            raise falcon.HTTPBadRequest(
                "Missing parameters", "'id' or 'release' are required parameters."
            )
        if release:
            self.release(response, release)
            return
        result = check_environment_status(self.celery_worker, task_id)
        response.status = falcon.HTTP_200
        response.media = result
    @staticmethod
    def on_post(request, response):
        """POST endpoint for environment provider API.
        Create a new environment and return it.
        :param request: Falcon request object.
        :type request: :obj:`falcon.request`
        :param response: Falcon response object.
        :type response: :obj:`falcon.response`
        """
        # Kick off the asynchronous environment request; the returned id can
        # then be polled via GET ?id=<task_id>.
        task_id = request_environment(get_suite_id(request))
        response.status = falcon.HTTP_200
        response.media = {"result": "success", "data": {"id": task_id}}
class Configure:
    """Configure endpoint for environment provider. Configure an environment for checkout.
    This endpoint should be called before attempting to checkout an environment so that
    the environment provider is configured to handle it.
    """
    logger = logging.getLogger(__name__)
    def __init__(self, database):
        """Init with a db class.
        :param database: database class.
        :type database: class
        """
        # Stored as a factory; instantiated per request.
        self.database = database
    def on_post(self, request, response):
        """Verify that all parameters are available and configure the provider registry.
        :param request: Falcon request object.
        :type request: :obj:`falcon.request`
        :param response: Falcon response object.
        :type response: :obj:`falcon.response`
        """
        etos = ETOS(
            "ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
        )
        jsontas = JsonTas()
        registry = ProviderRegistry(etos, jsontas, self.database())
        suite_id = get_suite_id(request)
        # Tag all log records produced by this request with the suite id.
        FORMAT_CONFIG.identifier = suite_id
        success, message = configure(
            registry,
            get_iut_provider_id(request),
            get_execution_space_provider_id(request),
            get_log_area_provider_id(request),
            get_dataset(request),
            get_suite_id(request),
        )
        if not success:
            self.logger.error(message)
            raise falcon.HTTPBadRequest("Bad request", message)
        response.status = falcon.HTTP_200
    def on_get(self, request, response):
        """Get an already configured environment based on suite ID.
        Use only to verify that the environment has been configured properly.
        :param request: Falcon request object.
        :type request: :obj:`falcon.request`
        :param response: Falcon response object.
        :type response: :obj:`falcon.response`
        """
        etos = ETOS(
            "ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
        )
        jsontas = JsonTas()
        registry = ProviderRegistry(etos, jsontas, self.database())
        suite_id = get_suite_id(request)
        if suite_id is None:
            raise falcon.HTTPBadRequest(
                "Missing parameters", "'suite_id' is a required parameter."
            )
        FORMAT_CONFIG.identifier = suite_id
        response.status = falcon.HTTP_200
        response.media = get_configuration(registry, suite_id)
class Register: # pylint:disable=too-few-public-methods
    """Register one or several new providers to the environment provider."""
    def __init__(self, database):
        """Init with a db class.
        :param database: database class.
        :type database: class
        """
        self.database = database
    def on_post(self, request, response):
        """Register a new provider.
        :param request: Falcon request object.
        :type request: :obj:`falcon.request`
        :param response: Falcon response object.
        :type response: :obj:`falcon.response`
        """
        etos = ETOS(
            "ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
        )
        jsontas = JsonTas()
        registry = ProviderRegistry(etos, jsontas, self.database())
        # Each provider is optional; register() reports False only when none
        # of the three providers was supplied in the request.
        registered = register(
            registry,
            iut_provider=get_iut_provider(request),
            log_area_provider=get_log_area_provider(request),
            execution_space_provider=get_execution_space_provider(request),
        )
        if registered is False:
            raise falcon.HTTPBadRequest(
                "Missing parameters",
                "At least one of 'iut_provider', 'log_area_provider' "
                "& 'execution_space_provider' is a required parameter.",
            )
        # 204: providers stored, nothing to return in the body.
        response.status = falcon.HTTP_204
class SubSuite: # pylint:disable=too-few-public-methods
    """Get generated sub suites from environment provider."""
    def __init__(self, database):
        """Init with a db class.
        :param database: database class.
        :type database: class
        """
        self.database = database
    def on_get(self, request, response):
        """Get a generated sub suite from environment provider.
        :param request: Falcon request object.
        :type request: :obj:`falcon.request`
        :param response: Falcon response object.
        :type response: :obj:`falcon.response`
        """
        suite = get_sub_suite(self.database(), get_id(request))
        if suite is None:
            raise falcon.HTTPNotFound(
                title="Sub suite not found.",
                # NOTE(review): the lookup above uses get_id() but this message
                # reports get_suite_id(); the id in the error text may differ
                # from the one actually queried -- confirm which is intended.
                description=f"Could not find sub suite with ID {get_suite_id(request)}",
            )
        response.status = falcon.HTTP_200
        response.media = suite
# Application wiring: build the Falcon WSGI app, the endpoint singletons and
# the route table. `APP` is the celery application imported above.
FALCON_APP = falcon.API(middleware=[RequireJSON(), JSONTranslator()])
WEBSERVER = Webserver(Database, APP)
CONFIGURE = Configure(Database)
REGISTER = Register(Database)
SUB_SUITE = SubSuite(Database)
FALCON_APP.add_route("/", WEBSERVER)
FALCON_APP.add_route("/configure", CONFIGURE)
FALCON_APP.add_route("/register", REGISTER)
FALCON_APP.add_route("/sub_suite", SUB_SUITE)
3464802 | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from posts.models import Post
class TestPostModel(TestCase):
    """Unit tests for the Post model helpers."""
    def setUp(self):
        # One published and one unpublished post by the same author.
        self.user = get_user_model().objects.create_user(
            username="testuser",
            email="<EMAIL>",
            password="<PASSWORD>"
        )
        self.post = Post.objects.create(
            title="Test Post",
            slug="test-post",
            content="This is a test post.",
            author=self.user,
            published=True
        )
        self.post1 = Post.objects.create(
            title="Unpublished Test Post",
            slug="unpublished-test-post",
            content="This is an unpublished test post",
            author=self.user,
            published=False
        )
    def test_get_absolute_url(self):
        """get_absolute_url() must match the reversed posts:detail route."""
        url = self.post.get_absolute_url()
        reversed_url = reverse('posts:detail', kwargs={'slug':self.post.slug})
        self.assertEqual(url, reversed_url)
    def test_is_published_on_published_post(self):
        """is_published() is True for a post created with published=True."""
        self.assertTrue(self.post.is_published())
    def test_is_published_on_unpublished_post(self):
        """is_published() is False for a post created with published=False."""
        self.assertFalse(self.post1.is_published())
5062392 | from datetime import datetime
class Cooldown:
    """Per-user, per-command cooldown tracker.

    check_user() returns a 2-tuple:
      * (True, now)        -- the command may run; `now` is the recorded datetime
      * (False, remaining) -- still cooling down; `remaining` is the number of
                              whole seconds left (int)
    """

    # {author_id: {cmd: datetime of last allowed use}} -- shared class state.
    users_cld = {}

    @classmethod
    def check_user(cls, cmd: str, author_id: int, cooldown: int) -> tuple:
        """Record/verify a use of `cmd` by `author_id` with `cooldown` seconds."""
        now = datetime.now()
        user_cmds = cls.users_cld.setdefault(author_id, {})
        last_used = user_cmds.get(cmd)
        if last_used is None:
            # First use of this command by this user: record it and allow.
            # (Bug fix: the original never recorded the timestamp on the very
            # first call for a new user, so the cooldown only started after
            # the *second* use.)
            user_cmds[cmd] = now
            return True, now
        # Bug fix: timedelta.seconds wraps every 24 hours; total_seconds()
        # measures the true elapsed time, so cooldowns spanning a day work.
        elapsed = (now - last_used).total_seconds()
        if elapsed > cooldown:
            user_cmds[cmd] = now
            return True, now
        return False, cooldown - int(elapsed)
| StarcoderdataPython |
9712115 | from datetime import datetime
from typing import Any, Dict
from bluepy.btle import DefaultDelegate, Peripheral
from miblepy import ATTRS
from miblepy.deviceplugin import MibleDevicePlugin
class LYWSD03MMC(MibleDevicePlugin, DefaultDelegate):
    """Reads temperature/humidity/battery from a Xiaomi LYWSD03MMC BLE sensor
    by subscribing to its GATT notifications."""
    plugin_id = "lywsd03mmc"
    plugin_name = "LYWSD03MMC"
    plugin_description = "suports the Temperature/Humidity LCD BLE sensor LYWSD03MMC from Mi/Xiaomi"
    def __init__(self, mac: str, interface: str, **kwargs: Any):
        # Connection handle; created lazily in fetch_data().
        self.peripheral: Peripheral = None
        # Parsed sensor payload, filled by handleNotification().
        self.data: Dict[str, Any] = {}
        super().__init__(mac, interface, **kwargs)
    def fetch_data(self, **kwargs: Any) -> Dict[str, Any]:
        """Connect, wait for one sensor notification and return the parsed data.

        Returns an empty dict if no notification arrives within the timeout.
        """
        # connect to device; "hciN" -> numeric adapter index expected by bluepy
        self.peripheral = Peripheral(self.mac, iface=int(self.interface.replace("hci", "")))
        # attach notification handler
        self.peripheral.setDelegate(self)
        # safe power: https://github.com/JsBergbau/MiTemperature2/issues/18#issuecomment-590986874
        self.peripheral.writeCharacteristic(0x46, bytes([0xF4, 0x01, 0x00]), withResponse=True)
        # waitForNotifications() timeout is in seconds per bluepy's API --
        # NOTE(review): 10000 here looks like it was meant as milliseconds;
        # as written the call may block up to ~2.8 hours. Confirm intent.
        if self.peripheral.waitForNotifications(10000):
            self.peripheral.disconnect()
        return self.data
    def handleNotification(self, cHandle: int, data: bytes) -> None:
        """bluepy delegate callback: parse one sensor notification payload."""
        # Only the sensor-value characteristic (handle 0x36) is of interest.
        if cHandle != 0x36:
            return
        # parse data
        # Payload layout (per the community-documented LYWSD03MMC notification
        # format -- confirm against firmware): bytes 0-1 temperature*100
        # (little-endian, signed), byte 2 humidity %, bytes 3-4 voltage in mV.
        voltage = int.from_bytes(data[3:5], byteorder="little") / 1000
        self.data.update(
            {
                "name": self.plugin_name,
                "sensors": [
                    {
                        "name": f"{self.alias} {ATTRS.TEMPERATURE.value.capitalize()}",
                        "value_template": "{{value_json." + ATTRS.TEMPERATURE.value + "}}",
                        "entity_type": ATTRS.TEMPERATURE,
                    },
                    {
                        "name": f"{self.alias} {ATTRS.HUMIDITY.value.capitalize()}",
                        "value_template": "{{value_json." + ATTRS.HUMIDITY.value + "}}",
                        "entity_type": ATTRS.HUMIDITY,
                    },
                ],
                "attributes": {
                    # 3.1 or above --> 100%  2.1 --> 0 %
                    ATTRS.BATTERY.value: min(int(round((voltage - 2.1), 2) * 100), 100),
                    ATTRS.VOLTAGE.value: str(voltage),
                    ATTRS.TEMPERATURE.value: str(int.from_bytes(data[0:2], byteorder="little", signed=True) / 100),
                    ATTRS.HUMIDITY.value: str(int.from_bytes(data[2:3], byteorder="little")),
                    ATTRS.TIMESTAMP.value: str(datetime.now().isoformat()),
                },
            }
        )
        self.peripheral.disconnect()
| StarcoderdataPython |
# Another task without a catch :)
#
# Write a program that reads real numbers A and B and prints the result of
# dividing A by B.
#
# Recall that true (floating-point) division uses the / operator, as opposed
# to floor division, which uses the // operator.
a = float(input())
b = float(input())
print(a / b)
| StarcoderdataPython |
3350181 | #!/usr/bin/python
#
# File: DockSim.py
# Author: <NAME>
# Email: <EMAIL>
# Date: Dec 20, 2015
#----------------------------------------------------------------------------
from __future__ import print_function, division
from collections import namedtuple
from math import sqrt, trunc
StateVec = namedtuple('StateVec', 'phase distTraveled currVelocity fuelRemaining tEnd')
#----------------------------------------------------------------------------
class FlightParams(object):
    """Container for the flight profile parameters.

        tAft   -- duration of the acceleration burn, in seconds
        tCoast -- duration of the coast phase, in seconds
        tFore  -- duration of the deceleration burn, in seconds
        aAft   -- force of acceleration, in m/sec^2
        aFore  -- force of deceleration, in m/sec^2
        rFuel  -- rate of fuel consumption, in kg/sec
        qFuel  -- initial amount of fuel, in kg
        dist   -- initial distance to the dock, in m
        vMin   -- minimum successful docking velocity, in m/s
        vMax   -- maximum successful docking velocity, in m/s
        vInit  -- ship's initial velocity, in m/s
        tSim   -- maximum duration of the simulation, in seconds (an int)

    The user-supplied flight profile times (tAft, tCoast, tFore) are
    truncated so they are representable as ddd.d; tSim is forced to an int.
    """

    def __init__(self, tAft, tCoast, tFore, aAft, aFore,
                 rFuel, qFuel, dist, vMin, vMax, vInit, tSim):
        def to_tenths(t):
            # Truncate to one decimal place, keeping at most four digits of
            # tenths (i.e. a ddd.d value).
            return (trunc(t * 10) % 10000) / 10.0

        self.tAft = to_tenths(tAft)
        self.tCoast = to_tenths(tCoast)
        self.tFore = to_tenths(tFore)
        self.aAft = aAft
        self.aFore = aFore
        self.rFuel = rFuel
        self.qFuel = qFuel
        self.dist = dist
        self.vMin = vMin
        self.vMax = vMax
        self.vInit = vInit
        self.tSim = int(tSim)
#----------------------------------------------------------------------------
class DockSim(object):
    """ DockSim contains the flight profile simulation parameters and computes
        simulation output values.
    """
    # Flight parameters
    # (TODO: should come from MS Settings table)
    # Default docking window; the per-run limits come from FlightParams
    # (self.vMin / self.vMax) set in __init__.
    MAX_V_DOCK = 0.1   # max terminal velocity for successful dock in m/sec
    MIN_V_DOCK = 0.01  # min terminal velocity for successful dock in m/sec
    INITIAL_V  = 0.0   # velocity at start of simulation in m/sec

    # Longest flight time allowed
    # (must be greater than maximum burn length self.qFuel/self.rFuel)
    # (TODO: should come from MS Settings table)
    MAX_FLIGHT_DURATION_S = 1000 * 60  # 1000 minutes

    # Flight phases, in chronological order of the profile.
    START_PHASE = 0
    ACCEL_PHASE = 1
    COAST_PHASE = 2
    DECEL_PHASE = 3
    GLIDE_PHASE = 4
    END_PHASE   = 5

    # Human-readable names for the phase constants above.
    PHASE_STR = { START_PHASE: "START",
                  ACCEL_PHASE: "ACCELERATE",
                  COAST_PHASE: "COAST",
                  DECEL_PHASE: "DECELERATE",
                  GLIDE_PHASE: "GLIDE",
                  END_PHASE  : "END",
                }

    # Status value returned at end of travel interval computation
    INTERVAL_DNF  = 0  # Did not finish
    INTERVAL_DEST = 1  # Dest reached
    INTERVAL_END  = 2  # End of time interval reached

    # Final simulation result conditions
    OUTCOME_DNF      = "OUTCOME_DNF"
    OUTCOME_NO_FUEL  = "OUTCOME_NO_FUEL"
    OUTCOME_TOO_SLOW = "OUTCOME_TOO_SLOW"
    OUTCOME_TOO_FAST = "OUTCOME_TOO_FAST"
    OUTCOME_SUCCESS  = "OUTCOME_SUCCESS"
    def __init__(self, fp):
        """ Store the simulation parameters.
            fp is a FlightParams namedtuple.
            Raises ValueError if any of the flight characteristics are out of
            range, but allows the user-supplied time values to be anything.
        """
        # User-supplied flight profile parameters
        # (already truncated/normalized by FlightParams, so not re-validated here)
        self.tAft = fp.tAft     # sec (aft acceleration burn)
        self.tCoast = fp.tCoast # sec (coasting interval)
        self.tFore = fp.tFore   # sec (forward deceleration burn)

        # Capsule flight characteristics parameters
        self.aAft = fp.aAft     # m/sec^2 (aft acceleration)
        self.aFore = fp.aFore   # m/sec^2 (forward deceleration)
        self.rFuel = fp.rFuel   # kg/sec (fuel consumption rate)
        self.qFuel = fp.qFuel   # kg (initial fuel quantity)
        self.dist = fp.dist     # m (initial distance to dock)
        self.vMin = fp.vMin     # m/s (min docking velocity)
        self.vMax = fp.vMax     # m/s (max docking velocity)
        self.v0 = fp.vInit      # m/sec (initial velocity)

        # Validate some parameters; anything non-physical makes the
        # simulation meaningless, so fail fast with a descriptive message.
        if self.rFuel <= 0.0:
            raise ValueError("Fuel consumption rate must be greater than 0 if you hope to get anywhere")
        if self.qFuel <= 0.0:
            raise ValueError("Fuel quantity must be greater than 0 if you hope to get anywhere")
        if self.dist <= 0.0:
            raise ValueError("Distance to travel must be greater than 0")
        if self.aFore <= 0.0:
            raise ValueError("Fore thruster (nose maneuvering jets) acceleration must be greater than 0")
        if self.aAft <= 0.0:
            raise ValueError("Aft thruster (rear engine) acceleration must be greater than 0")
def outcome(self, state):
""" Determine the nature of the failure from the final state """
status = self.OUTCOME_SUCCESS
if state.currVelocity <= 0.0:
status = self.OUTCOME_DNF
elif state.fuelRemaining <= 0.0:
status = self.OUTCOME_NO_FUEL
elif state.currVelocity < self.vMin:
status = self.OUTCOME_TOO_SLOW
elif state.currVelocity > self.vMax:
status = self.OUTCOME_TOO_FAST
return status
    def accelVelocity(self):
        """ Return the velocity at the end of the acceleration phase """
        # Full ship state at t = tAft; only the velocity component is needed.
        return self.shipState(self.tAft).currVelocity
    def coastVelocity(self):
        """ Return the velocity during the coast phase """
        # Velocity is constant while coasting, so any time in the coast
        # interval works; sample its end (tAft + tCoast).
        return self.shipState(self.tAft + self.tCoast).currVelocity
    def decelVelocity(self):
        """ Return the velocity at the end of the deceleration phase """
        # Sample the state at the end of all three user-profile phases.
        return self.shipState(self.tAft + self.tCoast + self.tFore).currVelocity
    def terminalVelocity(self):
        """ Return the terminal velocity of the maneuver. """
        # Velocity at the full flight duration, i.e. at docking (or failure).
        return self.shipState(self.flightDuration()).currVelocity
def safeDockingVelocity(self, v):
""" Return True if v is in the safe docking range """
return v >= self.vMin and v <= self.vMax
    def dockIsSuccessful(self):
        """ Return True if the ship docks with a terminal velocity
            between self.vMin and self.vMax.
        """
        # Delegates to safeDockingVelocity() applied to the end-of-flight speed.
        return self.safeDockingVelocity(self.terminalVelocity())
def distanceTraveled(self, dt, v0, a=0.0):
""" Compute the distance traveled.
dt is the amount of time traveled, in seconds
v0 is the velocity at the start of the time interval, in m/s
a is the amount of constant acceleration being applied during
the interval, in m/s^2
Returns the distance traveled during the timeInterval, in meters
computed by the formula d = v0 * dt + 0.5 * a * dt**2
"""
return (v0 + 0.5 * a * dt) * dt
def velocity(self, dt, v0, a):
""" Compute resulting velocity from initial velocity, accel, and time interval """
return v0 + a * dt
def fuelConsumed(self, dt):
""" Compute amount of fuel consumed by a burn of dt """
return dt * self.rFuel # time * rate of consumption
def timeToTravel(self, d, v0, a):
""" Return the time it takes to traverse a distance, d.
d is the distance to be traversed, in meters (d >= 0)
v0 is the initial velocity, in m/s
a is the constant acceleration, in m/s**2
Returns the positive time in seconds to go the distance d,
or the negative time it takes for the velocity to go to 0
if a negative acceleration causes the velocity to go negative,
or None if v0 <= 0
Note: This may handle more cases than it needs to, but that's
okay.
"""
if a == 0.0:
if v0 == 0.0:
return None
else:
return d/v0
else:
disc = v0**2 - 2.0 * a * (-d)
if disc < 0.0:
# Negative acceleration will cause the velocity to go negative,
# also resulting in no real solution for the time
# so instead we will return the time it takes the velocity to go to zero
return v0/a # either v0 or a is negative
else:
return (-v0 + sqrt(v0**2 - 2.0 * a * (-d))) / a
def timeToStop(self, v0, a):
""" Return the time it takes for velocity to go to zero.
v0 must be >= 0.0 or ValueError is raised.
a must be < 0.0 or ValueError is raised.
Returns the time in seconds for the initial velocity to be
reduced to zero.
"""
if a >= 0.0:
raise ValueError("a must be < 0.0")
if v0 < 0.0:
raise ValueError("v0 must be >= 0.0")
# Use: v = v0 + a * t
# Solve for v = 0:
# v0 + a * t = 0
# t = -v0/a
return -v0/a
def timeUntilFuelRunsOut(self, qFuel):
""" Return the burn time until fuel is completely consumed.
qFuel is the amount of fuel, in kg.
Assumes constant burn rate, self.rFuel.
Returns the time in seconds of the maximum burn.
"""
return qFuel/self.rFuel
    def computeNoThrustTravelInterval(self, dt, v0, distToDest, qFuel):
        """ Compute distance traveled, ending velocity, time remaining, and end condition.
            Assumes acceleration is 0 (no thrust), so fuel is not an issue.
            The initial velocity, v0, and the quantity of fuel, qFuel, are
            passed in, but are just passed through unchanged.
            Return a tuple (distanceTraveled, v0, qFuel, timeRemaining, endCondition)
            where endCondition is one of the INTERVAL_* constants.
        """
        # Are we there already?
        if distToDest <= 0.0:
            return(0.0,   # did not take any time to reach dest
                   v0,    # velocity unchanged
                   qFuel, # fuel quantity unchanged
                   dt,    # no time used
                   self.INTERVAL_DEST # destination was reached
                  )

        # Compute time to reach destination
        tDest = self.timeToTravel(distToDest, v0, 0.0)

        # If tDest is None, the destination will never be reached because the
        # velocity is 0 or negative
        if tDest is None:
            return (0.0,   # say that no distance toward the target was traversed
                    v0,    # velocity unchanged
                    qFuel, # fuel quantity unchanged
                    dt,    # say that no time was used progressing to the dest
                    self.INTERVAL_DNF
                   )
        else:
            if tDest < dt:
                # Destination was reached within this time interval
                return (distToDest, # distance to dest was traversed
                        v0,         # velocity unchanged
                        qFuel,      # fuel quantity unchanged
                        dt - tDest, # time remaining in interval
                        self.INTERVAL_DEST # destination was reached
                       )
            else:
                # Reached end of time interval before reaching dest
                # (distance covered at constant velocity over the whole dt)
                return (self.distanceTraveled(dt, v0), # the distance that was traveled
                        v0,    # velocity unchanged
                        qFuel, # fuel quantity unchanged
                        0.0,   # end of interval reached
                        self.INTERVAL_END
                       )
def computeTravelInterval(self, dt, v0, a, distToDest, qFuel):
    """ Compute distance traveled, velocity, and fuel consumed.
        Correctly handles running out of fuel part way through
        the interval. Also checks whether the ship would travel
        past the destination.

        dt: interval length (s); v0: initial velocity (m/s, must be >= 0);
        a: constant acceleration (m/s^2, negative = deceleration);
        distToDest: remaining distance to the dock (m); qFuel: fuel (kg).

        If the destination is reached during dt, the values are
        returned with dist = distToDest, and v = the final velocity
        at docking (not 0).
        Return a tuple (distanceTraveled, endVelocity, fuelRemaining, timeRemaining, endCondition)
    """
    # Validate the inputs
    if distToDest <= 0.0:  # we are done
        return (0.0,                # already at (or past) dest
                v0,                 # velocity unchanged
                qFuel,              # fuel quantity unchanged
                dt,                 # no time used
                self.INTERVAL_DEST  # destination reached
                )
    if dt <= 0.0:
        return (0.0,                # position unchanged
                v0,                 # velocity unchanged
                qFuel,              # fuel quantity unchanged
                dt,                 # no time used
                self.INTERVAL_END   # end of time interval
                )
    if v0 < 0.0:
        raise ValueError("v0 must be >= 0.0")
    # If there is no acceleration or deceleration, do a simpler
    # constant velocity computation
    if a == 0.0:
        return self.computeNoThrustTravelInterval(dt, v0, distToDest, qFuel)
    # Compute how long the engines can fire before fuel runs out
    tFuel = self.timeUntilFuelRunsOut(qFuel)
    # If the craft is decelerating, it could stop or reverse before reaching the
    # destination. If so, determine the time until v crosses 0 (how long the engines
    # can fire before the ship stops making progress toward the destination).
    # Otherwise, set tStop to a really big number.
    if a < 0.0:
        tStop = self.timeToStop(v0, a)
    else:
        tStop = self.MAX_FLIGHT_DURATION_S
    # The time under acceleration is the shorter of the time until the ship
    # stops due to deceleration, or the time until the fuel runs out
    tAccel = min(tFuel, tStop)
    # Compute the distance traveled during the time under acceleration
    distTraveled = self.distanceTraveled(tAccel, v0, a)
    # Is dest reached while under acceleration (with engines firing), which
    # is True if the distTraveled under acceleration would be greater than the
    # distance to the destination. If so, shorten the time under acceleration
    # to the time until the destination is reached, and set the distance traveled
    # to be the distance to the dest.
    if distTraveled >= distToDest:
        tAccel = self.timeToTravel(distToDest, v0, a)
        distTraveled = distToDest
    # Does the end of the time interval occur before dest reached or fuel runs out
    # i.e., while the ship is still accelerating? If so, it is straightforward to
    # calculate the state values using dt.
    if dt < tAccel:
        return (self.distanceTraveled(dt, v0, a),   # dist traveled during interval
                self.velocity(dt, v0, a),           # velocity at end of interval
                qFuel - self.fuelConsumed(dt),      # fuel remaining
                0.0,                                # entire interval used
                self.INTERVAL_END                   # dest not reached yet
                )
    # If this code is reached, the ship either ran out of fuel, or the velocity
    # went to zero or negative. If the time under acceleration is the same as
    # the time until v = 0, then the velocity went to zero before fuel ran out.
    # The destination will never be reached, so compute values for the time
    # interval up to where v goes to zero.
    if tAccel == tStop:
        # Flight will never reach destination
        return (self.distanceTraveled(tStop, v0, a),   # forward progress made
                0.0,                                   # ship stopped
                qFuel - self.fuelConsumed(tStop),      # fuel remaining
                dt - tStop,                            # time remaining after ship stopped
                self.INTERVAL_DNF                      # dest will not be reached
                )
    # Either dest reached (tAccel < tFuel) or fuel runs out, continue at constant velocity
    cDist,cVel,cFuel,cTime,cState = self.computeNoThrustTravelInterval(dt - tAccel,  # time remaining after acceleration
                                        self.velocity(tAccel, v0, a),  # velocity after acceleration
                                        distToDest - self.distanceTraveled(tAccel, v0, a),  # dist remaining after acceleration
                                        0.0 if tFuel < tAccel else qFuel - self.fuelConsumed(tAccel)  # fuel remaining
                                        )
    return (self.distanceTraveled(tAccel, v0, a) + cDist,  # total dist after both phases
            cVel,   # final velocity is passed through
            cFuel,  # final fuel quantity is passed through (0.0)
            cTime,  # whatever is left after constV phase
            cState  # final state is state at end of constV phase
            )
def currPhase(self, t):
    """Map an elapsed time t (seconds) onto the flight phase it falls in."""
    if t <= 0.0:
        return self.START_PHASE
    # Cumulative end times of the three powered/coast phases, in order.
    boundaries = (
        (self.tAft, self.ACCEL_PHASE),
        (self.tAft + self.tCoast, self.COAST_PHASE),
        (self.tAft + self.tCoast + self.tFore, self.DECEL_PHASE),
    )
    for phaseEnd, phase in boundaries:
        if t <= phaseEnd:
            return phase
    return self.GLIDE_PHASE
def nextPhase(self, phase):
    """Return the phase that follows *phase* in the flight sequence.

    END_PHASE is absorbing: its successor is END_PHASE itself.
    An unknown phase raises KeyError.
    """
    sequence = (self.START_PHASE, self.ACCEL_PHASE, self.COAST_PHASE,
                self.DECEL_PHASE, self.GLIDE_PHASE, self.END_PHASE)
    successor = dict(zip(sequence, sequence[1:] + (self.END_PHASE,)))
    return successor[phase]
def acceleration(self, phase):
    """Return the ship's acceleration (m/s^2) while in *phase*.

    Aft thrusters accelerate (+aAft), fore thrusters decelerate (-aFore);
    every other phase coasts at zero.  Unknown phases raise KeyError.
    """
    accelByPhase = {
        self.ACCEL_PHASE: self.aAft,
        self.DECEL_PHASE: -self.aFore,
    }
    coastPhases = (self.START_PHASE, self.COAST_PHASE,
                   self.GLIDE_PHASE, self.END_PHASE)
    accelByPhase.update(dict.fromkeys(coastPhases, 0.0))
    return accelByPhase[phase]
def computePhase(self, t, stateVec):
    """Simulate one acceleration phase ending at absolute time t.

    stateVec is the state at the end of the previous phase.
    Returns a StateVec (phase, distTraveled, currVelocity, fuelRemaining, tEnd).
    """
    newPhase = self.nextPhase(stateVec.phase)
    thrust = self.acceleration(newPhase)
    elapsed = t - stateVec.tEnd  # span between end of previous phase and t
    remaining = self.dist - stateVec.distTraveled
    progressed, velocity, fuelLeft, timeLeft, status = self.computeTravelInterval(
        elapsed, stateVec.currVelocity, thrust, remaining, stateVec.fuelRemaining)
    # Flight is over if the ship either docked or can no longer reach the dock.
    if status in (self.INTERVAL_DNF, self.INTERVAL_DEST):
        newPhase = self.END_PHASE
    phaseEnd = stateVec.tEnd + elapsed - timeLeft
    return StateVec(newPhase,
                    stateVec.distTraveled + progressed,
                    velocity,
                    fuelLeft,
                    phaseEnd)
def flightDuration(self):
    """Return the total duration of the docking maneuver in seconds.

    Returns None when the ship's velocity at the end of the final burn is
    not positive, i.e. it will never reach the dock.
    """
    burnEnd = self.tAft + self.tCoast + self.tFore
    state = self.shipState(burnEnd)
    if state.phase == self.END_PHASE:
        return state.tEnd
    if state.currVelocity <= 0.0:
        # Ship stalled before reaching the dock; it will never arrive.
        return None
    # Still moving after the last burn: add the constant-velocity glide time.
    glideTime = self.timeToTravel(self.dist - state.distTraveled,
                                  state.currVelocity, 0.0)
    return state.tEnd + glideTime
def shipState(self, t):
    """Return the ship state vector for time t.

    t is seconds since the start of the maneuver.
    Returns a StateVec (phase, distTraveled, currVelocity, fuelRemaining, tEnd).
    """
    # Initial state: at rest position with full fuel and initial velocity.
    state = StateVec(phase=self.START_PHASE,
                     distTraveled=0.0,
                     currVelocity=self.v0,
                     fuelRemaining=self.qFuel,
                     tEnd=0.0)
    # Cumulative end times of the accel, coast and decel phases.
    boundaries = [self.tAft,
                  self.tAft + self.tCoast,
                  self.tAft + self.tCoast + self.tFore]
    idx = 0
    # Step through whole phases that end before t; finish with a partial
    # phase at t itself.  Stop early once the simulation reaches END_PHASE.
    while state.phase != self.END_PHASE:
        if idx < len(boundaries) and t > boundaries[idx]:
            state = self.computePhase(boundaries[idx], state)
            idx += 1
        else:
            # Compute the final (possibly partial) time segment
            state = self.computePhase(t, state)
            break
    return state
#----------------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    # Ad-hoc smoke test: fly one docking profile and print the ship state at
    # one-second intervals until the maneuver ends.
    fp = FlightParams(tAft=9.2,
                      tCoast=1,
                      tFore=13.1,
                      aAft=0.15,
                      aFore=0.09,
                      rFuel=0.7,
                      qFuel=20,
                      dist=15.0,
                      vMin=0.01,
                      vMax=0.1,
                      vInit=0.0,
                      tSim=45,
                      )
    ds = DockSim(fp)

    t = 0.0
    while True:
        s = ds.shipState(t)
        print("{}: {}".format(t, s))
        if s.phase == DockSim.END_PHASE:
            break
        t += 1.0

    print("dockIsSuccessful:", ds.dockIsSuccessful())
    print("terminalVelocity:", ds.terminalVelocity())
    print("flightDuration:", ds.flightDuration())
    print("StateVec:", ds.shipState(ds.flightDuration()))
    sys.exit(0)
| StarcoderdataPython |
220110 | <reponame>BluecellChen/Python-Challenge<filename>pyPoll/main.py
#!/usr/bin/env python
# coding: utf-8
# In[4]:
# Import Modules / Dependencies
import os
import csv
# Create file path
csv_path = os.path.join("..", "PyPoll", "Resources", "election_data.csv")
csv_path
#Voter ID,County,Candidate
# In[13]:
# Read in the CSV file
with open(csv_path, newline="") as csvfile:
# split the data on commas
csv_reader = csv.reader(csvfile, delimiter = ",")
header = next(csv_reader)
# create variables /lists to add the csv values to
vote_count = 0
Khan_count = 0
Correy_count = 0
Li_count = 0
OTooley_count = 0
# candidate = []
# Loop through the data, count the total number of votes
for row in csv_reader:
vote_count += 1
# Calculate total number of votes for each candidate
if (row[2] == "Khan"):
Khan_count += 1
elif (row[2] == "Correy"):
Correy_count += 1
elif (row[2] == "Li"):
Li_count += 1
else:
OTooley_count += 1
# percentage of votes for each candidate
Khan_percent = Khan_count / vote_count
Correy_percent = Correy_count / vote_count
Li_percent = Li_count / vote_count
OTooley_percent = OTooley_count / vote_count
# winner has the most votes
winner = max(Khan_count, Correy_count, Li_count, OTooley_count)
if winner == Khan_count:
winner_is = "Khan"
elif winner == Correy_count:
winner_is = "Correy"
elif winner == Li_count:
winner_is = "Li"
else:
winner_is = "O'Tooley"
# In[15]:
output = os.path.join("..", "PyPoll", "Resources", "PyPollOutput.txt")
with open(output,"w") as PyPoll_analysis:
PyPoll_analysis.write(f"Election Results\n")
PyPoll_analysis.write(f"-------------------------\n")
PyPoll_analysis.write(f"Total Votes: {vote_count}\n")
PyPoll_analysis.write(f"-------------------------\n")
PyPoll_analysis.write(f"Khan: {Khan_percent:.3%}({Khan_count})\n")
PyPoll_analysis.write(f"Correy: {Correy_percent:.3%}({Correy_count})\n")
PyPoll_analysis.write(f"Li: {Li_percent:.3%}({Li_count})\n")
PyPoll_analysis.write(f"O'Tooley: {OTooley_percent:.3%}({OTooley_count})\n")
PyPoll_analysis.write(f"-------------------------\n")
PyPoll_analysis.write(f"Winner: {winner_is}\n")
PyPoll_analysis.write(f"-------------------------\n")
with open(output, 'r') as readfile:
print(readfile.read())
# In[26]:
print(f"Election Results")
print(f"-------------------------")
print(f"Total Votes:{vote_count}")
print(f"-------------------------")
print(f"Khan: {Khan_percent:.3%}({Khan_count})")
print(f"Correy: {Correy_percent:.3%}({Correy_count})")
print(f"Li: {Li_percent:.3%}({Li_count})")
print(f"O'Tooley: {OTooley_percent:.3%}({OTooley_count})")
print(f"-------------------------")
print(f"Winner: {winner_is}")
print(f"-------------------------")
| StarcoderdataPython |
1993421 | import datetime
from datetime import date
import pytest
from regolith.dates import (month_to_str_int,
day_to_str_int,
find_gaps_overlaps,
get_dates, last_day,
is_current, get_due_date,
has_started, has_finished,
is_before, is_after, is_between)
# Fixed reference dates so the tests below are reproducible.
TEST_DATE = date(2019, 6, 15)        # "now" for is_current/has_started/... tests
TEST_START_DATE = date(2019, 1, 1)   # window start for the is_between tests
TEST_END_DATE = date(2019, 2, 5)     # window end for the is_between tests
@pytest.mark.parametrize(
    "value,expected",
    [
        (1, "01"),
        (10, "10"),
        ("Oct", "10"),
        ("Jan", "01"),
    ],
)
def test_month_to_str(value, expected):
    """month_to_str_int() zero-pads ints and converts month names to 'MM'."""
    # Parameter renamed from `input` to avoid shadowing the builtin.
    assert month_to_str_int(value) == expected
# NOTE(review): these imports sit mid-module; per PEP 8 they should be merged
# into the import block at the top of the file.
import datetime
from regolith.dates import date_to_float, month_to_int
@pytest.mark.parametrize(
    "input,expected",
    [
        ('2020-05-01', datetime.date(2020, 5, 1)),
        (datetime.date(2020, 5, 1), datetime.date(2020, 5, 1)),
        (2020, True),
    ],
)
def test_get_due_date(input, expected):
    # NOTE(review): pytest.raises wraps *every* parametrized case, so this
    # test only passes if get_due_date raises for all three inputs --
    # including the two that look like valid dates.  Confirm that is really
    # the intended contract; otherwise only the (2020, True) case belongs here.
    with pytest.raises(Exception):
        assert get_due_date(input) == expected
@pytest.mark.parametrize(
    "value,expected",
    [
        ('Jan', 1),
        (1, 1),
        ('February', 2)
    ],
)
def test_month_to_int(value, expected):
    """month_to_int() accepts abbreviations, full names and plain ints."""
    # Parameter renamed from `input` to avoid shadowing the builtin.
    assert month_to_int(value) == expected
@pytest.mark.parametrize(
    "ymd,expected",
    [
        ([2019, 1, 15], 2019.0115),
        ([2019, 'May', 0], 2019.05),
        ([2019, 'February', 2], 2019.0202)
    ],
)
def test_date_to_float(ymd, expected):
    """date_to_float() encodes (year, month, day) as a sortable float."""
    # Parameter renamed from `input` (builtin shadowing); unpack for clarity
    # instead of indexing ymd[0]/ymd[1]/ymd[2].
    year, month, day = ymd
    assert date_to_float(year, month, d=day) == expected
@pytest.mark.parametrize(
    "value,expected",
    [
        (1, "01"),
        (10, "10"),
    ],
)
def test_day_to_str(value, expected):
    """day_to_str_int() zero-pads day numbers to two characters."""
    # Parameter renamed from `input` to avoid shadowing the builtin.
    assert day_to_str_int(value) == expected
@pytest.mark.parametrize(
    "intervals,overlaps_ok,expected",
    [
        ([(date(2020, 1, 1), date(2020, 1, 31)),
          (date(2020, 2, 1), date(2020, 2, 5))], False, True),
        ([(date(2020, 2, 1), date(2020, 2, 5)),
          (date(2020, 1, 1), date(2020, 1, 31))], False, True),
        ([(date(2020, 1, 1), date(2020, 1, 31)),
          (date(2020, 2, 2), date(2020, 2, 5))], False, False),
        ([(date(2020, 1, 1), date(2020, 1, 31)),
          (date(2020, 1, 31), date(2020, 2, 5))], False, False),
        ([(date(2020, 1, 1), date(2020, 1, 31)),
          (date(2020, 1, 31), date(2020, 2, 5))], True, True),
        ([(date(2020, 1, 1), date(2020, 1, 31)),
          (date(2020, 2, 1), date(2020, 2, 5)),
          (date(2020, 2, 6), date(2020, 2, 7))], False, True),
        ([(date(2020, 1, 1), date(2020, 1, 31)),
          (date(2020, 2, 1), date(2020, 2, 5)),
          (date(2020, 2, 7), date(2020, 2, 7))], False, False)
    ],
)
def test_find_gaps_overlaps(intervals, overlaps_ok, expected):
    """find_gaps_overlaps() detects gaps/overlaps in (begin, end) date spans.

    Parameters renamed from `input`/`flag` (builtin shadowing / vagueness).
    """
    actual = find_gaps_overlaps(intervals, overlaps_ok=overlaps_ok)
    assert actual == expected
@pytest.mark.parametrize(
    "input,expected",
    [
        # year alone spans the whole calendar year
        ({'year': 2020}, {'begin_date': datetime.date(2020, 1, 1),
                          'end_date': datetime.date(2020, 12, 31),
                          'date': None
                          }
         ),
        # year + month spans that month
        ({'year': 2020, 'month': 9},
         {'begin_date': datetime.date(2020, 9, 1),
          'end_date': datetime.date(2020, 9, 30),
          'date': None
          }
         ),
        # a full y/m/d collapses to a single 'date'
        ({'year': 2020, 'month': 'Sep', 'day': 15},
         {'begin_date': datetime.date(2020, 9, 15),
          'end_date': datetime.date(2020, 9, 15),
          'date': datetime.date(2020, 9, 15)
          }
         ),
        # begin_* fields without end_* leave end_date open
        ({'begin_year': 2020},
         {'begin_date': datetime.date(2020, 1, 1),
          'end_date': None,
          'date': None
          }
         ),
        ({'begin_year': 2020, 'begin_month': 4},
         {'begin_date': datetime.date(2020, 4, 1),
          'end_date': None,
          'date': None
          }
         ),
        ({'begin_year': 2020, 'begin_month': 4, 'begin_day': 5},
         {'begin_date': datetime.date(2020, 4, 5),
          'end_date': None,
          'date': None
          }
         ),
        # begin/end year pairs; missing end month/day default to year end
        ({'begin_year': 2019, 'end_year': 2020},
         {'begin_date': datetime.date(2019, 1, 1),
          'end_date': datetime.date(2020, 12, 31),
          'date': None
          }
         ),
        # Feb 2020 resolves to the leap-year last day
        ({'begin_year': 2019, 'end_year': 2020, 'end_month': 'Feb'},
         {'begin_date': datetime.date(2019, 1, 1),
          'end_date': datetime.date(2020, 2, 29),
          'date': None
          }
         ),
        ({'begin_year': 2019, 'end_year': 2020, 'end_month': 'Feb', 'end_day': 10},
         {'begin_date': datetime.date(2019, 1, 1),
          'end_date': datetime.date(2020, 2, 10),
          'date': None
          }
         ),
        # explicit begin_date/end_date strings override the component fields
        ({'begin_date': '2020-05-09', 'begin_year': 2019, 'end_year': 2020, 'end_month': 'Feb',
          'end_day': 10},
         {'begin_date': datetime.date(2020, 5, 9),
          'end_date': datetime.date(2020, 2, 10),
          'date': None
          }
         ),
        ({'end_date': '2020-5-20', 'begin_year': 2019, 'end_year': 2020, 'end_month': 'Feb',
          'end_day': 10},
         {'begin_date': datetime.date(2019, 1, 1),
          'end_date': datetime.date(2020, 5, 20),
          'date': None
          }
         ),
        # 'date' may be a string or a date object and is passed through
        ({'date': '2020-5-20', 'begin_year': 2019, 'end_year': 2020,
          'end_month': 'Feb',
          'end_day': 10},
         {'begin_date': datetime.date(2019, 1, 1),
          'end_date': datetime.date(2020, 2, 10),
          'date': datetime.date(2020, 5, 20)
          }
         ),
        ({'date': datetime.date(2020, 5, 20), 'begin_year': 2019, 'end_year': 2020,
          'end_month': 'Feb',
          'end_day': 10},
         {'begin_date': datetime.date(2019, 1, 1),
          'end_date': datetime.date(2020, 2, 10),
          'date': datetime.date(2020, 5, 20)
          }
         ),
        ({'date': datetime.date(2020, 5, 20), 'begin_date': datetime.date(2015, 6, 8),
          'end_date': datetime.date(2025, 10, 4)},
         {'begin_date': datetime.date(2015, 6, 8),
          'end_date': datetime.date(2025, 10, 4),
          'date': datetime.date(2020, 5, 20)
          }
         ),
    ],
)
def test_get_dates(input, expected):
    """get_dates() normalises year/month/day fields into begin/end/date values."""
    actual = get_dates(input)
    assert actual == expected
@pytest.mark.parametrize(
    "year,month,expected",
    [
        (2020, 2, 29),
        (2020, 'Feb', 29)
    ]
)
def test_last_day(year, month, expected):
    """last_day() handles numeric and named months (2020 is a leap year)."""
    actual = last_day(year, month)
    assert actual == expected
@pytest.mark.parametrize(
    "thing,expected",
    [
        ({"begin_date": '2020-01-01', "end_date": '2020-12-31'}, False),
        ({"begin_date": '2019-01-01', "end_date": '2020-12-31'}, True),
        ({"begin_date": '2019-01-01'}, True),
        ({"begin_year": 2018}, True),
        ({"begin_year": 2019}, True),
        ({"begin_year": 2020}, False),
        ({"begin_year": 2019, "begin_month": "Apr"}, True),
        ({"begin_year": 2019, "begin_month": "Jun"}, True),
        ({"begin_year": 2019, "begin_month": "Jul"}, False),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 14}, True),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 15}, True),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 16}, False),
        ({"year": 2018}, False),
        ({"year": 2019}, True),
        ({"year": 2020}, False),
        ({"year": 2019, "month": "Apr"}, False),
        ({"year": 2019, "month": "Jun"}, True),
        ({"year": 2019, "month": "Jul"}, False),
        ({"year": 2019, "month": "Jun", "day": 14}, False),
        ({"year": 2019, "month": "Jun", "day": 15}, True),
        ({"year": 2019, "month": "Jun", "day": 16}, False),
    ]
)
def test_is_current(thing, expected, now=TEST_DATE):
    """is_current(): True iff `thing`'s span contains `now` (2019-06-15)."""
    assert is_current(thing, now=now) == expected


@pytest.mark.parametrize(
    "thing,expected",
    [
        ({"begin_date": '2020-01-01', "end_date": '2020-12-31'}, False),
        ({"begin_date": '2019-01-01', "end_date": '2020-12-31'}, True),
        ({"begin_date": '2019-01-01'}, True),
        ({"begin_year": 2018}, True),
        ({"begin_year": 2019}, True),
        ({"begin_year": 2020}, False),
        ({"begin_year": 2019, "begin_month": "Apr"}, True),
        ({"begin_year": 2019, "begin_month": "Jun"}, True),
        ({"begin_year": 2019, "begin_month": "Jul"}, False),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 14}, True),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 15}, True),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 16}, False),
        ({"year": 2018}, True),
        ({"year": 2019}, True),
        ({"year": 2020}, False),
        ({"year": 2019, "month": "Apr"}, True),
        ({"year": 2019, "month": "Jun"}, True),
        ({"year": 2019, "month": "Jul"}, False),
        ({"year": 2019, "month": "Jun", "day": 14}, True),
        ({"year": 2019, "month": "Jun", "day": 15}, True),
        ({"year": 2019, "month": "Jun", "day": 16}, False),
    ]
)
def test_has_started(thing, expected, now=TEST_DATE):
    """has_started(): True iff `thing`'s begin date is on or before `now`."""
    assert has_started(thing, now=now) == expected


@pytest.mark.parametrize(
    "thing,expected",
    [
        ({"begin_date": '2020-01-01', "end_date": '2020-12-31'}, False),
        ({"begin_date": '2019-01-01', "end_date": '2019-06-15'}, False),
        ({"begin_date": '2019-01-01'}, False),
        ({"begin_year": 2018}, False),
        ({"begin_year": 2019}, False),
        ({"begin_year": 2020}, False),
        ({"begin_year": 2019, "begin_month": "Apr"}, False),
        ({"begin_year": 2019, "begin_month": "Jun"}, False),
        ({"begin_year": 2019, "begin_month": "Jul"}, False),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 14}, False),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 15}, False),
        ({"begin_year": 2019, "begin_month": "Jun", "begin_day": 16}, False),
        ({"year": 2018}, True),
        ({"year": 2019}, False),
        ({"year": 2020}, False),
        ({"year": 2019, "month": "Apr"}, True),
        ({"year": 2019, "month": "Jun"}, False),
        ({"year": 2019, "month": "Jul"}, False),
        ({"year": 2019, "month": "Jun", "day": 14}, True),
        ({"year": 2019, "month": "Jun", "day": 15}, False),
        ({"year": 2019, "month": "Jun", "day": 16}, False),
    ]
)
def test_has_finished(thing, expected, now=TEST_DATE):
    """has_finished(): True iff `thing`'s end date is strictly before `now`."""
    assert has_finished(thing, now=now) == expected
@pytest.mark.parametrize(
    "thing,expected",
    [
        ({"year": 2019, "month": "Jun", "day": 14}, True),
        ({"year": 2019, "month": "Jun", "day": 15}, False),
        ({"year": 2019, "month": "Jun", "day": 16}, False),
        ({"date": "2019-04-15"}, True),
        ({"date": "2019-08-10"}, False),
    ]
)
def test_is_before(thing, expected, now=TEST_DATE):
    """is_before() is strict: the reference date itself does not count."""
    result = is_before(thing, now=now)
    assert result == expected
@pytest.mark.parametrize(
    "thing,expected",
    [
        ({"year": 2019, "month": "Jun", "day": 14}, False),
        ({"year": 2019, "month": "Jun", "day": 15}, False),
        ({"year": 2019, "month": "Jun", "day": 16}, True),
        ({"date": "2019-04-15"}, False),
        ({"date": "2019-08-10"}, True),
    ]
)
def test_is_after(thing, expected, now=TEST_DATE):
    """is_after() is strict: the reference date itself does not count."""
    result = is_after(thing, now=now)
    assert result == expected
@pytest.mark.parametrize(
    "thing,expected",
    [
        ({"year": 2019, "month": "Jun", "day": 14}, False),
        ({"year": 2019, "month": "Jan", "day": 15}, True),
        ({"year": 2019, "month": "Jan", "day": 2}, True),
        ({"date": "2019-04-15"}, False),
        ({"date": "2019-02-03"}, True),
    ]
)
def test_is_between(thing, expected, start=TEST_START_DATE, end=TEST_END_DATE):
    """is_between(): date falls inside the [start, end] test window."""
    result = is_between(thing, start=start, end=end)
    assert result == expected
| StarcoderdataPython |
146 | <reponame>yavook/kiwi-scp
from typing import Tuple
import click
from .cmd import KiwiCommandType, KiwiCommand
from .decorators import kiwi_command
from ..executable import COMPOSE_EXE
from ..instance import Instance
from ..project import Project
# Click consumes COMMAND first, then gathers every remaining token (even ones
# that look like options, thanks to ignore_unknown_options) into compose_args.
@click.argument(
    "compose_args",
    metavar="[ARG]...",
    nargs=-1,
)
@click.argument(
    "compose_cmd",
    metavar="COMMAND",
)
@kiwi_command(
    short_help="Run docker-compose command",
    # ignore arguments looking like options
    # just pass everything down to docker-compose
    context_settings={"ignore_unknown_options": True},
)
class CmdCommand(KiwiCommand):
    """Run raw docker-compose command in a project"""

    # Runs once per project, and only for enabled projects.
    type = KiwiCommandType.PROJECT
    enabled_only = True

    @classmethod
    def run_for_project(cls, instance: Instance, project: Project, compose_cmd: str = None,
                        compose_args: Tuple[str] = None) -> None:
        """Forward COMMAND plus its raw arguments to docker-compose,
        executed with the project's process kwargs (working dir/env)."""
        COMPOSE_EXE.run([compose_cmd, *compose_args], **project.process_kwargs)
| StarcoderdataPython |
1762726 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Zinc dumping and parsing module
# (C) 2016 VRT Systems
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
import base64
import binascii
import datetime
import random
import string
import sys
import traceback
import six
import hszinc
from hszinc import VER_3_0, Grid, MODE_ZINC, MODE_JSON, XStr
from .pint_enable import to_pint
# Characters allowed in randomly generated test strings.
STR_CHARSET = string.ascii_letters + string.digits + '\n\r\t\f\b'

# Number of randomised grids generated per loopback test.
GENERATION_NUMBER = 1  # FIXME
# Chance (in percent) that a generated scalar is itself a list/map/grid.
PERCENT_RECURSIVE = 1
def gen_random_const():
    """Pick one of the constant scalar values (booleans, None, zinc singletons)."""
    constants = [True, False, None, hszinc.MARKER, hszinc.REMOVE, hszinc.NA]
    return random.choice(constants)
def gen_random_ref():
    """Generate a randomised hszinc.Ref with an optional display value."""
    ref_charset = string.ascii_letters + string.digits + '_:-.~'
    name = gen_random_str(charset=ref_charset)
    # Half the refs carry a human-readable value, the other half none.
    has_value = random.choice([True, False])
    value = gen_random_str(charset=string.ascii_letters + string.digits + '_') \
        if has_value else None
    return hszinc.Ref(name, value)
def gen_random_bin():
    """Generate a randomised hszinc.Bin with an arbitrary MIME type."""
    mime_types = (
        'text/plain',
        'text/html',
        'text/zinc',
        'application/json',
        'application/octet-stream',
        'image/png',
        'image/jpeg',
    )
    return hszinc.Bin(random.choice(mime_types))
def gen_random_xstr():
    """Generate a randomised XStr over 5 random bytes, hex- or base64-encoded."""
    payload = bytearray(random.getrandbits(8) for _ in range(5))
    hex_form = ('hex', binascii.hexlify(payload).decode('ascii'))
    # Python 2 b2a_base64 returns str (trailing newline stripped here);
    # Python 3 returns bytes which must be decoded.
    if sys.version_info[0] <= 2:
        b64_text = binascii.b2a_base64(payload)[:-1]
    else:
        b64_text = binascii.b2a_base64(payload).decode("ascii")
    b64_form = ('b64', b64_text)
    return XStr(*random.choice([hex_form, b64_form]))
def gen_random_uri():
    """Generate a randomised hszinc.Uri made of letters and digits."""
    uri_charset = string.ascii_letters + string.digits
    return hszinc.Uri(gen_random_str(charset=uri_charset))
def gen_random_str(min_length=1, max_length=20, charset=STR_CHARSET):
    """Build a random string of min_length..max_length chars drawn from charset."""
    length = random.randint(min_length, max_length)
    return ''.join(random.choice(charset) for _ in range(length))
def gen_random_date():
    """Generate a random calendar date between years 1 and 3000.

    Day-of-month is sampled 1..31, so impossible combinations (e.g. Feb 30)
    raise ValueError and the whole draw is retried.
    """
    while True:
        year = random.randint(1, 3000)
        month = random.randint(1, 12)
        day = random.randint(1, 31)
        try:
            return datetime.date(year, month, day)
        except ValueError:
            continue  # invalid (year, month, day) combination -- redraw
def gen_random_time():
    """Generate a random time of day with microsecond resolution."""
    hour = random.randint(0, 23)
    minute = random.randint(0, 59)
    second = random.randint(0, 59)
    microsecond = random.randint(0, 999999)
    return datetime.time(hour, minute, second, microsecond)
def gen_random_date_time():
    """Generate a random timezone-aware datetime in a random zinc timezone."""
    # Pick a random timezone from the zinc timezone map.
    tz_name = random.choice(list(hszinc.zoneinfo.get_tz_map().keys()))
    tz = hszinc.zoneinfo.timezone(tz_name)
    naive = datetime.datetime.combine(gen_random_date(), gen_random_time())
    return tz.localize(naive)
def gen_random_coordinate():
    """Generate a random lat/long pair, each in the range -180..180."""
    latitude = round(gen_random_num(360) - 180.0, 2)
    longitude = round(gen_random_num(360) - 180.0, 2)
    return hszinc.Coordinate(latitude, longitude)
def gen_random_num(scale=1000, digits=2):
    """Generate a random float in [0, scale), rounded to `digits` places."""
    raw = random.random() * scale
    return round(raw, digits)
def gen_random_quantity():
    """Generate a random percentage Quantity."""
    magnitude = gen_random_num()
    return hszinc.Quantity(magnitude, to_pint('percent'))
def gen_random_list():
    """Generate a list of 0-2 random scalars."""
    # `_` instead of the unused loop variable `x`; range(0, n) -> range(n).
    return [gen_random_scalar() for _ in range(random.randint(0, 2))]
def gen_random_map():
    """Generate a dict of 0-2 random name -> scalar entries.

    Note: duplicate generated names (rare) collapse to one entry, as in
    any dict comprehension.
    """
    return {gen_random_name(): gen_random_scalar()
            for _ in range(random.randint(0, 2))}
# Scalar generators drawn on by gen_random_scalar().  Most are currently
# commented out while the XStr path is being exercised in isolation.
RANDOM_TYPES = [
    # Only for v2.0 gen_random_bin,
    gen_random_xstr,
    # gen_random_const, gen_random_ref, gen_random_uri, gen_random_xstr,
    # gen_random_str, gen_random_date, gen_random_time, gen_random_date_time,
    # gen_random_coordinate, gen_random_num, gen_random_quantity
]
def gen_random_scalar():
    """Pick a random scalar; occasionally recurse into a list/map/grid."""
    recurse = random.randint(0, 100) < PERCENT_RECURSIVE
    generators = RANDOM_RECURSIVE_TYPES if recurse else RANDOM_TYPES
    return random.choice(generators)()
def gen_random_name(existing=None):
    """Generate a tag-style name (lowercase first char, alnum tail).

    When `existing` is given, redraw until the name is not already in it.
    """
    tail_charset = string.ascii_letters + string.digits
    while True:
        candidate = random.choice(string.ascii_lowercase) \
            + gen_random_str(min_length=0, max_length=7, charset=tail_charset)
        if existing is None or candidate not in existing:
            return candidate
def gen_random_meta():
    """Generate 1-5 random metadata entries with distinct names.

    Bug fix: each generated name is now recorded in `names`, so the
    `existing=names` exclusion in gen_random_name actually guarantees
    unique keys.  Previously the set was never updated and duplicate names
    could silently overwrite earlier entries.
    """
    meta = hszinc.MetadataObject()
    names = set()
    for _ in range(random.randint(1, 5)):
        name = gen_random_name(existing=names)
        names.add(name)
        meta[name] = gen_random_scalar()
    return meta
def gen_random_grid():
    """Generate a random VER_3_0 grid: metadata, 1-5 columns, 0-20 rows."""
    grid = hszinc.Grid(version=VER_3_0)
    grid.metadata.extend(gen_random_meta())

    # Columns, each with a 50% chance of carrying its own metadata.
    for _ in range(random.randint(1, 5)):
        col_name = gen_random_name(existing=grid.column)
        has_meta = random.choice([True, False])
        grid.column[col_name] = gen_random_meta() if has_meta else {}

    # Rows; every cell is independently present or absent.
    for _ in range(random.randint(0, 20)):
        row = {}
        for col in grid.column.keys():
            if random.choice([True, False]):
                row[col] = gen_random_scalar()
        grid.append(row)
    return grid
# Generators that may nest further random values (see gen_random_scalar).
RANDOM_RECURSIVE_TYPES = [gen_random_list, gen_random_map, gen_random_grid]
def dump_grid(g):
    """Pretty-print a grid's version, metadata, columns and rows to stdout."""
    print('Version: %s' % g.version)
    print('Metadata:')
    for key, value in g.metadata.items():
        print('  %s = %r' % (key, value))
    print('Columns:')
    for col in g.column.keys():
        print('  %s:' % col)
        for key, value in g.column[col].items():
            print('    %s = %r' % (key, value))
    print('Rows:')
    for row in g:
        print('---')
        for col, value in row.items():
            print('  %s = %r' % (col, value))
def approx_check(v1, v2):
    """Assert v1 approximately equals v2, recursing into composite values.

    Times compare ignoring microseconds, floats with 1e-6 absolute
    tolerance, quantities/coordinates field by field, grids via
    approx_check_grid; everything else must compare equal with ==.
    """
    # Check types match.  Only enforced for strings: under Python 2 this
    # distinguishes str from unicode; other types are handled structurally.
    if (isinstance(v1, six.string_types) \
            and isinstance(v2, six.string_types)):
        assert type(v1) == type(v2), '%s != %s' % (type(v1), type(v2))
    if isinstance(v1, datetime.time):
        # Dumped times may lose sub-second precision.
        assert v1.replace(microsecond=0) == v2.replace(microsecond=0)
    elif isinstance(v1, datetime.datetime):
        assert v1.tzinfo == v2.tzinfo
        assert v1.date() == v2.date()
        approx_check(v1.time(), v2.time())
    elif isinstance(v1, hszinc.Quantity):
        assert v1.unit == v2.unit
        approx_check(v1.value, v2.value)
    elif isinstance(v1, hszinc.Coordinate):
        approx_check(v1.latitude, v2.latitude)
        approx_check(v1.longitude, v2.longitude)
    elif isinstance(v1, float) or isinstance(v2, float):
        # Absolute tolerance for float round-tripping through text.
        assert abs(v1 - v2) < 0.000001
    elif isinstance(v1, Grid):
        approx_check_grid(v1, v2)
    else:
        assert v1 == v2, '%r != %r' % (v1, v2)
def _try_dump_parse(ref_grid, mode):
    """Dump ref_grid in `mode`, parse it back, and compare with the original.

    On failure the offending grid (and the dumped text, when available) is
    printed before the exception is re-raised, making randomised failures
    diagnosable.  The bare `except:` clauses were narrowed to
    `except Exception:` so KeyboardInterrupt and SystemExit pass through
    without triggering the diagnostic dump.
    """
    try:
        # Dump the randomised grid to a string
        grid_str = hszinc.dump(ref_grid, mode=mode)
    except Exception:
        # Dump some detail about the grid
        print('Failed to dump grid.')
        dump_grid(ref_grid)
        raise
    # Parse the grid string
    try:
        parsed_grid = hszinc.parse(grid_str, mode=mode, single=True)
    except Exception:
        print('Failed to parse dumped grid')
        dump_grid(ref_grid)
        print('--- Parsed string ---')
        print(grid_str)
        raise
    approx_check_grid(parsed_grid, ref_grid)
def try_dump_parse_json():
    """Round-trip one freshly generated random grid through the JSON encoding."""
    _try_dump_parse(gen_random_grid(), MODE_JSON)
def try_dump_parse_zinc():
    """Round-trip one freshly generated random grid through the zinc encoding."""
    _try_dump_parse(gen_random_grid(), MODE_ZINC)
def approx_check_grid(parsed_grid, ref_grid):
    """Assert parsed_grid matches ref_grid: metadata, columns, then rows.

    Each comparison is wrapped so that on mismatch both grids are dumped to
    stdout before the assertion propagates, keeping randomised failures
    reproducible/diagnosable.
    """
    # Check metadata matches
    try:
        assert set(ref_grid.metadata.keys()) \
            == set(parsed_grid.metadata.keys())
        for key in ref_grid.metadata.keys():
            approx_check(ref_grid.metadata[key], parsed_grid.metadata[key])
    except:
        print(traceback.format_exc())
        print('Mismatch in metadata')
        print('Reference grid')
        dump_grid(ref_grid)
        print('Parsed grid')
        dump_grid(parsed_grid)
        raise
    try:
        # Check column matches
        assert set(ref_grid.column.keys()) \
            == set(parsed_grid.column.keys())
    except:
        print(traceback.format_exc())
        print('Mismatch in column')
        print('Reference grid')
        dump_grid(ref_grid)
        print('Parsed grid')
        dump_grid(parsed_grid)
        raise
    for col in ref_grid.column.keys():
        try:
            for key in ref_grid.column[col].keys():
                approx_check(ref_grid.column[col][key], \
                             parsed_grid.column[col][key])
        except:
            print(traceback.format_exc())
            print('Mismatch in metadata for column %s' % col)
            print('Reference: %r' % ref_grid.column[col])
            print('Parsed: %r' % parsed_grid.column[col])
            raise
    try:
        # Check row matches
        assert len(ref_grid) == len(parsed_grid)
    except:
        # NOTE(review): unlike the other handlers this one does not re-raise,
        # so a row-count mismatch only surfaces via the final equality assert
        # below (zip truncates to the shorter grid).  Confirm intentional.
        print(traceback.format_exc())
        print('Mismatch in row count')
        print('Reference grid')
        dump_grid(ref_grid)
        print('Parsed grid')
        dump_grid(parsed_grid)
    for (ref_row, parsed_row) in zip(ref_grid, parsed_grid):
        try:
            for col in ref_grid.column.keys():
                approx_check(ref_row.get(col), parsed_row.get(col))
        except:
            print(traceback.format_exc())
            print('Mismatch in row')
            print('Reference:')
            print(ref_row)
            print('Parsed:')
            print(parsed_row)
            raise
    # Final strict equality check over the whole grid.
    assert parsed_grid == ref_grid
def test_loopback_zinc():
    """Round-trip GENERATION_NUMBER random grids through the zinc encoding."""
    # `_` replaces the unused loop variable `trial`; range(0, n) -> range(n).
    for _ in range(GENERATION_NUMBER):
        try_dump_parse_zinc()
def test_loopback_json():
    """Round-trip GENERATION_NUMBER random grids through the JSON encoding."""
    # `_` replaces the unused loop variable `trial`; range(0, n) -> range(n).
    for _ in range(GENERATION_NUMBER):
        try_dump_parse_json()
| StarcoderdataPython |
213793 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Bar chart that presents grouped data with rectangular bars with lengths
proportional to the values that they represent.
"""
from __future__ import division
from pygal.graph.graph import Graph
from pygal.util import alter, decorate, ident, swap
class Bar(Graph):
    """Bar graph class"""

    # Fractional margins: around the whole group of bars at one x position
    # (_series_margin) and around each individual bar (_serie_margin).
    _series_margin = .06
    _serie_margin = .06

    def __init__(self, *args, **kwargs):
        """Bar chart creation"""
        self._x_ranges = None
        super(Bar, self).__init__(*args, **kwargs)

    def _bar(self, serie, parent, x, y, i, zero, secondary=False):
        """Internal bar drawing function.

        Draws the rect for value index i of `serie` under `parent` and
        returns the bar's center point (in chart coordinates, transposed
        back when the chart is horizontal).
        """
        # Full slot width for one x position, in view coordinates.
        width = (self.view.x(1) - self.view.x(0)) / self._len
        x, y = self.view((x, y))
        # Shrink the slot by the outer (series-group) margin.
        series_margin = width * self._series_margin
        x += series_margin
        width -= 2 * series_margin
        # Split the slot between the _order series sharing this x position.
        width /= self._order
        # Horizontal charts reverse the series order so the visual stacking
        # matches the legend order.
        if self.horizontal:
            serie_index = self._order - serie.index - 1
        else:
            serie_index = serie.index
        x += serie_index * width
        # Shrink this serie's sub-slot by the inner (per-bar) margin.
        serie_margin = width * self._serie_margin
        x += serie_margin
        width -= 2 * serie_margin
        # Bar extends from the value's y up/down to the zero baseline.
        height = self.view.y(zero) - y
        # Corner radius for rounded bars (0 disables rounding).
        r = serie.rounded_bars * 1 if serie.rounded_bars else 0
        alter(self.svg.transposable_node(
            parent, 'rect',
            x=x, y=y, rx=r, ry=r, width=width, height=height,
            class_='rect reactive tooltip-trigger'), serie.metadata.get(i))
        transpose = swap if self.horizontal else ident
        return transpose((x + width / 2, y + height / 2))

    def bar(self, serie, rescale=False):
        """Draw a bar graph for a serie"""
        serie_node = self.svg.serie(serie)
        bars = self.svg.node(serie_node['plot'], class_="bars")
        # Secondary series are rescaled onto the primary axis when requested.
        if rescale and self.secondary_series:
            points = self._rescale(serie.points)
        else:
            points = serie.points
        for i, (x, y) in enumerate(points):
            # Skip missing values; log scales cannot show non-positive y.
            if None in (x, y) or (self.logarithmic and y <= 0):
                continue
            metadata = serie.metadata.get(i)
            bar = decorate(
                self.svg,
                self.svg.node(bars, class_='bar'),
                metadata)
            val = self._format(serie.values[i])

            x_center, y_center = self._bar(
                serie, bar, x, y, i, self.zero, secondary=rescale)
            self._tooltip_data(
                bar, val, x_center, y_center, "centered",
                self._get_x_label(i))
            self._static_value(serie_node, val, x_center, y_center, metadata)

    def _compute(self):
        """Compute y min and max and y scale and set labels"""
        # Extend the y box to include the zero baseline when data is one-sided.
        if self._min:
            self._box.ymin = min(self._min, self.zero)
        if self._max:
            self._box.ymax = max(self._max, self.zero)

        # Tick positions at slot boundaries; bars are centered afterwards.
        self._x_pos = [
            x / self._len for x in range(self._len + 1)
        ] if self._len > 1 else [0, 1]  # Center if only one value

        self._points(self._x_pos)

        # Re-center: one x position in the middle of each slot.
        self._x_pos = [(i + .5) / self._len for i in range(self._len)]

    def _plot(self):
        """Draw bars for series and secondary series"""
        for serie in self.series:
            self.bar(serie)
        for serie in self.secondary_series:
            self.bar(serie, True)
| StarcoderdataPython |
4843233 | <gh_stars>0
import wikipedia
import folders
from pydub import AudioSegment
from pydub import effects
import sys
sys.path.append('/path/to/ffmpeg')
def write_file(file, data):
    """Write *data* to *file*, replacing any existing content.

    The context manager guarantees the file handle is closed even if the
    write raises.  (Removed the dead, commented-out open/close variant.)
    """
    with open(file, 'w') as f:
        f.write(data)
def getwiki(path_wiki, title):
    """Download the Hungarian Wikipedia article *title* and save it as text.

    Blank lines are removed from the article body and the file name is
    derived from the title with filesystem-unfriendly characters replaced
    by underscores.
    """
    wikipedia.set_lang("hu")
    text = wikipedia.page(title).content
    # Drop empty lines, keep the rest in original order.
    cleaned = '\n'.join(line for line in text.split('\n') if len(line) > 0)
    filename = title
    for ch in ('-', ':', '!', '\\'):
        filename = filename.replace(ch, '_')
    write_file(path_wiki + '/' + filename + '.txt', cleaned)
def mp3_speed(root):
    """Speed up the mp3 at *root* by 1.5x and export it as '<name>_out.mp3'."""
    speed_factor = 1.5  # must not drop below 1.0 (pydub speedup constraint)
    audio = AudioSegment.from_mp3(root)
    faster = audio.speedup(speed_factor, 150, 25)
    faster.export(root[:-4] + '_out.mp3', format='mp3')
def main(path_wiki, path_out):
    """Speed up every mp3 found under the 'output' directory.

    ``path_wiki`` and ``path_out`` are currently unused (the wiki-download
    pipeline was disabled); they are kept for signature compatibility with
    the ``__main__`` caller.  Removed the large block of dead, commented-out
    wiki-fetching code.
    """
    mp3files = folders.list_files2('output', 'mp3')
    for mp3file in mp3files:
        print(mp3file)
        mp3_speed(mp3file)
if __name__ == '__main__':
    # Script entry point: run the mp3 speed-up pipeline.
    # (Removed trailing commented-out experimentation code.)
    path_wiki = 'output/wiki'
    path_out = 'output/mp3/'
    main(path_wiki, path_out)
# ny.links[0] | StarcoderdataPython |
75717 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import ivi
# Parameter Values
# Allowed values for the enumerated IviDCPwr attributes defined by the
# IVI DC power supply class specification.
CurrentLimitBehavior = set(['regulate', 'trip'])
RangeType = set(['current', 'voltage'])
OutputState = set(['constant_voltage', 'constant_current', 'over_voltage',
                   'over_current', 'unregulated'])
MeasurementType = set(['current', 'voltage'])
def get_range(range_list, offset, val):
l = list()
for i in range_list:
l.append((i, abs(range_list[i][offset])))
l.sort(key=lambda x: x[1], reverse=True)
k = None
for i in range(len(l)):
if l[i][1] >= abs(val):
k = l[i][0]
return k
class Base(ivi.IviContainer):
    "Base IVI methods for all DC power supplies"

    def __init__(self, *args, **kwargs):
        # needed for _init_outputs calls from other __init__ methods
        self._output_count = 1

        super(Base, self).__init__(*args, **kwargs)

        cls = 'IviDCPwr'
        grp = 'Base'
        ivi.add_group_capability(self, cls+grp)

        # Per-output state lists, indexed in parallel with _output_name.
        self._output_current_limit = list()
        self._output_current_limit_behavior = list()
        self._output_enabled = list()
        self._output_ovp_enabled = list()
        self._output_ovp_limit = list()
        self._output_voltage_level = list()
        self._output_name = list()
        self._output_count = 1

        # Placeholder capability spec; concrete drivers replace this.
        self._output_spec = [
            {
                'range': {
                    'P0V': (0, 0)
                },
                'ovp_max': 0,
                'voltage_max': 0,
                'current_max': 0
            }
        ]

        self._add_property('outputs[].current_limit',
                           self._get_output_current_limit,
                           self._set_output_current_limit,
                           None,
                           ivi.Doc("""
                           Specifies the output current limit. The units are Amps.

                           The value of the Current Limit Behavior attribute determines the behavior
                           of the power supply when the output current is equal to or greater than
                           the value of this attribute.
                           """, cls, grp, '4.2.1'))
        self._add_property('outputs[].current_limit_behavior',
                           self._get_output_current_limit_behavior,
                           self._set_output_current_limit_behavior,
                           None,
                           ivi.Doc("""
                           Specifies the behavior of the power supply when the output current is
                           equal to or greater than the value of the Current Limit attribute.

                           Values

                           * 'trip' - The power supply disables the output when the output current is
                             equal to or greater than the value of the Current Limit attribute.
                           * 'regulate' - The power supply restricts the output voltage such that the
                             output current is not greater than the value of the Current Limit
                             attribute.
                           """, cls, grp, '4.2.2'))
        self._add_property('outputs[].enabled',
                           self._get_output_enabled,
                           self._set_output_enabled,
                           None,
                           ivi.Doc("""
                           If true, the signal the power supply produces appears at the output
                           connector. If false, the signal the power supply produces does not appear
                           at the output connector.
                           """, cls, grp, '4.2.3'))
        self._add_property('outputs[].ovp_enabled',
                           self._get_output_ovp_enabled,
                           self._set_output_ovp_enabled,
                           None,
                           ivi.Doc("""
                           Specifies whether the power supply provides over-voltage protection. If
                           this attribute is set to True, the power supply disables the output when
                           the output voltage is greater than or equal to the value of the OVP
                           Limit attribute.
                           """, cls, grp, '4.2.4'))
        self._add_property('outputs[].ovp_limit',
                           self._get_output_ovp_limit,
                           self._set_output_ovp_limit,
                           None,
                           ivi.Doc("""
                           Specifies the voltage the power supply allows. The units are Volts.

                           If the OVP Enabled attribute is set to True, the power supply disables the
                           output when the output voltage is greater than or equal to the value of
                           this attribute.

                           If the OVP Enabled is set to False, this attribute does not affect the
                           behavior of the instrument.
                           """, cls, grp, '4.2.5'))
        self._add_property('outputs[].voltage_level',
                           self._get_output_voltage_level,
                           self._set_output_voltage_level,
                           None,
                           ivi.Doc("""
                           Specifies the voltage level the DC power supply attempts to generate. The
                           units are Volts.
                           """, cls, grp, '4.2.6'))
        self._add_property('outputs[].name',
                           self._get_output_name,
                           None,
                           None,
                           ivi.Doc("""
                           This attribute returns the repeated capability identifier defined by
                           specific driver for the output channel that corresponds to the index that
                           the user specifies. If the driver defines a qualified Output Channel name,
                           this property returns the qualified name.

                           If the value that the user passes for the Index parameter is less than
                           zero or greater than the value of the Output Channel Count, the attribute
                           raises a SelectorRangeException.
                           """, cls, grp, '4.2.9'))
        self._add_method('outputs[].configure_current_limit',
                         self._output_configure_current_limit,
                         ivi.Doc("""
                         This function configures the current limit. It specifies the output
                         current limit value and the behavior of the power supply when the output
                         current is greater than or equal to that value.

                         See the definition of the Current Limit Behavior attribute for defined
                         values for the behavior parameter.
                         """, cls, grp, '4.3.1'))
        self._add_method('outputs[].configure_range',
                         self._output_configure_range,
                         ivi.Doc("""
                         Configures the power supply's output range on an output. One parameter
                         specifies whether to configure the voltage or current range, and the other
                         parameter is the value to which to set the range.

                         Setting a voltage range can invalidate a previously configured current
                         range. Setting a current range can invalidate a previously configured
                         voltage range.
                         """, cls, grp, '4.3.3'))
        self._add_method('outputs[].configure_ovp',
                         self._output_configure_ovp,
                         ivi.Doc("""
                         Configures the over-voltage protection. It specifies the over-voltage
                         limit and the behavior of the power supply when the output voltage is
                         greater than or equal to that value.

                         When the Enabled parameter is False, the Limit parameter does not affect
                         the instrument's behavior, and the driver does not set the OVP Limit
                         attribute.
                         """, cls, grp, '4.3.4'))
        self._add_method('outputs[].query_current_limit_max',
                         self._output_query_current_limit_max,
                         ivi.Doc("""
                         This function returns the maximum programmable current limit that the
                         power supply accepts for a particular voltage level on an output.
                         """, cls, grp, '4.3.7'))
        self._add_method('outputs[].query_voltage_level_max',
                         self._output_query_voltage_level_max,
                         ivi.Doc("""
                         This function returns the maximum programmable voltage level that the
                         power supply accepts for a particular current limit on an output.
                         """, cls, grp, '4.3.8'))
        self._add_method('outputs[].query_output_state',
                         self._output_query_output_state,
                         ivi.Doc("""
                         This function returns whether the power supply is in a particular output
                         state.

                         A constant voltage condition occurs when the output voltage is equal to
                         the value of the Voltage Level attribute and the current is less than or
                         equal to the value of the Current Limit attribute.

                         A constant current condition occurs when the output current is equal to
                         the value of the Current Limit attribute and the Current Limit Behavior
                         attribute is set to the Current Regulate defined value.

                         An unregulated condition occurs when the output voltage is less than the
                         value of the Voltage Level attribute and the current is less than the
                         value of the Current Limit attribute.

                         An over-voltage condition occurs when the output voltage is equal to or
                         greater than the value of the OVP Limit attribute and the OVP Enabled
                         attribute is set to True.

                         An over-current condition occurs when the output current is equal to or
                         greater than the value of the Current Limit attribute and the Current
                         Limit Behavior attribute is set to the Current Trip defined value.

                         When either an over-voltage condition or an over-current condition
                         occurs, the power supply's output protection disables the output. If the
                         power supply is in an over-voltage or over-current state, it does not
                         produce power until the output protection is reset. The Reset Output
                         Protection function resets the output protection. Once the output
                         protection is reset, the power supply resumes generating a power signal.

                         Values for output_state:

                         * 'constant_voltage'
                         * 'constant_current'
                         * 'over_voltage'
                         * 'over_current'
                         * 'unregulated'
                         """, cls, grp, '4.3.9'))
        self._add_method('outputs[].reset_output_protection',
                         self._output_reset_output_protection,
                         ivi.Doc("""
                         This function resets the power supply output protection after an
                         over-voltage or over-current condition occurs.

                         An over-voltage condition occurs when the output voltage is equal to or
                         greater than the value of the OVP Limit attribute and the OVP Enabled
                         attribute is set to True.

                         An over-current condition occurs when the output current is equal to or
                         greater than the value of the Current Limit attribute and the Current
                         Limit Behavior attribute is set to Current Trip.

                         When either an over-voltage condition or an over-current condition
                         occurs, the output protection of the power supply disables the output.
                         Once the output protection is reset, the power supply resumes generating
                         a power signal.

                         Use the Query Output State function to determine if the power supply is in
                         an over-voltage or over-current state.
                         """, cls, grp, '4.3.10'))

        self._init_outputs()

    def _init_outputs(self):
        """(Re)build the per-output state lists for _output_count outputs."""
        try:
            super(Base, self)._init_outputs()
        except AttributeError:
            pass

        self._output_name = list()
        self._output_current_limit = list()
        self._output_current_limit_behavior = list()
        self._output_enabled = list()
        self._output_ovp_enabled = list()
        self._output_ovp_limit = list()
        self._output_voltage_level = list()
        for i in range(self._output_count):
            self._output_name.append("output%d" % (i+1))
            self._output_current_limit.append(0)
            self._output_current_limit_behavior.append('regulate')
            self._output_enabled.append(False)
            self._output_ovp_enabled.append(True)
            self._output_ovp_limit.append(0)
            self._output_voltage_level.append(0)

        self.outputs._set_list(self._output_name)

    def _get_output_current_limit(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_current_limit[index]

    def _set_output_current_limit(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        if value < 0 or value > self._output_spec[index]['current_max']:
            raise ivi.OutOfRangeException()
        self._output_current_limit[index] = value

    def _get_output_current_limit_behavior(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_current_limit_behavior[index]

    def _set_output_current_limit_behavior(self, index, value):
        index = ivi.get_index(self._output_name, index)
        if value not in CurrentLimitBehavior:
            raise ivi.ValueNotSupportedException()
        self._output_current_limit_behavior[index] = value

    def _get_output_enabled(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_enabled[index]

    def _set_output_enabled(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = bool(value)
        self._output_enabled[index] = value

    def _get_output_ovp_enabled(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_ovp_enabled[index]

    def _set_output_ovp_enabled(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = bool(value)
        self._output_ovp_enabled[index] = value

    def _get_output_ovp_limit(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_ovp_limit[index]

    def _set_output_ovp_limit(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        # Bug fix: the range checks previously referenced the undefined name
        # 'voltage_level' instead of 'value', raising NameError on every call.
        # The sign of ovp_max selects which direction of limit applies.
        if self._output_spec[index]['ovp_max'] >= 0:
            if value < 0 or value > self._output_spec[index]['ovp_max']:
                raise ivi.OutOfRangeException()
        else:
            if value > 0 or value < self._output_spec[index]['ovp_max']:
                raise ivi.OutOfRangeException()
        self._output_ovp_limit[index] = value

    def _get_output_voltage_level(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_voltage_level[index]

    def _set_output_voltage_level(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        # Bug fix: same NameError as in _set_output_ovp_limit — the checks
        # used 'voltage_level' instead of the actual parameter 'value'.
        if self._output_spec[index]['voltage_max'] >= 0:
            if value < 0 or value > self._output_spec[index]['voltage_max']:
                raise ivi.OutOfRangeException()
        else:
            if value > 0 or value < self._output_spec[index]['voltage_max']:
                raise ivi.OutOfRangeException()
        self._output_voltage_level[index] = value

    def _get_output_name(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_name[index]

    def _output_configure_current_limit(self, index, behavior, limit):
        self._set_output_current_limit_behavior(index, behavior)
        self._set_output_current_limit(index, limit)

    def _output_configure_range(self, index, range_type, range_val):
        index = ivi.get_index(self._output_name, index)
        if range_type not in RangeType:
            raise ivi.ValueNotSupportedException()
        if range_type == 'voltage':
            t = 0
        elif range_type == 'current':
            t = 1
        # Bug fix: this module *is* dcpwr, so the module-level helper must be
        # referenced directly ('dcpwr.get_range' was a NameError here).
        k = get_range(self._output_range[index], t, range_val)
        # Bug fix: get_range returns None when no range can accommodate
        # range_val; 'k < 0' against None raises TypeError on Python 3.
        if k is None:
            raise ivi.OutOfRangeException()
        self._output_spec[index]['voltage_max'] = self._output_range[index][k][0]
        self._output_spec[index]['current_max'] = self._output_range[index][k][1]

    def _output_configure_ovp(self, index, enabled, limit):
        # Per IVI 4.3.4: when disabled, the limit must not be written.
        if enabled:
            self._set_output_ovp_limit(index, limit)
        self._set_output_ovp_enabled(index, enabled)

    def _output_query_current_limit_max(self, index, voltage_level):
        index = ivi.get_index(self._output_name, index)
        if self._output_spec[index]['voltage_max'] >= 0:
            if voltage_level < 0 or voltage_level > self._output_spec[index]['voltage_max']:
                raise ivi.OutOfRangeException()
        else:
            if voltage_level > 0 or voltage_level < self._output_spec[index]['voltage_max']:
                raise ivi.OutOfRangeException()
        return self._output_spec[index]['current_max']

    def _output_query_voltage_level_max(self, index, current_limit):
        index = ivi.get_index(self._output_name, index)
        # Bug fix: the spec dict defines 'current_max', not 'current_limit_max';
        # the old key raised KeyError on every call.
        if current_limit < 0 or current_limit > self._output_spec[index]['current_max']:
            raise ivi.OutOfRangeException()
        return self._output_spec[index]['voltage_max']

    def _output_query_output_state(self, index, state):
        # Stub: concrete drivers query the instrument; base returns False.
        index = ivi.get_index(self._output_name, index)
        if state not in OutputState:
            raise ivi.ValueNotSupportedException()
        return False

    def _output_reset_output_protection(self, index):
        # Stub: concrete drivers override to clear OVP/OCP trips.
        pass
class Trigger(ivi.IviContainer):
    "Extension IVI methods for power supplies supporting trigger based output changes"

    def __init__(self, *args, **kwargs):
        super(Trigger, self).__init__(*args, **kwargs)

        cls = 'IviDCPwr'
        grp = 'Trigger'
        ivi.add_group_capability(self, cls+grp)

        # Per-output triggered setpoints, indexed in parallel with _output_name.
        self._output_trigger_source = list()
        self._output_triggered_current_limit = list()
        self._output_triggered_voltage_level = list()

        self._add_property('outputs[].trigger_source',
                           self._get_output_trigger_source,
                           self._set_output_trigger_source,
                           None,
                           ivi.Doc("""
                           Specifies the trigger source. After an Initiate call, the power supply
                           waits for a trigger event from the source specified with this attribute.
                           After a trigger event occurs, the power supply changes the voltage level
                           to the value of the Triggered Voltage Level attribute and the current
                           limit to the value of the Triggered Current Limit attribute.
                           """, cls, grp, '5.2.1'))
        self._add_property('outputs[].triggered_current_limit',
                           self._get_output_triggered_current_limit,
                           self._set_output_triggered_current_limit,
                           None,
                           ivi.Doc("""
                           Specifies the value to which the power supply sets the current limit after
                           a trigger event occurs. The units are Amps.

                           After an Initiate call, the power supply waits for a trigger event from
                           the source specified with the Trigger Source attribute. After a trigger
                           event occurs, the power supply sets the current limit to the value of this
                           attribute.

                           After a trigger occurs, the value of the Current Limit attribute reflects
                           the new value to which the current limit has been set.
                           """, cls, grp, '5.2.2'))
        self._add_property('outputs[].triggered_voltage_level',
                           self._get_output_triggered_voltage_level,
                           self._set_output_triggered_voltage_level,
                           None,
                           ivi.Doc("""
                           Specifies the value to which the power supply sets the voltage level
                           after a trigger event occurs. The units are Volts.

                           After an Initiate call, the power supply waits for a trigger event from
                           the source specified with the Trigger Source attribute. After a trigger
                           event occurs, the power supply sets the voltage level to the value of this
                           attribute.

                           After a trigger occurs, the value of the Voltage Level attribute reflects
                           the new value to which the voltage level has been set.
                           """, cls, grp, '5.2.3'))
        self._add_method('trigger.abort',
                         self._trigger_abort,
                         ivi.Doc("""
                         If the power supply is currently waiting for a trigger to change the
                         output signal, this function returns the power supply to the ignore
                         triggers state.

                         If the power supply is not waiting for a trigger, this function does
                         nothing and returns Success.
                         """, cls, grp, '5.3.1'))
        self._add_method('trigger.initiate',
                         self._trigger_initiate,
                         ivi.Doc("""
                         If the power supply is not currently waiting for a trigger, this function
                         causes the power supply to wait for a trigger.

                         If the power supply is already waiting for a trigger, this function does
                         nothing and returns Success.
                         """, cls, grp, '5.3.5'))

    def _init_outputs(self):
        """Extend the per-output lists with trigger-related defaults."""
        try:
            super(Trigger, self)._init_outputs()
        except AttributeError:
            pass

        self._output_trigger_source = list()
        self._output_triggered_current_limit = list()
        self._output_triggered_voltage_level = list()
        for i in range(self._output_count):
            self._output_trigger_source.append('')
            self._output_triggered_current_limit.append(0)
            self._output_triggered_voltage_level.append(0)

    def _get_output_trigger_source(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_trigger_source[index]

    def _set_output_trigger_source(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = str(value)
        self._output_trigger_source[index] = value

    def _get_output_triggered_current_limit(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_triggered_current_limit[index]

    def _set_output_triggered_current_limit(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        # Validate against the same per-output capability spec used for the
        # immediate (non-triggered) current limit.
        if value < 0 or value > self._output_spec[index]['current_max']:
            raise ivi.OutOfRangeException()
        self._output_triggered_current_limit[index] = value

    def _get_output_triggered_voltage_level(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_triggered_voltage_level[index]

    def _set_output_triggered_voltage_level(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        # The sign of voltage_max selects which direction of limit applies.
        if self._output_spec[index]['voltage_max'] >= 0:
            if value < 0 or value > self._output_spec[index]['voltage_max']:
                raise ivi.OutOfRangeException()
        else:
            if value > 0 or value < self._output_spec[index]['voltage_max']:
                raise ivi.OutOfRangeException()
        self._output_triggered_voltage_level[index] = value

    def _trigger_abort(self):
        # Stub: concrete drivers override to cancel a pending trigger.
        pass

    def _trigger_initiate(self):
        # Stub: concrete drivers override to arm the trigger system.
        pass
class SoftwareTrigger(ivi.IviContainer):
    "Extension IVI methods for power supplies supporting software triggering"

    def __init__(self, *args, **kwargs):
        super(SoftwareTrigger, self).__init__(*args, **kwargs)

        cls = 'IviDCPwr'
        grp = 'SoftwareTrigger'
        ivi.add_group_capability(self, cls+grp)

        self._add_method('send_software_trigger',
                         self._send_software_trigger,
                         ivi.Doc("""
                         This function sends a software-generated trigger to the instrument. It is
                         only applicable for instruments using interfaces or protocols which
                         support an explicit trigger function. For example, with GPIB this function
                         could send a group execute trigger to the instrument. Other
                         implementations might send a ``*TRG`` command.

                         Since instruments interpret a software-generated trigger in a wide variety
                         of ways, the precise response of the instrument to this trigger is not
                         defined. Note that SCPI details a possible implementation.

                         This function should not use resources which are potentially shared by
                         other devices (for example, the VXI trigger lines). Use of such shared
                         resources may have undesirable effects on other devices.

                         This function should not check the instrument status. Typically, the
                         end-user calls this function only in a sequence of calls to other
                         low-level driver functions. The sequence performs one operation. The
                         end-user uses the low-level functions to optimize one or more aspects of
                         interaction with the instrument. To check the instrument status, call the
                         appropriate error query function at the conclusion of the sequence.

                         The trigger source attribute must accept Software Trigger as a valid
                         setting for this function to work. If the trigger source is not set to
                         Software Trigger, this function does nothing and returns the error Trigger
                         Not Software.
                         """, cls, grp, '6.2.1', 'send_software_trigger'))

    def _send_software_trigger(self):
        # Stub: concrete drivers override to emit e.g. *TRG or a GPIB GET.
        pass
class Measurement(ivi.IviContainer):
    "Extension IVI methods for power supplies supporting measurement of the output signal"

    def __init__(self, *args, **kwargs):
        super(Measurement, self).__init__(*args, **kwargs)

        cls = 'IviDCPwr'
        grp = 'Measurement'
        ivi.add_group_capability(self, cls+grp)

        self._add_method('outputs[].measure',
                         self._output_measure,
                         ivi.Doc("""
                         Takes a measurement on the output signal and returns the measured value.

                         Values for measurement_type:

                         * 'voltage'
                         * 'current'
                         """, cls, grp, '7.2.1'))

    def _output_measure(self, index, type):
        # Stub: validates the measurement type and returns 0; concrete drivers
        # override to query the instrument.  NOTE(review): 'type' shadows the
        # builtin but is part of the public signature, so it is kept.
        index = ivi.get_index(self._output_name, index)
        if type not in MeasurementType:
            raise ivi.ValueNotSupportedException()
        return 0
| StarcoderdataPython |
3221205 | #
# @lc app=leetcode id=118 lang=python
#
# [118] Pascal's Triangle
#
# https://leetcode.com/problems/pascals-triangle/description/
#
# algorithms
# Easy (44.14%)
# Total Accepted: 225.9K
# Total Submissions: 509K
# Testcase Example: '5'
#
# Given a non-negative integer numRows, generate the first numRows of Pascal's
# triangle.
#
#
# In Pascal's triangle, each number is the sum of the two numbers directly
# above it.
#
# Example:
#
#
# Input: 5
# Output:
# [
# [1],
# [1,1],
# [1,2,1],
# [1,3,3,1],
# [1,4,6,4,1]
# ]
#
#
#
class Solution(object):
    def generate(self, numRows):
        """Return the first numRows rows of Pascal's triangle.

        Each interior entry is the sum of the two entries directly above it.
        Fixed the inner loop shadowing the outer loop variable ``i``, and
        builds each row in place instead of appending element by element.

        :type numRows: int
        :rtype: List[List[int]]
        """
        triangle = []
        for row_idx in range(numRows):
            # Start with a row of 1s; only interior cells need summing.
            row = [1] * (row_idx + 1)
            for j in range(1, row_idx):
                row[j] = triangle[-1][j - 1] + triangle[-1][j]
            triangle.append(row)
        return triangle
| StarcoderdataPython |
5196863 | # IMPORTS ############################################
from rest_framework.generics import (
CreateAPIView,
ListAPIView,
RetrieveAPIView,
RetrieveDestroyAPIView,
)
from vsitapp.models import Post
from .serializers import (
PeopleSerializer,
PeopleDetailSerializer,
PeopleCreateSerializer,
)
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
IsAuthenticatedOrReadOnly,
)
from .permissions import IsOwnerOrReadOnly
# #######################################################
class PeopleCreateView(CreateAPIView):
    # POST endpoint that creates a Post; requires an authenticated user.
    queryset = Post.objects.all()
    serializer_class = PeopleCreateSerializer
    permission_classes = [IsAuthenticated]

    def perform_create(self, serializer):
        # Stamp the new object with the requesting user as author and copy
        # the user's first_name, which other views use as a lookup field.
        serializer.save(author=self.request.user, first_name=self.request.user.first_name)
class PeopleView(ListAPIView):
    # Read-only GET endpoint listing every Post.
    queryset = Post.objects.all()
    serializer_class = PeopleSerializer
class PeopleDetailView(RetrieveAPIView):
    # GET endpoint for a single Post, looked up by its first_name field.
    queryset = Post.objects.all()
    serializer_class = PeopleDetailSerializer
    lookup_field = 'first_name'
class PeopleDeleteView(RetrieveDestroyAPIView):
    # GET/DELETE endpoint; IsOwnerOrReadOnly restricts deletion to the owner.
    # The URL kwarg 'target' is matched against the first_name field.
    queryset = Post.objects.all()
    serializer_class = PeopleDetailSerializer
    permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
    lookup_field = 'first_name'
    lookup_url_kwarg = 'target'
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.