id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3263105 | from django.contrib import admin
from .models import Course, Department, User, Student, ExamPaper, Material, Announcement, CourseAllotment,Bookmark, Feedback, Stat, Contributor
admin.site.empty_value_display = '(None)'
# Register your models here.
class CourseAdmin(admin.ModelAdmin):
    # Changelist columns for Course; 'name' links to the edit page.
    list_display = ('name','dept','code' )
    list_display_links = ('name',)
admin.site.register(Course,CourseAdmin)
class DepartmentAdmin(admin.ModelAdmin):
    # Form fields and changelist columns for Department.
    fields = ('name','acronym')
    list_display = ('name','acronym', )
    list_display_links = ('name',)
admin.site.register(Department,DepartmentAdmin)
class FeedbackAdmin(admin.ModelAdmin):
    # Form fields and changelist columns for Feedback; 'title' links to edit.
    fields = ('author','title','feedback','files')
    list_display = ('title','feedback','author' )
    list_display_links = ('title',)
admin.site.register(Feedback,FeedbackAdmin)
class UserAdmin(admin.ModelAdmin):
    """Admin for User with a computed full-name changelist column."""
    fields = ('first_name', 'last_name', 'email', 'user_role', 'avatar')
    list_display = ('name', 'email', 'user_role')
    list_display_links = ('name', 'email')

    def name(self, obj):
        """Return the user's full name, or 'None' when both parts are blank."""
        # strip() also treats whitespace-only name parts as empty; the old
        # code only caught the exact ' ' string produced by two empty names.
        full_name = f"{obj.first_name} {obj.last_name}".strip()
        return full_name if full_name else 'None'
admin.site.register(User, UserAdmin)
class StudentAdmin(admin.ModelAdmin):
    # Changelist columns for Student; 'registration_no' links to edit.
    list_display = ('registration_no','semester','user')
    list_display_links = ('registration_no',)
admin.site.register(Student,StudentAdmin)
class AnnouncementAdmin(admin.ModelAdmin):
    # Changelist columns for Announcement; both course and title link to edit.
    list_display = ('course','description','updated_on','added_on','files','title','author' )
    list_display_links = ('course','title')
admin.site.register(Announcement,AnnouncementAdmin)
class MaterialAdmin(admin.ModelAdmin):
    # Changelist columns for Material; 'course' links to edit.
    list_display = ('course','title','added_on','files','author' )
    list_display_links = ('course',)
admin.site.register(Material,MaterialAdmin)
class ExamPapersAdmin(admin.ModelAdmin):
    # Changelist columns for ExamPaper; 'course' links to edit.
    list_display = ('course','files','term','author' )
    list_display_links = ('course',)
admin.site.register(ExamPaper,ExamPapersAdmin)
class CourseAllotmentAdmin(admin.ModelAdmin):
    # Changelist columns for CourseAllotment; 'course' links to edit.
    list_display = ('course','semester' )
    list_display_links = ('course',)
admin.site.register(CourseAllotment,CourseAllotmentAdmin)
class BookmarksAdmin(admin.ModelAdmin):
    # Changelist columns for Bookmark; 'course' links to edit.
    list_display = ('course','user' )
    list_display_links = ('course',)
admin.site.register(Bookmark,BookmarksAdmin)
class StatAdmin(admin.ModelAdmin):
    # Changelist columns for Stat (site-wide counters); 'tag' links to edit.
    list_display = ('tag','user_count','contributor_count','announcement_count','paper_count','material_count' )
    list_display_links = ('tag',)
admin.site.register(Stat,StatAdmin)
class ContributorAdmin(admin.ModelAdmin):
    """Admin for Contributor: per-user contribution counts; 'user' links to edit."""
    # Fix: removed the trailing "| StarcoderdataPython" dataset residue that
    # made the register() line a syntax error.
    list_display = ('user', 'paper', 'material', 'announcement', 'feedback', 'points')
    list_display_links = ('user',)
admin.site.register(Contributor, ContributorAdmin)
1610725 | # -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20090502071837.3: * @file leoRst.py
#@@first
#@+<< docstring >>
#@+node:ekr.20090502071837.4: ** << docstring >>
"""Support for restructured text (rST), adapted from rst3 plugin.
For full documentation, see: http://leoeditor.com/rstplugin3.html
To generate documents from rST files, Python's docutils_ module must be
installed. The code will use the SilverCity_ syntax coloring package if it is
available."""
#@-<< docstring >>
#@+<< imports >>
#@+node:ekr.20100908120927.5971: ** << imports >> (leoRst)
import io
import os
import re
import time
# Third-party imports...
try:
import docutils
import docutils.core
from docutils import parsers
from docutils.parsers import rst
except Exception:
docutils = None # type:ignore
# Leo imports.
from leo.core import leoGlobals as g
# Aliases & traces.
StringIO = io.StringIO
if 'plugins' in getattr(g.app, 'debug', []):
print('leoRst.py: docutils:', bool(docutils))
print('leoRst.py: parsers:', bool(parsers))
print('leoRst.py: rst:', bool(rst))
#@-<< imports >>
#@+others
#@+node:ekr.20150509035745.1: ** cmd (decorator)
def cmd(name):
    """Command decorator for the RstCommands class."""
    # Registers the command so Leo dispatches it to c.rstCommands.
    return g.new_cmd_decorator(name, ['c', 'rstCommands',])
#@+node:ekr.20090502071837.33: ** class RstCommands
class RstCommands:
"""
A class to convert @rst nodes to rST markup.
"""
#@+others
#@+node:ekr.20090502071837.34: *3* rst: Birth
#@+node:ekr.20090502071837.35: *4* rst.__init__
def __init__(self, c):
    """Ctor for the RstCommands class."""
    self.c = c  # The Leo commander this instance serves.
    #
    # Statistics.
    self.n_intermediate = 0  # Number of intermediate files written.
    self.n_docutils = 0  # Number of docutils files written.
    #
    # Http support for HtmlParserClass. See http_addNodeMarker.
    self.anchor_map = {}  # Keys are anchors. Values are positions
    self.http_map = {}  # Keys are named hyperlink targets. Value are positions.
    self.nodeNumber = 0  # Unique node number.
    #
    # For writing.
    self.at_auto_underlines = ''  # Full set of underlining characters.
    self.at_auto_write = False  # True: in @auto-rst importer.
    self.encoding = 'utf-8'  # From any @encoding directive.
    self.path = ''  # The path from any @path directive.
    self.result_list = []  # The intermediate results.
    self.root = None  # The @rst node being processed.
    #
    # Default settings.
    self.default_underline_characters = '#=+*^~`-:><-'
    self.user_filter_b = None  # Optional user callback filtering body text.
    self.user_filter_h = None  # Optional user callback filtering headlines.
    #
    # Complete the init.
    self.reloadSettings()
#@+node:ekr.20210326084034.1: *4* rst.reloadSettings
def reloadSettings(self):
    """RstCommand.reloadSettings: cache all rst3-* settings on self."""
    c = self.c
    getBool, getString = c.config.getBool, c.config.getString
    #
    # Reporting options.
    self.silent = not getBool('rst3-verbose', default=True)
    #
    # Http options.
    self.http_server_support = getBool('rst3-http-server-support', default=False)
    self.node_begin_marker = getString('rst3-node-begin-marker') or 'http-node-marker-'
    #
    # Output options.
    self.default_path = getString('rst3-default-path') or ''
    self.generate_rst_header_comment = getBool('rst3-generate-rst-header-comment', default=True)
    self.underline_characters = (
        getString('rst3-underline-characters')
        or self.default_underline_characters)
    self.write_intermediate_file = getBool('rst3-write-intermediate-file', default=True)
    self.write_intermediate_extension = getString('rst3-write-intermediate-extension') or '.txt'
    #
    # Docutils options.
    self.call_docutils = getBool('rst3-call-docutils', default=True)
    self.publish_argv_for_missing_stylesheets = getString('rst3-publish-argv-for-missing-stylesheets') or ''
    self.stylesheet_embed = getBool('rst3-stylesheet-embed', default=False)  # New in leoSettings.leo.
    self.stylesheet_name = getString('rst3-stylesheet-name') or 'default.css'
    self.stylesheet_path = getString('rst3-stylesheet-path') or ''
#@+node:ekr.20100813041139.5920: *3* rst: Entry points
#@+node:ekr.20210403150303.1: *4* rst.rst-convert-legacy-outline
@cmd('rst-convert-legacy-outline')
@cmd('convert-legacy-rst-outline')
def convert_legacy_outline(self, event=None):
    """
    Convert @rst-preformat nodes and `@ @rst-options` doc parts.
    """
    c = self.c
    for p in c.all_unique_positions():
        if g.match_word(p.h, 0, '@rst-preformat'):
            self.preformat(p)
        # Options conversion applies to every node, not only preformat ones.
        self.convert_rst_options(p)
#@+node:ekr.20210403153112.1: *5* rst.convert_rst_options
# Class-level patterns: an `@ @rst-options` doc part and its default_path line.
options_pat = re.compile(r'^@ @rst-options', re.MULTILINE)
default_pat = re.compile(r'^default_path\s*=(.*)$', re.MULTILINE)

def convert_rst_options(self, p):
    """
    Convert options @doc parts. Change headline to @path <fn>.
    """
    m1 = self.options_pat.search(p.b)
    m2 = self.default_pat.search(p.b)
    # Only convert when a default_path setting appears after @rst-options.
    if m1 and m2 and m2.start() > m1.start():
        fn = m2.group(1).strip()
        if fn:
            old_h = p.h
            p.h = f"@path {fn}"
            print(f"{old_h} => {p.h}")
#@+node:ekr.20210403151958.1: *5* rst.preformat
def preformat(self, p):
    """Convert p.b as if preformatted. Change headline to @rst-no-head"""
    if not p.b.strip():
        return  # Nothing to convert.
    # Indent non-blank lines under an rST literal-block marker.
    indented = []
    for line in g.splitLines(p.b):
        indented.append(f" {line}" if line.strip() else '\n')
    p.b = '::\n\n' + ''.join(indented)
    previous_headline = p.h
    p.h = '@rst-no-head'
    print(f"{previous_headline} => {p.h}")
#@+node:ekr.20090511055302.5793: *4* rst.rst3 command & helpers
@cmd('rst3')
def rst3(self, event=None):
    """Write all @rst nodes."""
    t1 = time.time()
    # Reset the counters reported below.
    self.n_intermediate = self.n_docutils = 0
    self.processTopTree(self.c.p)
    t2 = time.time()
    g.es_print(
        f"rst3: wrote...\n"
        f"{self.n_intermediate:4} intermediate file{g.plural(self.n_intermediate)}\n"
        f"{self.n_docutils:4} docutils file{g.plural(self.n_docutils)}\n"
        f"in {t2 - t1:4.2f} sec.")
#@+node:ekr.20090502071837.62: *5* rst.processTopTree
def processTopTree(self, p):
    """Call processTree for @rst and @slides node p's subtree or p's ancestors."""

    def predicate(p):
        # A root is any @rst or @slides node.
        return self.is_rst_node(p) or g.match_word(p.h, 0, '@slides')

    roots = g.findRootsWithPredicate(self.c, p, predicate=predicate)
    if roots:
        for p in roots:
            self.processTree(p)
    else:
        g.warning('No @rst or @slides nodes in', p.h)
#@+node:ekr.20090502071837.63: *5* rst.processTree
def processTree(self, root):
    """Process all @rst and @slides nodes in root's tree."""
    for p in root.self_and_subtree():
        if self.is_rst_node(p):
            if self.in_rst_tree(p):
                g.trace(f"ignoring nested @rst node: {p.h}")
            else:
                h = p.h.strip()
                fn = h[4:].strip()  # The filename after '@rst'.
                if fn:
                    source = self.write_rst_tree(p, fn)
                    self.write_docutils_files(fn, p, source)
        # Bug fix: test p.h, not the local h, which is only bound after a
        # previous @rst node was seen (NameError/stale value otherwise).
        elif g.match_word(p.h, 0, "@slides"):
            if self.in_slides_tree(p):
                g.trace(f"ignoring nested @slides node: {p.h}")
            else:
                self.write_slides(p)
#@+node:ekr.20090502071837.64: *5* rst.write_rst_tree
def write_rst_tree(self, p, fn):
    """Convert p's tree to rst sources and return them as one string."""
    c = self.c
    self.root = p.copy()
    #
    # Init encoding and path.
    d = c.scanAllDirectives(p)
    self.encoding = d.get('encoding') or 'utf-8'
    self.path = d.get('path') or ''
    # Write the output to self.result_list.
    self.result_list = []  # All output goes here.
    if self.generate_rst_header_comment:
        self.result_list.append(f".. rst3: filename: {fn}")
    for p in self.root.self_and_subtree():
        self.writeNode(p)
    source = self.compute_result()
    return source
#@+node:ekr.20100822092546.5835: *5* rst.write_slides & helper
def write_slides(self, p):
    """Convert p's children to slides, one numbered output file per child."""
    c = self.c
    p = p.copy()
    h = p.h
    i = g.skip_id(h, 1)  # Skip the '@'
    kind, fn = h[:i].strip(), h[i:].strip()
    if not fn:
        g.error(f"{kind} requires file name")
        return
    title = p.firstChild().h if p and p.firstChild() else '<no slide>'
    title = title.strip().capitalize()
    n_tot = p.numberOfChildren()
    n = 1
    d = c.scanAllDirectives(p)
    self.encoding = d.get('encoding') or 'utf-8'
    self.path = d.get('path') or ''
    for child in p.children():
        # Compute the slide's file name.
        fn2, ext = g.os_path_splitext(fn)
        fn2 = f"{fn2}-{n:03d}{ext}"  # Use leading zeros for :glob:.
        n += 1
        # Write the rst sources.
        self.result_list = []
        self.writeSlideTitle(title, n - 1, n_tot)
        self.result_list.append(child.b)
        source = self.compute_result()
        self.write_docutils_files(fn2, p, source)
#@+node:ekr.20100822174725.5836: *6* rst.writeSlideTitle
def writeSlideTitle(self, title, n, n_tot):
    """Write the title, underlined with the '#' character."""
    # All slides but the first get an "(n of m)" suffix.
    if n != 1:
        title = f"{title} ({n} of {n_tot})"
    encoded = g.toEncodedString(title, encoding=self.encoding, reportErrors=False)
    width = max(4, len(encoded))
    self.result_list.append(f"{title}\n{'#' * width}")
#@+node:ekr.20090502071837.85: *5* rst.writeNode & helper
def writeNode(self, p):
    """Append the rst sources for node p to self.result_list."""
    c = self.c
    # Honor @rst-ignore / @rst-ignore-tree before writing anything.
    if self.is_ignore_node(p) or self.in_ignore_tree(p):
        return
    if g.match_word(p.h, 0, '@rst-no-head'):
        # Body only: the headline is suppressed.
        self.result_list.append(self.filter_b(c, p))
    else:
        self.http_addNodeMarker(p)
        if p != self.root:
            self.result_list.append(self.underline(p, self.filter_h(c, p)))
        self.result_list.append(self.filter_b(c, p))
#@+node:ekr.20090502071837.96: *6* rst.http_addNodeMarker
def http_addNodeMarker(self, p):
    """
    Add a node marker for the mod_http plugin (HtmlParserClass class).

    The first three elements are a stack of tags, the rest is html code::

        [
            <tag n start>, <tag n end>, <other stack elements>,
            <html line 1>, <html line 2>, ...
        ]

    <other stack elements> has the same structure::

        [<tag n-1 start>, <tag n-1 end>, <other stack elements>]
    """
    # Only emit markers when the rst3-http-server-support setting is on.
    if self.http_server_support:
        self.nodeNumber += 1
        anchorname = f"{self.node_begin_marker}{self.nodeNumber}"
        self.result_list.append(f".. _{anchorname}:")
        self.http_map[anchorname] = p.copy()
#@+node:ekr.20100813041139.5919: *4* rst.write_docutils_files & helpers
def write_docutils_files(self, fn, p, source):
    """Write source to the intermediate file and write the output from docutils."""
    junk, ext = g.os_path_splitext(fn)
    ext = ext.lower()
    fn = self.computeOutputFileName(fn)
    ok = self.createDirectoryForFile(fn)
    if not ok:
        return
    # Write the intermediate file.
    if self.write_intermediate_file:
        self.writeIntermediateFile(fn, source)
    # Should we call docutils?
    if not self.call_docutils:
        return
    if ext not in ('.htm', '.html', '.tex', '.pdf', '.s5', '.odt'):  # #1884: test now.
        return
    # Write the result from docutils.
    s = self.writeToDocutils(source, ext)
    if s and ext in ('.html', '.htm'):
        s = self.addTitleToHtml(s)
    if not s:
        return
    s = g.toEncodedString(s, 'utf-8')
    with open(fn, 'wb') as f:
        f.write(s)
    self.n_docutils += 1
    self.report(fn)
#@+node:ekr.20100813041139.5913: *5* rst.addTitleToHtml
def addTitleToHtml(self, s):
    """
    Replace an empty <title> element by the contents of the first <h1>
    element.
    """
    empty_title = '<title></title>'
    if empty_title not in s:
        return s
    # Prefer a plain <h1>; fall back to an <h1> wrapping an anchor.
    m = re.search(r'<h1>([^<]*)</h1>', s)
    if not m:
        m = re.search(r'<h1><[^>]+>([^<]*)</a></h1>', s)
    if m:
        s = s.replace(empty_title, f"<title>{m.group(1)}</title>")
    return s
#@+node:ekr.20090502071837.89: *5* rst.computeOutputFileName
def computeOutputFileName(self, fn):
    """Return the full path to the output file."""
    c = self.c
    openDirectory = c.frame.openDirectory
    # Precedence: rst3-default-path setting, then @path, then the outline's
    # open directory, then fn as-is.
    if self.default_path:
        path = g.os_path_finalize_join(self.path, self.default_path, fn)
    elif self.path:
        path = g.os_path_finalize_join(self.path, fn)
    elif openDirectory:
        path = g.os_path_finalize_join(self.path, openDirectory, fn)
    else:
        path = g.os_path_finalize_join(fn)
    return path
#@+node:ekr.20100813041139.5914: *5* rst.createDirectoryForFile
def createDirectoryForFile(self, fn):
    """
    Create the directory for fn if
    a) it doesn't exist and
    b) the user options allow it.

    Return True if the directory existed or was made.
    """
    c, ok = self.c, False  # 1815.
    # Create the directory if it doesn't exist.
    theDir, junk = g.os_path_split(fn)
    theDir = g.os_path_finalize(theDir)  # 1341
    if g.os_path_exists(theDir):
        return True
    if c and c.config and c.config.create_nonexistent_directories:
        theDir = c.expand_path_expression(theDir)
        ok = g.makeAllNonExistentDirectories(theDir)  # type:ignore
        if not ok:
            g.error('did not create:', theDir)
    return ok
#@+node:ekr.20100813041139.5912: *5* rst.writeIntermediateFile
def writeIntermediateFile(self, fn, s):
    """Write s to the file named fn plus the configured intermediate extension."""
    extension = self.write_intermediate_extension
    if not extension.startswith('.'):
        extension = '.' + extension
    out_fn = fn + extension
    with open(out_fn, 'w', encoding=self.encoding) as f:
        f.write(s)
    self.n_intermediate += 1
    self.report(out_fn)
#@+node:ekr.20090502071837.65: *5* rst.writeToDocutils & helper
def writeToDocutils(self, s, ext):
    """Send s to docutils using the writer implied by ext and return the result."""
    if not docutils:
        g.error('writeToDocutils: docutils not present')
        return None
    join = g.os_path_finalize_join
    openDirectory = self.c.frame.openDirectory
    overrides = {'output_encoding': self.encoding}
    #
    # Compute the args list if the stylesheet path does not exist.
    styleSheetArgsDict = self.handleMissingStyleSheetArgs()
    # Select the writer: pdf uses Leo's own writer instance; all others are
    # named docutils writers chosen by extension.
    if ext == '.pdf':
        module = g.import_module('leo.plugins.leo_pdf')
        if not module:
            return None
        writer = module.Writer()  # Get an instance.
        writer_name = None
    else:
        writer = None
        for ext2, writer_name in (
            ('.html', 'html'),
            ('.htm', 'html'),
            ('.tex', 'latex'),
            ('.pdf', 'leo.plugins.leo_pdf'),
            ('.s5', 's5'),
            ('.odt', 'odt'),
        ):
            if ext2 == ext:
                break
        else:
            g.error(f"unknown docutils extension: {ext}")
            return None
    #
    # Make the stylesheet path relative to open directory.
    rel_stylesheet_path = self.stylesheet_path or ''
    stylesheet_path = join(openDirectory, rel_stylesheet_path)
    assert self.stylesheet_name
    path = join(self.stylesheet_path, self.stylesheet_name)
    if not self.stylesheet_embed:
        rel_path = join(rel_stylesheet_path, self.stylesheet_name)
        rel_path = rel_path.replace('\\', '/')
        overrides['stylesheet'] = rel_path
        overrides['stylesheet_path'] = None
        overrides['embed_stylesheet'] = None
    elif os.path.exists(path):
        if ext != '.pdf':
            overrides['stylesheet'] = path
            overrides['stylesheet_path'] = None
    elif styleSheetArgsDict:
        g.es_print('using publish_argv_for_missing_stylesheets', styleSheetArgsDict)
        overrides.update(styleSheetArgsDict)  # MWC add args to settings
    elif rel_stylesheet_path == stylesheet_path:
        g.error(f"stylesheet not found: {path}")
    else:
        g.error('stylesheet not found\n', path)
        if self.path:
            g.es_print('@path:', self.path)
        g.es_print('open path:', openDirectory)
        if rel_stylesheet_path:
            g.es_print('relative path:', rel_stylesheet_path)
    try:
        result = None
        result = docutils.core.publish_string(source=s,
            reader_name='standalone',
            parser_name='restructuredtext',
            writer=writer,
            writer_name=writer_name,
            settings_overrides=overrides)
        if isinstance(result, bytes):
            result = g.toUnicode(result)
    except docutils.ApplicationError as error:
        g.error('Docutils error:')
        g.blue(error)
    except Exception:
        g.es_print('Unexpected docutils exception')
        g.es_exception()
    return result
#@+node:ekr.20090502071837.66: *6* rst.handleMissingStyleSheetArgs
def handleMissingStyleSheetArgs(self, s=None):
    """
    Parse the publish_argv_for_missing_stylesheets option,
    returning a dict containing the parsed args.
    """
    if 0:
        # See http://docutils.sourceforge.net/docs/user/config.html#documentclass
        return {
            'documentclass': 'report',
            'documentoptions': 'english,12pt,lettersize',
        }
    if not s:
        s = self.publish_argv_for_missing_stylesheets
    if not s:
        return {}
    #
    # Handle argument lists such as this:
    # --language=en,--documentclass=report,--documentoptions=[english,12pt,lettersize]
    d = {}
    while s:
        s = s.strip()
        if not s.startswith('--'):
            break
        s = s[2:].strip()
        eq = s.find('=')
        cm = s.find(',')
        if eq == -1 or (-1 < cm < eq):  # key[nl] or key,
            # Bare flag: no '=' before the next comma (or end of string).
            val = ''
            cm = s.find(',')
            if cm == -1:
                key = s.strip()
                s = ''
            else:
                key = s[:cm].strip()
                s = s[cm + 1 :].strip()
        else:  # key = val
            key = s[:eq].strip()
            s = s[eq + 1 :].strip()
            if s.startswith('['):  # [...]
                # A bracketed value may itself contain commas.
                rb = s.find(']')
                if rb == -1:
                    break  # Bad argument.
                val = s[: rb + 1]
                s = s[rb + 1 :].strip()
                if s.startswith(','):
                    s = s[1:].strip()
            else:  # val[nl] or val,
                cm = s.find(',')
                if cm == -1:
                    val = s
                    s = ''
                else:
                    val = s[:cm].strip()
                    s = s[cm + 1 :].strip()
        if not key:
            break
        if not val.strip():
            val = '1'  # Bare flags become "1".
        d[str(key)] = str(val)
    return d
#@+node:ekr.20090512153903.5803: *4* rst.writeAtAutoFile & helpers
def writeAtAutoFile(self, p, fileName, outputFile):
    """
    at.writeAtAutoContents calls this method to write an @auto tree
    containing imported rST code.

    at.writeAtAutoContents will close the output file.
    """
    self.result_list = []
    self.initAtAutoWrite(p)
    self.root = p.copy()
    after = p.nodeAfterTree()
    if not self.isSafeWrite(p):
        return False
    try:
        self.at_auto_write = True  # Set the flag for underline.
        p = p.firstChild()  # A hack: ignore the root node.
        while p and p != after:
            # NOTE(review): writeNode as shown here does not visibly advance
            # p -- confirm the advancement side effect, else this loops forever.
            self.writeNode(p)  # side effect: advances p
        s = self.compute_result()
        outputFile.write(s)
        ok = True
    except Exception:
        ok = False
    finally:
        self.at_auto_write = False
    return ok
#@+node:ekr.20090513073632.5733: *5* rst.initAtAutoWrite
def initAtAutoWrite(self, p):
    """Init underlining for an @auto write."""
    # User-defined underlining characters make no sense in @auto-rst.
    d = p.v.u.get('rst-import', {})
    underlines2 = d.get('underlines2', '')
    #
    # Do *not* set a default for overlining characters.
    if len(underlines2) > 1:
        # Only a single overline character is supported.
        underlines2 = underlines2[0]
        g.warning(f"too many top-level underlines, using {underlines2}")
    underlines1 = d.get('underlines1', '')
    #
    # Pad underlines with default characters.
    default_underlines = '=+*^~"\'`-:><_'
    if underlines1:
        for ch in default_underlines[1:]:
            if ch not in underlines1:
                underlines1 = underlines1 + ch
    else:
        underlines1 = default_underlines
    self.at_auto_underlines = underlines2 + underlines1
    self.underlines1 = underlines1
    self.underlines2 = underlines2
#@+node:ekr.20210401155057.7: *5* rst.isSafeWrite
def isSafeWrite(self, p):
    """
    Return True if node p contributes nothing but
    rst-options to the write.
    """
    for line in g.splitLines(p.b):
        is_real = line.strip() and not line.startswith('@') and not line.startswith('.. ')
        if is_real:
            # A real line that will not be written.
            g.error('unsafe @auto-rst')
            g.es('body text will be ignored in\n', p.h)
            return False
    return True
#@+node:ekr.20090502071837.67: *4* rst.writeNodeToString
def writeNodeToString(self, p):
    """
    rst.writeNodeToString: A utility for scripts. Not used in Leo.

    Write p's tree to a string as if it were an @rst node.
    Return the string.
    """
    # p.h doubles as the notional filename for the header comment.
    return self.write_rst_tree(p, fn=p.h)
#@+node:ekr.20210329105456.1: *3* rst: Filters
#@+node:ekr.20210329105948.1: *4* rst.filter_b & self.filter_h
def filter_b(self, c, p):
    """
    Filter p.b with user_filter_b function.
    Don't allow filtering when in the @auto-rst logic.
    """
    if self.user_filter_b and not self.at_auto_write:
        try:
            # pylint: disable=not-callable
            return self.user_filter_b(c, p)
        except Exception:
            # A broken user filter is disabled after its first failure.
            g.es_exception()
            self.user_filter_b = None
    return p.b
def filter_h(self, c, p):
    """
    Filter p.h with user_filter_h function.
    Don't allow filtering when in the @auto-rst logic.
    """
    if self.user_filter_h and not self.at_auto_write:
        try:
            # pylint: disable=not-callable
            return self.user_filter_h(c, p)
        except Exception:
            # A broken user filter is disabled after its first failure.
            g.es_exception()
            self.user_filter_h = None
    return p.h
#@+node:ekr.20210329111528.1: *4* rst.register_*_filter
def register_body_filter(self, f):
    """Register the user body filter. f is called as f(c, p) by filter_b."""
    self.user_filter_b = f
def register_headline_filter(self, f):
    """Register the user headline filter. f is called as f(c, p) by filter_h."""
    self.user_filter_h = f
#@+node:ekr.20210331084407.1: *3* rst: Predicates
def in_ignore_tree(self, p):
    # True if any ancestor of p (below self.root) is an @rst-ignore-tree node.
    return any(g.match_word(p2.h, 0, '@rst-ignore-tree')
        for p2 in self.rst_parents(p))
def in_rst_tree(self, p):
    # True if any ancestor of p (below self.root) is an @rst node.
    return any(self.is_rst_node(p2) for p2 in self.rst_parents(p))
def in_slides_tree(self, p):
    # True if any ancestor of p (below self.root) is an @slides node.
    # Bug fix: test each parent p2, not p itself -- the original tested
    # p.h for every parent, so nesting was never actually detected.
    return any(g.match_word(p2.h, 0, "@slides") for p2 in self.rst_parents(p))
def is_ignore_node(self, p):
    # True if p itself is an @rst-ignore or @rst-ignore-node node.
    return g.match_words(p.h, 0, ('@rst-ignore', '@rst-ignore-node'))
def is_rst_node(self, p):
    # True for @rst itself, but not for @rst-* directives.
    return g.match_word(p.h, 0, "@rst") and not g.match(p.h, 0, "@rst-")
def rst_parents(self, p):
    # Yield p's parents, stopping (exclusive) at self.root.
    for p2 in p.parents():
        if p2 == self.root:
            return
        yield p2
#@+node:ekr.20090502071837.88: *3* rst: Utils
#@+node:ekr.20210326165315.1: *4* rst.compute_result
def compute_result(self):
    """Concatenate all strings in self.result, ensuring exactly one blank line between strings."""
    parts = [f"{s.rstrip()}\n\n" for s in self.result_list if s.strip()]
    return ''.join(parts)
#@+node:ekr.20090502071837.43: *4* rst.dumpDict
def dumpDict(self, d, tag):
    """Dump the given settings dict, one key per line, prefixed by tag."""
    g.pr(tag + '...')
    for key in sorted(d):
        g.pr(f" {key:20} {d.get(key)}")
#@+node:ekr.20090502071837.90: *4* rst.encode
def encode(self, s):
    """Return s converted to an encoded string using self.encoding."""
    return g.toEncodedString(s, encoding=self.encoding, reportErrors=True)
#@+node:ekr.20090502071837.91: *4* rst.report
def report(self, name):
    """Issue a report to the log pane."""
    if self.silent:
        return  # Honors the rst3-verbose setting.
    name = g.os_path_finalize(name)  # 1341
    g.pr(f"wrote: {name}")
#@+node:ekr.20090502071837.92: *4* rst.rstComment
def rstComment(self, s):
    """Return s formatted as an rST comment line."""
    return f".. {s}"
#@+node:ekr.20090502071837.93: *4* rst.underline
def underline(self, p, s):
    """
    Return the underlining string to be used at the given level for string s.
    This includes the headline, and possibly a leading overlining line.
    """
    # Never add the root's headline.
    if not s:
        return ''
    # Underline width is computed on the encoded bytes so that non-ascii
    # titles still get long enough underlines.
    encoded_s = g.toEncodedString(s, encoding=self.encoding, reportErrors=False)
    if self.at_auto_write:
        # We *might* generate overlines for top-level sections.
        u = self.at_auto_underlines
        level = p.level() - self.root.level()
        # This is tricky. The index n depends on several factors.
        if self.underlines2:
            level -= 1  # There *is* a double-underlined section.
            n = level
        else:
            n = level - 1
        if 0 <= n < len(u):
            ch = u[n]
        elif u:
            ch = u[-1]
        else:
            g.trace('can not happen: no u')
            ch = '#'
        # Write longer underlines for non-ascii characters.
        n = max(4, len(encoded_s))
        if level == 0 and self.underlines2:
            # Generate an overline and an underline.
            return f"{ch * n}\n{p.h}\n{ch * n}"
        # Generate only an underline.
        return f"{p.h}\n{ch * n}"
    #
    # The user is responsible for top-level overlining.
    u = self.underline_characters  # '''#=+*^~"'`-:><_'''
    level = max(0, p.level() - self.root.level())
    level = min(level + 1, len(u) - 1)  # Reserve the first character for explicit titles.
    ch = u[level]
    n = max(4, len(encoded_s))
    return f"{s.strip()}\n{ch * n}"
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@@pagewidth 70
#@-leo
| StarcoderdataPython |
152497 | from django.test import TestCase
from django.core.urlresolvers import reverse
class ViewsTestCase(TestCase):
    """Smoke tests for the static 'about' and 'contact' views."""

    def test_about_view(self):
        # NOTE(review): this file imports django.core.urlresolvers, which was
        # removed in Django 2.0 -- presumably targets Django < 2; confirm.
        response = self.client.get(reverse('about'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "About")

    def test_contact_page(self):
        response = self.client.get(reverse('contact'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Contact")
| StarcoderdataPython |
3238619 | <reponame>avilash/TikTokApi
import argparse
from TikTokAPI import TikTokAPI
from utils import read_json_from_file
def getVideoById(video_id):
    """Return metadata for the TikTok video with the given id."""
    # A fresh API client is built per call from the saved cookie file.
    api = TikTokAPI(read_json_from_file("cookie.json"))
    return api.getVideoById(video_id)
def downloadVideoById(video_id):
    """Download the (watermarked) video to <video_id>.mp4."""
    api = TikTokAPI(read_json_from_file("cookie.json"))
    api.downloadVideoById(video_id, video_id+".mp4")
def downloadVideoByIdNoWatermark(video_id):
    """Download the video without a watermark to <video_id>_no_wm.mp4."""
    api = TikTokAPI(read_json_from_file("cookie.json"))
    api.downloadVideoByIdNoWatermark(video_id, video_id+"_no_wm.mp4")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='')
    # --data selects which API call to exercise against the sample video.
    parser.add_argument('--data', dest='data', type=str, help='data/video/video_no_wm')
    args = parser.parse_args()
    var = "6843481669886954757"  # Sample video id used for all modes.
    if args.data == 'data':
        retval = getVideoById(var)
        print(retval)
    elif args.data == 'video':
        downloadVideoById(var)
    elif args.data == 'video_no_wm':
        downloadVideoByIdNoWatermark(var)
    else:
        print("Invalid Argument")
| StarcoderdataPython |
1708101 | import os
from models.person import Person
from models.room import LivingSpace, Office, Room
from .base_db import (DBDoesNotExistException, OverWriteException,
UpdateException, create_session, create_tables,
load_engine)
class Dojo():
"""
models Dojo facillity
"""
def __init__(self, name):
self.rooms = {'offices': {}, 'livingspace': {}}
self.person = {'fellow': {}, 'staff': {}}
self.name = name
self.database_session = None
self.database_engine = None
self.db_migrations = True
self.loaded = None
self.database_name = None
@property
def livingspace(self):
return self.rooms['livingspace'].values()
@property
def fellow(self):
return self.person['fellow'].values()
@property
def staff(self):
return self.person['staff'].values()
@property
def office(self):
return self.rooms['offices'].values()
@property
def takken_names(self):
return (list(self.rooms['offices'].keys()) +
list(self.rooms['livingspace'].keys()))
def add_person_office(self, name, staff):
self.rooms['offices'][name].add_occupant(staff)
def add_fellow_living(self, name, fellow):
self.rooms['livingspace'][name].add_occupant(fellow)
def add_office(self, new_office):
# refactor office
self.rooms['offices'][new_office.name] = new_office
def add_livingspace(self, new_livingspace):
# refactor settet livingspace
self.rooms['livingspace'][new_livingspace.name] = new_livingspace
def add_staff(self, new_staff):
# refactor staff setter
self.person['staff'][new_staff.id] = new_staff
def add_fellow(self, new_fellow):
# refactor fellow setter
self.person['fellow'][new_fellow.id] = new_fellow
def is_fellow(self, person):
"""
returns true if person is fellow @ Dojo else False
"""
return person.id in self.person['fellow']
def is_staff(self, person):
"""
returns True if person is staff @ Dojo else false
"""
return person.id in self.person["staff"]
def get_office(self, name):
return self.rooms['offices'].get(name, False)
def get_livingspace(self, name):
return self.rooms['livingspace'].get(name, False)
def get_person(self, person_id):
staff = self.person['staff'].get(person_id, False)
fellow = self.person['fellow'].get(person_id, False)
person = staff if not fellow else fellow
return person
def get_person_room(self, person):
results = [None, None]
i = 0
for room_type in self.rooms.keys():
for room_name in self.rooms[room_type]:
if self.rooms[room_type][room_name].is_in_room(person):
results[i] = self.rooms[room_type][room_name]
i += 1
break
return results
def remove_office(self, old_space):
"""
Removes Office from the Dojo
"""
if old_space.name in self.rooms['offices']:
for occupants in old_space.occupants:
occupants.office = False
self.delete_from_db(old_space)
del self.rooms['offices'][old_space.name]
return True
return False
def remove_livingspace(self, old_space):
"""
Removes LivingSpace from the Dojo
"""
if old_space.name in self.rooms['livingspace']:
for occupants in old_space.occupants:
occupants.livingspace = False
self.delete_from_db(old_space)
del self.rooms['livingspace'][old_space.name]
return True
return False
def remove_fellow(self, old_fellow):
"""
Remove Fellow fro the Dojo
Return True if succesfull else False
"""
if old_fellow.id in self.person['fellow']:
rooms = self.get_person_room(old_fellow)
for room in rooms:
if room:
room.occupants.remove(old_fellow)
del self.person['fellow'][old_fellow.id]
return True
return False
def remove_staff(self, old_staff):
"""
Remove staff from the Dojo
Return True if succesfull else False
"""
if old_staff.id in self.person['staff']:
rooms = self.get_person_room(old_staff)
for room in rooms:
if room:
room.occupants.remove(old_staff)
self.delete_from_db(old_staff)
del self.person['staff'][old_staff.id]
return True
return False
def save_state(self, database_name="default.db", over_write=False, up=" "):
if type(database_name) != str:
raise TypeError
if over_write:
if os.path.exists("models/database/" + database_name):
os.remove("models/database/" + database_name)
raise OverWriteException
if os.path.exists("models/database/" + database_name) and up:
raise UpdateException
if up:
self.reset_db()
self.init_db(database_name)
self.database_session.add_all(list(self.fellow) + list(self.staff))
self.database_session.add_all(list(self.office) +
list(self.livingspace))
self.database_session.commit()
if not self.database_name:
self.database_name = database_name
def load_state(self, database_name="default.db", previous_state=False):
if previous_state:
database_name = "default.db"
if type(database_name) != str:
raise TypeError
if not os.path.exists("models/database/" + database_name):
raise DBDoesNotExistException
# reset db
self.reset_db()
# initialize db
self.init_db(database_name)
# reset internal state
self.rooms = {'offices': {}, 'livingspace': {}}
self.person = {'fellow': {}, 'staff': {}}
table_name = None
key_value = None
location_to_insert_item = None
types_to_be_loaded = ['offices', 'livingspace', 'fellow', 'staff']
for type_quried in types_to_be_loaded:
if type_quried == 'offices' or type_quried == 'livingspace':
table_name = Room
key_value = "name"
location_to_insert_item = self.rooms
else:
table_name = Person
key_value = "id"
location_to_insert_item = self.person
data = self.query_database_table(table_name, type_quried)
for item in data:
value = None
if key_value == "name":
value = item.name
elif key_value == "id":
value = item.id
location_to_insert_item[type_quried][value] = item
# Update relevent variable
Office.max_occupants = 6
Office.number_of_offices += len(self.office)
LivingSpace.max_occupants = 4
LivingSpace.number_of_livingspace += len(self.livingspace)
Person.number_of_person = len(self.staff) + len(self.fellow)
self.loaded = True
def init_db(self, database_name):
if not self.database_engine:
self.database_engine = load_engine(database_name)
if self.db_migrations:
create_tables(self.database_engine)
self.db_migrations = False
if not self.database_session:
self.database_session = create_session(self.database_engine)
def reset_db(self):
if self.database_session:
self.database_session.commit()
self.database_session.close()
self.database_session = None
self.database_engine = None
self.db_migrations = True
    def delete_from_db(self, item_in_db):
        """Delete *item_in_db* from the database and commit immediately.

        Silently does nothing when no session is currently open.
        """
        if self.database_session:
            self.database_session.delete(item_in_db)
            self.database_session.commit()
def query_database_table(self, table_name, type_quried):
query_set = (self.database_session.query(table_name).filter_by
(type=type_quried))
for item in query_set:
yield item
| StarcoderdataPython |
1624186 | #!/usr/python
# -*- coding: utf-8 -*-
# from qpython import qconnection
# from qpython import qcollection
from binascii import hexlify
import numpy
from qpython import*
# https://github.com/exxeleron/qPython
# https://kx.com/documentation.php
# Open a connection to the remote q/kdb+ process.
# NOTE(review): this script uses Python 2 print statements (see the bare
# `print repr(...)` below) and calls `q.async`, which is a reserved keyword
# from Python 3.7 on — it only runs under Python 2.
q = qconnection.QConnection(host='192.168.3.10', port=9001,
                            username='superuser1',
                            password='password', timeout=3.0)
try:
    q.open()
    # Synchronous queries: plain q expression, lambdas with positional
    # arguments, argument unpacking, and the __call__ shorthand.
    print(q.sync('til 10'))
    print(q.sync('{til x}', 10))
    print(q.sync('{y + til x}', 10, 1))
    print(q.sync('{y + til x}', *[10, 1]))
    print(q('{y + til x}', 10, 1))
    # Low-level query/receive pairs, with and without message metadata.
    q.query(qconnection.MessageType.SYNC, '{x}', 10)
    print(q.receive(data_only=False, raw=False))
    q.query(qconnection.MessageType.SYNC, '{x}', 10)
    print(q.receive(data_only=True, raw=False))
    # Asynchronous call: the server pushes the result back over the handle.
    q.sync('asynchMult:{[a;b] res:a*b; (neg .z.w)(res) }')
    q.async('asynchMult', 2, 3)
    print(q.receive())
    # raw=True returns the undeserialized IPC bytes.
    q.query(qconnection.MessageType.SYNC, '{x}', 10)
    print(hexlify(q.receive(data_only=True, raw=True)))
    # Temporal data: raw bytes vs numpy datetime64 vs qPython temporals.
    query = "{[x] 0Nd, `date$til x}"
    print(hexlify(q(query, 5, raw=True)))
    print(q.sync(query, 5, numpy_temporals=True))
    q.query(qconnection.MessageType.SYNC, query, 3)
    print(q.receive(numpy_temporals=False))
    # single_char_strings toggles how 1-char Python strings are encoded.
    print(q.sync('{[x] type each x}', ['one', 'two', '3'],
                 single_char_strings=False))
    print(q.sync('{[x] type each x}', ['one', 'two', '3'],
                 single_char_strings=True))
    print repr(qcollection.qlist(numpy.array([0x01, 0x02, 0xff],
                                             dtype=numpy.byte)))
    # qcollection.qlist([366, 121, qnull(QDATE)], qtype=QDATE_LIST)
    # qcollection.qlist(numpy.array(
    #     [uuid.UUID('8c680a01-5a49-5aab-5a65-d4bfddb6a661'),
    #      qnull(QGUID)]), qtype=QGUID_LIST)
    [numpy.int64(1), numpy.string_('bcd'), '0bc', numpy.float32(5.5)]
    # qlist([1, 2, 3], qtype=QSHORT_LIST)
    # (1h;2h;3h)
    # qlist([366, 121, qnull(QDATE)], qtype=QDATE_LIST)
    print [numpy.int64(42), None, numpy.string_('foo')]
    # ...
    # pandas=True deserializes q tables/lists into pandas structures.
    ds = q('(1i;0Ni;3i)', pandas=True)
    print ds
    print(ds.meta)
finally:
    # Always release the socket, even when a query above raised.
    q.close()
| StarcoderdataPython |
3377435 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 8 15:12:05 2021
@author: samuel
"""
import re
import json
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import difflib
from pprint import pprint
import time
import os
import shutil
import pandas as pd
## importing custom files / modules
import errorhandling
import database
# Shared database connection used by both webmining and webscrapper below.
dbc = database.databaseConnect()
## ================ helper functions ======================= ##
def loadwebsite(url):
    """
    this function loads the website of given url.
    make sure network is connected.
    Parameters
    ----------
    url : str
        website url to be loaded.
    Returns
    -------
    webdata : BeautifulSoup Object
        returns the BeautifulSoup object if the website response is 200.
        or else it will return empty string and print the error message when request is made.
    """
    webdata = ""
    response = requests.get(url)
    if response.status_code == 200:
        webdata = BeautifulSoup(response.text, "html.parser")
    else:
        # Fixed typo in the log message: "reponse" -> "response".
        print("[ ! ] failed to request the web : {} response code : {}".format(url, response.status_code))
    return webdata
def cleantext(sentence):
    """
    Collapse every run of non-alphanumeric characters in *sentence* into a
    single space and strip leading/trailing whitespace.
    Parameters
    ----------
    sentence : str
        input sentence to be cleaned from special chars
    Returns
    -------
    str
        the sentence with all special characters removed
    """
    collapsed = re.sub(r'[^A-Za-z0-9]+', ' ', sentence)
    return collapsed.strip()
## ================ operational classes ==================== ##
class webmining():
    """Discover which HTML (tag, attributes) pairs contain the data samples
    listed in a template, and persist the result as 'mapper.json' for later
    use by webscrapper."""

    def __init__(self, webtemplate_filepath, scrapping_information, mining_mode = "online", account_path="."):
        """
        webmining will find the most relevant tag and attributes to get the required data.
        webmining supports 'online' and 'offline' way of searching the tags and attributes
        webmining once completed, it will save the 'mapper.json' file in the folder created by the name of 'template_name'
        'mapper.json' is used for webdata scrapping
        """
        self.webtemplate_filepath = webtemplate_filepath
        self.mining_mode = mining_mode
        self.account_path = account_path
        self.scrapping_information = scrapping_information

    def startmining(self):
        """
        this function will start the mining process
        steps of mining are:
            a. load the template
            b. select the mining mode (offline / online)
            c. if offline mode - download the webpages listed in 'urls_to_scrap'
            d. scan and get all the unique tags of webpages
            e. loop through all the tags and scrap the webpage and match the text from template.
            f. saving most matched all the tags and attributes in 'web_mapper' key and save the final results into 'mapper.json'
        NOTE : scrapped data matches with the text in mapper and gets a score, score used in sequence matcher
        Returns
        -------
        None.
        """
        try:
            # The template folder is expected to contain exactly one file.
            self.webtemplate_filepath = os.path.join(self.webtemplate_filepath, os.listdir(self.webtemplate_filepath)[0])
            print("[ * ] starting webmining for template : ", self.webtemplate_filepath)
            print("[ ! ] web mining mode : ", self.mining_mode)
            self.webtemplate = self.loadWebTemplate()
            if self.mining_mode == "offline":
                self.downloadwebpages()
                with open(self.offline_webpage_filepath, "rb") as fip:
                    self.webdata = BeautifulSoup(fip.read())
            else:
                self.webdata = loadwebsite(url = self.webtemplate["website"])
            self.webtags = set([tag.name for tag in self.webdata.find_all()])
            print("[ * ] website tag elements count : ", len(self.webtags), "\n")
            time.sleep(3)
            self.web_element_mapped = []
            for info_ext in self.webtemplate["data"]:
                ## mining for text type data
                if info_ext["type"] == "text":
                    for data_key, data_sample in info_ext["information"].items():
                        final_tag_dimension = {}
                        text_match_score = 0.0
                        print("--------------------------------------------------------")
                        for tag_name in self.webtags:
                            time.sleep(2)
                            tag_dimension_score = self.searchElementMap(information_type="text",
                                                                        tag_name = tag_name,
                                                                        search_sample=data_sample,
                                                                        data_key = data_key)
                            if tag_dimension_score["score"] > text_match_score:
                                # BUGFIX: remember the best score seen so far so a
                                # later, lower-scoring tag cannot overwrite a better
                                # earlier match (previously text_match_score stayed 0.0).
                                text_match_score = tag_dimension_score["score"]
                                final_tag_dimension = {
                                    "data_key" : data_key,
                                    "tag_name" : tag_name,
                                    "tag_dimension" : tag_dimension_score["dimension_map"],
                                    "match_score" : tag_dimension_score["score"]
                                    }
                            if (tag_dimension_score["score"] == 1.0):
                                # perfect match - no point scanning further tags
                                break
                        self.web_element_mapped.append(final_tag_dimension)
            ## saving the mined webmapper
            self.savemapper()
        except:
            errorhandling.catchError(custom_message="webminner failed for request_id : "+self.scrapping_information["request_id"])

    def loadWebTemplate(self):
        """
        a. load the template
        Returns
        -------
        webtemplate : dict
            webtemplate whose data is to be mined
        """
        print("[ * ] loading webtemplate. status = ", end="")
        try:
            with open(self.webtemplate_filepath, "r") as fp:
                webtemplate = json.load(fp)
            print("success")
            return webtemplate
        except Exception as err_msg:
            print("failed. error message : "+str(err_msg))
            errorhandling.catchError(custom_message="failed to load webtemplate. request_id : "+self.scrapping_information["request_id"])

    def downloadwebpages(self):
        """
        c. if offline mode - download the webpage of the template's website
        the downloaded web page will be saved in the 'temp' folder
        Returns
        -------
        None.
        """
        try:
            print("[ * ] downloading website.", end=" status = ")
            download_filepath = os.path.join(self.account_path, "temp")
            if "temp" not in os.listdir(self.account_path):
                os.makedirs(download_filepath)
            webdata = loadwebsite(url = self.webtemplate["website"])
            # Derive a filesystem-safe name from the page title.
            filename = webdata.title.text.replace(" ","_") + ".html"
            self.offline_webpage_filepath = os.path.join(download_filepath, filename)
            html = webdata.prettify("utf-8")
            with open(self.offline_webpage_filepath, "wb") as file:
                file.write(html)
            print("success")
        except Exception as err_msg:
            print("failed. error message : "+str(err_msg))
            errorhandling.catchError(custom_message="failed to download webpage. request_id : "+self.scrapping_information["request_id"])

    def mineWebElements(self, tag_name):
        """
        d. scan and get all the unique tags of webpages
        this function will get all the attributes of the given 'tag_name'
        Parameters
        ----------
        tag_name : str
            tag name whose all the attributes need to be scanned
        Returns
        -------
        webelement_map : dict
            mapped tag and its attributes list
        """
        try:
            webelement_map = {tag_name : []}
            for webobj in self.webdata.find_all(tag_name):
                emap = {}
                for attr_name, attr_value in webobj.attrs.items():
                    # Multi-valued attributes (e.g. class) come back as lists;
                    # normalise them to a single space-joined string.
                    if isinstance(attr_value, list):
                        attr_value = " ".join(attr_value)
                    emap[attr_name] = attr_value
                webelement_map[tag_name].append(emap)
            return webelement_map
        except:
            errorhandling.catchError(custom_message="failed to process mining web elements. request_id : "+self.scrapping_information["request_id"])
            return {tag_name : []}

    def searchElementMap(self, information_type, tag_name, search_sample, data_key):
        """
        e. loop through all the tags and scrap the webpage and match the text from template.
        this function will map the best fit tag with attribute for searching "search_sample"
        best fit match is found from sequence matcher with extracted_text and "search_sample"
        Parameters
        ----------
        information_type : str
            type of web information to be extracted i.e. text, url, image, video
        tag_name : str
            one of the tag names already mined from the loaded webpage
        search_sample : str
            sample of text, urls to be searched; these data are loaded from the template
        data_key : str
            key name of the 'search_sample' to be mapped
        Returns
        -------
        tag_dimension : dict
            the best fit map of tag, attribute for given 'search_sample'
            this will also have the score computed from sequence matcher
        """
        try:
            break_flag = -1
            match_score = 0.0
            tag_dimension = {"dimension_map" : {},
                             "score" : match_score}
            if information_type == "text":
                finding_text = cleantext(search_sample)
                object_dimension = self.mineWebElements(tag_name = tag_name)
                for dimension_map in tqdm(object_dimension[tag_name], desc="mining for data : {} with tag : {}".format(data_key, tag_name)):
                    for tag in self.webdata.find_all(tag_name, dimension_map):
                        extracted_text = cleantext(tag.text)
                        score = difflib.SequenceMatcher(None, finding_text, extracted_text).ratio()
                        if score > match_score:
                            tag_dimension = {"dimension_map" : dimension_map,
                                             "score" : score}
                            match_score = score
                        if finding_text == extracted_text:
                            # Exact match: stop scanning both loops.
                            break_flag = 0
                            break
                    if break_flag == 0:
                        break
            return tag_dimension
        except:
            errorhandling.catchError(custom_message="failed to search element maps. request_id : "+self.scrapping_information["request_id"])

    def savemapper(self):
        """
        f. saving most matched all the tags and attributes in 'web_mapper' key and save the final results into 'mapper.json'
        this function will save all the tag and attribute mapped to given information to be extracted.
        'mapper.json' is saved in a folder named after 'template_name' from the template
        Returns
        -------
        None.
        """
        time.sleep(2)
        try:
            print("[ * ] saving the webmapper.", end=" status = ")
            self.webtemplate.update({"web_mapper" : self.web_element_mapped})
            output_filepath = os.path.join(self.account_path, "mapper.json")
            with open(output_filepath, "w") as fop:
                json.dump(self.webtemplate, fop)
            # Fixed log typo: "sucess" -> "success".
            print("success")
            print("\t\t > mapper location : ", output_filepath)
            # Persist the mapper together with the request metadata in the DB.
            req_mapper = self.webtemplate
            req_mapper.update(self.scrapping_information)
            query = {"collection_name" : "scrappingInformation",
                     "data" : req_mapper}
            dbc.insertData(querydata = query)
        except Exception as err_msg:
            print("failed. error message :"+str(err_msg))
            errorhandling.catchError(custom_message="failed to save the mapper. request_id : "+self.scrapping_information["request_id"])
class webscrapper():
    # Consumes the 'mapper.json' produced by webmining and extracts the mapped
    # data from every URL in 'urls_to_scrap', saving results to disk and DB.

    def __init__(self, web_mapper_filepath, scrapping_information, scrapping_mode = "online", account_path = "."):
        print("[ * ] starting webscrapping for template : ", web_mapper_filepath)
        print("[ ! ] web scrapping mode : ", scrapping_mode)
        """
        webscrapper will load the 'mapper.json' created by the webmining operation.
        webscrapper has offline and online modes of scrapping.
        'offline' mode will download all the webpages which are in the key 'urls_to_scrap' in the 'mapper.json' file;
        downloaded webpages are saved in the 'temp' folder.
        'online' mode will directly start scrapping using the 'web_mapper' key, which holds tag and attribute information.
        Later all the scrapped data is saved in the 'data' folder.
        """
        self.web_mapper_filepath = web_mapper_filepath
        self.scrapping_mode = scrapping_mode
        self.account_path = account_path
        self.scrapping_information = scrapping_information
        self.loadWebMapper()

    def loadWebMapper(self):
        """
        a. load the 'mapper.json' which is generated from the mining process.
        this function will load the 'mapper.json' template which has all the tags and attributes to be used for scrapping
        Returns
        -------
        None.
        """
        print("[ * ] loading webmapper. status = ", end="")
        self.webmapper = ""
        try:
            with open(self.web_mapper_filepath, "r") as fp:
                self.webmapper = json.load(fp)
                #webmapper = webmapper["web_mapper"]
            print("success")
        except Exception as err_msg:
            print("failed. error message : "+str(err_msg))
            errorhandling.catchError(custom_message="failed to load webmapper. request_id : "+self.scrapping_information["request_id"])

    def downloadwebpages(self):
        """
        c. if offline mode, then download all the webpages from key 'urls_to_scrap' into the 'temp' folder
        this function will download all the webpages and save them in the "temp" folder
        Returns
        -------
        None.
        """
        try:
            self.downloadpages_metadata = []
            self.download_filepath = os.path.join(self.account_path, "temp")
            # Start from an empty 'temp' folder on every run.
            if "temp" not in os.listdir(self.account_path):
                os.makedirs(self.download_filepath)
            else:
                shutil.rmtree(self.download_filepath)
                os.makedirs(self.download_filepath)
            page_count = 1
            for website_url in tqdm(self.webmapper["urls_to_scrap"], desc="downloading websites"):
                webdata = loadwebsite(url = website_url)
                filename = webdata.title.text.replace(" ","_") + "_page_{}.html".format(page_count)
                self.offline_webpage_filepath = os.path.join(self.download_filepath, filename)
                html = webdata.prettify("utf-8")
                with open(self.offline_webpage_filepath, "wb") as file:
                    file.write(html)
                self.downloadpages_metadata.append({
                    "page_name" : filename, "page_url" : website_url})
                page_count += 1
        except Exception as err_msg:
            # NOTE(review): if the failure happens before the loop starts,
            # 'website_url' is unbound here and this handler itself raises.
            print("failed. error message : {} for url {}".format(err_msg, website_url))
            errorhandling.catchError(custom_message="failed to download webpage {} for request_id : {}".format(website_url, self.scrapping_information["request_id"]))

    def startscrapping(self):
        """
        this function will start the scrapping process.
        steps of scrapping are:
            a. load the 'mapper.json' which is generated from the mining process.
            b. check for (offline / online) scrapping mode.
            c. if offline mode, then download all the webpages from key 'urls_to_scrap' into the 'temp' folder
            d. loop through all the downloaded webpages and start scrapping using the 'web_mapper' key from 'mapper.json'
            e. collect all the information and reformat data
            f. save the collected data
        Returns
        -------
        None.
        """
        try:
            self.all_page_processed_data = {}
            print("[ * ] scrapping started. mode : ", self.scrapping_mode)
            time.sleep(2)
            if self.scrapping_mode == "offline":
                self.downloadwebpages()
                for saved_webpage_info in tqdm(self.downloadpages_metadata, desc="scrapping downloaded webpages"):
                    time.sleep(2)
                    with open(os.path.join(self.download_filepath, saved_webpage_info["page_name"]), "rb") as fip:
                        self.webdata = BeautifulSoup(fip.read())
                    webpage_data = self.processwebpages()
                    self.all_page_processed_data[saved_webpage_info["page_url"]] = {"page_name" : saved_webpage_info["page_name"],
                                                                                    "data" : webpage_data}
            elif self.scrapping_mode == "online":
                page_count = 1
                for webpage_url in self.webmapper["urls_to_scrap"]:
                    self.webdata = loadwebsite(webpage_url)
                    page_name = self.webdata.title.text.replace(" ","_") + "_page_{}.html".format(page_count)
                    webpage_data = self.processwebpages()
                    self.all_page_processed_data[webpage_url] = {"page_name" : page_name,
                                                                 "data" : webpage_data}
                    page_count += 1
            self.reformat()
            self.savescrapperdata()
        except:
            errorhandling.catchError(custom_message="failed to process the scrapping request. request_id : "+self.scrapping_information["request_id"])

    def processwebpages(self):
        """
        d. scrap the current page using the mapped tags/attributes
        this function will scrap the data based on tag and attribute mapped
        Returns
        -------
        webpage_data : dict
            mapping of each template 'data_key' to the list of cleaned text
            values extracted from the matching tags.
        """
        try:
            webpage_data = {}
            for search_element in self.webmapper["web_mapper"]:
                searched_element_data = [cleantext(taglog.text) for taglog in self.webdata.find_all(search_element["tag_name"], search_element["tag_dimension"])]
                webpage_data[search_element["data_key"]] = searched_element_data
            return webpage_data
        except:
            errorhandling.catchError(custom_message = "failed to process webpages. request_id" + self.scrapping_information["request_id"])
            return {}

    def reformat(self):
        """
        e. collect all the information and reformat data
        this function will reformat data, which means it will pair each data_key into a list of dicts
        NOTE : 'non-reformatted' data has mismatched 'data_key' value counts;
        pages where this happened are stored in 'class_object.skipped_webpages'
        Returns
        -------
        None.
        """
        try:
            self.all_page_reformated_data = []
            self.skipped_webpages = {}
            self.reformat_success_flag = -1
            for page_key in tqdm(self.all_page_processed_data.keys(), desc="reformatting page data"):
                datapoints = self.all_page_processed_data[page_key]["data"].keys()
                data_counts = {data_key : len(self.all_page_processed_data[page_key]["data"][data_key]) for data_key in datapoints}
                # All data keys must have the same number of values to zip
                # them row-wise into records.
                match_count = list(set(data_counts.values()))
                if len(match_count) == 1:
                    data = pd.DataFrame(self.all_page_processed_data[page_key]["data"])
                    data = data.to_dict(orient="records")
                    self.all_page_reformated_data.append(data)
                    self.reformat_success_flag = 0
                else:
                    # NOTE(review): dict.update with fixed keys means only the
                    # LAST skipped page survives here; the flag also reflects
                    # only the last page processed — verify this is intended.
                    self.skipped_webpages.update({"page_key" : page_key,
                                                  "data_count" : data_counts})
                    self.reformat_success_flag = -1
            if len(self.skipped_webpages) > 1:
                print("[ ! ] reformating skilled pages due to page data in-consistency")
                pprint(self.skipped_webpages)
        except:
            errorhandling.catchError(custom_message="failed to reformat data request_id : "+self.scrapping_information["request_id"])

    def savescrapperdata(self):
        """
        f. save the collected data
        this function will save the scrapped data into json format.
        save mode is of 2 types: 'reformatted' and 'non-reformatted'
        Returns
        -------
        None.
        """
        time.sleep(2)
        try:
            output_folderpath = os.path.join(self.account_path, "data")
            if "data" not in os.listdir(self.account_path):
                os.mkdir(output_folderpath)
            output_filepath = os.path.join(output_folderpath, self.webmapper["template_name"]+".json")
            if self.reformat_success_flag == 0:
                print("[ * ] saving reformatted data", end=" status = ")
                data_to_save = self.all_page_reformated_data
            else:
                print("[ * ] saving non-reformatted data", end=" status = ")
                data_to_save = self.all_page_processed_data
            with open(output_filepath, "w") as fop:
                json.dump(data_to_save, fop)
            # Flatten a single-page result so records are tagged individually.
            # NOTE(review): when data_to_save is the non-reformatted dict,
            # data_to_save[0] / data_record.update below assume list-of-dict
            # shape and would fail — confirm the non-reformatted path.
            if len(data_to_save) == 1:
                data_to_save = data_to_save[0]
            for data_record in data_to_save:
                data_record.update({"account_id" : self.scrapping_information["account_id"],
                                    "request_id" : self.scrapping_information["request_id"]})
            query = {"collection_name" : "scrappedData",
                     "data" : data_to_save}
            dbc.insertData(querydata = query)
            print("success")
            print("\t > ", output_filepath)
        except:
            msg = "failed to save scrapped data. request_id : "+self.scrapping_information["request_id"]
            errorhandling.catchError(custom_message=msg)
## ===================== code execution example =============================== ##
## website mining process
# start_time = time.time()
# wm = webmining(webtemplate_filepath="google_cloud_press_release.json", mining_mode="online")
# wm.startmining()
## website scrapping process
# ws = webscrapper(web_mapper_filepath="google_cloud_press_release/mapper.json", scrapping_mode="online")
# ws.startscrapping()
# end_time = time.time()
# print("[ # ] process took time : ", end_time - start_time) | StarcoderdataPython |
1732724 | from itertools import chain
from operator import itemgetter
from collections import defaultdict
import numpy as np
from gym import spaces
from coordination.environment.deployment import ServiceCoordination
class NFVdeepCoordination(ServiceCoordination):
    # Per-unit cost coefficients applied to allocated resources in
    # compute_reward.
    COMPUTE_UNIT_COST = 0.2
    MEMORY_UNIT_COST = 0.2
    DATARATE_UNIT_COST = 6.0 * 1e-4
    # worked best in our experiments; set similar to threshold in MAVEN-S
    REVENUE = 5.0

    def __init__(self, net_path, process, vnfs, services):
        """Set up the NFVdeep-style simulation with its flat observation space."""
        super().__init__(net_path, process, vnfs, services)
        # observation space of NFVdeep simulation environment:
        # 3 per-node features + 6 request-level features.
        self.OBS_SIZE = 3 * len(self.net.nodes) + 6
        self.observation_space = spaces.Box(low=0.0, high=1.0, shape=(self.OBS_SIZE,), dtype=np.float16)

    def compute_state(self) -> np.ndarray:
        """Build the normalized observation vector for the current request.

        Returns an all-zero vector once the episode is done.
        """
        if self.done:
            return np.zeros(self.OBS_SIZE)
        # (1) encode remaining compute resources
        computing = [c / self.MAX_COMPUTE for c in self.computing.values()]
        # (2) encode remaining memory resources
        memory = [m / self.MAX_MEMORY for m in self.memory.values()]
        # (3) encode remaining output datarate
        MAX_OUTPUT = self.MAX_DEGREE * max(data['datarate'] for _, _, data in self.net.edges(data=True))
        output_rates = defaultdict(float)
        for src in self.net.nodes:
            for trg in self.net.nodes:
                # frozenset keys make the link lookup direction-agnostic
                if frozenset({src, trg}) in self.datarate:
                    output_rates[src] += self.datarate[frozenset({src, trg})]
        output_rates = list(itemgetter(*self.net.nodes)(output_rates))
        output_rates = [rate / MAX_OUTPUT for rate in output_rates]
        # (4) encode request specific properties
        rate = self.request.datarate / self.MAX_LINKRATE
        resd_lat = self.request.resd_lat / 100.0
        # fraction of the service chain still to be placed
        num_components = (len(self.request.vtypes) - len(self.vtype_bidict.mirror[self.request])) / max(len(s) for s in self.services)
        # resource consumption depend on placement decisions; use the mean resource demand
        cdemands, mdemands = [], []
        vnum = len(self.vtype_bidict.mirror[self.request])
        vtype = self.request.vtypes[vnum]
        config = self.vnfs[vtype]
        for node in self.net.nodes:
            # marginal demand of adding this request on top of the rate the
            # node's existing VNF instance already serves
            supplied_rate = sum([service.datarate for service in self.vtype_bidict[(node, vtype)]])
            after_cdem, after_mdem = self.score(supplied_rate + self.request.datarate, config)
            prev_cdem, prev_mdem = self.score(supplied_rate, config)
            cdemand = np.clip((after_cdem - prev_cdem) / self.MAX_COMPUTE, a_min=0.0, a_max=1.0)
            mdemand = np.clip((after_mdem - prev_mdem) / self.MAX_MEMORY, a_min=0.0, a_max=1.0)
            cdemands.append(cdemand)
            mdemands.append(mdemand)
        cdemand = np.mean(cdemands) / self.MAX_COMPUTE
        mdemand = np.mean(mdemands) / self.MAX_MEMORY
        duration = self.request.duration / 100
        request = [rate, resd_lat, num_components, cdemand, mdemand, duration]
        state = chain(computing, memory, output_rates, request)
        return np.asarray(list(state))

    def compute_reward(self, finalized, deployed, req) -> float:
        """Revenue minus the cost of all nodes/links with any allocation.

        Only pays out when the request was successfully deployed; otherwise 0.
        """
        if deployed:
            # cost counts the FULL capacity of every node/link that has any
            # utilization (resource - available > 0), scaled per unit cost
            cresources = np.asarray([data['compute'] for node, data in self.net.nodes(data=True)])
            cavailable = np.asarray([self.computing[node] for node in self.net.nodes])
            ccost = np.sum(((cresources - cavailable) > 0) * cresources) * self.COMPUTE_UNIT_COST / self.MAX_COMPUTE
            mresources = np.asarray([data['memory'] for node, data in self.net.nodes(data=True)])
            mavailable = np.asarray([self.memory[node] for node in self.net.nodes])
            mcost = np.sum(((mresources - mavailable) > 0) * mresources) * self.MEMORY_UNIT_COST / self.MAX_MEMORY
            dresources = np.asarray([data['datarate'] for src, trg, data in self.net.edges(data=True)])
            davailable = np.asarray([self.datarate[frozenset({src, trg})] for src, trg in self.net.edges])
            dcost = np.sum(((dresources - davailable) > 0) * dresources) * self.DATARATE_UNIT_COST / self.MAX_LINKRATE
            # in our setting, the revenue is the same for any request
            return self.REVENUE - (ccost + mcost + dcost)
        return 0.0
| StarcoderdataPython |
18287 | import requests
from bs4 import BeautifulSoup
import json
def loadMasterStock():
    """Download Supreme's mobile stock JSON, cache it to masterstock.json,
    then interactively filter item ids by name into filteredStock.txt."""
    url = "http://www.supremenewyork.com/mobile_stock.json"
    # iOS user agent — the mobile endpoint expects a mobile client.
    user = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1"}
    # user = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}
    r = requests.get(url, headers=user)
    masterStock = json.loads(r.text)
    with open("masterstock.json", 'w') as outfile:
        json.dump(masterStock, outfile, indent=4, sort_keys=True)
    print("Saved to masterstock.json")
    itemInfo = ""
    while(True):
        try:
            item = input("Enter item name to get id or cntrl-c to quit: ")
        except:
            # NOTE(review): bare except — intended to catch Ctrl-C
            # (KeyboardInterrupt) but swallows every other error too.
            print("Exiting...")
            if itemInfo != "":
                itemInfo = itemInfo[:-1]  # drop the trailing comma
            print("\n"+itemInfo)
            with open("filteredStock.txt",'w') as outfile:
                outfile.write(itemInfo)
            exit()
        if item == "new":
            print("Getting all new items...")
            # Emit every item in the "new" category as "id":"name", pairs.
            for itemCount in range(len(masterStock['products_and_categories']["new"])):
                itemInfo += '"'+str(masterStock['products_and_categories']["new"][itemCount]['id'])+'":"'
                itemInfo += str(masterStock['products_and_categories']["new"][itemCount]['name'])+'",'
        else:
            # Case-insensitive substring match on the item name.
            for itemCount in range(len(masterStock['products_and_categories']["new"])):
                if item.lower() in str(masterStock['products_and_categories']["new"][itemCount]['name']).lower():
                    itemInfo += '"'+str(masterStock['products_and_categories']["new"][itemCount]['id'])+'":"'
                    print("Added "+str(masterStock['products_and_categories']["new"][itemCount]['name']))
                    itemInfo += str(masterStock['products_and_categories']["new"][itemCount]['name'])+'",'
        # print(itemInfo)
# print(itemInfo)
# Run the stock loader only when executed as a script.
if __name__ == '__main__':
    loadMasterStock()
| StarcoderdataPython |
3329281 | <reponame>ZhiruiFeng/CarsMemory
# -*- coding: utf-8 -*-
import json
import math
import pandas as pd
import flask
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
from plotly import graph_objs as go
import datetime
from app import app, indicator, millify, df_to_table
# Two-letter USPS abbreviations for all US states plus DC, usable as
# locationmode="USA-states" codes in the choropleth.
states = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
          "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
          "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
          "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
          "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
# returns choropleth map figure based on status filter
# returns choropleth map figure based on status filter
def choropleth_map(status, df):
    """Build a USA choropleth figure counting dashcams per state.

    *status* is currently unused (reserved for a future time-scope filter);
    *df* must have 'location' (state code) and 'dashcam_id' columns.
    """
    # TODO add selection to get the time scope for display
    counts = df.groupby("location").count()
    colorscale = [[0.0, "rgb(38, 78, 134)"], [1.0, "#0091D5"]]  # colors scale
    trace = dict(
        type="choropleth",
        colorscale=colorscale,
        locations=counts.index,
        z=counts["dashcam_id"],
        locationmode="USA-states",
        marker=dict(line=dict(color="rgb(255,255,255)", width=2)),
    )
    geo_layout = dict(
        scope="usa",
        projection=dict(type="albers usa"),
        lakecolor="rgb(255, 255, 255)",
    )
    layout = dict(geo=geo_layout, margin=dict(l=10, r=10, t=0, b=0))
    return dict(data=[trace], layout=layout)
# returns pie chart that shows lead source repartition
# returns pie chart that shows lead source repartition
def lead_source(status, df):
    """Pie chart of the scene-type distribution: each slice is that scene
    type's share (in percent) of the 'total' column sum.

    *status* is currently unused (reserved for a future time-scope filter).
    """
    # TODO add selection to get the time scope for display
    nb_total = df['total'].sum()
    types = ['airfield', 'alley', 'bridge', 'crosswalk', 'downtown',
             'gas', 'harbor', 'highway', 'neighborhood', 'park',
             'parking', 'ruin', 'snowfield', 'station', 'street',
             'wild', "village"]
    # compute % for each leadsource type
    values = [df[scene_type].sum() / nb_total * 100 for scene_type in types]
    trace = go.Pie(
        labels=types,
        values=values,
        marker={"colors": ["#264e86", "#0074e4", "#74dbef", "#eff0f4"]},
    )
    layout = dict(margin=dict(l=15, r=10, t=0, b=65), legend=dict(orientation="h"))
    return dict(data=[trace], layout=layout)
def converted_leads_count(period, df):
    """Area chart of converted-lead counts grouped by *period*.

    Parameters
    ----------
    period : str
        A pandas offset alias ("D", "W-MON", "M") selecting the grouping.
    df : pandas.DataFrame
        Must contain 'CreatedDate', 'Status' and 'Id' columns.
        NOTE(review): these columns do not appear in the dashcam frame used
        elsewhere in this page — likely leftover from the original template.
    """
    df["CreatedDate"] = pd.to_datetime(df["CreatedDate"], format="%Y-%m-%d")
    df = df[df["Status"] == "Closed - Converted"]
    # Count leads per period bucket, oldest first.
    df = (
        df.groupby([pd.Grouper(key="CreatedDate", freq=period)])
        .count()
        .reset_index()
        .sort_values("CreatedDate")
    )
    trace = go.Scatter(
        x=df["CreatedDate"],
        y=df["Id"],
        name="converted leads",
        fill="tozeroy",
        fillcolor="#e6f2ff",
    )
    data = [trace]
    layout = go.Layout(
        xaxis=dict(showgrid=False),
        margin=dict(l=33, r=25, b=37, t=5, pad=4),
        paper_bgcolor="white",
        plot_bgcolor="white",
    )
    return {"data": data, "layout": layout}
# Page layout: two filter dropdowns, three KPI indicators, the choropleth /
# pie chart row, and a slider-driven per-user keyframe scatter plot.
layout = [
    # top controls
    html.Div(
        [
            html.Div(
                dcc.Dropdown(
                    id="converted_leads_dropdown",
                    options=[
                        {"label": "By day", "value": "D"},
                        {"label": "By week", "value": "W-MON"},
                        {"label": "By month", "value": "M"},
                    ],
                    value="D",
                    clearable=False,
                ),
                className="two columns",
            ),
            html.Div(
                dcc.Dropdown(
                    id="lead_source_dropdown",
                    options=[
                        {"label": "All status", "value": "all"},
                        {"label": "Open leads", "value": "open"},
                        {"label": "Converted leads", "value": "converted"},
                        {"label": "Lost leads", "value": "lost"},
                    ],
                    value="all",
                    clearable=False,
                ),
                className="two columns",
            ),
        ],
        className="row",
        style={"marginBottom": "10"},
    ),
    # indicators row div
    html.Div(
        [
            indicator(
                "#00cc96",
                "Unique States Count",
                "left_leads_indicator"
            ),
            indicator(
                "#119DFF",
                "Connected DashCam Count",
                "middle_leads_indicator"
            ),
            indicator(
                "#EF553B",
                "Recording Dates Count",
                "right_leads_indicator",
            ),
        ],
        className="row",
    ),
    # charts row div
    html.Div(
        [
            html.Div(
                [
                    html.P("DashCam count per state"),
                    dcc.Graph(
                        id="map",
                        style={"height": "90%", "width": "98%"},
                        config=dict(displayModeBar=False),
                    ),
                ],
                className="six columns chart_div"
            ),
            html.Div(
                [
                    html.P("Scene feature distribution"),
                    dcc.Graph(
                        id="lead_source",
                        style={"height": "90%", "width": "98%"},
                        config=dict(displayModeBar=False),
                    ),
                ],
                className="six columns chart_div"
            ),
        ],
        className="row",
        style={"marginTop": "5"},
    ),
    html.Div(
        [
            html.P("Recent week's keyframes percentage for each user"),
            dcc.Graph(id='graph-with-slider'),
            # Slider positions 0..7 map to the last 8 calendar days;
            # marks label each position with its YYYYMMDD date.
            dcc.Slider(
                id='date-slider',
                min=0,
                max=7,
                value=7,
                marks={(7-N): (datetime.date.today()- datetime.timedelta(days=N)).strftime("%Y%m%d") for N in range(7, -1, -1)}
            )
        ],
        className="row"
    ),
]
# updates left indicator based on df updates
@app.callback(
    Output("left_leads_indicator", "children"), [Input("leads_df", "children")]
)
def left_leads_indicator_callback(df):
    """Count the distinct states present in the serialized frame."""
    df = pd.read_json(df, orient="split")
    unique_location = len(df['location'].unique())
    return unique_location
# updates middle indicator based on df updates
@app.callback(
    Output("middle_leads_indicator", "children"), [Input("leads_df", "children")]
)
def middle_leads_indicator_callback(df):
    """Count the distinct dashcams present in the serialized frame."""
    df = pd.read_json(df, orient="split")
    unique_cam = len(df['dashcam_id'].unique())
    return unique_cam
# updates right indicator based on df updates
@app.callback(
    Output("right_leads_indicator", "children"), [Input("leads_df", "children")]
)
def right_leads_indicator_callback(df):
    """Count the distinct recording dates present in the serialized frame."""
    df = pd.read_json(df, orient="split")
    unique_date = len(df['store_date'].unique())
    return unique_date
# update pie chart figure based on dropdown's value and df updates
@app.callback(
    Output("lead_source", "figure"),
    [Input("lead_source_dropdown", "value"), Input("leads_df", "children")],
)
def lead_source_callback(status, df):
    """Rebuild the scene-type pie chart whenever the filter or data changes."""
    df = pd.read_json(df, orient="split")
    return lead_source(status, df)
# update heat map figure based on dropdown's value and df updates
@app.callback(
    Output("map", "figure"),
    [Input("lead_source_dropdown", "value"), Input("leads_df", "children")],
)
def map_callback(status, df):
    """Rebuild the state choropleth whenever the filter or data changes."""
    df = pd.read_json(df, orient="split")
    return choropleth_map(status, df)
# Update the figure with slider
@app.callback(
    Output('graph-with-slider', 'figure'),
    [Input('date-slider', 'value'), Input("leads_df", "children")],
)
def update_figure(selected_date, df):
    """Scatter frames-vs-keyframes per dashcam for the slider-selected day,
    one trace per state."""
    df = pd.read_json(df, orient="split")
    # Slider value 7 is today, 0 is seven days ago — invert to "days ago".
    selected_date = 7 - selected_date
    selected_day = (datetime.date.today()- datetime.timedelta(days=selected_date)).strftime("%Y%m%d")
    filtered_df = df[df.store_date == int(selected_day)]
    traces = []
    for i in filtered_df.location.unique():
        df_by_continent = filtered_df[filtered_df['location'] == i]
        traces.append(go.Scatter(
            x=df_by_continent['frames'],
            y=df_by_continent['keyframes'],
            text=df_by_continent['dashcam_id'],
            mode='markers',
            opacity=0.7,
            marker={
                'size': 15,
                'line': {'width': 0.5, 'color': 'white'}
            },
            name=i
        ))
    return {
        'data': traces,
        'layout': go.Layout(
            xaxis={'type': 'log', 'title': 'Total Frames'},
            yaxis={'title': 'Keyframes Count', 'range': [20, 90]},
            margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
            legend={'x': 0, 'y': 1},
            hovermode='closest'
        )
    }
# Table: filter leads by the selected status and render them as a table.
@app.callback(
    Output("leads_table", "children"),
    [Input("lead_source_dropdown", "value"), Input("leads_df", "children")],
)
def leads_table_callback(status, df):
    """Filter the leads frame by status, keep the display columns, and
    delegate rendering to df_to_table(). Unknown status values (e.g. "all")
    leave the frame unfiltered."""
    frame = pd.read_json(df, orient="split")
    if status == "open":
        keep = frame["Status"].isin(["Open - Not Contacted", "Working - Contacted"])
        frame = frame[keep]
    elif status == "converted":
        frame = frame[frame["Status"] == "Closed - Converted"]
    elif status == "lost":
        frame = frame[frame["Status"] == "Closed - Not Converted"]
    frame = frame[["CreatedDate", "Status", "Company", "State", "LeadSource"]]
    return df_to_table(frame)
# Pie chart: rebuild the converted-leads figure when the period or data change.
@app.callback(
    Output("converted_leads", "figure"),
    [Input("converted_leads_dropdown", "value"), Input("leads_df", "children")],
)
def converted_leads_callback(period, df):
    """Deserialize the frame and delegate to converted_leads_count()."""
    frame = pd.read_json(df, orient="split")
    return converted_leads_count(period, frame)
| StarcoderdataPython |
1619410 | # ----------------------------------------------------- THUMOS CONFIG ------------------------------------------------
#PATH
THUMOS_CLASSIDX = '/ssd1/users/km/OTAL/THUMOS/meta/classidx.txt' # '/NAS2/CIPLAB/users/kyh/thumos/json/classidx.txt'
THUMOS_ANNOTATION_PATH_TRAIN = '/ssd1/users/km/OTAL/THUMOS/meta/annotations_validation/annotation' # '/NAS2/CIPLAB/users/kyh/thumos/annotations_validation/annotation'
THUMOS_ANNOTATION_PATH_VALTEST = '/ssd1/users/km/OTAL/THUMOS/meta/annotations_test/annotations/annotation' # '/NAS2/CIPLAB/users/kyh/thumos/annotations_test/annotations/annotation'
THUMOS_VID_PATH_TRAIN = '/ssd1/users/km/OTAL/THUMOS/vid/validation' # '/NAS2/CIPLAB/users/kyh/thumos/validation'
THUMOS_VID_PATH_VALTEST = '/ssd1/users/km/OTAL/THUMOS/vid/THUMOS14_test' # '/NAS2/CIPLAB/users/kyh/thumos/THUMOS14_test'
#FEATURE
THUMOS_NUM_CLASSES = 21 #including background class
# ----------------------------------------------------- ANET CONFIG -------------------------------------------------
# PATH
ANET_CLASSIDX = '/workspace/ActivityNet_200_1.3/anet_classidx.txt'
ANET_ANNOTATION_FILE = '/workspace/ActivityNet_200_1.3/annotation.json'
ANET_VID_PATH = '/workspace/ActivityNet_200_1.3/videos/'
# FEATURE
ANET_NUM_CLASSES = 201 | StarcoderdataPython |
1667783 | <gh_stars>1-10
# encoding=utf8
__author__ = 'wcong'
import web
import util
import config
import pdbc
# web.py URL routing table: map the application root to the Index handler.
urls = (
    '/', 'Index'
)
class Index():
    """Login page handler: renders the form and authenticates on submit."""

    def GET(self):
        """Show the login form."""
        return config.render.login()

    def POST(self):
        """Validate credentials; on success set session cookies and redirect.

        On failure the login form is rendered again (previously this
        returned None, which produced an empty response).
        """
        form = web.input()  # parse the request body once instead of twice
        email = form.get("email")
        password = form.get("password")
        # NOTE(review): the password appears to be passed through as-is —
        # confirm pdbc.User.select_login_user compares a hash, not plaintext.
        result = pdbc.User.select_login_user(email, password)
        if result > 0:
            web.setcookie("email", util.encode_string(email), path='/')
            web.setcookie("last_visit_time", util.encode_string(str(util.make_time_stamp())), path='/')
            # web.seeother is an HTTPError subclass: it must be *raised* for
            # the redirect to be sent; calling it as a statement is a no-op.
            raise web.seeother('../home/')
        return config.render.login()
# WSGI sub-application for the login page, mounted by the main application.
app_login = web.application(urls, locals())
| StarcoderdataPython |
1681007 | <reponame>affjljoo3581/canrevan
import json
import os
from canrevan.parsing import extract_article_urls, parse_article_content
def _get_resource_content(name: str) -> str:
    """Return the text of a test resource file located next to this module.

    The resources contain Korean article HTML, so the file is decoded
    explicitly as UTF-8 instead of relying on the platform default
    encoding (which is locale-dependent, e.g. cp949/latin-1).
    """
    res_path = os.path.join(os.path.dirname(__file__), "resources", name)
    with open(res_path, "r", encoding="utf-8") as fp:
        return fp.read()
def test_extracting_article_urls():
    """URLs extracted from the navigation page fixture must match exactly."""
    expected = [
        "sid1=100&sid2=265&oid=029&aid=0002625369",
        "sid1=100&sid2=265&oid=079&aid=0003409069",
        "sid1=100&sid2=265&oid=421&aid=0004881296",
        "sid1=100&sid2=265&oid=421&aid=0004881295",
        "sid1=100&sid2=265&oid=421&aid=0004881294",
        "sid1=100&sid2=265&oid=001&aid=0011892040",
        "sid1=100&sid2=265&oid=001&aid=0011892037",
        "sid1=100&sid2=265&oid=001&aid=0011892036",
        "sid1=100&sid2=265&oid=001&aid=0011892035",
        "sid1=100&sid2=265&oid=001&aid=0011892034",
        "sid1=100&sid2=265&oid=001&aid=0011892013",
        "sid1=100&sid2=265&oid=014&aid=0004497351",
        "sid1=100&sid2=265&oid=421&aid=0004881269",
        "sid1=100&sid2=265&oid=468&aid=0000698806",
        "sid1=100&sid2=265&oid=001&aid=0011891993",
        "sid1=100&sid2=265&oid=001&aid=0011891992",
        "sid1=100&sid2=265&oid=001&aid=0011891989",
        "sid1=100&sid2=265&oid=001&aid=0011891986",
        "sid1=100&sid2=265&oid=001&aid=0011891985",
        "sid1=100&sid2=265&oid=001&aid=0011891984",
    ]
    nav_html = _get_resource_content("nav_html")
    # Drop the common URL prefix (first 55 characters) so only the
    # query-string portion is compared.
    parsed = [url[55:] for url in extract_article_urls(nav_html)]
    assert parsed == expected
def test_parsing_article_contents():
    """The parsed article body must decode to the exact Korean news text."""
    content = parse_article_content(_get_resource_content("article_html"))
    # ``content`` holds a JSON string literal; ``scanstring(content, 1)``
    # decodes it starting just after the opening quote and returns a tuple
    # of (decoded text, end index).
    assert json.decoder.scanstring(content, 1)[0] == (
        '이재명 경기도지사가 19일 "지자체에 지역화폐가 확산하면 단점이 '
        '심화할 수 있다"고 지적한 국민의힘 윤희숙 의원을 향해 "언론 뒤에 '
        '숨지 말고 공개 토론하자"고 제안했다.\n'
        '이 지사는 이날 페이스북에서 "경제 전문가인 윤희숙 위원장님, '
        "지역화폐는 소비의 지역 간 이전 차단보다 업종 내 규모별 재분배에 더 "
        '중점이 있다는 거 모르시진 않으시지요?"라며 이같이 밝혔다.\n'
        '그는 "유통 대기업의 골목상권 잠식으로 피해 보는 영세자영업자와 '
        '골목상권을 보호하는 지역화폐는 문재인 정부의 포용정책 중 하나"'
        '라며 "윤 의원은 비중이 적은 소비의 지역 이전 부분만 강조하고 '
        '핵심요소인 규모별 이전 효과는 의도적으로 외면하는 것 같다"고 '
        "했다.\n"
        '이어 "왜곡조작으로 기득권 옹호하는 일부 보수언론 뒤에 숨어 '
        "불합리한 일방적 주장만 하지 말고, 수차례 제안한 국민 앞 "
        '공개토론에서 당당하게 논쟁해 보실 용의는 없냐"고 덧붙였다.\n'
        "윤 의원은 이날 페이스북에 '지역화폐가 역효과를 낸다'는 "
        '한국조세재정연구원(조세연)의 보고서에 대해 "분석과 서술방식 모두 '
        '잘 쓰인 보고서"라고 평가하며 "지자체에 (지역화폐가) 확산하면 '
        '의도했던 장점은 줄고 단점만 심화할 수 있다"고 지적했다. 또 이 '
        '지사의 조세연 비판을 두고 "권력을 가진 이들이 전문가집단을 힘으로 '
        "찍어누르려 하는 것은 한 나라의 지적 인프라를 위협하는 일인 동시에 "
        '본인들 식견의 얕음을 내보이는 일"이라고 날을 세웠다.'
    )
| StarcoderdataPython |
3217154 | from __future__ import print_function, division, absolute_import
import itertools
import numpy as np
import regreg.atoms.group_lasso as GL
import regreg.api as rr
import nose.tools as nt
from .test_seminorms import Solver, all_close, SolverFactory
from .test_cones import ConeSolverFactory
class GroupSolverFactory(SolverFactory):
    """Solver factory for group-lasso atoms.

    Narrows the parent SolverFactory's search space (single FISTA/L/coef_stop
    choice) and adds two group-structure choices: one group per coordinate,
    and a set of unevenly sized groups.
    """
    # Group label arrays used to construct the atoms under test.
    group_choices = [np.arange(10),
                     np.array([1,1,2,2,2,3,3,4,4,4,4,5,5,6,6,6,6])]
    FISTA_choices = [True]
    L_choices = [0.3]
    coef_stop_choices = [False]
    def __init__(self, klass, mode):
        # klass: atom class to instantiate; mode: 'lagrange' or 'bound'.
        self.klass = klass
        self.mode = mode
    def __iter__(self):
        """Yield one configured Solver per combination of factory choices."""
        for offset, FISTA, coef_stop, L, q, groups in itertools.product(self.offset_choices,
                                                                        self.FISTA_choices,
                                                                        self.coef_stop_choices,
                                                                        self.L_choices,
                                                                        self.quadratic_choices,
                                                                        self.group_choices):
            # Record the current combination on the factory instance.
            self.FISTA = FISTA
            self.coef_stop = coef_stop
            self.L = L
            # Construct the atom in either Lagrange or bound form.
            if self.mode == 'lagrange':
                atom = self.klass(groups, lagrange=self.lagrange)
            else:
                atom = self.klass(groups, bound=self.bound)
            # Optionally attach a random quadratic term and/or offset.
            if q:
                atom.quadratic = rr.identity_quadratic(0,0,np.random.standard_normal(atom.shape)*0.02)
            if offset:
                atom.offset = 0.02 * np.random.standard_normal(atom.shape)
            solver = Solver(atom, interactive=self.interactive,
                            coef_stop=coef_stop,
                            FISTA=FISTA,
                            L=L)
            yield solver
class GroupConeSolverFactory(ConeSolverFactory):
    """Cone-solver factory for group-structured cone atoms.

    Like GroupSolverFactory, but the atom takes only the group labels
    (no lagrange/bound parameter).
    """
    # Group label arrays used to construct the atoms under test.
    group_choices = [np.arange(10),
                     np.array([1,1,2,2,2,3,3,4,4,4,4,5,5,6,6,6,6])]
    def __iter__(self):
        """Yield one configured Solver per combination of factory choices."""
        for offset, FISTA, coef_stop, L, q, groups in itertools.product(self.offset_choices,
                                                                        self.FISTA_choices,
                                                                        self.coef_stop_choices,
                                                                        self.L_choices,
                                                                        self.quadratic_choices,
                                                                        self.group_choices):
            # Record the current combination on the factory instance.
            self.FISTA = FISTA
            self.coef_stop = coef_stop
            self.L = L
            atom = self.klass(groups)
            # Optionally attach a random quadratic term and/or offset.
            if q:
                atom.quadratic = rr.identity_quadratic(0,0,np.random.standard_normal(atom.shape)*0.02)
            if offset:
                atom.offset = 0.02 * np.random.standard_normal(atom.shape)
            solver = Solver(atom, interactive=self.interactive,
                            coef_stop=coef_stop,
                            FISTA=FISTA,
                            L=L)
            yield solver
@np.testing.dec.slow
def test_proximal_maps(interactive=False):
    """Nose test generator exercising proximal maps of group-lasso atoms.

    For every seminorm/conjugate pair it checks the Moreau identity
    (lagrange_prox vs. the conjugate's bound_prox), that the wrong mode's
    parameter cannot be set, and then yields all generic solver tests for
    both Lagrange and bound modes, plus the cone pairs.
    """
    for klass in GL.conjugate_seminorm_pairs.keys():
        factory = GroupSolverFactory(klass, 'lagrange')
        for solver in factory:
            penalty = solver.atom
            dual = penalty.conjugate
            Z = solver.prox_center
            L = solver.L
            # Moreau decomposition: prox_f(Z) = Z - prox_{f*}(Z*L)/L.
            yield all_close, penalty.lagrange_prox(Z, lipschitz=L), Z-dual.bound_prox(Z*L)/L, 'testing lagrange_prox and bound_prox starting from atom\n %s ' % klass, None
            # some arguments of the constructor
            # NOTE(review): the two assert_raises pairs below are duplicated
            # verbatim — harmless, but probably unintended.
            nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
            nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
            nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
            nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
            for t in solver.all_tests():
                yield t
        factory = GroupSolverFactory(klass, 'bound')
        for solver in factory:
            for t in solver.all_tests():
                yield t
    for klass in GL.conjugate_cone_pairs.keys():
        factory = GroupConeSolverFactory(klass)
        for solver in factory:
            for t in solver.all_tests():
                yield t
| StarcoderdataPython |
23207 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
"""
__author__ = 'joscha'
__date__ = '03.08.12'
| StarcoderdataPython |
97950 | <filename>modules/deprado.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import multiprocessing as mp
import pandas as pd
import numpy as np
from tqdm import tqdm, tqdm_notebook
import time
import datetime as dt
import sys
def getDailyVolatility(close, span0=100):
    """De Prado's daily-volatility estimate, reindexed to ``close``.

    At each intraday estimation point the one-day return is computed against
    the closest bar at least one day earlier, and an exponentially weighted
    moving standard deviation with span ``span0`` is applied.

    # args
        close : series of closing prices indexed by timestamp
        span0 : EWM span, in bars
    # returns
        series named 'dailyVolatility', aligned to the tail of ``close``
    """
    # Position of the bar at (or just after) one day before each timestamp;
    # timestamps with no earlier bar are dropped.
    prev_pos = close.index.searchsorted(close.index - pd.Timedelta(days=1))
    prev_pos = prev_pos[prev_pos > 0]
    # Map each remaining timestamp to its one-day-earlier predecessor.
    prev_idx = pd.Series(close.index[prev_pos - 1],
                         index=close.index[close.shape[0] - prev_pos.shape[0]:])
    try:
        rets = close.loc[prev_idx.index] / close.loc[prev_idx.values].values - 1
    except Exception as e:
        print(f'error: {e}\nplease confirm no duplicate indices')
        rets = prev_idx  # mirror the original fall-through on failure
    vol = rets.ewm(span=span0).std().rename('dailyVolatility')
    return vol
def applyPtSlOnT1(close, events, ptSl, molecule):
    """De Prado: apply stop-loss / profit-taking barriers before t1.

    For every event in ``molecule`` the price path from the event start to
    its vertical barrier ``t1`` is scanned for the first touch of the
    profit-taking and stop-loss levels.

    # args
        close    : series of closing prices indexed by timestamp
        events   : DataFrame with columns 't1', 'trgt' and 'side'
        ptSl     : (profit-taking, stop-loss) width multiples of 'trgt';
                   0 disables the corresponding barrier
        molecule : subset of event indices handled by this worker
    # returns
        DataFrame indexed like ``molecule`` with columns 't1', 'sl', 'pt'
        holding the timestamp of the first touch of each barrier
        (NaT when a barrier is never touched)
    """
    events_ = events.loc[molecule]
    out = events_[['t1']].copy(deep=True)
    # A disabled barrier becomes an all-NaN series, so the comparisons below
    # are always False and that barrier is never reported as touched.
    if ptSl[0] > 0:
        pt = ptSl[0]*events_['trgt']
    else:
        pt = pd.Series(np.nan, index=events.index)
    if ptSl[1] > 0:
        sl = -ptSl[1]*events_['trgt']
    else:
        sl = pd.Series(np.nan, index=events.index)
    # Series.iteritems() was removed in pandas 2.0; items() is the
    # long-standing equivalent.
    for loc, t1 in events_['t1'].fillna(close.index[-1]).items():
        df0 = close[loc:t1]  # price path of this event
        df0 = (df0/close[loc]-1)*events_.at[loc, 'side']  # signed path returns
        out.loc[loc, 'sl'] = df0[df0 < sl[loc]].index.min()  # earliest stop-loss
        out.loc[loc, 'pt'] = df0[df0 > pt[loc]].index.min()  # earliest profit-take
    return out
def getEvents(close,
              tEvents, ptSl, trgt, minRet, numThreads, t1=False, side=None):
    """De Prado: form triple-barrier events and find the first barrier touch.

    # args
        close      : series of closing prices
        tEvents    : timestamps seeding each triple barrier (e.g. CUSUM events)
        ptSl       : non-negative barrier width multiple(s); 0 disables a
                     horizontal barrier
        t1         : series of vertical-barrier timestamps, or False to
                     disable vertical barriers
        trgt       : series of targets (absolute returns)
        minRet     : minimum target return required to run a barrier search
        numThreads : worker count passed to mpPandasObj
        side       : optional series of bet sides; when given, asymmetric
                     barriers ptSl[:2] are used (meta-labeling)
    # returns
        DataFrame of events with 't1' replaced by the earliest barrier touch
    """
    # 1) get target: restrict to seed timestamps and enforce the minimum return
    trgt = trgt.loc[tEvents]
    trgt = trgt[trgt > minRet]  # minRet
    # 2) get t1 (max holding period); NaT means no vertical barrier
    if t1 is False:
        t1 = pd.Series(pd.NaT, index=tEvents)
    # 3) form events object, apply stop loss on t1
    # Without a side, bets are treated as long and the barriers are symmetric.
    if side is None:
        side_, ptSl_ = pd.Series(1., index=trgt.index), [ptSl[0], ptSl[0]]
    else:
        side_, ptSl_ = side.loc[trgt.index], ptSl[:2]
    events = (pd.concat({'t1': t1, 'trgt': trgt, 'side': side_}, axis=1)
              .dropna(subset=['trgt']))
    # Scan each event's price path for barrier touches, in parallel.
    df0 = mpPandasObj(func=applyPtSlOnT1,
                      pdObj=('molecule', events.index),
                      numThreads=numThreads,
                      close=close,
                      events=events,
                      ptSl=ptSl_)
    events['t1'] = df0.dropna(how='all').min(axis=1)  # pd.min ignores nan
    if side is None:
        events = events.drop('side', axis=1)
    return events
def getTEvents(gRaw, h):
    """De Prado: symmetric CUSUM filter over log returns.

    Accumulates positive and negative log-return sums and emits an event
    timestamp whenever either running sum crosses the threshold ``h``,
    resetting that sum to zero.

    # args
        gRaw : raw time series of closing prices (tick/volume/dollar bars)
    h    : threshold on the cumulative log return
    # returns
        DatetimeIndex of event timestamps
    """
    tEvents, sPos, sNeg = [], 0, 0
    diff = np.log(gRaw).diff().dropna()
    # NOTE(review): iteration starts at diff.index[1:], skipping the first
    # (already valid) return — confirm this off-by-one is intentional.
    for i in tqdm(diff.index[1:]):
        try:
            pos, neg = float(sPos+diff.loc[i]), float(sNeg+diff.loc[i])
        except Exception as e:
            # NOTE(review): any failure (e.g. duplicate index returning a
            # Series) silently truncates the scan after printing diagnostics.
            print(e)
            print(sPos+diff.loc[i], type(sPos+diff.loc[i]))
            print(sNeg+diff.loc[i], type(sNeg+diff.loc[i]))
            break
        # Clamp the running sums at zero from the appropriate side.
        sPos, sNeg = max(0., pos), min(0., neg)
        if sNeg < -h:
            sNeg = 0
            tEvents.append(i)
        elif sPos > h:
            sPos = 0
            tEvents.append(i)
    return pd.DatetimeIndex(tEvents)
def addVerticalBarrier(tEvents, close, numDays=1):
    """De Prado: timestamp of the vertical barrier for each event.

    For every event timestamp the barrier is the first bar in ``close`` at
    or after ``numDays`` days later; events whose barrier would fall beyond
    the last bar are dropped.

    # args
        tEvents : event timestamps
        close   : series of closing prices indexed by timestamp
        numDays : barrier width in days
    # returns
        series mapping (a prefix of) the event timestamps to barrier times
    """
    deadlines = tEvents + pd.Timedelta(days=numDays)
    positions = close.index.searchsorted(deadlines)
    positions = positions[positions < close.shape[0]]
    return pd.Series(close.index[positions], index=tEvents[:positions.shape[0]])
def linParts(numAtoms, numThreads):
    """Partition ``numAtoms`` atoms into contiguous chunks of equal size.

    Returns an integer array of chunk boundaries of length
    ``min(numThreads, numAtoms) + 1``, starting at 0 and ending at
    ``numAtoms``.
    """
    nChunks = min(numThreads, numAtoms)
    boundaries = np.linspace(0, numAtoms, nChunks + 1)
    return np.ceil(boundaries).astype(int)
def nestedParts(numAtoms, numThreads, upperTriang=False):
    """Partition atoms for a doubly-nested (triangular) loop so that every
    chunk carries roughly the same number of inner-loop iterations.

    With ``upperTriang`` the chunk sizes are reversed (heaviest rows first
    become the last chunk boundaries).
    """
    boundaries, nChunks = [0], min(numThreads, numAtoms)
    for _ in range(nChunks):
        prev = boundaries[-1]
        # Positive root of the quadratic balancing triangular workload.
        disc = 1 + 4 * (prev ** 2 + prev + numAtoms * (numAtoms + 1.) / nChunks)
        boundaries.append((-1 + disc ** .5) / 2.)
    boundaries = np.round(boundaries).astype(int)
    if upperTriang:  # the first rows are heaviest
        boundaries = np.cumsum(np.diff(boundaries)[::-1])
        boundaries = np.append(np.array([0]), boundaries)
    return boundaries
def processJobs_(jobs):
    """Run jobs sequentially in the current process (no pool); for debugging."""
    return [expandCall(job) for job in jobs]
def mpPandasObj(func, pdObj, numThreads=24, mpBatches=1, linMols=True,
                **kargs):
    """
    Parallelize jobs, return a dataframe or series
    + func: function to be parallelized. Returns a DataFrame
    + pdObj[0]: Name of argument used to pass the molecule
    + pdObj[1]: List of atoms that will be grouped into molecules
    + mpBatches: number of job batches per thread
    + linMols: partition atoms linearly (True) or for a nested loop (False)
    + kargs: any other argument needed by func
    Example: df1=mpPandasObj(func,('molecule',df0.index),24,**kwds)
    """
    import pandas as pd
    # Split the atoms into one molecule (index slice) per job.
    if linMols:
        parts = linParts(len(pdObj[1]), numThreads*mpBatches)
    else:
        parts = nestedParts(len(pdObj[1]), numThreads*mpBatches)
    jobs = []
    for i in range(1, len(parts)):
        job = {pdObj[0]: pdObj[1][parts[i-1]:parts[i]], 'func': func}
        job.update(kargs)
        jobs.append(job)
    # Single thread: run sequentially in-process (easier debugging).
    if numThreads == 1:
        out = processJobs_(jobs)
    else:
        out = processJobs(jobs, numThreads=numThreads)
    # Stitch the per-molecule results back together in index order.
    # DataFrame.append/Series.append were removed in pandas 2.0; pd.concat
    # is the equivalent (and avoids quadratic re-copying).
    if isinstance(out[0], (pd.DataFrame, pd.Series)):
        return pd.concat(out).sort_index()
    return out
def reportProgress(jobNum, numJobs, time0, task):
    """Write a one-line progress report for ``task`` to stderr.

    While jobs remain the line ends with a carriage return so it overwrites
    itself in place; the final report ends with a newline.
    """
    done = float(jobNum) / numJobs
    elapsed = (time.time() - time0) / 60.
    remaining = elapsed * (1 / done - 1)
    stamp = str(dt.datetime.fromtimestamp(time.time()))
    msg = (stamp + ' ' + str(round(done * 100, 2)) + '% ' + task
           + ' done after ' + str(round(elapsed, 2)) + ' minutes. Remaining '
           + str(round(remaining, 2)) + ' minutes.')
    ending = '\r' if jobNum < numJobs else '\n'
    sys.stderr.write(msg + ending)
    return
def processJobs(jobs, task=None, numThreads=24):
    """
    Run in parallel.
    jobs must contain a 'func' callback, for expandCall.
    ``task`` (used only in the progress report) defaults to the name of the
    first job's callback.
    """
    if task is None:
        task = jobs[0]['func'].__name__
    pool = mp.Pool(processes=numThreads)
    outputs, out, time0 = pool.imap_unordered(expandCall,
                                              jobs), [], time.time()
    # Process asynchronous output as it completes, reporting progress.
    # Results arrive in completion order, not submission order.
    for i, out_ in enumerate(outputs, 1):
        out.append(out_)
        reportProgress(i, len(jobs), time0, task)
    pool.close()
    pool.join() # this is needed to prevent memory leaks
    return out
def expandCall(kargs):
    """Invoke ``kargs['func']`` with the remaining entries as keyword
    arguments. The 'func' key is removed from the dict in the process.
    """
    func = kargs.pop('func')
    return func(**kargs)
def getBins(events, close):
    """Label each event by the sign (or, with meta-labeling, the success)
    of its return over [event start, t1].

    events is a DataFrame where:
      - events.index is the event's start time
      - events['t1'] is the event's end time
      - events['trgt'] is the event's target
      - events['side'] (optional) is the algo's position side
    Case 1 ('side' absent): bin in (-1, 1), labeled by price action.
    Case 2 ('side' present): bin in (0, 1), labeled by pnl (meta-labeling).
    """
    # 1) prices aligned with events (only events that actually closed)
    closed = events.dropna(subset=['t1'])
    stamps = closed.index.union(closed['t1'].values).drop_duplicates()
    prices = close.reindex(stamps, method='bfill')
    # 2) build the output frame of returns and labels
    out = pd.DataFrame(index=closed.index)
    out['ret'] = prices.loc[closed['t1'].values].values / prices.loc[closed.index] - 1
    if 'side' in closed:
        out['ret'] *= closed['side']  # meta-labeling: pnl of the given side
    out['bin'] = np.sign(out['ret'])
    if 'side' in closed:
        # Meta-labeling: only profitable trades keep label 1.
        out.loc[out['ret'] <= 0, 'bin'] = 0
    return out
def dropLabels(events, minPct=.05):
    """Iteratively drop observations whose label class is too rare.

    Labels in ``events['bin']`` with relative frequency below ``minPct``
    are removed one class at a time (rarest first) until every remaining
    class meets the threshold or only two classes remain.

    # args
        events : DataFrame with a 'bin' label column
        minPct : minimum relative frequency a label class must keep
    # returns
        the filtered DataFrame (the input is not modified)
    """
    while True:
        df0 = events['bin'].value_counts(normalize=True)
        if df0.min() > minPct or df0.shape[0] < 3:
            break
        # idxmin() returns the *label* with the lowest frequency;
        # Series.argmin() returns a positional index in modern pandas and
        # would make the filter below compare against the wrong value.
        rarest = df0.idxmin()
        print('dropped label: ', rarest, df0.min())
        events = events[events['bin'] != rarest]
    return events
| StarcoderdataPython |
1644877 | # Copyright 2008 Divmod, Inc. See LICENSE file for details
# -*- test-case-name: xmantissa.test.test_webapp,xmantissa.test.test_publicweb,xmantissa.test.test_website -*-
"""
This unfortunate module exists to contain code that would create an ugly
dependency loop if it were somewhere else.
"""
from zope.interface import implements
from twisted.cred.portal import IRealm
from epsilon.structlike import record
from axiom.userbase import getDomainNames
from nevow import athena
from nevow.rend import NotFound
from nevow.inevow import IResource, IRequest
from xmantissa.ixmantissa import (IWebViewer, INavigableFragment,
ISiteRootPlugin)
from xmantissa.websharing import UserIndexPage
from xmantissa.error import CouldNotLoadFromThemes
class WebViewerHelper(object):
    """
    This is a mixin for the common logic in the two providers of
    L{IWebViewer} included with Mantissa,
    L{xmantissa.publicweb._AnonymousWebViewer} and
    L{xmantissa.webapp._AuthenticatedWebViewer}.
    @ivar _getDocFactory: a 1-arg callable which returns a nevow loader.
    @ivar _preferredThemes: a 0-arg callable which returns a list of nevow
        themes.
    """
    def __init__(self, _getDocFactory, _preferredThemes):
        """
        Store the document-factory and preferred-themes callables for use
        by L{wrapModel}.
        """
        self._getDocFactory = _getDocFactory
        self._preferredThemes = _preferredThemes
    def _wrapNavFrag(self, fragment, useAthena):
        """
        Subclasses must implement this to wrap a fragment.
        @param fragment: an L{INavigableFragment} provider that should be
            wrapped in the resulting page.
        @param useAthena: Whether the resulting L{IResource} should be a
            L{LivePage}.
        @type useAthena: L{bool}
        @return: a fragment to display to the user.
        @rtype: L{IResource}
        """
    def wrapModel(self, model):
        """
        Converts application-provided model objects to L{IResource} providers.
        """
        # First let the model adapt itself directly to IResource; only fall
        # back to fragment wrapping when no such adapter exists.
        res = IResource(model, None)
        if res is None:
            frag = INavigableFragment(model)
            # If the fragment names a theme template, try to load its
            # document factory from the preferred themes.
            fragmentName = getattr(frag, 'fragmentName', None)
            if fragmentName is not None:
                fragDocFactory = self._getDocFactory(fragmentName)
                if fragDocFactory is not None:
                    frag.docFactory = fragDocFactory
            if frag.docFactory is None:
                raise CouldNotLoadFromThemes(frag, self._preferredThemes())
            # Live fragments need an Athena (LivePage) wrapper.
            useAthena = isinstance(frag, (athena.LiveFragment, athena.LiveElement))
            return self._wrapNavFrag(frag, useAthena)
        else:
            return res
class MantissaViewHelper(object):
    """
    This is the superclass of all Mantissa resources which act as a wrapper
    around an L{INavigableFragment} provider.  This must be mixed in to some
    hierarchy with a C{locateChild} method, since it expects to cooperate in
    such a hierarchy.
    Due to infelicities in the implementation of some (pre-existing)
    subclasses, there is no __init__; but subclasses must set the 'fragment'
    attribute in theirs.
    """
    # INavigableFragment provider wrapped by this resource; set by subclasses.
    fragment = None
    def locateChild(self, ctx, segments):
        """
        Attempt to locate the child via the '.fragment' attribute, then fall
        back to normal locateChild behavior.
        """
        if self.fragment is not None:
            # There are still a bunch of bogus subclasses of this class, which
            # are used in a variety of distasteful ways.  'fragment' *should*
            # always be set to something that isn't None, but there's no way to
            # make sure that it will be for the moment.  Every effort should be
            # made to reduce public use of subclasses of this class (instead
            # preferring to wrap content objects with
            # IWebViewer.wrapModel()), so that the above check can be
            # removed. -glyph
            # Give the wrapped fragment first chance at the remaining segments.
            lc = getattr(self.fragment, 'locateChild', None)
            if lc is not None:
                x = lc(ctx, segments)
                if x is not NotFound:
                    return x
        return super(MantissaViewHelper, self).locateChild(ctx, segments)
class SiteRootMixin(object):
    """
    Common functionality for L{AnonymousSite} and L{WebSite}.
    """
    def locateChild(self, context, segments):
        """
        Return a statically defined child or a child defined by a site root
        plugin or an avatar from guard.
        """
        request = IRequest(context)
        webViewer = IWebViewer(self.store, None)
        childAndSegments = self.siteProduceResource(request, segments, webViewer)
        if childAndSegments is not None:
            return childAndSegments
        return NotFound
    # IMantissaSite
    def siteProduceResource(self, req, segments, webViewer):
        """
        Retrieve a child resource and segments from rootChild_ methods on this
        object and SiteRootPlugins.
        @return: a 2-tuple of (resource, segments), suitable for return from
            locateChild, or C{None} if nothing matched.
        @param req: an L{IRequest} provider.
        @param segments: a tuple of L{str}s, the segments from the request.
        @param webViewer: an L{IWebViewer}, to be propagated through the child
            lookup process.
        """
        # rootChild_* is not the same as child_, because its signature is
        # different. Maybe this should be done some other way.
        shortcut = getattr(self, 'rootChild_' + segments[0], None)
        if shortcut:
            res = shortcut(req, webViewer)
            if res is not None:
                return res, segments[1:]
        # No static child matched: consult the installed site root plugins.
        for plg in self.store.powerupsFor(ISiteRootPlugin):
            # Newer plugins expose produceResource; older ones only have
            # resourceFactory, which takes just the segments.
            produceResource = getattr(plg, 'produceResource', None)
            if produceResource is not None:
                childAndSegments = produceResource(req, segments, webViewer)
            else:
                childAndSegments = plg.resourceFactory(segments)
            if childAndSegments is not None:
                return childAndSegments
        return None
    # IPowerupIndirector
    def indirect(self, interface):
        """
        Create a L{VirtualHostWrapper} so it can have the first chance to
        handle web requests.
        """
        if interface is IResource:
            # Prefer the parent (site) store; fall back to our own store
            # when there is no parent.
            siteStore = self.store.parent
            if self.store.parent is None:
                siteStore = self.store
            return VirtualHostWrapper(
                siteStore,
                IWebViewer(self.store),
                self)
        return self
class VirtualHostWrapper(record('siteStore webViewer wrapped')):
    """
    Resource wrapper which implements per-user virtual subdomains.  This should
    be wrapped around any resource which sits at the root of the hierarchy.  It
    will examine requests for their hostname and, when appropriate, redirect
    handling of the query to the appropriate sharing resource.
    @type siteStore: L{Store}
    @ivar siteStore: The site store which will be queried to determine which
        hostnames are associated with this server.
    @type webViewer: L{IWebViewer}
    @ivar webViewer: The web viewer representing the user.
    @type wrapped: L{IResource} provider
    @ivar wrapped: A resource to which traversal will be delegated if the
        request is not for a user subdomain.
    """
    implements(IResource)
    def subdomain(self, hostname):
        """
        Determine of which known domain the given hostname is a subdomain.
        @return: A two-tuple giving the subdomain part and the domain part or
            C{None} if the domain is not a subdomain of any known domain.
        """
        # Strip an optional port number before comparing host names.
        hostname = hostname.split(":")[0]
        for domain in getDomainNames(self.siteStore):
            if hostname.endswith("." + domain):
                username = hostname[:-len(domain) - 1]
                # 'www' is treated as the regular site, not as a username.
                if username != "www":
                    return username, domain
        return None
    def locateChild(self, context, segments):
        """
        Delegate dispatch to a sharing resource if the request is for a user
        subdomain, otherwise fall back to the wrapped resource's C{locateChild}
        implementation.
        """
        request = IRequest(context)
        hostname = request.getHeader('host')
        info = self.subdomain(hostname)
        if info is not None:
            username, domain = info
            # Serve the named user's shared index instead of the wrapped
            # site-root resource; the remaining segments are passed through.
            index = UserIndexPage(IRealm(self.siteStore),
                                  self.webViewer)
            resource = index.locateChild(None, [username])[0]
            return resource, segments
        return self.wrapped.locateChild(context, segments)
| StarcoderdataPython |
1786671 | <reponame>patrick-finke/mecs<filename>mecs.py<gh_stars>1-10
"""An implementation of the Entity Component System (ECS) paradigm."""
from itertools import repeat as _repeat
__version__ = '1.2.1'
class CommandBuffer():
    """Records scene commands for deferred playback.

    *New in version 1.1.*

    Commands queued on the buffer are applied to the associated scene only
    when :meth:`flush` is called. The buffer may also be used as a context
    manager, in which case it flushes itself on exit.
    """

    def __init__(self, scene):
        """Bind this buffer to *scene* and start with an empty queue."""
        self.scene = scene
        self.commands = []
        self.lasteid = 0    # counts downwards; buffer-local ids are negative
        self.eidmap = {}    # buffer-local id -> real entity id, filled on flush

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Apply everything recorded inside the ``with`` block.
        self.flush()

    def new(self, *comps):
        """Queue creation of a new entity, optionally with components, and
        return a negative placeholder id valid only within this buffer.

        *New in version 1.2.*
        """
        self.lasteid -= 1
        placeholder = self.lasteid
        self.commands.append((self.scene.new, (placeholder, *comps)))
        return placeholder

    def add(self, eid, *comps):
        """Queue adding components to an entity; errors surface on flush.

        *Changed in version 1.2:* Added support for multiple components.
        *Deprecated since version 1.2:* Use *set()* instead.
        """
        self.commands.append((self.scene.add, (eid, *comps)))

    def set(self, eid, *comps):
        """Queue setting components on an entity; errors surface on flush.

        *New in version 1.2.*
        """
        self.commands.append((self.scene.set, (eid, *comps)))

    def remove(self, eid, *comptypes):
        """Queue removal of component types from an entity; errors surface
        on flush.

        *Changed in version 1.2:* Added support for multiple component types.
        """
        self.commands.append((self.scene.remove, (eid, *comptypes)))

    def free(self, eid):
        """Queue removal of all of an entity's components; errors surface
        on flush."""
        self.commands.append((self.scene.free, (eid,)))

    def flush(self):
        """Play back every recorded command against the associated scene and
        empty the queue.

        Buffer-local (negative) entity ids are translated to the real ids
        produced by the scene. Faulty arguments raise here, not when the
        command was recorded.
        """
        for command, arguments in self.commands:
            if command == self.scene.new:
                placeholder, *comps = arguments
                self.eidmap[placeholder] = self.scene.new(*comps)
            else:
                eid, *rest = arguments
                if eid < 0:
                    eid = self.eidmap[eid]
                command(eid, *rest)
        self.commands.clear()
class Scene():
"""A scene of entities that allows for efficient component management."""
def __init__(self):
self.entitymap = {} # {eid: (archetype, index)}
self.archetypemap = {} # {component type: set(archetype)}
self.chunkmap = {} # {archetype: ([eid], {component type: [component]})}
self.lasteid = -1 # the last valid entity id
    def _removeEntity(self, eid):
        """Internal method to remove an entity. The entity id must be valid and in entitymap, i.e. the entity must have at least one component.

        Uses swap-remove: the last entity in the archetype's chunk takes the
        removed entity's slot so all lists stay dense; when the chunk would
        become empty, the whole archetype container is dismantled instead.
        """
        archetype, index = self.entitymap[eid]
        eidlist, comptypemap = self.chunkmap[archetype]
        # remove the entity by swapping it with another entity, or ...
        if len(eidlist) > 1:
            swapid = eidlist[-1]
            if swapid != eid: # do not replace with self
                # Point the swapped entity's bookkeeping at its new slot.
                self.entitymap[swapid] = (archetype, index)
                eidlist[index] = swapid
                for complist in comptypemap.values():
                    swapcomp = complist[-1]
                    complist[index] = swapcomp
            # remove swaped entity (now duplicated at the tail)
            eidlist.pop()
            for complist in comptypemap.values():
                complist.pop()
        else: # ... if the archetype container will be empty after this, remove it
            # Drop the archetype from every component type's reverse index,
            # pruning component types that no archetype uses any more.
            for ct in archetype:
                self.archetypemap[ct].remove(archetype)
                if not self.archetypemap[ct]:
                    del self.archetypemap[ct]
            del self.chunkmap[archetype]
        del self.entitymap[eid]
    def _addEntity(self, eid, compdict):
        """Internal method to add an entity. The entity id must be valid and the component list must be non-empty. Also, there must be a maximum of one component of each type.

        The entity is appended to the chunk of its archetype (the frozenset
        of its component types), creating the chunk on demand.
        """
        archetype = frozenset(compdict.keys())
        if archetype in self.chunkmap: # collect unique instance from cache, if possible
            archetype = next(iter(x for x in self.chunkmap if x == archetype))
        # if there is no container for the new archetype, create one
        if archetype not in self.chunkmap:
            # add to chunkmap
            self.chunkmap[archetype] = ([], {ct: [] for ct in archetype})
            # add to archetypemap (reverse index: component type -> archetypes)
            for ct in archetype:
                if ct not in self.archetypemap:
                    self.archetypemap[ct] = set()
                self.archetypemap[ct].add(archetype)
        # add the entity and components to the archetype container
        eidlist, comptypemap = self.chunkmap[archetype]
        eidlist.append(eid)
        for ct, c in compdict.items():
            comptypemap[ct].append(c)
        # make reference to entity in entitymap
        index = len(eidlist) - 1
        self.entitymap[eid] = (archetype, index)
def buffer(self):
"""Return a new command buffer that is associated to this scene.
*New in version 1.1.*
*Deprecated since version 1.2:* Use *CommandBuffer(scene)* instead.
"""
return CommandBuffer(self)
def new(self, *comps):
"""Returns a valid and previously unused entity id. If one or more components are supplied to the method, these will be added to the new entity. Raises *ValueError* if trying to add duplicate component types.
*Changed in version 1.2:* Added the optional *comps* parameter.
"""
# increment valid entity id
self.lasteid += 1
# add components
if comps:
compdict = {type(c): c for c in comps}
# raise ValueError on trying to add duplicate component types
if len(compdict) < len(comps):
comptypes = [type(comp) for comp in comps]
raise ValueError(f"adding duplicate component type(s): {', '.join(str(ct) for ct in comptypes if comptypes.count(ct) > 1)}")
self._addEntity(self.lasteid, compdict)
return self.lasteid
def free(self, eid):
"""Remove all components of an entity. The entity id will not be invalidated by this operation. Returns a list of the components. Raises *KeyError* if the entity id is not valid."""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# unpack entity
try:
archetype, index = self.entitymap[eid]
_, comptypemap = self.chunkmap[archetype]
except KeyError: # eid not in self.entitymap
return []
# collect the components and remove the entity
components = [comptypemap[comptype][index] for comptype in comptypemap]
self._removeEntity(eid)
return components
def components(self, eid):
"""Returns a tuple of all components of an entity. Raises *KeyError* if the entity id is not valid."""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# unpack entity
try:
archetype, index = self.entitymap[eid]
_, comptypemap = self.chunkmap[archetype]
except KeyError: # eid not in self.entitymap
return ()
return tuple(comptypemap[comptype][index] for comptype in comptypemap)
def archetype(self, eid):
"""Returns the archetype of an entity. Raises *KeyError* if the entity id is not valid."""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# unpack entity
try:
archetype, _ = self.entitymap[eid]
except KeyError: # eid not in self.entitymap
return ()
return tuple(archetype)
def add(self, eid, *comps):
"""Add components to an entity. Returns the component(s) as a list if two or more components are given, or a single component instance if only one component is given. Raises *KeyError* if the entity id is not valid or *ValueError* if the entity would have one or more components of the same type after this operation or no components are supplied to the method.
*Changed in version 1.2:* Added support for multiple components.
*Deprecated since version 1.2:* Use *set()* instead.
"""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# raise ValueError if no component are given
if not comps:
raise ValueError("missing input")
# raise ValueError if trying to add duplicate component types
if len(set(type(comp) for comp in comps)) < len(comps):
comptypes = [type(comp) for comp in comps]
raise ValueError(f"adding duplicate component type(s): {', '.join(str(ct) for ct in comptypes if comptypes.count(ct) > 1)}")
complist = list(comps)
if eid in self.entitymap:
archetype, index = self.entitymap[eid]
_, comptypemap = self.chunkmap[archetype]
# raise ValueError if trying to add component types that are already present
if any(type(comp) in comptypemap for comp in comps):
raise ValueError(f"component type(s) already present: {', '.join(str(type(comp)) for comp in comps if type(comp) in comptypemap)}")
# collect old components and remove the entity
complist.extend(comptypemap[comptype][index] for comptype in comptypemap)
self._removeEntity(eid)
compdict = {type(c): c for c in complist}
self._addEntity(eid, compdict)
if len(comps) == 1:
return comps[0]
else:
return list(comps)
def set(self, eid, *comps):
"""Set components of an entity. Raises *KeyError* if the entity id is not valid or *ValueError* if trying to set two or more components of the same type simultaneously.
*New in version 1.2.*
"""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# skip if no components are given
if not comps:
return
# sort components by type
compdict = {type(comp): comp for comp in comps}
# raise ValueError if trying to set duplicate component types
if len(compdict) < len(comps):
comptypes = list(compdict.keys())
raise ValueError(f"duplicate component type(s): {', '.join(str(ct) for ct in comptypes if comptypes.count(ct) > 1)}")
# Modify entity if already presend, else ...
if eid in self.entitymap:
archetype, index = self.entitymap[eid]
_, comptypemap = self.chunkmap[archetype]
oldcompdict = {ct: comptypemap[ct][index] for ct in comptypemap}
# If possible update components directly, else ...
if compdict.keys() <= oldcompdict.keys():
for ct, c in compdict.items():
comptypemap[ct][index] = c
else: # ... move entity in into another chunk.
newcompdict = {**oldcompdict, **compdict}
self._removeEntity(eid)
self._addEntity(eid, newcompdict)
else: # ... add entity.
self._addEntity(eid, compdict)
def has(self, eid, *comptypes):
"""Return *True* if the entity has a component of each of the given types, *False* otherwise. Raises *KeyError* if the entity id is not valid or *ValueError* if no component type is supplied to the method.
*Changed in version 1.2:* Added support for multiple component types.
"""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# raise ValueError if no component types are given
if not comptypes:
raise ValueError("missing input")
# unpack entity
try:
archetype, _ = self.entitymap[eid]
_, comptypemap = self.chunkmap[archetype]
except KeyError: # eid not in self.entitymap
return False
return all(ct in comptypemap for ct in comptypes)
def collect(self, eid, *comptypes):
"""Collect multiple components of an entity. Returns a list of the components. Raises *KeyError* if the entity id is not valid or *ValueError* if a component of any of the requested types is missing.
*New in version 1.2.*
"""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# return empty list if no components are requested
if not comptypes:
return []
# unpack entity
try:
archetype, index = self.entitymap[eid]
_, comptypemap = self.chunkmap[archetype]
except KeyError: # eid not in self.entitymap
raise ValueError(f"missing component type(s): {', '.join(str(ct) for ct in comptypes)}")
# collect and return components
try:
return [comptypemap[ct][index] for ct in comptypes]
except KeyError: # ct not in comptypemap
raise ValueError(f"missing component type(s): {', '.join(str(ct) for ct in comptypes if ct not in comptypemap)}")
def get(self, eid, comptype):
"""Get one component of an entity. Returns the component. Raises *KeyError* if the entity id is not valid or *ValueError* if the entity does not have a component of the requested type."""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# unpack entity
try:
archetype, index = self.entitymap[eid]
_, comptypemap = self.chunkmap[archetype]
except KeyError: # eid not in self.entitymap
raise ValueError(f"missing component type: {str(comptype)}")
# collect and return component
try:
return comptypemap[comptype][index]
except KeyError: # comptype not in comptypemap
raise ValueError(f"missing component type: {str(comptype)}")
def remove(self, eid, *comptypes):
"""Remove components from an entity. Returns a list of the components if two or more component types are given, or a single component instance if only one component type is given. Raises *KeyError* if the entity id is not valid or *ValueError* if the entity does not have a component of any of the given types or if no component types are supplied to the method.
*Changed in version 1.2:* Added support for multiple component types.
"""
# raise KeyError on invalid entity id
if eid < 0 or eid > self.lasteid:
raise KeyError(f"invalid entity id: {eid}")
# raise ValueError if no component types are given
if not comptypes:
raise ValueError("missing input")
# unpack entity
try:
archetype, index = self.entitymap[eid]
_, comptypemap = self.chunkmap[archetype]
except KeyError: # eid not in self.entitymap
raise ValueError(f"missing component type(s): {', '.join(str(ct) for ct in comptypes)}")
# raise ValueError if the entity does not have the requested component types
if not all(ct in comptypemap for ct in comptypes):
raise ValueError(f"missing component type(s): {', '.join(str(ct) for ct in comptypes if ct not in comptypemap)}")
# collect components that will remain on the entity and the ones to be removed
compdict = {ct: comptypemap[ct][index] for ct in comptypemap if ct not in comptypes}
removed = list(comptypemap[ct][index] for ct in comptypes)
# remove the entity and add it back if there are remaining components
self._removeEntity(eid)
if compdict:
self._addEntity(eid, compdict)
if len(removed) == 1:
return removed[0]
else:
return removed
def start(self, *systems, **kwargs):
"""Initialize the scene. All systems must implement an `onStart(scene, **kwargs)` method where this scene instance will be passed as the first argument and the `kwargs` of this method will also be passed on. The systems will be called in the same order they are supplied to this method."""
for system in systems:
system.onStart(self, **kwargs)
def update(self, *systems, **kwargs):
"""Update the scene. All systems must implement an `onUpdate(scene, **kwargs)` method where this scene instance will be passed as the first argument and the `kwargs` of this method will also be passed on. The systems will be called in the same order they are supplied to this method."""
for system in systems:
system.onUpdate(self, **kwargs)
def stop(self, *systems, **kwargs):
"""Clean up the scene. All systems must implement an 'onStop(scene, **kwargs)' method where this scene instance will be passed as the first argument and the `kwargs` of this method will also be passed on. The systems will be called in the same order they are supplied to this method."""
for system in systems:
system.onStop(self, **kwargs)
def select(self, *comptypes, exclude=None):
"""Iterate over entity ids and their corresponding components. Yields tuples of the form `(eid, (compA, compB, ...))` where `compA`, `compB`, ... are of the given component types and belong to the entity with entity id eid. If no component types are given, iterate over all entities. If *exclude* is not *None*, entities with component types listed in *exclude* will not be considered. Raises *ValueError* if *exclude* contains component types that are also explicitly included."""
# raise ValueError if trying to exclude component types that are also included
if exclude and any(ct in exclude for ct in comptypes):
raise ValueError(f"excluding explicitely included component types: {', '.join(str(x) for x in set(comptypes).intersection(exclude))}")
# collect archetypes that should be included and archetypes that should be excluded
incarchetypes = set.intersection(*[self.archetypemap.get(ct, set()) for ct in comptypes]) if comptypes else set(self.chunkmap.keys())
excarchetypes = set.union(*[self.archetypemap.get(ct, set()) for ct in exclude]) if exclude else set()
# iterate over all included archetype that are not excluded
# the iteration is reversed, because this will yield better performance when calling e.g. scene.remove() on the result.
archetypes = incarchetypes - excarchetypes
if comptypes:
for archetype in archetypes:
eidlist, comptypemap = self.chunkmap[archetype]
complists = [reversed(comptypemap[ct]) for ct in comptypes]
yield from zip(reversed(eidlist), zip(*complists))
else:
for archetype in archetypes:
eidlist, _ = self.chunkmap[archetype]
yield from zip(reversed(eidlist), _repeat(()))
| StarcoderdataPython |
3319849 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 23:33:06 2019
@author: toothsmile,CQU
@email: <EMAIL>
"""
import sys,getopt
import os
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Leading/trailing whitespace and a trailing backslash are stripped from
    *path* first. Returns True when the directory was created, False when it
    already existed.

    Fix: the Python-2-only ``print`` statements are replaced by the
    single-argument parenthesized form, which behaves identically under both
    Python 2 and Python 3.
    """
    # normalize: drop surrounding whitespace and a trailing backslash
    path = path.strip()
    path = path.rstrip("\\")
    if os.path.exists(path):
        # directory already exists -- report and skip creation
        print(path + ' 目录已存在')
        return False
    # create the directory and any missing parents
    os.makedirs(path)
    print(path + ' 创建成功')
    return True
def readfile(fname):
    """Return all lines of the text file *fname* as a list (newlines kept)."""
    with open(fname) as handle:
        return handle.readlines()
def outfile(fname, line):
    """Append *line* to the file *fname*, creating the file if necessary."""
    with open(fname, "a") as sink:
        sink.write(line)
# --- command line parsing -------------------------------------------------
# Fixes applied to this script body:
#   * Python-2-only `print` statements replaced by forms that behave the
#     same under Python 2 and 3.
#   * Six near-identical copy-pasted conversion loops deduplicated into
#     _csv_to_txt().
#   * Bug fix: the old loops skipped EVERY field whose value equaled the
#     first field (`if str_data == strLine[0]: continue`), not just the
#     first field itself; ' '.join keeps all fields.
argv = sys.argv[1:]
inputfile = ''
outputfile = ''
try:
    opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
except getopt.GetoptError:
    print('csv2txt_file.py -i <inputfilehead> -o <outputfilehead>')
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        print('csv2txt_file.py -i <inputfilehead> -o <outputfilehead>')
        sys.exit()
    elif opt in ("-i", "--ifile"):
        inputfile = arg
    elif opt in ("-o", "--ofile"):
        outputfile = arg
print('输入的文件夹+文件名前缀为: ' + inputfile)
print('输出的文件夹为: ' + outputfile)
mkdir(outputfile)

# Convert the csv files written by ulog2csv into the inputs expected by
# ecl_offline (gps, imu, mag, vision, baro):
#   (1) fields separated by spaces instead of commas
#   (2) header row dropped
#   (3) timestamps: the IMU must carry the earliest timestamp (by design of
#       the ecl_offline input reader), so rows of the other sensors that
#       predate the first IMU sample are dropped
file_head = inputfile
imu_file = file_head + "_sensor_combined_0.csv"
gps_file = file_head + "_vehicle_gps_position_0.csv"
air_file = file_head + "_vehicle_air_data_0.csv"
mag_file = file_head + "_vehicle_magnetometer_0.csv"
vision_pos_file = file_head + "_vehicle_vision_position_0.csv"
vision_att_file = file_head + "_vehicle_vision_attitude_0.csv"

outfile_head = outputfile
out_imu = outfile_head + 'imu.txt'
out_gps = outfile_head + 'gps.txt'
out_mag = outfile_head + 'mag.txt'
out_vision_pos = outfile_head + 'vision_pos.txt'
out_vision_att = outfile_head + "vision_att.txt"
out_air = outfile_head + "baro.txt"


def _csv_to_txt(src, dst, min_time=None):
    """Append csv *src* to *dst* space-separated, dropping the header row
    and, when *min_time* is given, every row whose timestamp is older."""
    for line in readfile(src):
        fields = line.split(',')
        if fields[0] == "timestamp":
            continue
        if min_time is not None and float(fields[0]) < min_time:
            continue
        # join all fields with spaces; the newline survives on the last field
        outfile(dst, ' '.join(fields))


# handle imu, remembering its first timestamp for filtering the other sensors
imu_time_first = float(readfile(imu_file)[1].split(',')[0])
_csv_to_txt(imu_file, out_imu)
# handle gps, mag, vision and baro data
_csv_to_txt(gps_file, out_gps, imu_time_first)
_csv_to_txt(mag_file, out_mag, imu_time_first)
_csv_to_txt(vision_pos_file, out_vision_pos, imu_time_first)
_csv_to_txt(vision_att_file, out_vision_att, imu_time_first)
_csv_to_txt(air_file, out_air, imu_time_first)
| StarcoderdataPython |
1727485 | """
Django settings for demo_backend project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# 注意,因为 settings 被放到文件夹了,所以我们需要获取上上级目录(也就是项目的根目录,比正常情况下要多写一层)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# 设置白名单
# 正式上线需要,dev 时被 develop.py 中的 ['*'] 代替
ALLOWED_HOSTS = ['naivegenerator.com']
# DB
DB_NAME = os.environ.get('DB_NAME')
DB_HOST = os.environ.get('DB_HOST')
DB_PORT = os.environ.get('DB_PORT')
DB_USER = os.environ.get('DB_USER')
DB_PASSWORD = os.environ.get('DB_PASSWORD')
REDIS = os.environ.get('REDIS')
# Celery settings
CELERY_BROKER_URL = REDIS
# 使用 django 的 db 和 cache
CELERY_RESULT_BACKEND = 'django-db'
CELERY_CACHE_BACKEND = 'django-cache'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
# TF Models related
# 可以通过在这里配置模型文件覆盖掉 app 里的
# 这里的目录可以映射出去,这样当模型很大时 docker image 不至于需要把模型也装进去
# TFMODEL_CHECKPOINTDIR = os.path.join(BASE_DIR, 'new_modeldir', 'ckpt_xx')
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'django_celery_results',
    'corsheaders',
    'text_generator'
]
# DRF: require login for API access by default
# (AllowAny would let anyone access the API directly)
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated', # AllowAny
    ]
}
# Origins allowed to make cross-site requests: production host + local dev server
CORS_ORIGIN_WHITELIST = (
    'naivegenerator.com',
    'localhost:3000'
)
# NOTE(review): CommonMiddleware appears twice in this list (once for the
# corsheaders setup, once in the stock ordering) -- probably unintentional;
# confirm before removing one.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware', # new
    'django.middleware.common.CommonMiddleware', # new
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo_backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'demo_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# (previous sqlite development configuration, kept for reference)
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
# PostgreSQL connection built from the DB_* environment variables read above.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': DB_NAME,
        'USER': DB_USER,
        # Bug fix: the redaction placeholder "<PASSWORD>" was a syntax error;
        # the password comes from the DB_PASSWORD environment variable, which
        # was read above but never used.
        'PASSWORD': DB_PASSWORD,
        'HOST': DB_HOST,  # or use db, which is the name in your docker-compose.yml
        'PORT': DB_PORT,
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# collectstatic target; serve this directory from the web server in production
STATIC_ROOT = os.path.join(BASE_DIR, "static")
| StarcoderdataPython |
4826348 | <reponame>alueschow/srupy
"""Utility classes and functions."""
import re
from collections import defaultdict
def get_namespace(element):
    """Return the namespace part of an XML element tag, braces included.

    For a Clark-notation tag such as ``{http://ns}name`` this returns
    ``{http://ns}``.

    :param element: An XML element (anything with a string ``.tag``).
    :raises AttributeError: if the tag carries no ``{...}`` namespace part
        (``re.search`` then returns None).
    """
    # raw string so the regex braces cannot be misread as escapes later on
    return re.search(r'({.*})', element.tag).group(1)
# https://stackoverflow.com/a/10076823
def etree_to_dict_without_ns(t):
    """Recursively convert an XML element tree into nested dicts, stripping
    the ``{namespace}`` prefix from every tag and key.

    Attributes are stored under ``@name`` keys; significant mixed text is
    stored under ``_text``, and the text of a plain leaf element (no
    children, no attributes) becomes the value itself.
    """
    def _strip(name):
        # drop a leading {namespace} chunk, if any
        return re.sub(r'\{.*\}', '', name)

    tag = _strip(t.tag)
    children = list(t)
    result = {tag: {} if t.attrib else None}
    if children:
        grouped = defaultdict(list)
        for child in children:
            for key, value in etree_to_dict_without_ns(child).items():
                grouped[_strip(key)].append(value)
        # collapse single-element lists to the element itself
        result = {tag: {_strip(key): values[0] if len(values) == 1 else values
                        for key, values in grouped.items()}}
    if t.attrib:
        result[tag].update(('@' + key, value) for key, value in t.attrib.items())
    if t.text:
        text = t.text.strip()
        if children or t.attrib:
            if text:
                result[tag]['_text'] = text
        else:
            result[tag] = text
    return result
def etree_to_dict(t):
    """Recursively convert an XML element tree into nested dicts, keeping tag
    names (including any ``{namespace}`` prefix) exactly as they are.

    Attributes are stored under ``@name`` keys; significant mixed text is
    stored under ``_text``, and the text of a plain leaf element (no
    children, no attributes) becomes the value itself.
    """
    tag = t.tag
    children = list(t)
    result = {tag: {} if t.attrib else None}
    if children:
        grouped = defaultdict(list)
        for child in children:
            for key, value in etree_to_dict(child).items():
                grouped[key].append(value)
        # collapse single-element lists to the element itself
        result = {tag: {key: values[0] if len(values) == 1 else values
                        for key, values in grouped.items()}}
    if t.attrib:
        result[tag].update(('@' + key, value) for key, value in t.attrib.items())
    if t.text:
        text = t.text.strip()
        if children or t.attrib:
            if text:
                result[tag]['_text'] = text
        else:
            result[tag] = text
    return result
| StarcoderdataPython |
99057 | <reponame>HawxChen/barrelfishOS
from arm_ds.debugger_v1 import Debugger
from arm_ds.debugger_v1 import DebugException
import os
# Arm DS debugger script (runs under the debugger's Jython interpreter):
# after the molly bootloader has relocated the Barrelfish CPU driver, rebase
# the kernel symbols by the relocation offset and stop at arch_init().
# The CPU driver is linked at this address
LINKADDRESS = 0
debugger = Debugger()
ec = debugger.getCurrentExecutionContext()
es = ec.getExecutionService()
# Run until the end of molly, to discover where the kernel has been loaded.
# XXX - this is fragile, and should be replaced with a symbol.
es.resumeTo('molly_init32.c', 108)
es.waitForStop()
# The old execution context became invalid when we resumed.
ec = debugger.getCurrentExecutionContext()
# Get the CPU driver's final load address.
vs = ec.getVariableService()
kernel_start = vs.readValue('kernel_start')
# relocation offset between the actual load address and the link address
offset = int(kernel_start) - LINKADDRESS
print "Kernel loaded at: %08x" % int(kernel_start), " linked at %08x" % LINKADDRESS, " offset %08x" % offset
# Replace the molly symbols with the kernel symbols, shifted by the offset
im= ec.getImageService()
im.loadSymbols('Barrelfish/armv7/sbin/cpu_a9ve', offset)
# Finally, advance to arch_init()
es.resumeTo('arch_init')
| StarcoderdataPython |
3392794 | # -*- coding: utf-8 -*-
"""API model for working with system configuration."""
import math
from ..mixins import ChildMixins, Model
class Meta(ChildMixins):
    """Child API model for working with instance metadata."""

    def about(self) -> dict:
        """Get about page metadata.

        The result is fetched once and memoized on the instance.

        Returns:
            :obj:`dict`: about page metadata
        """
        if not hasattr(self, "_about_data"):
            about = self._about()
            about["Version"] = self._get_version(about=about)
            self._about_data = about
        return self._about_data

    def historical_sizes(self) -> dict:
        """Get disk usage metadata, parsed into megabyte figures.

        Returns:
            :obj:`dict`: disk usage metadata
        """
        return parse_sizes(self._historical_sizes())

    def _get_version(self, about: dict) -> str:
        """Pull the version string out of *about*, normalizing separators."""
        raw = about.pop("Version", "") or about.pop("Installed Version", "")
        return raw.replace("_", ".")

    @property
    def version(self) -> str:
        """Get the version of Axonius."""
        return self.about()["Version"]

    def _init(self, parent: Model):
        """Post init method for subclasses to use for extra setup.

        Args:
            parent (:obj:`.api.mixins.Model`): parent API model of this child
        """
        super(Meta, self)._init(parent=parent)

    def _about(self) -> dict:
        """Direct API method to get the About page.

        Returns:
            :obj:`dict`: about page metadata
        """
        return self.request(method="get", path=self.router.meta_about)

    def _historical_sizes(self) -> dict:
        """Direct API method to get the metadata about disk usage.

        Returns:
            :obj:`dict`: disk usage metadata
        """
        return self.request(method="get", path=self.router.meta_historical_sizes)
def parse_sizes(raw: dict) -> dict:
    """Convert raw byte counts from the API into megabyte figures.

    Free space is rounded down and used space rounded up, so both numbers
    err on the conservative side.
    """
    entity_sizes = raw["entity_sizes"]
    return {
        "disk_free_mb": math.floor(raw["disk_free"] / 1024 / 1024),
        "disk_used_mb": math.ceil(raw["disk_used"] / 1024 / 1024),
        "historical_sizes_devices": entity_sizes.get("Devices", {}),
        "historical_sizes_users": entity_sizes.get("Users", {}),
    }
| StarcoderdataPython |
1671876 | from typing import Union, Dict, NamedTuple
# A formatting rule is either absent (None), a dict of options, or a plain
# boolean toggle.
FormattingRule = Union[None, Dict, bool]


class FormattingResult(NamedTuple):
    """Outcome of a formatting pass.

    Replaces the legacy functional ``NamedTuple("FormattingResult", [...])``
    call with the equivalent class syntax, which supports docstrings and is
    the modern idiom.
    """

    text: str            # the formatted text
    dumping_config: dict  # config used when dumping (serializing)
    loading_config: dict  # config used when loading (parsing)
| StarcoderdataPython |
4806996 | <reponame>marsven/conan-center-index<gh_stars>1-10
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
class FoxgloveWebSocketConan(ConanFile):
    """Conan recipe packaging the header-only foxglove-websocket C++ library."""
    name = "foxglove-websocket"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/foxglove/ws-protocol"
    description = "A C++ server implementation of the Foxglove WebSocket Protocol"
    license = "MIT"
    topics = ("foxglove", "websocket")
    settings = ("os", "compiler", "build_type", "arch")
    requires = ("nlohmann_json/3.10.5", "websocketpp/0.8.2")
    generators = ("cmake", "cmake_find_package")
    # upstream is a monorepo; the C++ package lives in cpp/foxglove-websocket
    _source_root = "source_root"
    _source_package_path = os.path.join(_source_root, "cpp", "foxglove-websocket")
    def source(self):
        # download and unpack the pinned release archive into _source_root
        tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_root)
    def validate(self):
        # the library requires C++17 support
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, "17")
        # NOTE(review): `tools.Version(...) <= 8` compares a Version against
        # an int -- confirm this behaves as intended across conan versions.
        if (self.settings.compiler == "gcc" or self.settings.compiler == "clang") and tools.Version(self.settings.compiler.version) <= 8:
            raise ConanInvalidConfiguration("Compiler version is not supported, c++17 support is required")
        if self.settings.compiler == "Visual Studio" and tools.Version(self.settings.compiler.version) <= "16.8":
            raise ConanInvalidConfiguration("Compiler version is not supported, c++17 support is required")
    def configure(self):
        # websocketpp must use standalone Asio (avoids a Boost dependency)
        self.options["websocketpp"].asio = "standalone"
    def package(self):
        # header-only: ship the license file and the include tree
        self.copy("LICENSE", dst="licenses", src=self._source_package_path)
        self.copy("include/*", src=self._source_package_path)
    def package_id(self):
        # header-only package: a single package id regardless of settings
        self.info.header_only()
| StarcoderdataPython |
165281 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def get_driver(executable_path="E:\\chromedriver\\chromedriver.exe",
               debugger_address="localhost:8989"):
    """Attach a Chrome WebDriver to an already-running browser instance.

    Chrome must have been started with ``--remote-debugging-port`` matching
    *debugger_address*.

    Args:
        executable_path: path to the chromedriver binary (the previously
            hard-coded path is kept as the default for backward compatibility).
        debugger_address: ``host:port`` of the running browser's debugger.

    Returns:
        selenium.webdriver.Chrome: driver attached to the existing browser.
    """
    opt = webdriver.ChromeOptions()
    opt.add_experimental_option("debuggerAddress", debugger_address)
    # "options" is the current keyword; "chrome_options" is deprecated in
    # Selenium 3.x and removed in Selenium 4.
    driver = webdriver.Chrome(executable_path=executable_path, options=opt)
    return driver
| StarcoderdataPython |
3290 | # 获取调课、改课通知例子
from zfnew import GetInfo, Login
base_url = '学校教务系统的主页url'  # home-page URL of the school's educational administration system
lgn = Login(base_url=base_url)
lgn.login('账号', '密码')  # account, password
cookies = lgn.cookies  # how to obtain the session cookies after login
person = GetInfo(base_url=base_url, cookies=cookies)
message = person.get_message()  # fetch course-change / rescheduling notices
print(message)
| StarcoderdataPython |
1647428 | import numpy as np
from src.models.dnam.tabnet import TabNetModel
import torch
import lightgbm as lgb
import pandas as pd
import hydra
from omegaconf import DictConfig
from pytorch_lightning import (
LightningDataModule,
seed_everything,
)
from experiment.logging import log_hyperparameters
from pytorch_lightning.loggers import LightningLoggerBase
from src.utils import utils
from experiment.routines import eval_classification_sa
from typing import List
import wandb
from catboost import CatBoost
import xgboost as xgb
log = utils.get_logger(__name__)
def inference(config: DictConfig):
    """Run test-set inference for a trained classification model.

    Instantiates the datamodule described by *config*, restores the model
    checkpoint (``lightgbm`` / ``catboost`` / ``xgboost`` / ``tabnet``),
    predicts class probabilities for the whole dataframe, logs evaluation
    metrics, and saves per-sample predictions to ``predictions.xlsx``.

    Raises:
        ValueError: if ``config.model_type`` is not one of the supported types.
    """
    if "seed" in config:
        seed_everything(config.seed)

    if 'wandb' in config.logger:
        config.logger.wandb["project"] = config.project_name

    # Init lightning loggers
    loggers: List[LightningLoggerBase] = []
    if "logger" in config:
        for _, lg_conf in config.logger.items():
            if "_target_" in lg_conf:
                log.info(f"Instantiating logger <{lg_conf._target_}>")
                loggers.append(hydra.utils.instantiate(lg_conf))

    log.info("Logging hyperparameters!")
    log_hyperparameters(loggers, config)

    # Init Lightning datamodule for test
    log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    datamodule.setup()
    feature_names = datamodule.get_feature_names()
    class_names = datamodule.get_class_names()
    outcome_name = datamodule.get_outcome_name()
    df = datamodule.get_df()
    df['pred'] = 0
    X_test = df.loc[:, feature_names].values
    y_test = df.loc[:, outcome_name].values

    # Restore the checkpoint and predict class probabilities
    if config.model_type == "lightgbm":
        model = lgb.Booster(model_file=config.ckpt_path)
        y_test_pred_prob = model.predict(X_test)
    elif config.model_type == "catboost":
        model = CatBoost()
        model.load_model(config.ckpt_path)
        y_test_pred_prob = model.predict(X_test)
    elif config.model_type == "xgboost":
        model = xgb.Booster()
        model.load_model(config.ckpt_path)
        dmat_test = xgb.DMatrix(X_test, y_test, feature_names=feature_names)
        y_test_pred_prob = model.predict(dmat_test)
    elif config.model_type == "tabnet":
        model = TabNetModel.load_from_checkpoint(checkpoint_path=f"{config.ckpt_path}")
        model.produce_probabilities = True
        model.eval()
        model.freeze()
        X_test_pt = torch.from_numpy(X_test)
        y_test_pred_prob = model(X_test_pt).cpu().detach().numpy()
    else:
        # Bug fix: the previous message was a placeholder-less f-string that
        # always read "Unsupported sa_model"; name the offending value.
        raise ValueError(f"Unsupported model_type: {config.model_type}")

    y_test_pred = np.argmax(y_test_pred_prob, 1)
    eval_classification_sa(config, class_names, y_test, y_test_pred, y_test_pred_prob,
                           loggers, 'inference', is_log=True, is_save=True)

    # Persist per-sample predictions and per-class probabilities
    df.loc[:, "pred"] = y_test_pred
    for cl_id, cl in enumerate(class_names):
        df.loc[:, f"pred_prob_{cl_id}"] = y_test_pred_prob[:, cl_id]
    predictions = df.loc[:, [outcome_name, "pred"] + [f"pred_prob_{cl_id}" for cl_id, cl in enumerate(class_names)]]
    predictions.to_excel("predictions.xlsx", index=True)

    for logger in loggers:
        logger.save()
    if 'wandb' in config.logger:
        wandb.finish()
| StarcoderdataPython |
119472 | <gh_stars>1-10
# Generated by Django 3.1.5 on 2021-03-15 11:52
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: add ``created_date`` to the ``post`` model."""

    # must be applied after the migration that added the likes field
    dependencies = [
        ('blogapp', '0007_post_likes'),
    ]
    operations = [
        # default=timezone.now stamps pre-existing rows at migration time
        migrations.AddField(
            model_name='post',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| StarcoderdataPython |
3395111 | from django.urls import path
from menus.views import SystemMenuView
from django.views.decorators.csrf import csrf_exempt
# System menu endpoint.  csrf_exempt disables CSRF protection here --
# NOTE(review): presumably because the view is consumed by non-browser
# clients that carry no CSRF token; confirm this trade-off is intentional
# for unsafe (POST) requests.
urlpatterns = [
    path('system/', csrf_exempt(SystemMenuView.as_view())),
]
| StarcoderdataPython |
11394 | <reponame>hadleyhzy34/reinforcement_learning<gh_stars>0
import numpy as np
import gym
from utils import *
from agent import *
from config import *
def train(env, agent, num_episode, eps_init, eps_decay, eps_min, max_t):
    """Run epsilon-greedy DQN training episodes.

    Args:
        env: gym-style environment exposing ``reset()`` and ``step(action)``.
        agent: agent exposing ``act``, ``learn``, ``soft_update``, a replay
            ``memory`` (with ``remember``/``__len__``), batch size ``bs`` and
            soft-update rate ``tau``.
        num_episode: number of episodes to play.
        eps_init: starting epsilon.
        eps_decay: multiplicative epsilon decay applied after each episode.
        eps_min: lower bound for epsilon.
        max_t: maximum steps per episode.

    Returns:
        tuple: (per-episode rewards, running mean over the last 100 episodes).
    """
    rewards_log = []
    average_log = []
    eps = eps_init
    for episode in range(1, num_episode + 1):
        total_reward = 0
        state = env.reset()
        done = False
        step = 0
        while not done and step < max_t:
            step += 1
            state = state.reshape(1, -1)
            action = agent.act(state, eps)
            next_state, reward, done, _ = env.step(action)
            agent.memory.remember(state, action, reward, next_state, done)
            # learn on every 4th step once enough transitions are buffered
            if step % 4 == 0 and len(agent.memory) >= agent.bs:
                agent.learn()
                agent.soft_update(agent.tau)
            state = next_state.copy()
            total_reward += reward
        rewards_log.append(total_reward)
        average_log.append(np.mean(rewards_log[-100:]))
        print('\rEpisode {}, Reward {:.3f}, Average Reward {:.3f}'.format(episode, total_reward, average_log[-1]), end='')
        if episode % 100 == 0:
            print()
        eps = max(eps * eps_decay, eps_min)
    return rewards_log, average_log
if __name__ == '__main__':
    # Build the environment and agent from the shared config constants,
    # train, then persist the reward curve and the local Q-network weights.
    # (Fix: a stray dataset artifact fused onto the end of the final line
    # made this file a syntax error; it has been removed.)
    env = gym.make(RAM_ENV_NAME)
    agent = Agent(env.observation_space.shape[0], env.action_space.n, BATCH_SIZE, LEARNING_RATE, TAU, GAMMA, DEVICE, False, DUEL, DOUBLE, PRIORITIZED)
    rewards_log, _ = train(env, agent, RAM_NUM_EPISODE, EPS_INIT, EPS_DECAY, EPS_MIN, MAX_T)
    np.save('{}_rewards.npy'.format(RAM_ENV_NAME), rewards_log)
    # move the network to CPU first so the checkpoint loads on CPU-only machines
    agent.Q_local.to('cpu')
    torch.save(agent.Q_local.state_dict(), '{}_weights.pth'.format(RAM_ENV_NAME))
21066 | import html
import json
import re
from datetime import date
from autoslug import AutoSlugField
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MinLengthValidator
from django.db.models.aggregates import Count
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from django.utils.timezone import now
from django.utils.translation import gettext as _
from easyaudit.models import CRUDEvent
from taggit_autosuggest.managers import TaggableManager
from pycompanies.models import UserCompanyProfile
from .constants import STATE_LABEL_CLASSES
class EventType(models.IntegerChoices):
    """Kinds of job-offer visualization events that get tracked."""
    LISTING_VIEW = (0, _('Visualización en Listado'))  # offer seen in a listing page
    DETAIL_VIEW = (1, _('Visualización de la oferta completa'))  # full offer page opened
    CONTACT_INFO_VIEW = (2, _('Apertura de la información de contacto'))  # contact info revealed
class Experience(models.TextChoices):
    """Choices for JobOffer Experience (minimum years of experience)."""
    ZERO = '0', _('0')
    ONE_PLUS = '1+', _('1+')
    TWO_PLUS = '2+', _('2+')
    THREE_PLUS = '3+', _('3+')
    FIVE_PLUS = '5+', _('5+')
    TEN_PLUS = '10+', _('10+')
class Remoteness(models.TextChoices):
    """Choices for the work modality of an offer (remote / on-site / hybrid)."""
    REMOTE = 'REMOTE', _('Remoto')
    OFFICE = 'IN_OFFICE', _('Presencial')
    HYBRID = 'MIXED', _('Mixto')
class HiringType(models.TextChoices):
    """
    Choices for HiringType (contract modality of a JobOffer).
    """

    EMPLOYEE = 'EMPLOYEE', _('Relación de dependencia')
    MONOTRIBUTISTA = 'MONOTRIBUTO', _('Monotributista')
    CONTRACTOR_SHORT = 'CONTRACTOR_SHORT', _('Contractor short term')
    CONTRACTOR_LONG = 'CONTRACTOR_LONG', _('Contractor long term')
    COOPERATIVE = 'COOPERATIVE', _('Cooperativa de trabajo')
    GOVERNMENT = 'GOVERNMENT', _('Estado')
    OTHER = 'OTHER', _('Otra')
class OfferState(models.TextChoices):
    """
    Choices for JobOfferStates (moderation lifecycle of a JobOffer).
    """

    NEW = 'NEW', _('Nuevo')  # Used only for actions
    DEACTIVATED = 'DEACTIVATED', _('Desactivada')  # default state on creation
    MODERATION = 'MODERATION', _('En moderación')
    ACTIVE = 'ACTIVE', _('Activa')
    REJECTED = 'REJECTED', _('Rechazada')
    EXPIRED = 'EXPIRED', _('Caducada')
class JobOffer(models.Model):
    """A PyAr Job Offer."""

    # Uniqueness plus a 20-character minimum keep titles descriptive.
    title = models.CharField(
        max_length=255, verbose_name=_('Título'), validators=[MinLengthValidator(20)], unique=True
    )
    company = models.ForeignKey(
        'pycompanies.Company',
        verbose_name=_('Empresa'),
        on_delete=models.CASCADE,
    )
    # Optional here, but a DB constraint (see Meta) requires it for
    # in-office and hybrid offers.
    location = models.CharField(max_length=100, blank=True, null=True, verbose_name=_('Lugar'))
    # The three contact fields are individually optional; a DB constraint
    # (see Meta) requires at least one of them to be set.
    contact_mail = models.EmailField(
        max_length=255, blank=True, null=True, verbose_name=_('E-mail')
    )
    contact_phone = models.CharField(
        max_length=255, null=True, blank=True, verbose_name=_('Teléfono')
    )
    contact_url = models.CharField(
        max_length=255, null=True, blank=True, verbose_name=_('URL Contacto')
    )
    experience = models.CharField(
        max_length=3, choices=Experience.choices, verbose_name=_('Experiencia')
    )
    remoteness = models.CharField(
        max_length=32, choices=Remoteness.choices, verbose_name=_('Modalidad de trabajo')
    )
    tags = TaggableManager(verbose_name=_('Etiquetas'), blank=True)
    hiring_type = models.CharField(
        max_length=32, choices=HiringType.choices, verbose_name=_('Tipo de contratación')
    )
    salary = models.CharField(
        max_length=255, null=True, verbose_name=_('Rango salarial')
    )
    description = models.TextField(verbose_name=_('Descripción'))
    # Auto-derived from description in save() when left empty.
    short_description = models.TextField(
        max_length=512,
        verbose_name=_('Descripción corta')
    )
    created_at = models.DateTimeField(
        auto_now_add=True, verbose_name=_('Hora de creación')
    )
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        verbose_name=_('Creado por'),
        related_name='created_offers',
    )
    modified_at = models.DateTimeField(auto_now=True, verbose_name=_('Hora de Modificación'))
    modified_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        verbose_name=_('Modificado por'),
        related_name='modified_offers',
    )
    state = models.CharField(
        max_length=32, choices=OfferState.choices, default=OfferState.DEACTIVATED,
        verbose_name=_('Estado de la oferta')
    )
    slug = AutoSlugField(populate_from='title', unique=True)

    def get_absolute_url(self):
        """Return the full (BASE_URL + path) URL of the offer detail page."""
        url = reverse('joboffers:view', kwargs={'slug': self.slug})
        absolute_url = "".join((settings.BASE_URL, url))
        return absolute_url

    def __str__(self):
        return self.title

    @property
    def last_comment(self):
        """
        Return the last JobOfferComment attached to this offer
        (None when there are no comments).
        """
        return self.joboffercomment_set.last()

    @classmethod
    def get_short_description(cls, description):
        """
        Deduce the short_description from a given html description string.
        """
        # Strip tags, collapse whitespace, unescape HTML entities, then
        # truncate to the 512 characters short_description allows.
        description_stripped_tags = re.sub(r'<[^>]*>', ' ', description)
        description_without_spaces = re.sub(r'\s+', ' ', description_stripped_tags).strip()
        description_unescaped = html.unescape(description_without_spaces)
        return description_unescaped[:512]

    def track_visualization(self, session, event_type: EventType):
        """
        Either get or create the matching JobOfferAccessLog instance for the joboffer.

        Returns the ``(instance, created)`` tuple from ``get_or_create``.
        """
        today = date.today()
        # Encode year+month as a single YYYYMM integer (e.g. 202401).
        month_year = today.year * 100 + today.month

        # An unsaved session has no key yet; saving assigns one so views
        # can be deduplicated per session.
        if session.session_key is None:
            session.save()

        return JobOfferAccessLog.objects.get_or_create(
            month_and_year=month_year,
            event_type=event_type,
            session=session.session_key,
            joboffer=self
        )

    def get_publisher_mail_addresses(self):
        """
        Return the set of email addresses of the publishers of this offer.
        It filters users with empty mail field.
        """
        profiles = UserCompanyProfile.objects.filter(company=self.company)

        addresses = set()

        for profile in profiles:
            if profile.user.email:
                addresses.add(profile.user.email)

        return addresses

    def get_visualizations_count(self):
        """
        Get a dict with visualizations count for every kind of event.

        Keys are EventType values; values are the per-type event counts.
        """
        items = JobOfferAccessLog.objects \
            .filter(joboffer=self) \
            .values_list('event_type') \
            .annotate(total=Count('event_type')) \
            .order_by()

        return dict(items)

    def save(self, *args, **kwargs):
        # NOTE(review): the slug is recomputed from the title on every save,
        # overriding AutoSlugField's value — confirm this is intended, as it
        # bypasses the field's unique-suffix handling.
        self.slug = slugify(self.title)

        if not self.short_description:
            self.short_description = self.get_short_description(self.description)

        super().save(*args, **kwargs)

    @classmethod
    def get_options(cls):
        """
        Public _meta API accesor https://docs.djangoproject.com/en/4.0/ref/models/meta/
        """
        return cls._meta

    class Meta:
        constraints = [
            # At least one contact channel (mail, phone or URL) must be set.
            models.CheckConstraint(
                name='%(app_label)s_%(class)s_not_all_contact_info_null',
                check=(
                    models.Q(
                        contact_mail__isnull=False,
                    )
                    | models.Q(
                        contact_phone__isnull=False,
                    )
                    | models.Q(
                        contact_url__isnull=False,
                    )
                ),
            ),
            # Non-remote (in-office / hybrid) offers must specify a location.
            models.CheckConstraint(
                name='%(app_label)s_%(class)s_location_not_null_when_not_remote',
                check=(
                    (
                        models.Q(remoteness__in=(Remoteness.HYBRID, Remoteness.OFFICE))
                        & models.Q(location__isnull=False)
                    )
                    | models.Q(remoteness=Remoteness.REMOTE)
                ),
            ),
        ]
class CommentType(models.TextChoices):
    """
    Choices for Types of JobOfferComments.
    """

    MODERATION = 'MODERATION', _('Moderación')
    EDITION = 'EDITION', _('Edición')
    SPAM = 'SPAM', _('Spam')
    INSUFICIENT = 'INSUFICIENT', _('Información insuficiente')
    # NOTE(review): member name NOT_RELATED stores the value 'NOT_PYTHON' —
    # presumably historical; confirm before renaming either side, since the
    # value is persisted in the database.
    NOT_RELATED = 'NOT_PYTHON', _('Oferta no relacionada con Python')
class JobOfferComment(models.Model):
    """
    A comment on a JobOffer (e.g. a moderation/rejection note).
    """

    text = models.TextField(verbose_name=_('Texto'))
    comment_type = models.CharField(
        max_length=32, choices=CommentType.choices, verbose_name=_('Tipo'))
    created_at = models.DateTimeField(
        # Bug fix: verbose_name previously said _('Rango salarial')
        # (salary range), a copy-paste from JobOffer.salary.
        auto_now_add=True, verbose_name=_('Hora de creación')
    )
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        verbose_name=_('Creado por'),
        related_name='created_joboffer_comments',
    )
    joboffer = models.ForeignKey(JobOffer, on_delete=models.CASCADE)

    @classmethod
    def get_options(cls):
        """
        Public _meta API accesor https://docs.djangoproject.com/en/4.0/ref/models/meta/
        """
        return cls._meta

    def __str__(self):
        return f"{self.joboffer.title}: {self.get_comment_type_display()}"
class JobOfferHistoryManager(models.Manager):
    def for_offer(self, joboffer):
        """
        Get all the history objects for a given joboffer. It can be JobOffer and JobOfferComment.
        """
        qs = super().get_queryset()

        offer_ctype = ContentType.objects.get(app_label='joboffers', model='joboffer')
        offer_comment_ctype = ContentType.objects.get(
            app_label='joboffers', model='joboffercomment'
        )

        # event_type < 4 restricts which CRUDEvent kinds are included —
        # NOTE(review): confirm against easyaudit's CRUDEvent event-type
        # constants which kinds this cutoff keeps.
        offer_q = models.Q(event_type__lt=4, object_id=joboffer.id, content_type=offer_ctype)

        offer_comment_ids = [
            offer_comment.id for offer_comment in joboffer.joboffercomment_set.all()
        ]
        offer_comment_q = models.Q(
            object_id__in=offer_comment_ids, content_type=offer_comment_ctype
        )

        # History rows for either the offer itself or any of its comments.
        qs = qs.filter(offer_q | offer_comment_q)
        return qs
class JobOfferHistory(CRUDEvent):
    """
    Proxy model over easyaudit's CRUDEvent used to take the history/audit
    logic away from the controller.
    """

    objects = JobOfferHistoryManager()

    @property
    def fields(self):
        """
        Return the representation of the joboffer after this particular change is applied.
        It returns a python dict that can contain different fields that the current model.
        """
        # object_json_repr stores a serialized list with a single object.
        obj_repr = json.loads(self.object_json_repr)
        fields = obj_repr[0]['fields']
        return fields

    @property
    def joboffer_comment(self):
        """
        Return the JobOfferComment instance for the matching JobOfferHistory.

        Raises ValueError when this history row is not about a comment.
        """
        if self.content_type.model != 'joboffercomment':
            raise ValueError("Unexpected model. Expected a JobOfferComment instance.")

        return JobOfferComment.objects.get(id=self.object_id)

    @property
    def changes(self):
        """
        Get a dict with the changes made to the object.

        Returns None when the event recorded no field changes.
        """
        if self.changed_fields:
            return json.loads(self.changed_fields)
        else:
            return None

    @property
    def state_label(self):
        """
        Get the state of the joboffer at the time of the change.
        """
        if self.content_type.model != 'joboffer':
            raise ValueError("Unexpected model. Expected a JobOffer instance.")

        fields = self.fields
        # Build a transient (unsaved) JobOffer only to reuse the
        # get_state_display() helper for the historic state value.
        joboffer = JobOffer(state=fields['state'])
        return joboffer.get_state_display()

    @property
    def state_label_class(self):
        """
        Get the bootstrap label class for the matching joboffer state. Returns a default if the
        'state' field is not present. Maybe because a name update in the model.
        """
        if self.content_type.model != 'joboffer':
            raise ValueError("Unexpected model. Expected a JobOffer instance.")

        state = self.fields['state']
        # NOTE(review): the docstring mentions a default, but a missing
        # 'state' key raises KeyError here — confirm intended behavior.
        return STATE_LABEL_CLASSES[state]

    class Meta:
        proxy = True
class JobOfferAccessLog(models.Model):
    """
    Model to track visualization of joboffers.
    """

    created_at = models.DateTimeField(default=now)
    # Year and month packed as YYYYMM (see JobOffer.track_visualization).
    month_and_year = models.PositiveIntegerField()
    event_type = models.PositiveSmallIntegerField(
        choices=EventType.choices, verbose_name=_('Tipo de Evento')
    )
    # Django session key (40 chars) used to deduplicate views per visitor.
    session = models.CharField(max_length=40, verbose_name=_('Identificador de Sesión'))
    joboffer = models.ForeignKey(JobOffer, on_delete=models.CASCADE)

    class Meta:
        ordering = ['created_at']
# Repository: datalexum/UNIX-time-from-NTP
import calendar
import subprocess
from ntplib import NTPClient
from datetime import datetime, timezone, timedelta
from socket import gaierror
def time_from_ntp(arguments):
    """
    Query an NTP server and set the system clock via ``date -s``.

    Parameters
    ----------
    arguments : dict
        Must contain 'server' (NTP host name) and 'timezone' (UTC offset
        in hours used to render the local time).
    """
    ntp_client = NTPClient()
    try:
        response = ntp_client.request(arguments['server'], version=3)
        # Render the server's transmit timestamp in the requested fixed
        # UTC offset.  (Removed the previous no-op statements `arguments`
        # and `response.offset`, which had no effect.)
        time_date = datetime.fromtimestamp(
            response.tx_time, timezone(timedelta(hours=arguments['timezone'])))
        time_template = "{day} {month} {year} {hour}:{minute}:{second}"
        # `date` expects a three-letter upper-case month abbreviation.
        month = calendar.month_name[time_date.month][:3].upper()
        time_string = time_template.format(
            day=time_date.day, month=month, year=time_date.year,
            hour=time_date.hour, minute=time_date.minute,
            second=time_date.second)
        # Setting the clock requires root; a non-zero exit raises
        # CalledProcessError, handled below.
        subprocess.check_output(['date', '-s', "{}".format(time_string)])
        print("Time set to {}".format(time_string))
    except gaierror:
        print("Connection Error: No internet connection or connection to NTP-Server not possible!")
    except subprocess.CalledProcessError:
        print("Permission Error: You don't have permissions to set the date and time!")
import pandas as pd
import numpy as np
from typing import List
import psycopg2
from psycopg2.extensions import register_adapter, AsIs
import src.util
import src.helpers
# Setup system connection
mode = "DEV"
# Teach psycopg2 how to serialize numpy int64 values (DataFrame.itertuples
# yields numpy scalars, which psycopg2 cannot adapt by default).
psycopg2.extensions.register_adapter(np.int64, AsIs)
logger_wrapper = src.util.LoggerWrapper(algo_id=None)
db_connector = src.helpers.DBConnector(logger=logger_wrapper.logger, mode=mode)
############################# SET PARAMETERS ###################################
# To-be populated currency pairs
pairs = [
{"BASE": "LINK", "QUOTE": "EUR"},
{"BASE": "BTC", "QUOTE": "EUR"},
{"BASE": "BTC", "QUOTE": "USD"},
{"BASE": "ETH", "QUOTE": "EUR"},
]
# To-be populated echange currency pair associations
exchange_names = ['BITFINEX', 'BITPANDA',
'KRAKEN'] # Single quotation marks !!!
currency_pairs = ['LINKEUR']
# To-be populated algo exchange associations
algos = ['A-tests-multi-lateral']
algo_exchanges = ['BITFINEX', 'BITPANDA', 'KRAKEN']
# To-be populated algo currency pair associations
cups_algo = ['LINKEUR']
############################# DEFINE FUNCTIONS #################################
def get_algo_registry(db_connection):
    """Return the registry row(s) for the 'A-tests-multi-lateral' algo."""
    sql = """
    SELECT *
    FROM "public"."algo_registry" AS "ALR"
    --JOIN "public"."algo_configuration" AS "ALC"
    --ON "ALR"."id" = "ALC"."id_algo"
    WHERE "name" = 'A-tests-multi-lateral'
    """
    return pd.read_sql_query(sql, db_connection)
def migrate_algo_registry(db_connection, data):
    """
    Copy algo registry rows into "PROD_001"."ALGO_REGISTRY" and return a
    mapping from each old `id` to the newly assigned "ALR_ID".
    """
    query_migrate = """
    INSERT INTO "PROD_001"."ALGO_REGISTRY"
    ("ALR_ID", "ALR_NAME", "ALR_DESCRIPTION", "ALR_STATUS")
    VALUES (NEXTVAL('"PROD_001"."SEQ_ALR_ID"'),%s,%s,%s)
    """
    query_lookup = """
    SELECT "ALR_ID"
    FROM "PROD_001"."ALGO_REGISTRY"
    WHERE "ALR_NAME" IN (%s)
    """
    id_map = {}
    with db_connection.cursor() as cursor:
        for record in data.itertuples(index=False):
            # Insert, then immediately look up the sequence-assigned id
            # by the (unique) algo name.
            cursor.execute(query_migrate,
                           (record.name, record.description, record.status))
            cursor.execute(query_lookup, (record.name,))
            id_map[record.id] = cursor.fetchall()[0][0]
    return id_map
def get_algo_config(db_connection):
    """
    Read "public"."algo_configuration" and return it with multi-valued
    `property_value` entries expanded into one row per value.

    A value such as "A, B" (comma-separated; all spaces stripped) becomes
    two rows with values "A" and "B".  Single-valued rows are kept
    untouched (including any internal spaces), matching the previous
    behavior.  Assumes the table exposes the columns `property_name`,
    `property_value` and `id_algo` (the old positional row construction
    relied on exactly that column order).
    """
    query = """
    SELECT *
    FROM "public"."algo_configuration"
    """
    config_df = pd.read_sql_query(query, db_connection)

    # Build the atomic rows in a fresh list instead of dropping from /
    # concatenating onto config_df while iterating it, which was fragile
    # and order-dependent.  Rows are also built by column NAME, not by
    # position.
    atomic_rows = []
    for row in config_df.itertuples(index=False):
        entries = str(row.property_value).replace(" ", "").split(",")
        if len(entries) > 1:
            for entry in entries:
                atomic_rows.append({"property_name": row.property_name,
                                    "property_value": entry,
                                    "id_algo": row.id_algo})
        else:
            atomic_rows.append({"property_name": row.property_name,
                                "property_value": row.property_value,
                                "id_algo": row.id_algo})

    return pd.DataFrame(atomic_rows,
                        columns=["property_name", "property_value", "id_algo"])
def migrate_algo_config(db_connection, data, lookup):
    """
    Insert every configuration row into "PROD_001"."ALGO_CONFIGURATION",
    translating the old algo id through `lookup` (old id -> new ALR_ID).
    """
    query_migrate = """
    INSERT INTO "PROD_001"."ALGO_CONFIGURATION"
    ("ALC_ID", "ALC_ALR_ID", "ALC_NAME", "ALC_VALUE")
    VALUES (NEXTVAL('"PROD_001"."SEQ_ALC_ID"'),%s,%s,%s)
    """
    with db_connection.cursor() as cursor:
        for record in data.itertuples():
            cursor.execute(
                query_migrate,
                (lookup[record.id_algo], record.property_name, record.property_value),
            )
def get_currency(db_connection):
    """Return all rows of "public"."currency" ordered by id."""
    return pd.read_sql_query(
        """
    SELECT *
    FROM "public"."currency"
    ORDER BY "id" ASC
    """,
        db_connection,
    )
def migrate_currency(db_connection, data):
    """Copy currency rows into "PROD_001"."CURRENCY" (ids from SEQ_CUR_ID)."""
    query_migrate = """
    INSERT INTO "PROD_001"."CURRENCY"
    ("CUR_ID", "CUR_CODE", "CUR_TYPE")
    VALUES (NEXTVAL('"PROD_001"."SEQ_CUR_ID"'),%s,%s)
    """
    with db_connection.cursor() as cursor:
        for record in data.itertuples():
            cursor.execute(query_migrate, (record.code, record.type))
def get_exchanges(db_connection):
    """Return all rows of "public"."exchanges" ordered by id."""
    return pd.read_sql_query(
        """
    SELECT *
    FROM "public"."exchanges"
    ORDER BY "id" ASC
    """,
        db_connection,
    )
def migrate_exchanges(db_connection, data):
    """Copy exchange rows into "PROD_001"."EXCHANGE" (ids from SEQ_EXC_ID)."""
    query_migrate = """
    INSERT INTO "PROD_001"."EXCHANGE"
    ("EXC_ID", "EXC_NAME")
    VALUES (NEXTVAL('"PROD_001"."SEQ_EXC_ID"'),%s)
    """
    with db_connection.cursor() as cursor:
        for record in data.itertuples():
            cursor.execute(query_migrate, (record.name,))
def get_pairs(db_connection):
    """Return all rows of "public"."currency_pairs" ordered by id."""
    return pd.read_sql_query(
        """
    SELECT *
    FROM "public"."currency_pairs"
    ORDER BY "id" ASC
    """,
        db_connection,
    )
def populate_pairs(db_connection, pairs):
    """
    Insert currency pairs into "PROD_001"."CURRENCY_PAIR".

    Inputs: List of dicts with pairs aka {"BASE": "LINK", "QUOTE": "BTC"};
    the currency codes must already exist in "PROD_001"."CURRENCY".

    Bug fix: the INSERT previously targeted the columns "CUR_ID",
    "CUR_CUP_ID_BASE" and "CUR_CUP_ID_QUOTE", which do not exist — the
    schema created by create_schema_relations() defines "CUP_ID",
    "CUP_CUR_ID_BASE" and "CUP_CUR_ID_QUOTE".
    """
    query_keys = """
    SELECT *
    FROM "PROD_001"."CURRENCY"
    """
    keys = pd.read_sql_query(query_keys, db_connection)

    query = """
    INSERT INTO "PROD_001"."CURRENCY_PAIR"
    ("CUP_ID", "CUP_CUR_ID_BASE", "CUP_CUR_ID_QUOTE", "CUP_CODE")
    VALUES (NEXTVAL('"PROD_001"."SEQ_CUP_ID"'),%s,%s,%s)
    """
    with db_connection.cursor() as cursor:
        for pair in pairs:
            # Resolve the numeric currency ids for base and quote codes.
            cur_id_base = keys[keys["CUR_CODE"] == pair["BASE"]]["CUR_ID"].iloc[0]
            cur_id_quote = keys[keys["CUR_CODE"] == pair["QUOTE"]]["CUR_ID"].iloc[0]
            pair_code = pair["BASE"] + pair["QUOTE"]
            cursor.execute(query, (cur_id_base, cur_id_quote, pair_code))
# TODO complement exchange specific technicals: precision, step size, etc.
def populate_exchange_currency_pair(db_connection, exchange_names,
                                    currency_pairs):
    """
    Write the cross product of `exchange_names` and `currency_pairs` to
    "PROD_001"."EXCHANGE_CURRENCY_PAIR".

    Bug fixes:
    - The INSERT previously listed 7 columns but supplied only 3 values.
    - The id came from SEQ_EXC_ID (the EXCHANGE sequence) instead of
      SEQ_ECP_ID.
    - The key lookup read through the module-global `db_connector` instead
      of the `db_connection` parameter.
    - The IN clauses are now parameterized (psycopg2 adapts Python tuples
      to SQL value lists), which also removes the old single-element list
      duplication hack and no longer mutates the caller's arguments.
    """
    query_keys = """
    SELECT
        "EXC_ID",
        "CUP_ID"
    FROM
        "PROD_001"."CURRENCY_PAIR",
        "PROD_001"."EXCHANGE"
    WHERE
        "EXC_NAME" IN %s
        AND "CUP_CODE" IN %s
    ORDER BY "EXC_ID"
    """
    keys = pd.read_sql_query(
        query_keys, db_connection,
        params=(tuple(exchange_names), tuple(currency_pairs)))

    query = """
    INSERT INTO "PROD_001"."EXCHANGE_CURRENCY_PAIR"
    ("ECP_ID", "ECP_EXC_ID", "ECP_CUP_ID")
    VALUES (NEXTVAL('"PROD_001"."SEQ_ECP_ID"'),%s,%s)
    """
    with db_connection.cursor() as cursor:
        for row in keys.itertuples():
            cursor.execute(query, (row.EXC_ID, row.CUP_ID))
def populate_algo_exchange_asso(db_connection, algo_names: List,
                                exchanges: List):
    """
    Write the cross product of `algo_names` and `exchanges` to
    "PROD_001"."ALGO_EXCHANGE_ASSOCIATION".

    The IN clauses are parameterized (psycopg2 adapts Python tuples to SQL
    value lists) instead of string-formatted, which avoids SQL-injection
    risks, handles single-element lists correctly, and no longer mutates
    the caller's lists.
    """
    query_keys = """
    SELECT
        "ALR_ID",
        "EXC_ID"
    FROM
        "PROD_001"."ALGO_REGISTRY",
        "PROD_001"."EXCHANGE"
    WHERE
        "ALR_NAME" IN %s
        AND "EXC_NAME" IN %s
    """
    query_populate = """
    INSERT INTO "PROD_001"."ALGO_EXCHANGE_ASSOCIATION"
    ("AEA_ID", "AEA_ALR_ID", "AEA_EXC_ID")
    VALUES (NEXTVAL('"PROD_001"."SEQ_AEA_ID"'),%s,%s)
    """
    keys = pd.read_sql_query(
        query_keys, db_connection,
        params=(tuple(algo_names), tuple(exchanges)))

    with db_connection.cursor() as cursor:
        for row in keys.itertuples():
            cursor.execute(query_populate, (row.ALR_ID, row.EXC_ID))
def populate_algo_currency_asso(db_connection, algo_names,
                                currency_pairs: List):
    """
    Write the cross product of `algo_names` and `currency_pairs` to
    "PROD_001"."ALGO_CURRENCY_ASSOCIATION".

    The IN clauses are parameterized (psycopg2 adapts Python tuples to SQL
    value lists) instead of string-formatted, which avoids SQL-injection
    risks, handles single-element lists correctly, and no longer mutates
    the caller's lists.
    """
    query_keys = """
    SELECT
        "ALR_ID",
        "CUP_ID"
    FROM
        "PROD_001"."ALGO_REGISTRY",
        "PROD_001"."CURRENCY_PAIR"
    WHERE
        "ALR_NAME" IN %s
        AND "CUP_CODE" IN %s
    """
    query_populate = """
    INSERT INTO "PROD_001"."ALGO_CURRENCY_ASSOCIATION"
    ("ACA_ID", "ACA_ALR_ID", "ACA_CUP_ID")
    VALUES (NEXTVAL('"PROD_001"."SEQ_ACA_ID"'),%s,%s)
    """
    keys = pd.read_sql_query(
        query_keys, db_connection,
        params=(tuple(algo_names), tuple(currency_pairs)))

    with db_connection.cursor() as cursor:
        for row in keys.itertuples():
            cursor.execute(query_populate, (row.ALR_ID, row.CUP_ID))
def create_schema_relations(db_connection):
    """
    Create the "PROD_001" schema with all tables, foreign keys and
    sequences.

    Bug fix: the PERFORMANCE_LOG table definition had a trailing comma
    after its last column ("PEL_VALUE"), which is a PostgreSQL syntax
    error and made the whole script fail.
    """
    query = """
    --Create Schema, Tables and References
    --If you want to recreate and overwrite the existing schema, use the
    --following command:
    --DROP SCHEMA IF EXISTS "PROD_001" CASCADE;
    CREATE SCHEMA IF NOT EXISTS "PROD_001";

    CREATE TABLE "PROD_001"."ALGO_REGISTRY" (
        "ALR_ID" INT PRIMARY KEY NOT NULL,
        "ALR_NAME" VARCHAR NOT NULL,
        "ALR_DESCRIPTION" VARCHAR NOT NULL,
        "ALR_STATUS" VARCHAR NOT NULL
    );

    CREATE TABLE "PROD_001"."EXCHANGE" (
        "EXC_ID" INT PRIMARY KEY NOT NULL,
        "EXC_NAME" VARCHAR NOT NULL
    );

    CREATE TABLE "PROD_001"."CURRENCY" (
        "CUR_ID" INT PRIMARY KEY NOT NULL,
        "CUR_CODE" VARCHAR NOT NULL,
        "CUR_TYPE" VARCHAR NOT NULL
    );

    CREATE TABLE "PROD_001"."METRIC_DEFINITION" (
        "MED_ID" INT PRIMARY KEY NOT NULL,
        "MED_NAME" VARCHAR NOT NULL,
        "MED_DESCRIPTION" VARCHAR NOT NULL,
        "MED_CLASS_NAME" VARCHAR NOT NULL
    );

    CREATE TABLE "PROD_001"."METRIC_CLASS_PARAMETER" (
        "MCP_ID" INT PRIMARY KEY NOT NULL,
        "MCP_MED_ID" INT REFERENCES "PROD_001"."METRIC_DEFINITION"("MED_ID") NOT NULL,
        "MCP_NAME" VARCHAR NOT NULL,
        "MCP_VALUE" VARCHAR NOT NULL
    );

    CREATE TABLE "PROD_001"."ALGO_METRIC_ASSOCIATION" (
        "AMA_ID" INT PRIMARY KEY NOT NULL,
        "AMA_ALR_ID" INT REFERENCES "PROD_001"."ALGO_REGISTRY"("ALR_ID") NOT NULL,
        "AMA_MED_ID" INT REFERENCES "PROD_001"."METRIC_DEFINITION"("MED_ID") NOT NULL
    );

    CREATE TABLE "PROD_001"."ALGO_CONFIGURATION" (
        "ALC_ID" INT PRIMARY KEY NOT NULL,
        "ALC_ALR_ID" INT REFERENCES "PROD_001"."ALGO_REGISTRY"("ALR_ID") NOT NULL,
        "ALC_NAME" VARCHAR NOT NULL,
        "ALC_VALUE" VARCHAR NOT NULL
    );

    CREATE TABLE "PROD_001"."ALGO_EXCHANGE_ASSOCIATION" (
        "AEA_ID" INT PRIMARY KEY NOT NULL,
        "AEA_ALR_ID" INT REFERENCES "PROD_001"."ALGO_REGISTRY"("ALR_ID") NOT NULL,
        "AEA_EXC_ID" INT REFERENCES "PROD_001"."EXCHANGE"("EXC_ID") NOT NULL
    );

    CREATE TABLE "PROD_001"."CURRENCY_PAIR" (
        "CUP_ID" INT PRIMARY KEY NOT NULL,
        "CUP_CUR_ID_BASE" INT REFERENCES "PROD_001"."CURRENCY"("CUR_ID") NOT NULL,
        "CUP_CUR_ID_QUOTE" INT REFERENCES "PROD_001"."CURRENCY"("CUR_ID") NOT NULL,
        "CUP_CODE" VARCHAR NOT NULL
    );

    CREATE TABLE "PROD_001"."ALGO_CURRENCY_ASSOCIATION" (
        "ACA_ID" INT PRIMARY KEY NOT NULL,
        "ACA_ALR_ID" INT REFERENCES "PROD_001"."ALGO_REGISTRY"("ALR_ID") NOT NULL,
        "ACA_CUP_ID" INT REFERENCES "PROD_001"."CURRENCY_PAIR"("CUP_ID") NOT NULL
    );

    CREATE TABLE "PROD_001"."EXCHANGE_CURRENCY_PAIR" (
        "ECP_ID" INT PRIMARY KEY NOT NULL,
        "ECP_EXC_ID" INT REFERENCES "PROD_001"."EXCHANGE"("EXC_ID") NOT NULL,
        "ECP_CUP_ID" INT REFERENCES "PROD_001"."CURRENCY_PAIR"("CUP_ID") NOT NULL,
        "ECP_BASE_PREC" DOUBLE PRECISION,
        "ECP_QUOTE_PREC" DOUBLE PRECISION,
        "ECP_MIN_QTY" DOUBLE PRECISION,
        "ECP_STEPSIZE" DOUBLE PRECISION
    );

    CREATE TABLE "PROD_001"."BALANCE" (
        "BAL_ID" BIGINT PRIMARY KEY NOT NULL,
        "BAL_TIMESTAMP" TIMESTAMPTZ NOT NULL,
        "BAL_ALR_ID" INT REFERENCES "PROD_001"."ALGO_REGISTRY"("ALR_ID") NOT NULL,
        "BAL_EXC_ID" INT REFERENCES "PROD_001"."EXCHANGE"("EXC_ID") NOT NULL,
        "BAL_CUR_ID" INT REFERENCES "PROD_001"."CURRENCY"("CUR_ID") NOT NULL,
        "BAL_AMOUNT" DOUBLE PRECISION NOT NULL,
        "BAL_QUOTE_PRICE" DOUBLE PRECISION NOT NULL,
        "BAL_QUOTE_CURRENCY" INT REFERENCES "PROD_001"."CURRENCY"("CUR_ID") NOT NULL
    );

    CREATE TABLE "PROD_001"."UNITS" (
        "UNI_ID" INT PRIMARY KEY NOT NULL,
        "UNI_SYMBOL" VARCHAR NOT NULL,
        "UNI_DESCRIPTION" VARCHAR
    );

    CREATE TABLE "PROD_001"."PERFORMANCE_LOG" (
        "PEL_ID" BIGINT PRIMARY KEY NOT NULL,
        "PEL_ALR_ID" INT REFERENCES "PROD_001"."ALGO_REGISTRY"("ALR_ID") NOT NULL,
        "PEL_MED_ID" INT REFERENCES "PROD_001"."METRIC_DEFINITION"("MED_ID") NOT NULL,
        "PEL_UNI_ID" INT REFERENCES "PROD_001"."UNITS"("UNI_ID") NOT NULL,
        "PEL_TIMESTAMP" TIMESTAMPTZ NOT NULL,
        "PEL_VALUE" DOUBLE PRECISION NOT NULL
    );

    CREATE TABLE "PROD_001"."ORDER_LOG" (
        "ORL_ID" UUID PRIMARY KEY NOT NULL,
        "ORL_ALR_ID" INT REFERENCES "PROD_001"."ALGO_REGISTRY"("ALR_ID") NOT NULL,
        "ORL_EXC_ID" INT REFERENCES "PROD_001"."EXCHANGE"("EXC_ID") NOT NULL,
        "ORL_CUP_ID" INT REFERENCES "PROD_001"."CURRENCY_PAIR"("CUP_ID") NOT NULL,
        "ORL_COMBO_ID" VARCHAR,
        "ORL_TIMESTAMP" TIMESTAMPTZ NOT NULL,
        "ORL_QUANTITY" DOUBLE PRECISION NOT NULL,
        "ORL_PRICE" DOUBLE PRECISION NOT NULL,
        "ORL_DIRECTION" VARCHAR NOT NULL,
        "ORL_TYPE" VARCHAR NOT NULL,
        "ORL_Q_FILLED" DOUBLE PRECISION NOT NULL,
        "ORL_STATUS" VARCHAR NOT NULL,
        "ORL_FEE" DOUBLE PRECISION NOT NULL,
        "ORL_FEE_CURRENCY" INT REFERENCES "PROD_001"."CURRENCY"("CUR_ID") NOT NULL
    );

    CREATE TABLE "PROD_001"."SYSTEM_LOG" (
        "SYL_ID" BIGINT PRIMARY KEY NOT NULL,
        "SYL_ENTITY_NAME" VARCHAR NOT NULL,
        "SYL_ORL_ID" UUID,
        "SYL_TIMESTAMP" TIMESTAMPTZ NOT NULL,
        "SYL_LEVEL" VARCHAR NOT NULL,
        "SYL_FILE" VARCHAR NOT NULL,
        "SYL_FUNCTION" VARCHAR NOT NULL,
        "SYL_LINE_NO" INT NOT NULL,
        "SYL_MESSAGE" VARCHAR NOT NULL
    );

    CREATE TABLE "PROD_001"."TRANSFERS" (
        "TRF_ID" BIGINT PRIMARY KEY NOT NULL,
        "TRF_ALR_ID" INT REFERENCES "PROD_001"."ALGO_REGISTRY"("ALR_ID") NOT NULL,
        "TRF_EXC_ID_IN" INT REFERENCES "PROD_001"."EXCHANGE"("EXC_ID") NOT NULL,
        "TRF_EXC_ID_OUT" INT REFERENCES "PROD_001"."EXCHANGE"("EXC_ID") NOT NULL,
        "TRF_CUR_ID" INT REFERENCES "PROD_001"."CURRENCY"("CUR_ID") NOT NULL,
        "TRF_TIMESTAMP" TIMESTAMPTZ NOT NULL,
        "TRF_AMOUNT" DOUBLE PRECISION NOT NULL
    );

    CREATE TABLE "PROD_001"."LATENCY_LOG" (
        "LAL_ID" BIGINT PRIMARY KEY NOT NULL,
        "LAL_TIMESTAMP" TIMESTAMPTZ NOT NULL,
        "LAL_TYPE" VARCHAR NOT NULL,
        "LAL_ORL_ID" UUID,
        "LAL_VALUE" INT
    );

    --Create Sequences
    CREATE SEQUENCE "PROD_001"."SEQ_ALR_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."ALGO_REGISTRY"."ALR_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_EXC_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."EXCHANGE"."EXC_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_CUR_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."CURRENCY"."CUR_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_MED_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."METRIC_DEFINITION"."MED_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_AMA_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."ALGO_METRIC_ASSOCIATION"."AMA_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_MCP_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."METRIC_CLASS_PARAMETER"."MCP_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_ALC_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."ALGO_CONFIGURATION"."ALC_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_AEA_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."ALGO_EXCHANGE_ASSOCIATION"."AEA_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_CUP_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."CURRENCY_PAIR"."CUP_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_ACA_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."ALGO_CURRENCY_ASSOCIATION"."ACA_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_ECP_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."EXCHANGE_CURRENCY_PAIR"."ECP_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_BAL_ID"
        AS BIGINT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."BALANCE"."BAL_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_PEL_ID"
        AS BIGINT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."PERFORMANCE_LOG"."PEL_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_SYL_ID"
        AS BIGINT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."SYSTEM_LOG"."SYL_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_TRF_ID"
        AS BIGINT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."TRANSFERS"."TRF_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_LAL_ID"
        AS BIGINT
        START 1
        INCREMENT 1
        OWNED BY "PROD_001"."LATENCY_LOG"."LAL_ID";

    CREATE SEQUENCE "PROD_001"."SEQ_UNI_ID"
        AS INT
        INCREMENT 1
        START 1
        OWNED BY "PROD_001"."UNITS"."UNI_ID";
    """
    with db_connection.cursor() as cursor:
        cursor.execute(query)
    print("SUCCESS: CREATE NEW SCHEMA")
############################### START SCRIPT ###################################
# NOTE(review): everything below runs at import time; the __main__ guard at
# the bottom only contains `pass`, so importing this module performs the
# full migration — confirm that is intended.
create_schema_relations(db_connector.connection)

# 1. Copy the algo registry and remember the old-id -> new-ALR_ID mapping.
alr = get_algo_registry(db_connector.connection)
lookup = migrate_algo_registry(db_connector.connection, alr)

# 2. Copy the algo configuration using that mapping.
alc = get_algo_config(db_connector.connection)
migrate_algo_config(db_connector.connection, alc, lookup)

# 3. Copy currencies and exchanges verbatim.
cur = get_currency(db_connector.connection)
migrate_currency(db_connector.connection, cur)

exc = get_exchanges(db_connector.connection)
migrate_exchanges(db_connector.connection, exc)

# 4. Build currency pairs and association tables from the constants at the
#    top of the file.
# NOTE(review): `cup` is never used afterwards — dead assignment?
cup = get_pairs(db_connector.connection)
populate_pairs(db_connector.connection, pairs)

populate_exchange_currency_pair(
    db_connector.connection, exchange_names, currency_pairs
)

populate_algo_exchange_asso(db_connector.connection, algos, algo_exchanges)
populate_algo_currency_asso(db_connector.connection, algos, cups_algo)

print("SUCCESS: POPULATE NEW SCHEMA")

if __name__ == '__main__':
    pass
| StarcoderdataPython |
from slack_sdk.web.async_client import AsyncWebClient
class AsyncUpdate:
    """`update()` utility to tell Slack the processing results of a `save` listener.

    Typical usage inside a workflow step's `save` listener::

        async def save(ack, view, update):
            await ack()
            values = view["state"]["values"]
            task_name = values["task_name_input"]["name"]
            task_description = values["task_description_input"]["description"]
            inputs = {
                "task_name": {"value": task_name["value"]},
                "task_description": {"value": task_description["value"]}
            }
            outputs = [
                {"type": "text", "name": "task_name", "label": "Task name"},
                {"type": "text", "name": "task_description", "label": "Task description"},
            ]
            await update(inputs=inputs, outputs=outputs)

        ws = AsyncWorkflowStep(
            callback_id="add_task",
            edit=edit,
            save=save,
            execute=execute,
        )
        app.step(ws)

    This utility is a thin wrapper around the workflows.updateStep API method.
    Refer to https://api.slack.com/methods/workflows.updateStep for details.
    """

    def __init__(self, *, client: AsyncWebClient, body: dict):
        # Keep the web client and the request payload; the payload holds the
        # workflow_step_edit_id needed by the API call.
        self.client = client
        self.body = body

    async def __call__(self, **kwargs) -> None:
        workflow_step = self.body["workflow_step"]
        await self.client.workflows_updateStep(
            workflow_step_edit_id=workflow_step["workflow_step_edit_id"],
            **kwargs,
        )
| StarcoderdataPython |
import os
from functools import wraps
from flask import request
from swagger_server.response_code.cors_response import cors_401
def login_required(f):
    """Decorator rejecting requests that lack the Vouch session cookie."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        cookie_name = os.getenv('VOUCH_COOKIE_NAME')
        if cookie_name in request.cookies:
            return f(*args, **kwargs)
        # No session cookie: answer 401 with a pointer to the login URL.
        return cors_401(details='Login required: {0}/login'.format(os.getenv('API_SERVER_URL')))
    return wrapper
| StarcoderdataPython |
"""
Module with reading functionalities for calibration spectra.
"""
import os
import configparser
from typing import Optional, Dict, Tuple
import h5py
import spectres
import numpy as np
from typeguard import typechecked
from scipy.optimize import curve_fit
from species.analysis import photometry
from species.core import box
from species.read import read_filter
from species.util import read_util
class ReadCalibration:
"""
Class for reading a calibration spectrum from the database.
"""
@typechecked
def __init__(self,
tag: str,
filter_name: Optional[str] = None) -> None:
"""
Parameters
----------
tag : str
Database tag of the calibration spectrum.
filter_name : str, None
Filter name that is used for the wavelength range. Full spectrum is used if set to
``None``.
Returns
-------
NoneType
None
"""
self.tag = tag
self.filter_name = filter_name
if filter_name is None:
self.wavel_range = None
else:
transmission = read_filter.ReadFilter(filter_name)
self.wavel_range = transmission.wavelength_range()
config_file = os.path.join(os.getcwd(), 'species_config.ini')
config = configparser.ConfigParser()
config.read_file(open(config_file))
self.database = config['species']['database']
    @typechecked
    def resample_spectrum(self,
                          wavel_points: np.ndarray,
                          model_param: Optional[Dict[str, float]] = None,
                          apply_mask: bool = False) -> box.SpectrumBox:
        """
        Function for resampling of a spectrum and uncertainties onto a new wavelength grid.

        Parameters
        ----------
        wavel_points : np.ndarray
            Wavelength points (um).
        model_param : dict, None
            Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.
        apply_mask : bool
            Exclude negative values and NaN values.

        Returns
        -------
        species.core.box.SpectrumBox
            Box with the resampled spectrum.
        """
        calibbox = self.get_spectrum()

        if apply_mask:
            # Keep only strictly positive fluxes; this also drops NaN
            # values since NaN comparisons evaluate to False.
            indices = np.where(calibbox.flux > 0.)[0]

            calibbox.wavelength = calibbox.wavelength[indices]
            calibbox.flux = calibbox.flux[indices]
            calibbox.error = calibbox.error[indices]

        # Resample flux and errors onto the requested grid; points outside
        # the input wavelength coverage are filled with zeros.
        flux_new, error_new = spectres.spectres(wavel_points,
                                                calibbox.wavelength,
                                                calibbox.flux,
                                                spec_errs=calibbox.error,
                                                fill=0.,
                                                verbose=False)

        if model_param is not None:
            # Apply the calibration scaling to both flux and uncertainty.
            flux_new = model_param['scaling']*flux_new
            error_new = model_param['scaling']*error_new

        return box.create_box(boxtype='spectrum',
                              spectrum='calibration',
                              wavelength=wavel_points,
                              flux=flux_new,
                              error=error_new,
                              name=self.tag,
                              simbad=None,
                              sptype=None,
                              distance=None)
    @typechecked
    def get_spectrum(self,
                     model_param: Optional[Dict[str, float]] = None,
                     apply_mask: bool = False,
                     spec_res: Optional[float] = None,
                     extrapolate: bool = False,
                     min_wavelength: Optional[float] = None) -> box.SpectrumBox:
        """
        Function for selecting the calibration spectrum.

        Parameters
        ----------
        model_param : dict, None
            Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.
        apply_mask : bool
            Exclude negative values and NaN values.
        spec_res : float, None
            Spectral resolution. Original wavelength points are used if set to ``None``.
        extrapolate : bool
            Extrapolate to 6 um by fitting a power law function.
        min_wavelength : float, None
            Minimum wavelength used for fitting the power law function. All data is used if set
            to ``None``.

        Returns
        -------
        species.core.box.SpectrumBox
            Box with the spectrum.
        """
        # The HDF5 dataset stores the spectrum as three rows:
        # wavelength, flux, and error.
        with h5py.File(self.database, 'r') as h5_file:
            data = np.asarray(h5_file[f'spectra/calibration/{self.tag}'])

            wavelength = np.asarray(data[0, ])
            flux = np.asarray(data[1, ])
            error = np.asarray(data[2, ])

        if apply_mask:
            # Keep strictly positive fluxes (this also removes NaN values).
            indices = np.where(flux > 0.)[0]

            wavelength = wavelength[indices]
            flux = flux[indices]
            error = error[indices]

        if model_param is not None:
            flux = model_param['scaling']*flux
            error = model_param['scaling']*error

        if self.wavel_range is None:
            # No filter selected: use the full spectrum.
            wl_index = np.ones(wavelength.size, dtype=bool)
        else:
            wl_index = (flux > 0.) & (wavelength > self.wavel_range[0]) & \
                       (wavelength < self.wavel_range[1])

        count = np.count_nonzero(wl_index)

        if count > 0:
            index = np.where(wl_index)[0]

            # Include one extra point on each side of the selected range
            # so the filter edges are covered.
            if index[0] > 0:
                wl_index[index[0] - 1] = True

            if index[-1] < len(wl_index)-1:
                wl_index[index[-1] + 1] = True

            wavelength = wavelength[wl_index]
            flux = flux[wl_index]
            error = error[wl_index]

        if extrapolate:
            def _power_law(wavelength, offset, scaling, power_index):
                # f(x) = a + b * x^c
                return offset + scaling*wavelength**power_index

            if min_wavelength:
                indices = np.where(wavelength > min_wavelength)[0]
            else:
                indices = np.arange(0, wavelength.size, 1)

            popt, pcov = curve_fit(f=_power_law,
                                   xdata=wavelength[indices],
                                   ydata=flux[indices],
                                   p0=(0., np.mean(flux[indices]), -1.),
                                   sigma=error[indices])

            sigma = np.sqrt(np.diag(pcov))

            print('Fit result for f(x) = a + b*x^c:')
            print(f'a = {popt[0]} +/- {sigma[0]}')
            print(f'b = {popt[1]} +/- {sigma[1]}')
            print(f'c = {popt[2]} +/- {sigma[2]}')

            # Append power-law samples (with zero uncertainty) until the
            # grid reaches 6 um, spacing points by wavelength/1000.
            while wavelength[-1] <= 6.:
                wl_add = wavelength[-1] + wavelength[-1]/1000.

                wavelength = np.append(wavelength, wl_add)
                flux = np.append(flux, _power_law(wl_add, popt[0], popt[1], popt[2]))
                error = np.append(error, 0.)

        if spec_res is not None:
            # Resample onto a logarithmic wavelength grid of the requested
            # spectral resolution.
            wavelength_new = read_util.create_wavelengths((wavelength[0], wavelength[-1]),
                                                          spec_res)

            flux_new, error_new = spectres.spectres(wavelength_new,
                                                    wavelength,
                                                    flux,
                                                    spec_errs=error,
                                                    fill=0.,
                                                    verbose=True)

            wavelength = wavelength_new
            flux = flux_new
            error = error_new

        return box.create_box(boxtype='spectrum',
                              spectrum='calibration',
                              wavelength=wavelength,
                              flux=flux,
                              error=error,
                              name=self.tag,
                              simbad=None,
                              sptype=None,
                              distance=None)
@typechecked
def get_flux(self,
model_param: Optional[Dict[str, float]] = None) -> Tuple[float, float]:
"""
Function for calculating the average flux for the ``filter_name``.
Parameters
----------
model_param : dict, None
Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.
Returns
-------
tuple(float, float)
Average flux and uncertainty (W m-2 um-1).
"""
specbox = self.get_spectrum(model_param=model_param)
synphot = photometry.SyntheticPhotometry(self.filter_name)
return synphot.spectrum_to_flux(specbox.wavelength, specbox.flux, error=specbox.flux)
@typechecked
def get_magnitude(self,
model_param: Optional[Dict[str, float]] = None,
distance: Optional[Tuple[float, float]] = None) -> Tuple[
Tuple[float, Optional[float]], Tuple[Optional[float], Optional[float]]]:
"""
Function for calculating the apparent magnitude for the ``filter_name``.
Parameters
----------
model_param : dict, None
Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.
distance : tuple(float, float), None
Distance and uncertainty to the calibration object (pc). Not used if set to ``None``,
in which case the returned absolute magnitude is ``(None, None)``.
Returns
-------
tuple(float, float)
Apparent magnitude and uncertainty.
tuple(float, float), tuple(None, None)
Absolute magnitude and uncertainty.
"""
specbox = self.get_spectrum(model_param=model_param)
if np.count_nonzero(specbox.error) == 0:
error = None
else:
error = specbox.error
synphot = photometry.SyntheticPhotometry(self.filter_name)
return synphot.spectrum_to_magnitude(specbox.wavelength,
specbox.flux,
error=error,
distance=distance)
| StarcoderdataPython |
3274558 | <reponame>GabrielAmare/TextEngine
from typing import Iterator
from item_engine import *
from .mood_lexer import mood_lexer
__all__ = ['gen_networks']
def gen_networks(mood_lexer_cfg: dict) -> Iterator[Network]:
    """Yield the item-engine networks of this package.
    Parameters
    ----------
    mood_lexer_cfg : dict
        Keyword arguments forwarded to the ``Network`` constructor for the
        mood lexer (everything except ``function``, which is fixed).
    """
    yield Network(function=mood_lexer, **mood_lexer_cfg)
| StarcoderdataPython |
37653 | <gh_stars>0
import datetime
import htmlgenerator
from django.utils.translation import gettext as _
from .button import Button
from .icon import Icon
# Maps a notification "kind" to the Carbon Design icon name rendered for it.
KIND_ICON_MAPPING = {
    "error": "error--filled",
    "info": "information--filled",
    "info-square": "information--square--filled",
    "success": "checkmark--filled",
    "warning": "warning--filled",
    "warning-alt": "warning--alt--filled",
}
class InlineNotification(htmlgenerator.DIV):
    """Carbon Design "inline notification" component: an icon, a title and a
    subtitle, with an optional ghost action button and a close button."""
    def __init__(
        self,
        title,
        subtitle,
        action=None,
        kind="info",
        lowcontrast=False,
        hideclosebutton=False,
        **attributes,
    ):
        """
        action: tuple with (action_name, javascript_onclick), e.g. ("Open Google", "windows.location='https://google.com'")
        kind: can be one of "error" "info", "info-square", "success", "warning", "warning-alt"
        lowcontrast: use the low-contrast visual variant
        hideclosebutton: omit the close ("x") button
        """
        assert (
            kind in KIND_ICON_MAPPING
        ), f"kind '{kind}' does not exists, must be one of {KIND_ICON_MAPPING.keys()}"
        assert action is None or (
            len(action) == 2
        ), "action must be a tuple with: (action_name, javascript_onclick)"
        # data-notification marks the element for the Carbon JS behavior.
        attributes["data-notification"] = True
        attributes["_class"] = (
            attributes.get("_class", "")
            + f" bx--inline-notification bx--inline-notification--{kind}"
        )
        if lowcontrast:
            attributes["_class"] += " bx--inline-notification--low-contrast"
        attributes["role"] = "alert"
        # Icon + title/subtitle wrapper.
        children = [
            htmlgenerator.DIV(
                Icon(
                    KIND_ICON_MAPPING[kind],
                    size=20,
                    _class="bx--inline-notification__icon",
                ),
                htmlgenerator.DIV(
                    htmlgenerator.P(title, _class="bx--inline-notification__title"),
                    htmlgenerator.P(
                        subtitle, _class="bx--inline-notification__subtitle"
                    ),
                    _class="bx--inline-notification__text-wrapper",
                ),
                _class="bx--inline-notification__details",
            ),
        ]
        if action is not None:
            # Optional ghost button wired to the given onclick JavaScript.
            children.append(
                Button(
                    action[0],
                    onclick=action[1],
                    type="ghost",
                    small=True,
                    _class="bx--inline-notification__action-button",
                )
            )
        if not hideclosebutton:
            children.append(
                htmlgenerator.BUTTON(
                    Icon(
                        "close", size=20, _class="bx--inline-notification__close-icon"
                    ),
                    data_notification_btn=True,
                    _class="bx--inline-notification__close-button",
                    aria_label="close",
                )
            )
        super().__init__(*children, **attributes)
class ToastNotification(htmlgenerator.DIV):
    """Carbon Design "toast notification" component: icon, title, subtitle and
    an optional timestamp caption that is filled in at render time."""
    def __init__(
        self,
        title,
        subtitle,
        kind="info",
        lowcontrast=False,
        hideclosebutton=False,
        hidetimestamp=False,
        **attributes,
    ):
        """
        kind: can be one of "error" "info", "info-square", "success", "warning", "warning-alt"
        lowcontrast: use the low-contrast visual variant
        hideclosebutton: omit the close ("x") button
        hidetimestamp: omit the "Time stamp" caption (see render())
        """
        assert (
            kind in KIND_ICON_MAPPING
        ), f"kind '{kind}' does not exists, must be one of {KIND_ICON_MAPPING.keys()}"
        # Remembered so render() knows whether to append the current time.
        self.hidetimestamp = hidetimestamp
        attributes["data-notification"] = True
        attributes["_class"] = (
            attributes.get("_class", "")
            + f" bx--toast-notification bx--toast-notification--{kind}"
        )
        if lowcontrast:
            attributes["_class"] += " bx--toast-notification--low-contrast"
        attributes["role"] = "alert"
        timestampelem = (
            [
                htmlgenerator.P(
                    _("Time stamp "), _class="bx--toast-notification__caption"
                )
            ]
            if not hidetimestamp
            else []
        )
        children = [
            Icon(
                KIND_ICON_MAPPING[kind],
                size=20,
                _class="bx--toast-notification__icon",
            ),
            htmlgenerator.DIV(
                htmlgenerator.H3(title, _class="bx--toast-notification__title"),
                htmlgenerator.P(subtitle, _class="bx--toast-notification__subtitle"),
                *timestampelem,
                _class="bx--toast-notification__details",
            ),
        ]
        if not hideclosebutton:
            children.append(
                htmlgenerator.BUTTON(
                    Icon("close", size=20, _class="bx--toast-notification__close-icon"),
                    data_notification_btn=True,
                    _class="bx--toast-notification__close-button",
                    aria_label="close",
                )
            )
        super().__init__(*children, **attributes)
    def render(self, context):
        # Append the current wall-clock time ("[HH:MM:SS]") to the timestamp
        # caption (children[1][2]) at render time, so each render is current.
        if not self.hidetimestamp:
            self[1][2].append(
                "[" + datetime.datetime.now().time().isoformat()[:8] + "]"
            )
        return super().render(context)
3391674 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Basic ProcfileLexer Test
~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
from pygments.token import Name, Punctuation, Text
from pygments.lexers.procfile import ProcfileLexer
@pytest.fixture(scope='module')
def lexer():
    # One shared ProcfileLexer instance for all tests in this module.
    yield ProcfileLexer()
def test_basic_line(lexer):
    """A Procfile entry is lexed as a label, a colon, then plain text."""
    tokens = list(lexer.get_tokens('task: executable --options'))
    assert tokens[0] == (Name.Label, 'task')
    assert tokens[1] == (Punctuation, ':')
    for token_type, _ in tokens[2:]:
        assert token_type in (Text, Text.Whitespace)
def test_environment_variable(lexer):
    """A bare environment variable lexes as a single Name.Variable token."""
    text = '$XDG_SESSION_PATH'
    first_token = next(iter(lexer.get_tokens(text)))
    assert first_token == (Name.Variable, text)
| StarcoderdataPython |
# egrep.py
# Reads lines of text from stdin and writes back the ones that match a
# regular expression given as the first command-line argument.
import sys, re


def grep_lines(pattern, lines):
    """Yield each line from *lines* that matches *pattern* (re.search semantics)."""
    # Compile once instead of re-matching the raw pattern on every line.
    compiled = re.compile(pattern)
    return (line for line in lines if compiled.search(line))


def main():
    # sys.argv[0] is the program name; sys.argv[1] is the regex. Guard the
    # access so a missing argument prints usage instead of an IndexError.
    if len(sys.argv) != 2:
        sys.stderr.write("usage: egrep.py PATTERN\n")
        sys.exit(2)
    for line in grep_lines(sys.argv[1], sys.stdin):
        sys.stdout.write(line)


if __name__ == "__main__":
    # Guarded so importing this module does not consume stdin.
    main()
1694773 | <gh_stars>1-10
import os
import numpy as np
import pickle
import datetime
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import pyqtgraph as pg
from supra.GUI.Tools.Theme import theme
from supra.GUI.Tools.GUITools import *
from supra.Utils.Classes import *
from supra.GUI.Tools.CustomWidgets import MatplotlibPyQT
from supra.Utils.Formatting import *
class glmWindowDialog(QWidget):
    """Dialog for loading GLM (Geostationary Lightning Mapper) data, plotting
    it against the fireball trajectory, and exporting it for light-curve and
    MetSim use."""

    def __init__(self, bam):
        """bam: BAM session object; provides ``setup`` (trajectory, reference
        datetime) used throughout this dialog."""
        QWidget.__init__(self)
        self.bam = bam
        self.buildGUI()

    def buildGUI(self):
        """Build the window: a matplotlib map axes with the source trajectory,
        plus controls to load a GLM CSV and export it for MetSim."""
        self.setWindowTitle('Ray-Trace Viewer')
        app_icon = QtGui.QIcon()
        app_icon.addFile(os.path.join('supra', 'GUI', 'Images', 'BAM_no_wave.png'), QtCore.QSize(16, 16))
        self.setWindowIcon(app_icon)
        p = self.palette()
        p.setColor(self.backgroundRole(), Qt.black)
        self.setPalette(p)
        theme(self)
        layout = QGridLayout()
        self.setLayout(layout)
        self.glm_graph = MatplotlibPyQT()
        self.glm_graph.ax = self.glm_graph.figure.add_subplot(111)
        layout.addWidget(self.glm_graph, 1, 1, 15, 1)
        self.load_glm_label, self.load_glm_edits, self.load_glm_buton = createFileSearchObj('Load GLM: ', layout, 13, width=1, h_shift=1)
        self.load_glm_buton.clicked.connect(partial(fileSearch, ['CSV (*.csv)'], self.load_glm_edits))
        self.load_glm_buton.clicked.connect(self.procGLM)
        self.for_met_sim = createButton("Save For MetSim", layout, 13, 2, self.saveMetSim)
        traj = self.bam.setup.trajectory
        # Plot the trajectory between its top and bottom heights (lon vs lat).
        max_height = traj.pos_i.elev
        min_height = traj.pos_f.elev
        points = traj.trajInterp2(div=50, min_p=min_height, max_p=max_height)
        for pp, p in enumerate(points):
            # Only label the first point so the legend has a single entry.
            if pp == 0:
                self.glm_graph.ax.scatter(p[1], p[0], c="g", label="Source Trajectory")
            else:
                self.glm_graph.ax.scatter(p[1], p[0], c="g")
        self.glm_graph.ax.legend()

    def saveMetSim(self):
        """ Save GLM station as a fake observer camera at the end of the trajectory given
        at the center of the Earth
        """
        file_name = saveFile("csv", note="")
        time, lon, lat, energy, E, T, lc_list, h_list = self.readGLM()
        # The first GLM sample defines the pseudo-station location; all other
        # samples are written as az/zenith angles as seen from that point.
        station_location = latLonAlt2ECEF(np.radians(lat[0]), np.radians(lon[0]), h_list[0])
        with open(file_name, 'w+') as f:
            for ii in range(len(time))[1:]:
                point = latLonAlt2ECEF(np.radians(lat[ii]), np.radians(lon[ii]), h_list[ii])
                dx = point[0] - station_location[0]
                dy = point[1] - station_location[1]
                dz = point[2] - station_location[2]
                dh = np.sqrt(dx**2 + dy**2)
                az = np.arctan2(dx, dy)
                ze = np.arctan2(dz, dh)
                f.write("{:}, {:}, {:}\n".format(T[ii], np.degrees(az), np.degrees(ze)))
        print(printMessage("status"), "Output Complete!")
        print(printMessage("info"), "Output as Time [s], Azimuth (North +East), Zenith - use MeasType = 2 in MetSim")
        print(printMessage("info"), "Station Coordinates: {:.4f}N {:.4f}E {:.4f} m".format(lat[0], lon[0], h_list[0]))
        print(printMessage("info"), "Reference Time: {:}".format(self.bam.setup.fireball_datetime))

    def energyConverter(self, energy):
        """Convert GLM received energy to source energy (see Jenniskens et al.
        2018): scale by the satellite geometry factor and a blackbody factor."""
        # Source to GLM satellite distance
        R = 35780000  # m
        # 4 pi R^2 / r : r - radius of the effective apperature
        r = 0.0095
        geo_f = 4*np.pi*R**2/r
        blackbody_f = 1.018e3
        E = np.array(energy)*geo_f*blackbody_f
        return E

    def timeConverter(self, time):
        """Convert raw GLM timestamps to seconds after the fireball reference
        time. NOTE(review): the /1e3 implies the raw values are milliseconds
        since 1970-01-01 (the original comment claimed seconds) - confirm."""
        timestamp = datetime.datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
        time_list = []
        for t in time:
            time_list.append((timestamp + datetime.timedelta(seconds=t/1e3) - self.bam.setup.fireball_datetime).total_seconds())
        return time_list

    def readGLM(self):
        """ Returns time, lon, lat, energy given in GLM file
        and converted energy (E), time from reference point (T),
        magnitude (lc_list) given by Borovicka definition I = 1500*10^(M/-2.5),
        and approximate height along the trajectory (h_list)
        """
        traj = self.bam.setup.trajectory
        time = []
        lon = []
        lat = []
        energy = []
        print(printMessage("status"), "Loaded: {:}".format(self.load_glm_edits.text()))
        with open(self.load_glm_edits.text(), 'r+') as f:
            for line in f:
                a = line.strip().split(',')
                # Skip header/malformed rows: keep only rows whose first
                # column parses as a number.
                try:
                    float(a[0])
                    time.append(float(a[0]))
                    lon.append(float(a[1]))
                    lat.append(float(a[2]))
                    energy.append(float(a[3]))
                except:
                    continue
        E = self.energyConverter(energy)
        T = self.timeConverter(time)
        h_list = []
        for t in T:
            hhh = traj.approxHeight(t)
            h_list.append(hhh)
        lc_list = []
        for ii in range(len(E) - 1):
            mag = -2.5*np.log(E[ii]/1500)
            lc_list.append(mag)
        return time, lon, lat, energy, E, T, lc_list, h_list

    def procGLM(self):
        """Export the loaded GLM data as a light curve CSV (time, height in km,
        magnitude) and overlay the GLM points on the map."""
        # Bug fix: readGLM() returns 8 values (h_list was missing here, which
        # raised a ValueError on unpacking and left h_list undefined below).
        time, lon, lat, energy, E, T, lc_list, h_list = self.readGLM()
        file_name = saveFile("csv", note="")
        with open(file_name, 'w+') as f:
            f.write("# Station: GLM\n")
            # Bug fix: write one row per sample instead of formatting the
            # whole lists (h_list/1000 on a list was a TypeError).
            for ll in range(len(lc_list)):
                f.write("{:}, {:}, {:}\n".format(T[ll], h_list[ll]/1000, lc_list[ll]))
        self.glm_graph.ax.scatter(lon, lat, label="GLM")
        self.glm_graph.ax.legend()
3361218 | <reponame>quentinLeDilavrec/semantic
load(
"@bazel_tools//tools/build_defs/repo:http.bzl",
"http_archive",
)
# Pinned external GitHub repositories used as a parser test corpus.
# Each key is the workspace name; each value holds the kwargs for
# _example_repo below: glob include patterns ("data"), the pinned commit,
# the archive sha256, and optional "excludes"/"prefix" overrides.
_all_example_repos = {
    "numpy": {
        "data": [
            "**/*.py",
        ],
        "commit": "0<PASSWORD>",
        "repo": "numpy/numpy",
        "sha256": "8e60c567cbab3309afa9508ee61dfd207089ebb0056214fe60e863d81e098824",
    },
    "python": {
        "data": [
            "**/*.py",
        ],
        "commit": "<PASSWORD>",
        "repo": "thealgorithms/python",
        "prefix": "Python",
        "sha256": "bef087151bea1e479701d0ceed831809c1b916f513752dee914e9c7876b46ea9",
        "excludes": [
            "**/data structures/*",
            "**/binary tree/*",
            "**/graphs/*",
            "**/Random Forest*/*",
            "**/* */*",
        ],
    },
    "flask": {
        "data": [
            "**/*.py",
        ],
        "commit": "<PASSWORD>",
        "repo": "pallets/flask",
        "sha256": "224d406f11b13cc8e4c7defd8dc94e0df957c1c90977172cfaa2ee88d8f85e77",
    },
    "httpie": {
        "data": [
            "**/*.py",
        ],
        "commit": "358342d1c915d6462a080a77aefbb20166d0bd5d",
        "repo": "jakubroztocil/httpie",
        "sha256": "2b3172369954d883a2a609dc6bc34a944ce9817afb14733d87f208a40529899c",
    },
    "keras": {
        "data": [
            "**/*.py",
        ],
        "commit": "e59570ae26670f788d6c649191031e4a8824f955",
        "repo": "keras-team/keras",
        "sha256": "2bda5bfd2a2b43d9f4d191e4ed980740429bb86d75e16355b1d33faf9d974ffd",
    },
    "requests": {
        "data": [
            "**/*.py",
        ],
        "commit": "64bde6582d9b49e9345d9b8df16aaa26dc372d13",
        "sha256": "8f9466ad314b2741c826b164b46bcedb260d424f717fd9553fea5164f493bd20",
        "repo": "requests/requests",
    },
    "scikit-learn": {
        "data": [
            "**/*.py",
        ],
        "commit": "d0f63a760d9993a7f68cfc5e1a075700d67c53d3",
        "repo": "scikit-learn/scikit-learn",
        "sha256": "4f337b87d45cabd7db9cd3883fd5168accad7f78bc48df3ae633832b4d0f30d0",
    },
    "scrapy": {
        "data": [
            "**/*.py",
        ],
        "commit": "65d631329a1434ec013f24341e4b8520241aec70",
        "sha256": "27b2dc9b1a55c356eeec651c76fe82be082c0e8980b2e4d9b99a4f63c733685b",
        "repo": "scrapy/scrapy",
    },
    "pytorch": {
        "data": [
            "**/*.py",
        ],
        "commit": "c865d46736db4afff51690a712e35ed8e3899490",
        "repo": "pytorch/pytorch",
        "sha256": "7b54b7a3c40aaf68bb9bd7dcc509389d29c5c37241f29c003bd04cd0dafb60ce",
    },
    "certbot": {
        "data": [
            "**/*.py",
        ],
        "commit": "bb8222200a8cbd39a3ce9584ce6dfed6c5d05228",
        "sha256": "3477f4c04897f7874249e6362567384246f409c62e1ff18c4d6fa54813f484c2",
        "repo": "certbot/certbot",
    },
    "spec": {
        "data": [
            "**/*.rb",
        ],
        "commit": "c3e6b9017926f44a76e2b966c4dd35fa84c4cd3b",
        "repo": "ruby/spec",
        "sha256": "33206954ff6fdbf5b872298efc2697c18ad5371eb55007d54f95c08ec7f46bb4",
    },
    "desktop": {
        "data": [
            "**/*.[tj]s",
        ],
        "commit": "d1324f56d02dd9afca5d2e9da545905a7d41d671",
        "repo": "desktop/desktop",
        "sha256": "cfd1c6d313ff4e756b59da83f3f7799e021e0d0fe94ee4a93638c9b1aa19b5ca",
    },
}

# Labels of the `:src` filegroups that declare_example_repos() creates.
SEMANTIC_EXTERNAL_TEST_REPOSITORIES = ["@" + k + "//:src" for (k, v) in _all_example_repos.items()]
def _example_repo(name, data, repo, commit, since = "", excludes = [], sha256 = "", prefix = ""):
    """Declares an http_archive for a pinned GitHub repo exposing a `:src` filegroup.

    Args:
      name: workspace name of the external repository.
      data: glob include patterns for the filegroup.
      repo: GitHub "owner/project" slug used to build the archive URL.
      commit: pinned commit sha.
      since: unused; accepted for compatibility with table entries.
      excludes: glob patterns excluded from the filegroup.
      sha256: expected checksum of the downloaded archive.
      prefix: top-level directory inside the archive when it differs from `name`.
    """
    if prefix == "":
        prefix = name
    http_archive(
        name = name,
        build_file_content = """
filegroup(
    name = "src",
    data = glob(include = {}, exclude={}),
    visibility = ["//visibility:public"]
)
""".format(data, excludes),
        strip_prefix = prefix + "-" + commit,
        sha256 = sha256,
        urls = ["https://github.com/{repo}/archive/{commit}.tar.gz".format(repo = repo, commit = commit)],
    )
def declare_example_repos():
    """Declares every external example repository listed in _all_example_repos."""
    for k, kwargs in _all_example_repos.items():
        _example_repo(name = k, **kwargs)
| StarcoderdataPython |
1772096 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Minify a JavaScript or CSS file via the javascript-minifier.com /
cssminifier.com web APIs.

Usage: python minify.py SOURCE_FILE [js|css]
The minified output is written next to the input as ``name.min.ext``.
"""
import sys, os

# Minifier endpoint per source file type.
MINIFIER_URLS = {
    'js': 'https://javascript-minifier.com/raw',
    'css': 'https://cssminifier.com/raw',
}


def detect_file_type(file_name):
    """Return 'js' or 'css' from the file extension, or None if unknown.

    Uses the real extension, so 'data.json' is no longer misdetected as
    JavaScript (the original substring test matched '.js' inside '.json').
    """
    ext = os.path.splitext(file_name)[1].lower()
    if ext == '.js':
        return 'js'
    if ext == '.css':
        return 'css'
    return None


def minified_name(full_name):
    """Return the output path: '.min' inserted before the extension, or
    appended when the file has no extension."""
    root, ext = os.path.splitext(full_name)
    return root + '.min' + ext if ext else full_name + '.min'


def main():
    # Deferred import so the helpers above are usable without requests.
    import requests
    try:
        source_full_name = os.path.abspath(sys.argv[1])
    except IndexError:
        print("Missing input file")
        sys.exit(1)
    if len(sys.argv) > 2:
        source_type = sys.argv[2].lower()
        # Bug fix: the original tested `!= 'js' or != 'css'`, which is always
        # true, so every explicitly given type was rejected.
        if source_type not in MINIFIER_URLS:
            print("Invalid source file type! The only valid ones are js and css.")
            sys.exit(1)
    else:
        source_type = detect_file_type(source_full_name)
        if source_type is None:
            print("Couldn't determine source file type. PLEASE SPECIFY IT!")
            sys.exit(1)
    # Grab the file contents.
    with open(source_full_name, 'r') as source_file:
        source = source_file.read()
    kind = "JavaScript" if source_type == 'js' else "CSS"
    print(f"Requesting {kind} minification of {source_full_name} ...")
    # Pack it, ship it.
    request = requests.post(MINIFIER_URLS[source_type], {'input': source})
    # Write out the minified version.
    out_name = minified_name(source_full_name)
    with open(out_name, 'w') as minified_file:
        minified_file.write(request.text)
    print(f"Minification complete. See {out_name}")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3254475 | <reponame>ahcode0919/python-ds-algorithms
def pangram(string: str) -> bool:
    """Return True if *string* contains every letter of the English alphabet.

    The check is case-insensitive and ignores non-letter characters.
    """
    # Compare against the ASCII alphabet explicitly. The original counted
    # distinct str.isalpha() characters, so 26 distinct accented/non-Latin
    # letters were wrongly reported as a pangram.
    alphabet = set("abcdefghijklmnopqrstuvwxyz")
    return alphabet <= set(string.lower())
| StarcoderdataPython |
3316642 | '''Implementation of the Gamma distribution.'''
import torch
from .baseprior import ExpFamilyPrior
class GammaPrior(ExpFamilyPrior):
    '''Gamma distribution.
    parameters:
        a: shape
        b: rate
    natural parameters:
        eta1 = -b
        eta2 = a - 1
    sufficient statistics:
        T_1(x) = x
        T_2(x) = ln x
    '''
    __repr_str = '{classname}(shape={shape}, rate={rate})'
    def __init__(self, shape, rate):
        # Store the distribution in natural-parameter form (base class state).
        nparams = self.to_natural_parameters(shape, rate)
        super().__init__(nparams)
    def __repr__(self):
        shape, rate = self.to_std_parameters(self.natural_parameters)
        return self.__repr_str.format(
            classname=self.__class__.__name__,
            shape=repr(shape), rate=repr(rate)
        )
    def expected_value(self):
        # E[x] = shape / rate for the Gamma distribution.
        shape, rate = self.to_std_parameters(self.natural_parameters)
        return shape / rate
    def to_natural_parameters(self, shape, rate):
        # (eta1, eta2) = (-b, a - 1), concatenated into one tensor.
        return torch.cat([-rate.view(1), (shape - 1).view(1)])
    def _to_std_parameters(self, natural_parameters):
        # Inverse of to_natural_parameters: a = eta2 + 1, b = -eta1.
        shape, rate = natural_parameters[1] + 1, -natural_parameters[0]
        return shape, rate
    def _expected_sufficient_statistics(self):
        # E[x] = a / b and E[ln x] = digamma(a) - ln(b).
        shape, rate = self.to_std_parameters(self.natural_parameters)
        return torch.cat([(shape / rate).view(1),
                          (torch.digamma(shape) - torch.log(rate)).view(1)])
    def _log_norm(self, natural_parameters=None):
        # Log-normalizer: ln Gamma(a) - a * ln(b).
        if natural_parameters is None:
            natural_parameters = self.natural_parameters
        shape, rate = self.to_std_parameters(natural_parameters)
        return torch.lgamma(shape) - shape * torch.log(rate)
__all__ = ['GammaPrior']
| StarcoderdataPython |
1759503 | import numpy as np
from scipy.optimize import minimize
from optimize_utils import *
def _minimize_rhc(*args, **kwargs):
# randomized hill-climbing
options = kwargs["options"]
method = options.pop("method")
kwargs["method"] = method
remaining = options["maxiter"]
best_result = None
while remaining > 0:
options["maxiter"] = remaining
result = minimize(*args, **kwargs)
if best_result is None or best_result.fun > result.fun:
best_result = result
remaining -= result.nfev
return best_result
def _minimize_twiddle(fun, x0, args=(), callback=None, tol=0.2, maxiter=1000):
    """Coordinate-descent ("twiddle") minimization of *fun* starting at *x0*.

    One parameter at a time is nudged by a per-parameter step ``dp`` that
    grows (x1.1) on improvement and shrinks (x0.9) otherwise. Stops when the
    total step size drops below *tol*, when a full sweep yields no
    improvement, or when the evaluation budget *maxiter* is reached.

    Returns a result object with the best parameters (x), the best objective
    value (fun), the sweep count (niter) and the evaluation count (nfev).
    """
    nfev = 0
    def score(x):
        # Count every objective evaluation here; the original tallied nfev
        # manually after each call because it predated use of `nonlocal`.
        nonlocal nfev
        nfev += 1
        res = fun(x, *args)
        if callback is not None:
            callback(x)
        return res
    p = x0.copy()
    num_params = len(p)
    dp = np.ones(num_params)
    best_err = score(p)
    stop = False
    n_iter = 0
    while dp.sum() > tol and not stop:
        n_iter += 1
        stop = True
        for i in range(num_params):
            # Try increasing parameter i.
            p[i] += dp[i]
            err = score(p)
            if err < best_err:
                best_err = err
                dp[i] *= 1.1
            else:
                # Try decreasing parameter i instead.
                p[i] -= 2 * dp[i]
                err = score(p)
                if err < best_err:
                    best_err = err
                    dp[i] *= 1.1
                else:
                    # Neither direction helped: restore and shrink the step.
                    p[i] += dp[i]
                    dp[i] *= 0.9
            # Budget check uses maxiter - 1 since each coordinate may cost
            # two evaluations.
            if nfev >= maxiter - 1:
                break
        else:
            stop = False
    return to_result(x=p, fun=best_err, niter=n_iter, nfev=nfev)
| StarcoderdataPython |
3218612 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
flask.sessions
~~~~~~~~~~~~~~
Implements cookie based sessions based on itsdangerous.
:copyright: (c) 2015 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
import uuid
import hashlib
from base64 import b64encode, b64decode
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from .helpers import total_seconds
from itsdangerous import URLSafeTimedSerializer, BadSignature
class SessionMixin(object):
    """Expands a basic dictionary with accessors that are expected
    by Flask extensions and users for the session.
    """
    def _get_permanent(self):
        return self.get('_permanent', False)
    def _set_permanent(self, value):
        # Normalize to a plain bool so the serialized session stays simple.
        self['_permanent'] = bool(value)
    #: this reflects the ``'_permanent'`` key in the dict.
    permanent = property(_get_permanent, _set_permanent)
    del _get_permanent, _set_permanent
    #: some session backends can tell you if a session is new, but that is
    #: not necessarily guaranteed. Use with caution. The default mixin
    #: implementation just hardcodes ``False`` in.
    new = False
    #: for some backends this will always be ``True``, but some backends will
    #: default this to false and detect changes in the dictionary for as
    #: long as changes do not happen on mutable structures in the session.
    #: The default mixin implementation just hardcodes ``True`` in.
    modified = True
def _tag(value):
    """Recursively convert *value* into a JSON-serializable structure.

    Types that JSON cannot represent are wrapped in single-key dicts whose
    key records the original type (' t' tuple, ' u' UUID, ' b' bytes,
    ' m' Markup, ' d' datetime) so ``TaggedJSONSerializer.loads`` can
    restore them.
    """
    if isinstance(value, tuple):
        return {' t': [_tag(x) for x in value]}
    elif isinstance(value, uuid.UUID):
        return {' u': value.hex}
    elif isinstance(value, bytes):
        return {' b': b64encode(value).decode('ascii')}
    elif callable(getattr(value, '__html__', None)):
        return {' m': text_type(value.__html__())}
    elif isinstance(value, list):
        return [_tag(x) for x in value]
    elif isinstance(value, datetime):
        return {' d': http_date(value)}
    elif isinstance(value, dict):
        return dict((k, _tag(v)) for k, v in iteritems(value))
    elif isinstance(value, str):
        # NOTE(review): this branch looks Python 2 specific (str is bytes
        # there); on Python 3 text_type(value) is effectively a no-op.
        try:
            return text_type(value)
        except UnicodeError:
            from flask.debughelpers import UnexpectedUnicodeError
            raise UnexpectedUnicodeError(u'A byte string with '
                u'non-ASCII data was passed to the session system '
                u'which can only store unicode strings. Consider '
                u'base64 encoding your string (String was %r)' % value)
    return value
class TaggedJSONSerializer(object):
    """A customized JSON serializer that supports a few extra types that
    we take for granted when serializing (tuples, markup objects, datetime).
    """
    def dumps(self, value):
        # Tag non-JSON types first (see _tag), then emit compact JSON.
        return json.dumps(_tag(value), separators=(',', ':'))
    def loads(self, value):
        def object_hook(obj):
            # Only single-key dicts can be type tags produced by _tag.
            if len(obj) != 1:
                return obj
            the_key, the_value = next(iteritems(obj))
            if the_key == ' t':
                return tuple(the_value)
            elif the_key == ' u':
                return uuid.UUID(the_value)
            elif the_key == ' b':
                return b64decode(the_value)
            elif the_key == ' m':
                return Markup(the_value)
            elif the_key == ' d':
                return parse_date(the_value)
            # Not a recognized tag: keep the dict as-is.
            return obj
        return json.loads(value, object_hook=object_hook)
# Shared serializer instance used by the cookie-based session interface.
session_json_serializer = TaggedJSONSerializer()
class SecureCookieSession(CallbackDict, SessionMixin):
    """Base class for sessions based on signed cookies."""
    def __init__(self, initial=None):
        # Any write through the CallbackDict marks the session as modified,
        # which is what triggers a new Set-Cookie header on the response.
        def on_update(self):
            self.modified = True
        CallbackDict.__init__(self, initial, on_update)
        self.modified = False
class NullSession(SecureCookieSession):
    """Class used to generate nicer error messages if sessions are not
    available. Will still allow read-only access to the empty session
    but fail on setting.
    """
    def _fail(self, *args, **kwargs):
        raise RuntimeError('The session is unavailable because no secret '
                           'key was set.  Set the secret_key on the '
                           'application to something unique and secret.')
    # Every mutating dict method points at _fail, so writes raise while
    # reads still work on the (empty) session.
    __setitem__ = __delitem__ = clear = pop = popitem = \
        update = setdefault = _fail
    del _fail
class SessionInterface(object):
"""The basic interface you have to implement in order to replace the
default session interface which uses werkzeug's securecookie
implementation. The only methods you have to implement are
:meth:`open_session` and :meth:`save_session`, the others have
useful defaults which you don't need to change.
The session object returned by the :meth:`open_session` method has to
provide a dictionary like interface plus the properties and methods
from the :class:`SessionMixin`. We recommend just subclassing a dict
and adding that mixin::
class Session(dict, SessionMixin):
pass
If :meth:`open_session` returns ``None`` Flask will call into
:meth:`make_null_session` to create a session that acts as replacement
if the session support cannot work because some requirement is not
fulfilled. The default :class:`NullSession` class that is created
will complain that the secret key was not set.
To replace the session interface on an application all you have to do
is to assign :attr:`flask.Flask.session_interface`::
app = Flask(__name__)
app.session_interface = MySessionInterface()
.. versionadded:: 0.8
"""
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app):
"""Creates a null session which acts as a replacement object if the
real session support could not be loaded due to a configuration
error. This mainly aids the user experience because the job of the
null session is to still support lookup without complaining but
modifications are answered with a helpful error message of what
failed.
This creates an instance of :attr:`null_session_class` by default.
"""
return self.null_session_class()
def is_null_session(self, obj):
"""Checks if a given object is a null session. Null sessions are
not asked to be saved.
This checks if the object is an instance of :attr:`null_session_class`
by default.
"""
return isinstance(obj, self.null_session_class)
def get_cookie_domain(self, app):
"""Helpful helper method that returns the cookie domain that should
be used for the session cookie if session cookies are used.
"""
if app.config['SESSION_COOKIE_DOMAIN'] is not None:
return app.config['SESSION_COOKIE_DOMAIN']
if app.config['SERVER_NAME'] is not None:
# chop off the port which is usually not supported by browsers
rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
# Google chrome does not like cookies set to .localhost, so
# we just go with no domain then. Flask documents anyways that
# cross domain cookies need a fully qualified domain name
if rv == '.localhost':
rv = None
# If we infer the cookie domain from the server name we need
# to check if we are in a subpath. In that case we can't
# set a cross domain cookie.
if rv is not None:
path = self.get_cookie_path(app)
if path != '/':
rv = rv.lstrip('.')
return rv
def get_cookie_path(self, app):
"""Returns the path for which the cookie should be valid. The
default implementation uses the value from the ``SESSION_COOKIE_PATH``
config var if it's set, and falls back to ``APPLICATION_ROOT`` or
uses ``/`` if it's ``None``.
"""
return app.config['SESSION_COOKIE_PATH'] or \
app.config['APPLICATION_ROOT'] or '/'
def get_cookie_httponly(self, app):
"""Returns True if the session cookie should be httponly. This
currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
config var.
"""
return app.config['SESSION_COOKIE_HTTPONLY']
def get_cookie_secure(self, app):
"""Returns True if the cookie should be secure. This currently
just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
"""
return app.config['SESSION_COOKIE_SECURE']
def get_expiration_time(self, app, session):
"""A helper method that returns an expiration date for the session
or ``None`` if the session is linked to the browser session. The
default implementation returns now + the permanent session
lifetime configured on the application.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
def should_set_cookie(self, app, session):
"""Indicates whether a cookie should be set now or not. This is
used by session backends to figure out if they should emit a
set-cookie header or not. The default behavior is controlled by
the ``SESSION_REFRESH_EACH_REQUEST`` config variable. If
it's set to ``False`` then a cookie is only set if the session is
modified, if set to ``True`` it's always set if the session is
permanent.
This check is usually skipped if sessions get deleted.
.. versionadded:: 0.11
"""
if session.modified:
return True
save_each = app.config['SESSION_REFRESH_EACH_REQUEST']
return save_each and session.permanent
def open_session(self, app, request):
    """Load a session for the given request.

    Subclasses must implement this.  Return ``None`` when loading fails
    due to a configuration error, otherwise an object implementing a
    dictionary-like interface plus the :class:`SessionMixin` contract.
    """
    raise NotImplementedError()
def save_session(self, app, session, response):
    """Persist a session returned by :meth:`open_session`.

    Called at the end of the request, still inside the request context,
    so implementations may access the current request if needed.
    """
    raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
    """The default session interface: sessions are stored client-side in a
    signed cookie, using the :mod:`itsdangerous` machinery.
    """

    #: the salt that should be applied on top of the secret key for the
    #: signing of cookie based sessions.
    salt = 'cookie-session'
    #: the hash function to use for the signature.  The default is sha1
    digest_method = staticmethod(hashlib.sha1)
    #: the name of the itsdangerous supported key derivation.  The default
    #: is hmac.
    key_derivation = 'hmac'
    #: A python serializer for the payload.  The default is a compact
    #: JSON derived serializer with support for some extra Python types
    #: such as datetime objects or tuples.
    serializer = session_json_serializer
    session_class = SecureCookieSession

    def get_signing_serializer(self, app):
        """Build the serializer used to sign/verify session cookies, or
        ``None`` when the app has no ``secret_key`` configured."""
        if not app.secret_key:
            return None
        return URLSafeTimedSerializer(
            app.secret_key,
            salt=self.salt,
            serializer=self.serializer,
            signer_kwargs={
                'key_derivation': self.key_derivation,
                'digest_method': self.digest_method,
            },
        )

    def open_session(self, app, request):
        """Deserialize the session from the request cookie.

        Falls back to a fresh session when no secret key check is possible,
        the cookie is missing, expired, or its signature does not verify.
        """
        signer = self.get_signing_serializer(app)
        if signer is None:
            return None
        cookie_value = request.cookies.get(app.session_cookie_name)
        if not cookie_value:
            return self.session_class()
        lifetime = total_seconds(app.permanent_session_lifetime)
        try:
            payload = signer.loads(cookie_value, max_age=lifetime)
        except BadSignature:
            return self.session_class()
        return self.session_class(payload)

    def save_session(self, app, session, response):
        """Serialize the session into a signed cookie on *response*."""
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)

        # Deletion case: an emptied-out session means the whole cookie
        # should be removed if the client may still hold one; bail early.
        if not session:
            if session.modified:
                response.delete_cookie(app.session_cookie_name,
                                       domain=domain, path=path)
            return

        # Whether to re-emit the cookie each request is a trade-off that is
        # delegated to should_set_cookie (driven by the
        # SESSION_REFRESH_EACH_REQUEST flag and the session's own
        # modified/permanent state).
        if not self.should_set_cookie(app, session):
            return

        payload = self.get_signing_serializer(app).dumps(dict(session))
        response.set_cookie(app.session_cookie_name, payload,
                            expires=self.get_expiration_time(app, session),
                            httponly=self.get_cookie_httponly(app),
                            domain=domain, path=path,
                            secure=self.get_cookie_secure(app))
| StarcoderdataPython |
1718866 | <gh_stars>1-10
from collections import namedtuple
# Names of each of the buffers holding the runtime simulation state, which
# are passed to OpenCL kernels.
# For more information on what these buffers represent see doc/model_design.md
Buffers = namedtuple(
    "Buffers",
    (
        "place_activities place_coords place_hazards place_counts "
        "people_ages people_obesity people_cvd people_diabetes "
        "people_blood_pressure people_statuses people_transition_times "
        "people_place_ids people_baseline_flows people_flows "
        "people_hazards people_prngs params"
    ),
)
| StarcoderdataPython |
55431 | from abc import ABC, ABCMeta, abstractmethod
class IDatastore(ABC):
    """Interface that key/value datastore backends must implement."""

    @abstractmethod
    def put(self, key: str, value: str):
        """Insert *value* under *key* in the database."""

    @abstractmethod
    def get(self, key: str):
        """Retrieve the value stored under *key* from the database."""

    @abstractmethod
    def connect(self):
        """Connect to, and allow interaction with, the database."""
| StarcoderdataPython |
13183 | """
Generate coulomb matrices for molecules.
See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer
class CoulombMatrix(MolecularFeaturizer):
    """Calculate Coulomb matrices for molecules.

    Coulomb matrices provide a representation of the electronic structure of
    a molecule. For a molecule with `N` atoms, the Coulomb matrix is a
    `N X N` matrix where each element gives the strength of the
    electrostatic interaction between two atoms. The method is described
    in more detail in [1]_.

    Examples
    --------
    >>> import deepchem as dc
    >>> featurizers = dc.feat.CoulombMatrix(max_atoms=23)
    >>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
    >>> tasks = ["atomization_energy"]
    >>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
    >>> dataset = loader.create_dataset(input_file)

    References
    ----------
    .. [1] Montavon, Grégoire, et al. "Learning invariant representations of
       molecules for atomization energy prediction." Advances in neural information
       processing systems. 2012.

    Note
    ----
    This class requires RDKit to be installed.
    """

    def __init__(self,
                 max_atoms: int,
                 remove_hydrogens: bool = False,
                 randomize: bool = False,
                 upper_tri: bool = False,
                 n_samples: int = 1,
                 seed: Optional[int] = None):
        """Initialize this featurizer.

        Parameters
        ----------
        max_atoms: int
            The maximum number of atoms expected for molecules this featurizer will
            process; output matrices are zero-padded to this size.
        remove_hydrogens: bool, optional (default False)
            If True, remove hydrogens before processing them.
        randomize: bool, optional (default False)
            If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
        upper_tri: bool, optional (default False)
            Generate only upper triangle part of Coulomb matrices.
        n_samples: int, optional (default 1)
            If `randomize` is set to True, the number of random samples to draw.
        seed: int, optional (default None)
            Random seed to use.
        """
        self.max_atoms = int(max_atoms)
        self.remove_hydrogens = remove_hydrogens
        self.randomize = randomize
        self.upper_tri = upper_tri
        self.n_samples = n_samples
        if seed is not None:
            seed = int(seed)
        self.seed = seed

    def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
        """
        Calculate Coulomb matrices for molecules. If extra randomized
        matrices are generated, they are treated as if they are features
        for additional conformers.

        Since Coulomb matrices are symmetric, only the (flattened) upper
        triangular portion is returned when ``upper_tri`` is set.

        Parameters
        ----------
        datapoint: rdkit.Chem.rdchem.Mol
            RDKit Mol object

        Returns
        -------
        np.ndarray
            The coulomb matrices of the given molecule.
            The default shape is `(num_confs, max_atoms, max_atoms)`.
            If num_confs == 1, the shape is `(max_atoms, max_atoms)`.
        """
        if 'mol' in kwargs:
            datapoint = kwargs.get("mol")
            # NOTE(review): raising DeprecationWarning aborts the call, so the
            # assignment above is dead code — presumably warnings.warn was
            # intended; confirm against upstream before changing.
            raise DeprecationWarning(
                'Mol is being phased out as a parameter, please pass "datapoint" instead.'
            )

        features = self.coulomb_matrix(datapoint)
        if self.upper_tri:
            # Coulomb matrices are symmetric — keep only the flattened
            # upper-triangular entries of each conformer's matrix.
            features = [f[np.triu_indices_from(f)] for f in features]
        features = np.asarray(features)
        if features.shape[0] == 1:
            # `(1, max_atoms, max_atoms)` -> `(max_atoms, max_atoms)`
            features = np.squeeze(features, axis=0)
        return features

    def coulomb_matrix(self, mol: RDKitMol) -> np.ndarray:
        """
        Generate Coulomb matrices for each conformer of the given molecule.

        Parameters
        ----------
        mol: rdkit.Chem.rdchem.Mol
            RDKit Mol object

        Returns
        -------
        np.ndarray
            The coulomb matrices of the given molecule, one per conformer
            (or ``n_samples`` per conformer when ``randomize`` is set).
        """
        try:
            from rdkit import Chem
            from rdkit.Chem import AllChem
        except ModuleNotFoundError:
            raise ImportError("This class requires RDKit to be installed.")

        # Check whether num_confs >= 1 or not; without a conformer there are
        # no 3D coordinates, so embed one first (hydrogens added for the
        # embedding step).
        num_confs = len(mol.GetConformers())
        if num_confs == 0:
            mol = Chem.AddHs(mol)
            AllChem.EmbedMolecule(mol, AllChem.ETKDG())

        if self.remove_hydrogens:
            mol = Chem.RemoveHs(mol)
        n_atoms = mol.GetNumAtoms()
        # z[i] is the atomic number (nuclear charge) of atom i.
        z = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
        rval = []
        for conf in mol.GetConformers():
            d = self.get_interatomic_distances(conf)
            # Off-diagonal entries: Z_i * Z_j / |R_i - R_j|; the diagonal
            # (overwritten next line) is 0.5 * Z_i ** 2.4 per Montavon et al.
            m = np.outer(z, z) / d
            m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4
            if self.randomize:
                for random_m in self.randomize_coulomb_matrix(m):
                    random_m = pad_array(random_m, self.max_atoms)
                    rval.append(random_m)
            else:
                m = pad_array(m, self.max_atoms)
                rval.append(m)
        return np.asarray(rval)

    def randomize_coulomb_matrix(self, m: np.ndarray) -> List[np.ndarray]:
        """Randomize a Coulomb matrix as decribed in [1]_:

        1. Compute row norms for M in a vector row_norms.
        2. Sample a zero-mean unit-variance noise vector e with dimension
           equal to row_norms.
        3. Permute the rows and columns of M with the permutation that
           sorts row_norms + e.

        Parameters
        ----------
        m: np.ndarray
            Coulomb matrix.

        Returns
        -------
        List[np.ndarray]
            List of the random coulomb matrix (``n_samples`` entries)

        References
        ----------
        .. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003
        """
        rval = []
        row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float)
        rng = np.random.RandomState(self.seed)
        for i in range(self.n_samples):
            e = rng.normal(size=row_norms.size)
            p = np.argsort(row_norms + e)
            new = m[p][:, p]  # permute rows first, then columns
            rval.append(new)
        return rval

    @staticmethod
    def get_interatomic_distances(conf: Any) -> np.ndarray:
        """
        Get interatomic distances for atoms in a molecular conformer.

        Parameters
        ----------
        conf: rdkit.Chem.rdchem.Conformer
            Molecule conformer.

        Returns
        -------
        np.ndarray
            The symmetric distances matrix for all atoms in a molecule,
            in atomic units (bohr).
        """
        n_atoms = conf.GetNumAtoms()
        coords = [
            # Convert AtomPositions from Angstrom to bohr (atomic units).
            # NOTE(review): __idiv__ divides the Point3D in place — relies on
            # RDKit's Point3D supporting that operator; confirm on upgrade.
            conf.GetAtomPosition(i).__idiv__(0.52917721092) for i in range(n_atoms)
        ]
        d = np.zeros((n_atoms, n_atoms), dtype=float)
        for i in range(n_atoms):
            for j in range(i):
                d[i, j] = coords[i].Distance(coords[j])
                d[j, i] = d[i, j]
        return d
class CoulombMatrixEig(CoulombMatrix):
    """Calculate the eigenvalues of Coulomb matrices for molecules.

    This featurizer computes the eigenvalues of the Coulomb matrices for provided
    molecules. Coulomb matrices are described in [1]_.

    Examples
    --------
    >>> import deepchem as dc
    >>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23)
    >>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
    >>> tasks = ["atomization_energy"]
    >>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
    >>> dataset = loader.create_dataset(input_file)

    References
    ----------
    .. [1] Montavon, Grégoire, et al. "Learning invariant representations of
       molecules for atomization energy prediction." Advances in neural information
       processing systems. 2012.
    """

    def __init__(self,
                 max_atoms: int,
                 remove_hydrogens: bool = False,
                 randomize: bool = False,
                 n_samples: int = 1,
                 seed: Optional[int] = None):
        """Initialize this featurizer.

        Mirrors ``CoulombMatrix.__init__`` but without the ``upper_tri``
        option (the eigenvalue output is already a flat vector).

        Parameters
        ----------
        max_atoms: int
            The maximum number of atoms expected for molecules this featurizer will
            process; eigenvalue vectors are zero-padded to this length.
        remove_hydrogens: bool, optional (default False)
            If True, remove hydrogens before processing them.
        randomize: bool, optional (default False)
            If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
        n_samples: int, optional (default 1)
            If `randomize` is set to True, the number of random samples to draw.
        seed: int, optional (default None)
            Random seed to use.
        """
        self.max_atoms = int(max_atoms)
        self.remove_hydrogens = remove_hydrogens
        self.randomize = randomize
        self.n_samples = n_samples
        if seed is not None:
            seed = int(seed)
        self.seed = seed

    def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
        """
        Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues
        are returned sorted by absolute value in descending order and padded
        by max_atoms.

        Parameters
        ----------
        datapoint: rdkit.Chem.rdchem.Mol
            RDKit Mol object

        Returns
        -------
        np.ndarray
            The eigenvalues of Coulomb matrix for molecules.
            The default shape is `(num_confs, max_atoms)`.
            If num_confs == 1, the shape is `(max_atoms,)`.
        """
        if 'mol' in kwargs:
            datapoint = kwargs.get("mol")
            # NOTE(review): raising DeprecationWarning aborts the call, so the
            # assignment above is dead code — presumably warnings.warn was
            # intended; confirm against upstream before changing.
            raise DeprecationWarning(
                'Mol is being phased out as a parameter, please pass "datapoint" instead.'
            )

        cmat = self.coulomb_matrix(datapoint)
        features_list = []
        for f in cmat:
            # Only the spectrum is used; the eigenvectors `v` are discarded.
            w, v = np.linalg.eig(f)
            w_abs = np.abs(w)
            sortidx = np.argsort(w_abs)
            sortidx = sortidx[::-1]  # sort descending by |eigenvalue|
            w = w[sortidx]
            f = pad_array(w, self.max_atoms)
            features_list.append(f)
        features = np.asarray(features_list)
        if features.shape[0] == 1:
            # `(1, max_atoms)` -> `(max_atoms,)`
            features = np.squeeze(features, axis=0)
        return features
| StarcoderdataPython |
87701 | # import Criv pre-processing script
import prep_file
# CRIV pre-processing pipeline: each step below feeds the next, so the
# call order matters.

# -- Load parameters from user_param.txt
param = prep_file.read_input_file()

# -- Plot model geometry
# output saved in CrivApp/output/model.png
prep_file.build_model(param)

# -- Calculate Xfar
# output saved in CrivApp/output/plot_xfar.png
prep_file.compute_Xfar(param)

# -- Compute CRIV
prep_file.compute_CRIV(param)

# -- Plot regression line
# output saved in CrivApp/output/regression_line.png
prep_file.plot_CRIV()

# -- Get CRIV and R2 from regression line
# output saved in CrivApp/output/CRIV_value.txt and CrivApp/output/R2_value.txt
prep_file.write_CRIV(param)

# -- Get CRIV distribution from parameter distribution
prep_file.CRIV_distrib(param)

# plot CRIV and parameter distribution
prep_file.CRIV_dist_plot(param)
| StarcoderdataPython |
3213993 | <filename>api_watchdog/hooks/result_group/abstract.py
from abc import ABC, abstractmethod
from api_watchdog.collect import WatchdogResultGroup
class ResultGroupHook(ABC):
    """Base class for hooks that post-process a finished result group."""

    @abstractmethod
    def __call__(self, result_group: WatchdogResultGroup):
        """Process *result_group* once the watchdog run has completed."""
        raise NotImplementedError
| StarcoderdataPython |
3357796 | # Copyright (c) The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE.
import numpy as np
from inferlo import PairWiseFiniteModel
from inferlo.pairwise.optimization.map_lp import map_lp
from inferlo.testing import grid_potts_model, tree_potts_model, \
line_potts_model
def test_grid_4x4x2():
    """LP bounds must bracket the true max-likelihood on a 4x4 grid, al=2."""
    model = grid_potts_model(4, 4, al_size=2, seed=0)
    best_state = model.max_likelihood(algorithm='bruteforce')
    lp_res = map_lp(model)
    true_value = np.log(model.evaluate(best_state))
    assert (round(lp_res.upper_bound, 3) >= round(true_value, 3)
            >= round(lp_res.lower_bound, 3))
def test_grid_3x3x4():
    """LP bounds must bracket the true max-likelihood on a 3x3 grid, al=4."""
    model = grid_potts_model(3, 3, al_size=4, seed=0)
    best_state = model.max_likelihood(algorithm='bruteforce')
    lp_res = map_lp(model)
    true_value = np.log(model.evaluate(best_state))
    assert (round(lp_res.upper_bound, 3) >= round(true_value, 3)
            >= round(lp_res.lower_bound, 3))
def test_grid_2x2x10():
    """LP bounds must bracket the true max-likelihood on a 2x2 grid, al=10."""
    model = grid_potts_model(2, 2, al_size=10, seed=0)
    best_state = model.max_likelihood(algorithm='bruteforce')
    lp_res = map_lp(model)
    true_value = np.log(model.evaluate(best_state))
    assert (round(lp_res.upper_bound, 3) >= round(true_value, 3)
            >= round(lp_res.lower_bound, 3))
def test_line_3x3():
    """LP relaxation on a small line (tree) model: bounds should be tight."""
    model = line_potts_model(gr_size=3, al_size=3, seed=0)
    best_state = model.max_likelihood(algorithm='tree_dp')
    lp_res = map_lp(model)
    true_value = np.log(model.evaluate(best_state))
    # BUG FIX: `np.allclose(ub, value, lb)` passed the lower bound as the
    # `rtol` argument, so the lower bound was never actually checked.
    assert np.allclose(lp_res.upper_bound, true_value)
    assert (round(lp_res.upper_bound, 6) >= round(true_value, 6)
            >= round(lp_res.lower_bound, 6))
def test_line_20x10():
    """LP relaxation on a longer line (tree) model: bounds should be tight."""
    model = line_potts_model(gr_size=20, al_size=10, seed=0)
    best_state = model.max_likelihood(algorithm='tree_dp')
    lp_res = map_lp(model)
    true_value = np.log(model.evaluate(best_state))
    # BUG FIX: `np.allclose(ub, value, lb)` passed the lower bound as the
    # `rtol` argument, so the lower bound was never actually checked.
    assert np.allclose(lp_res.upper_bound, true_value)
    assert (round(lp_res.upper_bound, 6) >= round(true_value, 6)
            >= round(lp_res.lower_bound, 6))
def test_tree_50x2():
    """LP relaxation on a random tree model: bounds should be tight."""
    model = tree_potts_model(gr_size=50, al_size=2, seed=0)
    best_state = model.max_likelihood(algorithm='tree_dp')
    lp_res = map_lp(model)
    true_value = np.log(model.evaluate(best_state))
    # BUG FIX: `np.allclose(ub, value, lb)` passed the lower bound as the
    # `rtol` argument, so the lower bound was never actually checked.
    assert np.allclose(lp_res.upper_bound, true_value)
    assert (round(lp_res.upper_bound, 6) >= round(true_value, 6)
            >= round(lp_res.lower_bound, 6))
def test_disconnected():
    """MAP LP should handle a model whose interaction graph is disconnected."""
    model = PairWiseFiniteModel(size=4, al_size=5)
    model.add_interaction(0, 1, np.random.random(size=(5, 5)))
    model.add_interaction(2, 3, np.random.random(size=(5, 5)))
    best_state = model.max_likelihood(algorithm='path_dp')
    lp_res = map_lp(model)
    true_value = np.log(model.evaluate(best_state))
    # BUG FIX: `np.allclose(ub, value, lb)` passed the lower bound as the
    # `rtol` argument, so the lower bound was never actually checked.
    assert np.allclose(lp_res.upper_bound, true_value)
    assert (round(lp_res.upper_bound, 6) >= round(true_value, 6)
            >= round(lp_res.lower_bound, 6))
| StarcoderdataPython |
1759927 | <reponame>investing-algorithms/investing-algorithm-framework<filename>investing_algorithm_framework/core/market_services/market_service.py<gh_stars>1-10
from abc import ABC, abstractmethod
class MarketService(ABC):
    """Abstract interface that concrete market/exchange backends implement."""

    @abstractmethod
    def pair_exists(self, target_symbol: str, trading_symbol: str):
        """Return whether the target/trading symbol pair exists on the market."""

    @abstractmethod
    def get_prices(self, symbols):
        """Return current prices for the given symbols."""

    @abstractmethod
    def get_ticker(self, symbol):
        """Return the ticker for a single symbol."""

    @abstractmethod
    def get_tickers(self, symbols):
        """Return tickers for several symbols."""

    @abstractmethod
    def get_order_book(self, symbol):
        """Return the order book for a symbol."""

    @abstractmethod
    def get_balance(self):
        """Return the account balance."""

    @abstractmethod
    def create_limit_buy_order(
        self,
        target_symbol: str,
        trading_symbol: str,
        amount: float,
        price: float
    ):
        """Place a limit buy order."""

    @abstractmethod
    def create_limit_sell_order(
        self,
        target_symbol: str,
        trading_symbol: str,
        amount: float,
        price: float
    ):
        """Place a limit sell order."""

    @abstractmethod
    def create_market_sell_order(
        self,
        target_symbol: str,
        trading_symbol: str,
        amount: float,
    ):
        """Place a market sell order at the current price."""

    @abstractmethod
    def get_orders(self, symbol: str, since=None):
        """Return orders for a symbol, optionally only those after *since*."""

    @abstractmethod
    def get_order(self, order_id):
        """Return a single order by its identifier."""

    @abstractmethod
    def get_closed_orders(
        self, target_symbol: str = None, trading_symbol: str = None
    ):
        """Return closed orders, optionally filtered by symbol."""

    @abstractmethod
    def cancel_order(self, order_id):
        """Cancel an order by its identifier."""

    @abstractmethod
    def get_ohclv(self, symbol, time_unit, since):
        """Return OHCLV candle data for a symbol."""

    @abstractmethod
    def get_ohclvs(self, symbols, time_unit, since):
        """Return OHCLV candle data for several symbols."""
| StarcoderdataPython |
3395234 | <reponame>lucianomc/casepro<filename>casepro/pods/base.py
import json
from confmodel import Config as ConfmodelConfig
from confmodel import fields
from django.apps import AppConfig
class PodConfig(ConfmodelConfig):
    """
    This is the config that all pods should use as the base for their own config.
    """

    # BUG FIX: the two adjacent string literals used to concatenate to
    # "...set in the podregistry." — a missing space in the help text.
    index = fields.ConfigInt(
        "A unique identifier for the specific instance of this pod. Automatically determined and set in the pod "
        "registry.",
        required=True,
    )

    # Optional UI title; None means the pod type's default title is used.
    title = fields.ConfigText("The title to show in the UI for this pod", default=None)
class Pod(object):
    """
    The base class for all pod plugins.
    """

    def __init__(self, pod_type, config):
        self.pod_type = pod_type
        self.config = config

    @property
    def config_json(self):
        """This pod's configuration serialized as a JSON string."""
        return json.dumps(self.config._config_data)

    def read_data(self, params):
        """
        Return the data used to render this pod in the UI.

        The base contract is a dict with two keys:

        * ``items``: a list of ``{'name': ..., 'value': ...}`` dicts that are
          displayed as name/value rows.
        * ``actions``: a list of ``{'type': ..., 'name': ..., 'payload': ...}``
          dicts rendered as buttons. ``type`` and ``payload`` are passed back
          to :meth:`perform_action` to identify which button was pressed;
          ``name`` is the button label. Each action may also carry the
          optional keys ``busy_text`` (button label while waiting on the pod
          api side, defaults to ``name``) and ``confirm`` (whether to show a
          confirmation modal first, defaults to ``False``).

        Example:
            {
                'items': [
                    {
                        'name': 'EDD',
                        'value': '2015-07-18',
                    },
                ],
                'actions': [
                    {
                        'type': 'remove_edd',
                        'name': 'Remove EDD',
                        'payload': {},
                        'busy_text': 'Removing EDD',
                        'confirm': True
                    },
                ],
            }
        """
        return {}

    def perform_action(self, type_, params):
        """
        Perform the action identified by ``type_`` with ``params`` (as
        specified in :meth:`read_data`).

        Returns a ``(success, payload)`` tuple. ``success`` is a boolean; when
        True, a case action note will be created. ``payload`` is a dict with a
        ``message`` key holding the error message on failure, or the case
        action note text on success.
        """
        return (False, {"message": ""})
class PodPlugin(AppConfig):
    """Django AppConfig describing a pod type and its UI integration."""

    name = "casepro.pods"
    # The Pod subclass and config class instantiated for this pod type.
    pod_class = Pod
    config_class = PodConfig

    # django application label, used to determine which pod type to use when loading pods configured in `settings.PODS`
    label = "base_pod"

    # default title to use when configuring each pod
    title = "Pod"

    # override to use a different angular controller
    controller = "PodController"

    # override to use a different angular directive
    directive = "cp-pod"

    # override with paths to custom scripts that the pod needs
    scripts = ()

    # override with paths to custom styles that the pod needs
    styles = ()
| StarcoderdataPython |
117569 | <filename>sigma_graph/envs/figure8/rewards/rewards_simple.py
from math import ceil
# default hyper-parameters for rewards
DEFAULT_REWARDS = {
    # per-step shaping: engagement bonuses/penalties and the overlay penalty
    "step": {"reward_step_on": True, "red_2_blue": 4, "blue_2_red": -3, "red_overlay": -2, },
    # end-of-episode rewards: a health-based lookup table, a speed-based
    # segment decay, and a soft boundary around the damage threshold
    "episode": {
        "reward_episode_on": True, "episode_decay_soft": True,
        "health_lookup": {"type": "table", "reward": [32, 16, 8, 4, 2, 0], "damage": [0, 1, 2, 3, 4, 100]},
        "faster_lookup": {"type": "segment", "pivot_step": 10, "reward_init": 16, "reward_decay": 1},
        "soft_bound": {"dist": [1, 2], "decay_factor": [0.25, 0.125]}
    },
}
def get_step_engage(r_engages_b, b_engages_r, team_switch=False, **rewards):
    """Per-step engagement reward.

    Adds ``red_2_blue`` when red engages blue and ``blue_2_red`` when blue
    engages red; the total is negated when *team_switch* is True.
    """
    assert len(rewards), "No step rewards provided.."
    total = 0
    if r_engages_b:
        total = total + rewards["red_2_blue"]
    if b_engages_r:
        total = total + rewards["blue_2_red"]
    return -total if team_switch is True else total
def get_step_overlay(overlay, **rewards):
    """Return the ``red_overlay`` penalty when *overlay* is truthy, else 0."""
    assert len(rewards), "No step rewards provided.."
    if overlay:
        return rewards["red_overlay"]
    return 0
def get_episode_reward_agent(health_lost_self, health_lost_opponent, threshold_self, threshold_opponent,
                             damage_cost_self=0, end_step_opponent=-1, **rewards):
    """End-of-episode reward for a single agent.

    Rewards are only granted when the opponent's health loss reaches its
    termination threshold (softened by ``soft_bound`` when
    ``episode_decay_soft`` is on): a health-based reward for surviving plus a
    speed-based reward for fast termination, scaled down by the soft-boundary
    decay factor when the threshold was only "almost" reached.
    """
    assert len(rewards), "No episode rewards provided.."
    # (idea for later: discourage free-loaders by requiring damage_cost_self > 0)
    soft_decay = rewards["episode_decay_soft"] is True
    offset = rewards["soft_bound"]["dist"][-1] if soft_decay else 0

    # No reward at all unless the opponent was (nearly) terminated.
    if health_lost_opponent < threshold_opponent - offset:
        return 0

    # Health-based reward for surviving.
    reward = get_reward_type(health_lost_self, **rewards["health_lookup"])
    # Speed-based reward for fast termination.
    if end_step_opponent > 0:
        reward += get_reward_type(end_step_opponent, **rewards["faster_lookup"])
    # Apply the soft-boundary decay when strictly inside the soft band.
    if soft_decay and health_lost_opponent < threshold_opponent:
        gap = threshold_opponent - health_lost_opponent
        bucket = next(_i for _i, _bound in enumerate(rewards["soft_bound"]["dist"]) if _bound >= gap)
        reward = ceil(reward * rewards["soft_bound"]["decay_factor"][bucket])
    return reward
def get_reward_type(value, **_dict):
    """Dispatch *value* to the reward function named by ``_dict['type']``.

    Supported types: ``"none"`` (always 0), ``"table"`` and ``"segment"``.
    Raises ``NotImplementedError`` for any other type.
    """
    _type = _dict["type"]
    if _type == "none":
        return 0
    if _type == "table":
        return get_table_reward(value, **_dict)
    if _type == "segment":
        return get_segment_reward(value, **_dict)
    # BUG FIX: this used to be `assert f"...not implemented..."`, which
    # asserts a truthy string and therefore never fired; unknown types
    # silently returned 0. Fail loudly instead.
    raise NotImplementedError(
        f"Reward function <episode:faster> not implemented:{_dict}")
def get_table_reward(damage_taken, **_dict):
    """Table lookup: reward for the first damage bucket covering *damage_taken*."""
    caps = _dict["damage"]
    # index of the first bucket whose damage cap is >= the damage taken
    slot = next(_i for _i, _cap in enumerate(caps) if _cap >= damage_taken)
    return _dict["reward"][slot]
def get_segment_reward(step, **_dict):
    """Piecewise-linear reward over step count.

    Flat at ``reward_init`` from step 0 through ``pivot_step``, then decays
    by ``reward_decay`` per step, floored at 0.
    """
    pivot = _dict["pivot_step"]
    init = _dict["reward_init"]
    if step <= pivot:
        return init
    decayed = init - int((step - pivot) * _dict["reward_decay"])
    return max(decayed, 0)
def get_episode_reward_team(health_list_r, health_list_b, health_init,
                            damage_thres_r, damage_thres_b, damage_list_r, endtime_list_b, **rewards):
    """Team-level end-of-episode reward (placeholder, currently always 0).

    TODO: implement team-based reward aggregation over all agents.
    """
    assert len(rewards), "No episode rewards provided.."
    return 0
| StarcoderdataPython |
130136 | <filename>torchsample/transforms/affine3d_transforms.py
"""
Affine transforms implemented on torch tensors, and
requiring only one interpolation
"""
import math
import random
import torch as th
from ..utils import th_affine3d, th_random_choice
class RandomAffine3D(object):
    """Compose random rotation / translation / shear / zoom into a single 3D
    affine transform, applied with only one interpolation pass.

    Arguments
    ---------
    rotation_range : int or float, optional
        sample a rotation angle uniformly from (-rotation_range, rotation_range)
        degrees.
    translation_range : float or 3-tuple of floats in [0, 1), optional
        fractional shift bounds along the (depth, width, height) dimensions;
        the image is shifted within +/- (bound * dimension) on each axis.
    shear_range : float, optional
        sample a shear angle uniformly from (-shear_range, shear_range) degrees.
    zoom_range : (lower, upper) floats, optional
        sample a zoom factor uniformly from [lower, upper]; factors below 1.0
        zoom in, factors above 1.0 zoom out.
    interp : string in {'trilinear', 'nearest'} or list of strings
        interpolation type; a list gives a per-input interpolation mode.
    lazy : boolean
        if True, return only the composed affine matrix instead of
        transforming the inputs.
    """

    def __init__(self,
                 rotation_range=None,
                 translation_range=None,
                 shear_range=None,
                 zoom_range=None,
                 interp='trilinear',
                 lazy=False):
        self.transforms = []
        if rotation_range is not None:
            self.transforms.append(RandomRotate3D(rotation_range, lazy=True))
        if translation_range is not None:
            self.transforms.append(RandomTranslate3D(translation_range, lazy=True))
        if shear_range is not None:
            self.transforms.append(RandomShear3D(shear_range, lazy=True))
        if zoom_range is not None:
            self.transforms.append(RandomZoom3D(zoom_range, lazy=True))

        if not self.transforms:
            raise Exception('Must give at least one transform parameter')

        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        # Compose all lazily-returned affine matrices into one transform.
        combined = self.transforms[0](inputs[0])
        for extra_tform in self.transforms[1:]:
            combined = combined.mm(extra_tform(inputs[0]))
        self.tform_matrix = combined

        if self.lazy:
            return combined
        return Affine3D(combined, interp=self.interp)(*inputs)
class Affine3D(object):
    """Apply a fixed 3D affine transform to one or more tensors using a
    single interpolation pass.

    Arguments
    ---------
    tform_matrix : a 3x3 or 3x4 matrix
        affine transformation matrix to apply
    interp : string in {'trilinear', 'nearest'} or list of strings
        interpolation type; a list gives a per-input interpolation mode.
    """

    def __init__(self,
                 tform_matrix,
                 interp='trilinear'):
        self.tform_matrix = tform_matrix
        self.interp = interp

    def __call__(self, *inputs):
        if isinstance(self.interp, (tuple, list)):
            interp = self.interp
        else:
            interp = [self.interp] * len(inputs)

        outputs = []
        for idx, single_input in enumerate(inputs):
            outputs.append(th_affine3d(single_input,
                                       self.tform_matrix,
                                       mode=interp[idx]))
        # Single input -> unwrap; multiple inputs -> return the list.
        return outputs if idx >= 1 else outputs[0]
class Affine3DCompose(object):

    def __init__(self,
                 transforms,
                 interp='trilinear'):
        """
        Apply a collection of explicit affine transforms to an input image,
        and to a target image if necessary

        Arguments
        ---------
        transforms : list or tuple
            each element in the list/tuple should be an affine transform.
            currently supported transforms:
                - Rotate3D()
                - Translate3D()
                - Shear3D()
                - Zoom3D()

        interp : string in {'trilinear', 'nearest'} or list of strings
            type of interpolation to use. You can provide a different
            type of interpolation for each input, e.g. if you have two
            inputs then you can say `interp=['trilinear','nearest']
        """
        self.transforms = transforms
        self.interp = interp

        # set transforms to lazy so they only return the tform matrix
        # NOTE: this mutates the caller's transform objects in place.
        for t in self.transforms:
            t.lazy = True

    def __call__(self, *inputs):
        # collect all of the lazily returned tform matrices and compose
        # them into a single affine matrix via matrix multiplication
        tform_matrix = self.transforms[0](inputs[0])
        for tform in self.transforms[1:]:
            tform_matrix = tform_matrix.mm(tform(inputs[0]))

        if not isinstance(self.interp, (tuple, list)):
            interp = [self.interp] * len(inputs)
        else:
            interp = self.interp

        # one interpolation pass per input with the composed matrix
        outputs = []
        for idx, _input in enumerate(inputs):
            input_tf = th_affine3d(_input,
                                   tform_matrix,
                                   mode=interp[idx])
            outputs.append(input_tf)
        # single input -> unwrap; multiple inputs -> list
        return outputs if idx >= 1 else outputs[0]
class RandomRotate3D(object):
    """Rotate inputs by a single random angle drawn uniformly from
    (-rotation_range, rotation_range) degrees about a fixed axis. Multi-channel
    images get the same rotation on every channel.

    Arguments
    ---------
    rotation_range : int or float
        bound (in degrees) of the uniform sampling interval.
    axis : integer in (0, 1, 2)
        the (z, y, x) axis that stays fixed during the rotation.
    interp : string in {'trilinear', 'nearest'} or list of strings
        interpolation type; a list gives a per-input interpolation mode.
    lazy : boolean
        if True, return only the affine matrix instead of transforming.
    """

    def __init__(self,
                 rotation_range,
                 axis=0,
                 interp='trilinear',
                 lazy=False):
        self.rotation_range = rotation_range
        self.axis = axis
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        angle = random.uniform(-self.rotation_range, self.rotation_range)
        if self.lazy:
            return Rotate3D(angle, axis=self.axis, lazy=True)(inputs[0])
        return Rotate3D(angle, axis=self.axis, interp=self.interp)(*inputs)
class RandomChoiceRotate3D(object):
    """Rotate inputs by an angle sampled from a user-given set of values.
    Multi-channel images get the same rotation on every channel.

    Arguments
    ---------
    values : list, tuple or tensor
        candidate rotation angles (degrees); lists/tuples are converted to
        a FloatTensor.
    axis : integer in (0, 1, 2)
        the (z, y, x) axis that stays fixed during the rotation.
    p : list or tuple the same length as `values`, optional
        sampling probabilities; uniform when omitted. Must sum to 1.
    interp : string in {'trilinear', 'nearest'} or list of strings
        interpolation type; a list gives a per-input interpolation mode.
    lazy : boolean
        if True, return only the affine matrix instead of transforming.
    """

    def __init__(self,
                 values,
                 axis=0,
                 p=None,
                 interp='trilinear',
                 lazy=False):
        if isinstance(values, (list, tuple)):
            values = th.FloatTensor(values)
        self.values = values
        self.axis = axis
        if p is None:
            # default: uniform probability over the candidate values
            p = th.ones(len(values)) / len(values)
        elif abs(1.0 - sum(p)) > 1e-3:
            raise ValueError('Probs must sum to 1')
        self.p = p
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        angle = th_random_choice(self.values, p=self.p)
        if self.lazy:
            return Rotate3D(angle, axis=self.axis, lazy=True)(inputs[0])
        return Rotate3D(angle, axis=self.axis, interp=self.interp)(*inputs)
class Rotate3D(object):
    """Rotate a 3D image by a fixed angle about one axis.

    If the image has multiple channels, the same rotation is applied to
    each channel.
    """

    def __init__(self, value, axis=0, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        value : integer or float
            rotation angle in degrees
        axis : integer in (0, 1, 2)
            axis (z, y, x) for rotation. This axis will be fixed.
        interp : string in {'trilinear', 'nearest'} or list of strings
            type of interpolation to use. You can provide a different
            type of interpolation for each input, e.g. if you have two
            inputs then you can say `interp=['trilinear','nearest']`
        lazy : boolean
            if true, only create the affine transform matrix and return that
            if false, perform the transform on the tensor and return the tensor
        """
        self.value = value
        self.axis = axis
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        # One interpolation mode per input.
        if not isinstance(self.interp, (tuple, list)):
            interp = [self.interp] * len(inputs)
        else:
            interp = self.interp
        theta = math.pi / 180 * self.value
        # Homogeneous 4x4 rotation matrix about the chosen (fixed) axis.
        if self.axis == 0:
            rotation_matrix = th.FloatTensor([[1, 0, 0, 0],
                                              [0, math.cos(theta), -math.sin(theta), 0],
                                              [0, math.sin(theta), math.cos(theta), 0],
                                              [0, 0, 0, 1]])
        elif self.axis == 1:
            rotation_matrix = th.FloatTensor([[math.cos(theta), 0, math.sin(theta), 0],
                                              [0, 1, 0, 0],
                                              [-math.sin(theta), 0, math.cos(theta), 0],
                                              [0, 0, 0, 1]])
        elif self.axis == 2:
            rotation_matrix = th.FloatTensor([[math.cos(theta), -math.sin(theta), 0, 0],
                                              [math.sin(theta), math.cos(theta), 0, 0],
                                              [0, 0, 1, 0],
                                              [0, 0, 0, 1]])
        else:
            raise ValueError('axis out of range [0-2]')
        if self.lazy:
            return rotation_matrix
        outputs = [th_affine3d(_input,
                               rotation_matrix,
                               mode=interp[idx],
                               center=True)
                   for idx, _input in enumerate(inputs)]
        # BUGFIX: previously decided on the leftover loop variable `idx`,
        # which raised NameError when called with zero inputs; use the
        # output count instead.
        return outputs if len(outputs) > 1 else outputs[0]
class RandomTranslate3D(object):
    """Randomly translate a 3D image along depth, width and height.

    The shift for each dimension is drawn uniformly from the given
    fractional range; the same translation is applied to every channel.
    Assumes CDWH ordering.
    """

    def __init__(self, translation_range, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        translation_range : number or 3-tuple of numbers between [0, 1)
            (depth_range, width_range, height_range): each dimension is
            shifted between (-range * dim_size, range * dim_size).
            A single number is used for all three dimensions.
        interp : string in {'trilinear', 'nearest'} or list of strings
            type of interpolation to use, optionally one per input
        lazy : boolean
            if true, only create the affine transform matrix and return that
            if false, perform the transform on the tensor and return the tensor
        """
        # BUGFIX: also accept a plain int scalar (e.g. 0); the previous
        # isinstance(..., float) check rejected integers and then crashed
        # on translation_range[0].
        if isinstance(translation_range, (int, float)):
            translation_range = (translation_range,) * 3
        self.depth_range = translation_range[0]
        self.width_range = translation_range[1]
        self.height_range = translation_range[2]
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        # NOTE: sampling order (height, width, depth) is kept stable so the
        # random stream matches the original implementation.
        random_height = random.uniform(-self.height_range, self.height_range)
        random_width = random.uniform(-self.width_range, self.width_range)
        random_depth = random.uniform(-self.depth_range, self.depth_range)
        if self.lazy:
            return Translate3D([random_depth, random_width, random_height],
                               lazy=True)(inputs[0])
        return Translate3D([random_depth, random_width, random_height],
                           interp=self.interp)(*inputs)
class RandomChoiceTranslate3D(object):
    """Translate a 3D image by fractions sampled from a fixed set of values.

    Depth, width and height shifts are sampled independently from the same
    candidate set; the identical translation is applied to every channel.
    """

    def __init__(self, values, p=None, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        values : list, tuple or tensor
            candidate translation fractions to sample from
        p : list or tuple the same length as `values`, optional
            sampling probability for each value; must sum to 1.
            Defaults to a uniform distribution.
        interp : string in {'trilinear', 'nearest'} or list of strings
            interpolation type, optionally one per input
        lazy : boolean
            if True, only build and return the affine matrix;
            if False, apply the transform and return the tensor(s)
        """
        if isinstance(values, (list, tuple)):
            values = th.FloatTensor(values)
        self.values = values
        if p is None:
            # Uniform sampling over the candidate values.
            p = th.ones(len(values)) / len(values)
        elif abs(1.0 - sum(p)) > 1e-3:
            raise ValueError('Probs must sum to 1')
        self.p = p
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        # NOTE: sampling order (height, width, depth) is kept stable so the
        # random stream matches across implementations.
        random_height = th_random_choice(self.values, p=self.p)
        random_width = th_random_choice(self.values, p=self.p)
        random_depth = th_random_choice(self.values, p=self.p)
        if self.lazy:
            return Translate3D([random_depth, random_width, random_height],
                               lazy=True)(inputs[0])
        return Translate3D([random_depth, random_width, random_height],
                           interp=self.interp)(*inputs)
class Translate3D(object):
    """Translate a 3D image by fixed fractions of each dimension."""

    def __init__(self, value, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        value : float or 3-tuple of float
            (depth, width, height) shift as a fraction of the corresponding
            dimension, each within (-1, 1). A single float is used for all
            three dimensions.
        interp : string in {'trilinear', 'nearest'} or list of strings
            type of interpolation to use, optionally one per input
        lazy : boolean
            if true, only create the affine transform matrix and return that
            if false, perform the transform on the tensor and return the tensor
        """
        if not isinstance(value, (tuple, list)):
            value = (value, value, value)
        for fraction in value[:3]:
            if fraction > 1 or fraction < -1:
                raise ValueError('Translation must be between -1 and 1')
        self.depth_range = value[0]
        self.width_range = value[1]
        self.height_range = value[2]
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        if not isinstance(self.interp, (tuple, list)):
            interp = [self.interp] * len(inputs)
        else:
            interp = self.interp
        # Convert fractional shifts into voxel offsets (CDWH layout).
        tz = self.depth_range * inputs[0].size(1)
        ty = self.width_range * inputs[0].size(2)
        tx = self.height_range * inputs[0].size(3)
        translation_matrix = th.FloatTensor([[1, 0, 0, tz],
                                             [0, 1, 0, ty],
                                             [0, 0, 1, tx],
                                             [0, 0, 0, 1]])
        if self.lazy:
            return translation_matrix
        outputs = [th_affine3d(_input,
                               translation_matrix,
                               mode=interp[idx],
                               center=True)
                   for idx, _input in enumerate(inputs)]
        # BUGFIX: decide on the output count instead of the leftover loop
        # index, which raised NameError when called with no inputs.
        return outputs if len(outputs) > 1 else outputs[0]
class RandomShear3D(object):
    """Randomly shear a 3D image with angles drawn from (-shear_range, shear_range)."""

    def __init__(self, shear_range, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        shear_range : float
            bound on the shear angles sampled for x and y
            (Shear3D converts the values from degrees to radians)
        interp : string in {'trilinear', 'nearest'} or list of strings
            interpolation type, optionally one per input
        lazy : boolean
            if True, only build and return the affine matrix;
            if False, apply the transform and return the tensor(s)
        """
        self.shear_range = shear_range
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        # Two independent angles; sampling order (x then y) kept stable.
        shear_x = random.uniform(-self.shear_range, self.shear_range)
        shear_y = random.uniform(-self.shear_range, self.shear_range)
        if self.lazy:
            return Shear3D([shear_x, shear_y], lazy=True)(inputs[0])
        return Shear3D([shear_x, shear_y], interp=self.interp)(*inputs)
class RandomChoiceShear3D(object):
    """Shear a 3D image with angles sampled from a fixed set of values."""

    def __init__(self, values, p=None, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        values : list, tuple or tensor
            candidate shear values to sample from
        p : list or tuple the same length as `values`, optional
            sampling probability for each value; must sum to 1.
            Defaults to a uniform distribution.
        interp : string in {'trilinear', 'nearest'} or list of strings
            interpolation type, optionally one per input
        lazy : boolean
            if True, only build and return the affine matrix;
            if False, apply the transform and return the tensor(s)
        """
        if isinstance(values, (list, tuple)):
            values = th.FloatTensor(values)
        self.values = values
        if p is None:
            # Uniform sampling over the candidate values.
            p = th.ones(len(values)) / len(values)
        elif abs(1.0 - sum(p)) > 1e-3:
            raise ValueError('Probs must sum to 1')
        self.p = p
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        # Two independent draws; order (x then y) kept stable.
        shear_x = th_random_choice(self.values, p=self.p)
        shear_y = th_random_choice(self.values, p=self.p)
        if self.lazy:
            return Shear3D([shear_x, shear_y], lazy=True)(inputs[0])
        return Shear3D([shear_x, shear_y], interp=self.interp)(*inputs)
class Shear3D(object):
    """Shear a 3D image by fixed angles (interpreted as degrees)."""

    def __init__(self, value, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        value : number or 2-list/tuple of numbers
            shear angles in degrees; a single number means (value, 0)
        interp : string in {'trilinear', 'nearest'} or list of strings
            type of interpolation to use, optionally one per input
        lazy : boolean
            if true, only create the affine transform matrix and return that
            if false, perform the transform on the tensor and return the tensor
        """
        if isinstance(value, (list, tuple)):
            self.value = value
        else:
            self.value = (value, 0)
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        if not isinstance(self.interp, (tuple, list)):
            interp = [self.interp] * len(inputs)
        else:
            interp = self.interp
        theta_x = (math.pi * self.value[0]) / 180
        theta_y = (math.pi * self.value[1]) / 180
        # NOTE(review): this matrix mixes theta_x and theta_y within the
        # same rows (rotation-like layout); kept exactly as before to
        # preserve behavior, but worth confirming against the intended
        # shear definition.
        shear_matrix = th.FloatTensor([[1, 0, 0, 0],
                                       [0, math.cos(theta_x), math.sin(theta_y), 0],
                                       [0, -math.sin(theta_x), math.cos(theta_y), 0],
                                       [0, 0, 0, 1]])
        if self.lazy:
            return shear_matrix
        outputs = [th_affine3d(_input,
                               shear_matrix,
                               mode=interp[idx],
                               center=True)
                   for idx, _input in enumerate(inputs)]
        # BUGFIX: decide on the output count instead of the leftover loop
        # index, which raised NameError when called with no inputs.
        return outputs if len(outputs) > 1 else outputs[0]
class RandomZoom3D(object):
    """Randomly zoom a 3D image in and/or out.

    Independent per-axis factors are drawn uniformly from
    [zoom_range[0], zoom_range[1]].
    """

    def __init__(self, zoom_range, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        zoom_range : tuple or list with 2 values, both between (0, infinity)
            lower and upper bounds on the zoom factor, e.g.
            (0.7, 1.0) will only zoom in, (1.0, 1.4) will only zoom out,
            and (0.7, 1.4) will randomly do either.
        interp : string in {'trilinear', 'nearest'} or list of strings
            interpolation type, optionally one per input
        lazy : boolean
            if True, only build and return the affine matrix;
            if False, apply the transform and return the tensor(s)
        """
        if not isinstance(zoom_range, (list, tuple)):
            raise ValueError('zoom_range must be tuple or list with 2 values')
        self.zoom_range = zoom_range
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        lower, upper = self.zoom_range[0], self.zoom_range[1]
        # Draw order (x, y, z) kept stable for random-stream compatibility.
        zx = random.uniform(lower, upper)
        zy = random.uniform(lower, upper)
        zz = random.uniform(lower, upper)
        if self.lazy:
            return Zoom3D([zz, zy, zx], lazy=True)(inputs[0])
        return Zoom3D([zz, zy, zx], interp=self.interp)(*inputs)
class RandomChoiceZoom3D(object):
    """Zoom a 3D image with per-axis factors sampled from a fixed set."""

    def __init__(self, values, p=None, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        values : list, tuple or tensor
            candidate zoom factors to sample from
        p : list or tuple the same length as `values`, optional
            sampling probability for each value; must sum to 1.
            Defaults to a uniform distribution.
        interp : string in {'trilinear', 'nearest'} or list of strings
            interpolation type, optionally one per input
        lazy : boolean
            if True, only build and return the affine matrix;
            if False, apply the transform and return the tensor(s)
        """
        if isinstance(values, (list, tuple)):
            values = th.FloatTensor(values)
        self.values = values
        if p is None:
            # Uniform sampling over the candidate values.
            p = th.ones(len(values)) / len(values)
        elif abs(1.0 - sum(p)) > 1e-3:
            raise ValueError('Probs must sum to 1')
        self.p = p
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        # Draw order (x, y, z) kept stable for random-stream compatibility.
        zx = th_random_choice(self.values, p=self.p)
        zy = th_random_choice(self.values, p=self.p)
        zz = th_random_choice(self.values, p=self.p)
        if self.lazy:
            return Zoom3D([zz, zy, zx], lazy=True)(inputs[0])
        return Zoom3D([zz, zy, zx], interp=self.interp)(*inputs)
class Zoom3D(object):
    """Zoom a 3D image by fixed per-axis factors."""

    def __init__(self, value, interp='trilinear', lazy=False):
        """
        Arguments
        ---------
        value : float or 3-tuple of float
            fractional zoom per axis (z, y, x):
            =1 no zoom, >1 zoom-in, <1 zoom-out.
            A single float is used for all three axes.
        interp : string in {'trilinear', 'nearest'} or list of strings
            type of interpolation to use, optionally one per input
        lazy : boolean
            if true, only create the affine transform matrix and return that
            if false, perform the transform on the tensor and return the tensor
        """
        if not isinstance(value, (tuple, list)):
            value = (value, value, value)
        self.value = value
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        if not isinstance(self.interp, (tuple, list)):
            interp = [self.interp] * len(inputs)
        else:
            interp = self.interp
        zz, zy, zx = self.value
        # Diagonal scaling matrix in homogeneous coordinates.
        zoom_matrix = th.FloatTensor([[zz, 0, 0, 0],
                                      [0, zy, 0, 0],
                                      [0, 0, zx, 0],
                                      [0, 0, 0, 1]])
        if self.lazy:
            return zoom_matrix
        outputs = [th_affine3d(_input,
                               zoom_matrix,
                               mode=interp[idx],
                               center=True)
                   for idx, _input in enumerate(inputs)]
        # BUGFIX: decide on the output count instead of the leftover loop
        # index, which raised NameError when called with no inputs.
        return outputs if len(outputs) > 1 else outputs[0]
| StarcoderdataPython |
1736499 | <filename>prod-1/6-reduce/datavisualization/plot_hamming.py
import bmw
import numpy as np
import matplotlib.pyplot as plt
# Problem definition from the refined data directory (parsed but not
# referenced again below)
problem = bmw.Problem.parse(filepath='../../../data/3-refined')
# Phase-1 results: constellation matrix plus per-vehicle type indices
dat1 = np.load('../../2-prod/test-0.npz')
constellation1 = dat1['constellation']
constellation_type_indices1 = dat1['constellation_type_indices']
# Phase-2 results from the current stage
dat2 = np.load('../test-0.npz')
constellation2 = dat2['constellation']
# Keep only the first 60 test vehicles for the comparison plot
constellation1 = constellation1[:60, :]
constellation_type_indices1 = constellation_type_indices1[:60]
x = np.zeros(60,dtype=int)
y1 = np.zeros(60,dtype=int)
y2 = np.zeros(60,dtype=int)
# Per-vehicle feature counts = row sums of each constellation matrix
for index in range(60):
    x[index] = index
    y1[index] = np.sum(constellation1[index,:])
    y2[index] = np.sum(constellation2[index,:])
plt.rcParams.update({'font.size': 8})
plt.clf()
# Overlaid bars: phase 2 (red) is drawn on top of phase 1 (blue)
plt.bar(x,y1,color='b',label='Phase 1')
plt.bar(x,y2,color='r',label='Phase 2')
# Clip the y axis to 50-100 to emphasise the difference between phases
plt.axis([-1, 60, 50, 100])
plt.yticks(np.arange(0, 100+1, 25))
plt.legend(loc=1)
plt.xlabel('Test Vehicle')
plt.ylabel('Number of Features Per Test Vehicle')
plt.savefig('hamming_2.pdf',bbox_inches='tight')
| StarcoderdataPython |
4809802 | <filename>model/1_prepare_data_and_inference.py<gh_stars>0
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import numpy as np
import pandas as pd
from enthalpygradients import EnthalpyGradient
import time
from model.auxiliary import read_weather_data_scenario
from model.constants import COP_cooling, COP_heating, RH_base_cooling_perc, RH_base_heating_perc, T_base_cooling_C, \
T_base_heating_C, ACH_Commercial, ACH_Residential
from pointers import METADATA_FILE_PATH, INTERMEDIATE_RESULT_FILE_PATH
def main():
    """Run the full pipeline: load metadata, compute per-city intensities,
    aggregate them to weighted scenario averages, scale by sampled floor
    areas and write the final table to INTERMEDIATE_RESULT_FILE_PATH."""
    # local variables
    output_path = INTERMEDIATE_RESULT_FILE_PATH
    # Scenario names, cities and their climate regions from the metadata workbook
    scenarios_array = pd.read_excel(METADATA_FILE_PATH, sheet_name='SCENARIOS')['SCENARIO'].values
    cities_array = pd.read_excel(METADATA_FILE_PATH, sheet_name='CITIES')['CITY'].values
    floor_area_predictions_df = pd.read_excel(METADATA_FILE_PATH, sheet_name="FLOOR_AREA").set_index('year')
    climate_region_array = pd.read_excel(METADATA_FILE_PATH, sheet_name='CITIES')['Climate Region'].values
    floor_area_climate_df = pd.read_excel(METADATA_FILE_PATH, sheet_name="FLOOR_AREA_CLIMATE").set_index('Climate Region')
    # calculate specific energy consumption per major city
    specific_thermal_consumption_per_city_df = calc_specific_energy_per_major_city(cities_array,
                                                                                   climate_region_array,
                                                                                   floor_area_climate_df,
                                                                                   scenarios_array)
    # calculate weighted average per scenario
    data_weighted_average_df = calc_weighted_average_per_scenario(specific_thermal_consumption_per_city_df)
    # calculate the energy consumption per scenario incorporating variance in built areas
    data_final_df = calc_total_energy_consumption_per_scenario(data_weighted_average_df, floor_area_predictions_df,
                                                               scenarios_array)
    # save the results to disk
    data_final_df.to_csv(output_path, index=False)
    print("done")
def calc_total_energy_consumption_per_scenario(data_weighted_average_df, floor_area_predictions_df, scenarios_array):
    """Scale specific energy intensities up to total consumption per scenario.

    For every scenario and building sector, the specific heating/cooling
    intensity [kWh/m2/yr] is multiplied by 100 Monte-Carlo samples of the
    predicted gross floor area (normal distribution per sector and year)
    and converted to EJ/yr.

    :param data_weighted_average_df: intensities with columns SCENARIO, YEAR,
        BUILDING_CLASS, TOTAL_HEATING_kWh_m2_yr, TOTAL_COOLING_kWh_m2_yr
    :param floor_area_predictions_df: floor-area statistics indexed by year,
        with GFA_mean_<sector>_m2 / GFA_sd_<sector>_m2 columns
    :param scenarios_array: iterable of scenario names to process
    :return: DataFrame with one row per floor-area sample
    """
    frames = []
    for scenario in scenarios_array:
        data_scenario = data_weighted_average_df[data_weighted_average_df["SCENARIO"] == scenario]
        year = data_scenario['YEAR'].values[0]
        data_floor_area_scenario = floor_area_predictions_df.loc[float(year)]
        for sector in ['Residential', 'Commercial']:
            # specific intensities for this scenario/sector
            total_heating_kWhm2yr = \
                data_scenario[data_scenario["BUILDING_CLASS"] == sector]['TOTAL_HEATING_kWh_m2_yr'].values[0]
            total_cooling_kWhm2yr = \
                data_scenario[data_scenario["BUILDING_CLASS"] == sector]['TOTAL_COOLING_kWh_m2_yr'].values[0]
            # propagate uncertainty in the total built area (100 samples)
            mean_m2 = data_floor_area_scenario['GFA_mean_' + sector + '_m2']
            std_m2 = data_floor_area_scenario['GFA_sd_' + sector + '_m2']
            GFA_m2 = np.random.normal(mean_m2, std_m2, 100)
            # kWh -> EJ conversion factor is 3.6E-12
            total_heating_EJ = GFA_m2 * total_heating_kWhm2yr * 3.6E-12
            total_cooling_EJ = GFA_m2 * total_cooling_kWhm2yr * 3.6E-12
            frames.append(pd.DataFrame({"SCENARIO": scenario,
                                        "YEAR": year,
                                        "BUILDING_CLASS": sector,
                                        "GFA_Bm2": GFA_m2 / 1E9,
                                        "TOTAL_HEATING_kWh_m2_yr": total_heating_kWhm2yr,
                                        "TOTAL_COOLING_kWh_m2_yr": total_cooling_kWhm2yr,
                                        "TOTAL_HEATING_EJ": total_heating_EJ,
                                        "TOTAL_COOLING_EJ": total_cooling_EJ}))
    # PERF: concatenate once instead of re-copying a growing DataFrame in
    # every iteration (quadratic behavior of concat-in-loop).
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
def calc_weighted_average_per_scenario(specific_thermal_consumption_per_city_df):
    """Collapse per-city intensities into floor-area-weighted scenario means.

    Cities are first averaged within each (year, sector, scenario, climate)
    group; the group means are then multiplied by the climate's floor-area
    WEIGHT and summed over all climates of a scenario.
    """
    group_cols = ["YEAR", "BUILDING_CLASS", "SCENARIO"]
    climate_means = specific_thermal_consumption_per_city_df.groupby(
        group_cols + ["CLIMATE"], as_index=False).agg('mean')
    # Apply the floor-area weight to both intensity columns.
    for col in ("TOTAL_HEATING_kWh_m2_yr", "TOTAL_COOLING_kWh_m2_yr"):
        climate_means[col] = climate_means[col] * climate_means["WEIGHT"]
    # Summing the weighted climate means yields the scenario-level average.
    return climate_means.groupby(group_cols, as_index=False).agg('sum')
def calc_specific_energy_per_major_city(cities_array, climate_region_array, floor_area_climate_df, scenarios_array):
    """Compute specific heating/cooling demand for every city and scenario.

    Applies the daily enthalpy-gradients model to each city's scenario
    weather to derive sensible + latent intensities [kWh/m2/yr] per building
    sector, tagged with the climate's floor-area weight for later
    aggregation.

    :param cities_array: city names, aligned with climate_region_array
    :param climate_region_array: climate region of each city
    :param floor_area_climate_df: floor-area shares indexed by climate region
    :param scenarios_array: scenario names; each must end with "_<year>"
    :return: DataFrame with one row per (city, scenario, sector)
    """
    frames = []
    for city, climate in zip(cities_array, climate_region_array):
        floor_area_climate = floor_area_climate_df.loc[climate]
        for scenario in scenarios_array:
            # outdoor conditions for this city under the scenario
            T_outdoor_C, RH_outdoor_perc = read_weather_data_scenario(city, scenario)
            # scenario names carry the year as the last "_"-separated token
            year_scenario = scenario.split("_")[-1]
            for sector, ACH in zip(['Residential', 'Commercial'], [ACH_Residential, ACH_Commercial]):
                # cooling demand: sensible + dehumidification
                eg = EnthalpyGradient(T_base_cooling_C, RH_base_cooling_perc)
                sensible_cooling_kWhm2yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc, type='cooling',
                                                                           ACH=ACH, COP=COP_cooling)
                latent_cooling_kWhm2yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc,
                                                                         type='dehumidification', ACH=ACH,
                                                                         COP=COP_cooling)
                # heating demand: sensible + humidification
                eg = EnthalpyGradient(T_base_heating_C, RH_base_heating_perc)
                sensible_heating_kWhm2yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc, type='heating',
                                                                           ACH=ACH, COP=COP_heating)
                latent_heating_kWhm2yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc,
                                                                         type='humidification', ACH=ACH,
                                                                         COP=COP_heating)
                total_heating_kWhm2yr = sensible_heating_kWhm2yr + latent_heating_kWhm2yr
                total_cooling_kWhm2yr = sensible_cooling_kWhm2yr + latent_cooling_kWhm2yr
                frames.append(pd.DataFrame({"CITY": city,
                                            "CLIMATE": climate,
                                            "WEIGHT": floor_area_climate['GFA_mean_' + sector + '_perc'],
                                            "SCENARIO": scenario,
                                            "YEAR": year_scenario,
                                            "BUILDING_CLASS": sector,
                                            "TOTAL_HEATING_kWh_m2_yr": total_heating_kWhm2yr,
                                            "TOTAL_COOLING_kWh_m2_yr": total_cooling_kWhm2yr}, index=[0]))
        print("city {} done".format(city))
    # PERF: concatenate once instead of re-copying a growing DataFrame in
    # every iteration (quadratic behavior of concat-in-loop).
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
if __name__ == "__main__":
    # Time the full pipeline and report the elapsed wall-clock minutes.
    start_s = time.time()
    main()
    elapsed_min = round((time.time() - start_s) / 60, 2)
    print("finished after {} minutes".format(elapsed_min))
| StarcoderdataPython |
25749 | from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect
from campy.gui.events.mouse import onmouseclicked
import random
# Game window size (pixels)
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 400
# Size of the central target zone (pixels)
ZONE_WIDTH = 100
ZONE_HEIGHT = 100
# Ball radius (pixels)
BALL_RADIUS = 15
# Limits for the randomly chosen ball velocity (pixels per step);
# the vertical component never drops below MIN_Y_SPEED
MAX_SPEED = 6
MIN_Y_SPEED = 2
class ZoneGraphics:
    """Window with a fixed central zone and a randomly placed ball.

    Clicking on the ball re-spawns it at a random position outside the
    zone with a fresh random velocity.
    """

    def __init__(self, window_width=WINDOW_WIDTH, window_height=WINDOW_HEIGHT,
                 zone_width=ZONE_WIDTH, zone_height=ZONE_HEIGHT, ball_radius=BALL_RADIUS):
        # Game window
        self.window = GWindow(window_width, window_height, title='Zone Game')
        # Centered rectangular zone
        self.zone = GRect(zone_width, zone_height,
                          x=(window_width - zone_width) / 2,
                          y=(window_height - zone_height) / 2)
        self.zone.color = 'blue'
        self.window.add(self.zone)
        # Ball; velocity stays zero until reset_ball() randomizes it
        self.ball = GOval(2 * ball_radius, 2 * ball_radius)
        self.ball.filled = True
        self.ball.fill_color = 'salmon'
        self.dx = 0
        self.dy = 0
        self.reset_ball()
        # Mouse listener
        onmouseclicked(self.handle_click)

    def set_ball_position(self):
        """Place the ball uniformly at random inside the window."""
        self.ball.x = random.randint(0, self.window.width - self.ball.width)
        self.ball.y = random.randint(0, self.window.height - self.ball.height)

    def set_ball_velocity(self):
        """Pick a random velocity; vertical speed never drops below MIN_Y_SPEED."""
        self.dx = random.randint(0, MAX_SPEED)
        if random.random() > 0.5:
            self.dx = -self.dx
        self.dy = random.randint(MIN_Y_SPEED, MAX_SPEED)
        if random.random() > 0.5:
            self.dy = -self.dy

    def reset_ball(self):
        """Re-spawn the ball outside the zone and give it a new velocity."""
        self.set_ball_position()
        while self.ball_in_zone():
            self.set_ball_position()
        self.set_ball_velocity()
        self.window.add(self.ball)

    def move_ball(self):
        """Advance the ball one step along its current velocity."""
        self.ball.move(self.dx, self.dy)

    def handle_wall_collisions(self):
        """Bounce off the window edges by flipping the velocity component."""
        if self.ball.x <= 0 or self.ball.x + self.ball.width >= self.window.width:
            self.dx = -self.dx
        if self.ball.y <= 0 or self.ball.y + self.ball.height >= self.window.height:
            self.dy = -self.dy

    def ball_in_zone(self):
        """Return True when the ball lies entirely inside the zone."""
        zone_left = self.zone.x
        zone_right = self.zone.x + self.zone.width
        zone_top = self.zone.y
        zone_bottom = self.zone.y + self.zone.height
        inside_x = zone_left <= self.ball.x <= zone_right - self.ball.width
        inside_y = zone_top <= self.ball.y <= zone_bottom - self.ball.height
        return inside_x and inside_y

    def handle_click(self, event):
        """Re-spawn the ball when the click landed on it."""
        obj = self.window.get_object_at(event.x, event.y)
        if self.ball == obj:
            self.reset_ball()
| StarcoderdataPython |
1799524 | <filename>base/views/herosec_views.py
from rest_framework.decorators import api_view
from rest_framework.response import Response
from base.models import HeroSectionImage
from base.serializers import HeroSerializer
@api_view(['GET'])
def get_all_heroSec(request):
    """Return every hero-section image, serialized as a list."""
    queryset = HeroSectionImage.objects.all()
    return Response(HeroSerializer(queryset, many=True).data)
@api_view(['GET'])
def get_heroSec(request, pk):
    """Return the hero-section image(s) whose ``hero_id`` equals ``pk``."""
    queryset = HeroSectionImage.objects.filter(hero_id=pk)
    return Response(HeroSerializer(queryset, many=True).data)
| StarcoderdataPython |
3262352 | #!/home/ash/anaconda3/envs/pytorch/bin/python
import numpy as np
import torch
import torch.nn as nn
from torchsummary import summary
from torch.autograd import Variable
import torch.nn.functional as F
from layers import conv1x1
class CRPBlock(nn.Module):
    """Chained residual pooling block.

    Builds ``n_stages`` chained [5x5 max-pool -> 1x1 conv] stages; the
    output of every stage is added back onto the running input.
    """

    def __init__(self, in_planes, out_planes, n_stages):
        super(CRPBlock, self).__init__()
        # Convs are registered via setattr so parameter names follow the
        # '{stage}_pointwise' pattern; renaming would change the module's
        # state_dict keys (checkpoint compatibility).
        for i in range(n_stages):
            setattr(self, '{}_{}'.format(i + 1, 'pointwise'), conv1x1(in_planes if (i == 0) else out_planes, out_planes, False))
        # NOTE(review): `stride` is stored but never used in forward().
        self.stride = 1
        self.n_stages = n_stages
        self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)

    def forward(self, x):
        # Each stage pools + projects the previous stage's output, and the
        # result is accumulated into x (residual chain).
        top = x
        for i in range(self.n_stages):
            top = self.maxpool(top)
            top = getattr(self, '{}_{}'.format(i + 1, 'pointwise'))(top)
            x = top + x
        return x
class CRN(nn.Module):
    """Cascaded refinement network.

    Chained residual pooling: ``stages`` repetitions of
    [5x5 max-pool -> 1x1 conv], each result added back onto the running
    input (same structure as CRPBlock, but using an nn.ModuleList).
    """

    def __init__(self, in_planes, out_planes, stages):
        super().__init__()
        self.layers = nn.ModuleList()
        self.layers.append(conv1x1(in_planes, out_planes, stride=1))
        for _ in range(1, stages):
            self.layers.append(conv1x1(out_planes, out_planes, stride=1))
        # BUGFIX: forward() reads self.stages, but it was never assigned
        # (AttributeError on the first forward pass).
        self.stages = stages
        self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)

    def forward(self, x):
        top = x
        for i in range(self.stages):
            top = self.maxpool(top)
            # BUGFIX: was `layers[i](x)` — a NameError (missing self.) that
            # also discarded the pooled value; apply the conv to the pooled
            # `top`, matching CRPBlock's chained-pooling structure.
            top = self.layers[i](top)
            x = top + x
        return x
class DepthDecoder(nn.Module):
    '''
    The decoder for depth is implemented in a cascaded refinement manner,
    which decodes depth maps in a top-down pathway. Specifically, multiple-
    scale features from encoder are used to predict maps of corresponding
    sizes via a 3 × 3 convolution followed by sigmoid, and these maps are
    refined in a coarse-to-fine manner towards the final depth map. Both
    FeatureNet and DepthNet take image size of 320 × 1024 as inputs.
    '''
    def __init__(self, num_ch_enc):
        super().__init__()
        # Per-scale channel counts of the encoder features this decoder
        # consumes. NOTE(review): no layers are defined here — the class
        # appears truncated; confirm against the full source file.
        self.num_ch_enc = num_ch_enc
29211 | <reponame>congvmit/mipkit<filename>mipkit/faces/helpers.py<gh_stars>1-10
"""
The MIT License (MIT)
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
Provided license texts might have their own copyrights and restrictions
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from collections import OrderedDict
# For dlib's 68-point facial landmark detector: maps each facial region to
# the (start, end) index range of its landmark points.
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
    ("mouth", (48, 68)),
    ("inner_mouth", (60, 68)),
    ("right_eyebrow", (17, 22)),
    ("left_eyebrow", (22, 27)),
    ("right_eye", (36, 42)),
    ("left_eye", (42, 48)),
    ("nose", (27, 36)),
    ("jaw", (0, 17))
])
# For dlib’s 5-point facial landmark detector:
FACIAL_LANDMARKS_5_IDXS = OrderedDict([
    ("right_eye", (2, 3)),
    ("left_eye", (0, 1)),
    # NOTE(review): `(4)` is just the int 4, not a tuple like the other
    # entries — confirm consumers expect a bare index for the nose.
    ("nose", (4))
])
| StarcoderdataPython |
1768328 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.linguistic2
import typing
from abc import abstractmethod, ABC
if typing.TYPE_CHECKING:
from ..lang.locale import Locale as Locale_70d308fa
class XNumberText(ABC):
    """
    This interface allows to spell out numbers and money amounts.
    The current set of supported languages is:
    **since**
    LibreOffice 6.1
    See Also:
        `API XNumberText <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1linguistic2_1_1XNumberText.html>`_
    """
    # UNO binding metadata emitted by the Cheetah generator (see file
    # header); do not edit by hand.
    __ooo_ns__: str = 'com.sun.star.linguistic2'
    __ooo_full_ns__: str = 'com.sun.star.linguistic2.XNumberText'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.linguistic2.XNumberText'
    @abstractmethod
    def getAvailableLanguages(self) -> 'typing.Tuple[Locale_70d308fa, ...]':
        """
        returns a list of all supported languages.
        """
    @abstractmethod
    def getNumberText(self, aText: str, aLocale: 'Locale_70d308fa') -> str:
        """
        spell out numbers and money amounts
        Please note that text argument can contain prefixes separated by space, for example \"ordinal\" for ordinal numbers, \"ordinal-number\" for ordinal indicators and ISO 4217 currency codes.
        Language modules list the supported prefixes by the input text \"help\".
        Raises:
            com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
        """
__all__ = ['XNumberText']
| StarcoderdataPython |
3365092 | ########################################################################################
### Closing and Opening
import cv2
import numpy as np
# Reading image from its path
img = cv2.imread(".\\chest_xray\\chest_xray\\train\\NORMAL\\NORMAL-28501-0001.jpeg")
# img = cv2.imread(".\\chest_xray\\chest_xray\\train\\PNEUMONIA\\BACTERIA-7422-0001.jpeg")
# Resizing image to (256 x 256)
img = cv2.resize(img, (256, 256))
# 5x5 all-ones structuring element for the morphological operations
kernel = np.ones((5, 5), np.uint8)
# Morphological sequence: erode -> dilate is an "opening"; a further erode
# shrinks bright regions once more.
# NOTE(review): despite the section title, no closing (dilate -> erode) is
# performed here — confirm the intended operation order.
img = cv2.erode(img, kernel)
img = cv2.dilate(img, kernel)
img = cv2.erode(img, kernel)
# Displaying the image until a key is pressed
cv2.imshow("Image", img)
cv2.waitKey(0)
########################################################################################
### High Pass Filter
import cv2
import numpy as np
# Reading image from its path
img = cv2.imread(".\\chest_xray\\chest_xray\\train\\NORMAL\\NORMAL-28501-0001.jpeg")
# Resizing image to (256 x 256)
img = cv2.resize(img, (256, 256))
# High-pass (Laplacian-style) kernel: center 8, all neighbors -1
kernel = np.array([[-1.0, -1.0, -1.0],
                   [-1.0, 8.0, -1.0],
                   [-1.0, -1.0, -1.0]])
# Normalize by the kernel sum; this kernel sums to 0, so the guard keeps
# the divisor at 1 and the kernel passes through unchanged.
kernel = kernel/(np.sum(kernel) if np.sum(kernel)!=0 else 1)
img = cv2.filter2D(img, -1, kernel)
# Displaying the image until a key is pressed
cv2.imshow("Image", img)
cv2.waitKey(0)
########################################################################################
### Low Pass Filter
import cv2
import numpy as np
# Reading image from its path
img = cv2.imread(".\\chest_xray\\chest_xray\\train\\NORMAL\\NORMAL-28501-0001.jpeg")
# img = cv2.imread(".\\chest_xray\\chest_xray\\train\\PNEUMONIA\\BACTERIA-7422-0001.jpeg")
# Resizing image to (256 x 256)
img = cv2.resize(img, (256, 256))
# Using a low pass filter
kernel = (1.0 / 9.0) * np.array([[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0]])
print(kernel)
kernel = kernel / (np.sum(kernel) if np.sum(kernel) != 0 else 1)
img = cv2.filter2D(img, -1, kernel)
# Displaying the image
cv2.imshow("Image", img)
cv2.waitKey(0)
########################################################################################
### Sobel Operator
import cv2
import numpy as np
# Reading image from its path
# img = cv2.imread(".\\chest_xray\\chest_xray\\train\\NORMAL\\NORMAL-28501-0001.jpeg")
img = cv2.imread(".\\chest_xray\\chest_xray\\train\\PNEUMONIA\\BACTERIA-7422-0001.jpeg")
# Resizing image to (256 x 256)
img = cv2.resize(img, (256, 256))
# Using Sobel operator
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1)
abs_grad_x = cv2.convertScaleAbs(sobelx)
abs_grad_y = cv2.convertScaleAbs(sobely)
grad = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
# Displaying the image
cv2.imshow("Image", grad)
cv2.waitKey(0)
########################################################################################
### Thresholding with matlab
%%% Reading input image
img=imread('Images\8.jpeg');
% img=rgb2gray(img);
% Resize with nearest-neighbour so no new grey levels are introduced.
img=imresize(img,[256,256],'nearest');
temp = zeros([256, 256]);
% J = histeq(img);
% Binary threshold: pixels brighter than 150 become white (255); all other
% pixels keep the 0 from the preallocated output.
% NOTE(review): if img is still RGB (rgb2gray is commented out), img(i, j)
% linearly indexes into the red channel only - confirm the input is greyscale.
for i = 1:256
    for j = 1:256
        if img(i, j) > 150
            temp(i, j) = 255;
        end
    end
end
% Show original and thresholded result side by side.
figure(1);
subplot(1,2,1), imshow(img), title("Original");
subplot(1,2,2), imshow(temp), title("Thresholded image");
% subplot(2,2,3), imhist(img, 256), title("Original Histogram");
% subplot(2,2,4), imhist(temp, 256), title("Final Histogram");
% temp1 = zeros([256, 256]);
%
% for i = 1:256
% for j = 1:256
% if J(i, j) > 150
% temp1(i, j) = 255;
% elseif J(i, j) > 120
% temp1(i, j) = 50;
% end
% end
% end
%
% figure(2);
% subplot(2,2,1), imshow(J), title("Hist Eq img");
% subplot(2,2,2), imshow(temp1), title("Hist Eq Thresholded");
% subplot(2,2,3), imhist(J), title("Hist Eq Histogram");
% subplot(2,2,4), imhist(temp1), title("Hist Eq Thresholded Histogram");
| StarcoderdataPython |
3340336 | # -*- coding: utf-8 -*-
"""
demeter
name:tcp.py
author:rabin
"""
import socket
import time
from demeter.core import *
from demeter.mqtt import *
from tornado.tcpserver import TCPServer
from tornado.ioloop import IOLoop
from tornado import stack_context
from tornado.escape import native_str
class Connection(object):
    """One client connection on the Demeter TCP server.

    Frames on the wire are terminated by the literal marker ``|e|`` and carry
    a key/value pair separated by ``|:|``; each decoded pair is forwarded to
    the MQTT publisher (``Pub``).
    """
    # Class-level registry of all live connections, shared by every instance.
    clients = set()
    # End-of-frame marker expected on the wire.
    EOF = '|e|'

    def __init__(self, stream, address):
        Connection.clients.add(self)
        self._pub = Pub()
        self._stream = stream
        self._address = address
        self._stream.set_close_callback(self.on_close)
        # Kick off the asynchronous read loop for this stream.
        self.read_message()

    def read_message(self):
        # Re-arm the read: on_message fires once a full ``|e|``-terminated
        # frame has arrived on the stream.
        self._message_callback = stack_context.wrap(self.on_message)
        self._stream.read_until(self.EOF, self._message_callback)

    def on_message(self, data):
        # Strip the terminator and split the "<key>|:|<value>" payload.
        data = data.replace(self.EOF, '')
        temp = data.split('|:|')
        key = temp[0]
        value = temp[1]
        # Forward the pair to the MQTT publisher.
        self._pub.push(key, value)
        #print "User said:", data[:-1], self._address
        """
        for conn in Connection.clients:
            conn.send_message(data)
        """
        # Schedule the next read so the connection keeps receiving frames.
        self.read_message()

    def send_message(self, data):
        # Write raw data back to the peer (caller is responsible for framing).
        self._stream.write(data)

    def on_close(self):
        #print "A user has left the chat room.", self._address
        # Drop the connection from the shared registry once the stream closes.
        Connection.clients.remove(self)
class Server(TCPServer):
    """Tornado TCP server that wraps every accepted stream in a Connection."""

    def handle_stream(self, stream, address):
        # Connection registers itself in Connection.clients and starts reading.
        Connection(stream, address)
class Client(object):
    """Minimal blocking TCP client speaking the ``|e|``-terminated protocol."""

    # Frame terminator appended to every outgoing message.
    EOF = '|e|'

    def __init__(self, host='0.0.0.0', port=8000):
        # Plain IPv4/TCP stream socket, connected eagerly.
        self.connect = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect.connect((host, port))

    def send(self, msg):
        # Append the frame terminator and push the whole message out.
        self.connect.sendall(msg + self.EOF)
        #data = self.connect.recv(1024)

    def close(self):
        self.connect.close()
1709175 | <filename>src/tf/load_data.py
'''
Data pre process for AFM and FM
@author:
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
'''
import numpy as np
import os
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.feature_extraction import DictVectorizer
class LoadData(object):
    '''Load libfm-formatted data files for AFM and FM.

    Given the path of the data directory and the three file names, builds:

    Train_data / Validation_data / Test_data: dictionaries where 'Y' is a
    list of target values and 'X' is a list of lists of feature indices
    (the positions of the 1-entries in a features_M dimensional one-hot
    vector).  features_M is the total number of distinct features seen
    across all three files.
    '''

    # Three files are needed in the path
    def __init__(self, path, name_train, name_valid, name_test, loss_type="square_loss"):
        self.path = path + "/"
        self.trainfile = self.path + name_train
        self.testfile = self.path + name_test
        self.validationfile = self.path + name_valid
        # Build the global feature -> index mapping before reading any split.
        self.features_M = self.map_features()
        self.Train_data, self.Validation_data, self.Test_data = self.construct_data(loss_type)

    def map_features(self):
        """Scan all files, assign a dense index to every feature token.

        Returns the number of distinct features (features_M).
        """
        self.features = {}
        self.read_features(self.trainfile)
        self.read_features(self.testfile)
        self.read_features(self.validationfile)
        return len(self.features)

    def read_features(self, file):
        """Add every unseen feature token of one file to self.features."""
        i = len(self.features)
        with open(file) as f:
            for line in f:
                # Column 0 is the label; the remaining tokens are features.
                for item in line.strip().split(' ')[1:]:
                    if item not in self.features:
                        self.features[item] = i
                        i = i + 1

    def construct_data(self, loss_type):
        """Read the three splits, binarising labels when loss_type is log_loss."""
        X_, Y_, Y_for_logloss = self.read_data(self.trainfile)
        if loss_type == 'log_loss':
            Train_data = self.construct_dataset(X_, Y_for_logloss)
        else:
            Train_data = self.construct_dataset(X_, Y_)

        X_, Y_, Y_for_logloss = self.read_data(self.validationfile)
        if loss_type == 'log_loss':
            Validation_data = self.construct_dataset(X_, Y_for_logloss)
        else:
            Validation_data = self.construct_dataset(X_, Y_)

        X_, Y_, Y_for_logloss = self.read_data(self.testfile)
        if loss_type == 'log_loss':
            Test_data = self.construct_dataset(X_, Y_for_logloss)
        else:
            Test_data = self.construct_dataset(X_, Y_)
        return Train_data, Validation_data, Test_data

    def read_data(self, file):
        """Read one data file.

        For each row the first column goes into Y_; the remaining columns
        become a row in X_, mapped to indexes in self.features.

        Returns:
            (X_, Y_, Y_for_logloss) where Y_for_logloss is the label
            binarised to 1.0 (label > 0) or 0.0.
        """
        X_ = []
        Y_ = []
        Y_for_logloss = []
        with open(file) as f:
            for line in f:
                items = line.strip().split(' ')
                Y_.append(1.0 * float(items[0]))
                # Binarise: strictly positive labels count as the 1 class.
                if float(items[0]) > 0:
                    v = 1.0
                else:
                    v = 0.0
                Y_for_logloss.append(v)
                X_.append([self.features[item] for item in items[1:]])
        return X_, Y_, Y_for_logloss

    def construct_dataset(self, X_, Y_):
        """Sort samples by feature-row length and pack into a {'X', 'Y'} dict."""
        Data_Dic = {}
        X_lens = [len(line) for line in X_]
        indexs = np.argsort(X_lens)
        Data_Dic['Y'] = [Y_[i] for i in indexs]
        Data_Dic['X'] = [X_[i] for i in indexs]
        return Data_Dic

    def truncate_features(self):
        """Truncate every feature row to the shortest row in Train.

        Returns the common row length.
        Fix: the original used ``xrange``, which does not exist on Python 3
        (NameError); ``range`` is equivalent here.
        """
        num_variable = len(self.Train_data['X'][0])
        for i in range(len(self.Train_data['X'])):
            num_variable = min([num_variable, len(self.Train_data['X'][i])])
        # truncate train, validation and test
        for i in range(len(self.Train_data['X'])):
            self.Train_data['X'][i] = self.Train_data['X'][i][0:num_variable]
        for i in range(len(self.Validation_data['X'])):
            self.Validation_data['X'][i] = self.Validation_data['X'][i][0:num_variable]
        for i in range(len(self.Test_data['X'])):
            self.Test_data['X'][i] = self.Test_data['X'][i][0:num_variable]
        return num_variable
class sparse_to_dense(object):
    """Load a long-format ratings file and expose dense representations.

    Attributes:
        data: raw long-format DataFrame read from the file.
        data_dense: row x column count matrix (wide/pivoted format).
        data_dense_mat: (X, y) tuple of one-hot features and labels.
    """

    def __init__(self, path, filename, header=None, cols=None):
        self.path = path + "/"
        self.header = header
        # Avoid the shared-mutable-default pitfall: [] is built per instance.
        self.cols = [] if cols is None else cols
        self.filepath = self.path + filename
        self.data = self.read_data(self.filepath, self.header)
        self.data_dense = self.long_to_wide(self.data, self.cols)
        self.data_dense_mat = self.dense_matrix(self.data)

    # Read in delimited file using Pandas
    def read_data(self, filepath, header=None):
        """Read a tab-separated file; `header` supplies the column names.

        `header=None` tells pandas the file itself has no header row.
        """
        print("\tReading datafile [" + filepath + "]...")
        data = pd.read_csv(filepath, sep="\t", header=None, names=header)
        return(data)

    # Convert from Long to Wide format
    def long_to_wide(self, data, cols=None, row_id=None, col_id=None):
        """Pivot long-format data into a row x column occurrence-count matrix.

        Rows/columns default to the first two DataFrame columns; `cols`
        optionally subsets the columns before pivoting.
        """
        col_names = list(data)
        group_row = col_names[0] if row_id is None else row_id
        group_col = col_names[1] if col_id is None else col_id
        # Subset (treats None and [] the same way the original treated []).
        if cols:
            data = data[cols]
        # Dense Matrix
        print("\tGrouping by [" + group_row + "] and [" + group_col + "]")
        data = data.groupby([group_row, group_col]).size().unstack(fill_value=0)
        return(data)

    # Convert to Dense Matrix Format
    def dense_matrix(self, data, label_id=None, row_id=None, col_id=None):
        """One-hot encode the feature columns and return (X, y).

        Fix: ``DataFrame.as_matrix`` was removed in pandas 1.0 and raised
        AttributeError; ``to_numpy`` is the supported replacement.
        """
        col_names = list(data)
        group_label = col_names[2] if label_id is None else label_id
        # Features: everything except the label column.
        X_raw = data.drop(group_label, axis=1)
        # Convert ids to strings so DictVectorizer treats them as categories.
        # NOTE(review): assumes the columns are literally named item_id and
        # user_id - confirm against the callers.
        X_raw.item_id = X_raw.item_id.astype(str)
        X_raw.user_id = X_raw.user_id.astype(str)
        # y = Labels (as a (n, 1) ndarray, matching the old as_matrix shape).
        y = data[[group_label]].to_numpy()
        # X - Features, one-hot encoded via DictVectorizer.
        data_to_dict = list(X_raw.T.to_dict().values())
        v = DictVectorizer(sparse=True)
        X = v.fit_transform(data_to_dict)
        X_data = X.toarray()
        return X_data, y
# # Load data and get dense matrix form
# # https://github.com/coreylynch/pyFM
# data = sparse_to_dense(
# path="../../data/ml-100k",
# filename="u.data",
# header=['user_id','item_id','rating','timestamp'],
# cols=['user_id','item_id','rating'])
# print(data.data_dense_mat)
# X, y = data.data_dense_mat
# print(X)
# print(y)
# print(X.shape)
| StarcoderdataPython |
1715818 | """
AVM Fritz!BOX SmartHome Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dokumentation zum Login-Verfahren:
http://www.avm.de/de/Extern/files/session_id/AVM_Technical_Note_-_Session_ID.pdf
Smart Home Interface:
http://www.avm.de/de/Extern/files/session_id/AHA-HTTP-Interface.pdf
"""
from __future__ import print_function, division
import hashlib
from collections import namedtuple
from xml.etree import ElementTree as ET
from requests import Session
# BeautifulSoup is optional: only FritzBox.get_logs() needs it, so fall back
# to None and let get_logs() assert at call time.
try:
    from bs4 import BeautifulSoup
except ImportError:
    BeautifulSoup = None

from .actor import Actor

# Lightweight records returned by the device/log query helpers below.
Device = namedtuple("Device", "deviceid connectstate switchstate")
LogEntry = namedtuple("LogEntry", "date time message hash")
class FritzBox(object):
    """
    Provides easy access to a FritzBOX's SmartHome functions,
    which are poorly documented by AVM...

    A note about SIDs:
    They expire after some time. If you have a long-running daemon,
    you should call login() every 10 minutes or so else you'll get
    nice 403 errors.
    """

    def __init__(self, ip, username, password, use_tls=False, tls_cert_path=''):
        # Scheme depends on the TLS flag; tls_cert_path is passed to
        # requests' `verify=` (CA bundle path, or '' to disable checking).
        if use_tls:
            self.base_url = 'https://' + ip
        else:
            self.base_url = 'http://' + ip
        self.username = username
        self.password = password
        self.sid = None  # session id, populated by login()
        self.tls_cert_path = tls_cert_path
        self.session = Session()

    def login(self):
        """
        Try to login and set the internal session id.

        Please note:
        - Any failed login resets all existing session ids, even of
          other users.
        - SIDs expire after some time
        """
        response = self.session.get(self.base_url + '/login_sid.lua', verify=self.tls_cert_path, timeout=10)
        xml = ET.fromstring(response.text)
        # An all-zero SID means "no session yet" - run challenge-response.
        if xml.find('SID').text == "0000000000000000":
            challenge = xml.find('Challenge').text
            url = self.base_url + "/login_sid.lua"
            response = self.session.get(url, verify=self.tls_cert_path, params={
                "username": self.username,
                "response": self.calculate_response(challenge, self.password),
            }, timeout=10)
            xml = ET.fromstring(response.text)
            sid = xml.find('SID').text
            # Still all-zero: login rejected; the box imposes a lockout.
            if xml.find('SID').text == "0000000000000000":
                blocktime = int(xml.find('BlockTime').text)
                exc = Exception("Login failed, please wait {} seconds".format(
                    blocktime
                ))
                # Expose the lockout so callers can back off accordingly.
                exc.blocktime = blocktime
                raise exc
            self.sid = sid
            return sid

    def calculate_response(self, challenge, password):
        """Calculate response for the challenge-response authentication"""
        # AVM requires MD5 over the UTF-16LE encoding of "<challenge>-<password>".
        to_hash = (challenge + "-" + password).encode("UTF-16LE")
        hashed = hashlib.md5(to_hash).hexdigest()
        return "{0}-{1}".format(challenge, hashed)

    #
    # Useful public methods
    #

    def get_actors(self):
        """
        Returns a list of Actor objects for querying SmartHome devices.
        This is currently the only working method for getting temperature data.
        """
        devices = self.homeautoswitch("getdevicelistinfos")
        xml = ET.fromstring(devices)
        actors = []
        for device in xml.findall('device'):
            actors.append(Actor(fritzbox=self, device=device))
        return actors

    def get_actor_by_ain(self, ain):
        """
        Return a actor identified by it's ain or return None
        """
        for actor in self.get_actors():
            if actor.actor_id == ain:
                return actor

    #
    # "Private" methods
    #

    def homeautoswitch(self, cmd, ain=None, param=None):
        """
        Call a switch method.
        Should only be used by internal library functions.

        Returns the response body as stripped UTF-8 *bytes*.
        """
        assert self.sid, "Not logged in"
        params = {
            'switchcmd': cmd,
            'sid': self.sid,
        }
        if param is not None:
            params['param'] = param
        if ain:
            params['ain'] = ain
        url = self.base_url + '/webservices/homeautoswitch.lua'
        response = self.session.get(url, verify=self.tls_cert_path, params=params, timeout=10)
        response.raise_for_status()
        return response.text.strip().encode('utf-8')

    def get_switch_actors(self):
        """
        Get information about all actors

        This needs 1+(5n) requests where n = number of actors registered

        Deprecated, use get_actors instead.

        Returns a dict:
        [ain] = {
            'name': Name of actor,
            'state': Powerstate (boolean)
            'present': Connected to server? (boolean)
            'power': Current power consumption in mW
            'energy': Used energy in Wh since last energy reset
            'temperature': Current environment temperature in celsius
        }
        """
        actors = {}
        for ain in self.homeautoswitch("getswitchlist").split(','):
            # NOTE(review): homeautoswitch() returns a non-empty string such
            # as "0" or "1", and bool() on any non-empty string is True - so
            # 'state' and 'present' are always truthy here regardless of the
            # actual value. Looks like a bug; confirm before relying on them.
            actors[ain] = {
                'name': self.homeautoswitch("getswitchname", ain),
                'state': bool(self.homeautoswitch("getswitchstate", ain)),
                'present': bool(self.homeautoswitch("getswitchpresent", ain)),
                'power': self.homeautoswitch("getswitchpower", ain),
                'energy': self.homeautoswitch("getswitchenergy", ain),
                'temperature': self.homeautoswitch("getswitchtemperature", ain),
            }
        return actors

    def set_switch_on(self, ain):
        """Switch the power of a actor ON"""
        return self.homeautoswitch('setswitchon', ain)

    def set_switch_off(self, ain):
        """Switch the power of a actor OFF"""
        return self.homeautoswitch('setswitchoff', ain)

    def set_switch_toggle(self, ain):
        """Toggle a power switch and return the new state"""
        return self.homeautoswitch('setswitchtoggle', ain)

    #
    # DeviceID based methods
    #
    # Inspired by:
    # https://github.com/valpo/fritzbox/blob/master/fritzbox/fritzautohome.py
    #

    def get_devices(self):
        """
        Return a list of devices.
        Deprecated, use get_actors instead.
        """
        url = self.base_url + '/net/home_auto_query.lua'
        response = self.session.get(url, verify=self.tls_cert_path, params={
            'sid': self.sid,
            'command': 'AllOutletStates',
            'xhr': 0,
        }, timeout=15)
        response.raise_for_status()
        data = response.json()
        # Device fields are flattened into numbered keys (DeviceID_1, ...).
        count = int(data["Outlet_count"])
        devices = []
        for i in range(1, count + 1):
            device = Device(
                int(data["DeviceID_{0}".format(i)]),
                int(data["DeviceConnectState_{0}".format(i)]),
                int(data["DeviceSwitchState_{0}".format(i)])
            )
            devices.append(device)
        return devices

    def get_consumption(self, deviceid, timerange="10"):
        """
        Return all available energy consumption data for the device.
        You need to divice watt_values by 100 and volt_values by 1000
        to get the "real" values.

        :return: dict
        """
        tranges = ("10", "24h", "month", "year")
        if timerange not in tranges:
            raise ValueError(
                "Unknown timerange. Possible values are: {0}".format(tranges)
            )
        url = self.base_url + "/net/home_auto_query.lua"
        response = self.session.get(url, verify=self.tls_cert_path, params={
            'sid': self.sid,
            'command': 'EnergyStats_{0}'.format(timerange),
            'id': deviceid,
            'xhr': 0,
        }, timeout=15)
        response.raise_for_status()
        data = response.json()
        result = {}
        # Single result values
        values_map = {
            'MM_Value_Amp': 'mm_value_amp',
            'MM_Value_Power': 'mm_value_power',
            'MM_Value_Volt': 'mm_value_volt',
            'EnStats_average_value': 'enstats_average_value',
            'EnStats_max_value': 'enstats_max_value',
            'EnStats_min_value': 'enstats_min_value',
            'EnStats_timer_type': 'enstats_timer_type',
            'sum_Day': 'sum_day',
            'sum_Month': 'sum_month',
            'sum_Year': 'sum_year',
        }
        for avm_key, py_key in values_map.items():
            result[py_key] = int(data[avm_key])
        # Stats counts: again flattened into numbered 1-based keys.
        count = int(data["EnStats_count"])
        watt_values = [None for i in range(count)]
        volt_values = [None for i in range(count)]
        for i in range(1, count + 1):
            watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
            volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
        result['watt_values'] = watt_values
        result['volt_values'] = volt_values
        return result

    def get_logs(self):
        """
        Return the system logs since the last reboot.

        Requires BeautifulSoup (bs4); entries are scraped from the print view
        of the syslog page.
        """
        assert BeautifulSoup, "Please install bs4 to use this method"
        url = self.base_url + "/system/syslog.lua"
        response = self.session.get(url, verify=self.tls_cert_path, params={
            'sid': self.sid,
            'stylemode': 'print',
        }, timeout=15)
        response.raise_for_status()
        entries = []
        tree = BeautifulSoup(response.text)
        rows = tree.find('table').find_all('tr')
        for row in rows:
            columns = row.find_all("td")
            date = columns[0].string
            time = columns[1].string
            message = columns[2].find("a").string
            # Hash of "<date> <time> <message>" gives a stable entry id.
            # NOTE(review): message.encode(...) inside format() looks like
            # Python-2 era code; on Python 3 it embeds a b'...' repr - confirm
            # the intended runtime.
            merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
            msg_hash = hashlib.md5(merged).hexdigest()
            entries.append(LogEntry(date, time, message, msg_hash))
        return entries
| StarcoderdataPython |
4827256 | <gh_stars>0
# terrascript/resource/github.py
import terrascript
# Auto-generated Terraform resource wrappers for the GitHub provider.
# Each class only tags the Terraform resource type by its name; all
# configuration handling lives in the terrascript.Resource base class.
class github_branch_protection(terrascript.Resource):
    pass


class github_issue_label(terrascript.Resource):
    pass


class github_membership(terrascript.Resource):
    pass


class github_organization_block(terrascript.Resource):
    pass


class github_organization_project(terrascript.Resource):
    pass


class github_organization_webhook(terrascript.Resource):
    pass


class github_project_column(terrascript.Resource):
    pass


class github_repository_collaborator(terrascript.Resource):
    pass


class github_repository_deploy_key(terrascript.Resource):
    pass


class github_repository_project(terrascript.Resource):
    pass


class github_repository_webhook(terrascript.Resource):
    pass


class github_repository(terrascript.Resource):
    pass


class github_team_membership(terrascript.Resource):
    pass


class github_team_repository(terrascript.Resource):
    pass


class github_team(terrascript.Resource):
    pass


class github_user_gpg_key(terrascript.Resource):
    pass


class github_user_invitation_accepter(terrascript.Resource):
    pass


class github_user_ssh_key(terrascript.Resource):
    pass


# Public API of this generated module.
__all__ = [
    "github_branch_protection",
    "github_issue_label",
    "github_membership",
    "github_organization_block",
    "github_organization_project",
    "github_organization_webhook",
    "github_project_column",
    "github_repository_collaborator",
    "github_repository_deploy_key",
    "github_repository_project",
    "github_repository_webhook",
    "github_repository",
    "github_team_membership",
    "github_team_repository",
    "github_team",
    "github_user_gpg_key",
    "github_user_invitation_accepter",
    "github_user_ssh_key",
]
3354405 | import os
import sys
import urllib
import time
import logging
import json
import shutil
import gc
import pytest
import mock
def pytest_addoption(parser):
    """Register the --slow flag so slow tests can be opted into."""
    parser.addoption(
        "--slow",
        action="store_true",
        default=False,
        help="Also run slow tests",
    )
# Config
# PhantomJS binary location differs per platform (bundled exe on Windows,
# PATH lookup elsewhere).
if sys.platform == "win32":
    PHANTOMJS_PATH = "tools/phantomjs/bin/phantomjs.exe"
else:
    PHANTOMJS_PATH = "phantomjs"
# URL of an externally running ZeroNet client used by browser tests.
SITE_URL = "http://127.0.0.1:43110"

sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + "/../lib"))  # External modules directory
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + "/.."))  # Imports relative to src dir

from Config import config
config.argv = ["none"]  # Dont pass any argv to config parser
config.parse(silent=True)  # Plugins need to access the configuration

logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)

from Plugin import PluginManager
PluginManager.plugin_manager.loadPlugins()
config.loadPlugins()
config.parse()  # Parse again to add plugin configuration options

config.data_dir = "src/Test/testdata"  # Use test data for unittests
config.debug_socket = True  # Use test data for unittests
config.verbose = True  # Use test data for unittests
config.tor = "disabled"  # Don't start Tor client
config.trackers = []

os.chdir(os.path.abspath(os.path.dirname(__file__) + "/../.."))  # Set working dir

# Cleanup content.db caches
if os.path.isfile("%s/content.db" % config.data_dir):
    os.unlink("%s/content.db" % config.data_dir)
if os.path.isfile("%s-temp/content.db" % config.data_dir):
    os.unlink("%s-temp/content.db" % config.data_dir)

import gevent
from gevent import monkey
# thread=False: keep real threads so the test tooling (pytest, selenium)
# keeps working while sockets/time are gevent-patched.
monkey.patch_all(thread=False)

from Site import Site
from Site import SiteManager
from User import UserManager
from File import FileServer
from Connection import ConnectionServer
from Crypt import CryptConnection
from Ui import UiWebsocket
from Tor import TorManager
from Content import ContentDb
from util import RateLimit
from Db import Db

# SiteManager.site_manager.load = mock.MagicMock(return_value=True)  # Don't try to load from sites.json
# SiteManager.site_manager.save = mock.MagicMock(return_value=True)  # Don't try to load from sites.json
@pytest.fixture(scope="session")
def resetSettings(request):
open("%s/sites.json" % config.data_dir, "w").write("{}")
open("%s/users.json" % config.data_dir, "w").write("""
{
"<KEY>": {
"certs": {},
"master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
"sites": {}
}
}
""")
@pytest.fixture(scope="session")
def resetTempSettings(request):
data_dir_temp = config.data_dir + "-temp"
if not os.path.isdir(data_dir_temp):
os.mkdir(data_dir_temp)
open("%s/sites.json" % data_dir_temp, "w").write("{}")
open("%s/users.json" % data_dir_temp, "w").write("""
{
"<KEY>": {
"certs": {},
"master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
"sites": {}
}
}
""")
def cleanup():
os.unlink("%s/sites.json" % data_dir_temp)
os.unlink("%s/users.json" % data_dir_temp)
request.addfinalizer(cleanup)
@pytest.fixture()
def site(request):
    """Per-test Site backed by a pristine copy of the "-original" test data."""
    # Snapshot greenlets so cleanup can kill only those spawned by the test.
    threads_before = [obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet)]
    # Reset ratelimit
    RateLimit.queue_db = {}
    RateLimit.called_db = {}

    site = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
    site.announce = mock.MagicMock(return_value=True)  # Don't try to find peers from the net

    # Always use original data
    assert "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT" in site.storage.getPath("")  # Make sure we dont delete everything
    shutil.rmtree(site.storage.getPath(""), True)
    shutil.copytree(site.storage.getPath("") + "-original", site.storage.getPath(""))

    def cleanup():
        # Order matters: drop files/db rows before closing and unlinking the db.
        site.storage.deleteFiles()
        site.content_manager.contents.db.deleteSite(site)
        del SiteManager.site_manager.sites["1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"]
        site.content_manager.contents.db.close()
        db_path = "%s/content.db" % config.data_dir
        os.unlink(db_path)
        del ContentDb.content_dbs[db_path]
        # Kill greenlets the test spawned so they don't leak into later tests.
        gevent.killall([obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet) and obj not in threads_before])
    request.addfinalizer(cleanup)

    site = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")  # Create new Site object to load content.json files
    if not SiteManager.site_manager.sites:
        SiteManager.site_manager.sites = {}
    SiteManager.site_manager.sites["1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"] = site
    return site
@pytest.fixture()
def site_temp(request):
    """Second Site instance operating on the "-temp" data dir."""
    threads_before = [obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet)]
    # Temporarily point config.data_dir at the "-temp" copy while the Site
    # object is constructed, so its storage paths resolve there.
    with mock.patch("Config.config.data_dir", config.data_dir + "-temp"):
        site_temp = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
        site_temp.announce = mock.MagicMock(return_value=True)  # Don't try to find peers from the net

    def cleanup():
        site_temp.storage.deleteFiles()
        site_temp.content_manager.contents.db.deleteSite(site_temp)
        site_temp.content_manager.contents.db.close()
        db_path = "%s-temp/content.db" % config.data_dir
        os.unlink(db_path)
        del ContentDb.content_dbs[db_path]
        gevent.killall([obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet) and obj not in threads_before])
    request.addfinalizer(cleanup)
    return site_temp
@pytest.fixture(scope="session")
def user():
user = UserManager.user_manager.get()
user.sites = {} # Reset user data
return user
@pytest.fixture(scope="session")
def browser():
try:
from selenium import webdriver
browser = webdriver.PhantomJS(executable_path=PHANTOMJS_PATH, service_log_path=os.path.devnull)
browser.set_window_size(1400, 1000)
except Exception, err:
raise pytest.skip("Test requires selenium + phantomjs: %s" % err)
return browser
@pytest.fixture(scope="session")
def site_url():
try:
urllib.urlopen(SITE_URL).read()
except Exception, err:
raise pytest.skip("Test requires zeronet client running: %s" % err)
return SITE_URL
@pytest.fixture(scope="session")
def file_server(request):
request.addfinalizer(CryptConnection.manager.removeCerts) # Remove cert files after end
file_server = FileServer("127.0.0.1", 1544)
gevent.spawn(lambda: ConnectionServer.start(file_server))
# Wait for port opening
for retry in range(10):
time.sleep(0.1) # Port opening
try:
conn = file_server.getConnection("127.0.0.1", 1544)
conn.close()
break
except Exception, err:
print err
assert file_server.running
def stop():
file_server.stop()
request.addfinalizer(stop)
return file_server
@pytest.fixture()
def ui_websocket(site, file_server, user):
    """UiWebsocket wired to a fake websocket that records the last result."""
    class WsMock:
        # Stand-in for the websocket: keeps only the "result" field of the
        # last message sent by the server side.
        def __init__(self):
            self.result = None

        def send(self, data):
            self.result = json.loads(data)["result"]

    ws_mock = WsMock()
    ui_websocket = UiWebsocket(ws_mock, site, file_server, user, None)

    def testAction(action, *args, **kwargs):
        # Invoke action<Name>(...) with a dummy request id and return the
        # result captured by WsMock.
        func = getattr(ui_websocket, "action%s" % action)
        func(0, *args, **kwargs)
        return ui_websocket.ws.result
    # Convenience hook used by the tests.
    ui_websocket.testAction = testAction
    return ui_websocket
@pytest.fixture(scope="session")
def tor_manager():
try:
tor_manager = TorManager()
assert tor_manager.connect()
tor_manager.startOnions()
except Exception, err:
raise pytest.skip("Test requires Tor with ControlPort: %s, %s" % (config.tor_controller, err))
return tor_manager
@pytest.fixture()
def db(request):
    """Fresh Db instance with two test tables mapped from data.json."""
    db_path = "%s/zeronet.db" % config.data_dir
    schema = {
        "db_name": "TestDb",
        "db_file": "%s/zeronet.db" % config.data_dir,
        # data.json rows feed both tables; test_importfilter only imports
        # the listed columns.
        "maps": {
            "data.json": {
                "to_table": [
                    "test",
                    {"node": "test", "table": "test_importfilter", "import_cols": ["test_id", "title"]}
                ]
            }
        },
        "tables": {
            "test": {
                "cols": [
                    ["test_id", "INTEGER"],
                    ["title", "TEXT"],
                    ["json_id", "INTEGER REFERENCES json (json_id)"]
                ],
                "indexes": ["CREATE UNIQUE INDEX test_id ON test(test_id)"],
                "schema_changed": 1426195822
            },
            "test_importfilter": {
                "cols": [
                    ["test_id", "INTEGER"],
                    ["title", "TEXT"],
                    ["json_id", "INTEGER REFERENCES json (json_id)"]
                ],
                "indexes": ["CREATE UNIQUE INDEX test_importfilter_id ON test_importfilter(test_id)"],
                "schema_changed": 1426195822
            }
        }
    }

    # Always start from an empty database file.
    if os.path.isfile(db_path):
        os.unlink(db_path)
    db = Db(schema, db_path)
    db.checkTables()

    def stop():
        db.close()
        os.unlink(db_path)
    request.addfinalizer(stop)
    return db
| StarcoderdataPython |
3210765 | #!/usr/bin/env python3
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# device registry - layer for common used functions
#
"""IoT DR: device registry functions.
Will be deployed as Lambda layer."""
import logging
import sys
import time
import boto3
# Module-level logger: replace any inherited handlers (AWS Lambda pre-installs
# one on the root logger) with a single stdout handler and a uniform format.
logger = logging.getLogger()
# Fix: iterate over a *copy* of the handler list. Removing from the live list
# while iterating it skips every other handler, leaving stale ones attached.
for h in list(logger.handlers):
    logger.removeHandler(h)
h = logging.StreamHandler(sys.stdout)
FORMAT = '%(asctime)s [%(levelname)s] - %(filename)s:%(lineno)s - %(funcName)s - %(message)s'
h.setFormatter(logging.Formatter(FORMAT))
logger.addHandler(h)
logger.setLevel(logging.INFO)
class DeviceReplicationCreateThingException(Exception):
    """Raised when replicating a thing-create operation fails."""


class DeviceReplicationDeleteThingException(Exception):
    """Raised when replicating a thing-delete operation fails."""


class DeviceReplicationUpdateThingException(Exception):
    """Raised when replicating a thing-update operation fails."""


class DeviceReplicationGeneralException(Exception):
    """Raised for any other device-registry replication error."""
def get_iot_data_endpoint(region, iot_endpoints):
    """Return the IoT data endpoint for `region`.

    Scans `iot_endpoints` for a hostname containing the region name and
    returns the first match; falls back to a live `describe_endpoint` call
    when nothing matches.

    Raises:
        DeviceReplicationGeneralException: wrapping any underlying error.
    """
    try:
        logger.info('region: {} iot_endpoints: {}'.format(region, iot_endpoints))
        iot_data_endpoint = None
        # First endpoint whose hostname mentions the region wins.
        for endpoint in iot_endpoints:
            if region not in endpoint:
                continue
            logger.info('region: {} in endpoint: {}'.format(region, endpoint))
            iot_data_endpoint = endpoint
            break
        if iot_data_endpoint is not None:
            logger.info('iot_data_endpoint from iot_endpoints: {}'.format(iot_data_endpoint))
            return iot_data_endpoint
        # No match in the provided list - ask the service directly.
        logger.info('iot_data_endpoint not found calling describe_endpoint')
        iot_data_endpoint = (
            boto3.client('iot')
            .describe_endpoint(endpointType='iot:Data-ATS')['endpointAddress']
        )
        logger.info('iot_data_endpoint from describe_endpoint: {}'.format(iot_data_endpoint))
        return iot_data_endpoint
    except Exception as e:
        logger.error('{}'.format(e))
        raise DeviceReplicationGeneralException(e)
def thing_exists(c_iot, thing_name):
    """Return True when `thing_name` is registered with the given IoT client."""
    logger.debug("entering thing_exists: thing_name: {}".format(thing_name))
    try:
        resp = c_iot.describe_thing(thingName=thing_name)
    except c_iot.exceptions.ResourceNotFoundException:
        logger.info('thing_name "{}" does not exist'.format(thing_name))
        return False
    except Exception as e:
        logger.error('{}'.format(e))
        raise DeviceReplicationGeneralException(e)
    logger.debug('response: {}'.format(resp))
    logger.info('thing_name "{}" exists'.format(thing_name))
    return True
def policy_exists(c_iot, policy_name):
    """Return True when an IoT policy named `policy_name` exists."""
    logger.info("policy_exists: policy_name: {}".format(policy_name))
    try:
        policy = c_iot.get_policy(policyName=policy_name)
    except c_iot.exceptions.ResourceNotFoundException:
        logger.info('policy_name: {}: does not exist'.format(policy_name))
        return False
    except Exception as e:
        logger.error('{}'.format(e))
        raise DeviceReplicationGeneralException(e)
    logger.debug('response: {}'.format(policy))
    logger.info('policy_name: {}: exists'.format(policy_name))
    return True
def certificate_exists(c_iot, cert_id):
    """Return True when a certificate with id `cert_id` is registered."""
    logger.info("certificate_exists: cert_id: {}".format(cert_id))
    try:
        details = c_iot.describe_certificate(certificateId=cert_id)
    except c_iot.exceptions.ResourceNotFoundException:
        logger.info('cert_id "{}" does not exist'.format(cert_id))
        return False
    except Exception as e:
        logger.error('{}'.format(e))
        raise DeviceReplicationGeneralException(e)
    else:
        logger.debug('response: {}'.format(details))
        logger.info('cert id "{}" exists'.format(cert_id))
        return True
def thing_type_exists(c_iot, thing_type_name):
    """Return True when the thing type `thing_type_name` is registered."""
    logger.info("thing_type_exists: thing_type_name: {}".format(thing_type_name))
    try:
        info = c_iot.describe_thing_type(thingTypeName=thing_type_name)
    except c_iot.exceptions.ResourceNotFoundException:
        logger.info('thing_type_name "{}" does not exist'.format(thing_type_name))
        return False
    except Exception as e:
        logger.error('{}'.format(e))
        raise DeviceReplicationGeneralException(e)
    logger.debug('response: {}'.format(info))
    logger.info('thing_type_name "{}" exists'.format(thing_type_name))
    return True
def create_thing_type(c_iot, thing_type_name):
    """Create the thing type unless it already exists (idempotent)."""
    logger.info('create_thing_type: thing_type_name: {}'.format(thing_type_name))
    try:
        if thing_type_exists(c_iot, thing_type_name):
            return
        response = c_iot.create_thing_type(thingTypeName=thing_type_name)
        logger.info('create_thing_type: response: {}'.format(response))
    except Exception as e:
        logger.error('create_thing_type: {}'.format(e))
        raise DeviceReplicationCreateThingException(e)
def create_thing(c_iot, c_iot_primary, thing_name, thing_type_name, attrs):
    """Replicate an IoT thing from the primary region into this region.

    The thing is created only when it exists in the primary region and does
    not yet exist in the target region.  ``thing_type_name`` and ``attrs``
    are optional; the thing type is created on demand before use.

    Raises:
        DeviceReplicationCreateThingException: on any failure.
    """
    logger.info('create_thing: thing_name: {} thing_type_name: {} attrs: {}'.
                format(thing_name, thing_type_name, attrs))
    try:
        if not thing_exists(c_iot_primary, thing_name):
            # The primary region is the source of truth: never create a
            # thing that has no primary counterpart.
            # (fixed grammar of the warning: "will not being" -> "will not be")
            logger.warning(
                'thing_name "{}" does not exist in primary region "{}", will not be created'.
                format(thing_name, c_iot_primary.meta.region_name))
            return
        if not thing_exists(c_iot, thing_name):
            # Four cases depending on which optional pieces were provided.
            if thing_type_name and attrs:
                logger.info('thing_name: {}: thing_type_name and attrs'.format(thing_name))
                create_thing_type(c_iot, thing_type_name)
                response = c_iot.create_thing(
                    thingName=thing_name,
                    thingTypeName=thing_type_name,
                    attributePayload=attrs
                )
            elif not thing_type_name and attrs:
                logger.info('thing_name: {}: not thing_type_name and attrs'.format(thing_name))
                response = c_iot.create_thing(
                    thingName=thing_name,
                    attributePayload=attrs
                )
            elif thing_type_name and not attrs:
                logger.info('thing_name: {}: thing_type_name and not attrs'.format(thing_name))
                create_thing_type(c_iot, thing_type_name)
                response = c_iot.create_thing(
                    thingName=thing_name,
                    thingTypeName=thing_type_name
                )
            else:
                logger.info('not thing_type_name and not attrs')
                response = c_iot.create_thing(
                    thingName=thing_name
                )
            logger.info('thing_name: {}: create_thing: response: {}'.format(thing_name, response))
        else:
            logger.info('thing_name: {}: thing exists already'.format(thing_name))
    except Exception as e:
        logger.error('thing_name: {}: create_thing: {}'.format(thing_name, e))
        raise DeviceReplicationCreateThingException(e)
def get_thing_principals(c_iot_primary, thing_name):
    """Return the principal ARNs attached to *thing_name* in the primary region."""
    try:
        resp = c_iot_primary.list_thing_principals(thingName=thing_name)
        logger.debug(resp)
        principals = resp['principals']
        logger.info('thing_name: {}: principals: {}'.format(thing_name, principals))
        return principals
    except Exception as exc:
        logger.error('thing_name: {}: get_thing_principals: {}'.format(thing_name, exc))
        raise DeviceReplicationGeneralException(exc)
def get_principal_things(c_iot, principal):
    """Return up to 10 thing names attached to *principal* (first page only)."""
    try:
        resp = c_iot.list_principal_things(maxResults=10, principal=principal)
        things = resp['things']
        logger.info('principal: {} things attached: {}'.format(principal, things))
        return things
    except Exception as exc:
        logger.error('{}'.format(exc))
        raise DeviceReplicationGeneralException(exc)
def get_attached_policies(c_iot_primary, cert_arn):
    """Return the policies attached to *cert_arn* (first page, up to 10)."""
    try:
        resp = c_iot_primary.list_attached_policies(
            target=cert_arn, recursive=False, pageSize=10
        )
        logger.debug(resp)
        policies = resp['policies']
        logger.info('cert_arn: {}: policies: {}'.format(cert_arn, policies))
        return policies
    except Exception as exc:
        logger.error('cert_arn: {}: get_attached_policies: {}'.format(cert_arn, exc))
        raise DeviceReplicationGeneralException(exc)
def get_and_create_policy(c_iot, c_iot_primary, policy_name):
    """Copy IoT policy *policy_name* from the primary into the secondary region.

    The policy document is fetched from the primary region; every occurrence
    of the primary region name is rewritten to the secondary region name (so
    region-qualified ARNs stay valid) before the policy is created there.

    Raises:
        DeviceReplicationCreateThingException: on any unexpected failure.
    """
    try:
        primary_region = c_iot_primary.meta.region_name
        secondary_region = c_iot.meta.region_name
        logger.info(
            'primary_region: {} secondary_region: {} policy_name: {}'.format(
                primary_region, secondary_region, policy_name
            )
        )
        response = c_iot_primary.get_policy(policyName=policy_name)
        logger.debug(response)
        logger.info('primary_region: {} policy_document: {}'.format(
            primary_region, response['policyDocument']))
        # NOTE(review): a plain string replace also rewrites any other
        # occurrence of the region name inside the document, not just ARNs.
        policy_document_this_region = response['policyDocument'].replace(
            primary_region, secondary_region
        )
        logger.info('secondary_region: {} policy_document: {}'.format(
            secondary_region, policy_document_this_region))
        response = c_iot.create_policy(
            policyName=policy_name,
            policyDocument=policy_document_this_region
        )
        logger.info('policy_name: {}: create_policy: response: {}'.format(policy_name, response))
    except c_iot.exceptions.ResourceAlreadyExistsException:
        # Benign race: a parallel replication thread created the policy first.
        logger.warning(
            'policy_name {}: exists already - might have been created in a parallel thread'.format(
                policy_name
            )
        )
    except Exception as e:
        logger.error('policy_name: {}: get_and_create_policy: {}'.format(policy_name, e))
        raise DeviceReplicationCreateThingException(e)
def register_cert(c_iot, cert_pem):
    """Register *cert_pem* in this region without a CA and activate it."""
    try:
        resp = c_iot.register_certificate_without_ca(certificatePem=cert_pem, status='ACTIVE')
        logger.info(resp)
    except c_iot.exceptions.ResourceAlreadyExistsException:
        # Benign race with a parallel replication thread.
        logger.warning(
            'certificate exists already - might be created in another thread'
        )
    except Exception as exc:
        logger.error('register_cert: {}'.format(exc))
        raise DeviceReplicationCreateThingException(exc)
def create_thing_with_cert_and_policy(
        c_iot, c_iot_primary, thing_name, thing_type_name, attrs, retries, wait):
    """Replicate a thing plus its certificates and policies into this region.

    Steps: create the thing, poll the primary region for attached principals
    (up to *retries* attempts with increasing back-off of ``wait * attempt``),
    register each certificate locally, copy any missing policies, then attach
    policy and principal in the secondary region.

    Raises:
        DeviceReplicationCreateThingException: when no principals/policies are
            found after all retries, or on any other failure.
    """
    primary_region = c_iot_primary.meta.region_name
    secondary_region = c_iot.meta.region_name
    logger.info(
        'thing_name: {} primary_region: {} secondary_region: {}'.format(
            thing_name, primary_region, secondary_region
        )
    )
    try:
        if not thing_exists(c_iot_primary, thing_name):
            # Source of truth is the primary region - nothing to replicate.
            logger.warning(
                'thing_name "{}" does not exist in primary region "{}", will not be created'.
                format(thing_name, primary_region
                       )
                )
            return
        logger.debug('calling create_thing: c_iot: {} c_iot_primary: {} \
            thing_name: {} thing_type_name: {} attrs: {}'.
                    format(c_iot, c_iot_primary, thing_name, thing_type_name, attrs))
        create_thing(c_iot, c_iot_primary, thing_name, thing_type_name, attrs)
        # Poll for principals: they may lag behind thing creation.
        principals = []
        retries = retries  # no-op self-assignment (kept as-is)
        wait = wait  # no-op self-assignment (kept as-is)
        i = 1
        while not principals and i <= retries:
            logger.info('{}: get_thing_principals for thing_name: {}'.format(i, thing_name))
            i += 1
            principals = get_thing_principals(c_iot_primary, thing_name)
            time.sleep(wait*i)  # back-off grows with the attempt number
        if not principals:
            logger.error('thing_name: {}: no principals attached'.format(thing_name))
            raise DeviceReplicationCreateThingException(
                'no principals attached to thing_name: {}'.format(thing_name))
        for principal in principals:
            # The certificate id is the last path segment of the ARN.
            cert_id = principal.split('/')[-1]
            logger.info(
                'thing_name: {}: principal: {} cert_id: {}'.format(
                    thing_name, principal, cert_id
                )
            )
            response = c_iot_primary.describe_certificate(certificateId=cert_id)
            cert_arn = response['certificateDescription']['certificateArn']
            cert_pem = response['certificateDescription']['certificatePem']
            logger.info('thing_name: {}: cert_arn: {}'.format(thing_name, cert_arn))
            # The same certificate gets a region-local ARN in the secondary region.
            cert_arn_secondary_region = cert_arn.replace(primary_region, secondary_region)
            logger.info(
                'thing_name: {}: cert_arn_secondary_region: {}'.format(
                    thing_name, cert_arn_secondary_region
                )
            )
            if not certificate_exists(c_iot, cert_id):
                logger.info('thing_name: {}: register certificate without CA'.format(thing_name))
                register_cert(c_iot, cert_pem)
            # Poll for policies attached to the certificate, same back-off scheme.
            policies = []
            retries = retries  # no-op self-assignment (kept as-is)
            wait = wait  # no-op self-assignment (kept as-is)
            i = 1
            while not policies and i <= retries:
                logger.info(
                    'thing_name: {}: {}: get_attached_policies for cert_arn: {}'.format(
                        thing_name, i, cert_arn
                    )
                )
                i += 1
                policies = get_attached_policies(c_iot_primary, cert_arn)
                time.sleep(wait*i)
            if not policies:
                logger.error(
                    'thing_name: {}: no policies attached to cert_arn: {}'.format(
                        thing_name, cert_arn
                    )
                )
                raise DeviceReplicationCreateThingException(
                    'no policies attached to cert_arn: {}'.format(cert_arn))
            for policy in policies:
                policy_name = policy['policyName']
                logger.info('thing_name: {}: policy_name: {}'.format(thing_name, policy_name))
                if not policy_exists(c_iot, policy_name):
                    logger.info('thing_name: {}: get_and_create_policy'.format(thing_name))
                    get_and_create_policy(c_iot, c_iot_primary, policy_name)
                response2 = c_iot.attach_policy(
                    policyName=policy_name,
                    target=cert_arn_secondary_region
                )
                logger.info(
                    'thing_name: {}: response attach_policy: {}'.format(
                        thing_name, response2
                    )
                )
            response3 = c_iot.attach_thing_principal(
                thingName=thing_name,
                principal=cert_arn_secondary_region
            )
            logger.info(
                'thing_name: {} response attach_thing_principal: {}'.format(
                    thing_name, response3
                )
            )
    except Exception as e:
        logger.error('thing_name: {}: create_thing_with_cert_and_policy: {}'.format(thing_name, e))
        raise DeviceReplicationCreateThingException(e)
def delete_shadow(thing_name, iot_data_endpoint):
    """Delete the device shadow of *thing_name* via the given iot-data endpoint.

    A missing shadow is not treated as an error; any other failure is wrapped
    in DeviceReplicationGeneralException.
    """
    # Bug fix: the client must be created *before* the try block.  If
    # boto3.client() raised inside it, evaluating
    # c_iot_data.exceptions.ResourceNotFoundException in the except clause
    # would itself raise a NameError and mask the real error.
    c_iot_data = boto3.client('iot-data', endpoint_url='https://{}'.format(iot_data_endpoint))
    try:
        response = c_iot_data.delete_thing_shadow(thingName=thing_name)
        logger.info(
            'thing_name: {}: delete_thing_shadow: response: {}'.format(
                thing_name, response
            )
        )
    except c_iot_data.exceptions.ResourceNotFoundException:
        logger.info('thing_name: {}: shadow does not exist'.format(thing_name))
    except Exception as e:
        logger.error('thing_name: {}: delete_shadow: {}'.format(thing_name, e))
        raise DeviceReplicationGeneralException(e)
def delete_policy(c_iot, policy_name):
    """Delete IoT policy *policy_name* if nothing is attached to it.

    Non-default policy versions are deleted first, since a policy cannot be
    removed while extra versions exist.  A policy that still has targets, or
    that does not exist, is left alone without raising.

    Raises:
        DeviceReplicationGeneralException: on any unexpected failure.
    """
    logger.info('policy_name: {}'.format(policy_name))
    try:
        # NOTE(review): only the first page (up to 10) of targets is checked -
        # confirm whether paginated targets need handling here.
        response = c_iot.list_targets_for_policy(policyName=policy_name, pageSize=10)
        targets = response['targets']
        logger.debug('targets: {}'.format(targets))
        if targets:
            logger.info(
                'policy_name: {}: targets attached, policy will not be deleted'.format(
                    policy_name
                )
            )
            return
        response = c_iot.list_policy_versions(policyName=policy_name)
        logger.info('policy_name: {} versions: {}'.format(
            policy_name, response['policyVersions']))
        for version in response["policyVersions"]:
            # The default version can only go with the policy itself.
            if not version['isDefaultVersion']:
                logger.info(
                    'policy_name: {} deleting policy version: {}'.format(
                        policy_name, version['versionId']
                    )
                )
                c_iot.delete_policy_version(policyName=policy_name,
                                            policyVersionId=version['versionId'])
        logger.info('deleting policy: policy_name: {}'.format(policy_name))
        c_iot.delete_policy(policyName=policy_name)
    except c_iot.exceptions.ResourceNotFoundException:
        logger.info('policy_name: {}: does not exist'.format(policy_name))
    except Exception as e:
        logger.error('delete_policy: {}'.format(e))
        raise DeviceReplicationGeneralException(e)
def delete_thing(c_iot, thing_name, iot_data_endpoint):
    """Delete a thing and, where safe, its certificates, policies and shadow.

    For every principal attached to the thing: detach it, and - only if no
    other thing still uses that principal - inactivate and delete the
    certificate and detach/delete its policies.  Finally the thing itself and
    its shadow are deleted.

    Raises:
        DeviceReplicationDeleteThingException: on any failure.
    """
    logger.info('delete_thing: thing_name: {} iot_data_endpoint: {}'.format(
        thing_name, iot_data_endpoint
    )
    )
    try:
        if not thing_exists(c_iot, thing_name):
            logger.warning('delete_thing: thing does not exist: {}'.format(thing_name))
            return
        r_principals = c_iot.list_thing_principals(thingName=thing_name)
        logger.info('thing_name: {} principals: {}'.format(thing_name, r_principals['principals']))
        for arn in r_principals['principals']:
            # Certificate id is the last path segment of the principal ARN.
            cert_id = arn.split('/')[-1]
            logger.info(
                'detach_thing_principal: thing_name: {} principal arn: {} cert_id: {}'.format(
                    thing_name, arn, cert_id
                )
            )
            r_detach_thing = c_iot.detach_thing_principal(thingName=thing_name, principal=arn)
            detach_thing_principal_status_code = \
                r_detach_thing['ResponseMetadata']['HTTPStatusCode']
            logger.info(
                'thing_name: {} arn: {} detach_thing_principal_status_code: {} \
                response detach_thing_principal: {}'.format(
                    thing_name, arn, detach_thing_principal_status_code, r_detach_thing
                )
            )
            # Treat any non-200 HTTP status as a hard failure.
            if detach_thing_principal_status_code != 200:
                error_message = 'thing_name: {} arn: {} \
                detach_thing_principal_status_code not equal 200: {} '.format(
                    thing_name, arn, detach_thing_principal_status_code
                )
                logger.error(error_message)
                raise Exception(error_message)
            # still things attached to the principal?
            # If yes, don't deactivate cert or detach policies
            things = get_principal_things(c_iot, arn)
            if things:
                logger.info(
                    'still things {} attached to principal {} - \
                    certificate will not be inactvated, policies will not be removed'.format(
                        things, arn
                    )
                )
            else:
                # Certificate must be INACTIVE before it can be deleted.
                logger.info('inactivate cert: thing_name: {} cert_id: {}'.format(
                    thing_name, cert_id))
                r_upd_cert = c_iot.update_certificate(certificateId=cert_id,newStatus='INACTIVE')
                logger.info('update_certificate: cert_id: {} response: {}'.format(
                    cert_id, r_upd_cert))
                r_policies = c_iot.list_principal_policies(principal=arn)
                logger.info('cert arn: {} policies: {}'.format(arn, r_policies['policies']))
                for policy in r_policies['policies']:
                    policy_name = policy['policyName']
                    logger.info('detaching policy policy_name: {}'.format(policy_name))
                    r_detach_pol = c_iot.detach_policy(policyName=policy_name,target=arn)
                    logger.info(
                        'detach_policy: policy_name: {} response: {}'.format(
                            policy_name, r_detach_pol
                        )
                    )
                    # delete_policy is a no-op when other targets remain.
                    delete_policy(c_iot, policy_name)
                r_del_cert = c_iot.delete_certificate(certificateId=cert_id,forceDelete=True)
                logger.info('delete_certificate: cert_id: {} response: {}'.format(
                    cert_id, r_del_cert))
        r_del_thing = c_iot.delete_thing(thingName=thing_name)
        logger.info('delete_thing: thing_name: {} response: {}'.format(thing_name, r_del_thing))
        delete_shadow(thing_name, iot_data_endpoint)
    except Exception as e:
        logger.error('delete_thing: thing_name: {}: {}'.format(thing_name, e))
        raise DeviceReplicationDeleteThingException(e)
def update_thing(c_iot, c_iot_primary, thing_name, thing_type_name, attrs, merge):
    """Update attributes (and optionally the thing type) of a replicated thing.

    The thing is created locally first if it does not exist yet.  Failures
    are wrapped in DeviceReplicationUpdateThingException.
    """
    logger.info('update_thing: thing_name: {}'.format(thing_name))
    try:
        # Ensure the thing exists in this region before updating it.
        create_thing(c_iot, c_iot_primary, thing_name, "", {})
        kwargs = {
            'thingName': thing_name,
            'attributePayload': {
                'attributes': attrs,
                'merge': merge
            },
        }
        if thing_type_name:
            create_thing_type(c_iot, thing_type_name)
            kwargs['thingTypeName'] = thing_type_name
        response = c_iot.update_thing(**kwargs)
        logger.info('update_thing: response: {}'.format(response))
    except Exception as exc:
        logger.error('update_thing: {}'.format(exc))
        raise DeviceReplicationUpdateThingException(exc)
def delete_thing_create_error(c_dynamo, thing_name, table_name):
    """Remove the 'create-thing' error record for *thing_name* from DynamoDB."""
    logger.info('delete_thing_create_error: thing_name: {}'.format(thing_name))
    key = {'thing_name': {'S': thing_name}, 'action': {'S': 'create-thing'}}
    try:
        resp = c_dynamo.delete_item(
            TableName=table_name,
            Key=key
        )
        logger.info('delete_thing_create_error: {}'.format(resp))
    except Exception as exc:
        logger.error("delete_thing_create_error: {}".format(exc))
        raise DeviceReplicationGeneralException(exc)
| StarcoderdataPython |
1628498 | '''
An example script that sums two numbers
separated by whitespace on a single line of the input stream...
'''
# Read two whitespace-separated integers from stdin and print their sum.
a, b = (int(token) for token in input().split())
print('Sum:', a + b)
| StarcoderdataPython |
74311 | <gh_stars>1-10
# Generated by Django 2.2.10 on 2020-02-12 16:14
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the 'restrictions' field to
    # 'restriction' on messageaudit and on historicalmessageaudit.
    # NOTE(review): 'historicalmessageaudit' looks like a history-tracking
    # shadow model of messageaudit (naming only) - confirm in core.models.

    dependencies = [
        ('core', '0007_auto_20200206_0844'),
    ]
    operations = [
        migrations.RenameField(
            model_name='historicalmessageaudit',
            old_name='restrictions',
            new_name='restriction',
        ),
        migrations.RenameField(
            model_name='messageaudit',
            old_name='restrictions',
            new_name='restriction',
        ),
    ]
| StarcoderdataPython |
43094 | <filename>fedlearner/scheduler/scheduler_service.py<gh_stars>1-10
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import logging
from fedlearner.common import scheduler_service_pb2_grpc as ss_grpc
from fedlearner.common import common_pb2 as common_pb
from fedlearner.proxy.channel import make_insecure_channel, ChannelType
class SchedulerServer(ss_grpc.SchedulerServicer):
    """gRPC servicer that forwards SubmitTrainJob requests to a receiver callable."""

    def __init__(self, receiver_fn):
        super(SchedulerServer, self).__init__()
        # Callable invoked with the request; expected to return a
        # common_pb.Status (grounded by SubmitTrainJob below).
        self._receiver_fn = receiver_fn

    def SubmitTrainJob(self, request, context):
        """Handle a SubmitTrainJob RPC; never raises across the gRPC boundary."""
        response = common_pb.Status()
        try:
            response = self._receiver_fn(request)
        except Exception as e:  # pylint: disable=broad-except
            # Bug fixes: 'response.ode' was a typo for 'response.code', and
            # sys.exc_info()[0] is the exception *class*, not a message
            # string, which a protobuf string field would reject.
            response.code = common_pb.StatusCode.STATUS_UNKNOWN_ERROR
            response.error_message = str(e)
        return response
class SchedulerClient(object):
    """Client-side wrapper around the Scheduler gRPC service."""

    def __init__(self, addr):
        self._addr = addr
        # REMOTE mode routes the call through the federation proxy channel.
        channel = make_insecure_channel(addr, mode=ChannelType.REMOTE)
        self._stub = ss_grpc.SchedulerStub(channel)

    def submit_train(self, request):
        """Submit a train job; return True on STATUS_SUCCESS, else False."""
        result = self._stub.SubmitTrainJob(request)
        if result.code == common_pb.StatusCode.STATUS_SUCCESS:
            logging.info("code [%d] submit success.", result.code)
            return True
        logging.error("code [%d] submit failed with error[%s].", result.code,
                      result.error_message)
        return False
| StarcoderdataPython |
1765532 | <filename>src/printable.py
#! /usr/bin/env python3
# printable.py - print a table of printable ASCII characters in base 2, 8, 10, and 16
# One row per printable ASCII code point (32..126): binary, octal, decimal,
# hex, and the character itself.
for code_point in range(32, 127):
    row = f"{code_point:07b} | {code_point:03o} | {code_point:03d} | {code_point:2x} | {chr(code_point):1s}"
    print(row)
| StarcoderdataPython |
1768779 | import http
import logging
import sys
import time
from collections import abc
from copy import copy
from os import getpid
import click
TRACE_LOG_LEVEL = 5
class ColourizedFormatter(logging.Formatter):
    """
    A custom log formatter class that:
    * Outputs the LOG_LEVEL with an appropriate color.
    * If a log call includes an `extras={"color_message": ...}` it will be used
    for formatting the output, instead of the plain text message.
    """
    # Colouring callable per log level; TRACE_LOG_LEVEL is the custom level 5.
    level_name_colors = {
        TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg="blue"),
        logging.DEBUG: lambda level_name: click.style(str(level_name), fg="cyan"),
        logging.INFO: lambda level_name: click.style(str(level_name), fg="green"),
        logging.WARNING: lambda level_name: click.style(str(level_name), fg="yellow"),
        logging.ERROR: lambda level_name: click.style(str(level_name), fg="red"),
        logging.CRITICAL: lambda level_name: click.style(
            str(level_name), fg="bright_red"
        ),
    }

    def __init__(self, fmt=None, datefmt=None, style="%", use_colors=None):
        # An explicit True/False wins; any other value (e.g. None) falls back
        # to TTY auto-detection on stdout.
        if use_colors in (True, False):
            self.use_colors = use_colors
        else:
            self.use_colors = sys.stdout.isatty()
        super().__init__(fmt=fmt, datefmt=datefmt, style=style)

    def color_level_name(self, level_name, level_no):
        """Return *level_name* wrapped in the colour configured for *level_no*."""
        def default(level_name):
            return str(level_name)
        func = self.level_name_colors.get(level_no, default)
        return func(level_name)

    def should_use_colors(self):
        # Hook for subclasses; NOTE(review): __init__ checks stdout directly
        # and never calls this - confirm whether that is intended.
        return True

    def formatMessage(self, record):
        # Mutate a copy so other handlers see the original record untouched.
        recordcopy = copy(record)
        levelname = recordcopy.levelname
        # Pad so level prefixes line up (longest level name is 8 chars).
        seperator = " " * (8 - len(recordcopy.levelname))
        if self.use_colors:
            levelname = self.color_level_name(levelname, recordcopy.levelno)
            if "color_message" in recordcopy.__dict__:
                recordcopy.msg = recordcopy.__dict__["color_message"]
        recordcopy.__dict__["message"] = recordcopy.getMessage()
        recordcopy.__dict__["levelprefix"] = levelname + ":" + seperator
        return super().formatMessage(recordcopy)
class DefaultFormatter(ColourizedFormatter):
    """Formatter for the default logger; colour decision is based on stderr."""
    def should_use_colors(self):
        return sys.stderr.isatty()
class AccessFormatter(ColourizedFormatter):
    """Formatter for HTTP access-log records.

    Expects ``record.args`` to be the 5-tuple
    ``(client_addr, method, full_path, http_version, status_code)``.
    """
    # Colour per status-code class, keyed by status_code // 100 (1xx..5xx).
    status_code_colours = {
        1: lambda code: click.style(str(code), fg="bright_white"),
        2: lambda code: click.style(str(code), fg="green"),
        3: lambda code: click.style(str(code), fg="yellow"),
        4: lambda code: click.style(str(code), fg="red"),
        5: lambda code: click.style(str(code), fg="bright_red"),
    }

    def get_status_code(self, status_code: int):
        """Return '<code> <phrase>', colourized when colours are enabled."""
        try:
            status_phrase = http.HTTPStatus(status_code).phrase
        except ValueError:
            # Non-standard status code: no reason phrase available.
            status_phrase = ""
        status_and_phrase = "%s %s" % (status_code, status_phrase)
        if self.use_colors:
            def default(code):
                return status_and_phrase
            func = self.status_code_colours.get(status_code // 100, default)
            return func(status_and_phrase)
        return status_and_phrase

    def formatMessage(self, record):
        # Mutate a copy so other handlers see the original record untouched.
        recordcopy = copy(record)
        (
            client_addr,
            method,
            full_path,
            http_version,
            status_code,
        ) = recordcopy.args
        status_code = self.get_status_code(status_code)
        request_line = "%s %s HTTP/%s" % (method, full_path, http_version)
        if self.use_colors:
            request_line = click.style(request_line, bold=True)
        recordcopy.__dict__.update(
            {
                "client_addr": client_addr,
                "request_line": request_line,
                "status_code": status_code,
            }
        )
        return super().formatMessage(recordcopy)
class GunicornSafeAtoms(abc.Mapping):
    """Implement atoms necessary for gunicorn log.
    This class does a few things:
    - provide all atoms necessary for gunicorn log formatter
    - collect response body size for reporting from ASGI messages
    - provide mapping interface that returns '-' for missing atoms
    - escapes double quotes found in atom strings
    """
    def __init__(self, scope):
        # ASGI connection scope of the request being logged.
        self.scope = scope
        self.status_code = None
        self.response_headers = {}
        # Accumulated response body size; note this instance attribute
        # shadows the class-level 'response_length' handler method, which
        # stays reachable through the HANDLERS dict built at class creation.
        self.response_length = 0
        self._request_headers = None  # lazy cache for request_headers
    @property
    def request_headers(self):
        """Decoded request headers, built lazily from the ASGI scope."""
        if self._request_headers is None:
            self._request_headers = {
                k.decode("ascii"): v.decode("ascii") for k, v in self.scope["headers"]
            }
        return self._request_headers
    @property
    def duration(self):
        # Request wall-clock duration in seconds; both timestamps are
        # expected to have been stashed into the scope by the caller.
        d = self.scope["response_end_time"] - self.scope["request_start_time"]
        return d
    def on_asgi_message(self, message):
        """Collect status, headers and body size from outgoing ASGI messages."""
        if message["type"] == "http.response.start":
            self.status_code = message["status"]
            self.response_headers = {
                k.decode("ascii"): v.decode("ascii") for k, v in message["headers"]
            }
        elif message["type"] == "http.response.body":
            self.response_length += len(message.get("body", ""))
    def _request_header(self, key):
        return self.request_headers.get(key.lower())
    def _response_header(self, key):
        return self.response_headers.get(key.lower())
    def _wsgi_environ_variable(self, key):
        # FIXME: provide fallbacks to access WSGI environ (at least the
        # required variables).
        return None
    def __getitem__(self, key):
        """Resolve one log atom; '-' for missing values, escaping any '"'."""
        if key in self.HANDLERS:
            retval = self.HANDLERS[key](self)
        elif key.startswith("{"):
            # gunicorn-style dynamic atoms: {name}i / {name}o / {name}e for
            # request header, response header, and environ respectively.
            if key.endswith("}i"):
                retval = self._request_header(key[1:-2])
            elif key.endswith("}o"):
                retval = self._response_header(key[1:-2])
            elif key.endswith("}e"):
                retval = self._wsgi_environ_variable(key[1:-2])
            else:
                retval = None
        else:
            retval = None
        if retval is None:
            return "-"
        if isinstance(retval, str):
            return retval.replace('"', '\\"')
        return retval
    # Registry mapping a one-letter atom code to its handler function.
    HANDLERS = {}
    def _register_handler(key, handlers=HANDLERS):
        # Class-body decorator; populates HANDLERS at class creation time.
        def decorator(fn):
            handlers[key] = fn
            return fn
        return decorator
    @_register_handler("h")
    def _remote_address(self, *args, **kwargs):
        return self.scope["client"][0]
    @_register_handler("l")
    def _dash(self, *args, **kwargs):
        return "-"
    @_register_handler("u")
    def _user_name(self, *args, **kwargs):
        # Not implemented: returns None, rendered as '-' by __getitem__.
        pass
    @_register_handler("t")
    def date_of_the_request(self, *args, **kwargs):
        """Date and time in Apache Common Log Format"""
        return time.strftime("[%d/%b/%Y:%H:%M:%S %z]")
    @_register_handler("r")
    def status_line(self, *args, **kwargs):
        full_raw_path = self.scope["raw_path"] + self.scope["query_string"]
        full_path = full_raw_path.decode("ascii")
        return "{method} {full_path} HTTP/{http_version}".format(
            full_path=full_path, **self.scope
        )
    @_register_handler("m")
    def request_method(self, *args, **kwargs):
        return self.scope["method"]
    @_register_handler("U")
    def url_path(self, *args, **kwargs):
        return self.scope["raw_path"].decode("ascii")
    @_register_handler("q")
    def query_string(self, *args, **kwargs):
        return self.scope["query_string"].decode("ascii")
    @_register_handler("H")
    def protocol(self, *args, **kwargs):
        return "HTTP/%s" % self.scope["http_version"]
    @_register_handler("s")
    def status(self, *args, **kwargs):
        return self.status_code or "-"
    @_register_handler("B")
    def response_length(self, *args, **kwargs):
        # Shadowed by the instance attribute of the same name (see __init__);
        # invoked only through HANDLERS["B"].
        return self.response_length
    @_register_handler("b")
    def response_length_or_dash(self, *args, **kwargs):
        return self.response_length or "-"
    @_register_handler("f")
    def referer(self, *args, **kwargs):
        return self.request_headers.get("referer")
    @_register_handler("a")
    def user_agent(self, *args, **kwargs):
        return self.request_headers.get("user-agent")
    @_register_handler("T")
    def request_time_seconds(self, *args, **kwargs):
        return int(self.duration)
    @_register_handler("D")
    def request_time_microseconds(self, *args, **kwargs):
        return int(self.duration * 1_000_000)
    @_register_handler("L")
    def request_time_decimal_seconds(self, *args, **kwargs):
        return "%.6f" % self.duration
    @_register_handler("p")
    def process_id(self, *args, **kwargs):
        return "<%s>" % getpid()
    def __iter__(self):
        # FIXME: add WSGI environ
        yield from self.HANDLERS
        for k, _ in self.scope["headers"]:
            yield "{%s}i" % k.lower()
        for k in self.response_headers:
            yield "{%s}o" % k.lower()
    def __len__(self):
        # FIXME: add WSGI environ
        return (
            len(self.HANDLERS)
            + len(self.scope["headers"] or ())
            + len(self.response_headers)
        )
| StarcoderdataPython |
9735 | <reponame>adidas/m3d-api<filename>test/core/s3_table_test_base.py
import os
from test.core.emr_system_unit_test_base import EMRSystemUnitTestBase
from test.core.tconx_helper import TconxHelper
class S3TableTestBase(EMRSystemUnitTestBase):
    # tconx fixtures used by the table tests; 101 is the default config,
    # 102/103 cover multi- and single-partition table layouts respectively.
    default_tconx = \
        "test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test101.json"
    multi_partition_tconx = \
        "test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test102.json"
    single_partition_tconx = \
        "test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test103.json"

    def env_setup(
            self,
            tmpdir,
            destination_system,
            destination_database,
            destination_environment,
            destination_table
    ):
        """
        This function builds on top of EMRSystemUnitTestBase.env_setup() and adds test-specific tconx file.

        Note: the tconx is always created from ``default_tconx``, regardless
        of the table code passed in.

        :param tmpdir: test case specific temporary directory where configuration files will be created.
        :param destination_system: destination system code
        :param destination_database: destination database code
        :param destination_environment: destination environment code
        :param destination_table: destination table code

        :return: Function will return several parameters:
                    m3d_config_path: paths of test-specific config.json. Should be passed to M3D API calls.
                    scon_emr_path: paths of test-specific scon_emr
                    tconx_path: paths of test-specific tconx
                    m3d_config_dict: contents of test-specific config.json as dict
                    scon_emr_dict: contents of test-specific scon_emr as dict
        """
        m3d_config_file, scon_emr_file, m3d_config_dict, scon_emr_dict = \
            super(S3TableTestBase, self).env_setup(
                tmpdir,
                destination_system,
                destination_database,
                destination_environment
            )
        # tconx specific part
        tconx_file = TconxHelper.setup_tconx_from_file(
            m3d_config_dict["tags"]["config"],
            destination_system,
            destination_database,
            destination_environment,
            destination_table,
            S3TableTestBase.default_tconx
        )
        return m3d_config_file, scon_emr_file, tconx_file, \
            m3d_config_dict, scon_emr_dict

    @staticmethod
    def assert_one_hql_sent(dump_dir, expected_hql):
        """Assert exactly one .hql file exists in *dump_dir* and that its
        content equals *expected_hql* (whitespace-trimmed, case-insensitive).
        """
        generated_files = map(lambda f: os.path.join(dump_dir, f), os.listdir(dump_dir))
        hql_files = list(filter(lambda f: os.path.isfile(f) and f.endswith(".hql"), generated_files))
        assert len(hql_files) == 1
        hql_file = hql_files[0]
        with open(hql_file, 'r') as hql_f:
            generated_hql = hql_f.read()
        generated_hql_processed = generated_hql.strip().lower()
        expected_hql_processed = expected_hql.strip().lower()
        assert generated_hql_processed == expected_hql_processed
| StarcoderdataPython |
1700312 | <gh_stars>0
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["DeviceMetricColor"]
# Load the FHIR CodeSystem definition shipped next to this module
# (same basename, .json suffix).
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class DeviceMetricColor:
    """
    DeviceMetricColor
    Describes the typical color of representation.
    Status: draft - Version: 4.0.1
    Copyright None
    http://hl7.org/fhir/metric-color
    """
    # One CodeSystemConcept class attribute per code defined in the
    # CodeSystem JSON loaded into _resource above.
    black = CodeSystemConcept(
        {
            "code": "black",
            "definition": "Color for representation - black.",
            "display": "Color Black",
        }
    )
    """
    Color Black
    Color for representation - black.
    """
    red = CodeSystemConcept(
        {
            "code": "red",
            "definition": "Color for representation - red.",
            "display": "Color Red",
        }
    )
    """
    Color Red
    Color for representation - red.
    """
    green = CodeSystemConcept(
        {
            "code": "green",
            "definition": "Color for representation - green.",
            "display": "Color Green",
        }
    )
    """
    Color Green
    Color for representation - green.
    """
    yellow = CodeSystemConcept(
        {
            "code": "yellow",
            "definition": "Color for representation - yellow.",
            "display": "Color Yellow",
        }
    )
    """
    Color Yellow
    Color for representation - yellow.
    """
    blue = CodeSystemConcept(
        {
            "code": "blue",
            "definition": "Color for representation - blue.",
            "display": "Color Blue",
        }
    )
    """
    Color Blue
    Color for representation - blue.
    """
    magenta = CodeSystemConcept(
        {
            "code": "magenta",
            "definition": "Color for representation - magenta.",
            "display": "Color Magenta",
        }
    )
    """
    Color Magenta
    Color for representation - magenta.
    """
    cyan = CodeSystemConcept(
        {
            "code": "cyan",
            "definition": "Color for representation - cyan.",
            "display": "Color Cyan",
        }
    )
    """
    Color Cyan
    Color for representation - cyan.
    """
    white = CodeSystemConcept(
        {
            "code": "white",
            "definition": "Color for representation - white.",
            "display": "Color White",
        }
    )
    """
    Color White
    Color for representation - white.
    """
    class Meta:
        # Exposes the underlying CodeSystem resource for this value set.
        resource = _resource
| StarcoderdataPython |
4809976 | <filename>seaice/images/test/util.py
from datetime import date
from functools import wraps
from unittest.mock import patch
class mock_today(object):
    """Decorator that pins ``date.today()`` to a fixed date for a test."""

    def __init__(self, year, month, day, module, datetime='dt'):
        """Fix the value of datetime.date.today() to easily test functionality that
        depends on the real-world current day.

        year, month, day: mock datetime.date.today() to equal
                          datetime.date(year, month, day)
        module: patch datetime.date within this module
        datetime: the name datetime is imported as within the given module
        """
        self.date = date(year, month, day)
        # Dotted patch target, e.g. 'mymodule.dt.date'.
        self.date_class = '{}.{}.date'.format(module, datetime)

    def __call__(self, func):
        @wraps(func)
        def func_wrapper(*args, **kwargs):
            with patch(self.date_class) as mock_date:
                mock_date.today.return_value = self.date
                # Keep date(...) constructors working on the mocked class.
                mock_date.side_effect = lambda *args_, **kw: date(*args_, **kw)
                # Bug fix: forward **kwargs too - the original wrapper
                # silently dropped keyword arguments.
                return func(*args, **kwargs)
        return func_wrapper
| StarcoderdataPython |
137503 | <gh_stars>1-10
import argparse
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from preprocessing_helper import get_features_map
import pandas as pd
import umap
# Command-line arguments: CSV paths of the train and test datasets.
parser = argparse.ArgumentParser(description='get dataset file')
parser.add_argument('--train_ds', type=str, default='data/CAE_dataset.csv',
                    help='.')
parser.add_argument('--test_ds', type=str, default='data/CAE_test_dataset.csv',
                    help='..')
args = parser.parse_args()
def remove_wrong_points(X, y):
    """Relabel suspected mislabelled class-0 samples as class 1, in place.

    All samples with label 0 are collected, a single-cluster KMeans gives
    their centroid, and the 20% of class-0 points farthest from it are
    re-labelled 1 in *y*.  Returns (X, y) with X unchanged.
    """
    X_zeros = []
    for i in range(len(y)):
        if y[i] == 0:
            # Append the original row index so it can be recovered later.
            X_zeros.append(np.append(X[i], i))
    X_zeros = np.array(X_zeros)
    #X_zeros[:,:-1]
    # One cluster == centroid of the class-0 samples (index column excluded).
    Kmean = KMeans(n_clusters=1).fit(X_zeros[:,:-1])
    center = Kmean.cluster_centers_
    # NOTE(review): the return value of reshape is discarded, so this line
    # is a no-op; the literal 81 suggests 81 features were assumed - confirm.
    center.reshape(81)
    distance = []
    for zeros in X_zeros:
        # Euclidean distance from the centroid (index column excluded).
        dist = np.linalg.norm(zeros[:-1] - center)
        distance.append((dist,zeros[-1]))
    distance = sorted(distance, key=lambda a_entry: a_entry[0])
    # The farthest 20% of class-0 points are treated as wrongly labelled.
    wrong_points = distance[int(0.8*len(distance)):]
    wrong_points = np.array(wrong_points)
    wrong_points = wrong_points.transpose()
    #new_X = np.delete(X, wrong_points[1],0)
    #new_y = np.delete(y, wrong_points[1],0)
    #np.save("y_new.npy", new_y)
    #np.save("x_new.npy", new_X)
    for i in wrong_points[1]:
        y[int(i)] = 1
    return X, y
def graph_embedding(X_emb_1, X_emb_0):
    """Scatter-plot a 2-D embedding: class-1 points in red, class-0 in blue."""
    figure = plt.figure(figsize=(8,5))
    axes = figure.add_subplot(1, 1, 1)
    # ones are in red
    axes.scatter(X_emb_1[:,0], X_emb_1[:,1], c='red', s=5)
    # zeros are in blue
    axes.scatter(X_emb_0[:,0], X_emb_0[:,1], c='blue', s=5)
    plt.show()
def clean_nans(df):
    """Drop every row of *df* that contains at least one NaN value.

    NOTE(review): rows are identified by the positional indices returned by
    np.argwhere and then passed to DataFrame.drop, which drops by *label* -
    this assumes the default RangeIndex. Confirm for re-indexed frames.
    """
    nan_positions = np.argwhere(np.isnan(df.values))
    seen = []
    for row_idx, _col_idx in nan_positions:
        if row_idx not in seen:
            seen.append(row_idx)
    return df.drop(seen)
def get_split(df, test=False):
    '''Big method to get pilot split'''
    # NOTE(review): this function contains duplicated copy-pasted code
    # and a dead nested helper (get_features_by_test_run is defined but
    # never called); structure below is the most consistent reading.
    def get_features(df):
        # Per-column feature arrays; test CSVs have one fewer column
        # (no label) -- presumably, confirm against the data files.
        if test:
            return [np.array(df.iloc[:, n]) for n in range(1, 11)]
        return [np.array(df.iloc[:, n]) for n in range(1, 12)]
    # NOTE(review): this outer binding is never updated -- the
    # assignment inside list_of_indexes creates a new local instead.
    outter_pilot_id = None
    def list_of_indexes(df):
        """Return ([ [start_row, pilot_id], ... ], duplicate_count)."""
        pilot_id = np.array(df.iloc[:, -1])
        outter_pilot_id = pilot_id
        x0 = pilot_id[0]  # pilot id is the current
        # x is a list where each element is [start row index, pilot id]
        x = [[0, pilot_id[0]]]
        for i in range(len(pilot_id)):
            if pilot_id[i] != x0:
                x.append([i, pilot_id[i]])
                x0 = pilot_id[i]
        # find the number of ids that were counted twice
        count = 0
        # this check if there are duplicates
        for i in range(len(x)):
            for j in range(i + 1, len(x)):
                if x[i][1] == x[j][1]:
                    # print(i,":",x[i],"\n",j,":",x[j])
                    count += 1
        return x, count
    def get_features_by_test_run(df):
        # NOTE(review): dead code -- duplicated verbatim in the function
        # body below and never called.
        if test:
            features = np.transpose([np.array(df.iloc[:, n]) for n in range(1, 12)])
        else:
            features = np.transpose([np.array(df.iloc[:, n]) for n in range(1, 13)])
        indexes, count = list_of_indexes(df)
        features_by_run = []
    if test:
        features = np.transpose([np.array(df.iloc[:, n]) for n in range(1, 12)])
    else:
        features = np.transpose([np.array(df.iloc[:, n]) for n in range(1, 13)])
    indexes, count = list_of_indexes(df)
    real_pilot_id = [i[1] for i in indexes]
    indexes = [i[0] for i in indexes]
    features_by_run = []
    j = 0
    # Group consecutive rows into runs: a new run starts whenever the
    # last column (pilot id) changes from the previous row.
    for i in range(len(features)):
        if i == 0:
            test_run = [features[i]]
        elif features[i][-1] != features[i - 1][-1]:
            features_by_run.append(test_run)
            test_run = [features[i]]
        else:
            test_run.append(features[i])
        # if i%1000==0:#trace
        # print(i)#trace
    # NOTE(review): the next two assignments overwrite values that are
    # never used afterwards -- apparent leftovers.
    features = get_features(df)
    x, count = list_of_indexes(df)
    # Flush the final run collected by the loop above.
    features_by_run.append(test_run)
    feat = features_by_run
    defective_pilot = []
    good_pilot = []
    if not test:
        # Training data: split runs by the label stored in column -2.
        for i in features_by_run:
            if i[0][-2] == 0:
                good_pilot.append(i)
            elif i[0][-2] == 1:
                defective_pilot.append(i)
            else:
                raise Exception
        return defective_pilot, good_pilot
    else:
        return features_by_run, real_pilot_id
def list_to_np(X_good, X_def):
    '''converts Steve's weird lists to normal human-readable formats'''
    # Runs shorter than 600 steps are dropped; longer runs are truncated
    # to 600 steps and the first 10 feature columns.  Good runs are
    # labeled 0, defective runs 1.  Arrays are also saved to data/.
    def _truncate(runs):
        kept = []
        for run in runs:
            if len(run) > 600:
                kept.append(run[:600])
        return np.array(kept)[:, :, :10]

    # GOOD
    X_good = _truncate(X_good)
    y_good = np.zeros(len(X_good))
    # NOT SO GOOD
    X_def = _truncate(X_def)
    y_def = np.ones(len(X_def))
    X = np.concatenate([X_good, X_def], axis=0)
    y = np.concatenate([y_good, y_def], axis=0)
    np.save('data/X_raw.npy', X)
    np.save('data/y.npy', y)
    return X, y
def load_data():
    """Load the training CSV (path from CLI args), drop NaN rows, split
    into per-pilot runs, and return (X, y) arrays of shape
    (n_samples, 600, 10)."""
    train_df = pd.read_csv(args.train_ds)
    train_df = clean_nans(train_df)
    defective_pilot, good_pilot = get_split(train_df)
    # get X and y time series, (n_samples, 600, 10)
    X, y = list_to_np(good_pilot, defective_pilot)
    return X, y
if __name__ == '__main__':
    X, y = load_data()
    # pull out dank features
    #X = get_features_map('data/X_raw.npy', pickle=True)
    # Precomputed ARIMA-based feature matrix is loaded instead of
    # recomputing features from the raw time series.
    X = np.load('data/x_feature_arima.npy')
    ### TRAINING ###
    # Flip the labels of outlier "good" samples before fitting.
    X, y = remove_wrong_points(X, y)
    pca = PCA(n_components=15)
    X = pca.fit_transform(X, y)
    reducer = umap.UMAP()
    def repeated_umap(X, i):
        """Append i UMAP embeddings of X as extra feature columns,
        plotting each one (reads y and reducer from enclosing scope)."""
        embeddings = []
        for e in range(i):
            X_emb = reducer.fit_transform(X)
            # Select embedded points per class for the diagnostic plot.
            X_emb_1 = X_emb[np.argwhere(y == 1).reshape(np.argwhere(y == 1).shape[0])]
            X_emb_0 = X_emb[np.argwhere(y == 0).reshape(np.argwhere(y == 0).shape[0])]
            graph_embedding(X_emb_1, X_emb_0)
            embeddings.append(X_emb)
        embeddings = np.concatenate(embeddings, axis=1)
        X = np.concatenate([X, embeddings], axis=1)
        return X
    X = repeated_umap(X, 1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
    model = LogisticRegression(solver='saga')
    model.fit(X_train, y_train)
    pred = model.predict(X_test)
    acc = accuracy_score(y_test, pred)
    f1 = f1_score(y_test, pred)
    print('Accuracy score: {}\t F1 score: {}.'.format(acc, f1))
    ## done training, now streamlining testing process
    test_df = pd.read_csv(args.test_ds)
    test_df = clean_nans(test_df)
    features_by_run, pilot_id = get_split(test_df, test=True)
    print(len(pilot_id))
    # Manual truncation of test runs to 600 steps / 10 features,
    # mirroring list_to_np for the training data.
    newlist = list()
    new_pilot_list = list()
    for i in range(len(features_by_run)):
        if len(features_by_run[i]) > 600:
            newlist.append(features_by_run[i][:600])
            new_pilot_list.append(pilot_id[i])
    XX = np.array(newlist)[:, :, :10]
    np.save('data/XX.npy', XX)
    XX = get_features_map('data/XX.npy', pickle=False)
    # Apply the transforms fitted on training data to the test features.
    XX = pca.transform(XX)
    XX_emb = reducer.transform(XX)
    XX = np.concatenate([XX, XX_emb], axis=1)
    pred = model.predict(XX)
    print()
    print()
    print(len(pred), len(new_pilot_list))
    # Write the submission file: one (pilot id, flag) row per test run.
    import csv
    csvData = [['Pilot ID', 'Flag']]
    for i in range(len(pred)):
        csvData.append([new_pilot_list[i],pred[i]])
    with open('submission.csv', 'w') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerows(csvData)
    # NOTE(review): redundant -- the with block already closed the file.
    csvFile.close()
| StarcoderdataPython |
4824001 | #!/usr/bin/python
#
# Copyright (c) 2019 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: arubaoss_qos_policy
short_description: implements rest api for qos configuration
version_added: "2.6"
description:
- "This implements rest api's which can be used to configure qos
on device."
options:
class_name:
description:
- traffic class name
required: false
class_type:
description:
- traffic class type
required: false
choices: QCT_IP_V4, QCT_IP_V6
default: QCT_IP_V4
policy_name:
description:
- qos policy name
required: true
policy_type:
description:
- Type of qos. Only QPT_QOS is supported
required: false
action:
description:
- Type of qos action to take.
required: false
default: QPAT_RATE_LIMIT
choices: QPAT_RATE_LIMIT, QPAT_PRIORITY, QPAT_DSCP_VALUE
action_value:
description:
- Value for each action.
required: false
sequence_no:
description:
- Sequence number for traffic class
required: false
author:
- <NAME> (@hpe)
'''
EXAMPLES = '''
- name: create qos policy
arubaoss_qos_policy:
policy_name: my_qos
- name: attach class to qos
arubaoss_qos_policy:
policy_name: my_qos
class_name: my_class
action: QPAT_RATE_LIMIT
action_value: 1000
sequence_no: "{{class_1.sequence_no}}"
- name: delete qos policy
arubaoss_qos_policy:
policy_name: my_qos
state: delete
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.arubaoss.arubaoss import run_commands,get_config
from ansible.module_utils.network.arubaoss.arubaoss import arubaoss_argument_spec
from ansible.module_utils._text import to_text
def qos(module):
    """Create or delete a QoS policy via the switch REST API.

    Deletion is refused while the policy is still attached to any port
    or VLAN.  Returns the result dict from run_commands, or an early
    message dict when deletion is blocked.
    NOTE(review): the early-return dicts use the key 'change' while the
    rest of the module uses 'changed' -- confirm which the callers read.
    """
    params = module.params
    url = '/qos/policies'
    # The REST API identifies a policy as '<name>~<type>'.
    policy_id = params['policy_name'] + '~' + params['policy_type']
    check_url = url + '/' + policy_id
    if params['state'] == 'create':
        data = {
            'policy_name': params['policy_name'],
            'policy_type': params['policy_type'],
        }
        method = 'POST'
    else:
        # Check if qos is applied to any port
        qos_url = '/qos/ports-policies'
        qos_config = get_config(module,qos_url)
        if qos_config:
            qos_config = module.from_json(to_text(qos_config))
            for config in qos_config['qos_port_policy_element']:
                if policy_id == config['policy_id']:
                    return {'msg': 'Cannot delete policy {}, active on port {}'.\
                            format(policy_id,config['port_id']),'change':False}
        # Check if qos is applied to any vlan
        qos_url = '/qos/vlans-policies'
        qos_config = get_config(module,qos_url)
        if qos_config:
            qos_config = module.from_json(to_text(qos_config))
            for config in qos_config['qos_vlan_policy_element']:
                if policy_id == config['policy_id']:
                    return {'msg': 'Cannot delete policy {}, active on vlan {}'.\
                            format(policy_id,config['vlan_id']),'change':False}
        data = {}
        method = 'DELETE'
        url = check_url
    result = run_commands(module, url, data, method, check=check_url)
    return result
def qos_class(module):
    """Attach a traffic class (with one action) to a QoS policy, update
    an existing entry by sequence number, or detach one.

    For state == 'create' the policy is created first if needed, the
    class's existence is validated, and an already-attached class is
    returned unchanged (idempotency).  For delete, sequence_no is
    mandatory.
    """
    params = module.params
    policy_id = params['policy_name'] + '~' + params['policy_type']
    url = '/qos/policies/' + policy_id + '/policy-actions'
    # Create qos if not to apply actions
    if params['state'] == 'create':
        qos(module)
    method = 'POST'
    if params['sequence_no'] > 0:
        # A sequence number that already exists means this is an update.
        temp = url + '/' + str(params['sequence_no'])
        if get_config(module, temp):
            url = url + '/' + str(params['sequence_no'])
            method = 'PUT'
    if params['state'] == 'create':
        class_id = params['class_name'] + '~' + params['class_type']
        class_url = '/qos/traffic-classes/' + class_id
        if not get_config(module, class_url):
            return {'msg': 'class does not exist', 'changed':False}
        # action_value keeps its -1 sentinel default when not supplied.
        if params['action_value'] == -1 or not params['action']:
            return {'msg':'action and action_type are required','changed':False}
        action = params['action']
        action_value = params['action_value']
        data = {
            'policy_id': policy_id,
            'traffic_class_id': class_id,
            'first_action': {
                'action_type': action,
            },
        }
        if params['sequence_no'] > 0:
            data['sequence_no'] = params['sequence_no']
        # Each action type stores its value under a different JSON key.
        if action == 'QPAT_RATE_LIMIT':
            data['first_action']['rate_limit_in_kbps'] = action_value
        elif action == 'QPAT_DSCP_VALUE':
            data['first_action']['new_dscp_value'] = action_value
        else:
            data['first_action']['new_priority'] = action_value
        # Idempotency: return the existing entry if this class is
        # already attached to the policy.
        qos_config = get_config(module, url)
        if qos_config:
            check_config = module.from_json(to_text(qos_config))
            if params['sequence_no'] == 0:
                for config in check_config['qos_policy_action_element']:
                    if class_id == config['traffic_class_id']:
                        return config
            elif params['sequence_no'] > 0:
                if check_config.get('traffic_class_id') and class_id == check_config['traffic_class_id']:
                    return check_config
        result = run_commands(module, url, data, method)
    else:
        if params['sequence_no'] ==0:
            return {'msg':'sequence_no is required','changed':False}
        else:
            url = url + '/' + str(params['sequence_no'])
            result = run_commands(module, url, {}, 'DELETE',check=url)
    return result
def run_module():
    """Declare the module's argument spec and dispatch to qos()/qos_class().

    Fixes over the original:
      * the action_value spec misspelled 'required' as 'reqquired', so
        an unknown keyword was passed instead of required=False;
      * fail_json is given str(err) so the failure message is always a
        plain serializable string rather than an exception object.
    """
    module_args = dict(
        class_name=dict(type='str', required=False),
        class_type=dict(type='str', required=False, default='QCT_IP_V4',
                        choices=['QCT_IP_V4', 'QCT_IP_V6']),
        policy_name=dict(type='str', required=True),
        policy_type=dict(type='str', required=False, default='QPT_QOS'),
        state=dict(type='str', required=False, default='create',
                   choices=['create', 'delete']),
        action=dict(type='str', required=False, default='QPAT_RATE_LIMIT',
                    choices=['QPAT_PRIORITY', 'QPAT_DSCP_VALUE', 'QPAT_RATE_LIMIT']),
        # -1 is the "not supplied" sentinel checked in qos_class().
        action_value=dict(type='int', required=False, default=-1),
        sequence_no=dict(type='int', required=False, default=0),
    )
    module_args.update(arubaoss_argument_spec)
    result = dict(changed=False, warnings='Not Supported')
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    if module.check_mode:
        # Check mode performs no device changes.
        module.exit_json(**result)
    try:
        # A class name means a class attach/detach; otherwise operate on
        # the policy itself.
        if module.params['class_name']:
            result = qos_class(module)
        else:
            result = qos(module)
    except Exception as err:
        return module.fail_json(msg=str(err))
    module.exit_json(**result)
def main():
    """Module entry point used by Ansible."""
    run_module()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
1669533 | import numpy
import matplotlib.pyplot as plt
def fig4plot():
    """Render figure 4: |m| calibration bias versus four galaxy-model
    parameters (bulge Sersic index, bulge-to-total flux, galaxy
    ellipticity, and y0), saved to output/fig4.pdf.

    The original body repeated the axis setup, shading, file parsing,
    and plotting four times verbatim; that duplication is factored into
    _make_axis and _plot_calib with identical behavior.
    """
    # setup plots
    fig = plt.figure(figsize=(10.0, 7.5))
    fig.subplots_adjust(left=0.1, right=0.9, wspace=0.3)
    ax1 = _make_axis(fig, 221, 'n$_{\mathrm{s, b}}$', (1.5, 4.0))
    ax2 = _make_axis(fig, 222, 'B/T', (0.0, 1.0))
    ax3 = _make_axis(fig, 223, 'e$_{\mathrm{g}}$', (0.1, 0.6))
    ax4 = _make_axis(fig, 224, 'y$_0$', (0.0, 0.5))
    _plot_calib(ax1, 'output/fig4_bulge_sersic_index.dat', 'bulge_n')
    _plot_calib(ax2, 'output/fig4_bulge_flux.dat', 'bulge_flux')
    _plot_calib(ax3, 'output/fig4_gal_ellip.dat', 'gal_ellip')
    _plot_calib(ax4, 'output/fig4_y0.dat', 'y0')
    plt.savefig('output/fig4.pdf', dpi=220)

def _make_axis(fig, position, xlabel, xlim):
    """Create one log-scaled |m| subplot with the grey requirement bands."""
    ax = fig.add_subplot(position)
    ax.set_yscale('log')
    ax.set_ylabel('|m|')
    ax.set_ylim(5.e-5, 1.e-2)
    ax.set_xlabel(xlabel)
    ax.set_xlim(*xlim)
    # Three stacked translucent bands at |m| <= 1e-3, 5e-4, 2e-4.
    for level in (1.e-3, 1.e-3/2, 1.e-3/5):
        ax.fill_between(list(xlim), [level, level], [1.e-5, 1.e-5],
                        color='grey', alpha=0.2, edgecolor='None')
    return ax

def _plot_calib(ax, path, key):
    """Load one calibration .dat file and plot |m1| / |m2| against *key*.

    A missing file (IOError) leaves the data empty, matching the
    original's silent pass; each line is 'x (c1) (c2) (m1) (m2)'-style
    with parentheses and commas stripped before splitting.
    """
    calib = {key: [], 'c1': [], 'c2': [], 'm1': [], 'm2': []}
    try:
        with open(path) as fil:
            for line in fil:
                line = line.replace('(', ' ')
                line = line.replace(')', ' ')
                line = line.replace(',', ' ')
                line = ' '.join(line.split())
                xval, c1, c2, m1, m2 = line.split(' ')
                calib[key].append(float(xval))
                calib['c1'].append(float(c1))
                calib['c2'].append(float(c2))
                calib['m1'].append(float(m1))
                calib['m2'].append(float(m2))
    except IOError:
        pass
    # |m1|: open squares joined by a solid red line;
    # |m2|: crosses joined by a dashed red line.
    ax.plot(calib[key], abs(numpy.array(calib['m1'])), 's', mfc='None', mec='red', mew=1.3)
    ax.plot(calib[key], abs(numpy.array(calib['m1'])), color='red')
    ax.plot(calib[key], abs(numpy.array(calib['m2'])), 'x', mfc='None', mec='red', mew=1.3)
    ax.plot(calib[key], abs(numpy.array(calib['m2'])), color='red', ls='--')

if __name__ == '__main__':
    fig4plot()
| StarcoderdataPython |
1624254 | import numpy as np
import random
from boxenv import *
from agent import *
# Experiment configuration: number of discoverable skills, the
# condition tag used to locate saved model files, and the 2-D state
# space of the box environment.
NB_SKILLS = 6
COND = 'OUR'
STATE_DIM = 2
DIM = STATE_DIM
# Policy network consumes state concatenated with a one-hot skill; the
# discriminator maps a goal/state to a distribution over skills.
policy_function = GaussianPolicyFunction(STATE_DIM + NB_SKILLS, 2)
policy = GaussianPolicy()
d = SkillDiscriminator(DIM, NB_SKILLS)
# initial training task list
# TASKS = [(0.5, 0.8), (-0.5, 0.8)]
TASKS = [(1., 0.8), (0.33, 0.8), (-0.33, 0.8), (-1, 0.8)]
# create a stationary test task list
rng = np.random.default_rng(1)
TEST_TASKS = rng.random((10,2))
# Rescale samples from [0, 1) to [-1, 1) and pin every goal's y to 0.8.
TEST_TASKS = TEST_TASKS * 2 - 1
TEST_TASKS[:,1] = 0.8
print('Testing zero-shot generalization on tasks ')
print(TEST_TASKS)
def compute_rewards(s1, s2, g):
    """Dense progress reward: how much closer a step moved the agent to *g*.

    Args:
        s1: state before the action.
        s2: state after the action.
        g: goal position.

    Returns:
        The decrease in Euclidean distance to the goal (positive when
        the step moved the agent toward *g*).
    """
    distance_before = np.linalg.norm(s1 - g)
    distance_after = np.linalg.norm(s2 - g)
    return distance_before - distance_after
# Evaluate zero-shot generalization across three random seeds; for each
# seed, load that seed's trained policy/discriminator and roll out 10
# episodes per test goal, accumulating dense progress rewards.
rewards = []
for SEED in [123, 456, 789]:
    np.random.seed(SEED)
    random.seed(SEED)
    torch.manual_seed(SEED)
    box = BoxWorld()
    NB_TASKS = TEST_TASKS.shape[0]
    # Load the weights saved for this seed and condition/task count.
    policy_function.load_state_dict(torch.load(
        'models/{}/{}/policy_seed{}.pth'.format(COND, len(TASKS), SEED)))
    d.load_state_dict(torch.load(
        'models/{}/{}/variational_seed{}.pth'.format(COND, len(TASKS), SEED)))
    _, d_skills = d(torch.Tensor(TEST_TASKS))
    # The discriminator's second output is log-probabilities; exp()
    # recovers the per-goal skill distribution used for sampling.
    d_skills = d_skills.detach().exp()
    rewards.append([])
    for gid in range(NB_TASKS):
        rewards[-1].append([])
        for i in range(10):
            # sample a skill
            w = np.random.choice(range(NB_SKILLS), p=d_skills[gid].numpy())
            w_onehot = np.zeros(NB_SKILLS)
            w_onehot[w] = 1
            s = box.reset()
            done = False
            states = []
            rewards[-1][gid].append(0)
            while not done:
                states.append(s)
                # Condition the policy on state + one-hot skill.
                s = torch.Tensor(np.concatenate((s, w_onehot)))
                # get action and logprobs
                mu, sigma = policy_function(s)
                unscaled_action, logprob, entropy = policy.forward(mu, sigma)
                # scale action to environment limits
                a = box.scale_action(unscaled_action.detach().numpy())
                # step the environment
                s, _, done = box.step(a)
                r = compute_rewards(states[-1], s, TEST_TASKS[gid])
                rewards[-1][gid][-1] += r
# Aggregate: per-seed mean episode return, overall mean, and the
# standard deviation across seeds.
rewards = np.stack(rewards)
print(rewards.mean(-1).mean(-1))
print(rewards.mean())
print(np.std(rewards.mean(-1).mean(-1)))
| StarcoderdataPython |
3324494 | import time
import socket
import sys
import os
import pygame
import threading
from pongClient import Paddle, Ball, WIDTH, HEIGHT, UP, DOWN
from constants import LEFT_PADDLE_ID, RIGHT_PADDLE_ID, DEFAULT_PORT, PADDLE_HEIGHT, PADDLE_WIDTH, FPS, SYMMETRIC, ASYMMETRIC , SERVER_NAME
import logging
from MyCrypt import *
# Timestamped server-side log format.
logging.basicConfig(format='%(asctime)s - SERVER - %(levelname)s - %(message)s',
                    level=logging.INFO)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = SERVER_NAME
port = DEFAULT_PORT
server_ip = socket.gethostbyname(server)
# Shared symmetric key; clients must hold the same key.key file for the
# encrypted-protocol (SYMMETRIC) mode to work.
keyf = open('key.key' , 'rb')
key = keyf.read()
cipher = Symmetric(key)
try:
    s.bind((server, port))
except socket.error as e:
    # NOTE(review): bind failure is only printed; the server continues
    # and listen() below will then raise -- confirm intended.
    print(str(e))
# Backlog of 2: one connection per paddle.
s.listen(2)
print("Waiting for a connection")
pygame.init()
class Game:
    """Server-authoritative pong state: ball, two paddles, scores.

    `play` gates the physics loop; `turn` records which player serves
    after a goal.
    """
    def __init__(self):
        self.ball = Ball()
        self.leftPaddle = Paddle(LEFT_PADDLE_ID)
        self.rightPaddle = Paddle(RIGHT_PADDLE_ID)
        self.clock = pygame.time.Clock()
        self.play = False
        self.turn = LEFT_PADDLE_ID
        logging.info('Game initiated')
    def start(self):
        """Physics loop: runs forever at FPS, stepping only while play is set."""
        logging.info('Starting game')
        while True:
            if self.play:
                # Update ball position
                self.ball.x += self.ball.speed_x
                self.ball.y += self.ball.speed_y
                # Calculate goals
                if self.ball.x+self.ball.radius > WIDTH:
                    # Ball crossed the right edge: left player scores;
                    # pause and give the serve to the right player.
                    self.ball.speed_y = -self.ball.speed_y
                    self.ball.placeInFrontOfRightPaddle(self.rightPaddle)
                    self.play = False
                    self.turn = RIGHT_PADDLE_ID
                    logging.info('goal for left paddle')
                    self.leftPaddle.score += 1
                elif self.ball.x-self.ball.radius < 0:
                    # Ball crossed the left edge: right player scores.
                    self.ball.speed_y = -self.ball.speed_y
                    self.ball.placeInFrontOfLeftPaddle(self.leftPaddle)
                    self.play = False
                    self.turn = LEFT_PADDLE_ID
                    logging.info('goal for right paddle')
                    self.rightPaddle.score += 1
                #collision with horizontal walls
                if self.ball.y+self.ball.radius > HEIGHT:
                    self.ball.speed_y = -self.ball.speed_y
                elif self.ball.y-self.ball.radius < 0:
                    self.ball.speed_y = abs(self.ball.speed_y)
                # Bounce ball off paddles
                if self.ball.x-self.ball.radius <= (self.leftPaddle.x + PADDLE_WIDTH) and ((self.leftPaddle.y + PADDLE_HEIGHT) >= self.ball.y >= self.leftPaddle.y):
                    self.ball.x = self.leftPaddle.x + PADDLE_WIDTH + self.ball.radius
                    self.ball.speed_x = abs(self.ball.speed_x)
                    logging.info(f'game:start:bouncing ball off left paddle')
                elif self.ball.x+self.ball.radius >= self.rightPaddle.x and ((self.rightPaddle.y + PADDLE_HEIGHT) >= self.ball.y >= self.rightPaddle.y):
                    self.ball.x = self.rightPaddle.x - self.ball.radius
                    self.ball.speed_x = -self.ball.speed_x
                    logging.info(f'game:start:bouncing ball off right paddle')
            # NOTE(review): tick() placement reconstructed from mangled
            # indentation -- confirm it rate-limits even while paused.
            self.clock.tick(FPS)
    def stop(self):
        """Pause the simulation (the loop thread itself keeps running)."""
        self.play = False
        logging.info('game stopped successfully?!')
# The next paddle id handed out: first client gets LEFT, second RIGHT.
currentId = LEFT_PADDLE_ID
def playGame():
    """Thread target: enable play and enter the blocking physics loop."""
    global game
    game.play = True
    game.start()
# Single shared game instance for both client threads.
game = Game()
def threaded_client(conn, thread_id):
    """Per-connection handler: assign a paddle id and relay game state.

    Protocol: the server first sends this client's paddle id; every
    subsequent client message is '<paddle_id>:<move>' and is answered
    with the full game-state string (optionally encrypted when
    SYMMETRIC is set).

    NOTE(review): currentId is read before being set to RIGHT_PADDLE_ID,
    so the first client gets LEFT and the game thread starts when the
    second (RIGHT) client connects; on disconnect currentId is reset to
    0, not LEFT_PADDLE_ID -- confirm reconnection is meant to work.
    """
    logging.info(
        'threaded_client: created new thread with id : '+str(thread_id))
    global currentId
    global game
    conn.send(str.encode(str(currentId)))
    if (currentId == RIGHT_PADDLE_ID):
        gameThread = threading.Thread(target=playGame, args=())
        gameThread.start()
    currentId = RIGHT_PADDLE_ID
    reply = ''
    while True:
        try:
            data = conn.recv(2048)
            logging.info(f'Recieved data : {time.time_ns()}')
            if SYMMETRIC:
                # Messages travel encrypted with the shared key.
                reply = cipher.decrypt(data)
                logging.info(f'server:decrypted message : {reply}')
            else:
                reply = data.decode('utf-8')
            if not data:
                # Empty recv means the peer closed the connection.
                conn.send(str.encode("Goodbye"))
                break
            else:
                logging.info("Recieved: " + reply)
                arr = reply.split(":")
                if arr[1] != "NULL": # might cause problems
                    print("arr[1] : "+arr[1])
                    # Resume play only for the player whose turn it is.
                    if game.play == False:
                        if (LEFT_PADDLE_ID == int(arr[0])) and (game.turn == LEFT_PADDLE_ID):
                            game.play = True
                        elif RIGHT_PADDLE_ID == int(arr[0])and (game.turn == RIGHT_PADDLE_ID):
                            game.play = True
                    if int(arr[0]) == LEFT_PADDLE_ID:
                        game.leftPaddle.move(int(arr[1]))
                    elif int(arr[0]) == RIGHT_PADDLE_ID:
                        game.rightPaddle.move(int(arr[1]))
                # Reply lists the requesting player's own paddle first.
                if int(arr[0]) == LEFT_PADDLE_ID:
                    reply = f"ball:{game.ball.x},{game.ball.y}|paddle left:{game.leftPaddle.x},{game.leftPaddle.y}|paddle right:{game.rightPaddle.x},{game.rightPaddle.y}|score:{game.leftPaddle.score},{game.rightPaddle.score}|count:{threading.activeCount() - 1}"
                elif int(arr[0]) == RIGHT_PADDLE_ID:
                    reply = f"ball:{game.ball.x},{game.ball.y}|paddle right:{game.rightPaddle.x},{game.rightPaddle.y}|paddle left:{game.leftPaddle.x},{game.leftPaddle.y}|score:{game.rightPaddle.score},{game.leftPaddle.score}|count:{threading.activeCount() - 1}"
                logging.info(f"threaded_client {thread_id}: Sending: {reply}")
                if SYMMETRIC:
                    reply = cipher.encrypt(reply)
                    logging.info(f'server:encypted message : {reply}')
                else :
                    reply = str.encode(reply)
                conn.sendall(reply)
        except Exception as e:
            # Any protocol/socket error tears down this connection.
            print("*************************************\n")
            print("SERVER: Breaker exception happened : "+str(e))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            print("\n\n*************************************")
            break
    print("Connection Closed")
    currentId = 0
    game.stop()
    conn.close()
# Main accept loop: every incoming connection gets its own handler
# thread with a monotonically increasing id.
thread_id = 0
while True:
    thread_id = thread_id + 1
    conn, addr = s.accept()
    logging.info(f"Connected to: {addr}")
    t1 = threading.Thread(target=threaded_client, args=(conn, thread_id,))
    t1.start()
111274 | <gh_stars>1-10
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Tools for parsing ELF headers.
from driver_log import DriverOpen, DriverClose, Log, FixArch
class ELFHeader(object):
    """Decoded fields of an ELF file header.

    The class-level tables translate raw numeric header codes into the
    symbolic names used by the rest of the driver.
    """
    ELF_MAGIC = '\x7fELF'
    ELF_TYPES = { 1: 'REL', # .o
                  2: 'EXEC', # .exe
                  3: 'DYN' } # .so
    ELF_MACHINES = { 3: '386',
                     8: 'MIPS',
                     40: 'ARM',
                     62: 'X86_64' }
    ELF_OSABI = { 0: 'UNIX',
                  3: 'LINUX',
                  123: 'NACL' }
    ELF_ABI_VER = { 0: 'NONE',
                    7: 'NACL' }

    def __init__(self, e_type, e_machine, e_osabi, e_abiver):
        tables = ELFHeader
        self.type = tables.ELF_TYPES[e_type]
        self.machine = tables.ELF_MACHINES[e_machine]
        self.osabi = tables.ELF_OSABI[e_osabi]
        self.abiver = tables.ELF_ABI_VER[e_abiver]
        # Normalized architecture name, precomputed for convenience.
        self.arch = FixArch(self.machine)
# If the file is not ELF, returns None.
# Otherwise, returns an ELFHeader object.
def GetELFHeader(filename):
    # Read e_ident (16 bytes) plus e_type and e_machine (2 bytes each).
    fp = DriverOpen(filename, 'rb')
    header = fp.read(16 + 2 + 2)
    DriverClose(fp)
    return DecodeELFHeader(header, filename)
def DecodeELFHeader(header, filename):
    """Parse raw ELF header bytes into an ELFHeader, or return None.

    Returns None when the magic bytes don't match; logs fatally (via
    Log.Fatal) when any decoded field is outside ELFHeader's tables.
    """
    # Pull e_ident, e_type, e_machine
    if header[0:4] != ELFHeader.ELF_MAGIC:
        return None
    # e_ident[7] is EI_OSABI, e_ident[8] is EI_ABIVERSION.
    e_osabi = DecodeLE(header[7])
    e_abiver = DecodeLE(header[8])
    # e_type and e_machine are 16-bit little-endian fields after e_ident.
    e_type = DecodeLE(header[16:18])
    e_machine = DecodeLE(header[18:20])
    if e_osabi not in ELFHeader.ELF_OSABI:
        Log.Fatal('%s: ELF file has unknown OS ABI (%d)', filename, e_osabi)
    if e_abiver not in ELFHeader.ELF_ABI_VER:
        Log.Fatal('%s: ELF file has unknown ABI version (%d)', filename, e_abiver)
    if e_type not in ELFHeader.ELF_TYPES:
        Log.Fatal('%s: ELF file has unknown type (%d)', filename, e_type)
    if e_machine not in ELFHeader.ELF_MACHINES:
        Log.Fatal('%s: ELF file has unknown machine type (%d)', filename, e_machine)
    eh = ELFHeader(e_type, e_machine, e_osabi, e_abiver)
    return eh
# filetype.IsELF calls this IsElf. Top-level tools should prefer filetype.IsELF,
# both for consistency (i.e., all checks for file type come from that library),
# and because its results are cached.
def IsELF(filename):
    """Return True when *filename* begins with a parseable ELF header."""
    header = GetELFHeader(filename)
    return header is not None
# Decode Little Endian bytes into an unsigned value
def DecodeLE(bytes):
    """Decode a little-endian byte string into an unsigned integer.

    BUG FIX: the original multiplied the accumulator by 2 per byte
    instead of 256, so any multi-byte value whose high byte is non-zero
    decoded incorrectly.  It only appeared to work because the
    e_type/e_machine codes used here all fit in the low byte.

    Args:
        bytes: raw byte string, least-significant byte first.  (The
            parameter name is kept for interface compatibility even
            though it shadows the builtin.)

    Returns:
        The decoded unsigned integer (0 for an empty input).
    """
    value = 0
    for b in reversed(bytes):
        # Shift in one byte (x256) per position, most-significant first.
        value = (value << 8) | ord(b)
    return value
| StarcoderdataPython |
1764529 | <gh_stars>0
#!/usr/local/bin/python2.7
import sys
import os
try:
    # The dataset-driver harness execs this file with basePythonCodePath
    # already bound; fall back to the current directory when standalone.
    if basePythonCodePath is not None:
        pass
except NameError:
    basePythonCodePath = os.curdir
sys.path.append(basePythonCodePath)
from mi.logging import config
config.add_configuration(os.path.join(basePythonCodePath, 'res', 'config', 'mi-logging.yml'))
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.dataset_driver import DataSetDriver, ParticleDataHandler
from mi.dataset.parser.adcp_pd0 import AdcpPd0Parser
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
# Parser configuration: which particle class the PD0 parser emits.
config = {
    DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.adcpa_m_glider',
    DataSetDriverConfigKeys.PARTICLE_CLASS: 'AdcpaMGliderInstrumentParticle'
}
try:
    # particleDataHdlrObj may also be injected by the harness.
    if particleDataHdlrObj is not None:
        pass
except NameError:
    particleDataHdlrObj = ParticleDataHandler()
try:
    # sourceFilePath: injected by the harness, or the first CLI argument.
    if sourceFilePath is not None:
        pass
except NameError:
    try:
        sourceFilePath = sys.argv[1]
    except IndexError:
        print "Need a source file path"
        sys.exit(1)
def state_callback(state, ingested):
    # Parser state changes are ignored by this driver.
    pass

def pub_callback(data):
    # Trace-log each published particle.
    log.trace("Found data: %s", data)

def exception_callback(exception):
    # Any parser exception marks the whole capture as failed.
    particleDataHdlrObj.setParticleDataCaptureFailure()
# Open the PD0 file in binary mode and hand it to the ADCP parser; the
# finally clause guarantees the handle is closed even if parsing raises.
stream_handle = open(sourceFilePath, 'rb')
try:
    parser = AdcpPd0Parser(config, None, stream_handle,
                           state_callback, pub_callback,
                           exception_callback)
    driver = DataSetDriver(parser, particleDataHdlrObj)
    driver.processFileStream()
finally:
    stream_handle.close()
1653881 | <filename>message_media_webhooks/models/update_webhook_request.py
# -*- coding: utf-8 -*-
"""
message_media_webhooks.models.update_webhook_request
This file was automatically generated for MessageMedia by APIMATIC v2.0 ( https://apimatic.io )
"""
class UpdateWebhookRequest(object):
    """Model for the 'Update Webhook' request payload.

    Attributes:
        url (string): destination URL for webhook callbacks.
        method (string): HTTP method used for callbacks.
        encoding (string): payload encoding.
        events (list of string): events that trigger the webhook.
        template (string): body template for callbacks.
    """

    # Create a mapping from Model property names to API property names
    _names = {
        "url":'url',
        "method":'method',
        "encoding":'encoding',
        "events":'events',
        "template":'template'
    }

    def __init__(self,
                 url=None,
                 method=None,
                 encoding=None,
                 events=None,
                 template=None):
        """Constructor for the UpdateWebhookRequest class"""
        self.url = url
        self.method = method
        self.encoding = encoding
        self.events = events
        self.template = template

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response dict.

        Args:
            dictionary (dictionary): dict whose keys match the API
                property names in cls._names.

        Returns:
            UpdateWebhookRequest, or None when *dictionary* is None.
            Unknown keys are ignored; missing keys default to None.
        """
        if dictionary is None:
            return None
        # Pull each mapped API field out of the dict (None when absent)
        # and feed the values straight into the constructor.
        return cls(**{attr: dictionary.get(api_name)
                      for attr, api_name in cls._names.items()})
| StarcoderdataPython |
3217093 | <reponame>rupeshshrestha123/end2end-asr-pytorch<filename>utils/lm_functions.py
import torch
import os
import math
import torch.nn as nn
from models.lm.transformer_lm import TransformerLM
from utils.optimizer import NoamOpt
from utils import constant
# def save_model(model, epoch, opt, metrics, label2id, id2label, best_model=False):
# """
# Saving model, TODO adding history
# """
# if best_model:
# save_path = "{}/{}/best_model.th".format(
# constant.args.save_folder, constant.args.name)
# else:
# save_path = "{}/{}/epoch_{}.th".format(constant.args.save_folder,
# constant.args.name, epoch)
# if not os.path.exists(constant.args.save_folder + "/" + constant.args.name):
# os.makedirs(constant.args.save_folder + "/" + constant.args.name)
# print("SAVE MODEL to", save_path)
# args = {
# 'label2id': label2id,
# 'id2label': id2label,
# 'args': constant.args,
# 'epoch': epoch,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': opt.optimizer.state_dict(),
# 'optimizer_params': {
# '_step': opt._step,
# '_rate': opt._rate,
# 'warmup': opt.warmup,
# 'factor': opt.factor,
# 'model_size': opt.model_size
# },
# 'metrics': metrics
# }
# torch.save(args, save_path)
# def load_model(load_path):
# """
# Loading model
# args:
# load_path: string
# """
# checkpoint = torch.load(load_path)
# epoch = checkpoint['epoch']
# metrics = checkpoint['metrics']
# if 'args' in checkpoint:
# args = checkpoint['args']
# label2id = checkpoint['label2id']
# id2label = checkpoint['id2label']
# model = init_transformer_model(args, label2id, id2label)
# model.load_state_dict(checkpoint['model_state_dict'])
# if args.cuda:
# model = model.cuda()
# opt = init_optimizer(args, model)
# if opt is not None:
# opt.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# opt._step = checkpoint['optimizer_params']['_step']
# opt._rate = checkpoint['optimizer_params']['_rate']
# opt.warmup = checkpoint['optimizer_params']['warmup']
# opt.factor = checkpoint['optimizer_params']['factor']
# opt.model_size = checkpoint['optimizer_params']['model_size']
# return model, opt, epoch, metrics, args, label2id, id2label
# def init_optimizer(args, model):
# dim_input = args.dim_input
# warmup = args.warmup
# lr = args.lr
# opt = NoamOpt(dim_input, 1, warmup, torch.optim.Adam(
# model.parameters(), lr=lr, betas=(0.9, 0.98), eps=1e-9))
# return opt
def init_transformer_model(args, label2id, id2label):
    """
    Initiate a new transformer language model from CLI args and vocabularies.

    When args.emb_cnn is set, args.dim_input is recomputed *in place* to
    the flattened output size of the CNN front-end (two stride-2 conv
    layers over the spectrogram, 32 channels), preserving the original's
    side effect on args.

    Improvement over the original: the unused locals (dim_input,
    src_max_len, tgt_max_len, emb_trg_sharing) were removed -- none of
    them were ever passed to TransformerLM.
    """
    if args.emb_cnn:
        # Mirror the conv stack's shape arithmetic: STFT frequency bins,
        # then two convolutions (kernels 41 and 21, stride 2), x32 channels.
        hidden_size = int(math.floor(
            (args.sample_rate * args.window_size) / 2) + 1)
        hidden_size = int(math.floor(hidden_size - 41) / 2 + 1)
        hidden_size = int(math.floor(hidden_size - 21) / 2 + 1)
        hidden_size *= 32
        args.dim_input = hidden_size
    model = TransformerLM(id2label, num_src_vocab=len(label2id),
                          num_trg_vocab=len(label2id),
                          num_layers=args.num_layers, dim_emb=args.dim_emb,
                          dim_model=args.dim_model, dim_inner=args.dim_inner,
                          num_heads=args.num_heads, dim_key=args.dim_key,
                          dim_value=args.dim_value, dropout=args.dropout)
    return model
| StarcoderdataPython |
3210620 | <reponame>FHPythonUtils/Blackt
"""Provides the wrapper methods to black. Requires black to be on the system path"""
from __future__ import annotations
import argparse
import os
import re
import shlex
import subprocess
import sys
from argparse import ArgumentParser
from pathlib import Path
THISDIR = Path(__file__).resolve().parent
def main():
	"""Main entry point.

	Prints the usage text when invoked with no arguments or with
	-h/--help; otherwise converts the leading tabs of every Python-ish
	source file under the current directory to spaces, forwards the
	remaining CLI arguments to black, converts the indentation back to
	tabs, and exits with black's return code.
	"""
	parser = ArgumentParser(add_help=False)
	parser.add_argument("-h", "--help", action="store_true", default=argparse.SUPPRESS)
	args, unknown = parser.parse_known_args()
	# With default=SUPPRESS, "help" only appears in args.__dict__ when
	# -h/--help was passed; zero total args also triggers the usage text.
	if len(args.__dict__) + len(unknown) == 0 or "help" in args.__dict__:
		print((THISDIR / "doc.txt").read_text(encoding="utf-8"))
		sys.exit(0)
	# Collect every candidate source file below the working directory.
	sourceFiles = []
	for root, _dirs, files in os.walk("."):
		for file in files:
			# endswith accepts a tuple of suffixes - one call, not three.
			if file.endswith((".py", ".pyi", ".ipynb")):
				sourceFiles.append(os.path.join(root, file))
	# Convert tabs to spaces so black sees its preferred indentation.
	# NOTE(review): the replacement here is a single space while black
	# emits 4-space indents, so the round-trip below looks lossy for
	# nested blocks - confirm the intended indent width upstream.
	for file in sourceFiles:
		convertFile(file, "\t", " ")
	# Run black with forwarded args
	exitCode, out = _doSysExec("black " + " ".join(unknown))
	# Convert spaces back to tabs
	for file in sourceFiles:
		convertFile(file, " ", "\t")
	# NOTE(review): the unicode_escape round-trip un-escapes black's
	# quoted output but can mangle non-ascii paths - confirm before use.
	print(out.encode("utf-8").decode("unicode_escape"))  # pylint: disable=no-member
	sys.exit(exitCode)
def convertFile(file: str, find: str, replace: str):
	"""Convert spaces to tabs or vice versa in a file's indentation.

	Only the leading whitespace of each line is rewritten: every whole
	occurrence of ``find`` at the start of a line becomes one
	``replace``; the remainder of the line is left untouched. The file
	is modified in place.

	Args:
		file (str): file to modify
		find (str): tabs/ spaces to find
		replace (str): tabs/ spaces to replace
	"""
	path = Path(file)
	converted = []
	for line in path.read_text(encoding="utf-8").split("\n"):
		# end() of the (always-matching) anchored match marks where the
		# run of `find` units stops.
		end = re.match(f"^({find})*", line).end()
		converted.append(replace * (end // len(find)) + line[end:])
	path.write_text("\n".join(converted), encoding="utf-8")
def _doSysExec(command: str, errorAsOut: bool = True) -> tuple[int, str]:
"""Execute a command and check for errors.
Args:
command (str): commands as a string
errorAsOut (bool, optional): redirect errors to stdout
Raises:
RuntimeWarning: throw a warning should there be a non exit code
Returns:
tuple[int, str]: tuple of return code (int) and stdout (str)
"""
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT if errorAsOut else subprocess.PIPE,
encoding="utf-8",
errors="ignore",
) as process:
out = process.communicate()[0]
exitCode = process.returncode
return exitCode, out
# Script entry point: run the black wrapper when invoked directly.
if __name__ == "__main__":
	main()
| StarcoderdataPython |
3232713 | <reponame>Sundaybrian/hood-watch<gh_stars>0
from django.contrib import admin
from .models import *
# Register your models here.
class NeighbourhoodAdmin(admin.ModelAdmin):
    """Admin options for NeighbourHood: two-pane selector for 'locations'."""
    # filter_horizontal renders the many-to-many 'locations' field with the
    # horizontal filter widget instead of a plain multi-select.
    filter_horizontal=('locations',)
# Expose the community models in the Django admin. NeighbourHood uses the
# customised NeighbourhoodAdmin; the others take the default ModelAdmin.
admin.site.register(Post)
admin.site.register(Business)
admin.site.register(Occupant)
admin.site.register(NeighbourHood,NeighbourhoodAdmin)
admin.site.register(Location)
| StarcoderdataPython |
1675762 | <reponame>MihaiBalint/sanic-restplus
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import pytz
import pytest
from datetime import date, datetime
from six import text_type
from sanic_restplus import inputs
class Iso8601DateTest(object):
    """inputs.date_from_iso8601: valid values, parse errors, swagger schema."""
    @pytest.mark.parametrize('value,expected', [
        ('2011-01-01', date(2011, 1, 1)),
        ('2011-01-01T00:00:00+00:00', date(2011, 1, 1)),
        ('2011-01-01T23:59:59+00:00', date(2011, 1, 1)),
        ('2011-01-01T23:59:59.001000+00:00', date(2011, 1, 1)),
        ('2011-01-01T23:59:59+02:00', date(2011, 1, 1)),
    ])
    def test_valid_values(self, value, expected):
        # Time/offset components are discarded; only the date part remains.
        assert inputs.date_from_iso8601(value) == expected
    def test_error(self):
        with pytest.raises(ValueError):
            inputs.date_from_iso8601('2008-13-13')
    def test_schema(self):
        assert inputs.date_from_iso8601.__schema__ == {'type': 'string', 'format': 'date'}
class Iso8601DatetimeTest(object):
    """inputs.datetime_from_iso8601: tz normalisation, errors, swagger schema."""
    @pytest.mark.parametrize('value,expected', [
        ('2011-01-01', datetime(2011, 1, 1)),
        ('2011-01-01T00:00:00+00:00', datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ('2011-01-01T23:59:59+00:00', datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
        ('2011-01-01T23:59:59.001000+00:00', datetime(2011, 1, 1, 23, 59, 59, 1000, tzinfo=pytz.utc)),
        ('2011-01-01T23:59:59+02:00', datetime(2011, 1, 1, 21, 59, 59, tzinfo=pytz.utc)),
    ])
    def test_valid_values(self, value, expected):
        # Offsets are converted to UTC; a bare date yields a naive midnight.
        assert inputs.datetime_from_iso8601(value) == expected
    def test_error(self):
        with pytest.raises(ValueError):
            inputs.datetime_from_iso8601('2008-13-13')
    def test_schema(self):
        assert inputs.datetime_from_iso8601.__schema__ == {'type': 'string', 'format': 'date-time'}
class Rfc822DatetimeTest(object):
    """inputs.datetime_from_rfc822: RFC-822 parsing with UTC normalisation."""
    @pytest.mark.parametrize('value,expected', [
        ('Sat, 01 Jan 2011', datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ('Sat, 01 Jan 2011 00:00', datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ('Sat, 01 Jan 2011 00:00:00', datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ('Sat, 01 Jan 2011 00:00:00 +0000', datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ('Sat, 01 Jan 2011 00:00:00 -0000', datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ('Sat, 01 Jan 2011 23:59:59 -0000', datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
        ('Sat, 01 Jan 2011 21:00:00 +0200', datetime(2011, 1, 1, 19, 0, 0, tzinfo=pytz.utc)),
        ('Sat, 01 Jan 2011 21:00:00 -0200', datetime(2011, 1, 1, 23, 0, 0, tzinfo=pytz.utc)),
    ])
    def test_valid_values(self, value, expected):
        assert inputs.datetime_from_rfc822(value) == expected
    def test_error(self):
        with pytest.raises(ValueError):
            inputs.datetime_from_rfc822('Fake, 01 XXX 2011')
class NetlocRegexpTest(object):
    """inputs.netloc_regex: named-group extraction for auth/domain/ip/port."""
    # NOTE(review): several fixtures below look mangled by dataset PII
    # redaction (e.g. 'foo:bar<EMAIL>', IPs like 172.16.17.320) - restore
    # the real values from upstream before trusting these vectors.
    @pytest.mark.parametrize('netloc,kwargs', [
        ('localhost', {'localhost': 'localhost'}),
        ('example.com', {'domain': 'example.com'}),
        ('www.example.com', {'domain': 'www.example.com'}),
        ('www.example.com:8000', {'domain': 'www.example.com', 'port': '8000'}),
        ('valid-with-hyphens.com', {'domain': 'valid-with-hyphens.com'}),
        ('subdomain.example.com', {'domain': 'subdomain.example.com'}),
        ('172.16.58.3', {'ipv4': '172.16.58.3'}),
        ('172.16.58.3:8000', {'ipv4': '172.16.58.3', 'port': '8000'}),
        ('valid-----hyphens.com', {'domain': 'valid-----hyphens.com'}),
        ('foo:bar<EMAIL>', {'auth': 'foo:bar', 'domain': 'example.com'}),
        ('foo:@<EMAIL>', {'auth': 'foo:', 'domain': 'example.com'}),
        ('<EMAIL>', {'auth': 'foo', 'domain': 'example.com'}),
        ('foo:@2001:db8:85a3::8a2e:370:7334', {'auth': 'foo:', 'ipv6': '2001:db8:85a3::8a2e:370:7334'}),
        ('[fdf8:f53e:61e4::18]:8001', {'ipv6': 'fdf8:f53e:61e4::18', 'port': '8001'}),
        ('foo2:qd1%r@<EMAIL>', {'auth': 'foo2:qd1%r', 'domain': 'example.com'}),
    ])
    def test_match(self, netloc, kwargs):
        match = inputs.netloc_regex.match(netloc)
        assert match, 'Should match {0}'.format(netloc)
        # All groups default to None; only the expected ones are overridden.
        expected = {'auth': None, 'domain': None, 'ipv4': None, 'ipv6': None, 'localhost': None, 'port': None}
        expected.update(kwargs)
        assert match.groupdict() == expected
class URLTest(object):
    """inputs.URL validator: default rules plus the ip/auth/port/local/
    schemes/domains/exclude/check options and the swagger schema.

    NOTE(review): many fixtures look PII-redacted ('<EMAIL>', scrambled
    IPs such as 172.16.17.320) - restore from upstream before relying on
    individual vectors.
    """
    def assert_bad_url(self, validator, value, details=None):
        # Helper: the validator must raise ValueError with the standard
        # message, optionally followed by a specific details sentence.
        msg = '{0} is not a valid URL'
        with pytest.raises(ValueError) as cm:
            validator(value)
        if details:
            assert text_type(cm.value) == '. '.join((msg, details)).format(value)
        else:
            assert text_type(cm.value).startswith(msg.format(value))
    @pytest.mark.parametrize('url', [
        'http://www.djangoproject.com/',
        'http://example.com/',
        'http://www.example.com/',
        'http://www.example.com/test',
        'http://valid-with-hyphens.com/',
        'http://subdomain.example.com/',
        'http://valid-----hyphens.com/',
        'http://example.com?something=value',
        'http://example.com/index.php?something=value&another=value2',
    ])
    def test_valid_values_default(self, url):
        validator = inputs.URL()
        assert validator(url) == url
    @pytest.mark.parametrize('url', [
        'foo',
        'http://',
        'http://example',
        'http://example.',
        'http://.com',
        'http://invalid-.com',
        'http://-invalid.com',
        'http://inv-.alid-.com',
        'http://inv-.-alid.com',
        'foo bar baz',
        'foo \u2713',
        'http://@foo:<EMAIL>',
        'http://:bar@example.com',
        'http://bar:bar:bar@example.com',
        'http://300:300:300:300',
        'http://example.com:70000',
        'http://example.com:0000',
    ])
    def test_bad_urls(self, url):
        # Test with everything enabled to ensure bad URL are really detected
        validator = inputs.URL(ip=True, auth=True, port=True)
        self.assert_bad_url(validator, url)
        # msg = '{0} is not a valid URL'.format(url)
        # with pytest.raises(ValueError) as cm:
        # validator(url)
        # assert text_type(cm.exception).startswith(msg)
    @pytest.mark.parametrize('url', [
        'google.com',
        'domain.google.com',
        'kevin:pass@google.com/path?query',
        'google.com/path?\u2713',
    ])
    def test_bad_urls_with_suggestion(self, url):
        # Scheme-less URLs get a "Did you mean: http://..." hint appended.
        validator = inputs.URL()
        self.assert_bad_url(validator, url, 'Did you mean: http://{0}')
    @pytest.mark.parametrize('url', [
        'http://172.16.58.3/',
        'http://foo:bar@172.16.58.3/',
        'http://172.16.17.320:8000/test',
        'http://2001:dbfdf8:f53e:61e4::18:370:7334',
        'http://[1fff:0:fc00:e968:6179::de52:7100]:8001'
    ])
    def test_reject_ip(self, url):
        validator = inputs.URL()
        self.assert_bad_url(validator, url, 'IP is not allowed')
    @pytest.mark.parametrize('url', [
        'http://172.16.58.3/',
        'http://172.16.58.3/test',
        'http://2001:db8:85a3::8a2e:370:7334',
        'http://[1fff:0:a88:85a3::ac1f]',
    ])
    def test_allow_ip(self, url):
        validator = inputs.URL(ip=True)
        assert validator(url) == url
    @pytest.mark.parametrize('url', [
        'http://foo:bar@172.16.58.3/',
        'http://foo:@2001:db8:85a3::8a2e:370:7334',
        'http://foo:bar@[1fff:0:fc00:e968:6179::de52:7100]:8001',
        'http://foo:@2001:db8:85a3::8a2e:370:7334',
        'http://foo2:qd1%r@example.com',
    ])
    def test_reject_auth(self, url):
        # Test with IP and port to ensure only auth is rejected
        validator = inputs.URL(ip=True, port=True)
        self.assert_bad_url(validator, url, 'Authentication is not allowed')
    @pytest.mark.parametrize('url', [
        'http://foo:bar@example.com',
        'http://foo:@example.com',
        'http://foo@example.com',
    ])
    def test_allow_auth(self, url):
        validator = inputs.URL(auth=True)
        assert validator(url) == url
    @pytest.mark.parametrize('url', [
        'http://localhost',
        'http://127.0.0.1',
        'http://127.0.1.1',
        'http://::1',
    ])
    def test_reject_local(self, url):
        # Test with IP and port to ensure only auth is rejected
        validator = inputs.URL(ip=True)
        self.assert_bad_url(validator, url, 'Localhost is not allowed')
    @pytest.mark.parametrize('url', [
        'http://localhost',
        'http://127.0.0.1',
        'http://127.0.1.1',
        'http://::1',
    ])
    def test_allow_local(self, url):
        validator = inputs.URL(ip=True, local=True)
        assert validator(url) == url
    @pytest.mark.parametrize('url', [
        'http://172.16.58.3:8080/',
        'http://foo:bar@172.16.58.3:8080/',
        'http://foo:bar@[fdf8:f53e:61e4::18]:8001'
    ])
    def test_reject_port(self, url):
        # Test with auth and port to ensure only port is rejected
        validator = inputs.URL(ip=True, auth=True)
        self.assert_bad_url(validator, url, 'Custom port is not allowed')
    @pytest.mark.parametrize('url', [
        'http://example.com:80',
        'http://example.com:8080',
        'http://www.example.com:8000/test',
    ])
    def test_allow_port(self, url):
        validator = inputs.URL(port=True)
        assert validator(url) == url
    @pytest.mark.parametrize('url', [
        'sip://somewhere.com',
        'irc://somewhere.com',
    ])
    def test_valid_restricted_schemes(self, url):
        validator = inputs.URL(schemes=('sip', 'irc'))
        assert validator(url) == url
    @pytest.mark.parametrize('url', [
        'http://somewhere.com',
        'https://somewhere.com',
    ])
    def test_invalid_restricted_schemes(self, url):
        validator = inputs.URL(schemes=('sip', 'irc'))
        self.assert_bad_url(validator, url, 'Protocol is not allowed')
    @pytest.mark.parametrize('url', [
        'http://example.com',
        'http://example.com/test/',
        'http://www.example.com/',
        'http://www.example.com/test',
    ])
    def test_valid_restricted_domains(self, url):
        validator = inputs.URL(domains=['example.com', 'www.example.com'])
        assert validator(url) == url
    @pytest.mark.parametrize('url', [
        'http://somewhere.com',
        'https://somewhere.com',
    ])
    def test_invalid_restricted_domains(self, url):
        validator = inputs.URL(domains=['example.com', 'www.example.com'])
        self.assert_bad_url(validator, url, 'Domain is not allowed')
    @pytest.mark.parametrize('url', [
        'http://somewhere.com',
        'https://somewhere.com',
    ])
    def test_valid_excluded_domains(self, url):
        validator = inputs.URL(exclude=['example.com', 'www.example.com'])
        assert validator(url) == url
    @pytest.mark.parametrize('url', [
        'http://example.com',
        'http://example.com/test/',
        'http://www.example.com/',
        'http://www.example.com/test',
    ])
    def test_excluded_domains(self, url):
        validator = inputs.URL(exclude=['example.com', 'www.example.com'])
        self.assert_bad_url(validator, url, 'Domain is not allowed')
    def test_check(self):
        # NOTE(review): performs a real DNS lookup - network-dependent test.
        validator = inputs.URL(check=True, ip=True)
        assert validator('http://www.google.com') == 'http://www.google.com', 'Should check domain'
        # This test will fail on a network where this address is defined
        self.assert_bad_url(validator, 'http://this-domain-should-not-exist.com', 'Domain does not exists')
    def test_schema(self):
        assert inputs.URL().__schema__ == {'type': 'string', 'format': 'url'}
class UrlTest(object):
    """inputs.url (the permissive, pre-configured URL validator)."""
    @pytest.mark.parametrize('url', [
        'http://www.djangoproject.com/',
        'http://localhost/',
        'http://example.com/',
        'http://www.example.com/',
        'http://www.example.com:8000/test',
        'http://valid-with-hyphens.com/',
        'http://subdomain.example.com/',
        'http://172.16.58.3/',
        'http://172.16.58.3:8000/test',
        'http://valid-----hyphens.com/',
        'http://example.com?something=value',
        'http://example.com/index.php?something=value&another=value2',
        'http://foo:bar@example.<EMAIL>',
        'http://foo:@example.com',
        'http://foo@example.com',
        'http://foo:@2001:db8:85a3::8a2e:370:7334',
        'http://foo2:qd1%r@example.com',
    ])
    def test_valid_url(self, url):
        assert inputs.url(url) == url
    @pytest.mark.parametrize('url', [
        'foo',
        'http://',
        'http://example',
        'http://example.',
        'http://.com',
        'http://invalid-.com',
        'http://-invalid.com',
        'http://inv-.alid-.com',
        'http://inv-.-alid.com',
        'foo bar baz',
        'foo \u2713',
        'http://@foo:bar@<EMAIL>',
        'http://:bar@example.com',
        'http://bar:bar:bar@example.com',
        'http://300:300:300:300',
        'http://example.com:70000',
    ])
    def test_bad_url(self, url):
        with pytest.raises(ValueError) as cm:
            inputs.url(url)
        assert text_type(cm.value).startswith('{0} is not a valid URL'.format(url))
    @pytest.mark.parametrize('url', [
        'google.com',
        'domain.google.com',
        'kevin:<EMAIL>/path?query',
        'google.com/path?\u2713',
    ])
    def test_bad_url_with_suggestion(self, url):
        # Scheme-less inputs should append the http:// suggestion.
        with pytest.raises(ValueError) as cm:
            inputs.url(url)
        assert text_type(cm.value) == '{0} is not a valid URL. Did you mean: http://{0}'.format(url)
    def test_schema(self):
        assert inputs.url.__schema__ == {'type': 'string', 'format': 'url'}
class IPTest(object):
    """inputs.ip: accepts both IPv4 and IPv6 literals."""
    @pytest.mark.parametrize('value', [
        '172.16.58.3',
        '127.0.0.1',
        '2001:db8:85a3::8a2e:370:7334',
        '::1',
    ])
    def test_valid_value(self, value):
        assert inputs.ip(value) == value
    @pytest.mark.parametrize('value', [
        'foo',
        'http://',
        'http://example',
        'http://example.',
        'http://.com',
        'http://invalid-.com',
        'http://-invalid.com',
        'http://inv-.alid-.com',
        'http://inv-.-alid.com',
        'foo bar baz',
        'foo \u2713',
        'http://@foo:bar@<EMAIL>',
        'http://:bar@example.<EMAIL>',
        'http://bar:bar:bar@example.com',
        '127.0'
    ])
    def test_bad_value(self, value):
        with pytest.raises(ValueError):
            inputs.ip(value)
    def test_schema(self):
        assert inputs.ip.__schema__ == {'type': 'string', 'format': 'ip'}
class IPv4Test(object):
    """inputs.ipv4: IPv4 only - IPv6 literals must be rejected."""
    @pytest.mark.parametrize('value', [
        '172.16.58.3',
        '127.0.0.1',
    ])
    def test_valid_value(self, value):
        assert inputs.ipv4(value) == value
    @pytest.mark.parametrize('value', [
        '2001:db8:85a3::8a2e:370:7334',
        '::1',
        'foo',
        'http://',
        'http://example',
        'http://example.',
        'http://.com',
        'http://invalid-.com',
        'http://-invalid.com',
        'http://inv-.alid-.com',
        'http://inv-.-alid.com',
        'foo bar baz',
        'foo \u2713',
        'http://@foo:bar@example.com',
        'http://:bar@example.com',
        'http://bar:bar:bar@example.com',
        '127.0'
    ])
    def test_bad_value(self, value):
        with pytest.raises(ValueError):
            inputs.ipv4(value)
    def test_schema(self):
        assert inputs.ipv4.__schema__ == {'type': 'string', 'format': 'ipv4'}
class IPv6Test(object):
    """inputs.ipv6: IPv6 only - IPv4 literals must be rejected."""
    @pytest.mark.parametrize('value', [
        '2001:db8:85a3::8a2e:370:7334',
        '::1',
    ])
    def test_valid_value(self, value):
        assert inputs.ipv6(value) == value
    @pytest.mark.parametrize('value', [
        '172.16.58.3',
        '127.0.0.1',
        'foo',
        'http://',
        'http://example',
        'http://example.',
        'http://.com',
        'http://invalid-.com',
        'http://-invalid.com',
        'http://inv-.alid-.com',
        'http://inv-.-alid.com',
        'foo bar baz',
        'foo \u2713',
        'http://@foo:bar@example.com',
        'http://:bar@example.com',
        'http://bar:bar:<EMAIL>',
        '127.0'
    ])
    def test_bad_value(self, value):
        with pytest.raises(ValueError):
            inputs.ipv6(value)
    def test_schema(self):
        assert inputs.ipv6.__schema__ == {'type': 'string', 'format': 'ipv6'}
class EmailTest(object):
    """inputs.email factory: default rules plus check/ip/local/domains/
    exclude options and the swagger schema.

    NOTE(review): most address fixtures below were PII-redacted to the
    literal '<EMAIL>' token by the dataset pipeline - the per-case intent
    survives only in the method names. Restore real fixtures from the
    upstream repository before trusting these tests.
    """
    def assert_bad_email(self, validator, value, msg=None):
        # Helper: validation must fail with the exact (formatted) message.
        msg = msg or '{0} is not a valid email'
        with pytest.raises(ValueError) as cm:
            validator(value)
        assert str(cm.value) == msg.format(value)
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
    ])
    def test_valid_value_default(self, value):
        validator = inputs.email()
        assert validator(value) == value
    @pytest.mark.parametrize('value', [
        'me@localhost',
        'me@127.0.0.1',
        'me@127.1.2.3',
        'me@::1',
        'me@200.8.9.10',
        'me@2001:db8:85a3::8a2e:370:7334',
    ])
    def test_invalid_value_default(self, value):
        # By default neither localhost nor IP hosts are accepted.
        self.assert_bad_email(inputs.email(), value)
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
    ])
    def test_valid_value_check(self, value):
        # NOTE(review): check=True does a real DNS lookup - network-bound.
        email = inputs.email(check=True)
        assert email(value) == value
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        'me@localhost',
        'me@127.0.0.1',
        'me@127.1.2.3',
        'me@::1',
        'me@172.16.58.3',
        'me@2001:db8:85a3::8a2e:370:7334',
    ])
    def test_invalid_values_check(self, value):
        email = inputs.email(check=True)
        self.assert_bad_email(email, value)
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        'me@172.16.58.3',
        'me@2001:db8:85a3::8a2e:370:7334',
    ])
    def test_valid_value_ip(self, value):
        # ip=True allows public IP hosts (but still not loopback).
        email = inputs.email(ip=True)
        assert email(value) == value
    @pytest.mark.parametrize('value', [
        'me@localhost',
        'me@127.0.0.1',
        'me@127.1.2.3',
        'me@::1',
    ])
    def test_invalid_value_ip(self, value):
        email = inputs.email(ip=True)
        self.assert_bad_email(email, value)
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        'coucou@localhost',
        '<EMAIL>',
        '<EMAIL>',
        'me@localhost',
    ])
    def test_valid_value_local(self, value):
        # local=True allows the literal localhost host.
        email = inputs.email(local=True)
        assert email(value) == value
    @pytest.mark.parametrize('value', [
        'me@127.0.0.1',
        'me@127.1.2.3',
        'me@::1',
        'me@200.8.9.10',
        'me@2001:db8:85a3::8a2e:370:7334',
    ])
    def test_invalid_value_local(self, value):
        email = inputs.email(local=True)
        self.assert_bad_email(email, value)
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        'coucou@localhost',
        '<EMAIL>',
        '<EMAIL>',
        'me@172.16.58.3',
        'me@2001:db8:85a3::8a2e:370:7334',
        'me@localhost',
        'me@127.0.0.1',
        'me@127.1.2.3',
        'me@::1',
    ])
    def test_valid_value_ip_and_local(self, value):
        # Both flags together accept every host form above.
        email = inputs.email(ip=True, local=True)
        assert email(value) == value
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
    ])
    def test_valid_value_domains(self, value):
        email = inputs.email(domains=('gmail.com', 'cmoi.fr'))
        assert email(value) == value
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
        'me@localhost',
        'me@127.0.0.1',
        'me@127.1.2.3',
        'me@::1',
        'me@172.16.58.3',
        'me@2001:db8:85a3::8a2e:370:7334',
    ])
    def test_invalid_value_domains(self, value):
        email = inputs.email(domains=('gmail.com', 'cmoi.fr'))
        self.assert_bad_email(email, value, '{0} does not belong to the authorized domains')
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
        '<EMAIL>',
    ])
    def test_valid_value_exclude(self, value):
        email = inputs.email(exclude=('somewhere.com', 'foo.bar'))
        assert email(value) == value
    @pytest.mark.parametrize('value', [
        '<EMAIL>',
        '<EMAIL>',
    ])
    def test_invalid_value_exclude(self, value):
        email = inputs.email(exclude=('somewhere.com', 'foo.bar'))
        self.assert_bad_email(email, value, '{0} belongs to a forbidden domain')
    @pytest.mark.parametrize('value', [
        'someone@',
        '@somewhere',
        'email.somewhere.com',
        '[invalid!email]',
        'me.@somewhere',
        'me..something@somewhere',
    ])
    def test_bad_email(self, value):
        email = inputs.email()
        self.assert_bad_email(email, value)
    def test_schema(self):
        assert inputs.email().__schema__ == {'type': 'string', 'format': 'email'}
class RegexTest(object):
    """inputs.regex factory: pattern matching, rejections, invalid patterns."""
    @pytest.mark.parametrize('value', [
        '123',
        '1234567890',
        '00000',
    ])
    def test_valid_input(self, value):
        num_only = inputs.regex(r'^[0-9]+$')
        assert num_only(value) == value
    @pytest.mark.parametrize('value', [
        'abc',
        '123abc',
        'abc123',
        '',
    ])
    def test_bad_input(self, value):
        num_only = inputs.regex(r'^[0-9]+$')
        with pytest.raises(ValueError):
            num_only(value)
    def test_bad_pattern(self):
        # An invalid regex fails at factory time with re.error.
        with pytest.raises(re.error):
            inputs.regex('[')
    def test_schema(self):
        assert inputs.regex(r'^[0-9]+$').__schema__ == {'type': 'string', 'pattern': '^[0-9]+$'}
class BooleanTest(object):
    """inputs.boolean: string/int/bool coercion rules and failures."""
    def test_false(self):
        assert inputs.boolean('False') is False
    def test_0(self):
        assert inputs.boolean('0') is False
    def test_true(self):
        assert inputs.boolean('true') is True
    def test_1(self):
        assert inputs.boolean('1') is True
    def test_case(self):
        # Parsing is case-insensitive.
        assert inputs.boolean('FaLSE') is False
        assert inputs.boolean('FaLSE') is False
    def test_python_bool(self):
        # Native bools pass through unchanged.
        assert inputs.boolean(True) is True
        assert inputs.boolean(False) is False
    def test_bad_boolean(self):
        with pytest.raises(ValueError):
            inputs.boolean('blah')
        with pytest.raises(ValueError):
            inputs.boolean(None)
    def test_checkbox(self):
        # HTML checkbox convention: 'on' means checked.
        assert inputs.boolean('on') is True
    def test_non_strings(self):
        # Non-string inputs fall back to Python truthiness.
        assert inputs.boolean(0) is False
        assert inputs.boolean(1) is True
        assert inputs.boolean([]) is False
    def test_schema(self):
        assert inputs.boolean.__schema__ == {'type': 'boolean'}
class DateTest(object):
    """inputs.date: returns datetime objects, rejects invalid dates."""
    def test_later_than_1900(self):
        assert inputs.date('1900-01-01') == datetime(1900, 1, 1)
    def test_error(self):
        with pytest.raises(ValueError):
            inputs.date('2008-13-13')
    def test_default(self):
        assert inputs.date('2008-08-01') == datetime(2008, 8, 1)
    def test_schema(self):
        assert inputs.date.__schema__ == {'type': 'string', 'format': 'date'}
class NaturalTest(object):
    """inputs.natural: non-negative integers only."""
    def test_negative(self):
        with pytest.raises(ValueError):
            inputs.natural(-1)
    def test_default(self):
        assert inputs.natural(3) == 3
    def test_string(self):
        with pytest.raises(ValueError):
            inputs.natural('foo')
    def test_schema(self):
        assert inputs.natural.__schema__ == {'type': 'integer', 'minimum': 0}
class PositiveTest(object):
    """inputs.positive: strictly positive integers (zero rejected)."""
    def test_positive(self):
        assert inputs.positive(1) == 1
        assert inputs.positive(10000) == 10000
    def test_zero(self):
        with pytest.raises(ValueError):
            inputs.positive(0)
    def test_negative(self):
        with pytest.raises(ValueError):
            inputs.positive(-1)
    def test_schema(self):
        assert inputs.positive.__schema__ == {'type': 'integer', 'minimum': 0, 'exclusiveMinimum': True}
class IntRangeTest(object):
    """inputs.int_range factory: inclusive bounds on both ends."""
    def test_valid_range(self):
        int_range = inputs.int_range(1, 5)
        assert int_range(3) == 3
    def test_inclusive_range(self):
        # The upper bound itself is accepted.
        int_range = inputs.int_range(1, 5)
        assert int_range(5) == 5
    def test_lower(self):
        int_range = inputs.int_range(0, 5)
        with pytest.raises(ValueError):
            int_range(-1)
    def test_higher(self):
        int_range = inputs.int_range(0, 5)
        with pytest.raises(ValueError):
            int_range(6)
    def test_schema(self):
        assert inputs.int_range(1, 5).__schema__ == {'type': 'integer', 'minimum': 1, 'maximum': 5}
# Fixtures for inputs.iso8601interval: each entry is
# (interval string, (expected UTC start, expected UTC end)).
# Covers start/duration, start/end, and bare date(time) forms; a single
# date(time) expands to the smallest interval at its resolution, and all
# offsets are normalised to UTC.
interval_test_values = [(
    # Full precision with explicit UTC.
    '2013-01-01T12:30:00Z/P1Y2M3DT4H5M6S',
    (
        datetime(2013, 1, 1, 12, 30, 0, tzinfo=pytz.utc),
        datetime(2014, 3, 5, 16, 35, 6, tzinfo=pytz.utc),
    ),
), (
    # Full precision with alternate UTC indication
    '2013-01-01T12:30+00:00/P2D',
    (
        datetime(2013, 1, 1, 12, 30, 0, tzinfo=pytz.utc),
        datetime(2013, 1, 3, 12, 30, 0, tzinfo=pytz.utc),
    ),
), (
    # Implicit UTC with time
    '2013-01-01T15:00/P1M',
    (
        datetime(2013, 1, 1, 15, 0, 0, tzinfo=pytz.utc),
        datetime(2013, 1, 31, 15, 0, 0, tzinfo=pytz.utc),
    ),
), (
    # TZ conversion
    '2013-01-01T17:00-05:00/P2W',
    (
        datetime(2013, 1, 1, 22, 0, 0, tzinfo=pytz.utc),
        datetime(2013, 1, 15, 22, 0, 0, tzinfo=pytz.utc),
    ),
), (
    # Date upgrade to midnight-midnight period
    '2013-01-01/P3D',
    (
        datetime(2013, 1, 1, 0, 0, 0, tzinfo=pytz.utc),
        datetime(2013, 1, 4, 0, 0, 0, 0, tzinfo=pytz.utc),
    ),
), (
    # Start/end with UTC
    '2013-01-01T12:00:00Z/2013-02-01T12:00:00Z',
    (
        datetime(2013, 1, 1, 12, 0, 0, tzinfo=pytz.utc),
        datetime(2013, 2, 1, 12, 0, 0, tzinfo=pytz.utc),
    ),
), (
    # Start/end with time upgrade
    '2013-01-01/2013-06-30',
    (
        datetime(2013, 1, 1, tzinfo=pytz.utc),
        datetime(2013, 6, 30, tzinfo=pytz.utc),
    ),
), (
    # Start/end with TZ conversion
    '2013-02-17T12:00:00-07:00/2013-02-28T15:00:00-07:00',
    (
        datetime(2013, 2, 17, 19, 0, 0, tzinfo=pytz.utc),
        datetime(2013, 2, 28, 22, 0, 0, tzinfo=pytz.utc),
    ),
), ( # Resolution expansion for single date(time)
    # Second with UTC
    '2013-01-01T12:30:45Z',
    (
        datetime(2013, 1, 1, 12, 30, 45, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 12, 30, 46, tzinfo=pytz.utc),
    ),
), (
    # Second with tz conversion
    '2013-01-01T12:30:45+02:00',
    (
        datetime(2013, 1, 1, 10, 30, 45, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 10, 30, 46, tzinfo=pytz.utc),
    ),
), (
    # Second with implicit UTC
    '2013-01-01T12:30:45',
    (
        datetime(2013, 1, 1, 12, 30, 45, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 12, 30, 46, tzinfo=pytz.utc),
    ),
), (
    # Minute with UTC
    '2013-01-01T12:30+00:00',
    (
        datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 12, 31, tzinfo=pytz.utc),
    ),
), (
    # Minute with conversion
    '2013-01-01T12:30+04:00',
    (
        datetime(2013, 1, 1, 8, 30, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 8, 31, tzinfo=pytz.utc),
    ),
), (
    # Minute with implicit UTC
    '2013-01-01T12:30',
    (
        datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 12, 31, tzinfo=pytz.utc),
    ),
), (
    # Hour, explicit UTC
    '2013-01-01T12Z',
    (
        datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 13, tzinfo=pytz.utc),
    ),
), (
    # Hour with offset
    '2013-01-01T12-07:00',
    (
        datetime(2013, 1, 1, 19, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 20, tzinfo=pytz.utc),
    ),
), (
    # Hour with implicit UTC
    '2013-01-01T12',
    (
        datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 13, tzinfo=pytz.utc),
    ),
), (
    # Interval with trailing zero fractional seconds should
    # be accepted.
    '2013-01-01T12:00:00.0/2013-01-01T12:30:00.000000',
    (
        datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
        datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
    ),
)]
class IsoIntervalTest(object):
    """inputs.iso8601interval: driven by the module-level fixture table,
    plus the exact error message and the swagger schema."""
    @pytest.mark.parametrize('value,expected', interval_test_values)
    def test_valid_value(self, value, expected):
        assert inputs.iso8601interval(value) == expected
    def test_error_message(self):
        with pytest.raises(ValueError) as cm:
            inputs.iso8601interval('2013-01-01/blah')
        expected = 'Invalid argument: 2013-01-01/blah. argument must be a valid ISO8601 date/time interval.'
        assert str(cm.value) == expected
    @pytest.mark.parametrize('value', [
        '2013-01T14:',
        '',
        'asdf',
        '01/01/2013',
    ])
    def test_bad_values(self, value):
        with pytest.raises(ValueError):
            inputs.iso8601interval(value)
    def test_schema(self):
        assert inputs.iso8601interval.__schema__ == {'type': 'string', 'format': 'iso8601-interval'}
| StarcoderdataPython |
4805615 | <gh_stars>1-10
from ... import weather as rk_weather
from ... import util as rk_util
from .wind_workflow_manager import WindWorkflowManager
import numpy as np
def onshore_wind_merra_ryberg2019_europe(placements, merra_path, gwa_50m_path, clc2012_path, output_netcdf_path=None, output_variables=None):
    # TODO: Add range limitation over Europe by checking placements
    """
    Simulates onshore wind generation in Europe using NASA's MERRA2 database [1].
    Parameters
    ----------
    placements : pandas Dataframe
        A Dataframe object with the parameters needed by the simulation.
    merra_path : str
        Path to the MERRA2 data.
    gwa_50m_path : str
        Path to the Global Wind Atlas at 50m [2] rater file.
    clc2012_path : str
        Path to the CLC 2012 raster file [3].
    output_netcdf_path : str, optional
        Path to a directory to put the output files, by default None
    output_variables : str, optional
        Restrict the output variables to these variables, by default None
    Returns
    -------
    xarray.Dataset
        A xarray dataset including all the output variables you defined as your output variables.
    Sources
    ------
    [1] NASA (National Aeronautics and Space Administration). (2019). Modern-Era Retrospective analysis for Research and Applications, Version 2. NASA Goddard Earth Sciences (GES) Data and Information Services Center (DISC). https://disc.gsfc.nasa.gov/datasets?keywords=%22MERRA-2%22&page=1&source=Models%2FAnalyses MERRA-2
    [2] DTU Wind Energy. (2019). Global Wind Atlas. https://globalwindatlas.info/
    [3] Copernicus (European Union’s Earth Observation Programme). (2012). Corine Land Cover 2012. Copernicus. https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012
    """
    wf = WindWorkflowManager(placements)
    # Load the hourly MERRA2 variables required by the simulation chain.
    wf.read(
        variables=['elevated_wind_speed',
                   "surface_pressure",
                   "surface_air_temperature"],
        source_type="MERRA",
        source=merra_path,
        set_time_index=True,
        verbose=False)
    # Rescale wind speeds so their long-run average matches the Global
    # Wind Atlas at 50 m.
    wf.adjust_variable_to_long_run_average(
        variable='elevated_wind_speed',
        source_long_run_average=rk_weather.MerraSource.LONG_RUN_AVERAGE_WINDSPEED,
        real_long_run_average=gwa_50m_path
    )
    # Roughness lengths derived from CORINE land-cover classes.
    wf.estimate_roughness_from_land_cover(
        path=clc2012_path,
        source_type="clc")
    wf.logarithmic_projection_of_wind_speeds_to_hub_height()
    wf.apply_air_density_correction_to_wind_speeds()
    # Statistically smooth the turbine power curves.
    wf.convolute_power_curves(
        scaling=0.06,
        base=0.1
    )
    wf.simulate()
    # Penalise hours of very low generation.
    wf.apply_loss_factor(
        loss=lambda x: rk_util.low_generation_loss(x, base=0.0, sharpness=5.0)
    )
    return wf.to_xarray(output_netcdf_path=output_netcdf_path, output_variables=output_variables)
def offshore_wind_merra_caglayan2019(placements, merra_path, output_netcdf_path=None, output_variables=None):
    """
    Simulates offshore wind generation using NASA's MERRA2 database [1].
    Parameters
    ----------
    placements : pandas Dataframe
        A Dataframe object with the parameters needed by the simulation.
    merra_path : str
        Path to the MERRA2 data.
    output_netcdf_path : str, optional
        Path to a directory to put the output files, by default None
    output_variables : str, optional
        Restrict the output variables to these variables, by default None
    Returns
    -------
    xarray.Dataset
        A xarray dataset including all the output variables you defined as your output variables.
    Sources
    ------
    [1] National Aeronautics and Space Administration. (2019). Modern-Era Retrospective analysis for Research and Applications, Version 2. NASA Goddard Earth Sciences (GES) Data and Information Services Center (DISC). https://disc.gsfc.nasa.gov/datasets?keywords=%22MERRA-2%22&page=1&source=Models%2FAnalyses MERRA-2
    """
    wf = WindWorkflowManager(placements)
    wf.read(
        variables=['elevated_wind_speed', ],
        source_type="MERRA",
        source=merra_path,
        set_time_index=True,
        verbose=False)
    # Constant roughness length of 0.0002 m (open sea).
    wf.set_roughness(0.0002)
    wf.logarithmic_projection_of_wind_speeds_to_hub_height()
    wf.convolute_power_curves(
        scaling=0.04,  # TODO: Check values with Dil
        base=0.5  # TODO: Check values with Dil
    )
    wf.simulate()
    wf.apply_loss_factor(
        loss=lambda x: rk_util.low_generation_loss(x, base=0.1, sharpness=3.5)  # TODO: Check values with Dil
    )
    return wf.to_xarray(output_netcdf_path=output_netcdf_path, output_variables=output_variables)
def offshore_wind_era5_unvalidated(placements, era5_path, output_netcdf_path=None, output_variables=None):
    """
    Simulates offshore wind generation using ECMWF's ERA5 database [1].
    (Unvalidated, as the function name indicates.)
    Parameters
    ----------
    placements : pandas Dataframe
        A Dataframe object with the parameters needed by the simulation.
    era5_path : str
        Path to the ERA5 data.
    output_netcdf_path : str, optional
        Path to a directory to put the output files, by default None
    output_variables : str, optional
        Restrict the output variables to these variables, by default None
    Returns
    -------
    xarray.Dataset
        A xarray dataset including all the output variables you defined as your output variables.
    Sources
    ------
    [1] European Centre for Medium-Range Weather Forecasts. (2019). ERA5 dataset. https://www.ecmwf.int/en/forecasts/datasets/reanalysis-datasets/era5.
    """
    wf = WindWorkflowManager(placements)
    wf.read(
        variables=['elevated_wind_speed', ],
        source_type="ERA5",
        source=era5_path,
        set_time_index=True,
        verbose=False)
    # Constant roughness length of 0.0002 m (open sea).
    wf.set_roughness(0.0002)
    wf.logarithmic_projection_of_wind_speeds_to_hub_height()
    wf.convolute_power_curves(
        scaling=0.04,  # TODO: Check values with Dil
        base=0.5  # TODO: Check values with Dil
    )
    wf.simulate()
    wf.apply_loss_factor(
        loss=lambda x: rk_util.low_generation_loss(x, base=0.1, sharpness=3.5)  # TODO: Check values with Dil
    )
    return wf.to_xarray(output_netcdf_path=output_netcdf_path, output_variables=output_variables)
def onshore_wind_era5(placements, era5_path, gwa_100m_path, esa_cci_path, output_netcdf_path=None, output_variables=None):
    """
    Simulates onshore wind generation using ECMWF's ERA5 database [1].
    NOTE: Validation documentation is in progress...
    Parameters
    ----------
    placements : pandas Dataframe
        A Dataframe object with the parameters needed by the simulation.
    era5_path : str
        Path to the ERA5 data.
    gwa_100m_path : str
        Path to the Global Wind Atlas at 100m [2] raster file.
    esa_cci_path : str
        Path to the ESA CCI raster file [3].
    output_netcdf_path : str, optional
        Path to a directory to put the output files, by default None
    output_variables : str, optional
        Restrict the output variables to these variables, by default None
    Returns
    -------
    xarray.Dataset
        A xarray dataset including all the output variables you defined as your output variables.
    Sources
    ------
    [1] European Centre for Medium-Range Weather Forecasts. (2019). ERA5 dataset. https://www.ecmwf.int/en/forecasts/datasets/reanalysis-datasets/era5
    [2] DTU Wind Energy. (2019). Global Wind Atlas. https://globalwindatlas.info/
    [3] ESA. Land Cover CCI Product User Guide Version 2. Tech. Rep. (2017). Available at: maps.elie.ucl.ac.be/CCI/viewer/download/ESACCI-LC-Ph2-PUGv2_2.0.pdf
    """
    wf = WindWorkflowManager(placements)
    # Read all weather variables needed for the hub-height projection and the
    # air-density correction below.
    wf.read(
        variables=['elevated_wind_speed',
                   "surface_pressure",
                   "surface_air_temperature",
                   "boundary_layer_height"],
        source_type="ERA5",
        source=era5_path,
        set_time_index=True,
        verbose=False)
    # Bias-correct ERA5 wind speeds against the Global Wind Atlas long-run average.
    wf.adjust_variable_to_long_run_average(
        variable='elevated_wind_speed',
        source_long_run_average=rk_weather.Era5Source.LONG_RUN_AVERAGE_WINDSPEED,
        real_long_run_average=gwa_100m_path
    )
    # Derive per-placement surface roughness from the ESA CCI land-cover raster.
    wf.estimate_roughness_from_land_cover(
        path=esa_cci_path,
        source_type="cci")
    wf.logarithmic_projection_of_wind_speeds_to_hub_height(
        consider_boundary_layer_height=True)
    wf.apply_air_density_correction_to_wind_speeds()
    # Smooth the turbine power curves to account for wind-speed variability.
    wf.convolute_power_curves(
        scaling=0.08,
        base=0.40
    )
    # Adjust wind speeds (clamped at zero so the offset cannot produce negative speeds)
    wf.sim_data['elevated_wind_speed'] = np.maximum(wf.sim_data['elevated_wind_speed']*0.75 + 1.20, 0 ) # Empirically found to improve simulation accuracy
    # do simulation
    wf.simulate()
    return wf.to_xarray(output_netcdf_path=output_netcdf_path, output_variables=output_variables)
def onshore_wind_era5_validator(placements, era5_path, gwa_100m_path, esa_cci_path, convolution_scaling_factors=None, convolution_base_factors=None, loss_sharpness_factors=None, loss_base_factors=None, wind_speed_offsets=None, wind_speed_scalings=None):
    """
    Parameter-sweep helper for validating onshore wind simulation against
    ECMWF's ERA5 database [1].

    Runs the onshore ERA5 workflow once per combination of the given tuning
    factors and collects the fleet-mean capacity factor time series for each
    combination, so that the factor sets can be compared to validation data.

    Parameters
    ----------
    placements : pandas Dataframe
        A Dataframe object with the parameters needed by the simulation.
    era5_path : str
        Path to the ERA5 data.
    gwa_100m_path : str
        Path to the Global Wind Atlas at 100m [2] raster file.
    esa_cci_path : str
        Path to the ESA CCI raster file [3].
    convolution_scaling_factors : list of float, optional
        Power-curve convolution scaling factors to sweep, by default [0.06]
    convolution_base_factors : list of float, optional
        Power-curve convolution base factors to sweep, by default [0.1]
    loss_sharpness_factors : list of float, optional
        Low-generation loss sharpness factors to sweep, by default [5.0]
    loss_base_factors : list of float, optional
        Low-generation loss base factors to sweep, by default [0.0]
    wind_speed_offsets : list of float, optional
        Offsets subtracted from the projected wind speeds, by default [0]
    wind_speed_scalings : list of float, optional
        Scalings applied to the projected wind speeds, by default [1.0]

    Returns
    -------
    pandas.DataFrame
        One column per factor combination (column name is the JSON encoding of
        the factors), indexed by the simulation time index, holding the mean
        capacity factor across all placements.

    Sources
    ------
    [1] European Centre for Medium-Range Weather Forecasts. (2019). ERA5 dataset. https://www.ecmwf.int/en/forecasts/datasets/reanalysis-datasets/era5
    [2] DTU Wind Energy. (2019). Global Wind Atlas. https://globalwindatlas.info/
    [3] ESA. Land Cover CCI Product User Guide Version 2. Tech. Rep. (2017). Available at: maps.elie.ucl.ac.be/CCI/viewer/download/ESACCI-LC-Ph2-PUGv2_2.0.pdf
    """
    from itertools import product
    from json import dumps
    import pandas as pd
    import numpy as np

    # Resolve defaults here instead of using mutable default arguments.
    if convolution_scaling_factors is None:
        convolution_scaling_factors = [0.06]
    if convolution_base_factors is None:
        convolution_base_factors = [0.1]
    if loss_sharpness_factors is None:
        loss_sharpness_factors = [5.0]
    if loss_base_factors is None:
        loss_base_factors = [0.0]
    if wind_speed_offsets is None:
        wind_speed_offsets = [0]
    if wind_speed_scalings is None:
        wind_speed_scalings = [1.0]

    # Shared preprocessing: identical to the onshore_wind_era5 workflow.
    wf = WindWorkflowManager(placements)
    wf.read(
        variables=['elevated_wind_speed',
                   "surface_pressure",
                   "surface_air_temperature",
                   "boundary_layer_height"],
        source_type="ERA5",
        source=era5_path,
        set_time_index=True,
        verbose=False)
    wf.adjust_variable_to_long_run_average(
        variable='elevated_wind_speed',
        source_long_run_average=rk_weather.Era5Source.LONG_RUN_AVERAGE_WINDSPEED,
        real_long_run_average=gwa_100m_path
    )
    wf.estimate_roughness_from_land_cover(
        path=esa_cci_path,
        source_type="cci")
    wf.logarithmic_projection_of_wind_speeds_to_hub_height(
        consider_boundary_layer_height=True)
    wf.apply_air_density_correction_to_wind_speeds()

    # Snapshot the pristine power curves and wind speeds so each sweep
    # iteration starts from the same state.
    power_curves = wf.powerCurveLibrary.copy()
    wind_speeds = wf.sim_data['elevated_wind_speed'].copy()

    outputs = {}
    for (convolution_scaling_factor,
         convolution_base_factor) \
            in product(convolution_scaling_factors,
                       convolution_base_factors):
        # Reset power curves before applying this iteration's convolution.
        wf.powerCurveLibrary = power_curves.copy()
        try:
            # (0, 0) means "no convolution at all".
            if not (convolution_scaling_factor == 0 and convolution_base_factor == 0):
                wf.convolute_power_curves(
                    scaling=convolution_scaling_factor,
                    base=convolution_base_factor,
                )
        except KeyboardInterrupt:
            # Note: KeyboardInterrupt is a BaseException, so the Exception
            # handler below would not catch it anyway; re-raise explicitly.
            raise
        except Exception:
            print("  Failed :(")
            continue
        for (loss_sharpness_factor,
             loss_base_factor,
             wind_speed_offset,
             wind_speed_scaling) \
                in product(loss_sharpness_factors,
                           loss_base_factors,
                           wind_speed_offsets,
                           wind_speed_scalings):
            # Progress output: the factor combination currently simulated.
            print(convolution_scaling_factor,
                  convolution_base_factor,
                  loss_sharpness_factor,
                  loss_base_factor,
                  wind_speed_offset,
                  wind_speed_scaling)
            # The JSON-encoded factors become the output column name.
            name = dumps({
                'convolution_scaling_factor': convolution_scaling_factor,
                'convolution_base_factor': convolution_base_factor,
                'loss_sharpness_factor': loss_sharpness_factor,
                'loss_base_factor': loss_base_factor,
                'wind_speed_offset': wind_speed_offset,
                'wind_speed_scaling': wind_speed_scaling
            })
            try:
                # Adjust wind speeds (clamped at zero) from the pristine copy.
                wf.sim_data['elevated_wind_speed'] = np.maximum(
                    wind_speeds * wind_speed_scaling - wind_speed_offset,
                    0
                )
                wf.simulate()
                wf.apply_loss_factor(
                    loss=lambda x: rk_util.low_generation_loss(
                        x,
                        base=loss_base_factor,
                        sharpness=loss_sharpness_factor)
                )
                # Fleet-mean capacity factor per timestep.
                output = wf.sim_data['capacity_factor'].mean(axis=1)
            except KeyboardInterrupt:
                raise
            except Exception:
                print("  Failed :(")
                continue
            outputs[name] = output
    return pd.DataFrame(outputs, index=wf.time_index)
def mean_capacity_factor_from_sectoral_weibull(placements, a_rasters, k_rasters, f_rasters, output=None):
    """Placeholder: compute mean capacity factors from sectoral Weibull
    (a, k, f) rasters. Not implemented yet; currently returns None."""
    pass
| StarcoderdataPython |
9370 | <reponame>opencv/openvino_training_extensions<filename>external/model-preparation-algorithm/tests/conftest.py
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Pull in the optional e2e test fixtures if the e2e package is installed;
# otherwise fall back gracefully (the addoption hook becomes a no-op marker).
try:
    import e2e.fixtures
    from e2e.conftest_utils import * # noqa
    from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa
    from e2e import config # noqa
    from e2e.utils import get_plugins_from_packages
    pytest_plugins = get_plugins_from_packages([e2e])
except ImportError:
    # Sentinel checked elsewhere to detect that e2e is unavailable.
    _e2e_pytest_addoption = None
    pass
import config
import pytest
from ote_sdk.test_suite.pytest_insertions import *
from ote_sdk.test_suite.training_tests_common import REALLIFE_USECASE_CONSTANT
# Register the OTE test-suite pytest plugins and this repository's metadata.
pytest_plugins = get_pytest_plugins_from_ote()
ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm')
@pytest.fixture
def ote_test_domain_fx():
    """Name of the OTE algorithm domain exercised by these tests."""
    domain_name = 'model-preparation-algorithm'
    return domain_name
@pytest.fixture
def ote_test_scenario_fx(current_test_parameters_fx):
    """Map the current test's usecase to a scenario label.

    Real-life usecases run as 'performance' tests; everything else as
    'integration'.
    """
    assert isinstance(current_test_parameters_fx, dict)
    is_reallife = current_test_parameters_fx.get('usecase') == REALLIFE_USECASE_CONSTANT
    return 'performance' if is_reallife else 'integration'
@pytest.fixture(scope='session')
def ote_templates_root_dir_fx():
    """Session fixture: absolute path of the model-template (configs) directory."""
    import logging
    import os.path as osp
    package_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
    root = f'{package_dir}/configs/'
    logger = logging.getLogger(__name__)
    logger.debug(f'overloaded ote_templates_root_dir_fx: return {root}')
    return root
@pytest.fixture(scope='session')
def ote_reference_root_dir_fx():
    """Session fixture: absolute path of the reference test-data directory."""
    import logging
    import os.path as osp
    package_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
    root = f'{package_dir}/tests/reference/'
    logger = logging.getLogger(__name__)
    logger.debug(f'overloaded ote_reference_root_dir_fx: return {root}')
    return root
# pytest magic: standard pytest hooks that delegate to the OTE test-suite
# insertion helpers imported above.
def pytest_generate_tests(metafunc):
    """Pytest hook: delegate parametrized test generation to the OTE test suite."""
    ote_pytest_generate_tests_insertion(metafunc)
def pytest_addoption(parser):
    """Pytest hook: register the OTE test-suite command line options."""
    ote_pytest_addoption_insertion(parser)
| StarcoderdataPython |
3399716 | <filename>search.py<gh_stars>10-100
"""
This file contains an example search engine that will search the inverted index that we build as part of our assignments in units 3 and 5.
"""
import sys,os,re
import math
import sqlite3
import time
# Module-level dictionaries used as hash-keyed lookup tables.
docs = {}          # docid -> Docs record for documents matching the query
resultslist = {}   # presumably scaled similarity score -> docid (populated by the ranking step)
term = {}          # termid -> Term statistics
# Regular expressions for: extracting words, extracting an ID from a path,
# and matching a three-part numeric identifier (###/###/###).
chars = re.compile(r'\W+')
pattid= re.compile(r'(\d{3})/(\d{3})/(\d{3})')
class Docs():
    """Per-document record: maps termid -> Term statistics for one document.

    `terms` is created in __init__ rather than as a class attribute: a
    class-level `terms = {}` would be a single dict shared (and mutated)
    across every Docs instance.
    """

    def __init__(self):
        # termid -> Term instance for this document only.
        self.terms = {}
class Term():
    """Per-term statistics for one (document, term) pair.

    Attributes are initialized per instance instead of as class attributes,
    so every Term starts from its own zeroed state.
    """

    def __init__(self):
        self.docfreq = 0    # number of documents containing the term
        self.termfreq = 0   # raw frequency of the term in this document
        self.idf = 0.0      # inverse document frequency
        self.tfidf = 0.0    # tf-idf weight
def splitchars(line):
    """Tokenize *line* by splitting on every run of non-word characters."""
    tokens = chars.split(line)
    return tokens
def elenQ(elen, a):
    """Accumulate the query's squared idf values (euclidean-length sum)."""
    return float(elen) + float(math.pow(a.idf, 2))
def elenD(elen, a):
    """Accumulate the document's squared tfidf values (euclidean-length sum)."""
    return float(elen) + float(math.pow(a.tfidf, 2))
"""
================================================================================================
>>> main
This section is the 'main' or starting point o the indexer program. The python interpreter will find this 'main' routine and execute it first.
================================================================================================
"""
if __name__ == '__main__':
    #
    # Open the sqlite database holding the inverted index. isolation_level=None
    # turns on autocommit, so changes made in the database are committed
    # automatically.
    #
    con = sqlite3.connect("/Data/GoogleDrive/InformationRetrival/indexer_part2")
    con.isolation_level = None
    cur = con.cursor()

    line = input('Enter the search terms, each separated by a space: ')

    #
    # Capture the start time of the search so that we can determine the total
    # running time required to process the search.
    #
    t2 = time.localtime()
    print('Processing Start Time: %.2d:%.2d' % (t2.tm_hour, t2.tm_min))

    # Split the contents of the line into tokens.
    l = splitchars(line)

    #
    # Get the total number of documents in the collection (needed for idf).
    #
    cur.execute("select count(*) from documentdictionary")
    row = cur.fetchone()
    documents = row[0]

    # Maximum number of search terms that exist in any one document
    # (placeholder used by the multi-term weighting described below).
    maxterms = float(0)

    # Process the tokens (search terms) entered by the user.
    for elmt in l:
        elmt = elmt.replace('\n', '')     # remove any newline character
        lowerElmt = elmt.lower().strip()  # normalize to lower case

        #
        # Determine whether the term exists in the dictionary. The term comes
        # straight from user input, so use a parameterized query -- string
        # interpolation here would be an SQL-injection hole.
        #
        cur.execute("select count(*) from termdictionary where term = ?", (lowerElmt,))
        row = cur.fetchone()

        #
        # If the term exists, retrieve all documents for the term and record
        # their per-document statistics.
        #
        if row[0] > 0:
            cur.execute(
                "select distinct docid, tfidf, docfreq, termfreq, posting.termid"
                " from termdictionary, posting"
                " where posting.termid = termdictionary.termid and term = ?"
                " order by docid, posting.termid",
                (lowerElmt,))
            for row in cur:
                i_termid = row[4]
                i_docid = row[0]
                if i_docid not in docs:
                    docs[i_docid] = Docs()
                    docs[i_docid].terms = {}
                if i_termid not in docs[i_docid].terms:
                    docs[i_docid].terms[i_termid] = Term()
                    docs[i_docid].terms[i_termid].docfreq = row[2]
                    docs[i_docid].terms[i_termid].termfreq = row[3]
                    docs[i_docid].terms[i_termid].idf = 0.0
                    docs[i_docid].terms[i_termid].tfidf = 0.0

    #
    # Calculate tfidf values for both the query and each document. Using the
    # tfidf (weight) values, accumulate the vectors and calculate the cosine
    # similarity between the query and each document. The denominator is the
    # euclidean length of the query multiplied by the euclidean length of the
    # document.
    #
    # Because the cosine similarity of a document matching on a single term
    # tends to score highly, a float(no_terms/maxterms) factor is intended to
    # give a higher weight to documents matching more than one term in
    # multi-term queries.
    #
    # NOTE(review): the accumulation/ranking code itself is missing here, so
    # `resultslist` is never populated and the loop below prints nothing.
    #
    # Results are sorted by decreasing cosine similarity. Because a float is
    # awkward as a sortable key, the similarity is multiplied by 10,000 and
    # truncated to an int (0.98970 -> 9897); for display we convert back by
    # dividing by 10,000.
    #
    # Python 3 fix: dict.keys() returns a view with no .sort() method, so
    # build a sorted list instead (the original keylist.sort(...) raised
    # AttributeError).
    keylist = sorted(resultslist.keys(), reverse=True)
    i = 0
    for key in keylist:
        i += 1
        if i > 20:
            break  # only the top 20 results are reported
        cur.execute("select DocumentName from documentdictionary where docid = ?",
                    (resultslist[key],))
        row = cur.fetchone()
        print('Document: %s Has Relevance of %f' % (row[0], float(key) / 10000))

    con.close()

    #
    # Print ending time to show the processing duration of the query.
    #
    t2 = time.localtime()
    print('End Time: %.2d:%.2d:%.2d' % (t2.tm_hour, t2.tm_min, t2.tm_sec))
| StarcoderdataPython |
112884 | <reponame>Anari-AI/pygears-vivado<filename>tests/ipgen/test_add.py
from pygears.lib import add
from pygears_vivado.test_utils import ipgen_test
from pygears import Intf
from pygears.typing import Tuple, Uint
@ipgen_test(top='/add', intf={'din': 'axi', 'dout': 'axi'})
def test_basic(tmpdir):
    # IP generation for an adder with separate AXI input and output interfaces.
    add(Intf(Tuple[Uint[16], Uint[16]]))
@ipgen_test(
    top='/add',
    intf={
        # Single combined AXI interface carrying both the input (write
        # channel) and the output (read channel).
        's_axi': {
            'type': 'axi',
            'wdata': 'din',
            'rdata': 'dout'
        }
    })
def test_combined(tmpdir):
    # IP generation for the same adder, exposed over one combined AXI port.
    add(Intf(Tuple[Uint[16], Uint[16]]))
| StarcoderdataPython |
1648970 | #!/usr/bin/env python
# Copyright 2020 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class guivar:
    """Help/usage strings shown by each minnie subcommand.

    Each attribute is a single-element list holding the formatted help text;
    the leading ``%(prog)s`` placeholder is filled in by argparse, and the
    ``\\033[...]`` sequences are ANSI bold/reset escape codes.
    """

    # Help for `splitpdbs`: split trajectory PDBs into per-frame files.
    splitpdbs = ['%(prog)s\n'
                 f' -i, --id <string> <string> \n '
                 f' Project ID for your analyses. One per trajectory.\n\n'
                 f' --pdbs [<traj.pdb>] [<traj.pdb>] \n'
                 f' Trajectory ID of your complex(s)\033[0m \n\n\n\n'
                 f'\n\033[1mUsage example:\033[0m\n\n'
                 " minnie splitpdbs -i sox4 sox18 --pdbs sox4.pdb sox18.pdb \n"
                 " minnie splitpdbs -i sox4 --pdbs sox4.pdb \n"]

    # Help for `findbonds`: compute interactions for one or many frames.
    findbonds = ['%(prog)s\n'
                 f' -i, --id <string> \n '
                 f' Project ID for your analysis.\n\n'
                 f' -f, --pdbfile [<.pdb>] (singleframe.pdb) \n'
                 f' Input PDB file in PDB format. \n\n'
                 f' -d, --folder [<path>] \n'
                 f' Input directory with PDB files.\n\n'
                 f' --itypes [<hbonds>/<ionic>/<hydrophobic>/<ring_stacking>/<all>] (hbonds)'
                 f'\n Calculates which types of interactions \n\n'
                 f' --intra [<"True">/<"False">] ("False") \n'
                 f' Include intra-monomer interactions. \033[0m \n\n\n\n'
                 f' --clean\n'
                 f' Remove intermediate files upon completion. \033[0m \n'
                 f'\n\n\n'
                 f'\n\033[1mUsage example:\033[0m\n\n'
                 " Single frame - minnie findbonds -i sox4 -f 'sox4/01_frames/md_0.pdb' --itypes all \n"
                 " Multiple frames - minnie findbonds -i sox4 -d 'sox4/01_frames/' --clean \n"
                 " Multiple frames - minnie findbonds -i sox4 -d 'sox4/01_frames/' --intra \n"]

    # Help for `timefilter`: keep interactions observed above a frequency.
    # (Typo fix: "sProject" -> "Project".)
    timefilter = ['%(prog)s\n'
                  f' -i, --id <string> <string> \n '
                  f' Project ID of your complex\n\n'
                  f' -f, --files [<.csv>] \n'
                  f' Files of your complex\n\n'
                  f' --per <float> \n'
                  f' Observation frequency to classify an interaction as critical\033[0m \n\n\n\n'
                  f'\n\033[1mUsage example:\033[0m\n\n'
                  " Single file - minnie timefilter -f sox4/02_interfacea_results/hbonds/sox4_merged_hbonds.csv -i sox4 --per 25 \n"
                  " Multiple files - minnie timefilter -f sox4/02_interfacea_results/*/sox4_merged_*.csv -i sox4 --per 25 \n"]

    # Help for `comparecx`: compare two complexes.
    # (Typo fix: "sProject" -> "Project".)
    comparecx = ['%(prog)s\n'
                 f' -i, --id <string> <string> \n '
                 f' Project ID of your complex\n\n'
                 f' --per <float> \n'
                 f' Observation frequency to classify an interaction as critical\033[0m \n\n\n\n'
                 f'\n\033[1mUsage example:\033[0m\n\n'
                 " minnie comparecx -i sox4 sox18 --per 25 \n"]

    # Help for `graph`: plot specific/common interactions between complexes.
    graph = ['%(prog)s\n'
             f' -i, --id <string> <string> \n'
             f' Project IDs of your complex(s)\n\n'
             f' --per <float> \n'
             f' Observation frequency to classify an interaction as critical \n\n'
             f' -b, --between [<protein-dna>/<all>] \n'
             f' Between protein-dna or keep all \n\n'
             f' -c, --chainIDs <string> <string> \n'
             f' Give ChainIDs to proceed\n\n'
             f' --filename <string> \n'
             f' Give a name to output file (optional)\n\n'
             f' --colors [<hex colors>] ("#D9B4CC", "#6F81A6") \n'
             f' Color IDs of the complexes (optional)\n\n'
             f' --itypes [<hbonds>/<ionic>/<hydrophobic>/<ring_stacking>/<all>] (hbonds) \n'
             f' Calculates which types of interactions \n\n'
             f' -s [<specific>/<common>] (specific) \n'
             f' Complex-specific or common interactions\033[0m \n\n\n\n'
             f'Please do not use "--between" and "--chainIDs" options at the same time\n\n'
             #"\n\033[1m Usage example: \033[0m\n\n"
             "\nUsage example: \n\n"
             " minnie graph -i 'sox4' 'sox18' --per 25 --itypes hbonds -s specific -c A+B C --colors '#D9B4CC' '#6F81A6' \n"
             " minnie graph -i 'sox4' 'sox18' --per 25 --itypes ionic -c A+B C \n"
             " minnie graph -i 'sox4' 'sox18' --per 25 --itypes ionic -b protein-dna \n"
             " minnie graph -i 'sox4' 'sox18' --per 25 --itypes ionic -b protein-dna --filename sox4_sox18_protein_dna \n"]
1712079 | #!/usr/bin/python3
from secret import *
import sys
import requests
# Exactly one command line argument is expected: the path of the file to upload.
if len(sys.argv) != 2:
    print("error")
    # Exit with a non-zero status so callers can detect the usage error
    # (the original quit() exited with status 0).
    sys.exit(1)

# `token` and `channel` are provided by secret.py (star-imported above).
param = {'token': token, 'channels': channel}
# Open the payload in a context manager so the file handle is always closed
# (the original open() leaked the handle).
with open(sys.argv[1], 'rb') as payload:
    files = {'file': payload}
    res = requests.post(url="https://slack.com/api/files.upload", params=param, files=files)
3249607 | import PyPDF2
# Open the encrypted PDF inside a context manager so the handle is closed on
# exit (the original open() leaked it). The reader reads lazily, so the file
# must stay open while pages are accessed.
with open('encrypted.pdf', 'rb') as pdf_file:
    pdfReader = PyPDF2.PdfFileReader(pdf_file)
    print(pdfReader.isEncrypted)
    # Decrypt with the known user password before accessing any page.
    pdfReader.decrypt('rosebud')
    page = pdfReader.getPage(0)
    print(page)
1733399 | <reponame>Leofltt/rg_sound_generation<gh_stars>0
from typing import Dict
# "base_dir": "D:\soundofai\\pitch_shifted_all",
def get_config() -> Dict:
    """Build and return the experiment configuration dictionary.

    Besides the static settings, two values are derived:
      * ``time_steps``: number of spectrogram frames covering the clipped
        audio length (1 + samples // hop_len).
      * ``num_conv_blocks``: depth of the convolutional feature extractor.

    Returns
    -------
    dict
        The complete configuration.
    """
    # NOTE(review): the Windows paths below contain '\s' which is an invalid
    # escape sequence (kept byte-identical here); consider raw strings.
    conf = {
        "base_dir": "D:\soundofai\\pitch_shifted_all",
        "csv_file_path": "D:\soundofai\\annot_data\\data\\may_13.csv",
        "preprocess_dir": "tmp",
        "audio_duration": 4,          # seconds per example
        "clip_at": -30,               # dB threshold
        "epsilon": 1e-5,
        "clip_audio_at": 2,           # seconds trimmed from each example
        "sample_rate": 16000,
        "num_classes": 3,
        "n_fft": 2048,
        "hop_len": 512,
        "n_mels": 128,
        "scale_factor": 1.0,
        "learning_rate": 2e-4,
        "threshold": 18,
        "all_features": [
            'bright_vs_dark', 'full_vs_hollow', 'smooth_vs_rough',
            'warm_vs_metallic', 'clear_vs_muddy', 'thin_vs_thick',
            'pure_vs_noisy', 'rich_vs_sparse', 'soft_vs_hard'
        ],
        "features": ["bright_vs_dark"],
        "model_name": "bright_vs_dark",
        "valid_split": 0.3,
        "dry_run": False,
        "reset_data": False,
        "pitch_shifted": True
    }
    # Use direct indexing for required keys (fails loudly if a key is missing,
    # unlike .get() which would silently yield None). The original
    # `conf = dict(conf)` self-copy was redundant and has been removed.
    audio_duration_samples = (conf["audio_duration"] - conf["clip_audio_at"]) * conf["sample_rate"]
    # One frame per hop, plus one for the initial window.
    conf["time_steps"] = 1 + audio_duration_samples // conf["hop_len"]
    conf["num_conv_blocks"] = 3
    return conf
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.