id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
9648003 | import logging
import signal
import socket
import threading
import traceback
import typing
from io import StringIO, BytesIO
from typing import Type, Optional, Union
from wsproto import ConnectionType, WSConnection
from wsproto.events import Ping, Request, AcceptConnection, CloseConnection, TextMessage, \
BytesMessage, RejectData
from webmesh.message_protocols import AbstractMessageProtocol
from webmesh.message_serializers import AbstractMessageSerializer
from webmesh.websocket.websocket_connection import WebSocketConnection
if typing.TYPE_CHECKING:
from webmesh.websocket.websocket_server import AbstractWebSocketHandler
class WebSocketClientProcess:
    """Serves a single websocket client: drives the wsproto state machine,
    decodes complete text messages and dispatches them to the app handler.
    """

    def __init__(self, handler: 'AbstractWebSocketHandler', connection: WebSocketConnection,
                 read_buffer_size: int,
                 serializer_type: Type[AbstractMessageSerializer],
                 protocol_type: Type[AbstractMessageProtocol]):
        self.connection: WebSocketConnection = connection
        self.read_buffer_size = read_buffer_size
        # Logger and stop flag are created lazily in listen() so the instance
        # can be constructed in one process and started in another.
        self.logger = None
        self.stop_event = None
        # Types (not instances) are stored; instantiated per client in listen().
        self.serializer_type = serializer_type
        self.protocol_type = protocol_type
        self.handler = handler
        self.serializer: Optional[AbstractMessageSerializer] = None
        self.protocol: Optional[AbstractMessageProtocol] = None

    def close(self, sig=None, term=None):
        # Signal-handler compatible signature; flags the read loop to stop.
        # NOTE(review): assumes listen() already ran, otherwise stop_event is
        # still None and this raises AttributeError -- confirm call ordering.
        self.stop_event.set()

    def listen(self):
        """Run the blocking read loop until the peer closes or close() is called.

        Returns the connection id once the client disconnects.
        """
        self.logger = logging.getLogger(f'websocket.{self.connection.id}')
        self.stop_event = threading.Event()
        self.serializer = self.serializer_type()
        self.protocol = self.protocol_type()
        with self.connection.context() as ws:
            ws.connection = WSConnection(ConnectionType.SERVER)
            ws.logger = self.logger
            # Accumulators for multi-frame text/binary messages.
            txt_buffer = StringIO()
            byt_buffer = BytesIO()
            self.on_connect(ws)
            data = True
            # _handle_proto returns False on close/error, True to keep polling,
            # or a str when a complete text message arrived.
            # NOTE(review): an empty text message ('') is falsy and would also
            # terminate this loop -- confirm that is intended.
            while data and not self.stop_event.is_set():
                data = _handle_proto(ws, self.read_buffer_size, txt_buffer, byt_buffer)
                if isinstance(data, str):
                    self.on_text_message(ws, data)
            self.on_disconnect(ws)
            return ws.id

    def on_connect(self, connection: WebSocketConnection):
        # Hook invoked once per client after the websocket context is set up.
        self.logger.info('Client connected')
        self.handler.on_connect(connection)

    def on_text_message(self, connection: WebSocketConnection, data: str):
        """Deserialize an incoming payload, dispatch it, and send the
        handler's response (if any) back on the same connection."""
        deserialized = self.serializer.deserialize(data)
        unpacked = self.protocol.unpack(deserialized)
        self.logger.debug(f'Received data on path {unpacked[0]}: {unpacked[1]}')
        response = self.handler.on_message(connection, *unpacked)
        if response is not None:
            # Echo the response back on the same path the request came in on.
            packed = self.protocol.pack(unpacked[0], response)
            serialized = self.serializer.serialize(packed)
            connection.send(TextMessage(data=serialized))

    def on_disconnect(self, connection: WebSocketConnection):
        self.handler.on_disconnect(connection)
        self.logger.info('Client disconnected')
def _handle_proto(ws,
read_buffer_size: int,
text_buffer: StringIO,
bytes_buffer: BytesIO) -> Union[bool, str]:
try:
ws.recv(read_buffer_size)
for event in ws.events():
if isinstance(event, Ping):
print('ping!')
ws.send(event.response())
elif isinstance(event, Request):
ws.send(AcceptConnection())
elif isinstance(event, CloseConnection):
ws.send(event.response())
return False
elif isinstance(event, TextMessage):
text_buffer.write(event.data)
if event.message_finished:
return text_buffer.getvalue()
elif isinstance(event, BytesMessage):
bytes_buffer.write(event.data)
if event.message_finished:
ws.send(RejectData(bytes_buffer.getvalue()))
except socket.timeout:
pass
except Exception:
traceback.print_exc()
return False
return True
| StarcoderdataPython |
207600 | <filename>maze_solver/__main__.py
from utils import (
get_input_data,
solve_maze_from_file,
solve_test_mazes,
display_help,
)
def main() -> None:
    """Entry point: read the requested mode and dispatch to the right action."""
    mode, value = get_input_data()
    if mode == 'test':
        solve_test_mazes()
    elif mode == 'file':
        solve_maze_from_file(value)
    else:
        display_help()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5107539 | <filename>src/wagtail_2fa/__init__.py
default_app_config = "wagtail_2fa.apps.Wagtail2faConfig"
__version__ = "1.5.0"
| StarcoderdataPython |
11343852 | <gh_stars>0
from django.contrib import admin
from musics.models import Music
class MusicAdmin(admin.ModelAdmin):
    # Default admin options for Music; placeholder for future customisation.
    pass


admin.site.register(Music, MusicAdmin)
| StarcoderdataPython |
5198957 | <filename>apps/deadline/management.py
from django.conf import settings
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
# Register the deadline notice type only when django-notification is installed.
if "notification" in settings.INSTALLED_APPS:
    from notification import models as notification

    def create_notice_types(app, created_models, verbosity, **kwargs):
        """Create the 'deadlines_notification' NoticeType after syncdb."""
        notification.create_notice_type("deadlines_notification",
                                        _("Important dates and deadlines"),
                                        _("administrators informed regarding important dates and deadlines"),
                                        default=2)

    # post_syncdb fires after `syncdb` creates tables (legacy Django signal).
    signals.post_syncdb.connect(create_notice_types, sender=notification)
else:
    # print() call form so this module also parses on Python 3
    # (the original used a Python-2-only print statement).
    print("Skipping creation of NoticeTypes as notification app not found")
| StarcoderdataPython |
#!/usr/bin/python
# Command-line helper: print a bcrypt hash of the given --password.
import argparse
from passlib.context import CryptContext

parser = argparse.ArgumentParser()
parser.add_argument('--password', type=str, required=True)
args = parser.parse_args()

# CryptContext supplies passlib's bcrypt defaults (salt and cost factor).
hasher = CryptContext(schemes=['bcrypt'])
print(hasher.hash(args.password))
| StarcoderdataPython |
6647246 | <filename>zulip_bots/zulip_bots/bots/codery/codery.py<gh_stars>1-10
import sys
import os
sys.path.insert(0, os.getcwd())
import requests
import calculator
import todo
import dictionary
import news
import geekjokes
import courses
import jobs
import leaderboard
import trendingproblems
from bs4 import BeautifulSoup
from typing import Dict
from typing import Any
def _scrape_contests(limit=None):
    """Scrape stopstalk.com/contests and return a newline-joined listing.

    Each contest row contributes its non-empty cell texts plus the contest
    link. ``limit`` caps the number of rows taken (None = all rows).
    """
    URL = 'https://www.stopstalk.com/contests'
    content = requests.get(URL)
    soup = BeautifulSoup(content.text, 'html.parser')
    # Use a dictionary to pass the key : value attribute pair.
    contentTable = soup.find('table', {"class": "centered bordered"})
    rows = contentTable.find_all('tr')
    lo = []
    taken = 0
    for row in rows[1:]:
        # "##" separates contests; "@@" separates fields within a contest.
        lo.append("##")
        columns = row.find_all('td')
        for column in columns:
            if column.get_text() != "":
                lo.append((column.get_text()).strip() + "@@")
        lo.append((columns[4].find('a')['href']).strip())
        taken += 1
        if limit is not None and taken == limit:
            break
    listing = ""
    for r in lo:
        for each_contest in r.split("##"):
            for attr in each_contest.split("@@"):
                listing += attr + "\n"
    return listing


def get_codery_result(codery_keywords: str) -> str:
    """Route a '@Codery ...' command string to the matching feature module.

    Unrecognised or empty commands get the help text (the original returned
    None for unknown commands, and its three-word fallback referenced an
    undefined variable).
    """
    help_message = "*Help for Codery* : \n\n" \
        "The bot responds to messages starting with @Codery.\n\n" \
        "`@Codery contests` will return top Contests today, their dates, time left and the links to each contest.\n" \
        "`@Codery top contest` also returns the top Contest result.\n" \
        "`@Codery trending` returns the top trending ploblems across all programming platforms.\n" \
        "`@Codery dictionary <search term>` returns the meaning of that word in an instant.\n" \
        "`@Codery jokes` keeps your morale boosted with programming jokes.\n" \
        "`@Codery jobs <searchword>` returns the top jobs for that search word.\n" \
        "`@Codery news <keyword>` returns the news for that key word.\n" \
        "`@Codery man <function>` returns the user manual of that function.\n" \
        "`@Codery top <n> contests` will return n number of top contests at that time.\n \n" \
        "Example:\n" \
        " * @Codery contests\n" \
        " * @Codery top contest\n" \
        " * @Codery jokes\n" \
        " * @Codery top 7 contests\n" \
        " * @Codery dictionary computer\n" \
        " * @Codery search code\n" \
        " * @Codery jobs pyhton\n" \
        " * @Codery jobs java\n" \
        " * @Codery trending\n" \
        " * @Codery man execvp\n" \
        " * @Codery news corona"
    codery_keywords = codery_keywords.strip()
    codery_keywords_list = codery_keywords.split(" ")
    if codery_keywords == 'help':
        return help_message
    elif codery_keywords_list[0] == "todo":
        return todo.get_todo_response(codery_keywords, CoderyHandler)
    elif codery_keywords_list[0] == "jobs":
        return jobs.get_jobs(codery_keywords, CoderyHandler)
    elif codery_keywords_list[0] == "leaderboard":
        return leaderboard.get_leaderboard()
    elif codery_keywords_list[0] == "trending":
        return trendingproblems.get_problems()
    elif codery_keywords_list[0] in ("search", "dictionary"):
        return dictionary.get_dictionary_response(codery_keywords, CoderyHandler)
    elif codery_keywords_list[0] in ("courses", "course"):
        return courses.get_courses(codery_keywords, CoderyHandler)
    elif codery_keywords_list[0] in ("jokes", "joke"):
        return geekjokes.get_joke(codery_keywords, CoderyHandler)
    elif codery_keywords_list[0] == "calculator":
        return "The answer is" + calculator.get_calculator_response(codery_keywords, CoderyHandler)
    elif codery_keywords_list[0] == "news":
        return news.get_news_response(codery_keywords, CoderyHandler)
    elif codery_keywords == 'contests':
        # Full listing of today's contests and hackathons.
        return "The top contests and hackathons of today are \n" + _scrape_contests()
    elif codery_keywords == 'top contest':
        return _scrape_contests(limit=1)
    elif len(codery_keywords_list) == 3:
        # "top <n> contests"; any other three-word command gets help
        # (the original fell through here with an undefined loop bound).
        if codery_keywords_list[0] == "top" and codery_keywords_list[2] == "contests":
            return _scrape_contests(limit=int(codery_keywords_list[1]))
        return help_message
    # Empty input or any unrecognised command: show the help text.
    return help_message
class CoderyHandler(object):
    """Zulip bot handler: replies to each message with the Codery result."""

    def handle_message(self, message: Dict[str, str], bot_handler: Any) -> None:
        original_content = message['content']
        result = get_codery_result(original_content)
        bot_handler.send_reply(message, result)


# Entry point name expected by the zulip_bots runner.
handler_class = CoderyHandler
| StarcoderdataPython |
3254318 | <reponame>LucLapenta/is-it-raining-site
# users/urls.py
from django.urls import path
from .views import SignUpView, AlertListView, CreateAlertView, UpdateAlertView
from .models import Alert
from . import views
# Account/alert URL routes for the users app.
urlpatterns = [
    path('signup/', SignUpView.as_view(), name='signup'),
    path('profile/', views.view_profile, name='profile'),
    path('alerts/', AlertListView.as_view(model=Alert), name='alerts'),
    path('alert/create/', CreateAlertView.as_view(model=Alert), name='alert-create'),
    # NOTE(review): this is a regex pattern passed to path(); path() matches it
    # literally, so this route almost certainly needs re_path() -- confirm.
    path(r'^alert/update/(?P<alert_pk>\d+)/', UpdateAlertView.as_view(), name='edit_alert'),
    path('profile/edit/', views.edit_profile, name='edit_profile')
]
133577 | <gh_stars>0
# coding=utf-8
# Bar chart of counts over variable-width bins.
from matplotlib import pyplot as plt
from matplotlib import font_manager

# Bin left edges, bin widths and per-bin counts.
interval = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 60, 90]
width = [5, 5, 5, 5, 5, 5, 5, 5, 5, 15, 30, 60]
quantity = [836, 2737, 3723, 3926, 3596, 1438, 3273, 642, 824, 613, 215, 47]
print(len(interval), len(width), len(quantity))

# Set the figure size (inches at 80 dpi).
plt.figure(figsize=(20, 8), dpi=80)
plt.bar(interval, quantity, width=width)

# Set the x-axis ticks: shift each label left by half of the previous bin's
# width (appears intended to line labels up with the variable-width bars).
temp_d = [5] + width[:-1]
_x = [i - temp_d[interval.index(i)] * 0.5 for i in interval]
# (removed a stray bare `clear` statement that raised NameError at runtime)
plt.xticks(_x, interval)
plt.grid(alpha=0.4)
plt.show()
| StarcoderdataPython |
1624687 | from click.testing import CliRunner
from mock import Mock, patch
from sigopt.cli import cli
class TestClusterCreateCli(object):
    """CLI smoke test for `sigopt cluster create`."""

    def test_cluster_create(self):
        # Stub out the orchestrate service bag so no real services are hit.
        services = Mock()
        runner = CliRunner()
        with \
                runner.isolated_filesystem(), \
                patch('sigopt.orchestrate.controller.OrchestrateServiceBag', return_value=services):
            # The command expects a cluster.yml in the working directory.
            open("cluster.yml", "w").close()
            result = runner.invoke(cli, ["cluster", "create"])
            assert result.exit_code == 0
| StarcoderdataPython |
134904 | <gh_stars>0
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
WIDGET
This module contains the widgets of pygame-menu.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 <NAME>. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
from pygame_menu.widgets.widget.button import Button
from pygame_menu.widgets.widget.colorinput import ColorInput
from pygame_menu.widgets.widget.dropselect import DropSelect
from pygame_menu.widgets.widget.dropselect_multiple import DropSelectMultiple
from pygame_menu.widgets.widget.frame import Frame
from pygame_menu.widgets.widget.hmargin import HMargin
from pygame_menu.widgets.widget.image import Image
from pygame_menu.widgets.widget.label import Label
from pygame_menu.widgets.widget.menubar import MenuBar
from pygame_menu.widgets.widget.menulink import MenuLink
from pygame_menu.widgets.widget.none import NoneWidget
from pygame_menu.widgets.widget.scrollbar import ScrollBar
from pygame_menu.widgets.widget.selector import Selector
from pygame_menu.widgets.widget.surface import SurfaceWidget
from pygame_menu.widgets.widget.table import Table
from pygame_menu.widgets.widget.textinput import TextInput
from pygame_menu.widgets.widget.toggleswitch import ToggleSwitch
from pygame_menu.widgets.widget.vmargin import VMargin
| StarcoderdataPython |
from django.apps import AppConfig


class DeliveryOptionsConfig(AppConfig):
    # App registry label for the delivery_options Django app.
    name = 'delivery_options'
| StarcoderdataPython |
import numpy as np
from PIL import Image
import glob
import time
import sys

# Packs per-simulation frame images into .npz archives of stacked 4-frame
# buffers (Atari-style preprocessing) for CNN training.
output_dir = '/hpctmp2/e0046667/'
data = []
total_amount_data = 0

# Inclusive range of simulation indices to process, from the command line.
first_simu = int(sys.argv[1])
last_simu = int(sys.argv[2])

for k in range(first_simu, last_simu + 1):
    simulation_path = output_dir + "output" + str(k)
    action_dirs = glob.glob(simulation_path + '/dataset/*')
    action_dirs.sort()
    nb_actions = len(action_dirs)
    # Compute the number of frame
    nb_frames = 0
    for action_dir in action_dirs:
        nb_frames += len(glob.glob(action_dir + '/*'))
    print(nb_frames, "frames")
    total_amount_data += nb_frames
    # NOTE(review): dtype 'b' is signed int8; grayscale pixel values above 127
    # wrap to negative values -- confirm this is expected downstream.
    images = np.zeros((nb_frames, 84, 84, 4), dtype='b')
    image_index_to_action_index = np.zeros(nb_frames, dtype='b')
    nb_frames = np.array([nb_frames])
    i = 0
    for action_dir in action_dirs:
        print(i)
        image_paths = glob.glob(action_dir + '/*')
        image_paths.sort()
        for image_path in image_paths:
            # Prepare the image: grayscale, crop the play area, downscale.
            image = Image.open(image_path).convert('L')
            image = image.crop((0, 34, 160, 194))
            image = image.resize((84, 84))
            image = np.array(image, dtype='b')
            # Save the image in 4 buffers: each frame also appears as the
            # history channel of the next three frames.
            # Frame files are assumed named "<frame_number>.<ext>" (1-based)
            # with a 3-character extension -- TODO confirm.
            frame_number = int(image_path.split('/')[-1][:-4])
            images[frame_number - 1, :, :, 0] = image
            if frame_number < nb_frames:
                images[frame_number, :, :, 1] = image
            if frame_number + 1 < nb_frames:
                images[frame_number + 1, :, :, 2] = image
            if frame_number + 2 < nb_frames:
                images[frame_number + 2, :, :, 3] = image
            # and save the action index for this frame
            image_index_to_action_index[frame_number - 1] = i
        i += 1
    print('save')
    np.savez(output_dir + 'CNN/data' + str(k), images=images,
             image_index_to_action_index=image_index_to_action_index,
             nb_frames=nb_frames)

print("total :", total_amount_data)
| StarcoderdataPython |
3232575 | import sys
import pandas as pd
# From Assignment 2, copied manually here just to remind you
# that you can copy stuff manually if importing isn't working out.
# You can just use this or you can replace it with your function.
def countTokens(text):
    """Count occurrences of each space-separated token in *text*."""
    counts = {}
    for token in text.split(' '):
        counts[token] = counts.get(token, 0) + 1
    return counts
def largest_counts(data):
    """Write the 20 most frequent tokens of each preprocessing stage to counts.txt.

    ``data`` row layout (Assignment 2 storage order: test data first, negative
    before positive): rows 25000:37500 are negative training reviews and rows
    37500:50000 are positive training reviews.
    """
    neg_train_data = data[25000:37500]
    pos_train_data = data[37500:50000]

    # (heading label, dataframe column) for every preprocessing stage.
    stages = [
        ('Original', 'review'),
        ('Cleaned', 'cleaned_review'),
        ('Lowercased', 'lowercased'),
        ('No stopwords', 'no stopwords'),
        ('Lemmatized', 'lemmatized'),
    ]

    def top_tokens(frame, column, n=20):
        # str.cat() joins the whole column into one text; sort the token
        # counts by frequency, descending, and keep the first n.
        counts = countTokens(frame[column].str.cat())
        return sorted(counts.items(), key=lambda kv: kv[1], reverse=True)[:n]

    with open('counts.txt', 'w') as f:
        for sentiment, subset in (('POS', pos_train_data), ('NEG', neg_train_data)):
            for label, column in stages:
                f.write('{} {} reviews:\n'.format(label, sentiment))
                for token, count in top_tokens(subset, column):
                    f.write('{}\t{}\n'.format(token, count))
def main(argv):
    # argv[1]: path to the preprocessed CSV (first column is the row index).
    data = pd.read_csv(argv[1], index_col=[0])
    # print(data.head()) # <- Verify the format. Comment this back out once done.
    largest_counts(data)


if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
4821620 | from django.urls import path
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
from .views import ProfileRetrieveView
# JWT auth endpoints plus the authenticated profile view.
urlpatterns = [
    path("token/", TokenObtainPairView.as_view(), name="token-obtain-pair"),
    path("token/refresh/", TokenRefreshView.as_view(), name="token-refresh"),
    path("profile/", ProfileRetrieveView.as_view(), name="profile")
]
| StarcoderdataPython |
8010885 | from django.conf.urls import url
from ...bruv.views.benthic_category import BenthicCategoryView
# AJAX endpoint listing benthic substrate categories.
urlpatterns = [
    url(r"habitat/substrate/$", BenthicCategoryView.as_view(), name="ajax_substrate"),
]
| StarcoderdataPython |
6436312 | from django.contrib import admin
from .models import CustomUser
class CustomUserAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for CustomUser.
    list_display = ('id', 'email', 'username', 'avatar', 'age', 'is_staff',)
    search_fields = ('id', 'username',)
    ordering = ('id',)


admin.site.register(CustomUser, CustomUserAdmin)
| StarcoderdataPython |
4821324 | <filename>ImageFinder/ImageFinder/ImageFinder.py
import cv2
import numpy
import os
import imghdr
import sys
AcceptableImages = ['jpeg','png','gif','bmp']
#Take in a image and look for images that are very similar or exactly the same as the original image
#Does this by naively just calculating the difference norm of the images and check if it is below a threshold
#Can navigate recursively through directories
#The images normalize the scale to 1080 width, because if the image has been resized, then we'd still want
#to see that one
ImageToFind = "I:\\Pictures\\S5\\20140614_161829.jpg"
LocationToLook = "I:\\Pictures\\S5\\cache"
Recurse = True
DifferenceValue = 0.02
NormalizedScale = 1080
if len(sys.argv)==3:
ImageToFind = sys.argv[1]
LocationToLook = sys.argv[2]
elif len(sys.argv)==4:
if sys.argv[1] == '-r':
Recurse = True
ImageToFind = sys.argv[2]
ImageToFind = ImageToFind.replace("\"","")
LocationToLook = sys.argv[3]
LocationToLook = LocationToLook.replace("\"","")
def GetScale(img, size):
    """Resize *img* so its width becomes *size*, preserving aspect ratio.

    Returns (resized_image, new_width, new_height).
    """
    (h, w) = img.shape[:2]
    scaleW = size / w
    img = cv2.resize(img, (int(w * scaleW), int(h * scaleW)))
    return (img, int(w * scaleW), int(h * scaleW))
def FindImages(dir, recurse):
    """Scan *dir* for images visually similar to the global OrigImage.

    Each candidate is scaled to NormalizedScale width and compared with the
    per-pixel L2 norm; matches (below DifferenceValue) are appended to the
    global TotalImageList. Recurses into subdirectories when *recurse* is true.
    """
    topDirs = os.listdir(dir)
    print("Looking in: " + dir)
    total = str(len(topDirs));
    print("--" + str(len(topDirs)) + "-- files")
    c = 0;
    for fd in topDirs:
        if os.path.isdir(dir + fd):
            if recurse:
                FindImages(dir + fd + "\\", recurse)
                continue
            else:
                continue
        imgPath = dir + "\\" + fd;
        try:
            # NOTE(review): `type` shadows the builtin -- worth renaming.
            type = imghdr.what(imgPath)
            if type in AcceptableImages:
                compare = cv2.imread(imgPath, 0)
                if compare is None:
                    print("Couldn't read: " + imgPath)
                    continue
                (compare, cW, cH) = GetScale(compare, NormalizedScale)
                # Only images that scale to the same dimensions are comparable.
                if cW == oW and cH == oH:
                    # Divide by pixel count so the threshold is size-independent.
                    normVal = cv2.norm(OrigImage, compare) / (cW * cH)
                    if normVal < DifferenceValue:
                        print("FOUND: " + imgPath)
                        TotalImageList.append((imgPath, str(normVal)))
                        print("\tDiff:" + str(normVal))
        except:
            # NOTE(review): bare except hides real errors -- narrow it.
            print("Couldn't read a file")
        c = c + 1
        if c % 100 == 0:
            print(str(c) + " out of " + total)
TotalImageList = []
OrigImage = cv2.imread(ImageToFind,0) #GreyScale
(OrigImage,oW,oH) = GetScale(OrigImage,NormalizedScale)
cv2.imshow("test",OrigImage)
cv2.waitKey()
topDirs = os.listdir(LocationToLook)
FindImages(LocationToLook, Recurse)
print("Image Locations:")
for foundimg in TotalImageList:
print(foundimg[0] + " " + foundimg[1])
| StarcoderdataPython |
6479379 | <filename>Meiju/spiders/Meijuspider.py<gh_stars>0
# -*- coding: utf-8 -*-
import scrapy
# -*- coding: utf-8 -*-
import scrapy
from lxml import etree
from Meiju.items import MeijuItem
# from .Meiju.pipelines import MeijuPipeline
class MeijuspiderSpider(scrapy.Spider):
    """Scrapes the 'new 100' chart on meijutt.com for show names and their
    update status."""
    name = 'Meijuspider'
    allowed_domains = ['meijutt.com']
    start_urls = ['http://meijutt.com/new100.html']

    def parse(self, response):
        # print(response.body)
        # The site serves GBK-encoded pages; decode before parsing with lxml.
        content = etree.HTML(response.body.decode('GBK'))
        movies = content.xpath('//ul[@class="top-list fn-clear"]/li')
        for movie in movies:
            # print(movie)
            # Show title
            a_list = movie.xpath('./h5/a')
            a = a_list[0].text
            # Update status of the show
            stars = movie.xpath('.//span[@class="state1 new100state1"]/font')[0].text
            # print(a,stars)
            item = MeijuItem()
            item['name'] = a
            item['state'] = stars
            print(a, '-----', stars)
            # Yield the item back to the pipeline.
            yield item
| StarcoderdataPython |
5127768 | import numpy as np
def softmax(predictions):
    '''
    Computes probabilities from scores

    Arguments:
      predictions, np array, shape is either (N) or (batch_size, N) -
        classifier output

    Returns:
      probs, np array of the same shape as predictions -
        probability for every class, 0..1
    '''
    # Shift by the max for numerical stability before exponentiating.
    if predictions.ndim == 1:
        exps = np.exp(predictions - predictions.max())
        return exps / exps.sum()
    # Batched case: normalise every row independently.
    exps = np.exp(predictions - predictions.max(axis=1, keepdims=True))
    return exps / exps.sum(axis=1, keepdims=True)
def l2_regularization(W, reg_strength):
    '''
    Computes L2 regularization loss on weights and its gradient

    Arguments:
      W, np array - weights
      reg_strength - float value

    Returns:
      loss, single value - l2 regularization loss
      gradient, np.array same shape as W - gradient of weight by l2 loss
    '''
    loss = reg_strength * np.sum(W * W)
    grad = 2.0 * reg_strength * W
    return loss, grad
def cross_entropy_loss(probs, target_index):
    '''
    Computes cross-entropy loss

    Arguments:
      probs, np array, shape is either (N) or (batch_size, N) -
        probabilities for every class
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss: single value
    '''
    if probs.ndim == 1:
        return -np.log(probs[target_index])
    # Vectorized batch case (the per-sample Python loop was a leftover TODO):
    # pick each sample's target-class probability with fancy indexing.
    targets = np.asarray(target_index).reshape(-1)
    batch = np.arange(probs.shape[0])
    return -np.log(probs[batch, targets]).sum()
def softmax_with_cross_entropy(preds, target_index):
    '''
    Computes softmax and cross-entropy loss for model predictions,
    including the gradient

    Arguments:
      preds, np array, shape is either (N) or (batch_size, N) -
        classifier output
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss, single value - cross-entropy loss
      dprediction, np array same shape as predictions - gradient of predictions by loss value
    '''
    probs = softmax(preds)
    loss = cross_entropy_loss(probs, target_index)
    # d(loss)/d(preds) = softmax(preds) - one_hot(target).
    dprediction = probs.copy()
    if preds.ndim == 1:
        dprediction[target_index] -= 1
        return loss, dprediction
    # Vectorized batch case (replaces the per-sample Python loop): subtract 1
    # at every sample's target class in one fancy-indexed assignment.
    batch_size = preds.shape[0]
    targets = np.asarray(target_index).reshape(-1)
    dprediction[np.arange(batch_size), targets] -= 1
    # Average over the batch so gradients are batch-size independent.
    return loss / batch_size, dprediction / batch_size
class Param:
    '''
    Trainable parameter of the model
    Captures both parameter value and the gradient
    '''

    def __init__(self, value):
        self.value = value
        # Gradient accumulator, same shape as value; layers add into it and
        # the trainer is expected to zero it between optimisation steps.
        self.grad = np.zeros_like(value)
class ReLULayer:
    """Element-wise rectified linear unit; caches its input for backprop."""

    def __init__(self):
        pass

    def forward(self, X):
        # Remember the input so backward() can mask the gradient.
        self.X = X
        return np.where(X > 0, X, 0)

    def backward(self, d_out):
        # Pass the gradient through wherever the input was non-negative.
        mask = (self.X >= 0).astype(d_out.dtype)
        return d_out * mask

    def params(self):
        # ReLU has no trainable parameters.
        return {}
class FullyConnectedLayer:
    """Affine layer: out = X @ W + B, with gradients accumulated in Params."""

    def __init__(self, n_input, n_output):
        # Small random init; B is a row vector broadcast over the batch.
        self.W = Param(0.001 * np.random.randn(n_input, n_output))
        self.B = Param(0.001 * np.random.randn(1, n_output))
        self.X = None

    def forward(self, X):
        # Cache the input (wrapped in Param for uniformity) for backward().
        self.X = Param(X)
        result = X.dot(self.W.value) + self.B.value
        return result

    def backward(self, d_out):
        # Standard affine backprop. Note grads are ACCUMULATED (+=); the
        # trainer is expected to zero them between steps.
        dW = self.X.value.T.dot(d_out)
        dX = d_out.dot(self.W.value.T)
        dB = d_out.sum(axis=0).reshape((1, -1))
        self.B.grad += dB
        self.W.grad += dW
        d_input = dX
        return d_input

    def params(self):
        return { 'W': self.W, 'B': self.B }
class ConvolutionalLayer:
    def __init__(self, in_channels, out_channels,
                 filter_size, padding):
        '''
        Initializes the layer

        Arguments:
        in_channels, int - number of input channels
        out_channels, int - number of output channels
        filter_size, int - size of the conv filter
        padding, int - number of 'pixels' to pad on each side
        '''
        self.filter_size = filter_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Weights laid out as (filter_size, filter_size, in_channels, out_channels).
        self.W = Param(
            np.random.randn(filter_size, filter_size,
                            in_channels, out_channels)
        )
        self.B = Param(np.zeros(out_channels))
        self.padding = padding

    def forward(self, X):
        """Stride-1 convolution over the zero-padded input.

        X is (batch, height, width, channels); returns
        (batch, out_height, out_width, out_channels).
        """
        X_padded = np.pad(X, pad_width=((0, 0), (self.padding, self.padding), (self.padding, self.padding), (0, 0)), constant_values=0)
        batch_size, height, width, channels = X.shape
        # Cache both raw and padded inputs for backward().
        self.X = Param(X)
        self.X_padded = Param(X_padded)
        out_height = height - self.filter_size + 2 * self.padding + 1
        out_width = width - self.filter_size + 2 * self.padding + 1
        result = np.zeros((batch_size, out_height, out_width, self.out_channels))
        for y in range(out_height):
            for x in range(out_width):
                # Each output location is a matmul between the flattened
                # receptive field and the flattened filter bank.
                receptive_field = X_padded[:, y: y + self.filter_size, x: x + self.filter_size, :].reshape((batch_size, self.filter_size * self.filter_size * channels))
                W_reshaped = self.W.value.reshape(self.filter_size*self.filter_size*self.in_channels, self.out_channels)
                result[:, y, x, :] = receptive_field @ W_reshaped + self.B.value
        return result

    def backward(self, d_out):
        """Backprop through the convolution.

        Accumulates W/B gradients and returns the gradient w.r.t. the
        (unpadded) input.
        """
        batch_size, height, width, channels = self.X_padded.value.shape
        _, out_height, out_width, out_channels = d_out.shape
        fs = self.filter_size
        for y in range(out_height):
            for x in range(out_width):
                dd_out = d_out[:, y, x, :]  # (bs, out_chan)
                receptive_field = self.X_padded.value[:, y: y + fs, x: x + fs, :]  # (bs, fs, fs, in_chan)
                receptive_field_reshaped = receptive_field.reshape((batch_size, fs * fs * channels))  # (bs, fs*fs*in_chan)
                W_grad_reshaped = receptive_field_reshaped.T.dot(dd_out)  # (fs*fs*in_chan, out_chan)
                self.W.grad += W_grad_reshaped.reshape((fs, fs, channels, out_channels))  # (fs, fs, in_chan, out_chan)
                X_grad_reshaped = dd_out.dot(self.W.value.reshape(fs*fs*self.in_channels, self.out_channels).T)  # (bs, fs*fs*in_chan)
                self.X_padded.grad[:, y:y + fs, x:x + fs, :] += X_grad_reshaped.reshape((batch_size, fs, fs, channels))
                self.B.grad += dd_out.sum(axis=0)
        if self.padding != 0:
            # Strip the padding ring off the padded-input gradient.
            d_input = self.X_padded.grad[:, self.padding: -self.padding, self.padding: -self.padding, :]
        else:
            d_input = self.X_padded.grad
        self.X.grad = d_input
        return d_input

    def params(self):
        return { 'W': self.W, 'B': self.B }
class MaxPoolingLayer:
    def __init__(self, pool_size, stride):
        '''
        Initializes the max pool

        Arguments:
        pool_size, int - area to pool
        stride, int - step size between pooling windows
        '''
        self.pool_size = pool_size
        self.stride = stride
        self.X = None

    def forward(self, X):
        """Max pooling over strided windows; X is (batch, h, w, channels)."""
        batch_size, height, width, channels = X.shape
        self.X = Param(X)
        out_height = int((height - self.pool_size) / self.stride + 1)
        out_width = int((width - self.pool_size) / self.stride + 1)
        result = np.zeros((batch_size, out_height, out_width, channels))
        for y in range(out_height):
            for x in range(out_width):
                # Max over the spatial window, kept per batch and channel.
                result[:, y, x, :] = np.max(X[:, y*self.stride: y*self.stride + self.pool_size, x*self.stride: x*self.stride + self.pool_size, :], axis=(1, 2))
        return result

    def backward(self, d_out):
        """Route each upstream gradient back to the max element of its window.

        NOTE(review): plain `=` (not `+=`) is used when writing window
        gradients, so overlapping windows (stride < pool_size) overwrite
        each other -- fine for the usual stride == pool_size setup; confirm
        if other configurations are expected.
        """
        ps = self.pool_size
        stride = self.stride
        batch_size, height, width, channels = self.X.value.shape
        d_input = np.zeros_like(self.X.value)
        out_height = int((height - ps) / stride + 1)
        out_width = int((width - ps) / stride + 1)
        for y in range(out_height):
            for x in range(out_width):
                window = self.X.value[:, y*stride: y*stride + ps, x*stride: x*stride + ps, :]
                # Boolean mask of the max position(s) times the upstream grad.
                d_input[:, y*stride: y*stride + ps, x*stride: x*stride + ps, :] = (np.max(window, axis=(1, 2), keepdims=True) == window) * d_out[:, y, x, :].reshape((batch_size, 1, 1, channels))
        return d_input

    def params(self):
        return {}
class Flattener:
    """Collapses (batch, height, width, channels) activations into
    (batch, height*width*channels) and restores the layout on backward."""

    def __init__(self):
        self.X_shape = None  # input shape remembered for the backward pass

    def forward(self, X):
        self.X_shape = X.shape
        batch_size = X.shape[0]
        flat_len = X.shape[1] * X.shape[2] * X.shape[3]
        return X.reshape((batch_size, flat_len))

    def backward(self, d_out):
        # Undo the flattening so upstream layers see their original layout.
        return d_out.reshape(self.X_shape)

    def params(self):
        # Flattening has no trainable parameters.
        return {}
| StarcoderdataPython |
1944959 | """
Copyright Government of Canada 2017
Written by: <NAME>, National Microbiology Laboratory,
Public Health Agency of Canada
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import os
from quasitools.parsers.genes_file_parser import parse_genes_file
from quasitools.parsers.reference_parser import parse_references_from_fasta
from quasitools.parsers.mapped_read_parser import parse_mapped_reads_from_bam
from quasitools.nt_variant import NTVariantCollection
from quasitools.aa_variant import AAVariantCollection
from quasitools.mutations import MutationDB
from quasitools.aa_census import AACensus, CONFIDENT
import Bio.SeqIO
class PatientAnalyzer():
    """Drives the per-patient analysis pipeline: read filtering, mapping,
    nucleotide variant calling, amino-acid census and mutation reporting.

    All output files are written into ``output_dir``.
    """

    def __init__(self, id, output_dir, reads, reference,
                 genes_file, mutation_db, quiet, consensus_pct):
        self.id = id
        self.output_dir = output_dir
        self.reads = reads  # path to the input fastq file
        self.reference = reference  # path to the reference fasta
        self.mutation_db = mutation_db  # path to the mutation db file, or None
        self.genes_file = genes_file
        self.quiet = quiet
        self.consensus_pct = consensus_pct

        # Per-category counts of reads removed by filter_reads().
        self.filtered = {}
        self.filtered["status"] = 0
        self.filtered["length"] = 0
        self.filtered["score"] = 0
        self.filtered["ns"] = 0

        self.input_size = 0
        self.determine_input_size()

        self.references = parse_references_from_fasta(self.reference)
        # Genes are defined relative to the first reference sequence.
        self.genes = parse_genes_file(genes_file, self.references[0].name)

        self.filtered_reads = "%s/filtered.fastq" % output_dir

        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

    def determine_input_size(self):
        """Count the records in the input fastq and add to input_size."""
        sequences = Bio.SeqIO.parse(self.reads, "fastq")
        self.input_size += sum(1 for _ in sequences)

    def filter_reads(self, filters):
        """Write reads that pass the length / quality / N filters to
        self.filtered_reads, counting the discards per category.

        filters must provide "length_cutoff", "score_cutoff" and "ns".
        """
        if not self.quiet:
            print("# Filtering reads...")

        with open(self.filtered_reads, "w+") as filtered_reads_file:
            seq_rec_obj = Bio.SeqIO.parse(self.reads, "fastq")
            for seq in seq_rec_obj:
                quals = seq.letter_annotations['phred_quality']
                avg_score = float(sum(quals)) / float(len(quals))
                length = len(seq.seq)

                if length < filters["length_cutoff"]:
                    self.filtered["length"] += 1
                elif avg_score < filters["score_cutoff"]:
                    self.filtered["score"] += 1
                elif filters['ns'] and 'n' in seq.seq.lower():
                    self.filtered['ns'] += 1
                else:
                    Bio.SeqIO.write(seq, filtered_reads_file, "fastq")

            self.filtered["status"] = 1

    def analyze_reads(self, fasta_id, filters, reporting_threshold,
                      generate_consensus):
        """Map the filtered reads and produce the variant, census, mutation
        and (when a mutation db is configured) drug-resistance reports."""
        # Map reads against reference using bowtietwo
        if not self.quiet:
            print("# Mapping reads...")
        bam = self.generate_bam(fasta_id)

        if not self.quiet:
            print("# Loading read mappings...")

        # cmd_consensus
        cons_seq_file = None
        if generate_consensus:
            cons_seq_file = open("%s/consensus.fasta" % self.output_dir, "w+")

        mapped_read_collection_arr = []
        for r in self.references:
            mrc = parse_mapped_reads_from_bam(r, bam)
            mapped_read_collection_arr.append(mrc)
            if generate_consensus:
                cons_seq_file.write('>{0}_{1}_{2}\n{3}'.format(
                    fasta_id, reporting_threshold, r.name,
                    mrc.to_consensus(self.consensus_pct)))

        if generate_consensus:
            cons_seq_file.close()

        # cmd_callntvar
        if not self.quiet:
            print("# Identifying variants...")
        variants = NTVariantCollection.from_mapped_read_collections(
            filters["error_rate"], self.references,
            *mapped_read_collection_arr)
        variants.filter('q%s' % filters["min_qual"],
                        'QUAL<%s' % filters["min_qual"], True)
        variants.filter('ac%s' % filters["min_ac"],
                        'AC<%s' % filters["min_ac"], True)
        variants.filter('dp%s' % filters["min_dp"],
                        'DP<%s' % filters["min_dp"], True)

        with open("%s/hydra.vcf" % self.output_dir, "w+") as vcf_file:
            vcf_file.write(variants.to_vcf_file())

        # cmd_aa_census
        if not self.quiet:
            print("# Masking filtered variants...")
        for mrc in mapped_read_collection_arr:
            mrc.mask_unconfident_differences(variants)

        if not self.quiet:
            print("# Building amino acid census...")

        # Determine which frames our genes are in
        frames = set()
        for gene in self.genes:
            frames.add(self.genes[gene]['frame'])

        aa_census = AACensus(self.reference, mapped_read_collection_arr,
                             self.genes, frames)

        with open("%s/coverage_file.csv" % self.output_dir, "w+") as coverage_file:
            coverage_file.write(aa_census.coverage(frames))

        # cmd_aavariants
        if not self.quiet:
            print("# Finding amino acid mutations...")

        # Create AAVar collection and print the hmcf file
        aa_vars = AAVariantCollection.from_aacensus(aa_census)

        # Filter for mutant frequency
        aa_vars.filter('mf%s' % filters['min_freq'],
                       'freq<%s' % filters['min_freq'], True)

        # Build the mutation database and update collection
        mutation_db = None
        if self.mutation_db is not None:
            mutation_db = MutationDB(self.mutation_db, self.genes)
            aa_vars.apply_mutation_db(mutation_db)

        with open("%s/mutation_report.hmcf" % self.output_dir, "w+") as mut_report:
            mut_report.write(aa_vars.to_hmcf_file(CONFIDENT))

        # cmd_drmutations
        # BUG FIX: the original referenced `mutation_db` unconditionally,
        # raising NameError whenever no mutation database was supplied.
        if mutation_db is not None:
            if not self.quiet:
                print("# Writing drug resistant mutation report...")
            with open("%s/dr_report.csv" % self.output_dir, "w+") as dr_report:
                dr_report.write(aa_vars.report_dr_mutations(mutation_db,
                                                            reporting_threshold))

        self.output_stats(mapped_read_collection_arr)

    # This is a helper method that generates the bam file.
    # It takes as an argument the fasta_id, which is used by bowtie2 as the
    # RG-ID in the output bam file.
    def generate_bam(self, fasta_id):
        """ Runs bowtietwo local alignment on self.reads
        to generate a bam file """
        sorted_bam_fn = "%s/align.bam" % self.output_dir
        bowtietwo_bam_output = sorted_bam_fn[0:sorted_bam_fn.rindex(".")]
        bam_fn = "%s/tmp.bam" % self.output_dir
        sam_fn = "%s/tmp.sam" % self.output_dir

        # Pre-create the intermediate files so the unlink() calls below
        # succeed even if one of the external tools fails to produce them.
        open(bam_fn, "w+").close()
        open(sam_fn, "w+").close()

        bowtietwo_index = self.reference[0:self.reference.rindex(".")]

        bowtietwo_cmd = (("bowtie2 --local --rdg '8,3' --rfg '8,3' "
                          "--rg-id %s --ma 1 --mp '2,2' -S %s -x %s "
                          "-U %s") % (fasta_id, sam_fn, bowtietwo_index,
                                      self.filtered_reads))
        os.system(bowtietwo_cmd)

        # Convert sam output to bam output
        sam_to_bam_cmd = "samtools view -bt %s.fai -o %s %s" % (self.reference,
                                                                bam_fn, sam_fn)
        os.system(sam_to_bam_cmd)

        # Sort bam output
        sort_bam_cmd = "samtools sort %s -T %s -o %s" % (bam_fn,
                                                         bowtietwo_bam_output,
                                                         sorted_bam_fn)
        os.system(sort_bam_cmd)

        # Index bam output
        index_bam_cmd = "samtools index %s" % sorted_bam_fn
        os.system(index_bam_cmd)

        # Remove the intermediates, keeping only the sorted, indexed bam.
        os.unlink(bam_fn)
        os.unlink(sam_fn)

        return sorted_bam_fn

    def output_stats(self, mapped_read_collection_arr):
        """Write stats.txt summarizing how many reads each filter removed."""
        mr_len = len(mapped_read_collection_arr[0].mapped_reads)

        with open("%s/stats.txt" % self.output_dir, "w+") as stats_report:
            stats_report.write("Input Size: %i\n" % self.input_size)
            stats_report.write("Number of reads filtered due to length: %i\n" %
                               self.filtered["length"])
            stats_report.write(("Number of reads filtered due to average "
                                "quality score: %i\n") % self.filtered["score"])
            stats_report.write(("Number of reads filtered due to presence "
                                "of Ns: %i\n") % self.filtered["ns"])
            stats_report.write("Number of reads filtered due to excess "
                               "coverage: 0\n")
            stats_report.write(("Number of reads filtered due to poor "
                                "mapping: %i\n") %
                               (self.input_size - self.filtered["length"] -
                                self.filtered["score"] - self.filtered["ns"] -
                                mr_len))
            # Guard against an empty input file (division by zero).
            if self.input_size > 0:
                pct = float(self.input_size - mr_len) / self.input_size * 100
            else:
                pct = 0.0
            stats_report.write("Percentage of reads filtered: %0.2f" % pct)
| StarcoderdataPython |
4898711 | <gh_stars>0
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.preprocessing import LabelEncoder
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from data_cleaning import *
class OVR_SVC:
    """One-vs-rest linear SVM text classifier.

    ``data`` must provide a 'clean-text' column (documents) and a
    'category' column (string labels).
    """

    def __init__(self, data):
        self.X = data['clean-text']
        self.y = data['category']
        le = LabelEncoder()
        self.y = le.fit_transform(self.y)
        # Map each original category name to its encoded integer label.
        # BUG FIX: the original read dict(zip(data[...].to_numpy()), y) --
        # the closing parenthesis was misplaced (zip got one argument,
        # dict got two) and `y` was an undefined name; it must be self.y.
        self.reference = dict(zip(data['category'].to_numpy(), self.y))
        self.reference = {k: v for k, v in sorted(self.reference.items(), key=lambda item: item[1])}
        self.model = OneVsRestClassifier(LinearSVC(random_state=0))
        self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.3)

    def vectorizer(self, type='tfidf'):
        """Fit a text vectorizer on the training split and transform both
        splits in place; ``type`` is 'tfidf' or 'count'.

        NOTE: the parameter name shadows the builtin but is kept for
        backward compatibility with keyword callers.
        """
        if type == 'tfidf':
            vectorizer = TfidfVectorizer()
        elif type == 'count':
            vectorizer = CountVectorizer()
        else:
            # Previously an unknown type fell through to a NameError.
            raise ValueError("type must be 'tfidf' or 'count'")
        self.x_train = vectorizer.fit_transform(self.x_train)
        self.x_test = vectorizer.transform(self.x_test)

    def train_model(self):
        """Fit the one-vs-rest classifier on the vectorized training data."""
        self.model.fit(self.x_train, self.y_train)

    def evaluate_model(self):
        """Return (predictions, accuracy) for the held-out test split."""
        y_predicted = self.model.predict(self.x_test)
        accuracy = self.model.score(self.x_test, self.y_test)
        return y_predicted, accuracy
class TweetCategory:
    """Predicts topical categories for a user's tweets with a pre-trained
    classifier, filtered by VADER sentiment."""

    def __init__(self, model, vectorizer, tweet_data, reference):
        self.data = tweet_data
        self.model = model
        self.vectorizer = vectorizer
        self.ref = reference  # encoded label -> category name mapping
        self.analyzer = SentimentIntensityAnalyzer()

    def process_user_tweets(self):
        """Clean the raw tweets and attach VADER sentiment columns."""
        self.data['clean-tweet'] = self.data['Tweet Content'].map(tweet_preprocess)
        self.data = self.data[['Tweet Content', 'clean-tweet']].rename(columns={'Tweet Content': 'tweet'})
        self.data['vader-sentiment'] = self.data['tweet'].apply(lambda x: self.analyzer.polarity_scores(x))
        # Explode the polarity dict into one column per component.
        for part in ('pos', 'neu', 'neg', 'compound'):
            self.data['vader-' + part] = self.data['vader-sentiment'].apply(lambda x, p=part: x[p])

    def predict_topics(self, sentiment_thresh, confidence_thresh):
        """Return the top 3 predicted categories among sufficiently positive
        tweets whose prediction confidence clears confidence_thresh."""
        # BUG FIX: .copy() added -- the original assigned new columns to a
        # filtered view of self.data, triggering pandas' SettingWithCopy
        # behavior (assignments may silently fail or hit the parent frame).
        self.predict_df = self.data[(self.data['vader-compound'] >= sentiment_thresh) & (self.data['clean-tweet'] != '')].copy()
        tweets_transformed = self.vectorizer.transform(self.predict_df['clean-tweet'])
        predicted_category = self.model.predict(tweets_transformed)
        # Softmax over the decision-function margins -> pseudo-probabilities.
        p = np.array(self.model.decision_function(tweets_transformed))
        probability = np.exp(p)/np.sum(np.exp(p), axis=1, keepdims=True)
        probability_list = [max(prob) for prob in probability]
        self.predict_df['predicted_label'] = predicted_category
        self.predict_df['probability'] = probability_list
        self.predict_df['predicted'] = self.predict_df['predicted_label'].apply(lambda x: self.ref[x])
        top_categories = self.predict_df[self.predict_df['probability'] >= confidence_thresh]['predicted'].value_counts()[:3]
        return top_categories
def user_tweet_df(tweets):
    """Build a DataFrame of a user's tweets.

    Arguments:
        tweets: list of tweepy Status objects (full-text mode); all tweets
            are assumed to belong to the same user (taken from tweets[0]).

    Returns:
        DataFrame with columns 'user' and 'Tweet Content'. An empty input
        list yields an empty DataFrame with the same columns (the original
        raised IndexError in that case).
    """
    if not tweets:
        return pd.DataFrame({'user': [], 'Tweet Content': []})
    username = tweets[0]._json['user']['screen_name']
    all_tweets = [tweet._json['full_text'] for tweet in tweets]
    return pd.DataFrame({'user': username, 'Tweet Content': all_tweets})
6694732 | """Libraries."""
| StarcoderdataPython |
1761595 | from argparse import Namespace
from .pg2.db_methods import Pg2DB
from .sqla.db_methods import AlchemyDB
from ...parsers import AGP_RDS_GENERATE
def get_connection_dict(args: Namespace) -> dict:
    """Create connection dict.

    Builds the keyword arguments for the selected database driver from the
    parsed CLI arguments. ``args.run_option`` selects the flavour: 'pg2'
    (psycopg2) names the user key 'user', while 'sqla' (SQLAlchemy) uses
    'username' and additionally needs 'drivername'.
    """
    db_connect = {
        'host': args.host,
        'port': args.port,
        # BUG FIX: the original contained a bare <PASSWORD> redaction
        # placeholder here, which is a SyntaxError.
        'password': args.password,
        'database': args.database
    }
    if args.run_option == 'pg2':
        db_connect['user'] = args.username
    if args.run_option == 'sqla':
        db_connect['username'] = args.username
        db_connect['drivername'] = args.drivername
    return db_connect
# Maps the CLI run_option value to the database backend implementation:
# 'pg2' -> psycopg2-based access, 'sqla' -> SQLAlchemy-based access.
DB_DICT = {
    'pg2': Pg2DB,
    'sqla': AlchemyDB
}
def main():
    """Entry point: parse the CLI arguments, assemble the connection
    settings and run the generator test against the chosen backend."""
    cli_args, _ = AGP_RDS_GENERATE.parse_known_args()
    conn_settings = get_connection_dict(cli_args)
    backend = DB_DICT[cli_args.run_option](conn_settings)
    backend.run_test(cli_args.source)
| StarcoderdataPython |
6613871 | <filename>valveless/problem.py
import numpy as np
class Problem(object):
    """
    This class encapsulates the relevant physical parameters of the problem into an object.
    """

    def __init__(self, L, D, f, psi_ca, a_0, alpha_m, rho_t, mu_m, eta_f, gamma):
        """
        Initializes and sets the various physical parameters.

        Parameters
        ----------
        L: scalar
            Length of the bubble along the z axis [mm]
        D: scalar
            Length of transition region [mm]
        f: scalar
            Frequency of the driving wave [kHz]
        psi_ca: scalar
            Diameter oscillation amplitude for the bubble [mm]
        a_0: scalar
            Radius of gas bubble [mm]
        alpha_m: scalar
            Relative volume fraction of the tissue matrix []
        rho_t: scalar
            Tissue mass density, including tissue matrix and fluid [kg/cm³]
        mu_m: scalar
            Shear stiffness of the tissue matrix [kg/(m*s)] = [Pa*s]
        eta_f: scalar
            Dynamic viscosity of the fluid [Pa*s]
        gamma: scalar
            Viscous friction between fluid and tissue matrix.
        """
        self.L = L
        self.D = D
        self.f = f
        self.psi_ca = psi_ca
        self.a_0 = a_0
        self.alpha_m = alpha_m
        self.alpha_f = 1 - self.alpha_m  # fluid volume fraction
        self.rho_t = rho_t
        self.mu_m = mu_m
        self.eta_f = eta_f
        self.gamma = gamma
        # Partial densities: each phase's volume fraction times the total
        # tissue density.
        self.rho_m = self.alpha_m * self.rho_t
        # BUG FIX: was alpha_m * rho_t, which merely duplicated rho_m and
        # left alpha_f unused; the fluid density uses the fluid fraction.
        self.rho_f = self.alpha_f * self.rho_t

    def _psi_c_amplitude(self, z):
        """
        Helper function that returns the amplitude of the capillary surface vibrations at a specific position.

        Parameters
        ----------
        z: scalar
            Position along z axis

        Returns
        -------
        scalar:
            The amplitude at a position z > 0.
        """
        if z <= self.L / 2 - self.D:
            # Constant amplitude over the bubble proper.
            return self.psi_ca
        elif z > self.L / 2 - self.D and z < self.L / 2:
            # Smooth cosine taper across the transition region.
            return self.psi_ca / 2 * (1 - np.cos(np.pi * (z - self.L / 2) / self.D))
        else:
            return 0

    def vec_psi_c_amplitude(self, z_array):
        """
        Vectorized version of _psi_c_amplitude.

        Parameters
        ----------
        z_array: ndarray
            1D array of positions along the z axis.

        Returns
        -------
        ndarray
            1D array of amplitudes.
        """
        vec_psi_z = np.vectorize(self._psi_c_amplitude)
        return vec_psi_z(z_array)
| StarcoderdataPython |
6627538 | #!/usr/bin/env python3
"""A very simple batch that tests basic functionality."""
import hail as hl
hl.init()
| StarcoderdataPython |
5175978 | '''
<NAME> 4/5/21
custom_invitation.py creates personalied invitations and puts
them all on separate pages in a .docx file. Invitations are
personalized by reading a .txt file of recipient names.
'''
import os
import docx
from docx.shared import Pt
def custom_invitation(text_file):
    """Create one centred invitation page per name listed in text_file and
    save them all to invitations.docx."""
    with open(text_file, 'r') as names_file:
        names = names_file.readlines()

    doc = docx.Document()
    for name in names:
        # Each invitation is five centred paragraphs; the recipient's name
        # is styled differently from the surrounding text.
        paragraphs = [
            ('It would be a pleasure to have the company of\n', 'Heading 1'),
            (name, 'Intense Quote'),
            ('at 11010 Memory Lane on the evening of\n', 'Heading 1'),
            ('April 1st\n', 'Heading 1'),
            ("at 7 o'clock", 'Heading 1'),
        ]
        last_paragraph = None
        for text, style in paragraphs:
            last_paragraph = doc.add_paragraph(text, style)
            last_paragraph.alignment = 1  # centre the line
        # Start the next invitation on a fresh page (the last line always
        # has a run, so runs[0] is safe).
        last_paragraph.runs[0].add_break(docx.enum.text.WD_BREAK.PAGE)

    doc.save('invitations.docx')
os.chdir('Chapter 15')
custom_invitation('name_list.txt') | StarcoderdataPython |
1881288 | # Copyright © 2020 by <NAME>.
# Simple GUI with screens filled with widgets, some touchable.
# Colors:
# For best performance and lowest memory use, this GUI cean use a GS4_HSMB "4-bit greyscale"
# framebuffer. This is used to represent 16 colors and
# the display driver (it's display() method specifically) is expected to color-map from the 4 bits
# to however many bits the display has.
# The metrial theme takes up 11 colors, the remaining 5 can be used freely by the app, for example
# to draw charts.
# Thoughts about caching:
# Some caching is required in order to switch screens quickly. Many items, such as buttons, labels,
# headers can be rendered once into an "off-screen" framebuffer and then blitted into place.
# One difficulty is that buttons can change color when pressed or disabled, so just rendering each
# button once and blitting doesn't work. This also makes it not so great to pre-render each screen
# at start-up and then just render that and fill-in real-time data, because each widget really
# should be called with a "please render yourself if you look different than the way you got
# pre-rendered". Instead, the caching is pushed down into the widgets so each widget can cache its
# most recent state and either use that or re-render depending on current inputs/values.
import time
from u8g2_font import Font # only used for timing purposes
import seg7 # seven segment number display
try:
from micropython import const
except ImportError:
def const(x):
return x
# Screen holds a list of widgets that are displayed all together on the display.
class Screen:
    cur = None  # the currently active Screen, if any

    @classmethod
    def config(cls, framebuf, theme, width, height):
        """Bind the shared framebuffer and theme, and register the theme's
        colors into the framebuffer's palette."""
        cls.fb = framebuf  # expected to be a subclass of the built-in framebuf
        cls.theme = theme
        cls.width = width
        cls.height = height
        # init colormap: hand the theme a function that allocates successive
        # palette indices and programs the RGB components for each.
        c = 0

        def set_color(rgb):
            nonlocal c
            cls.fb.color(c, rgb >> 16, (rgb >> 8) & 0xFF, rgb & 0xFF)
            c += 1
            return c - 1

        theme.init(set_color)

    def __init__(self, bgcolor=None):
        self.bgcolor = bgcolor if bgcolor is not None else self.theme.grey
        #
        self.displaylist = []
        self.last_draw = 0

    # activate switches to this screen and renders it
    def activate(self, wid=None):
        Screen.cur = self
        Screen.fb.fill(self.bgcolor)
        Screen.fb.display()  # this provides immediate feedback that the screen is changing
        self.draw(fill=False)

    # draw clears the framebuffer and display, iterates through the display list and calls
    # each widget's draw() method, then pushes the framebuffer on the display
    def draw(self, display=True, fill=True):
        t0 = time.ticks_ms()
        Font.ticks = 0
        Font.ticks_hdr = 0
        if fill:
            Screen.fb.fill(self.bgcolor)
        t1 = time.ticks_ms()
        for wid in self.displaylist:
            wid.draw()
        t2 = time.ticks_ms()
        if display:
            Screen.fb.display()
        self.last_draw = time.ticks_ms()
        # BUG FIX: guard against a zero-length draw interval, which made the
        # text-percentage computation raise ZeroDivisionError on fast redraws.
        draw_ms = time.ticks_diff(t2, t1)
        text_pct = Font.ticks * 100 // draw_ms if draw_ms else 0
        print(
            "Redraw: fill=%d draw=%d(text:%d=%d%%) disp=%d tot=%d"
            % (
                time.ticks_diff(t1, t0),
                draw_ms,
                Font.ticks,
                text_pct,
                time.ticks_diff(self.last_draw, t2),
                time.ticks_diff(self.last_draw, t0),
            )
        )

    # add a widget to the display list for this screen.
    def add(self, wid):
        self.displaylist.append(wid)
        wid.screen = self

    @classmethod
    def handle_touch(cls, ev):
        """Dispatch a touch event (press, x, y) to the active screen's
        widgets; the first widget that handles it consumes it."""
        if cls.cur is None:
            return
        print("Touch %d %d" % (ev[1], ev[2]))
        for wid in cls.cur.displaylist:
            if wid.handle(ev[1], ev[2], ev[0]):
                print("Handled by", wid)
                return
class BlueOrangeTheme:
    """Material-style palette: blue primary, orange secondary, plus greys."""

    @classmethod
    def init(cls, set_color):
        # Registration order matters: set_color hands out consecutive
        # palette indices, so the attributes below receive indices 0..10.
        palette = (
            ('primary', 0x1E88E5),
            ('pri_light', 0x6AB7FF),
            ('pri_dark', 0x005CB2),
            ('secondary', 0xF4511E),
            ('sec_light', 0xFF844C),
            ('sec_dark', 0xB91400),
            ('grey', 0x9E9E9E),
            ('grey_light', 0xCFCFCF),
            ('grey_dark', 0x424242),
            ('white', 0xFFFFFF),
            ('black', 0x000000),
        )
        for attr, rgb in palette:
            setattr(cls, attr, set_color(rgb))
# Widget is the superclass for all display elements that get enqueued on a Screen. It has
# simple methods to handle boilerplate, such as keeping track of the bounding box and testing
# whether an x-y coordinate is inside or not. It also has support for caching and blitting the
# widget.
class Widget:
    def __init__(self, x, y, w, h):
        # Bounding box: top-left corner plus width and height.
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.fb = None  # framebuffer holding a cached rendering, if any

    # inside tests whether the x-y coordinates are within the bounding box passed to __init__.
    def inside(self, x, y):
        within_x = self.x <= x < self.x + self.w
        within_y = self.y <= y < self.y + self.h
        return within_x and within_y

    # draw renders the widget from cache
    def draw(self):
        if self.fb:
            Screen.fb.blit(self.fb, self.x, self.y)

    # save copies the rendered widget (using its bounding box) to a cache framebuffer
    def save(self):
        if not self.fb:
            self.fb = Screen.fb.allocfb(self.w, self.h)
        self.fb.blit(Screen.fb, -self.x, -self.y)
        # show what got drawn and saved
        # Screen.fb.fill_rect(self.x + self.w // 2, self.y + self.h // 2, 4, 4, rgb565(0xFF0000))

    def handle(self, x, y, press):
        # Base widgets ignore touch events; interactive subclasses override.
        return False
# Drawing is an uncached widget that calls a call-back function when it needs to be rendered.
class Drawing(Widget):
    def __init__(self, draw_cb, handle_cb=None):
        """Free-form widget: draw_cb() paints it; the optional
        handle_cb(x, y, press) receives touch events and should return
        truthy when it consumed the event."""
        # Zero-sized bounding box: this widget never caches/blits itself.
        super().__init__(0, 0, 0, 0)
        self.draw_cb = draw_cb
        self.handle_cb = handle_cb

    def draw(self):  # override super's method: no caching ever...
        self.draw_cb()

    def handle(self, x, y, press):
        # Falsy (None) when no callback is installed, otherwise whatever
        # the callback returns.
        return self.handle_cb and self.handle_cb(x, y, press)
# TextField is a cached widget that displays a text label. It is basically a wrapper around a Label
# that enables caching.
class TextField(Widget):
    def __init__(self, x, y, w, h, label, pre_cb=None, bgcolor=None):
        """Cached text widget.

        x, y, w, h: bounding box on screen.
        label: the Label instance rendered inside the box.
        pre_cb: optional callback invoked with this widget before each
            draw (typically used to refresh the label's text).
        bgcolor: optional background fill color; None leaves the
            background untouched.
        """
        super().__init__(x, y, w, h)
        self.label = label
        self.bgcolor = bgcolor
        self.pre_cb = pre_cb
        self.hash = None  # hash for caching (cache key of last rendered state)

    def draw(self):
        # Let the owner refresh the label (e.g. live values) before drawing.
        if self.pre_cb:
            self.pre_cb(self)
        # see whether we can render from cache
        # (local name shadows the hash() builtin, which is not used here)
        hash = (self.bgcolor, self.label.cache_hash())
        if hash == self.hash:
            super().draw()  # unchanged: blit the cached framebuffer copy
            return
        # Nope, gotta do the full work
        if self.bgcolor is not None:
            Screen.fb.fill_rect(self.x, self.y, self.w, self.h, self.bgcolor)
        # Vertical centering: dy is the baseline offset of the label's
        # last line within the box.
        dy = (self.h + self.label.height + 1) // 2
        align = self.label.align
        if align == "center":
            self.label.draw(self.x + self.w // 2, self.y + dy)
        elif align == "left":
            self.label.draw(self.x, self.y + dy)
        elif align == "right":
            self.label.draw(self.x + self.w - 1, self.y + dy)
        # Save to cache
        super().save()
        self.hash = hash
class Button(Widget):
    def __init__(self, x, y, w, h, label, cb, pri=True):
        """Touchable rounded-rectangle button.

        label: Label rendered centered in the button.
        cb: callback invoked with this button on touch release.
        pri: True for the theme's primary color, False for secondary.
        """
        super().__init__(x, y, w, h)
        self.label = label
        self.state = "enabled"  # one of "enabled", "pressed", "disabled"
        self.cb = cb  # call-back to handle button press
        self.pri = pri  # use primary color (else secondary)
        self.hash = None  # hash for caching

    def draw(self):
        # figure out colors given button state
        # (self.screen is assigned by Screen.add() when the button is enqueued)
        theme = self.screen.theme
        txtcol = theme.black
        if self.state == "disabled":
            color = theme.grey_light
        elif self.state == "pressed":
            color = theme.pri_dark if self.pri else theme.sec_dark
            txtcol = theme.white
        else:
            color = theme.primary if self.pri else theme.secondary
        # see whether we can render from cache
        hash = (self.state, self.pri, self.label.cache_hash())
        if hash == self.hash:
            super().draw()
            return
        # Nope, gotta do the full work
        Screen.fb.fill_round_rect(self.x, self.y, self.w, self.h, self.h // 4, color)
        # Center the label text vertically within the button.
        dy = (self.h + self.label.height + 1) // 2
        self.label.draw(self.x + self.w // 2, self.y + dy, txtcol)
        # Save to cache
        super().save()
        self.hash = hash

    def handle(self, x, y, press):
        """Handle a touch event; returns True when consumed.

        Shows pressed feedback immediately on touch-down, and fires the
        callback on touch-up.
        """
        if not self.cb or not self.inside(x, y):
            return False
        if press:
            self.state = "pressed"
            self.draw()
            Screen.fb.display()
        else:
            self.state = "enabled"
            self.draw()
            Screen.fb.display()
            self.cb(self)
        return True
# Label is a text label that can be drawn at an arbitrary x-y coordinate. It is NOT a Widget but
# provides a cache_hash method so a widget that incorporates a Label can compute a hash to detect
# when something changed and the cached version is out of date.
class Label:
    @classmethod
    def default_font(cls, f):
        """Set the font used by labels constructed without an explicit font."""
        cls.font = f

    def __init__(self, text, font=None, align="center", color=0):
        self.font = font if font else Label.font
        self.color = color
        self.align = align  # "center", "left" or "right"
        self._raw = None  # raw string most recently passed to set_text()
        self.text = None  # list of lines derived from the raw text
        self.set_text(text)

    def draw(self, x, y, color=None):
        """Render the label; (x, y) anchors the baseline of the LAST line,
        with multi-line labels growing upward from there."""
        if color is None:
            color = self.color
        # draw from bottom up
        i = len(self.text) - 1
        while i >= 0:
            if self.align == "center":
                self.font.text(self.text[i], x - self.widths[i] // 2, y, color)
            elif self.align == "right":
                self.font.text(self.text[i], x - self.widths[i], y, color)
            else:
                self.font.text(self.text[i], x, y, color)
            y -= self.font.height
            i -= 1

    def cache_hash(self):
        # Tuple capturing everything that affects rendering. Note: only
        # compared with ==, never actually hashed (self.text is a list).
        return (self.font, self.color, self.align, self.text)

    def set_text(self, text):
        """Replace the label text and recompute per-line widths and height."""
        # BUG FIX: the original compared the incoming string against
        # self.text, which holds a *list* of lines after the first call, so
        # the unchanged-text early-out never triggered. Compare against the
        # raw string instead.
        if text == self._raw:
            return
        self._raw = text
        if "\n" in text:
            # multi-line label
            self.text = text.split("\n")
            self.widths = [self.font.dim(t)[0] for t in self.text]
            self.height = len(self.text) * self.font.height - (self.font.height - self.font.ascend)
            self.width = max(self.widths)
        else:
            self.text = [text]
            self.width, _, self.height = self.font.dim(text)
            self.widths = [self.width]
# NumField displays a numeric field with a tag and a measurement unit, typical of what might be
# found in a sports activity tracker. The tag is small in the upper left, the unit in the upper
# right, and the center mid/bottom contains the number.
# A format string fmt is provided for the number, and value may actually be a tuple, this enables a
# display like HH:MM:SS by passing a 3-tuple as value.
# The format string must be of fixed width and it cannot be changed without messing up the
# formatting.
class NumField:
    # the fmt must be a printf format that is fixed-width
    def __init__(
        self,
        tag,  # tag to display in the upper left, e.g. "speed", "distance", ...
        fmt,  # format string for the numeric value, must be fixed width
        value=0,  # initial value(s), may be a tuple if fmt contains multiple % formats
        w=None,  # width in pixels for the field
        h=None,  # height in pixels
        font=None,  # font for the number, None to use seven-segment display
        tag_font=None,  # font for the tag and unit
        color=0,  # color for the number
        tag_color=0,  # color for the tag
        unit=None,  # unit to display in the upper right, None to omit, e.g. "mph", "m", ...
        unit_color=0,  # color for the unit
    ):
        self.tag = tag
        self.unit = unit
        self.fmt = fmt
        self.value = value
        self.width = w
        self.height = h
        self.font = font
        self.tag_font = tag_font
        self.color = color
        self.tag_color = tag_color
        self.unit_color = unit_color
        # All the layout offsets (txo/tyo/uxo and the seg7/font-specific
        # ones) are precomputed here so draw() only has to add them to the
        # field's top-left corner.
        if font is None:
            # calculate positioning using seg7
            # tw, th, thb = tag_font.dim(tag) # tag width, height, height above baseline
            tw, _, _ = tag_font.dim("0")
            self.txo = tw * 2 // 3
            self.tyo = tag_font.height
            if unit is not None:
                uw, _, _ = tag_font.dim(unit)
                self.uxo = w - uw - tw * 2 // 3
            # assume seg7 is height constrained
            self.sh = h - 10 - tag_font.height  # seg7 digit height
            self.sw = self.sh * 3 // 7  # seg7 digit width
            width = seg7.width(fmt % value, self.sw)
            maxw = w * 9 // 10
            print("SH %s: sh=%d sw=%d w=%d maxw=%d" % (tag, self.sh, self.sw, width, maxw))
            if width > maxw:
                # width-constrained: shrink the digits proportionally so the
                # rendered number fits, then recompute the total width
                self.sw = self.sw * maxw // width
                self.sh = self.sw * 7 // 3
                width = seg7.width(fmt % value, self.sw)
                print("S* %s: sh=%d sw=%d width=%d maxw=%d" % (tag, self.sh, self.sw, width, maxw))
            self.sxo = (w - width) // 2  # seg7 tot width, centered
            self.syo = h - 5 - self.sh
        else:
            # calculate positioning using regular fonts
            nw, nh, nhb = font.dim(fmt % value)  # number width, height, height above baseline
            tw, th, thb = tag_font.dim(tag)  # tag width, height, height above baseline
            if w is not None:
                self.nxo = (w - nw) // 2  # number X offset
                self.txo = self.nxo  # tag X offset
                xtra = h - tag_font.height - font.ascend
                self.tyo = xtra // 2 + tag_font.ascend  # tag Y offset (to baseline)
                self.nyo = xtra // 2 + tag_font.height + font.ascend  # number Y off (to baseline)
            else:
                # auto-sizing from the rendered text is not implemented;
                # an explicit width is required
                assert "not supported" == ""

    def set(self, value=None):
        """Update the displayed value (a scalar, or a tuple matching fmt)."""
        self.value = value

    def draw(self, x, y):
        """Render the field with (x, y) as its top-left corner."""
        yt = y + self.tyo
        # draw tag
        self.tag_font.text(self.tag, x + self.txo, yt, self.tag_color)
        # draw unit, if provided
        if self.unit is not None:
            self.tag_font.text(self.unit, x + self.uxo, yt, self.unit_color)
        # draw number
        txt = self.fmt % self.value
        if self.font is None:
            # no font -> draw seven segment digits using lines
            seg7.draw_number(
                Screen.fb, txt, x + self.sxo, y + self.syo, self.sw, self.sh, self.color, 3
            )
        else:
            self.font.text(txt, x + self.nxo, y + self.nyo, self.color)
| StarcoderdataPython |
11350423 | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import Required
class BioForm(FlaskForm):
    """Form with a single optional free-text bio field."""
    bio = TextAreaField('Write A Short Bio About You...')
    submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
    """Profile-update form; here the bio text is required."""
    bio = TextAreaField('Tell us about you.',validators = [Required()])
    submit = SubmitField('Submit')
class BlogForm(FlaskForm):
    """Blog-composition form: required title plus optional body text."""
    title = StringField('Blog Title', validators=[Required()])
    blog = TextAreaField('Write a Blog...')
    submit = SubmitField('submit')
class CommentForm(FlaskForm):
    """Comment form: required title plus optional comment body."""
    # Fix: removed a dataset-extraction artifact that was fused onto the
    # last line and made the module unparseable.
    title = StringField('Comment Title', validators=[Required()])
    comment = TextAreaField('Write Comments...')
    submit = SubmitField('submit')
189957 | <reponame>orwonthe/big_muddy_pi
from big_muddy_io import BigMuddyIO
from flask import render_template
from flask import request
def servo_cycle_request(servo_cycler):
    """Flask view: on POST, run the requested number of servo cycles, then
    render the servo-cycle page."""
    if request.method == 'POST':
        # Map the submit button's label to a cycle count (-1 = run forever).
        cycle_counts = {
            "Once": 1,
            "Twice": 2,
            "Ten": 10,
            "Hex": 16,
            "Hundred": 100,
            "Endless": -1,
        }
        action = request.form.get('action')
        if action in cycle_counts:
            servo_cycler.cycle(cycle_counts[action])
    return render_template('servo_cycle.html', tester=servo_cycler, subtitle="Servo Cycle")
164070 | """Web socket proxy."""
import asyncio
import collections
import weakref
import aiohttp
from aiohttp import web
from aiohttp import WSMsgType
from being.serialization import dumps
from being.logging import get_logger
class WebSocket:
"""WebSocket connections. Interfaces with aiohttp web socket requests. Can
hold multiple open web socket connections simultaneously. Also has a message
queue / broker functionality to send messages from non-asyncio world.
Attributes:
sockets: Active web socket connections
queue: Message queue for synchronous senders.
"""
def __init__(self):
self.sockets = weakref.WeakSet()
self.queue = collections.deque(maxlen=100)
self.logger = get_logger('WebSocket')
self.brokerTask = None
async def send_json(self, data):
"""Send data as JSON to all connected web sockets.
Args:
data: Data to send as JSON.
"""
for ws in self.sockets.copy():
if ws.closed:
continue
try:
await ws.send_json(data, dumps=dumps)
except ConnectionResetError as err:
self.logger.exception(err)
def send_json_buffered(self, data):
"""Synchronous send_json(). Data goes into buffered and send at a later
stage (if broker task is running).
Args:
data: Data to send as JSON.
"""
self.queue.append(data)
async def handle_new_connection(self, request) -> web.WebSocketResponse:
"""Aiohttp new web socket connection request handler."""
ws = web.WebSocketResponse(autoclose=True)
await ws.prepare(request)
self.logger.info('Opened web socket')
self.sockets.add(ws)
try:
async for msg in ws:
if msg.type == WSMsgType.ERROR:
self.logger.error('Web socket error with exception %s', ws.exception())
break
finally:
self.logger.info('Discarding web socket')
self.sockets.discard(ws)
self.logger.debug('Web socket closed')
return ws
#pylint: disable=unused-argument
async def close_all_connections(self, app: web.Application = None):
"""Close all web sockets. Can be used with app.on_shutdown() /
app.on_cleanup().
"""
for ws in self.sockets.copy():
await ws.close(code=aiohttp.WSCloseCode.GOING_AWAY, message='Closing web socket')
async def broker_task(self):
"""Message broker task. Takes messages from queue and sends them over
all open web socket connections.
"""
while True:
for data in self.queue.copy():
await self.send_json(data)
self.queue.popleft()
await asyncio.sleep(.1)
    #pylint: disable=unused-argument
    async def start_broker(self, app: web.Application = None):
        """Start message broker task."""
        # Ensure only one broker task runs at a time.
        await self.stop_broker()
        self.brokerTask = asyncio.create_task(self.broker_task())
#pylint: disable=unused-argument
async def stop_broker(self, app: web.Application = None):
"""Stop message broker task."""
if not self.brokerTask:
return
self.brokerTask.cancel()
await self.brokerTask
self.brokerTask = None
| StarcoderdataPython |
3397288 | <filename>tests/test_wps_correlate_field.py
import pytest
from pywps import Service
from pywps.tests import client_for, assert_response_success
from .common import get_output
from climexp_numerical_wps.processes.correlate_field import CorrelateField
def test_wps_correlate_field():
    """Execute the correlate_field WPS process end-to-end via a test client."""
    client = client_for(Service(processes=[CorrelateField()]))
    datainputs = "netcdf_field=cru_ts3.22.1901.2013.pre.dat.nc;"
    datainputs += "netcdf_timeseries=nino3.nc;months=1:12;averaging_method=ave;lag=3;time_frequency=mon"
    resp = client.get(
        "?service=WPS&request=Execute&version=1.0.0&identifier=correlate_field&datainputs={}".format(
            datainputs))
    assert_response_success(resp)
    # The WPS envelope succeeds, but the process itself is expected to
    # report failure (empty data, success == 'False').
    assert get_output(resp.xml) == {'data': '', 'success': 'False'}
| StarcoderdataPython |
3267112 | <gh_stars>0
#!/usr/bin/python3
"""
This program inserts itself in between the two hoverboard control boards, each of which is connected to the program-running computer by a USB-serial converter. Commands sent from one board to another may be changed.
"""
import serial
import sys
import collections
# Serial device nodes of the two USB-serial converters: one per hoverboard
# control board.
master = '/dev/ttyUSB1'
slave = '/dev/ttyUSB0'
# NOTE(review): baudrate is never used below -- both serial.Serial() calls
# hard-code 115200.
baudrate = 115200 #sys.argv[2]
logfile = sys.argv[1]
def lg(message, out):
    """Log *message* both to stdout and to the open log file *out*."""
    print(message)
    out.write(message + '\n')
def checksum(code):
    """Return the modulo-256 sum of the raw byte values in *code*.

    *code* is an iterable of ints in range(256) -- here the first 15 bytes
    of a 16-byte frame window; the result is compared against the frame's
    final (checksum) byte.
    """
    return sum(code) % 256
with open(logfile, 'w') as out:
    lg('Attempting to open serial connection 1...', out)
    with serial.Serial(master, 115200) as ser1:
        lg(ser1.__repr__(), out)
        lg('Serial connection 1 opened successfully.', out)
        lg('Attempting to open serial connection 2...', out)
        with serial.Serial(slave, 115200) as ser2:
            lg(ser2.__repr__(), out)
            lg('Serial connection 2 opened successfully.', out)
            # 16-byte sliding windows over each direction's byte stream.
            # A frame is considered aligned when the window starts with
            # the 0xaa marker and its last byte equals the checksum of
            # the preceding 15 bytes.
            deq1 = bytearray([0x00]*16)
            ldeq1 = deq1[:]
            deq2 = bytearray([0x00]*16)
            ldeq2 = deq2[:]
            while True:
                if ser1.in_waiting:
                    # Shift the window left by one and append the new byte.
                    deq1 = deq1[1:] + deq1[:1]
                    deq1[-1] = ser1.read(1)[0]
                    if checksum(deq1[:-1]) == deq1[-1] and deq1[0] == 0xaa:
                        # modify signal here before forwarding to the slave
                        ldeq1 = deq1[:]
                        lg(ldeq1.hex() + ' ' + ldeq2.hex(), out)
                        ser2.write(deq1)
                if ser2.in_waiting:
                    deq2 = deq2[1:] + deq2[:1]
                    deq2[-1] = ser2.read(1)[0]
                    if checksum(deq2[:-1]) == deq2[-1] and deq2[0] == 0xaa:
                        ldeq2 = deq2[:]
                        lg(ldeq1.hex() + ' ' + ldeq2.hex(), out)
                        ser1.write(deq2)
| StarcoderdataPython |
4886159 | # Problem 52 : Permuted multiples
def contain_same_digit(a, b):
    """Return True if *a* and *b* contain exactly the same decimal digits.

    The digits are compared as a multiset: both numbers must have the same
    digits with the same multiplicities. This fixes the original
    membership-only comparison, which ignored multiplicity and wrongly
    reported e.g. 112 and 122 as containing the same digits.
    """
    return sorted(str(a)) == sorted(str(b))
print(contain_same_digit(125874, 251748))
def smallest_permuted_multiple(max_multiple=6):
    """Return the smallest positive integer i such that i, 2i, ...,
    max_multiple*i all contain exactly the same digits (Project Euler 52).

    The digit comparison is done on sorted digit strings (a true multiset
    comparison), and the sorted key for i is computed once per candidate
    instead of once per multiple. *max_multiple* defaults to 6, matching
    the original behaviour.
    """
    n = 1
    while True:
        digits = sorted(str(n))
        for factor in range(2, max_multiple + 1):
            if sorted(str(factor * n)) != digits:
                break
        else:
            # Every multiple matched: n is the answer.
            return n
        n += 1
print(smallest_permuted_multiple())
| StarcoderdataPython |
3489880 | <reponame>typo-team/tap-typo
'''
TapTypo tests
'''
# Copyright 2019-2020 Typo. All Rights Reserved.
#
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# you may not use this file except in compliance with the
#
# License.
#
#
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing, software
#
# distributed under the License is distributed on an "AS IS" BASIS,
#
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
#
# implied. See the License for the specific language governing
#
# permissions and limitations under the License.
#
#
#
# This product includes software developed at
#
# or by Typo (https://www.typo.ai/).
from io import StringIO
import json
import unittest
from unittest.mock import patch
import singer
from tap_typo.typo import TapTypo
from test_utils.mock_functions import (
mock_requests_get_test_discover_mode,
mock_requests_get_test_resume_with_state, mock_requests_get_test_get_simple_audit_dataset,
mock_requests_get_test_get_simple_streaming_dataset, mock_requests_get_test_multi_page_no_limit,
mock_requests_post_get_token, mock_requests_get_test_request_token
)
from test_utils.outputs import (
TEST_DISCOVER_MODE_OUTPUT, TEST_RESUME_WITH_STATE_OUTPUT,
TEST_GET_SIMPLE_AUDIT_DATASET_OUTPUT, TEST_GET_SIMPLE_STREAMING_DATASET_OUTPUT,
TEST_MULTI_PAGE_NO_LIMIT_OUTPUT
)
from test_utils.utils import generate_config
# Singer Logger
LOGGER = singer.get_logger()
class TestTapTypo(unittest.TestCase):
    '''
    TapTypo tests
    '''
    maxDiff = None  # Get the full diff when debugging tests

    @patch('tap_typo.typo.requests.post')
    @patch('tap_typo.typo.requests.get', new=mock_requests_get_test_request_token)
    def test_request_token(self, mock_post):
        '''
        Request an access token from Typo
        '''
        mock_post.return_value.status_code = 200
        mock_post.return_value.json.return_value = {'token': 'test'}
        # Constructing the tap is expected to emit exactly one INFO log.
        with self.assertLogs(LOGGER, level='INFO') as log:
            tap = TapTypo(config=generate_config())
        self.assertEqual(len(log.output), 1)
        expected_headers = {
            'Content-Type': 'application/json'
        }
        expected_payload = {
            'apikey': 'typo_key',
            'secret': 'typo_secret'
        }
        token = tap.request_token()
        # Verify the exact token-endpoint call (URL, payload, timeout).
        mock_post.assert_called_with(
            'https://typo.ai/token',
            data=json.dumps(expected_payload),
            headers=expected_headers,
            timeout=20
        )
        self.assertEqual(token, 'test')

    @patch('tap_typo.typo.requests.post', new=mock_requests_post_get_token)
    @patch('tap_typo.typo.requests.get', new=mock_requests_get_test_discover_mode)
    def test_discover_mode(self):
        '''
        tap-typo will fetch the schema from Typo and construct a catalog
        with the stream information.
        Verified on this test:
        - Discover mode output.
        - Conversion of typo-provided field types into JSON schema types.
        - Detection of key properties from typo-provided data.
        '''
        out = None
        # Capture the catalog printed to stdout during discovery.
        with patch('sys.stdout', new=StringIO()) as mock_stdout, self.assertLogs(LOGGER, level='INFO') as log:
            tap = TapTypo(config=generate_config())
            tap.discover()
            out = mock_stdout.getvalue()
        self.assertEqual(len(log.output), 1)
        self.assertEqual(out, TEST_DISCOVER_MODE_OUTPUT)

    @patch('tap_typo.typo.requests.post', new=mock_requests_post_get_token)
    @patch('tap_typo.typo.requests.get', new=mock_requests_get_test_get_simple_audit_dataset)
    def test_get_simple_audit_dataset(self):
        '''
        Fetch a simple dataset with 2 records from Typo
        '''
        out = None
        with patch('sys.stdout', new=StringIO()) as mock_stdout, self.assertLogs(LOGGER, level='INFO') as log:
            tap = TapTypo(config=generate_config())
            tap.sync()
            out = mock_stdout.getvalue()
        self.assertEqual(len(log.output), 4)
        self.assertEqual(out, TEST_GET_SIMPLE_AUDIT_DATASET_OUTPUT)

    @patch('tap_typo.typo.requests.post', new=mock_requests_post_get_token)
    @patch('tap_typo.typo.requests.get', new=mock_requests_get_test_get_simple_streaming_dataset)
    def test_get_simple_streaming_dataset(self):
        '''
        Fetch a simple dataset with 2 records from Typo
        '''
        out = None
        # audit_id=None selects the streaming (non-audit) code path.
        with patch('sys.stdout', new=StringIO()) as mock_stdout, self.assertLogs(LOGGER, level='INFO') as log:
            tap = TapTypo(config=generate_config(audit_id=None))
            tap.sync()
            out = mock_stdout.getvalue()
        self.assertEqual(len(log.output), 4)
        self.assertEqual(out, TEST_GET_SIMPLE_STREAMING_DATASET_OUTPUT)

    @patch('tap_typo.typo.requests.post', new=mock_requests_post_get_token)
    @patch('tap_typo.typo.requests.get', new=mock_requests_get_test_multi_page_no_limit)
    def test_multi_page_no_limit(self):
        '''
        Fetch two pages of a dataset until records end.
        '''
        out = None
        # records_per_page=2 forces pagination across the mocked responses.
        with patch('sys.stdout', new=StringIO()) as mock_stdout, self.assertLogs(LOGGER, level='INFO') as log:
            tap = TapTypo(config=generate_config(records_per_page=2))
            tap.sync()
            out = mock_stdout.getvalue()
        self.assertEqual(len(log.output), 5)
        self.assertEqual(out, TEST_MULTI_PAGE_NO_LIMIT_OUTPUT)

    @patch('tap_typo.typo.requests.post', new=mock_requests_post_get_token)
    @patch('tap_typo.typo.requests.get', new=mock_requests_get_test_resume_with_state)
    def test_resume_with_state(self):
        '''
        Resume sync by providing state input
        '''
        out = None
        with patch('sys.stdout', new=StringIO()) as mock_stdout, self.assertLogs(LOGGER, level='INFO') as log:
            # The bookmark says record id 6 was the last one synced, so
            # the tap should resume after it.
            tap = TapTypo(
                config=generate_config(records_per_page=5),
                state={
                    'bookmarks': {
                        'tap-typo-mock_repository-mock_dataset-audit-123': {
                            '__typo_record_id': 6
                        }
                    }
                })
            tap.sync()
            out = mock_stdout.getvalue()
        self.assertEqual(len(log.output), 4)
        self.assertEqual(out, TEST_RESUME_WITH_STATE_OUTPUT)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
327883 | __version__ = '0.6.3-dev'
| StarcoderdataPython |
1709333 | import shutil
def escrever_arquivo(texto, nome_arquivo="teste.txt"):
    """Create or overwrite *nome_arquivo* with *texto*.

    The target file name is now a parameter (the default keeps the original
    hard-coded "teste.txt" behaviour, so existing callers are unaffected).
    The context manager guarantees the file is closed even if the write
    raises, unlike the original open()/close() pair.
    """
    with open(nome_arquivo, "w") as arquivo:
        arquivo.write(texto)
def atualizar_arquivo(nome_arquivo, texto):
    """Append *texto* to the end of *nome_arquivo* (creating it if absent).

    Uses a context manager so the file handle is always released.
    """
    with open(nome_arquivo, "a") as arquivo:
        arquivo.write(texto)
def ler_arquivo(nome_arquivo):
    """Print the full contents of *nome_arquivo* and return them.

    Printing is kept for backward compatibility; returning the text (the
    original returned None) additionally lets callers reuse the data.
    """
    with open(nome_arquivo, "r") as arquivo:
        texto = arquivo.read()
    print(texto)
    return texto
def media_alunos(nome_arquivo):
    """Print each student's average grade read from *nome_arquivo*.

    Each non-empty line has the form "name,g1,g2,...". Fixes from the
    original: the average divides by the actual number of grades on the
    line instead of a hard-coded 4, blank lines (e.g. a trailing newline)
    no longer crash int(), and the file handle is closed via a context
    manager.
    """
    with open(nome_arquivo, "r") as arquivo:
        notas_aluno = arquivo.read().split("\n")
    for elemento in notas_aluno:
        if not elemento:
            # Skip blank lines.
            continue
        info_aluno = elemento.split(",")
        aluno = info_aluno[0]
        notas = info_aluno[1:]
        media = sum(int(nota) for nota in notas) / len(notas)
        print("Média do aluno {} é {}".format(aluno, media))
def copiar_arquivo(nome_arquivo, destino="."):
    """Copy *nome_arquivo* into the *destino* directory.

    *destino* defaults to the current directory, preserving the original
    behaviour. NOTE(review): copying a file into the directory it already
    lives in raises shutil.SameFileError -- exactly what the original
    hard-coded "." did for files in the current directory.
    """
    shutil.copy(nome_arquivo, destino)
def mover_arquivo(nome_arquivo, destino="."):
    """Move *nome_arquivo* into the *destino* directory.

    *destino* defaults to the current directory, preserving the original
    behaviour. NOTE(review): moving a file into its own directory raises
    shutil.Error because the destination already exists -- as the original
    hard-coded "." did.
    """
    shutil.move(nome_arquivo, destino)
if __name__ == '__main__':
    # Example 1: write, append, then read back "teste.txt".
    # escrever_arquivo("primeira linha.\n")
    # atualizar_arquivo("segunda linha.\n")
    # ler_arquivo("teste.txt")
    # Example 2: append a student record to the grades file.
    # aluno = "\nCesar,7,9,3,8"
    # atualizar_arquivo("notas.txt", aluno)
    # Example 3: print per-student averages.
    # media_alunos("notas.txt")
    # Example 4: copy the grades file.
    # NOTE(review): this copies "notas.txt" into the current directory; if
    # the file already lives there, shutil.copy raises SameFileError.
    copiar_arquivo("notas.txt")
| StarcoderdataPython |
11259134 | from datetime import datetime
import os
import uuid
from django.conf import settings
from django.urls import reverse
from django.db import models
from django.core.exceptions import ValidationError
from django.utils import timezone
from devilry.apps.core.models import Delivery
from devilry.apps.core.models import StaticFeedback
from devilry.apps.core.models import StaticFeedbackFileAttachment
from devilry.devilry_account.models import User
class FeedbackDraft(models.Model):
    """
    Created by examiners when they provide feedback. A StaticFeedback is
    automatically created when a FeedbackDraft is published.
    """
    DEFAULT_FEEDBACKTEXT_EDITOR = 'devilry-markdown'

    # The delivery this draft gives feedback on.
    delivery = models.ForeignKey(Delivery, related_name='devilry_gradingsystem_feedbackdraft_set', on_delete=models.CASCADE)
    # Which editor produced feedbacktext_raw / feedbacktext_html.
    feedbacktext_editor = models.CharField(
        default=DEFAULT_FEEDBACKTEXT_EDITOR,
        max_length=20,
        choices=(
            ('devilry-markdown', 'Markdown editor'),
            ('wysiwyg-html', 'WYSIWYG html')
        ))
    feedbacktext_raw = models.TextField(
        blank=True, null=True)
    feedbacktext_html = models.TextField(
        blank=True, null=True)
    points = models.PositiveIntegerField(
        blank=False, null=False)
    saved_by = models.ForeignKey(User, related_name='devilry_gradingsystem_feedbackdraft_set', on_delete=models.CASCADE)
    published = models.BooleanField(
        default=False,
        help_text=('Has this draft been published as a StaticFeedback? '
                   'Setting this to true on create automatically creates a StaticFeedback.'))
    staticfeedback = models.OneToOneField(
        StaticFeedback,
        blank=True, null=True,
        related_name='devilry_gradingsystem_feedbackdraft_set',
        help_text='The StaticFeedback where this was published if this draft has been published.', on_delete=models.CASCADE)
    save_timestamp = models.DateTimeField(
        blank=False, null=False,
        help_text='Time when this feedback was saved. Since FeedbackDraft is immutable, this never changes.')

    def __str__(self):
        return 'FeedbackDraft#{} for Delivery#{} ({} - {})'.format(
            self.id, self.delivery_id, self.saved_by, self.save_timestamp)

    @classmethod
    def __query_last_feedbackdraft_anyowner(cls, delivery):
        # Relies on Meta.ordering ('-save_timestamp') so first() yields the
        # newest draft. NOTE(review): this private helper is not called
        # anywhere in this file.
        return delivery.devilry_gradingsystem_feedbackdraft_set.first()

    @classmethod
    def get_last_feedbackdraft(cls, assignment, delivery, user):
        """
        Get the last feedback draft accessible by the given user for the given ``delivery``.
        The ``assignment`` is required for performance reasons since it is usually
        available in the context where you use this method, and should not require
        an extra query to lookup.
        """
        queryset = cls.objects.filter(delivery=delivery)
        # Restrict to the user's own drafts unless the assignment allows
        # examiners to share drafts.
        if not assignment.feedback_workflow_allows_shared_feedback_drafts():
            queryset = queryset.filter(saved_by=user)
        return queryset.order_by('-save_timestamp').first()

    @classmethod
    def get_last_feedbackdraft_for_group(cls, assignment, group, user):
        """
        Get the last feedback draft accessible by the given user for the given ``group``.
        The ``assignment`` is required for performance reasons since it is usually
        available in the context where you use this method, and should not require
        an extra query to lookup.
        """
        queryset = cls.objects.filter(delivery__deadline__assignment_group=group)
        if not assignment.feedback_workflow_allows_shared_feedback_drafts():
            queryset = queryset.filter(saved_by=user)
        return queryset.order_by('-save_timestamp').first()

    def clean(self):
        if self.id is None:  # If creating a new FeedbackDraft
            if not self.published:
                self.staticfeedback = None  # We should NEVER set staticfeedback if published is not True
        else:
            # Existing drafts may never be modified.
            raise ValidationError('FeedbackDraft is immutable (it can not be changed).')
        if self.published and self.staticfeedback is None:
            raise ValidationError('Published FeedbackDraft requires a StaticFeedback.')

    def save(self, *args, **kwargs):
        # Stamp every save; combined with clean(), drafts are effectively
        # append-only records.
        self.save_timestamp = timezone.now()
        super(FeedbackDraft, self).save(*args, **kwargs)

    def to_staticfeedback(self, assignment=None):
        """Build (without saving) the StaticFeedback equivalent of this draft."""
        return StaticFeedback.from_points(
            self.points,
            assignment=assignment,
            delivery=self.delivery,
            rendered_view=self.feedbacktext_html,
            saved_by=self.saved_by)

    class Meta:
        ordering = ['-save_timestamp']
def feedback_draft_file_upload_to(instance, filename):
    """Return the storage path for an uploaded FeedbackDraftFile.

    Files are grouped per delivery and renamed to a fresh UUID so uploads
    can never collide; only the original file extension is preserved.
    """
    base_dir = 'devilry_gradingsystem/feedbackdraftfile'
    _, extension = os.path.splitext(filename)
    unique_name = uuid.uuid1()
    return '{}/{}/{}{}'.format(base_dir, instance.delivery_id, unique_name, extension)
class FeedbackDraftFileManager(models.Manager):
    def filter_accessible_files(self, assignment, delivery, user):
        """
        Get the feedback draft files accessible by the given user for the given ``delivery``.
        The ``assignment`` is required for performance reasons since it is usually
        available in the context where you use this method, and should not require
        an extra query to lookup.
        """
        queryset = self.get_queryset().filter(delivery=delivery)
        # Same sharing rule as FeedbackDraft.get_last_feedbackdraft().
        if not assignment.feedback_workflow_allows_shared_feedback_drafts():
            queryset = queryset.filter(saved_by=user)
        return queryset
class FeedbackDraftFile(models.Model):
    """
    A file that is part of the current draft.
    Unlike :class:`.FeedbackDraft`, we only keep one copy of the files.
    """
    delivery = models.ForeignKey(Delivery, related_name='+', on_delete=models.CASCADE)
    saved_by = models.ForeignKey(User, related_name='+', on_delete=models.CASCADE)

    #: The original filename.
    filename = models.TextField(blank=False, null=False)

    #: The uploaded file.
    file = models.FileField(
        upload_to=feedback_draft_file_upload_to
    )

    objects = FeedbackDraftFileManager()

    def get_download_url(self):
        # Download URL; the filename is reduced to ASCII for the URL path.
        return reverse('devilry_gradingsystem_feedbackdraftfile', kwargs={
            'pk': self.pk,
            'asciifilename': self.get_ascii_filename()
        })

    def __str__(self):
        return 'FeedbackDraftFile#{} by user#{} on delivery#{}'.format(
            self.pk, self.saved_by_id, self.delivery_id)

    def to_staticfeedbackfileattachment(self, staticfeedback):
        """
        Create a :class:`devilry.apps.core.models.StaticFeedbackFileAttachment`
        from this FeedbackDraftFile.
        """
        fileattachment = StaticFeedbackFileAttachment(
            staticfeedback=staticfeedback, filename=self.filename)
        fileattachment.file.save(self.filename, self.file)
        return fileattachment

    def get_ascii_filename(self):
        # NOTE(review): str.encode() returns bytes on Python 3, and this
        # value is passed as a URL kwarg in get_download_url() -- confirm
        # the URL resolver accepts bytes here (looks like Python 2 legacy).
        return self.filename.encode('ascii', 'ignore')

    class Meta:
        ordering = ['filename']  # Should have the same ordering as StaticFeedbackFileAttachment
| StarcoderdataPython |
5045330 | #!/usr/bin/env python
import logging
import ioservice
import configuration
Ioservices = {}
callbacks = []
def init():
    """Create an ioservice for every configured service, subscribe it to the
    change notifier, and register it in the module-level Ioservices map."""
    for s in configuration.services:
        new_service = ioservice.ioservice(s["name"],
            s["displays"], s["settings"])
        new_service.subscribe(ioservice_change)
        Ioservices[s["name"]] = new_service
def exit():
    """Shut down every registered ioservice.

    Uses .values() -- valid on both Python 2 and 3 -- instead of the
    original .itervalues(), which does not exist on Python 3 dicts and
    raised AttributeError there.

    NOTE(review): this module-level function shadows the builtin exit().
    """
    for service in Ioservices.values():
        service.exit()
def subscribe(callback):
    """Register *callback* to be invoked whenever an ioservice changes."""
    callbacks.append(callback)
def ioservice_change(service):
    """Fan an ioservice change notification out to every subscriber."""
    for notify in callbacks:
        notify(service)
| StarcoderdataPython |
12855347 | #ModBUS Communication between Schneider EM6436 Meter and Raspberry Pi
#First beta version.
#The meter is set with the following settings
#Communication : (RS484 to RS232 to USB) - BaudRate = 19200, Parity = N, Stopbits = 1, Device ID=1 (Hardcode in meter)
#Electical Settings: APri:50, Asec: 5, VPri: 415, Vsec:415, SYS: SINGLE
#To use the meter in Single Phase mode, Some address has to be commented.
#This program was tested on RPi3 running Rasbian Jessie Pixel from Noobs V2
#Debian Kernel = Linux raspberrypi 4.4.38-v7+ #938 SMP Thu Dec 15 15:22:21 GMT 2016 armv7l GNU/Linux
#Additional Packages: pymodbus,pyserial. (available in pyPi repo)
#V1.0b Feb2,2017
#Code by <NAME> (AWNA/058/15)
#Copyrights AmritaWNA Smartgrid Tag
import time
import pymodbus
import serial
from pymodbus.pdu import ModbusRequest
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
from pymodbus.transaction import ModbusRtuFramer
#Diagnosis messages not requires.
#from pymodbus.diag_message import *
#from pymodbus.file_message import *
#from pymodbus.other_message import *
#from pymodbus.mei_message import *
#Endian library for decoding HEX to Float
from pymodbus.constants import Endian
from pymodbus.payload import BinaryPayloadDecoder as decode
from pymodbus.payload import BinaryPayloadBuilder as builder
#logging not required.
#import logging
#logging.basicConfig()
#log=logging.getLogger()
#log.setLevel(logging.DEBUG)
#EM6436 is defined as client
# Connect to the EM6436 meter as a ModBUS RTU master over the USB-serial
# adapter (19200/N/1 expected on the wire; 50 ms request timeout).
# NOTE(review): this file is Python 2 code (print statements, iteritems).
client = ModbusClient(method ='rtu',port='/dev/ttyUSB0',timeout=0.05)
client.connect()
# Poll the meter forever, one register block per measurement.
while 1:
    ####################################################
    # Reading whole register blocks at once was abandoned (Endian decoding
    # bugs); individual registers are read below instead.
    # T_RMS=client.read_holding_registers(0xbb8,20,unit=1)
    # R_RMS=client.read_holding_registers(0xbd6,20,unit=1) #Total R Phase RMS Block
    # Y_RMS=client.read_holding_registers(0xbd6,20,unit=1) #Total Y Phase RMS Block
    # B_RMS=client.read_holding_registers(0xbd6,20,unit=1) #Total B Phase RMS Block
    #####################################################
    # Current (A): two registers decoded as a little-endian 32-bit float.
    A=client.read_holding_registers(3912,2,unit=1)
    A_d = decode.fromRegisters(A.registers, endian=Endian.Little)
    A_d ={'float':A_d.decode_32bit_float(),}
    #####################################################
    # Line-to-neutral voltage.
    VLN=client.read_holding_registers(3910,2,unit=1)
    VLN_d = decode.fromRegisters(VLN.registers, endian=Endian.Little)
    VLN_d ={'float':VLN_d.decode_32bit_float(),}
    ######################################################
    # Active (W) and apparent (VA) power.
    # NOTE: EM6436 does not give VAR Values!!!
    W=client.read_holding_registers(3902,2,unit=1)
    VA=client.read_holding_registers(3900,2,unit=1)
    W_d = decode.fromRegisters(W.registers, endian=Endian.Little)
    W_d ={'float':W_d.decode_32bit_float(),}
    VA_d = decode.fromRegisters(VA.registers, endian=Endian.Little)
    VA_d ={'float':VA_d.decode_32bit_float(),}
    ######################################################
    # Power factor.
    PF=client.read_holding_registers(3906,2,unit=1)
    PF_d = decode.fromRegisters(PF.registers, endian=Endian.Little)
    PF_d ={'float':PF_d.decode_32bit_float(),}
    ######################################################
    # Line frequency.
    F=client.read_holding_registers(3914,2,unit=1)
    F_d = decode.fromRegisters(F.registers, endian=Endian.Little)
    F_d ={'float':F_d.decode_32bit_float(),}
    ######################################################
    # Cumulative energy counters (VAh and Wh).
    VAH=client.read_holding_registers(3958,2,unit=1)
    WH=client.read_holding_registers(3960,2,unit=1)
    VAH_d = decode.fromRegisters(VAH.registers, endian=Endian.Little)
    VAH_d ={'float':VAH_d.decode_32bit_float(),}
    WH_d = decode.fromRegisters(WH.registers, endian=Endian.Little)
    WH_d ={'float':WH_d.decode_32bit_float(),}
    ######################################################
    # Power-interruption counter (16-bit unsigned).
    intr=client.read_holding_registers(3998,2,unit=1)
    intr_d = decode.fromRegisters(intr.registers, endian=Endian.Little)
    intr_d ={'16uint':intr_d.decode_16bit_uint(),}
    ######################################################
    print "-" * 100
    timestamp = time.strftime('%H:%M:%S %d-%m-%Y')
    print timestamp
    print "Current Values"
    # Each *_d dict holds a single decoded value; the loops below print it
    # and rebind the measurement name (e.g. A) from the raw response object
    # to the decoded float.
    for i, value in A_d.iteritems():
        print value
        A=value
    print "-" * 100
    print "Voltage Values"
    for i, value in VLN_d.iteritems():
        print value
        VLN=value
    print "-" * 100
    print "Power Factor Values"
    for i, value in PF_d.iteritems():
        print value
        PF=value
    print "-" * 100
    print "Frequency Value"
    for i, value in F_d.iteritems():
        print value
        F=value
    print "-" * 100
    print "Power Values"
    for i, value in W_d.iteritems():
        print value
        W=value
    for i, value in VA_d.iteritems():
        print value
        VA=value
    print "-" * 100
    print "Energy Value"
    for i, value in VAH_d.iteritems():
        print value
        VAH=value
    for i, value in WH_d.iteritems():
        print value
        WH=value
    print "-" * 100
    print "interruption"
    for i, value in intr_d.iteritems():
        print value
        intr=value
    print "-" * 100
    # NOTE(review): close() runs at the end of every loop iteration;
    # subsequent iterations appear to rely on pymodbus reconnecting on the
    # next request -- confirm against the pymodbus version in use.
    client.close()
| StarcoderdataPython |
4872849 | import cv2
def compare_ratio(src_img, template, ratio_list):
    """Template-match *template* against *src_img* at each scale in *ratio_list*.

    Returns (best_score, best_ratio, x, y, w, h) for the scale whose
    TM_CCOEFF_NORMED correlation peak is highest; ties keep the later
    ratio because of the >= comparison.
    """
    optimal_maxVal = 0
    optimal_ratio = 0
    optimal_x = 0
    optimal_y = 0
    optimal_w = 0
    optimal_h = 0
    temp_h, temp_w = template.shape[:2]
    for ratio in ratio_list:
        resize_w = int(temp_w*ratio)
        resize_h = int(temp_h*ratio)
        resize_temp = cv2.resize(template, dsize=(resize_w, resize_h), interpolation=cv2.INTER_LINEAR)
        resize_matching = cv2.matchTemplate(src_img, resize_temp,cv2.TM_CCOEFF_NORMED)
        resize_minVal, resize_maxVal, resize_minLoc, resize_maxLoc = cv2.minMaxLoc(resize_matching)
        if resize_maxVal >= optimal_maxVal:
            # New best (or equal) correlation: remember this scale's peak.
            optimal_ratio = round(ratio,7)
            optimal_maxVal = resize_maxVal
            optimal_x, optimal_y = resize_maxLoc
            optimal_w = resize_w
            optimal_h = resize_h
        else:
            pass
        # debug: print(ratio_list.index(ratio), round(ratio,7), resize_minVal, resize_maxVal, resize_minLoc, resize_maxLoc)
    return optimal_maxVal, optimal_ratio, optimal_x, optimal_y, optimal_w, optimal_h
def ratio_template_matching(original_src_img, original_template, result_path = None):
    """Multi-scale template matching with progressive decimal refinement.

    Coarsely scans scales in 0.1 steps, then refines the best scale one
    decimal place at a time (down to 1e-5) until the correlation exceeds
    matching_threshold or the best ratio stops changing. Draws/annotates
    the best match on the source image, shows it, optionally writes it to
    *result_path*, and returns [ratio, accuracy, x, y, w, h].
    """
    src_img = cv2.cvtColor(original_src_img, cv2.COLOR_RGB2GRAY)
    template = cv2.cvtColor(original_template, cv2.COLOR_RGB2GRAY)
    dst_src_img = original_src_img
    #matching_accuracy
    matching_threshold = 0.95
    #for resize check threshold
    # NOTE(review): resize_threshold is never used below.
    resize_threshold = 0
    temp_h, temp_w = template.shape[:2]
    src_h, src_w = src_img.shape[:2]
    # Largest scale at which the template still fits inside the source.
    if src_w/temp_w < src_h/temp_h:
        max_ratio = src_w/temp_w
    else:
        max_ratio = src_h/temp_h
    # Pass 1: coarse scan in 0.1 increments up to max_ratio.
    ratio_list_1 = []
    for i in range(int(max_ratio*10)+1):
        ratio_list_1.append(round(0.1*(i+1),2))
    optimal_maxVal, optimal_ratio, optimal_x, optimal_y, optimal_w, optimal_h = compare_ratio(src_img, template, ratio_list_1)
    prvious_ratio = optimal_ratio
    # debug: first-decimal-place result
    if optimal_maxVal > matching_threshold:
        pass
    else:
        # Pass 2: refine the second decimal place around the best ratio.
        ratio_list_2 = []
        for i in range(21):
            ratio_list_2.append(optimal_ratio-0.1+0.01*i)
        optimal_maxVal, optimal_ratio, optimal_x, optimal_y, optimal_w, optimal_h = compare_ratio(src_img, template, ratio_list_2)
        # debug: second-decimal-place result
        if prvious_ratio == optimal_ratio:
            # Best ratio unchanged: further refinement would not help.
            pass
        else:
            prvious_ratio = optimal_ratio
            if optimal_maxVal > matching_threshold:
                pass
            else:
                # Pass 3: third decimal place.
                ratio_list_3 = []
                for i in range(21):
                    ratio_list_3.append(optimal_ratio-0.01+0.001*i)
                optimal_maxVal, optimal_ratio, optimal_x, optimal_y, optimal_w, optimal_h = compare_ratio(src_img, template, ratio_list_3)
                # debug: third-decimal-place result
                if prvious_ratio == optimal_ratio:
                    pass
                else:
                    prvious_ratio = optimal_ratio
                    if optimal_maxVal > matching_threshold:
                        pass
                    else:
                        # Pass 4: fourth decimal place.
                        ratio_list_4 = []
                        for i in range(21):
                            ratio_list_4.append(optimal_ratio-0.001+0.0001*i)
                        optimal_maxVal, optimal_ratio, optimal_x, optimal_y, optimal_w, optimal_h = compare_ratio(src_img, template, ratio_list_4)
                        # debug: fourth-decimal-place result
                        if prvious_ratio == optimal_ratio:
                            pass
                        else:
                            prvious_ratio = optimal_ratio
                            if optimal_maxVal > matching_threshold:
                                pass
                            else:
                                # Pass 5: fifth (final) decimal place.
                                ratio_list_5 = []
                                for i in range(21):
                                    ratio_list_5.append(optimal_ratio-0.0001+0.00001*i)
                                optimal_maxVal, optimal_ratio, optimal_x, optimal_y, optimal_w, optimal_h = compare_ratio(src_img, template, ratio_list_5)
                                prvious_ratio = optimal_ratio
                                # debug: fifth-decimal-place result
    print("Matching Result", optimal_ratio, round(optimal_maxVal,7), optimal_x, optimal_y, optimal_w, optimal_h)
    # Annotate the best match: bounding box plus ratio/accuracy text.
    dst_src_img = cv2.rectangle(dst_src_img, (optimal_x, optimal_y),
                                (optimal_x + optimal_w, optimal_y + optimal_h), (0, 0, 255), 2)
    dst_src_img = cv2.putText(dst_src_img,"ratio: {0}, accuracy {1}".format(optimal_ratio, round(optimal_maxVal,7)),(optimal_x, optimal_y), cv2.FONT_HERSHEY_COMPLEX, 2, (0,0,0), 3)
    cv2.imshow("template", original_template)
    cv2.imshow("result", dst_src_img)
    if result_path != None:
        cv2.imwrite(result_path, dst_src_img)
    else:
        pass
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    result_list = [optimal_ratio, round(optimal_maxVal,7), optimal_x, optimal_y, optimal_w, optimal_h]
    return result_list
| StarcoderdataPython |
11203858 | <reponame>ClovisChen/LearningCNN
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pose.IMUPreInt as imu
import pose.PyLie.transform as tf
import pose.IMUPreInt as IMUPreInt
import pose.struct as line_struct
import data.euroc_reader as euroc
import cv2
def test_fuse_gravity(data_root):
    """Replay a EuRoC stereo sequence: rectify each pair, detect and draw
    line segments on the left image, and display the colour pair."""
    reader = euroc.euroc_data(data_root)
    reader.read_img_imu_gt()
    for left, right in reader.img_list:
        left_color = cv2.imread(left, cv2.IMREAD_COLOR)
        left_img = cv2.cvtColor(left_color, cv2.COLOR_BGR2GRAY)
        right_color = cv2.imread(right, cv2.IMREAD_COLOR)
        right_img = cv2.cvtColor(right_color, cv2.COLOR_BGR2GRAY)
        # Undistort/rectify with the reader's precomputed remap tables.
        left_un = cv2.remap(left_img, reader.M1l, reader.M2l, cv2.INTER_LINEAR)
        # NOTE(review): right_un is computed but never used below; the raw
        # right_color image is what gets displayed -- confirm intent.
        right_un = cv2.remap(right_img, reader.M1r, reader.M2r, cv2.INTER_LINEAR)
        lines = line_struct.detect_lines(left_un)
        line_struct.draw_lines(left_color, lines)
        # Side-by-side display, 10 ms per frame.
        img = np.concatenate((left_color, right_color), axis=1)
        cv2.imshow('left right', img)
        cv2.waitKey(10)
# Module-level IMU initialiser instance.
# NOTE(review): est_imu_init is created but never used in this fragment.
est_imu_init = IMUPreInt.EstIMUInit()
if __name__ == '__main__':
    data_root = '/home/bobin/data/euroc/MH_01_easy/'
    test_fuse_gravity(data_root)
| StarcoderdataPython |
1672105 | <reponame>taharh/label-studio
# coding: utf-8
import sys
import os
import warnings
import glob
from importlib import import_module
import ruamel.yaml
from ruamel.yaml.error import UnsafeLoaderWarning, YAMLError # NOQA
from ruamel.yaml.tokens import * # NOQA
from ruamel.yaml.events import * # NOQA
from ruamel.yaml.nodes import * # NOQA
from ruamel.yaml.loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA
from ruamel.yaml.dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA
from ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, nprint, nprintf # NOQA
from ruamel.yaml.resolver import VersionedResolver, Resolver # NOQA
from ruamel.yaml.representer import (
BaseRepresenter,
SafeRepresenter,
Representer,
RoundTripRepresenter,
)
from ruamel.yaml.constructor import (
BaseConstructor,
SafeConstructor,
Constructor,
RoundTripConstructor,
)
from ruamel.yaml.loader import Loader as UnsafeLoader
from ruamel.yaml.comments import CommentedMap, CommentedSeq, C_PRE
if False: # MYPY
from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA
from ruamel.yaml.compat import StreamType, StreamTextType, VersionType # NOQA
from pathlib import Path
try:
from _ruamel_yaml import CParser, CEmitter # type: ignore
except: # NOQA
CParser = CEmitter = None
# import io
# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a
# subset of abbreviations, which should be all caps according to PEP8
class YAML:
def __init__(self, *, typ=None, pure=False, output=None, plug_ins=None): # input=None,
# type: (Any, Optional[Text], Any, Any, Any) -> None
"""
typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default)
'safe' -> SafeLoader/SafeDumper,
'unsafe' -> normal/unsafe Loader/Dumper
'base' -> baseloader
pure: if True only use Python modules
input/output: needed to work as context manager
plug_ins: a list of plug-in files
"""
self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ])
self.pure = pure
# self._input = input
self._output = output
self._context_manager = None # type: Any
self.plug_ins = [] # type: List[Any]
for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
file_name = pu.replace(os.sep, '.')
self.plug_ins.append(import_module(file_name))
self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any
self.allow_unicode = True
self.Reader = None # type: Any
self.Representer = None # type: Any
self.Constructor = None # type: Any
self.Scanner = None # type: Any
self.Serializer = None # type: Any
self.default_flow_style = None # type: Any
self.comment_handling = None
typ_found = 1
setup_rt = False
if 'rt' in self.typ:
setup_rt = True
elif 'safe' in self.typ:
self.Emitter = (
ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
)
self.Representer = ruamel.yaml.representer.SafeRepresenter
self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.SafeConstructor
elif 'base' in self.typ:
self.Emitter = ruamel.yaml.emitter.Emitter
self.Representer = ruamel.yaml.representer.BaseRepresenter
self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.BaseConstructor
elif 'unsafe' in self.typ:
self.Emitter = (
ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
)
self.Representer = ruamel.yaml.representer.Representer
self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.Constructor
elif 'rtsc' in self.typ:
self.default_flow_style = False
# no optimized rt-dumper yet
self.Emitter = ruamel.yaml.emitter.Emitter
self.Serializer = ruamel.yaml.serializer.Serializer
self.Representer = ruamel.yaml.representer.RoundTripRepresenter
self.Scanner = ruamel.yaml.scanner.RoundTripScannerSC
# no optimized rt-parser yet
self.Parser = ruamel.yaml.parser.RoundTripParserSC
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
self.comment_handling = C_PRE
else:
setup_rt = True
typ_found = 0
if setup_rt:
self.default_flow_style = False
# no optimized rt-dumper yet
self.Emitter = ruamel.yaml.emitter.Emitter
self.Serializer = ruamel.yaml.serializer.Serializer
self.Representer = ruamel.yaml.representer.RoundTripRepresenter
self.Scanner = ruamel.yaml.scanner.RoundTripScanner
# no optimized rt-parser yet
self.Parser = ruamel.yaml.parser.RoundTripParser
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
del setup_rt
self.stream = None
self.canonical = None
self.old_indent = None
self.width = None
self.line_break = None
self.map_indent = None
self.sequence_indent = None
self.sequence_dash_offset = 0
self.compact_seq_seq = None
self.compact_seq_map = None
self.sort_base_mapping_type_on_output = None # default: sort
self.top_level_colon_align = None
self.prefix_colon = None
self.version = None
self.preserve_quotes = None
self.allow_duplicate_keys = False # duplicate keys in map, set
self.encoding = 'utf-8'
self.explicit_start = None
self.explicit_end = None
self.tags = None
self.default_style = None
self.top_level_block_style_scalar_no_indent_error_1_1 = False
# directives end indicator with single scalar document
self.scalar_after_indicator = None
# [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}]
self.brace_single_entry_mapping_in_flow_sequence = False
for module in self.plug_ins:
if getattr(module, 'typ', None) in self.typ:
typ_found += 1
module.init_typ(self)
break
if typ_found == 0:
raise NotImplementedError(
'typ "{}"not recognised (need to install plug-in?)'.format(self.typ)
)
@property
def reader(self):
    # type: () -> Any
    """Lazily create and cache the Reader instance on first access."""
    if not hasattr(self, '_reader'):
        self._reader = self.Reader(None, loader=self)
    return self._reader
@property
def scanner(self):
    # type: () -> Any
    """Lazily create and cache the Scanner instance on first access."""
    if not hasattr(self, '_scanner'):
        self._scanner = self.Scanner(loader=self)
    return self._scanner
@property
def parser(self):
    # type: () -> Any
    """Lazily create the parser; a CParser needs the stream, so None is
    returned until the stream has been provided."""
    # cache attribute name ('_parser') is derived from this property's own name
    attr = '_' + sys._getframe().f_code.co_name
    if not hasattr(self, attr):
        if self.Parser is not CParser:
            # pure-Python parser: can be built before the stream is known
            setattr(self, attr, self.Parser(loader=self))
        else:
            if getattr(self, '_stream', None) is None:
                # wait for the stream
                return None
            else:
                # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'):
                #     # pathlib.Path() instance
                #     setattr(self, attr, CParser(self._stream))
                # else:
                setattr(self, attr, CParser(self._stream))
                # self._parser = self._composer = self
                # nprint('scanner', self.loader.scanner)
    return getattr(self, attr)
@property
def composer(self):
    # type: () -> Any
    """Lazily create and cache the Composer instance on first access."""
    # '_composer' matches the name sys._getframe() would have produced here
    if not hasattr(self, '_composer'):
        self._composer = self.Composer(loader=self)
    return self._composer
@property
def constructor(self):
    # type: () -> Any
    """Lazily create, configure and cache the Constructor instance."""
    if not hasattr(self, '_constructor'):
        cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self)
        # duplicate-key policy is forwarded from the YAML() instance
        cnst.allow_duplicate_keys = self.allow_duplicate_keys
        self._constructor = cnst
    return self._constructor
@property
def resolver(self):
    # type: () -> Any
    """Lazily create and cache the Resolver instance on first access."""
    if not hasattr(self, '_resolver'):
        self._resolver = self.Resolver(version=self.version, loader=self)
    return self._resolver
@property
def emitter(self):
    # type: () -> Any
    """Lazily create and cache the emitter; None is returned while a
    CEmitter is configured (the C emitter is not cached by this property)."""
    # cache attribute name ('_emitter') is derived from this property's own name
    attr = '_' + sys._getframe().f_code.co_name
    if not hasattr(self, attr):
        if self.Emitter is not CEmitter:
            _emitter = self.Emitter(
                None,
                canonical=self.canonical,
                indent=self.old_indent,
                width=self.width,
                allow_unicode=self.allow_unicode,
                line_break=self.line_break,
                prefix_colon=self.prefix_colon,
                brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence,  # NOQA
                dumper=self,
            )
            setattr(self, attr, _emitter)
            # copy any user-set indent/compaction overrides onto the new emitter
            if self.map_indent is not None:
                _emitter.best_map_indent = self.map_indent
            if self.sequence_indent is not None:
                _emitter.best_sequence_indent = self.sequence_indent
            if self.sequence_dash_offset is not None:
                _emitter.sequence_dash_offset = self.sequence_dash_offset
                # _emitter.block_seq_indent = self.sequence_dash_offset
            if self.compact_seq_seq is not None:
                _emitter.compact_seq_seq = self.compact_seq_seq
            if self.compact_seq_map is not None:
                _emitter.compact_seq_map = self.compact_seq_map
        else:
            if getattr(self, '_stream', None) is None:
                # wait for the stream
                return None
            # NOTE(review): the CEmitter branch always yields None here; the
            # combined C dumper is built in get_serializer_representer_emitter
            return None
    return getattr(self, attr)
@property
def serializer(self):
    # type: () -> Any
    """Lazily create and cache the Serializer, configured from the
    document-level options of this YAML() instance."""
    if not hasattr(self, '_serializer'):
        self._serializer = self.Serializer(
            encoding=self.encoding,
            explicit_start=self.explicit_start,
            explicit_end=self.explicit_end,
            version=self.version,
            tags=self.tags,
            dumper=self,
        )
    return self._serializer
@property
def representer(self):
    # type: () -> Any
    """Lazily create and cache the Representer, forwarding the configured
    default style/flow-style and the optional mapping-sort override."""
    if not hasattr(self, '_representer'):
        repres = self.Representer(
            default_style=self.default_style,
            default_flow_style=self.default_flow_style,
            dumper=self,
        )
        if self.sort_base_mapping_type_on_output is not None:
            repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output
        self._representer = repres
    return self._representer
def scan(self, stream):
    # type: (StreamTextType) -> Any
    """
    Scan a YAML stream and produce scanning tokens.

    *stream* may be an open stream or a pathlib.Path-like object
    (anything with .open() but no .read()).
    """
    if not hasattr(stream, 'read') and hasattr(stream, 'open'):
        # pathlib.Path() instance; a plain 'return' inside this generator
        # would only set the StopIteration value and yield no tokens, so
        # re-yield from the recursive call while the file is open
        with stream.open('rb') as fp:
            for token in self.scan(fp):
                yield token
            return
    _, parser = self.get_constructor_parser(stream)
    try:
        while self.scanner.check_token():
            yield self.scanner.get_token()
    finally:
        parser.dispose()
        # the pure reader/scanner keep state between runs; reset when present
        try:
            self._reader.reset_reader()
        except AttributeError:
            pass
        try:
            self._scanner.reset_scanner()
        except AttributeError:
            pass
def parse(self, stream):
    # type: (StreamTextType) -> Any
    """
    Parse a YAML stream and produce parsing events.

    *stream* may be an open stream or a pathlib.Path-like object
    (anything with .open() but no .read()).
    """
    if not hasattr(stream, 'read') and hasattr(stream, 'open'):
        # pathlib.Path() instance; a plain 'return' inside this generator
        # would only set the StopIteration value and yield no events, so
        # re-yield from the recursive call while the file is open
        with stream.open('rb') as fp:
            for event in self.parse(fp):
                yield event
            return
    _, parser = self.get_constructor_parser(stream)
    try:
        while parser.check_event():
            yield parser.get_event()
    finally:
        parser.dispose()
        # the pure reader/scanner keep state between runs; reset when present
        try:
            self._reader.reset_reader()
        except AttributeError:
            pass
        try:
            self._scanner.reset_scanner()
        except AttributeError:
            pass
def compose(self, stream):
    # type: (Union[Path, StreamTextType]) -> Any
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.
    """
    if not hasattr(stream, 'read') and hasattr(stream, 'open'):
        # pathlib.Path() instance; recurse into compose() — the previous
        # self.load(fp) returned constructed Python data instead of the
        # composed node tree this method promises
        with stream.open('rb') as fp:
            return self.compose(fp)
    constructor, parser = self.get_constructor_parser(stream)
    try:
        return constructor.composer.get_single_node()
    finally:
        parser.dispose()
        # the pure reader/scanner keep state between runs; reset when present
        try:
            self._reader.reset_reader()
        except AttributeError:
            pass
        try:
            self._scanner.reset_scanner()
        except AttributeError:
            pass
def compose_all(self, stream):
    # type: (Union[Path, StreamTextType]) -> Any
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    if not hasattr(stream, 'read') and hasattr(stream, 'open'):
        # pathlib.Path() instance, accepted here for consistency with the
        # sibling scan()/parse()/compose() methods
        with stream.open('rb') as fp:
            for node in self.compose_all(fp):
                yield node
            return
    constructor, parser = self.get_constructor_parser(stream)
    try:
        while constructor.composer.check_node():
            yield constructor.composer.get_node()
    finally:
        parser.dispose()
        # the pure reader/scanner keep state between runs; reset when present
        try:
            self._reader.reset_reader()
        except AttributeError:
            pass
        try:
            self._scanner.reset_scanner()
        except AttributeError:
            pass
# separate output resolver?
# def load(self, stream=None):
# if self._context_manager:
# if not self._input:
# raise TypeError("Missing input stream while dumping from context manager")
# for data in self._context_manager.load():
# yield data
# return
# if stream is None:
# raise TypeError("Need a stream argument when not loading from context manager")
# return self.load_one(stream)
def load(self, stream):
    # type: (Union[Path, StreamTextType]) -> Any
    """
    Construct and return the single document contained in *stream*.

    At this point either the non-pure Parser (which has its own reader and
    scanner) or the pure Parser is in effect; get_constructor_parser()
    fills in Reader/Scanner defaults for the pure Parser and falls back to
    the pure Parser whenever a custom Reader or Scanner was set.
    """
    if not hasattr(stream, 'read') and hasattr(stream, 'open'):
        # pathlib.Path() instance
        with stream.open('rb') as fp:
            return self.load(fp)
    cnstr, prsr = self.get_constructor_parser(stream)
    try:
        return cnstr.get_single_data()
    finally:
        prsr.dispose()
        # reset the pure reader/scanner state when those components exist
        try:
            self._reader.reset_reader()
        except AttributeError:
            pass
        try:
            self._scanner.reset_scanner()
        except AttributeError:
            pass
def load_all(self, stream):
    # type: (Union[Path, StreamTextType]) -> Any
    """Construct and yield every document contained in *stream*."""
    if not hasattr(stream, 'read') and hasattr(stream, 'open'):
        # pathlib.Path() instance (note: opened in text mode here)
        with stream.open('r') as fp:
            for document in self.load_all(fp):
                yield document
            return
    cnstr, prsr = self.get_constructor_parser(stream)
    try:
        while cnstr.check_data():
            yield cnstr.get_data()
    finally:
        prsr.dispose()
        # reset the pure reader/scanner state when those components exist
        try:
            self._reader.reset_reader()
        except AttributeError:
            pass
        try:
            self._scanner.reset_scanner()
        except AttributeError:
            pass
def get_constructor_parser(self, stream):
    # type: (StreamTextType) -> Any
    """
    the old cyaml needs special setup, and therefore the stream

    Returns a (constructor, parser) pair; with the combined C loader a
    single XLoader object serves as both.
    """
    if self.Parser is not CParser:
        # pure-Python path: fill in Reader/Scanner defaults and attach stream
        if self.Reader is None:
            self.Reader = ruamel.yaml.reader.Reader
        if self.Scanner is None:
            self.Scanner = ruamel.yaml.scanner.Scanner
        self.reader.stream = stream
    else:
        if self.Reader is not None:
            # a custom Reader cannot be combined with CParser: fall back to
            # the pure Parser (and default the Scanner)
            if self.Scanner is None:
                self.Scanner = ruamel.yaml.scanner.Scanner
            self.Parser = ruamel.yaml.parser.Parser
            self.reader.stream = stream
        elif self.Scanner is not None:
            # likewise a custom Scanner forces the pure Parser
            if self.Reader is None:
                self.Reader = ruamel.yaml.reader.Reader
            self.Parser = ruamel.yaml.parser.Parser
            self.reader.stream = stream
        else:
            # combined C level reader>scanner>parser
            # does some calls to the resolver, e.g. BaseResolver.descend_resolver
            # if you just initialise the CParser, to much of resolver.py
            # is actually used
            rslvr = self.Resolver
            # if rslvr is ruamel.yaml.resolver.VersionedResolver:
            #     rslvr = ruamel.yaml.resolver.Resolver

            class XLoader(self.Parser, self.Constructor, rslvr):  # type: ignore
                # ad-hoc loader: CParser + constructor + resolver in one object
                def __init__(selfx, stream, version=self.version, preserve_quotes=None):
                    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None  # NOQA
                    CParser.__init__(selfx, stream)
                    selfx._parser = selfx._composer = selfx
                    self.Constructor.__init__(selfx, loader=selfx)
                    selfx.allow_duplicate_keys = self.allow_duplicate_keys
                    rslvr.__init__(selfx, version=version, loadumper=selfx)

            self._stream = stream
            loader = XLoader(stream)
            return loader, loader
    return self.constructor, self.parser
def emit(self, events, stream):
    # type: (Any, Any) -> None
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    _, _, emt = self.get_serializer_representer_emitter(stream, None)
    try:
        for evt in events:
            emt.emit(evt)
    finally:
        try:
            emt.dispose()
        except AttributeError:
            raise
def serialize(self, node, stream):
    # type: (Any, Optional[StreamType]) -> Any
    """
    Serialize a single representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    # single-node convenience wrapper around serialize_all()
    self.serialize_all([node], stream)
def serialize_all(self, nodes, stream):
    # type: (Any, Optional[StreamType]) -> Any
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    ser, _, emt = self.get_serializer_representer_emitter(stream, None)
    try:
        ser.open()
        for nd in nodes:
            ser.serialize(nd)
        ser.close()
    finally:
        try:
            emt.dispose()
        except AttributeError:
            raise
def dump(self, data, stream=None, *, transform=None):
    # type: (Any, Union[Path, StreamType], Any, Any) -> Any
    """
    Serialize *data* to YAML.  Outside a context manager *stream* is
    required; inside one the output stream was given up front and the
    *transform* keyword is not supported.
    """
    if not self._context_manager:
        # old style
        if stream is None:
            raise TypeError('Need a stream argument when not dumping from context manager')
        return self.dump_all([data], stream, transform=transform)
    if not self._output:
        raise TypeError('Missing output stream while dumping from context manager')
    if transform is not None:
        raise TypeError(
            '{}.dump() in the context manager cannot have transform keyword '
            ''.format(self.__class__.__name__)
        )
    self._context_manager.dump(data)
def dump_all(self, documents, stream, *, transform=None):
    # type: (Any, Union[Path, StreamType], Any) -> Any
    """Serialize every document in *documents* to *stream* using a
    temporary YAMLContextManager; not allowed while one is already active."""
    if self._context_manager:
        raise NotImplementedError
    self._output = stream
    cm = YAMLContextManager(self, transform=transform)
    self._context_manager = cm
    for document in documents:
        cm.dump(document)
    cm.teardown_output()
    # restore the instance to its non-context-manager state
    self._output = None
    self._context_manager = None
def Xdump_all(self, documents, stream, *, transform=None):
    # type: (Any, Any, Any) -> Any
    """
    Serialize a sequence of Python objects into a YAML stream.

    When *transform* is given the output is buffered, passed through
    transform() as one string and then written to the original stream.
    """
    if not hasattr(stream, 'write') and hasattr(stream, 'open'):
        # pathlib.Path() instance
        with stream.open('w') as fp:
            return self.dump_all(documents, fp, transform=transform)
    # The stream should have the methods `write` and possibly `flush`.
    if self.top_level_colon_align is True:
        # align on the widest top-level key of the first document
        tlca = max([len(str(x)) for x in documents[0]])  # type: Any
    else:
        tlca = self.top_level_colon_align
    if transform is not None:
        # remember the real stream and dump into an in-memory buffer first
        fstream = stream
        if self.encoding is None:
            stream = StringIO()
        else:
            stream = BytesIO()
    serializer, representer, emitter = self.get_serializer_representer_emitter(
        stream, tlca
    )
    try:
        self.serializer.open()
        for data in documents:
            try:
                self.representer.represent(data)
            except AttributeError:
                # nprint(dir(dumper._representer))
                raise
        self.serializer.close()
    finally:
        try:
            self.emitter.dispose()
        except AttributeError:
            raise
        # self.dumper.dispose()  # cyaml
        # drop the cached components so the next dump builds fresh ones
        delattr(self, '_serializer')
        delattr(self, '_emitter')
    if transform:
        val = stream.getvalue()
        if self.encoding:
            val = val.decode(self.encoding)
        if fstream is None:
            transform(val)
        else:
            fstream.write(transform(val))
    return None
def get_serializer_representer_emitter(self, stream, tlca):
    # type: (StreamType, Any) -> Any
    """Return a (serializer, representer, emitter) triple ready for
    *stream*; with the C emitter a single XDumper object serves as all
    three."""
    # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
    if self.Emitter is not CEmitter:
        # pure-Python emitter: default the Serializer and attach the stream
        if self.Serializer is None:
            self.Serializer = ruamel.yaml.serializer.Serializer
        self.emitter.stream = stream
        self.emitter.top_level_colon_align = tlca
        if self.scalar_after_indicator is not None:
            self.emitter.scalar_after_indicator = self.scalar_after_indicator
        return self.serializer, self.representer, self.emitter
    if self.Serializer is not None:
        # cannot set serializer with CEmitter
        self.Emitter = ruamel.yaml.emitter.Emitter
        self.emitter.stream = stream
        self.emitter.top_level_colon_align = tlca
        if self.scalar_after_indicator is not None:
            self.emitter.scalar_after_indicator = self.scalar_after_indicator
        return self.serializer, self.representer, self.emitter
    # C routines
    rslvr = (
        ruamel.yaml.resolver.BaseResolver
        if 'base' in self.typ
        else ruamel.yaml.resolver.Resolver
    )

    class XDumper(CEmitter, self.Representer, rslvr):  # type: ignore
        # ad-hoc dumper: CEmitter + representer + resolver in one object
        def __init__(
            selfx,
            stream,
            default_style=None,
            default_flow_style=None,
            canonical=None,
            indent=None,
            width=None,
            allow_unicode=None,
            line_break=None,
            encoding=None,
            explicit_start=None,
            explicit_end=None,
            version=None,
            tags=None,
            block_seq_indent=None,
            top_level_colon_align=None,
            prefix_colon=None,
        ):
            # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
            CEmitter.__init__(
                selfx,
                stream,
                canonical=canonical,
                indent=indent,
                width=width,
                encoding=encoding,
                allow_unicode=allow_unicode,
                line_break=line_break,
                explicit_start=explicit_start,
                explicit_end=explicit_end,
                version=version,
                tags=tags,
            )
            selfx._emitter = selfx._serializer = selfx._representer = selfx
            self.Representer.__init__(
                selfx, default_style=default_style, default_flow_style=default_flow_style
            )
            rslvr.__init__(selfx)

    self._stream = stream
    dumper = XDumper(
        stream,
        default_style=self.default_style,
        default_flow_style=self.default_flow_style,
        canonical=self.canonical,
        indent=self.old_indent,
        width=self.width,
        allow_unicode=self.allow_unicode,
        line_break=self.line_break,
        explicit_start=self.explicit_start,
        explicit_end=self.explicit_end,
        version=self.version,
        tags=self.tags,
    )
    # NOTE(review): only _emitter/_serializer are cached on self here (not
    # _representer) — confirm whether that asymmetry is intentional
    self._emitter = self._serializer = dumper
    return dumper, dumper, dumper
# basic types
def map(self, **kw):
    # type: (Any) -> Any
    """Return a mapping matching this instance's typ: a CommentedMap for
    round-trip, a plain dict otherwise."""
    if 'rt' in self.typ:
        return CommentedMap(**kw)
    return dict(**kw)
def seq(self, *args):
    # type: (Any) -> Any
    """Return a sequence matching this instance's typ: a CommentedSeq for
    round-trip, a plain list otherwise."""
    if 'rt' in self.typ:
        return CommentedSeq(*args)
    return list(*args)
# helpers
def official_plug_ins(self):
    # type: () -> Any
    """Return the list of plug-in module paths shipped next to this package.

    If __file__ is unavailable (e.g. single-file installers that do not
    properly emulate a file system, issue 324) no plug-ins are found; any
    packaged plug-in can then be passed explicitly:
      yaml = ruamel.yaml.YAML(plug_ins=['ruamel/yaml/jinja2/__plug_in__'])
    """
    try:
        base_dir = os.path.dirname(__file__)
    except NameError:
        return []
    grandparent = os.path.dirname(os.path.dirname(base_dir))
    pattern = base_dir + '/*/__plug_in__.py'
    # strip the grandparent prefix (and '.py' suffix) to get import-style paths
    return [p.replace(grandparent, "")[1:-3] for p in glob.glob(pattern)]
def register_class(self, cls):
    # type:(Any) -> Any
    """
    register a class for dumping/loading
    - the tag is cls.yaml_tag when present, '!' + the class name otherwise
    - cls.to_yaml/cls.from_yaml are used when defined; otherwise default
      routines dump/load the instance attributes as a mapping
    """
    tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
    try:
        self.representer.add_representer(cls, cls.to_yaml)
    except AttributeError:

        def _default_to_yaml(representer, data):
            # type: (Any, Any) -> Any
            return representer.represent_yaml_object(
                tag, data, cls, flow_style=representer.default_flow_style
            )

        self.representer.add_representer(cls, _default_to_yaml)
    try:
        self.constructor.add_constructor(tag, cls.from_yaml)
    except AttributeError:

        def _default_from_yaml(constructor, node):
            # type: (Any, Any) -> Any
            return constructor.construct_yaml_object(node, cls)

        self.constructor.add_constructor(tag, _default_from_yaml)
    return cls
# ### context manager
def __enter__(self):
    # type: () -> Any
    """Enter context-manager mode: subsequent dump() calls are routed
    through a fresh YAMLContextManager."""
    cm = YAMLContextManager(self)
    self._context_manager = cm
    return self
def __exit__(self, typ, value, traceback):
    # type: (Any, Any, Any) -> None
    """Tear down the output side of the context manager (the exception,
    if any, is not suppressed)."""
    if typ:
        nprint('typ', typ)
    self._context_manager.teardown_output()
    # self._context_manager.teardown_input()
    self._context_manager = None
# ### backwards compatibility
def _indent(self, mapping=None, sequence=None, offset=None):
# type: (Any, Any, Any) -> None
if mapping is not None:
self.map_indent = mapping
if sequence is not None:
self.sequence_indent = sequence
if offset is not None:
self.sequence_dash_offset = offset
@property
def indent(self):
    # type: () -> Any
    """Backwards-compatible access: reading returns the _indent() helper,
    assigning sets the legacy scalar indent value."""
    return self._indent

@indent.setter
def indent(self, val):
    # type: (Any) -> None
    self.old_indent = val
@property
def block_seq_indent(self):
    # type: () -> Any
    """Backwards-compatible alias for sequence_dash_offset."""
    return self.sequence_dash_offset

@block_seq_indent.setter
def block_seq_indent(self, val):
    # type: (Any) -> None
    self.sequence_dash_offset = val
def compact(self, seq_seq=None, seq_map=None):
    # type: (Any, Any) -> None
    """Set the compact-output flags for seq-in-seq and seq-in-map nesting
    (note: both attributes are always overwritten, even with None)."""
    self.compact_seq_seq = seq_seq
    self.compact_seq_map = seq_map
class YAMLContextManager:
    """Manages the output side of a YAML() instance used as a context
    manager: opens Path outputs, buffers when a transform is set, and
    tears down serializer/emitter state afterwards."""

    def __init__(self, yaml, transform=None):
        # type: (Any, Any) -> None  # used to be: (Any, Optional[Callable]) -> None
        self._yaml = yaml
        self._output_inited = False
        self._output_path = None
        self._output = self._yaml._output
        self._transform = transform

        # self._input_inited = False
        # self._input = input
        # self._input_path = None
        # self._transform = yaml.transform
        # self._fstream = None

        if not hasattr(self._output, 'write') and hasattr(self._output, 'open'):
            # pathlib.Path() instance, open with the same mode
            self._output_path = self._output
            self._output = self._output_path.open('w')

        # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
        # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
        #    # pathlib.Path() instance, open with the same mode
        #    self._input_path = self._input
        #    self._input = self._input_path.open('r')

        if self._transform is not None:
            # dump into an in-memory buffer; the real stream is written in
            # teardown_output() after the transform has been applied
            self._fstream = self._output
            if self._yaml.encoding is None:
                self._output = StringIO()
            else:
                self._output = BytesIO()

    def teardown_output(self):
        # type: () -> None
        """Close the serializer, dispose the emitter, apply the transform
        (if any) and close a Path-opened output file."""
        if self._output_inited:
            self._yaml.serializer.close()
        else:
            # nothing was ever dumped: no serializer/emitter to tear down
            return
        try:
            self._yaml.emitter.dispose()
        except AttributeError:
            raise
        # self.dumper.dispose()  # cyaml
        try:
            # drop cached components so the next dump builds fresh ones
            delattr(self._yaml, '_serializer')
            delattr(self._yaml, '_emitter')
        except AttributeError:
            raise
        if self._transform:
            val = self._output.getvalue()
            if self._yaml.encoding:
                val = val.decode(self._yaml.encoding)
            if self._fstream is None:
                self._transform(val)
            else:
                self._fstream.write(self._transform(val))
                self._fstream.flush()
                self._output = self._fstream  # maybe not necessary
        if self._output_path is not None:
            self._output.close()

    def init_output(self, first_data):
        # type: (Any) -> None
        """Lazily set up serializer/representer/emitter on the first dump;
        *first_data* is only used for top-level colon alignment."""
        if self._yaml.top_level_colon_align is True:
            tlca = max([len(str(x)) for x in first_data])  # type: Any
        else:
            tlca = self._yaml.top_level_colon_align
        self._yaml.get_serializer_representer_emitter(self._output, tlca)
        self._yaml.serializer.open()
        self._output_inited = True

    def dump(self, data):
        # type: (Any) -> None
        """Represent one document, initializing the output on first use."""
        if not self._output_inited:
            self.init_output(data)
        try:
            self._yaml.representer.represent(data)
        except AttributeError:
            # nprint(dir(dumper._representer))
            raise

    # def teardown_input(self):
    #     pass
    #
    # def init_input(self):
    #     # set the constructor and parser on YAML() instance
    #     self._yaml.get_constructor_parser(stream)
    #
    # def load(self):
    #     if not self._input_inited:
    #         self.init_input()
    #     try:
    #         while self._yaml.constructor.check_data():
    #             yield self._yaml.constructor.get_data()
    #     finally:
    #         parser.dispose()
    #         try:
    #             self._reader.reset_reader()  # type: ignore
    #         except AttributeError:
    #             pass
    #         try:
    #             self._scanner.reset_scanner()  # type: ignore
    #         except AttributeError:
    #             pass
def yaml_object(yml):
    # type: (Any) -> Any
    """Class decorator that registers dump/load handlers on *yml*.

    The tag is taken from the class attribute yaml_tag ('!' + class name
    when absent).  to_yaml/from_yaml methods are used when defined;
    otherwise default mapping-based routines are registered.
    """

    def yo_deco(cls):
        # type: (Any) -> Any
        tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
        try:
            yml.representer.add_representer(cls, cls.to_yaml)
        except AttributeError:

            def _dump_default(representer, data):
                # type: (Any, Any) -> Any
                return representer.represent_yaml_object(
                    tag, data, cls, flow_style=representer.default_flow_style
                )

            yml.representer.add_representer(cls, _dump_default)
        try:
            yml.constructor.add_constructor(tag, cls.from_yaml)
        except AttributeError:

            def _load_default(constructor, node):
                # type: (Any, Any) -> Any
                return constructor.construct_yaml_object(node, cls)

            yml.constructor.add_constructor(tag, _load_default)
        return cls

    return yo_deco
########################################################################################
def warn_deprecation(fun, method, arg=''):
    # type: (Any, Any, str) -> None
    """Emit a PendingDeprecationWarning steering callers from the old
    module-level *fun* towards the YAML() instance *method*."""
    from ruamel.yaml.compat import _F

    message = _F(
        '\n{fun} will be removed, use\n\n yaml=YAML({arg})\n yaml.{method}(...)\n\ninstead',  # NOQA
        fun=fun,
        method=method,
        arg=arg,
    )
    warnings.warn(
        message,
        PendingDeprecationWarning,  # this will show when testing with pytest/tox
        stacklevel=3,
    )
########################################################################################
def scan(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Deprecated: scan *stream* with *Loader* and yield scanner tokens.
    """
    warn_deprecation('scan', 'scan', arg="typ='unsafe', pure=True")
    ldr = Loader(stream)
    try:
        while ldr.scanner.check_token():
            yield ldr.scanner.get_token()
    finally:
        ldr._parser.dispose()
def parse(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Deprecated: parse *stream* with *Loader* and yield parsing events.
    """
    warn_deprecation('parse', 'parse', arg="typ='unsafe', pure=True")
    ldr = Loader(stream)
    try:
        while ldr._parser.check_event():
            yield ldr._parser.get_event()
    finally:
        ldr._parser.dispose()
def compose(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Deprecated: parse the first YAML document in *stream* and return the
    corresponding representation tree.
    """
    warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True")
    ldr = Loader(stream)
    try:
        return ldr.get_single_node()
    finally:
        ldr.dispose()
def compose_all(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    # the deprecation notice used to advertise 'compose' (copied from the
    # sibling above); this function is compose_all
    warn_deprecation('compose_all', 'compose_all', arg="typ='unsafe', pure=True")
    loader = Loader(stream)
    try:
        while loader.check_node():
            yield loader._composer.get_node()
    finally:
        loader._parser.dispose()
def load(stream, Loader=None, version=None, preserve_quotes=None):
    # type: (Any, Any, Any, Any) -> Any
    """
    Deprecated: parse the first YAML document in *stream* into the
    corresponding Python object.  Without an explicit Loader the unsafe
    loader is used (with a warning).
    """
    warn_deprecation('load', 'load', arg="typ='unsafe', pure=True")
    if Loader is None:
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    ldr = Loader(stream, version, preserve_quotes=preserve_quotes)  # type: Any
    try:
        return ldr._constructor.get_single_data()
    finally:
        ldr._parser.dispose()
        try:
            ldr._reader.reset_reader()
        except AttributeError:
            pass
        try:
            ldr._scanner.reset_scanner()
        except AttributeError:
            pass
def load_all(stream, Loader=None, version=None, preserve_quotes=None):
    # type: (Any, Any, Any, Any) -> Any  # NOQA
    """
    Deprecated: parse every YAML document in *stream* into Python objects.
    Without an explicit Loader the unsafe loader is used (with a warning).
    """
    warn_deprecation('load_all', 'load_all', arg="typ='unsafe', pure=True")
    if Loader is None:
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    ldr = Loader(stream, version, preserve_quotes=preserve_quotes)  # type: Any
    try:
        while ldr._constructor.check_data():
            yield ldr._constructor.get_data()
    finally:
        ldr._parser.dispose()
        try:
            ldr._reader.reset_reader()
        except AttributeError:
            pass
        try:
            ldr._scanner.reset_scanner()
        except AttributeError:
            pass
def safe_load(stream, version=None):
    # type: (StreamTextType, Optional[VersionType]) -> Any
    """
    Deprecated: parse the first YAML document in *stream* into a Python
    object, resolving only basic YAML tags.
    """
    warn_deprecation('safe_load', 'load', arg="typ='safe', pure=True")
    return load(stream, SafeLoader, version)
def safe_load_all(stream, version=None):
    # type: (StreamTextType, Optional[VersionType]) -> Any
    """
    Deprecated: parse every YAML document in *stream* into Python objects,
    resolving only basic YAML tags.
    """
    warn_deprecation('safe_load_all', 'load_all', arg="typ='safe', pure=True")
    return load_all(stream, SafeLoader, version)
def round_trip_load(stream, version=None, preserve_quotes=None):
    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve only basic YAML tags.
    """
    # the warning previously named 'round_trip_load_all' (copy-paste from
    # the sibling below); this function is round_trip_load
    warn_deprecation('round_trip_load', 'load')
    return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
def round_trip_load_all(stream, version=None, preserve_quotes=None):
    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
    """
    Deprecated: parse every YAML document in *stream* into Python objects
    with the round-trip loader, resolving only basic YAML tags.
    """
    warn_deprecation('round_trip_load_all', 'load_all')
    return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
def emit(
    events,
    stream=None,
    Dumper=Dumper,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
):
    # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA
    """
    Deprecated: emit YAML parsing events into *stream*; when *stream* is
    None the produced string is returned instead.
    """
    warn_deprecation('emit', 'emit', arg="typ='safe', pure=True")
    capture = None
    if stream is None:
        stream = StringIO()
        capture = stream.getvalue
    dmpr = Dumper(
        stream,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
    )
    try:
        for event in events:
            dmpr.emit(event)
    finally:
        try:
            dmpr._emitter.dispose()
        except AttributeError:
            raise
            dmpr.dispose()  # cyaml
    if capture is not None:
        return capture()
# default encoding for the deprecated module-level serialize/dump helpers;
# None makes them build str output (StringIO) instead of bytes (BytesIO)
enc = None
def serialize_all(
    nodes,
    stream=None,
    Dumper=Dumper,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA
    """
    Deprecated: serialize a sequence of representation trees into a YAML
    stream; when *stream* is None the produced string is returned instead.
    """
    warn_deprecation('serialize_all', 'serialize_all', arg="typ='safe', pure=True")
    capture = None
    if stream is None:
        stream = StringIO() if encoding is None else BytesIO()
        capture = stream.getvalue
    dmpr = Dumper(
        stream,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        version=version,
        tags=tags,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
    )
    try:
        dmpr._serializer.open()
        for node in nodes:
            dmpr.serialize(node)
        dmpr._serializer.close()
    finally:
        try:
            dmpr._emitter.dispose()
        except AttributeError:
            raise
            dmpr.dispose()  # cyaml
    if capture is not None:
        return capture()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    # type: (Any, Optional[StreamType], Any, Any) -> Any
    """
    Deprecated: serialize a single representation tree into a YAML stream;
    when *stream* is None the produced string is returned instead.
    """
    warn_deprecation('serialize', 'serialize', arg="typ='safe', pure=True")
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(
    documents,
    stream=None,
    Dumper=Dumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
    top_level_colon_align=None,
    prefix_colon=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Any # NOQA
    """
    Deprecated: serialize a sequence of Python objects into a YAML stream;
    when *stream* is None the produced string is returned instead.
    """
    warn_deprecation('dump_all', 'dump_all', arg="typ='unsafe', pure=True")
    capture = None
    if top_level_colon_align is True:
        # align values on the widest top-level key of the first document
        top_level_colon_align = max([len(str(x)) for x in documents[0]])
    if stream is None:
        stream = StringIO() if encoding is None else BytesIO()
        capture = stream.getvalue
    dmpr = Dumper(
        stream,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
        top_level_colon_align=top_level_colon_align,
        prefix_colon=prefix_colon,
    )
    try:
        dmpr._serializer.open()
        for doc in documents:
            try:
                dmpr._representer.represent(doc)
            except AttributeError:
                # nprint(dir(dumper._representer))
                raise
        dmpr._serializer.close()
    finally:
        try:
            dmpr._emitter.dispose()
        except AttributeError:
            raise
            dmpr.dispose()  # cyaml
    if capture is not None:
        return capture()
    return None
def dump(
    data,
    stream=None,
    Dumper=Dumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[Any] # NOQA
    """
    Deprecated: serialize a single Python object into a YAML stream; when
    *stream* is None the produced string is returned instead.
    default_style ∈ None, '', '"', "'", '|', '>'
    """
    warn_deprecation('dump', 'dump', arg="typ='unsafe', pure=True")
    options = dict(
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
    )
    return dump_all([data], stream, Dumper=Dumper, **options)
def safe_dump_all(documents, stream=None, **kwds):
    # type: (Any, Optional[StreamType], Any) -> Optional[Any]
    """
    Deprecated: serialize a sequence of Python objects into a YAML stream
    producing only basic YAML tags; when *stream* is None the produced
    string is returned instead.
    """
    warn_deprecation('safe_dump_all', 'dump_all', arg="typ='safe', pure=True")
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
    # type: (Any, Optional[StreamType], Any) -> Optional[Any]
    """
    Deprecated: serialize a single Python object into a YAML stream
    producing only basic YAML tags; when *stream* is None the produced
    string is returned instead.
    """
    warn_deprecation('safe_dump', 'dump', arg="typ='safe', pure=True")
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
def round_trip_dump(
    data,
    stream=None,
    Dumper=RoundTripDumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
    top_level_colon_align=None,
    prefix_colon=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[Any] # NOQA
    """
    Serialize a Python object into a YAML stream using the
    round-trip dumper (deprecated front-end for dump_all).

    allow_unicode defaults to True for round-trip dumping.
    """
    if allow_unicode is None:
        allow_unicode = True
    warn_deprecation('round_trip_dump', 'dump')
    options = dict(
        Dumper=Dumper,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
        top_level_colon_align=top_level_colon_align,
        prefix_colon=prefix_colon,
    )
    return dump_all([data], stream, **options)
# Loader/Dumper are no longer composites, to get to the associated
# Resolver()/Representer(), etc., you need to instantiate the class
def add_implicit_resolver(
    tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
):
    # type: (Any, Any, Any, Any, Any, Any) -> None
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    if Loader is None and Dumper is None:
        # New-style API: register directly on the supplied resolver class.
        resolver.add_implicit_resolver(tag, regexp, first)
        return
    # Legacy API: register on the given Loader and/or Dumper class.
    if Loader:
        if hasattr(Loader, 'add_implicit_resolver'):
            Loader.add_implicit_resolver(tag, regexp, first)
        elif issubclass(
            Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
        ):
            # NOTE(review): the known legacy loaders fall back to the global
            # Resolver class, not the `resolver` argument — confirm intended.
            Resolver.add_implicit_resolver(tag, regexp, first)
        else:
            raise NotImplementedError
    if Dumper:
        if hasattr(Dumper, 'add_implicit_resolver'):
            Dumper.add_implicit_resolver(tag, regexp, first)
        elif issubclass(
            Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
        ):
            Resolver.add_implicit_resolver(tag, regexp, first)
        else:
            raise NotImplementedError
# this code currently not tested
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver):
    # type: (Any, Any, Any, Any, Any, Any) -> None
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    if Loader is None and Dumper is None:
        # New-style API: register directly on the supplied resolver class.
        resolver.add_path_resolver(tag, path, kind)
        return
    # Legacy API: register on the given Loader and/or Dumper class.
    if Loader:
        if hasattr(Loader, 'add_path_resolver'):
            Loader.add_path_resolver(tag, path, kind)
        elif issubclass(
            Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
        ):
            # NOTE(review): known legacy loaders register on the global
            # Resolver class rather than the `resolver` argument.
            Resolver.add_path_resolver(tag, path, kind)
        else:
            raise NotImplementedError
    if Dumper:
        if hasattr(Dumper, 'add_path_resolver'):
            Dumper.add_path_resolver(tag, path, kind)
        elif issubclass(
            Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
        ):
            Resolver.add_path_resolver(tag, path, kind)
        else:
            raise NotImplementedError
def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
    # type: (Any, Any, Any, Any) -> None
    """
    Add an object constructor for the given tag.
    object_constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        # New-style API: register on the supplied constructor class.
        constructor.add_constructor(tag, object_constructor)
    else:
        if hasattr(Loader, 'add_constructor'):
            Loader.add_constructor(tag, object_constructor)
            return
        # Legacy dispatch on the concrete loader class.
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, ruamel.yaml.loader.Loader):
            # BUG FIX: was `issubclass(Loader, Loader)` — the argument was
            # tested against itself, which is always True and made the
            # RoundTripLoader branch below unreachable. Now matches the
            # dispatch used in add_multi_constructor.
            Constructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_constructor(tag, object_constructor)
        else:
            raise NotImplementedError
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        # New-style API: register on the supplied constructor class.
        constructor.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        if hasattr(Loader, 'add_multi_constructor'):
            # BUG FIX: this branch was disabled with `if False and ...` and,
            # had it run, passed `constructor` instead of `multi_constructor`.
            # Re-enabled and corrected to mirror add_constructor.
            Loader.add_multi_constructor(tag_prefix, multi_constructor)
            return
        # Legacy dispatch on the concrete loader class.
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, ruamel.yaml.loader.Loader):
            Constructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        else:
            raise NotImplementedError
def add_representer(data_type, object_representer, Dumper=None, representer=Representer):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a representer for the given type.
    object_representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    if Dumper is None:
        # New-style API: register on the supplied representer class.
        representer.add_representer(data_type, object_representer)
    else:
        if hasattr(Dumper, 'add_representer'):
            Dumper.add_representer(data_type, object_representer)
            return
        # Legacy dispatch on the concrete dumper class.
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
            # BUG FIX: was `issubclass(Dumper, Dumper)` — the argument was
            # tested against itself, always True, which made the
            # RoundTripDumper branch below unreachable.
            Representer.add_representer(data_type, object_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_representer(data_type, object_representer)
        else:
            raise NotImplementedError
# this code currently not tested
def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a representer for the given type and its subtypes.
    multi_representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    if Dumper is None:
        # New-style API: register on the supplied representer class.
        representer.add_multi_representer(data_type, multi_representer)
    else:
        if hasattr(Dumper, 'add_multi_representer'):
            Dumper.add_multi_representer(data_type, multi_representer)
            return
        # Legacy dispatch on the concrete dumper class.
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
            # BUG FIX: was `issubclass(Dumper, Dumper)` — always True,
            # making the RoundTripDumper branch below unreachable.
            Representer.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
        else:
            raise NotImplementedError
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.

    Automatically registers a subclass's from_yaml constructor and
    to_yaml representer as soon as a class defining a non-None
    ``yaml_tag`` is created.
    """
    def __init__(cls, name, bases, kwds):
        # type: (Any, Any, Any) -> None
        super().__init__(name, bases, kwds)
        # Only register classes that declare their own non-None yaml_tag;
        # intermediate base classes without a tag are skipped.
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml)  # type: ignore
            cls.yaml_representer.add_representer(cls, cls.to_yaml)  # type: ignore
class YAMLObject(with_metaclass(YAMLObjectMetaclass)):  # type: ignore
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.

    Subclasses set ``yaml_tag`` (and optionally override the class
    attributes below); the metaclass then registers the class with the
    constructor/representer pair automatically.
    """

    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # Registration targets used by YAMLObjectMetaclass.
    yaml_constructor = Constructor
    yaml_representer = Representer

    # Subclasses must define a non-None tag to trigger auto-registration.
    yaml_tag = None  # type: Any
    yaml_flow_style = None  # type: Any

    @classmethod
    def from_yaml(cls, constructor, node):
        # type: (Any, Any) -> Any
        """
        Convert a representation node to a Python object.
        """
        return constructor.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, representer, data):
        # type: (Any, Any) -> Any
        """
        Convert a Python object to a representation node.
        """
        return representer.represent_yaml_object(
            cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style
        )
| StarcoderdataPython |
6636592 | # coding: utf-8
import requests
from .horizon import Horizon
from .keypair import Keypair
from .exceptions import AccountNotExistError, NotValidParamError
from .horizon import HORIZON_LIVE, HORIZON_TEST
class Address(object):
    """The :class:`Address` object, which represents an address (public key) on
    Stellar's network.

    An :class:`Address` is initialized via a public key string, or derived via
    a secret seed. The network on which the account exists is also specified,
    as it is used to verify and set attributes via connecting to Horizon. It
    mostly exists as a helper class for Horizon operations on a given account
    ID.

    :param str address: The address string that represents this
        :class:`Address`.
    :param str secret: The secret seed string that is used to derive the
        address for this :class:`Address`.
    :param str network: The network to connect to for verifying and retrieving
        additional attributes from. Must be either 'PUBLIC' or 'TESTNET'.
    :param Horizon horizon: The :class:`Horizon` instance (or Horizon endpoint
        URL string) to use for connecting to for additional information for
        the account to which this address corresponds to.
    """
    # TODO: Make network an enum
    def __init__(
            self, address=None, secret=None, network='TESTNET', horizon=None):
        if address is None and secret is None:
            # FIXME: Throw a better exception
            raise Exception('oops,need a stellar address or secret')
        if address is None and secret is not None:
            # Derive the public address from the secret seed.
            self.address = Keypair.from_seed(secret).address().decode()
        else:
            self.address = address
        self.secret = secret
        # Anything other than 'PUBLIC' (case-insensitive) falls back to
        # the test network.
        if network.upper() != 'PUBLIC':
            self.network = 'TESTNET'
        else:
            self.network = 'PUBLIC'
        if horizon:
            if isinstance(horizon, Horizon):
                self.horizon = horizon
            else:
                # Assume a Horizon endpoint URL string was given.
                self.horizon = Horizon(horizon)
        elif network.upper() == 'PUBLIC':
            self.horizon = Horizon(HORIZON_LIVE)
        else:
            self.horizon = Horizon(HORIZON_TEST)
        # Account attributes; all remain None until get() is called.
        self.sequence = None
        self.balances = None
        self.paging_token = None
        self.thresholds = None
        self.flags = None
        self.signers = None
        self.data = None

    def get(self):
        """Retrieve the account data that corresponds to this :class:`Address`.

        Retrieve the account data from Horizon for the account that corresponds
        to this :class:`Address`. Attempt to retrieve the following attributes
        from Horizon:

        * Sequence Number
        * Balances
        * Paging Token
        * Thresholds
        * Flags
        * Signers
        * Data

        :raises AccountNotExistError: If the account does not exist, shown by a
            404 response from a Horizon server.
        :raises Exception: If any other problems come up, or if a network
            connection happens.
        """
        try:
            acc = self.horizon.account(self.address)
            # A response containing a sequence number is treated as success;
            # otherwise the payload is assumed to be a Horizon problem object.
            if acc.get('sequence'):
                self.sequence = acc.get('sequence')
                self.balances = acc.get('balances')
                self.paging_token = acc.get('paging_token')
                self.thresholds = acc.get('thresholds')
                self.flags = acc.get('flags')
                self.signers = acc.get('signers')
                self.data = acc.get('data')
            elif acc.get('status') == 404:
                raise AccountNotExistError(acc.get('title'))
            else:
                # FIXME: Throw a more specific exception.
                raise Exception(acc.get('detail'))
        except requests.ConnectionError:
            raise Exception('network problem')

    def payments(self, sse=False, **kwargs):
        """Retrieve the payments JSON from this instance's Horizon server.

        Retrieve the payments JSON response for the account associated with
        this :class:`Address`.

        :param bool sse: Use the SSE client for connecting to Horizon.
        """
        check_params(kwargs)
        return self.horizon.account_payments(
            self.address, params=kwargs, sse=sse)

    def offers(self, **kwargs):
        """Retrieve the offers JSON from this instance's Horizon server.

        Retrieve the offers JSON response for the account associated with
        this :class:`Address`. Unlike the other collection methods, this
        endpoint does not support SSE streaming.
        """
        check_params(kwargs)
        return self.horizon.account_offers(self.address, params=kwargs)

    def transactions(self, sse=False, **kwargs):
        """Retrieve the transactions JSON from this instance's Horizon server.

        Retrieve the transactions JSON response for the account associated with
        this :class:`Address`.

        :param bool sse: Use the SSE client for connecting to Horizon.
        """
        check_params(kwargs)
        return self.horizon.account_transactions(
            self.address, params=kwargs, sse=sse)

    def operations(self, sse=False, **kwargs):
        """Retrieve the operations JSON from this instance's Horizon server.

        Retrieve the operations JSON response for the account associated with
        this :class:`Address`.

        :param bool sse: Use the SSE client for connecting to Horizon.
        """
        check_params(kwargs)
        return self.horizon.account_operations(
            self.address, params=kwargs, sse=sse)

    def effects(self, sse=False, **kwargs):
        """Retrieve the effects JSON from this instance's Horizon server.

        Retrieve the effects JSON response for the account associated with
        this :class:`Address`.

        :param bool sse: Use the SSE client for connecting to Horizon.
        """
        check_params(kwargs)
        return self.horizon.account_effects(
            self.address, params=kwargs, sse=sse)
# TODO: Make this a private method of the Address class.
def check_params(data):
    """Check for appropriate keywords for a Horizon request method.

    Only 'cursor', 'limit' and 'order' are accepted; the presence of
    any other key raises NotValidParamError.
    """
    unknown = set(data) - {'cursor', 'limit', 'order'}
    if unknown:
        raise NotValidParamError('not valid params')
| StarcoderdataPython |
1938853 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines and registers the example gym environments."""
import subprocess
from pathlib import Path
from typing import Iterable
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.registration import register
from compiler_gym.util.runfiles_path import runfiles_path, site_data_path
UNROLLING_PY_SERVICE_BINARY: Path = runfiles_path(
"examples/example_unrolling_service/service_py/example-unrolling-service-py"
)
BENCHMARKS_PATH: Path = runfiles_path("examples/example_unrolling_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = runfiles_path(
"compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
    """Incremental reward derived from the "runtime" observation.

    Each step's reward is the signed fraction by which runtime improved
    relative to the runtime recorded at the last reset.
    """

    def __init__(self):
        super().__init__(
            name="runtime",
            observation_spaces=["runtime"],
            default_value=0,
            default_negates_returns=True,
            deterministic=False,
            platform_dependent=True,
        )
        # Runtime recorded at reset; the denominator of the reward.
        self.baseline_runtime = 0

    def reset(self, benchmark: str, observation_view):
        del benchmark  # unused
        self.baseline_runtime = observation_view["runtime"]

    def update(self, action, observations, observation_view):
        del action  # unused
        del observation_view  # unused
        baseline = self.baseline_runtime
        # Positive when the new runtime (observations[0]) beats the baseline.
        return float(baseline - observations[0]) / baseline
class SizeReward(Reward):
    """An example reward that uses changes in the "size" observation value
    to compute incremental reward.
    """

    def __init__(self):
        super().__init__(
            name="size",
            observation_spaces=["size"],
            default_value=0,
            default_negates_returns=True,
            deterministic=False,
            platform_dependent=True,
        )
        # Size recorded at reset; the denominator of the reward.
        self.baseline_size = 0

    def reset(self, benchmark: str, observation_view):
        del benchmark  # unused
        # BUG FIX: previously assigned to self.baseline_runtime (a
        # copy/paste slip from RuntimeReward), leaving baseline_size
        # stuck at 0 so update() always divided by zero.
        self.baseline_size = observation_view["size"]

    def update(self, action, observations, observation_view):
        del action  # unused
        del observation_view  # unused
        # Positive when the new size (observations[0]) beats the baseline.
        return float(self.baseline_size - observations[0]) / self.baseline_size
class UnrollingDataset(Dataset):
    """In-memory dataset exposing the two example unrolling benchmarks
    (offsets1.c and conv2d.c), pre-processed through the clang frontend.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but not forwarded to
        # super().__init__ — confirm this is intentional.
        super().__init__(
            name="benchmark://unrolling-v0",
            license="MIT",
            description="Unrolling example dataset",
            site_data_base=site_data_path(
                "example_dataset"
            ),  # TODO: what should we set this to? we are not using it
        )
        # Benchmarks keyed by URI path (the part after the dataset name).
        self._benchmarks = {
            "/offsets1": Benchmark.from_file_contents(
                "benchmark://unrolling-v0/offsets1",
                self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
            ),
            "/conv2d": Benchmark.from_file_contents(
                "benchmark://unrolling-v0/conv2d",
                self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
            ),
        }

    @staticmethod
    def preprocess(src: Path) -> bytes:
        """Front a C source through the compiler frontend.

        Runs clang's preprocessor (-E) only, returning the expanded
        source bytes; raises subprocess.CalledProcessError on failure.
        """
        # TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
        # this pre-processing, or do it on the service side, once support for
        # multi-file benchmarks lands.
        cmd = [
            str(llvm.clang_path()),
            "-E",
            "-o",
            "-",
            "-I",
            str(NEURO_VECTORIZER_HEADER.parent),
            src,
        ] + get_system_library_flags()
        return subprocess.check_output(
            cmd,
            timeout=300,
        )

    def benchmark_uris(self) -> Iterable[str]:
        # Yield the fully-qualified URI for each registered benchmark.
        yield from (f"benchmark://unrolling-v0{k}" for k in self._benchmarks.keys())

    def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
        """Look up a benchmark by its parsed URI path."""
        if uri.path in self._benchmarks:
            return self._benchmarks[uri.path]
        else:
            raise LookupError("Unknown program name")
# Register the unrolling example service on module import. After importing this module,
# the unrolling-py-v0 environment will be available to gym.make(...).
register(
    id="unrolling-py-v0",
    entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
    kwargs={
        # Path of the Python service binary plus the reward spaces and
        # datasets that the environment exposes.
        "service": UNROLLING_PY_SERVICE_BINARY,
        "rewards": [RuntimeReward(), SizeReward()],
        "datasets": [UnrollingDataset()],
    },
)
| StarcoderdataPython |
class SettlementOrderGid(object):
    """Monotonically increasing id dispenser for settlement orders.

    Each call to get() hands out the next integer, starting from 0.
    """

    def __init__(self):
        # Next id that will be handed out.
        self._id = 0

    def get(self):
        """Return the next unused id and advance the counter."""
        current = self._id
        self._id = current + 1
        return current


# Module-level singleton shared by all importers.
settlement_order_gid = SettlementOrderGid()
| StarcoderdataPython |
347509 | <reponame>anniyanvr/nesta<filename>nesta/core/routines/meetup/health_tagging/topic_discovery_task.py
'''
Topic discovery
===============
Task to automatically discover relevant topics from meetup data,
defined as the most frequently occurring from a set of categories.
'''
import luigi
import datetime
import json
from nesta.core.luigihacks import s3
from nesta.core.orms.orm_utils import get_mysql_engine
from nesta.packages.meetup.meetup_utils import get_members_by_percentile
from nesta.packages.meetup.meetup_utils import get_core_topics
S3PREFIX = "s3://nesta-production-intermediate"
class TopicDiscoveryTask(luigi.Task):
    '''Task to automatically discover relevant topics from meetup data,
    defined as the most frequently occurring from a set of categories.

    Args:
        db_config_env (str): Environmental variable pointing to the path of the DB config.
        routine_id (str): The routine UID.
        core_categories (list): A list of category_shortnames from which to identify topics.
        members_perc (int): A percentile to evaluate the minimum number of members.
        topic_perc (int): A percentile to evaluate the most frequent topics.
        test (bool): Test mode.
    '''
    db_config_env = luigi.Parameter()
    routine_id = luigi.Parameter()
    core_categories = luigi.ListParameter()
    members_perc = luigi.IntParameter(default=10)
    topic_perc = luigi.IntParameter(default=10)
    test = luigi.BoolParameter(default=True)

    def output(self):
        '''Points to the S3 Target'''
        return s3.S3Target(f"{S3PREFIX}/meetup-topics-{self.routine_id}.json")

    def run(self):
        '''Extract the topics of interest'''
        # Use the dev database when in test mode, production otherwise.
        database = 'dev' if self.test else 'production'
        engine = get_mysql_engine(self.db_config_env, 'mysqldb', database)
        # Minimum member count implied by the requested percentile.
        members_limit = get_members_by_percentile(engine, perc=self.members_perc)
        topics = get_core_topics(engine,
                                 core_categories=self.core_categories,
                                 members_limit=members_limit,
                                 perc=self.topic_perc)
        # Write the intermediate output
        with self.output().open('wb') as outstream:
            outstream.write(json.dumps(list(topics)).encode('utf8'))
1982727 | <reponame>PNNL-Comp-Mass-Spec/DtaRefinery<gh_stars>0
from aux_sys_err_prediction_module.additive.my_additive_regression_analysis import do_additive_regression_analysis as additive_approach
from aux_sys_err_prediction_module.simple_shift.my_simple_shift import do_simple_shift as simple_shift
from numpy import log, array, median, zeros
def bypass_refining(Controller, xTandemInput, dtaEntries):
    """No-op refiner: return raw ppm errors and zero dta corrections.

    The first row of xTandemInput is a header; the 'massErrorPpm'
    column is extracted unchanged and the per-dta correction vector is
    all zeros, so no systematic-error model is applied.
    """
    header = list(xTandemInput[0])
    ppm_col = header.index('massErrorPpm')
    raw_ppm = array([row[ppm_col] for row in xTandemInput[1:]])
    # One zero correction per dta entry (first entry is a header).
    return raw_ppm, zeros(len(dtaEntries[1:]), 'd')
def do_predict_systematic_errors(Controller, xTandemInput, dtaEntries):
    """Dispatch to the configured refining method and return its ppm arrays.

    The method name is read from
    Controller.updatedSettings['refiningPars']['choices']['refining method'];
    an unknown name raises KeyError, exactly as the direct dict lookup did.
    """
    approaches = {
        'additiveRegression': additive_approach,
        'simpleShift': simple_shift,
        'bypassRefining': bypass_refining,
    }
    method_name = Controller.updatedSettings['refiningPars']['choices']['refining method']
    # TODO: not sure this is the best way to return results
    # Returns (xtPpm, dtaPpm): just the systematic ppms.
    return approaches[method_name](Controller, xTandemInput, dtaEntries)
# WARNING!! the order of ppm errors has to be the same as in INPUTS
| StarcoderdataPython |
import sqlalchemy
import urllib.request
import zipfile
import pandas as pd

# Download the GeoNames "cities with population > 500" dump and unpack
# it into the working directory (produces ./cities500.txt).
URL = "https://download.geonames.org/export/dump/cities500.zip"
urllib.request.urlretrieve(URL, "cities500.zip")
with zipfile.ZipFile("./cities500.zip", "r") as zip_ref:
    zip_ref.extractall(".")

# Column documentation from the GeoNames readme; parsed below to recover
# the column names (the text before the first ": " on each line).
column_names = """\
geonameid : integer id of record in geonames database
name : name of geographical point (utf8) varchar(200)
asciiname : name of geographical point in plain ascii characters, varchar(200)
alternatenames : alternatenames, comma separated, ascii names automatically transliterated, convenience attribute from alternatename table, varchar(10000)
latitude : latitude in decimal degrees (wgs84)
longitude : longitude in decimal degrees (wgs84)
feature class : see http://www.geonames.org/export/codes.html, char(1)
feature code : see http://www.geonames.org/export/codes.html, varchar(10)
country code : ISO-3166 2-letter country code, 2 characters
cc2 : alternate country codes, comma separated, ISO-3166 2-letter country code, 200 characters
admin1 code : fipscode (subject to change to iso code), see exceptions below, see file admin1Codes.txt for display names of this code; varchar(20)
admin2 code : code for the second administrative division, a county in the US, see file admin2Codes.txt; varchar(80)
admin3 code : code for third level administrative division, varchar(20)
admin4 code : code for fourth level administrative division, varchar(20)
population : bigint (8 byte int)
elevation : in meters, integer
dem : digital elevation model, srtm3 or gtopo30, average elevation of 3''x3'' (ca 90mx90m) or 30''x30'' (ca 900mx900m) area in meters, integer. srtm processed by cgiar/ciat.
timezone : the iana timezone id (see file timeZone.txt) varchar(40)
modification date : date of last modification in yyyy-MM-dd format"""

# load columns of interest and set column names
usecols = [1, 2, 4, 5, 8, 15]
data = pd.read_csv("./cities500.txt", sep="\t", header=None, usecols=usecols)
columns = [line.split(": ")[0].strip() for line in column_names.split("\n")]
columns = [columns[i] for i in usecols]
data.columns = columns

# export as sqlite3
db = sqlalchemy.create_engine('sqlite:///cities500.sqlite')
data.to_sql('cities500', db, if_exists="replace")
| StarcoderdataPython |
5182093 | <gh_stars>0
import unittest
import numpy as np
from scipy.stats import binom, hypergeom
from scipy import stats
from scipy.special import factorial
from functools import partial
from pyapprox.numerically_generate_orthonormal_polynomials_1d import *
from pyapprox.orthonormal_polynomials_1d import *
from pyapprox.univariate_quadrature import gauss_jacobi_pts_wts_1D, \
gauss_hermite_pts_wts_1D
from pyapprox.variables import float_rv_discrete
class TestNumericallyGenerateOrthonormalPolynomials1D(unittest.TestCase):
def test_krawtchouk(self):
num_coef = 6
ntrials = 10
p = 0.5
xk = np.array(range(ntrials+1), dtype='float')
pk = binom.pmf(xk, ntrials, p)
ab_lanczos = lanczos(xk, pk, num_coef)
ab_stieltjes = stieltjes(xk, pk, num_coef)
ab_exact = krawtchouk_recurrence(num_coef, ntrials, p)
# ab_lanczos[-1, 0] is a dummy entry so set to exact so
# comparison will pass if all other entries are correct
ab_lanczos[-1, 0] = ab_exact[-1, 0]
assert np.allclose(ab_lanczos, ab_exact)
assert np.allclose(ab_stieltjes, ab_exact)
from pyapprox.univariate_quadrature import gauss_quadrature
x, w = gauss_quadrature(ab_lanczos, num_coef)
moments = np.array([(x**ii).dot(w) for ii in range(num_coef)])
true_moments = np.array([(xk**ii).dot(pk)for ii in range(num_coef)])
assert np.allclose(moments, true_moments)
p = evaluate_orthonormal_polynomial_1d(x, num_coef-1, ab_lanczos)
assert np.allclose((p.T*w).dot(p), np.eye(num_coef))
p = evaluate_orthonormal_polynomial_1d(xk, num_coef-1, ab_lanczos)
assert np.allclose((p.T*pk).dot(p), np.eye(num_coef))
def test_discrete_chebyshev(self):
num_coef = 5
nmasses = 10
xk = np.array(range(nmasses), dtype='float')
pk = np.ones(nmasses)/nmasses
ab_lanczos = lanczos(xk, pk, num_coef)
ab_stieltjes = stieltjes(xk, pk, num_coef)
ab_exact = discrete_chebyshev_recurrence(num_coef, nmasses)
# ab_lanczos[-1, 0] is a dummy entry so set to exact so
# comparison will pass if all other entries are correct
ab_lanczos[-1, 0] = ab_exact[-1, 0]
assert np.allclose(ab_lanczos, ab_exact)
assert np.allclose(ab_stieltjes, ab_exact)
from pyapprox.univariate_quadrature import gauss_quadrature
x, w = gauss_quadrature(ab_lanczos, num_coef)
moments = np.array([(x**ii).dot(w) for ii in range(num_coef)])
true_moments = np.array([(xk**ii).dot(pk)for ii in range(num_coef)])
assert np.allclose(moments, true_moments)
p = evaluate_orthonormal_polynomial_1d(x, num_coef-1, ab_lanczos)
assert np.allclose((p.T*w).dot(p), np.eye(num_coef))
p = evaluate_orthonormal_polynomial_1d(xk, num_coef-1, ab_lanczos)
assert np.allclose((p.T*pk).dot(p), np.eye(num_coef))
def test_float_rv_discrete(self):
num_coef, nmasses = 5, 10
# works for both lanczos and chebyshev algorithms
#xk = np.geomspace(1,512,num=nmasses)
#pk = np.ones(nmasses)/nmasses
# works only for chebyshev algorithms
pk = np.geomspace(1, 512, num=nmasses)
pk /= pk.sum()
xk = np.arange(0, nmasses)
#ab = lanczos(xk,pk,num_coef)
ab = modified_chebyshev_orthonormal(
num_coef, [xk, pk], probability=True)
from pyapprox.univariate_quadrature import gauss_quadrature
x, w = gauss_quadrature(ab, num_coef)
moments = np.array([(x**ii).dot(w) for ii in range(num_coef)])
true_moments = np.array([(xk**ii).dot(pk)for ii in range(num_coef)])
assert np.allclose(moments, true_moments), (moments, true_moments)
p = evaluate_orthonormal_polynomial_1d(x, num_coef-1, ab)
assert np.allclose((p.T*w).dot(p), np.eye(num_coef))
p = evaluate_orthonormal_polynomial_1d(xk, num_coef-1, ab)
assert np.allclose((p.T*pk).dot(p), np.eye(num_coef))
def test_modified_chebyshev(self):
nterms = 10
alpha_stat, beta_stat = 2, 2
probability_measure = True
# using scipy to compute moments is extermely slow
# moments = [stats.beta.moment(n,alpha_stat,beta_stat,loc=-1,scale=2)
# for n in range(2*nterms)]
quad_x, quad_w = gauss_jacobi_pts_wts_1D(
4*nterms, beta_stat-1, alpha_stat-1)
true_ab = jacobi_recurrence(
nterms, alpha=beta_stat-1, beta=alpha_stat-1,
probability=probability_measure)
ab = modified_chebyshev_orthonormal(
nterms, [quad_x, quad_w], get_input_coefs=None, probability=True)
assert np.allclose(true_ab, ab)
get_input_coefs = partial(
jacobi_recurrence, alpha=beta_stat-2, beta=alpha_stat-2)
ab = modified_chebyshev_orthonormal(
nterms, [quad_x, quad_w], get_input_coefs=get_input_coefs,
probability=True)
assert np.allclose(true_ab, ab)
def test_rv_discrete_large_moments(self):
"""
When Modified_chebyshev_orthonormal is used when the moments of discrete
variable are very large it will fail. To avoid this rescale the
variables to [-1,1] like is done for continuous random variables
"""
N, degree = 100, 5
xk, pk = np.arange(N), np.ones(N)/N
rv = float_rv_discrete(name='float_rv_discrete', values=(xk, pk))
xk_canonical = xk/(N-1)*2-1
ab = modified_chebyshev_orthonormal(
degree+1, [xk_canonical, pk])
p = evaluate_orthonormal_polynomial_1d(xk_canonical, degree, ab)
w = rv.pmf(xk)
assert np.allclose(np.dot(p.T*w, p), np.eye(degree+1))
ab = predictor_corrector(
degree+1, (xk_canonical, pk), xk_canonical.min(),
xk_canonical.max(),
interval_size=xk_canonical.max()-xk_canonical.min())
p = evaluate_orthonormal_polynomial_1d(xk_canonical, degree, ab)
assert np.allclose(np.dot(p.T*w, p), np.eye(degree+1))
def test_predictor_corrector_known_scipy_pdf(self):
nterms = 5
quad_options = {'nquad_samples': 10, 'atol': 1e-8, 'rtol': 1e-8,
'max_steps': 10000, 'verbose': 1}
rv = stats.beta(1, 1, -1, 2)
ab = predictor_corrector_known_scipy_pdf(nterms, rv, quad_options)
true_ab = jacobi_recurrence(nterms, 0, 0)
assert np.allclose(ab, true_ab)
rv = stats.norm()
ab = predictor_corrector_known_scipy_pdf(nterms, rv, quad_options)
true_ab = hermite_recurrence(nterms)
assert np.allclose(ab, true_ab)
# lognormal is a very hard test
rv = stats.lognorm(1)
# mean, std = 1e4, 7.5e3
# beta = std*np.sqrt(6)/np.pi
# mu = mean - beta*np.euler_gamma
# rv = stats.gumbel_r(loc=mu, scale=beta)
ab = predictor_corrector_known_scipy_pdf(nterms, rv, quad_options)
def integrand(x):
p = evaluate_orthonormal_polynomial_1d(x, nterms-1, ab)
G = np.empty((x.shape[0], nterms**2))
kk = 0
for ii in range(nterms):
for jj in range(nterms):
G[:, kk] = p[:, ii]*p[:, jj]
kk += 1
return G*rv.pdf(x)[:, None]
lb, ub = rv.interval(1)
xx, __ = gauss_quadrature(ab, nterms)
interval_size = xx.max()-xx.min()
quad_opts = quad_options.copy()
del quad_opts['nquad_samples']
res = integrate_using_univariate_gauss_legendre_quadrature_unbounded(
integrand, lb, ub, quad_options['nquad_samples'],
interval_size=interval_size, **quad_opts)
res = np.reshape(res, (nterms, nterms), order='C')
print(np.absolute(res-np.eye(nterms)).max())
assert np.absolute(res-np.eye(nterms)).max() < 2e-4
def test_predictor_corrector_function_of_independent_variables(self):
"""
Test 1: Sum of Gaussians is a Gaussian
Test 2: Product of uniforms on [0,1]
"""
nvars, nterms = 2, 5
variables = [stats.norm(0, 1)]*nvars
nquad_samples_1d = 50
quad_rules = [gauss_hermite_pts_wts_1D(nquad_samples_1d)]*nvars
def fun(x):
return x.sum(axis=0)
ab = predictor_corrector_function_of_independent_variables(
nterms, quad_rules, fun)
rv = stats.norm(0, np.sqrt(nvars))
measures = rv.pdf
lb, ub = rv.interval(1)
interval_size = rv.interval(0.99)[1] - rv.interval(0.99)[0]
ab_full = predictor_corrector(nterms, rv.pdf, lb, ub, interval_size)
assert np.allclose(ab_full, ab)
nvars = 2
def measure(x):
return (-1)**(nvars-1)*np.log(x)**(nvars-1)/factorial(nvars-1)
def fun(x):
return x.prod(axis=0)
quad_opts = {'verbose': 0, 'atol': 1e-6, 'rtol': 1e-6}
ab_full = predictor_corrector(nterms, measure, 0, 1, 1, quad_opts)
xx, ww = gauss_jacobi_pts_wts_1D(nquad_samples_1d, 0, 0)
xx = (xx+1)/2
quad_rules = [(xx, ww)]*nvars
ab = predictor_corrector_function_of_independent_variables(
nterms, quad_rules, fun)
assert np.allclose(ab_full, ab)
def test_predictor_corrector_product_of_functions_of_independent_variables(
self):
nvars, nterms = 3, 4
def measure(x):
return (-1)**(nvars-1)*np.log(x)**(nvars-1)/factorial(nvars-1)
def fun(x):
return x.prod(axis=0)
nquad_samples_1d = 20
xx, ww = gauss_jacobi_pts_wts_1D(nquad_samples_1d, 0, 0)
xx = (xx+1)/2
quad_rules = [(xx, ww)]*nvars
funs = [lambda x: x]*nvars
ab = predictor_corrector_product_of_functions_of_independent_variables(
nterms, quad_rules, funs)
quad_opts = {'verbose': 3, 'atol': 1e-5, 'rtol': 1e-5}
ab_full = predictor_corrector(nterms, measure, 0, 1, 1, quad_opts)
assert np.allclose(ab, ab_full, atol=1e-5, rtol=1e-5)
def test_arbitraty_polynomial_chaos(self):
    """Moment-based APC coefficients match the exact Jacobi recurrence.

    NOTE: method name typo ("arbitraty") is kept — renaming would change
    the test id relied on by discovery/reporting.
    """
    nterms = 5
    alpha_stat, beta_stat = 1, 1
    # Uniform on [-1, 1] corresponds to the Jacobi(0, 0) (Legendre) family.
    exact_ab = jacobi_recurrence(
        nterms, alpha=beta_stat-1, beta=alpha_stat-1,
        probability=True)
    rv = stats.uniform(-1, 2)
    raw_moments = [rv.moment(order) for order in range(2*nterms+1)]
    apc_ab = arbitrary_polynomial_chaos_recursion_coefficients(
        raw_moments, nterms)
    assert np.allclose(exact_ab, apc_ab)
if __name__ == "__main__":
    # Run only this module's test suite when executed as a script.
    num_gen_orthonormal_poly_1d_test_suite = \
        unittest.TestLoader().loadTestsFromTestCase(
            TestNumericallyGenerateOrthonormalPolynomials1D)
    unittest.TextTestRunner(verbosity=2).run(
        num_gen_orthonormal_poly_1d_test_suite)
    # NOTE(review): the triple-quoted string below is a no-op expression
    # kept as a record of a disabled legacy Lanczos test; it is never
    # executed.
    """
    print("----------------------------")
    print("Lanczos test (deprecated)")
    print("----------------------------")
    A = np.zeros((ntrials+2,ntrials+2));
    A[0,0] = 1;
    A[0,1:] = np.sqrt(pmfVals);
    A[1:,0] = np.sqrt(pmfVals);
    for i in range(1,ntrials+2):
    A[i,i] = x[i-1]
    e1 = np.zeros(ntrials+2); e1[0] = 1;
    abAN = lanczos_deprecated(A,e1)[:N]
    print(np.allclose(abWG,abAN))
    """
| StarcoderdataPython |
213022 | import torch
from .. import FF
class PositionwiseFF(torch.nn.Module):
    """Positionwise Feed-forward layer.

    Applies the same two-layer feed-forward transform independently at
    every position: an expansion ``model_dim -> ff_dim`` through an ``FF``
    layer with a non-linearity, followed by a linear ``FF`` projection
    ``ff_dim -> model_dim`` back to the model size.

    Arguments:
        model_dim(int): Input/output feature dimensionality.
        ff_dim(int): Hidden (expansion) layer dimensionality.
        activ(str): Activation name forwarded to the first ``FF`` layer
            (default ``'relu'``); the second layer has no activation.

    Input:
        A tuple ``(x, mask)``; ``mask`` is passed through untouched.

    Output:
        A tuple ``(x, func(x), mask)`` — the untransformed input is
        returned alongside the output, presumably so a downstream layer
        can form a residual connection (NOTE(review): confirm against the
        callers of this module).
    """

    def __init__(self, model_dim, ff_dim, activ='relu'):
        super().__init__()
        self.model_dim = model_dim
        self.ff_dim = ff_dim
        self.activ = activ
        # Create the layers: expansion with activation, then a purely
        # linear projection back to the model dimension.
        self.func = torch.nn.Sequential(
            FF(self.model_dim, self.ff_dim, activ=self.activ),
            FF(self.ff_dim, self.model_dim, activ=None),
        )

    def forward(self, inputs):
        x, mask = inputs
        # No residual is added here; the raw input is simply forwarded.
        return (x, self.func(x), mask)
| StarcoderdataPython |
1920675 | import json
import pandas as pd
import os
from constants import *
import csv
from Scrape import *
from bs4 import BeautifulSoup
import requests
from GetUrls import *
from pathlib import Path
import datetime
import sys
import zerorpc
from functions import *
import numpy as np
import time
from time import sleep
| StarcoderdataPython |
9737855 | <filename>python/rsml/src/rsml/misc.py
"""
Practical functionality to process rsml-formated mtg
"""
def plant_vertices(g):
    """Return the mtg vertices that represent whole plants (scale 1)."""
    plant_scale = 1
    return g.vertices(scale=plant_scale)
def root_vertices(g):
    """Return the mtg vertices that represent root axes (finest scale)."""
    finest_scale = g.max_scale()
    return g.vertices(scale=finest_scale)
def root_tree(g, suborder=None):
    """ return the list of root axes in depth-first topological order

    If suborder is given, it should be a dictionary of (root-id,value) which is
    used to sort sibling root w.r.t. their respective value.
    """
    # PEP 8 (E731): named defs instead of lambda assignments. The reversal
    # makes the stack pop siblings in their original (or suborder) order.
    if suborder is None:
        def sort(axes):
            return axes[::-1]
    else:
        def sort(axes):
            return sorted(axes, key=suborder.get)[::-1]

    # init with axes with no parent
    stack = sort([a for a in root_vertices(g) if g.parent(a) is None])

    # parse root axes in depth-first-order
    tree = []
    while len(stack):
        axe = stack.pop()
        tree.append(axe)
        stack.extend(sort(g.children(axe)))

    return tree
def root_order(g, tree=None):
    """ return a dictionary of the (numeric) axe order

    The order of an axe is taken from the mtg 'order' property when it
    exists; otherwise it is 1 for a parentless axe, or the parent's order
    plus one.

    tree is the optional list of root id in `g`. If not given, it is computed
    """
    # Seed from the explicit 'order' property when the mtg carries one.
    if 'order' in g.property_names():
        order = g.property('order').copy()
    else:
        order = {}

    if tree is None:
        tree = root_tree(g)

    # `tree` lists parents before their children, so a parent's order is
    # always available by the time a child is processed.
    for axe in tree:
        parent = g.parent(axe)
        computed = 1 if parent is None else order[parent] + 1
        order.setdefault(axe, computed)

    return order
def hausdorff_distance(polyline1, polyline2):
    """
    Compute the hausdorff distance from `polyline1` to `polyline2`

    :Inputs:
      `polyline1`:
          a (k,n1) array for the n1 points of the 1st polyline in k-dimension
      `polyline2`:
          a (k,n2) array for the n2 points of the 2nd polyline in k-dimension

    :Output:
        The hausdorff distance:
          max( D(polyline1,polyline2), D(polyline2,polyline1) )
        where
          D(p1,p2) = max_(i in p1) |p1[i] - closest-projection-on-p2|
    """
    import numpy as _np

    # np.asfarray was removed in NumPy 2.0; asarray(dtype=float) is the
    # documented replacement and behaves identically here.
    p1 = _np.asarray(polyline1, dtype=float)
    p2 = _np.asarray(polyline2, dtype=float)

    def norm(x):
        # Euclidean norm taken over the k (first) axis.
        return (x**2).sum(axis=0)**.5

    def max_min_dist(points, polyline):
        # D(points, polyline): for every point, the distance to its closest
        # projection onto any segment of `polyline`; return the maximum.
        v1 = polyline[:, :-1]   # 1st segment vertex, shape (k,n2-1)
        v2 = polyline[:, 1:]    # 2nd segment vertex, shape (k,n2-1)
        sdir = v2 - v1          # direction vector of segment
        lsl = norm(sdir)        # distance between v1 and v2
        # Guard against zero-length segments before normalizing.
        lsl = _np.maximum(lsl, 2**-5)
        sdir = sdir / lsl       # make sdir unit vectors

        # distance from v1 to the projection of points on segments;
        # disallow projection out of segment: values are in [0,lsl]
        on_edge = ((points[:, :, None] - v1[:, None, :])
                   * sdir[:, None, :]).sum(axis=0)      # (n1,n2-1)
        on_edge = _np.minimum(_np.maximum(on_edge, 0), lsl[None, :])

        # points projection on sdir
        nproj = v1[:, None, :] + on_edge[None, :, :] * sdir[:, None, :]

        # distance from points to their projections
        return norm(nproj - points[:, :, None]).min(axis=1).max()

    return max(max_min_dist(p1, p2), max_min_dist(p2, p1))
| StarcoderdataPython |
5046804 | from django.shortcuts import render
# Create your views here.
# coding:utf-8
from django.http import HttpResponse
def index(request):
    """Return the site landing page as a plain-text HTTP response."""
    page_body = u'Django测试站'
    return HttpResponse(page_body)
| StarcoderdataPython |
5089077 | # -*- coding: utf-8 -*-
#
# Sorted list implementation.
from __future__ import print_function
from sys import hexversion
from .sortedlist import recursive_repr
from bisect import bisect_left, bisect_right, insort
from itertools import chain, repeat, starmap
from collections import MutableSequence
from operator import iadd, add
from functools import wraps
from math import log
if hexversion < 0x03000000:
from itertools import izip as zip
from itertools import imap as map
else:
from functools import reduce
def identity(value):
    """Default key function: return *value* unchanged."""
    return value
class SortedListWithKey(MutableSequence):
"""
SortedListWithKey provides most of the same methods as a list but keeps
the items in sorted order.
"""
def __init__(self, iterable=None, key=identity, load=1000):
    """
    SortedListWithKey provides most of the same methods as a list but
    keeps the items in sorted order.

    An optional *iterable* provides an initial series of items to populate
    the SortedListWithKey.

    An optional *load* specifies the load-factor of the list. The default
    load factor of '1000' works well for lists from tens to tens of millions
    of elements. Good practice is to use a value that is the cube root of
    the list size. With billions of elements, the best load factor depends
    on your usage. It's best to leave the load factor at the default until
    you start benchmarking.
    """
    # Core storage: parallel lists of value sublists, their keys, and the
    # per-sublist maximum key; _index is the lazily built positional tree.
    self._len = 0
    self._maxes = []
    self._lists = []
    self._keys = []
    self._index = []
    # Load-factor thresholds: sublists split above _twice and merge
    # below _half.
    self._key = key
    self._load = load
    self._twice = load * 2
    self._half = load >> 1
    self._offset = 0
    if iterable is not None:
        self.update(iterable)
def clear(self):
    """Discard every element, resetting the list to its empty state."""
    self._len = 0
    # del x[:] (not list.clear) keeps Python 2 compatibility, which this
    # module still supports.
    for store in (self._maxes, self._lists, self._keys, self._index):
        del store[:]
def add(self, val):
    """Add the element *val* to the list, keeping the list sorted."""
    _maxes, _lists, _keys = self._maxes, self._lists, self._keys

    key = self._key(val)

    if _maxes:
        # Locate the sublist whose maximum key is >= key.
        pos = bisect_right(_maxes, key)

        if pos == len(_maxes):
            # key is larger than every stored key: append to the last
            # sublist and refresh its recorded maximum.
            pos -= 1
            _maxes[pos] = key
            _keys[pos].append(key)
            _lists[pos].append(val)
        else:
            idx = bisect_right(_keys[pos], key)
            _keys[pos].insert(idx, key)
            _lists[pos].insert(idx, val)

        # Split the sublist if it grew beyond twice the load factor.
        self._expand(pos)
    else:
        # First element: create the initial sublist.
        _maxes.append(key)
        _keys.append([key])
        _lists.append([val])

    self._len += 1
def _expand(self, pos):
    """
    Splits sublists that are more than double the load level.

    Updates the index when the sublist length is less than double the load
    level. This requires incrementing the nodes in a traversal from the leaf
    node to the root. For an example traversal see self._loc.
    """
    _lists, _keys, _index = self._lists, self._keys, self._index

    if len(_keys[pos]) > self._twice:
        _maxes, _load = self._maxes, self._load

        # Move the tail half of the oversized sublist into a new sublist
        # inserted directly after it, and refresh both maxima.
        half = _keys[pos][_load:]
        half_list = _lists[pos][_load:]
        del _keys[pos][_load:]
        del _lists[pos][_load:]
        _maxes[pos] = _keys[pos][-1]

        _maxes.insert(pos + 1, half[-1])
        _keys.insert(pos + 1, half)
        _lists.insert(pos + 1, half_list)

        # Structure changed, so the positional index is now stale.
        del _index[:]
    elif len(_index) > 0:
        # No split: bump the counts along the leaf-to-root path for pos.
        child = self._offset + pos

        while child > 0:
            _index[child] += 1
            child = (child - 1) >> 1

        _index[0] += 1
def update(self, iterable):
    """Update the list by adding all elements from *iterable*."""
    _maxes, _lists, _keys = self._maxes, self._lists, self._keys
    values = sorted(iterable, key=self._key)

    if _maxes:
        if len(values) * 4 >= self._len:
            # Large batch relative to the current size: merge everything
            # into one sorted list and rebuild from scratch below.
            values.extend(chain.from_iterable(_lists))
            values.sort(key=self._key)
            self.clear()
        else:
            # Small batch: one-by-one insertion is cheaper.
            _add = self.add
            for val in values:
                _add(val)
            return

    _load, _index = self._load, self._index

    # Bulk-build the sublists in chunks of the load factor.
    _lists.extend(values[pos:(pos + _load)]
                  for pos in range(0, len(values), _load))
    _keys.extend(list(map(self._key, _list)) for _list in _lists)
    _maxes.extend(sublist[-1] for sublist in _keys)
    self._len = len(values)

    # Positional index is stale after a rebuild.
    del _index[:]
def __contains__(self, val):
    """Return True if and only if *val* is an element in the list."""
    _maxes = self._maxes

    if not _maxes:
        return False

    key = self._key(val)
    pos = bisect_left(_maxes, key)

    if pos == len(_maxes):
        # key is greater than every stored key: val cannot be present.
        return False

    _keys = self._keys
    _lists = self._lists

    idx = bisect_left(_keys[pos], key)

    len_keys = len(_keys)
    len_sublist = len(_keys[pos])

    # Distinct values may share the same key: scan the run of equal keys
    # for an entry equal to val, crossing sublist boundaries as needed.
    while True:
        if _keys[pos][idx] != key:
            return False
        if _lists[pos][idx] == val:
            return True
        idx += 1
        if idx == len_sublist:
            pos += 1
            if pos == len_keys:
                return False
            len_sublist = len(_keys[pos])
            idx = 0
def discard(self, val):
    """
    Remove the first occurrence of *val*.

    If *val* is not a member, does nothing.
    """
    _maxes = self._maxes

    if not _maxes:
        return

    key = self._key(val)
    pos = bisect_left(_maxes, key)

    if pos == len(_maxes):
        # key is greater than every stored key: nothing to remove.
        return

    _keys = self._keys
    _lists = self._lists
    idx = bisect_left(_keys[pos], key)
    len_keys = len(_keys)
    len_sublist = len(_keys[pos])

    # Distinct values may share the same key: scan the run of equal keys
    # for an entry equal to val, crossing sublist boundaries as needed.
    while True:
        if _keys[pos][idx] != key:
            return
        if _lists[pos][idx] == val:
            self._delete(pos, idx)
            return
        idx += 1
        if idx == len_sublist:
            pos += 1
            if pos == len_keys:
                return
            len_sublist = len(_keys[pos])
            idx = 0
def remove(self, val):
    """
    Remove first occurrence of *val*.

    Raises ValueError if *val* is not present.
    """
    _maxes = self._maxes

    if not _maxes:
        raise ValueError('{0} not in list'.format(repr(val)))

    key = self._key(val)
    pos = bisect_left(_maxes, key)

    if pos == len(_maxes):
        # key is greater than every stored key: val cannot be present.
        raise ValueError('{0} not in list'.format(repr(val)))

    _keys = self._keys
    _lists = self._lists
    idx = bisect_left(_keys[pos], key)
    len_keys = len(_keys)
    len_sublist = len(_keys[pos])

    # Distinct values may share the same key: scan the run of equal keys
    # for an entry equal to val, crossing sublist boundaries as needed.
    while True:
        if _keys[pos][idx] != key:
            raise ValueError('{0} not in list'.format(repr(val)))
        if _lists[pos][idx] == val:
            self._delete(pos, idx)
            return
        idx += 1
        if idx == len_sublist:
            pos += 1
            if pos == len_keys:
                raise ValueError('{0} not in list'.format(repr(val)))
            len_sublist = len(_keys[pos])
            idx = 0
def _delete(self, pos, idx):
    """
    Delete the item at the given (pos, idx).

    Combines lists that are less than half the load level.

    Updates the index when the sublist length is more than half the load
    level. This requires decrementing the nodes in a traversal from the leaf
    node to the root. For an example traversal see self._loc.
    """
    _maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index
    keys_pos = _keys[pos]
    lists_pos = _lists[pos]

    del keys_pos[idx]
    del lists_pos[idx]

    self._len -= 1

    len_keys_pos = len(keys_pos)

    if len_keys_pos > self._half:
        # Sublist is still large enough: refresh its recorded maximum and
        # decrement the counts along the leaf-to-root index path.
        _maxes[pos] = keys_pos[-1]

        if len(_index) > 0:
            child = self._offset + pos

            while child > 0:
                _index[child] -= 1
                child = (child - 1) >> 1

            _index[0] -= 1
    elif len(_keys) > 1:
        # Sublist underflowed: merge it into its predecessor (shift pos
        # right first when it is the leftmost sublist), then let _expand
        # re-split the merged sublist if it became oversized.
        if not pos:
            pos += 1

        prev = pos - 1
        _keys[prev].extend(_keys[pos])
        _lists[prev].extend(_lists[pos])
        _maxes[prev] = _keys[prev][-1]

        del _keys[pos]
        del _lists[pos]
        del _maxes[pos]
        del _index[:]

        self._expand(prev)
    elif len_keys_pos:
        # Only one sublist exists: just refresh its recorded maximum.
        _maxes[pos] = keys_pos[-1]
    else:
        # The list is now completely empty.
        del _keys[pos]
        del _lists[pos]
        del _maxes[pos]
        del _index[:]
def _loc(self, pos, idx):
    """Convert an index pair (alpha, beta) into a single index that corresponds to
    the position of the value in the sorted list.

    *pos* (alpha) is the sublist position and *idx* (beta) the position
    within that sublist; the return value is the flat list index.

    Most queries require the index be built. Details of the index are
    described in self._build_index.

    Indexing requires traversing the tree from a leaf node to the root. The
    parent of each node is easily computable at (pos - 1) // 2.

    Left-child nodes are always at odd indices and right-child nodes are
    always at even indices.

    When traversing up from a right-child node, increment the total by the
    left-child node.

    The final index is the sum from traversal and the index in the sublist.

    For example, using the index from self._build_index:

        _index = 14 5 9 3 2 4 5
        _offset = 3

    Tree:

             14
          5      9
        3   2  4   5

    Converting index pair (2, 3) into a single index involves iterating like
    so:

    1. Starting at the leaf node: offset + alpha = 3 + 2 = 5. We identify
       the node as a left-child node. At such nodes, we simply traverse to
       the parent.

    2. At node 9, position 2, we recognize the node as a right-child node
       and accumulate the left-child in our total. Total is now 5 and we
       traverse to the parent at position 0.

    3. Iteration ends at the root.

    Computing the index is the sum of the total and beta: 5 + 3 = 8.
    """
    if not pos:
        return idx

    _index = self._index

    if not len(_index):
        self._build_index()

    total = 0

    # Increment pos to point in the index to len(self._lists[pos]).
    pos += self._offset

    # Iterate until reaching the root of the index tree at pos = 0.
    while pos:
        # Right-child nodes are at even indices (see docstring); for them,
        # accumulate the subtree total stored in the left sibling.
        if not (pos & 1):
            total += _index[pos - 1]

        # Advance pos to the parent node.
        pos = (pos - 1) >> 1

    return total + idx
def _pos(self, idx):
    """Convert an index into a pair (alpha, beta) that can be used to access
    the corresponding _lists[alpha][beta] position.

    Raises IndexError when *idx* is out of range. Negative indices are
    supported.

    Most queries require the index be built. Details of the index are
    described in self._build_index.

    Indexing requires traversing the tree to a leaf node. Each node has
    two children which are easily computable. Given an index, pos, the
    left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2.

    When the index is less than the left-child, traversal moves to the
    left sub-tree. Otherwise, the index is decremented by the left-child
    and traversal moves to the right sub-tree.

    At a child node, the indexing pair is computed from the relative
    position of the child node as compared with the offset and the remaining
    index.

    For example, using the index from self._build_index:

        _index = 14 5 9 3 2 4 5
        _offset = 3

    Tree:

             14
          5      9
        3   2  4   5

    Indexing position 8 involves iterating like so:

    1. Starting at the root, position 0, 8 is compared with the left-child
       node (5) which it is greater than. When greater the index is
       decremented and the position is updated to the right child node.

    2. At node 9 with index 3, we again compare the index to the left-child
       node with value 4. Because the index is the less than the left-child
       node, we simply traverse to the left.

    3. At node 4 with index 3, we recognize that we are at a leaf node and
       stop iterating.

    4. To compute the sublist index, we subtract the offset from the index
       of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
       simply use the index remaining from iteration. In this case, 3.

    The final index pair from our example is (2, 3) which corresponds to
    index 8 in the sorted list.
    """
    if idx < 0:
        # Fast path: a small negative index lands in the last sublist.
        last_len = len(self._lists[-1])

        if (-idx) <= last_len:
            return len(self._lists) - 1, last_len + idx

        idx += self._len

        if idx < 0:
            raise IndexError('list index out of range')
    elif idx >= self._len:
        raise IndexError('list index out of range')

    # Fast path: a small positive index lands in the first sublist.
    if idx < len(self._lists[0]):
        return 0, idx

    _index = self._index

    if not _index:
        self._build_index()

    # Descend the index tree from the root to a leaf (see docstring).
    pos = 0
    child = 1
    len_index = len(_index)

    while child < len_index:
        index_child = _index[child]

        if idx < index_child:
            pos = child
        else:
            idx -= index_child
            pos = child + 1

        child = (pos << 1) + 1

    return (pos - self._offset, idx)
def _build_index(self):
    """Build an index for indexing the sorted list.

    Indexes are represented as binary trees in a dense array notation
    similar to a binary heap.

    For example, given a _lists representation storing integers:

        [0]: 1 2 3
        [1]: 4 5
        [2]: 6 7 8 9
        [3]: 10 11 12 13 14

    The first transformation maps the sub-lists by their length. The
    first row of the index is the length of the sub-lists.

        [0]: 3 2 4 5

    Each row after that is the sum of consecutive pairs of the previous row:

        [1]: 5 9
        [2]: 14

    Finally, the index is built by concatenating these lists together:

        _index = 14 5 9 3 2 4 5

    An offset storing the start of the first row is also stored:

        _offset = 3

    When built, the index can be used for efficient indexing into the list.
    See the comment and notes on self._pos for details.
    """
    # Leaf row: the length of each sublist.
    row0 = list(map(len, self._lists))

    if len(row0) == 1:
        self._index[:] = row0
        self._offset = 0
        return

    # head and tail share one iterator, so zip(head, tail) yields
    # consecutive non-overlapping pairs of row0.
    head = iter(row0)
    tail = iter(head)
    row1 = list(starmap(add, zip(head, tail)))

    # An odd leftover element is carried up unchanged.
    if len(row0) & 1:
        row1.append(row0[-1])

    if len(row1) == 1:
        self._index[:] = row1 + row0
        self._offset = 1
        return

    # Pad row1 with zeros up to the next power of two so the dense
    # heap-style layout stays aligned.
    size = 2 ** (int(log(len(row1) - 1, 2)) + 1)
    row1.extend(repeat(0, size - len(row1)))
    tree = [row0, row1]

    while len(tree[-1]) > 1:
        head = iter(tree[-1])
        tail = iter(head)
        row = list(starmap(add, zip(head, tail)))
        tree.append(row)

    # Concatenate the rows root-first into the flat index.
    reduce(iadd, reversed(tree), self._index)
    self._offset = size * 2 - 1
def _slice(self, slc):
    """Normalize slice *slc* into concrete (start, stop, step) bounds.

    Fills in defaults, maps negative endpoints relative to the length,
    and clamps out-of-range values, mirroring the semantics of
    slice.indices(len(self)). Raises ValueError for a zero step.
    """
    start, stop, step = slc.start, slc.stop, slc.step

    if step == 0:
        raise ValueError('slice step cannot be zero')

    # Set defaults for missing values.

    if step is None:
        step = 1

    if step > 0:
        if start is None:
            start = 0

        if stop is None:
            stop = len(self)
        elif stop < 0:
            stop += len(self)
    else:
        if start is None:
            start = len(self) - 1

        if stop is None:
            stop = -1
        elif stop < 0:
            stop += len(self)

    if start < 0:
        start += len(self)

    # Fix indices that are too big or too small.
    # Slice notation is surprisingly permissive
    # where normal indexing would raise IndexError.

    if step > 0:
        if start < 0:
            start = 0
        elif start > len(self):
            start = len(self)

        if stop < 0:
            stop = 0
        elif stop > len(self):
            stop = len(self)
    else:
        if start < 0:
            start = -1
        elif start >= len(self):
            start = len(self) - 1

        if stop < 0:
            stop = -1
        elif stop > len(self):
            stop = len(self)

    return start, stop, step
def __delitem__(self, idx):
    """Remove the element at *idx*. Supports slicing."""
    if isinstance(idx, slice):
        start, stop, step = self._slice(idx)

        if ((step == 1) and (start < stop)
                and ((stop - start) * 8 >= self._len)):
            # Deleting a large contiguous chunk: cheaper to rebuild the
            # list from the surviving prefix and suffix.
            values = self[:start]

            if stop < self._len:
                values += self[stop:]

            self.clear()
            self.update(values)

            return

        indices = range(start, stop, step)

        # Delete items from greatest index to least so
        # that the indices remain valid throughout iteration.

        if step > 0:
            indices = reversed(indices)

        _pos, _delete = self._pos, self._delete

        for index in indices:
            pos, idx = _pos(index)
            _delete(pos, idx)
    else:
        pos, idx = self._pos(idx)
        self._delete(pos, idx)
def __getitem__(self, idx):
    """Return the element at *idx*. Supports slicing."""
    _lists = self._lists

    if isinstance(idx, slice):
        start, stop, step = self._slice(idx)

        if step == 1 and start < stop:
            if start == 0 and stop == self._len:
                # Whole-list slice; as_list is defined elsewhere in this
                # class (not visible here) — presumably returns the full
                # contents as a plain list.
                return self.as_list()

            start_pos, start_idx = self._pos(start)

            if stop == self._len:
                stop_pos = len(_lists) - 1
                stop_idx = len(_lists[stop_pos])
            else:
                stop_pos, stop_idx = self._pos(stop)

            # Single-sublist slices avoid the concatenation below.
            if start_pos == stop_pos:
                return _lists[start_pos][start_idx:stop_idx]

            # Concatenate the partial first sublist, whole middle
            # sublists, and partial last sublist.
            prefix = _lists[start_pos][start_idx:]
            middle = _lists[(start_pos + 1):stop_pos]
            result = reduce(iadd, middle, prefix)
            result += _lists[stop_pos][:stop_idx]

            return result

        if step == -1 and start > stop:
            result = self[(stop + 1):(start + 1)]
            result.reverse()
            return result

        # Return a list because a negative step could
        # reverse the order of the items and this could
        # be the desired behavior.

        indices = range(start, stop, step)
        return [self[index] for index in indices]
    else:
        pos, idx = self._pos(idx)
        return _lists[pos][idx]
def _check_order(self, idx, key, val):
    """Raise ValueError if placing a value with *key* at flat index *idx*
    would violate the sort order relative to its neighbors.

    *val* is only used to format the error message.
    """
    _keys, _len = self._keys, self._len

    pos, loc = self._pos(idx)

    if idx < 0:
        idx += _len

    # Check that the inserted value is not less than the
    # previous value.

    if idx > 0:
        idx_prev = loc - 1
        pos_prev = pos

        # Step back across a sublist boundary when needed.
        if idx_prev < 0:
            pos_prev -= 1
            idx_prev = len(_keys[pos_prev]) - 1

        if _keys[pos_prev][idx_prev] > key:
            msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
            raise ValueError(msg)

    # Check that the inserted value is not greater than
    # the following value.

    if idx < (_len - 1):
        idx_next = loc + 1
        pos_next = pos

        # Step forward across a sublist boundary when needed.
        if idx_next == len(_keys[pos_next]):
            pos_next += 1
            idx_next = 0

        if _keys[pos_next][idx_next] < key:
            msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
            raise ValueError(msg)
def __setitem__(self, index, value):
    """
    Replace the item at position *index* with *value*.

    Supports slice notation. Raises a :exc:`ValueError` if the sort order
    would be violated. When used with a slice and iterable, the
    :exc:`ValueError` is raised before the list is mutated if the sort order
    would be violated by the operation.
    """
    _maxes, _lists, _keys, _pos = self._maxes, self._lists, self._keys, self._pos
    _check_order = self._check_order

    if isinstance(index, slice):
        start, stop, step = self._slice(index)
        indices = range(start, stop, step)

        if step != 1:
            # Extended slice: the value count must match exactly.
            if not hasattr(value, '__len__'):
                value = list(value)

            indices = list(indices)

            if len(value) != len(indices):
                raise ValueError(
                    'attempt to assign sequence of size {0}'
                    ' to extended slice of size {1}'
                    .format(len(value), len(indices)))

            # Keep a log of values that are set so that we can
            # roll back changes if ordering is violated.

            log = []
            _append = log.append

            for idx, val in zip(indices, value):
                pos, loc = _pos(idx)
                key = self._key(val)
                _append((idx, _keys[pos][loc], key, _lists[pos][loc], val))
                _keys[pos][loc] = key
                _lists[pos][loc] = val

                if len(_keys[pos]) == (loc + 1):
                    _maxes[pos] = key

            try:
                # Validate ordering of new values.

                for idx, oldkey, newkey, oldval, newval in log:
                    _check_order(idx, newkey, newval)

            except ValueError:

                # Roll back changes from log.

                for idx, oldkey, newkey, oldval, newval in log:
                    pos, loc = _pos(idx)
                    _keys[pos][loc] = oldkey
                    _lists[pos][loc] = oldval

                    if len(_keys[pos]) == (loc + 1):
                        _maxes[pos] = oldkey

                raise
        else:
            # Test ordering using indexing. If the value given
            # doesn't support getitem, convert it to a list.

            if not hasattr(value, '__getitem__'):
                value = list(value)

            # Check that the given values are ordered properly.

            keys = list(map(self._key, value))
            ordered = all(keys[pos - 1] <= keys[pos]
                          for pos in range(1, len(keys)))

            if not ordered:
                raise ValueError('given sequence not in sort order')

            # Check ordering in context of sorted list.

            if start and len(value):
                pos, loc = _pos(start - 1)

                if _keys[pos][loc] > keys[0]:
                    msg = '{0} not in sort order at index {1}'.format(repr(value[0]), start)
                    raise ValueError(msg)

            if stop != len(self) and len(value):
                # "stop" is exclusive so we don't need
                # to add one for the index.

                pos, loc = _pos(stop)

                if _keys[pos][loc] < keys[-1]:
                    msg = '{0} not in sort order at index {1}'.format(repr(value[-1]), stop)
                    raise ValueError(msg)

            # Delete the existing values.

            del self[index]

            # Insert the new values.

            _insert = self.insert

            for idx, val in enumerate(value):
                _insert(start + idx, val)
    else:
        pos, loc = _pos(index)
        key = self._key(value)
        _check_order(index, key, value)
        _keys[pos][loc] = key
        _lists[pos][loc] = value

        # Refresh the sublist maximum when the last slot was replaced.
        if len(_lists[pos]) == (loc + 1):
            _maxes[pos] = key
def __iter__(self):
    """Yield every value in sorted order."""
    for sublist in self._lists:
        for value in sublist:
            yield value
def __reversed__(self):
    """Yield every value in reverse sorted order."""
    for sublist in reversed(self._lists):
        for value in reversed(sublist):
            yield value
def islice(self, start=None, stop=None, reverse=False):
    """
    Returns an iterator that slices `self` from `start` to `stop` index,
    inclusive and exclusive respectively.

    When `reverse` is `True`, values are yielded from the iterator in
    reverse order.

    Both `start` and `stop` default to `None` which is automatically
    inclusive of the beginning and end.
    """
    _len = self._len

    if not _len:
        return iter(())

    # Normalize the bounds exactly like slice notation would.
    start, stop, step = self._slice(slice(start, stop))

    if start >= stop:
        return iter(())

    _pos = self._pos

    min_pos, min_idx = _pos(start)

    if stop == _len:
        # Avoid _pos(_len), which would be out of range.
        max_pos = len(self._lists) - 1
        max_idx = len(self._lists[-1])
    else:
        max_pos, max_idx = _pos(stop)

    return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
def _islice(self, min_pos, min_idx, max_pos, max_idx, reverse):
    """
    Returns an iterator that slices `self` using two index pairs,
    `(min_pos, min_idx)` and `(max_pos, max_idx)`; the first inclusive
    and the latter exclusive. See `_pos` for details on how an index
    is converted to an index pair.

    When `reverse` is `True`, values are yielded from the iterator in
    reverse order.
    """
    _lists = self._lists

    if min_pos > max_pos:
        return iter(())
    elif min_pos == max_pos and not reverse:
        # Whole range lies in a single sublist.
        return iter(_lists[min_pos][min_idx:max_idx])
    elif min_pos == max_pos:
        return reversed(_lists[min_pos][min_idx:max_idx])
    elif min_pos + 1 == max_pos and not reverse:
        # Range spans exactly two adjacent sublists.
        return chain(_lists[min_pos][min_idx:], _lists[max_pos][:max_idx])
    elif min_pos + 1 == max_pos:
        return chain(
            reversed(_lists[max_pos][:max_idx]),
            reversed(_lists[min_pos][min_idx:]),
        )
    elif not reverse:
        # General case: partial first sublist, whole middle sublists,
        # partial last sublist.
        return chain(
            _lists[min_pos][min_idx:],
            chain.from_iterable(_lists[(min_pos + 1):max_pos]),
            _lists[max_pos][:max_idx],
        )
    else:
        temp = map(reversed, reversed(_lists[(min_pos + 1):max_pos]))
        return chain(
            reversed(_lists[max_pos][:max_idx]),
            chain.from_iterable(temp),
            reversed(_lists[min_pos][min_idx:]),
        )
def irange(self, minimum=None, maximum=None, inclusive=(True, True),
           reverse=False):
    """
    Create an iterator of values between `minimum` and `maximum`.

    `inclusive` is a pair of booleans that indicates whether the minimum
    and maximum ought to be included in the range, respectively. The
    default is (True, True) such that the range is inclusive of both
    minimum and maximum.

    Both `minimum` and `maximum` default to `None` which is automatically
    inclusive of the start and end of the list, respectively.

    When `reverse` is `True` the values are yielded from the iterator in
    reverse order; `reverse` defaults to `False`.
    """
    # Map the value bounds through the key function and delegate to the
    # key-based range iterator.
    min_key = None if minimum is None else self._key(minimum)
    max_key = None if maximum is None else self._key(maximum)
    return self.irange_key(
        min_key=min_key, max_key=max_key,
        inclusive=inclusive, reverse=reverse,
    )
def irange_key(self, min_key=None, max_key=None, inclusive=(True, True),
               reverse=False):
    """
    Create an iterator of values between `min_key` and `max_key`.

    `inclusive` is a pair of booleans that indicates whether the min_key
    and max_key ought to be included in the range, respectively. The
    default is (True, True) such that the range is inclusive of both
    `min_key` and `max_key`.

    Both `min_key` and `max_key` default to `None` which is automatically
    inclusive of the start and end of the list, respectively.

    When `reverse` is `True` the values are yielded from the iterator in
    reverse order; `reverse` defaults to `False`.
    """
    _maxes = self._maxes

    if not _maxes:
        return iter(())

    _keys = self._keys

    # Calculate the minimum (pos, idx) pair. By default this location
    # will be inclusive in our calculation.

    if min_key is None:
        min_pos = 0
        min_idx = 0
    elif inclusive[0]:
        # bisect_left keeps entries equal to min_key inside the range.
        min_pos = bisect_left(_maxes, min_key)

        if min_pos == len(_maxes):
            return iter(())

        min_idx = bisect_left(_keys[min_pos], min_key)
    else:
        # bisect_right skips entries equal to min_key.
        min_pos = bisect_right(_maxes, min_key)

        if min_pos == len(_maxes):
            return iter(())

        min_idx = bisect_right(_keys[min_pos], min_key)

    # Calculate the maximum (pos, idx) pair. By default this location
    # will be exclusive in our calculation.

    if max_key is None:
        max_pos = len(_maxes) - 1
        max_idx = len(_keys[max_pos])
    elif inclusive[1]:
        max_pos = bisect_right(_maxes, max_key)

        if max_pos == len(_maxes):
            # max_key exceeds every stored key: end at the very last slot.
            max_pos -= 1
            max_idx = len(_keys[max_pos])
        else:
            max_idx = bisect_right(_keys[max_pos], max_key)
    else:
        max_pos = bisect_left(_maxes, max_key)

        if max_pos == len(_maxes):
            max_pos -= 1
            max_idx = len(_keys[max_pos])
        else:
            max_idx = bisect_left(_keys[max_pos], max_key)

    return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
def __len__(self):
    """Return the number of elements in the list (cached; O(1))."""
    return self._len
def bisect_left(self, val):
    """
    Similar to the *bisect* module in the standard library, this returns an
    appropriate index to insert *val*. If *val* is already present, the
    insertion point will be before (to the left of) any existing entries.
    """
    # Delegate to the key-based variant after applying the key function.
    key = self._key(val)
    return self.bisect_key_left(key)
def bisect_right(self, val):
    """
    Same as *bisect_left*, but if *val* is already present, the insertion
    point will be after (to the right of) any existing entries.
    """
    # Delegate to the key-based variant after applying the key function.
    key = self._key(val)
    return self.bisect_key_right(key)

# `bisect` is the conventional alias for the right-biased variant.
bisect = bisect_right
def bisect_key_left(self, key):
    """
    Similar to the *bisect* module in the standard library, this returns an
    appropriate index to insert a value with a given *key*. If values with
    *key* are already present, the insertion point will be before (to the
    left of) any existing entries.
    """
    if not self._maxes:
        return 0

    # Locate the sublist that would hold the key...
    sublist_pos = bisect_left(self._maxes, key)

    if sublist_pos == len(self._maxes):
        # key is greater than every stored key.
        return self._len

    # ...then the position within that sublist, and flatten the pair.
    within_idx = bisect_left(self._keys[sublist_pos], key)
    return self._loc(sublist_pos, within_idx)
def bisect_key_right(self, key):
    """
    Same as *bisect_key_left*, but if *key* is already present, the insertion
    point will be after (to the right of) any existing entries.
    """
    if not self._maxes:
        return 0

    # Locate the sublist that would hold the key...
    sublist_pos = bisect_right(self._maxes, key)

    if sublist_pos == len(self._maxes):
        # key is greater than or equal to every stored key.
        return self._len

    # ...then the position within that sublist, and flatten the pair.
    within_idx = bisect_right(self._keys[sublist_pos], key)
    return self._loc(sublist_pos, within_idx)

# `bisect_key` is the conventional alias for the right-biased variant.
bisect_key = bisect_key_right
def count(self, val):
    """Return the number of occurrences of *val* in the list."""
    _maxes = self._maxes

    if not _maxes:
        return 0

    key = self._key(val)
    pos = bisect_left(_maxes, key)

    if pos == len(_maxes):
        # key is greater than every stored key: val cannot be present.
        return 0

    _keys = self._keys
    _lists = self._lists
    idx = bisect_left(_keys[pos], key)
    total = 0
    len_keys = len(_keys)
    len_sublist = len(_keys[pos])

    # Distinct values may share the same key: count matches through the
    # run of equal keys, crossing sublist boundaries as needed.
    while True:
        if _keys[pos][idx] != key:
            return total
        if _lists[pos][idx] == val:
            total += 1
        idx += 1
        if idx == len_sublist:
            pos += 1
            if pos == len_keys:
                return total
            len_sublist = len(_keys[pos])
            idx = 0
def copy(self):
    """Return a shallow copy of this sorted list (same key and load)."""
    cls = self.__class__
    return cls(self, key=self._key, load=self._load)

# Support the copy module's protocol.
__copy__ = copy
def append(self, val):
    """
    Append the element *val* to the list. Raises a ValueError if the *val*
    would violate the sort order.
    """
    _maxes, _lists, _keys = self._maxes, self._lists, self._keys

    key = self._key(val)

    if not _maxes:
        # First element: create the initial sublist.
        _maxes.append(key)
        _keys.append([key])
        _lists.append([val])
        self._len = 1
        return

    pos = len(_keys) - 1

    # Appending is only legal when key is >= the current maximum key.
    if key < _keys[pos][-1]:
        msg = '{0} not in sort order at index {1}'.format(repr(val), self._len)
        raise ValueError(msg)

    _maxes[pos] = key
    _keys[pos].append(key)
    _lists[pos].append(val)
    self._len += 1

    # Split the last sublist if it grew beyond twice the load factor.
    self._expand(pos)
def extend(self, values):
    """
    Extend the list by appending all elements from the *values*. Raises a
    ValueError if the sort order would be violated.
    """
    _maxes, _keys, _lists, _load = self._maxes, self._keys, self._lists, self._load

    if not isinstance(values, list):
        values = list(values)

    keys = list(map(self._key, values))

    # The batch itself must already be sorted by key.
    if any(keys[pos - 1] > keys[pos]
           for pos in range(1, len(keys))):
        raise ValueError('given sequence not in sort order')

    offset = 0

    if _maxes:
        # The batch must start at or after the current maximum key.
        if keys[0] < _keys[-1][-1]:
            msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len)
            raise ValueError(msg)

        if len(_keys[-1]) < self._half:
            # The last sublist has room: top it up with the first chunk.
            _lists[-1].extend(values[:_load])
            _keys[-1].extend(keys[:_load])
            _maxes[-1] = _keys[-1][-1]
            offset = _load

    len_keys = len(_keys)

    # Append the remaining values as whole new sublists of size _load.
    for idx in range(offset, len(keys), _load):
        _lists.append(values[idx:(idx + _load)])
        _keys.append(keys[idx:(idx + _load)])
        _maxes.append(_keys[-1][-1])

    _index = self._index

    if len_keys == len(_keys):
        # No sublist was added: the index shape is unchanged, so just add
        # the new element count along the rightmost leaf-to-root path.
        len_index = len(_index)
        if len_index > 0:
            len_values = len(values)
            child = len_index - 1
            while child:
                _index[child] += len_values
                child = (child - 1) >> 1
            _index[0] += len_values
    else:
        # Sublist count changed: the positional index is stale.
        del _index[:]

    self._len += len(values)
def insert(self, idx, val):
    """
    Insert the element *val* into the list at *idx*. Raises a ValueError if
    the *val* at *idx* would violate the sort order.
    """
    _maxes, _lists, _keys, _len = self._maxes, self._lists, self._keys, self._len
    # Clamp negative / out-of-range indices the same way list.insert does.
    if idx < 0:
        idx += _len
    idx = max(idx, 0)
    if idx > _len:
        idx = _len
    key = self._key(val)
    if not _maxes:
        # The idx must be zero by the inequalities above.
        _maxes.append(key)
        _lists.append([val])
        _keys.append([key])
        self._len = 1
        return
    if not idx:
        # Front insert: val must not exceed the current minimum key.
        if key > _keys[0][0]:
            msg = '{0} not in sort order at index {1}'.format(repr(val), 0)
            raise ValueError(msg)
        else:
            _keys[0].insert(0, key)
            _lists[0].insert(0, val)
            self._expand(0)
            self._len += 1
            return
    if idx == _len:
        # Back insert: val must not precede the current maximum key.
        pos = len(_keys) - 1
        if _keys[pos][-1] > key:
            msg = '{0} not in sort order at index {1}'.format(repr(val), _len)
            raise ValueError(msg)
        else:
            _keys[pos].append(key)
            _lists[pos].append(val)
            _maxes[pos] = _keys[pos][-1]
            self._expand(pos)
            self._len += 1
            return
    # Interior insert: the new key must fit between both neighbours.
    pos, idx = self._pos(idx)
    idx_before = idx - 1
    if idx_before < 0:
        # Predecessor lives at the end of the previous sublist.
        pos_before = pos - 1
        idx_before = len(_keys[pos_before]) - 1
    else:
        pos_before = pos
    before = _keys[pos_before][idx_before]
    if before <= key <= _keys[pos][idx]:
        _lists[pos].insert(idx, val)
        _keys[pos].insert(idx, key)
        self._expand(pos)
        self._len += 1
    else:
        msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
        raise ValueError(msg)
def pop(self, idx=-1):
    """
    Remove and return the item at *idx* (default last).

    Raises IndexError when the list is empty or *idx* falls outside the
    list; negative indices are handled as for slice indices.
    """
    out_of_range = idx >= self._len or (idx < 0 and -idx > self._len)
    if out_of_range:
        raise IndexError('pop index out of range')
    pos, loc = self._pos(idx)
    value = self._lists[pos][loc]
    self._delete(pos, loc)
    return value
def index(self, val, start=None, stop=None):
    """
    Return the smallest *k* such that L[k] == val and i <= k < j`. Raises
    ValueError if *val* is not present. *stop* defaults to the end of the
    list. *start* defaults to the beginning. Negative indices are supported,
    as for slice indices.
    """
    _len, _maxes = self._len, self._maxes
    if not _maxes:
        raise ValueError('{0} is not in list'.format(repr(val)))
    # Normalize start/stop the way slices do.
    if start is None:
        start = 0
    if start < 0:
        start += _len
    start = max(start, 0)
    if stop is None:
        stop = _len
    if stop < 0:
        stop += _len
    if stop > _len:
        stop = _len
    if stop <= start:
        raise ValueError('{0} is not in list'.format(repr(val)))
    # Make stop inclusive for the range test below.
    stop -= 1
    # Find the first sublist that could contain the key, then scan forward
    # over key-equal slots comparing actual values.
    key = self._key(val)
    pos = bisect_left(_maxes, key)
    if pos == len(_maxes):
        raise ValueError('{0} is not in list'.format(repr(val)))
    _keys = self._keys
    _lists = self._lists
    idx = bisect_left(_keys[pos], key)
    len_keys = len(_keys)
    len_sublist = len(_keys[pos])
    while True:
        if _keys[pos][idx] != key:
            # Ran past the run of equal keys without a value match.
            raise ValueError('{0} is not in list'.format(repr(val)))
        if _lists[pos][idx] == val:
            loc = self._loc(pos, idx)
            if start <= loc <= stop:
                return loc
            elif loc > stop:
                break
        idx += 1
        if idx == len_sublist:
            pos += 1
            if pos == len_keys:
                raise ValueError('{0} is not in list'.format(repr(val)))
            len_sublist = len(_keys[pos])
            idx = 0
    raise ValueError('{0} is not in list'.format(repr(val)))
def as_list(self):
    """Return every element as a plain list, preserving sorted order."""
    return [value for sublist in self._lists for value in sublist]
def __add__(self, that):
    """
    Return a new sorted list combining the elements of *self* and *that*.
    *that* may be any iterable and need not be pre-sorted.
    """
    combined = self.as_list()
    combined.extend(that)
    return type(self)(combined, key=self._key, load=self._load)
def __iadd__(self, that):
    """
    Update *self* to include all values in *that*. Elements in *that* do not
    need to be properly ordered with respect to *self*.

    Returns *self* so the ``+=`` operator rebinds to the same object.
    """
    self.update(that)
    return self
def __mul__(self, that):
    """
    Return a new sorted list built from *that* shallow copies of every
    item in this list.
    """
    repeated = self.as_list() * that
    return type(self)(repeated, key=self._key, load=self._load)
def __imul__(self, that):
    """
    Grow the list in place so it contains *that* shallow copies of each
    item. Returns *self* to support the ``*=`` operator.
    """
    repeated = self.as_list() * that
    self.clear()
    self.update(repeated)
    return self
def __eq__(self, that):
    """Return True when both sequences have equal length and equal items."""
    if self._len != len(that):
        return False
    return all(lhs == rhs for lhs, rhs in zip(self, that))
def __ne__(self, that):
    """Return True when the sequences differ in length or in any item."""
    if self._len != len(that):
        return True
    return any(lhs != rhs for lhs, rhs in zip(self, that))
def __lt__(self, that):
    """Return True if *self* is lexicographically less than *that*.

    Bug fix: the previous implementation required *every* paired element
    of *self* to compare less than its counterpart, which is not sequence
    ordering — e.g. ``[1, 2] < [1, 3]`` must be True and ``[1, 2] <
    [1, 2, 3]`` must be True. Standard lexicographic rules (as used by
    built-in sequences) are applied instead.
    """
    for lhs, rhs in zip(self, that):
        if lhs != rhs:
            return lhs < rhs
    # All compared pairs are equal: the shorter sequence is the lesser.
    return self._len < len(that)
def __le__(self, that):
    """Return True if *self* is lexicographically less than or equal to *that*.

    Bug fix: the previous element-wise ``all(lhs <= rhs)`` test is not
    sequence ordering — e.g. ``[1, 2] <= [2, 1]`` must be True. Standard
    lexicographic rules are applied instead.
    """
    for lhs, rhs in zip(self, that):
        if lhs != rhs:
            return lhs < rhs
    # All compared pairs are equal: compare by length (prefix rule).
    return self._len <= len(that)
def __gt__(self, that):
    """Return True if *self* is lexicographically greater than *that*.

    Bug fix: the previous element-wise ``all(lhs > rhs)`` test is not
    sequence ordering — e.g. ``[1, 3] > [1, 2]`` must be True. Standard
    lexicographic rules are applied instead.
    """
    for lhs, rhs in zip(self, that):
        if lhs != rhs:
            return lhs > rhs
    # All compared pairs are equal: the longer sequence is the greater.
    return self._len > len(that)
def __ge__(self, that):
    """Return True if *self* is lexicographically greater than or equal to *that*.

    Bug fix: the previous element-wise ``all(lhs >= rhs)`` test is not
    sequence ordering — e.g. ``[2, 1] >= [1, 9]`` must be True. Standard
    lexicographic rules are applied instead.
    """
    for lhs, rhs in zip(self, that):
        if lhs != rhs:
            return lhs > rhs
    # All compared pairs are equal: compare by length (prefix rule).
    return self._len >= len(that)
@recursive_repr
def __repr__(self):
    """Return string representation of SortedListWithKey."""
    return '{0}({1}, key={2}, load={3})'.format(
        type(self).__name__,
        repr(list(self)),
        repr(self._key),
        repr(self._load)
    )
def _check(self):
    """Assert every internal invariant of the sorted list.

    Debug/testing helper. On any failure the complete internal state is
    dumped to stdout and the original exception is re-raised.
    """
    try:
        # Check load parameters.
        assert self._load >= 4
        assert self._half == (self._load >> 1)
        assert self._twice == (self._load * 2)
        # Check empty sorted list case.
        if self._maxes == []:
            assert self._keys == []
            assert self._lists == []
            return
        assert len(self._maxes) > 0 and len(self._keys) > 0 and len(self._lists) > 0
        # Check all sublists are sorted.
        assert all(sublist[pos - 1] <= sublist[pos]
                   for sublist in self._keys
                   for pos in range(1, len(sublist)))
        # Check beginning/end of sublists are sorted.
        for pos in range(1, len(self._keys)):
            assert self._keys[pos - 1][-1] <= self._keys[pos][0]
        # Check length of _maxes and _lists match.
        assert len(self._maxes) == len(self._lists) == len(self._keys)
        # Check _keys matches _key mapped to _lists.
        assert all(len(val_list) == len(key_list)
                   for val_list, key_list in zip(self._lists, self._keys))
        assert all(self._key(val) == key for val, key in
                   zip((_val for _val_list in self._lists for _val in _val_list),
                       (_key for _key_list in self._keys for _key in _key_list)))
        # Check _maxes is a map of _keys.
        assert all(self._maxes[pos] == self._keys[pos][-1]
                   for pos in range(len(self._maxes)))
        # Check load level is less than _twice.
        assert all(len(sublist) <= self._twice for sublist in self._lists)
        # Check load level is greater than _half for all
        # but the last sublist.
        assert all(len(self._lists[pos]) >= self._half
                   for pos in range(0, len(self._lists) - 1))
        # Check length.
        assert self._len == sum(len(sublist) for sublist in self._lists)
        # Check index.
        if len(self._index):
            assert len(self._index) == self._offset + len(self._lists)
            assert self._len == self._index[0]

            # Each leaf of the index tree must equal its sublist length.
            def test_offset_pos(pos):
                from_index = self._index[self._offset + pos]
                return from_index == len(self._lists[pos])
            assert all(test_offset_pos(pos)
                       for pos in range(len(self._lists)))
            # Each interior node must equal the sum of its children.
            for pos in range(self._offset):
                child = (pos << 1) + 1
                if self._index[pos] == 0:
                    assert child >= len(self._index)
                elif child + 1 == len(self._index):
                    assert self._index[pos] == self._index[child]
                else:
                    child_sum = self._index[child] + self._index[child + 1]
                    assert self._index[pos] == child_sum
    except:
        # Bare except is acceptable here: the state dump is followed by an
        # unconditional re-raise, so nothing is swallowed.
        import sys
        import traceback
        traceback.print_exc(file=sys.stdout)
        print('len', self._len)
        print('load', self._load, self._half, self._twice)
        print('offset', self._offset)
        print('len_index', len(self._index))
        print('index', self._index)
        print('len_maxes', len(self._maxes))
        print('maxes', self._maxes)
        print('len_keys', len(self._keys))
        print('keys', self._keys)
        print('len_lists', len(self._lists))
        print('lists', self._lists)
        raise
| StarcoderdataPython |
48200 | <filename>src/03.Structure.py
# Concatenation: one string literal assembled over several physical
# lines using backslash continuations.
joined_text = 'hell' + \
              'o, w' + \
              'orld'
print(joined_text)
# Prints: hello, world

# Conditional branches
is_furry = True
is_small = True
if is_furry and is_small:
    print(" It' s a cat.")
elif is_furry:
    print(" It' s a bear!")
elif is_small:
    print(" It' s a skink!")
else:
    print(" It' s a human. Or a hairless bear.")
# Prints: It' s a cat.

# while loop terminated by break
even_values = []
candidate = 1
while True:
    if len(even_values) == 5:
        break
    if candidate % 2 == 0:
        even_values.append(candidate)
    candidate += 1
print(even_values)
# Prints: [2, 4, 6, 8, 10]

# while/else: the else clause runs when the condition becomes false
# without break having fired.
even_values = []
candidate = 1
while candidate < 10:
    if len(even_values) == 5:
        break
    if candidate % 2 == 0:
        even_values.append(candidate)
    candidate += 1
else:
    even_values.append(9)
print(even_values)
# Prints: [2, 4, 6, 8, 9]

# for/else: the else clause runs when the loop exhausts its iterable
# without break having fired.
collected = []
for number in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
    if len(collected) == 5:
        break
    if number % 2 == 0:
        collected.append(number)
else:
    collected.append(11)
print(collected)
# Prints: [2, 4, 6, 8, 10, 11]
| StarcoderdataPython |
6615104 | from abc import ABCMeta, abstractmethod
import json
import time
from indy import ledger
import asyncio
from .AbstractConnector import AbstractConnector
class IndyConnector(AbstractConnector):
    """Validator connector that forwards ledger requests to a Hyperledger Indy pool.

    Most AbstractConnector operations are stubs that only log the call;
    only execSyncFunction (via load_schema_or_credential_definition) is
    implemented against the Indy ledger API.
    """

    def __init__(self, socketio, sessionid, indy_dic):
        # socketio and sessionid are accepted for interface compatibility
        # but are not stored; only the Indy settings dictionary is kept.
        self.moduleName = "IndyConnector"
        self.indy_dic = indy_dic
        print(f"##{self.moduleName}.__init__")

    def getValidatorInformation(self, validatorURL):
        """Get the validator information including version, name, ID, and other information"""
        print(f"##{self.moduleName}.getValidatorInformation()")

    def sendSignedTransaction(self, signedTransaction):
        """Request a verifier to execute a ledger operation"""
        print(f"##{self.moduleName}.sendSignedTransaction()")

    def getBalance(self, address):
        """Get balance of an account for native token on a leder"""
        print(f"##{self.moduleName}.getBalance()")

    def execSyncFunction(self, address, funcName, args):
        """Execute a synchronous function held by a smart contract"""
        print(f"##{self.moduleName}.execSyncFunction()")
        # Dispatch on the command embedded in the request arguments.
        command = args['method']['command']
        if command== 'indy_ledger_submit_request':
            return self.load_schema_or_credential_definition(args['args'])
        print(f"##{self.moduleName} unknown command : {command}")
        return "unknown command."

    def load_schema_or_credential_definition(self, args):
        """Execute a synchronous function held by a smart contract"""
        print(f"##{self.moduleName}.load_schema_or_credential_definition()")
        pool_handle = self.indy_dic['pool_handle']
        # args is the raw, pre-built ledger request string; the checker
        # accepts the response once the data field is populated.
        # NOTE(review): run_coroutine_ensure_previous_request_applied can
        # return None when the check never passes (see below), in which
        # case json.loads(None) raises TypeError — confirm intended.
        responseStr = self.run_coroutine_ensure_previous_request_applied(pool_handle, args, lambda response: response['result']['data'] is not None)
        response = json.loads(responseStr)
        return response

    def startMonitor(self, clientId, cb):
        """Request a validator to start monitoring ledger"""
        print(f"##{self.moduleName}.startMonitor()")

    def stopMonitor(self, clientId):
        """Request a validator to stop monitoring ledger"""
        print(f"##{self.moduleName}.stopMonitor()")

    def cb(self, callbackData):
        """Callback function to call when receiving data from Ledger"""
        print(f"##{self.moduleName}.cb()")

    def nop(self):
        """Nop function for testing"""
        print(f"##{self.moduleName}.nop()")

    async def ensure_previous_request_applied(self, pool_handle, checker_request, checker):
        """Submit *checker_request* until *checker* accepts the response.

        Retries up to 3 times; returns the accepted response as a JSON
        string, or implicitly None if no attempt satisfies the checker.
        """
        for _ in range(3):
            response = json.loads(await ledger.submit_request(pool_handle, checker_request))
            try:
                if checker(response):
                    return json.dumps(response)
            except TypeError:
                pass
            # NOTE(review): time.sleep blocks the event loop inside a
            # coroutine; asyncio.sleep would be the non-blocking choice.
            time.sleep(5)

    def run_coroutine_ensure_previous_request_applied(self, pool_handle, checker_request, checker, loop=None):
        """Synchronous wrapper running the retry coroutine to completion."""
        if loop is None:
            loop = asyncio.get_event_loop()
        results = loop.run_until_complete(self.ensure_previous_request_applied(pool_handle, checker_request, checker))
        return results
| StarcoderdataPython |
9751997 | import os
import glob
import random
from sklearn.model_selection import train_test_split
def filter_bird_dataset(image_dir, seg_dir, save_dir,
                        save_name_prefix,
                        max_count=100,
                        select_class_name=None,
                        select_class_count=None,
                        test_ratio=0.2,
                        seg_image_ratio=0.25,
                        ):
    """Build train/test split files for the CUB bird dataset.

    Writes ``<prefix>_train.txt`` / ``<prefix>_test.txt`` into *save_dir*,
    one ``image,label,segmentation`` line per sample. Classes are either
    given explicitly (*select_class_name*) or sampled at random
    (*select_class_count*), and each class is capped at *max_count* images.
    """
    os.makedirs(save_dir, exist_ok=True)
    class_dirs = os.listdir(image_dir)
    train_file = os.path.join(save_dir, save_name_prefix+"_train.txt")
    test_file = os.path.join(save_dir, save_name_prefix+"_test.txt")
    # random select n classes
    if (select_class_name is None) and (select_class_count is not None) and isinstance(select_class_count, int):
        random.seed(42)
        random.shuffle(class_dirs)
        select_class_name = class_dirs[:select_class_count]
    print("selected_classes: ", sorted(select_class_name))
    images = []
    for c in select_class_name:
        c_images = glob.glob(os.path.join(image_dir, c, "*.jpg"), recursive=True)
        if len(c_images) > max_count:
            random.shuffle(c_images)
            c_images = c_images[:max_count]
        images += c_images
    train_images, test_images = train_test_split(images, test_size=test_ratio, shuffle=True)
    # select 25% images to extract segmentation data
    train_img_count = len(train_images)
    test_img_count = len(test_images)
    # Class label is the parent directory name of each image path.
    train_labels = [os.path.dirname(x).split("/")[-1] for x in train_images]
    test_labels = [os.path.dirname(x).split("/")[-1] for x in test_images]
    # NOTE(review): these sampled index sets are unused because the loops
    # below (originally restricted to the sampled indices — see the
    # commented-out headers) now attach a segmentation path to EVERY
    # image; confirm which behavior is intended.
    train_seg_idx = random.sample(list(range(train_img_count)), int(train_img_count * seg_image_ratio))
    test_seg_idx = random.sample(list(range(test_img_count)), int(test_img_count * seg_image_ratio))
    train_segs = ["none" for _ in range(train_img_count)]
    test_segs = ["none" for _ in range(test_img_count)]
    # for idx in train_seg_idx:
    for idx in range(train_img_count):
        image_name = os.path.basename(train_images[idx])
        image_class = os.path.dirname(train_images[idx]).split("/")[-1]
        seg_name = os.path.join(seg_dir, image_class, image_name.replace("jpg", "png"))
        train_segs[idx] = seg_name
    # for idx in test_seg_idx:
    for idx in range(test_img_count):
        image_name = os.path.basename(test_images[idx])
        image_class = os.path.dirname(test_images[idx]).split("/")[-1]
        seg_name = os.path.join(seg_dir, image_class, image_name.replace("jpg", "png"))
        test_segs[idx] = seg_name
    print("=======write=========")
    print("Num of train images: {}".format(train_img_count))
    print("Num of test images: {}".format(test_img_count))
    with open(train_file, "w") as f:
        for img, lbl, seg in zip(train_images, train_labels, train_segs):
            f.write(img + "," + lbl + "," + seg + "\n")
        f.close()
    with open(test_file, "w") as f:
        for img, lbl, seg in zip(test_images, test_labels, test_segs):
            f.write(img + "," + lbl + "," + seg + "\n")
        f.close()
def filter_busi_dataset(image_dir, save_dir,
                        save_name_prefix,
                        max_count=250,
                        select_class_name=None,
                        test_ratio=0.2,
                        seg_image_ratio=0.25,
                        select_class_count=None,
                        ):
    """Build train/test split files for the BUSI ultrasound dataset.

    Writes ``<prefix>_train.txt`` / ``<prefix>_test.txt`` into *save_dir*,
    one ``image,label,segmentation`` line per sample. Images are located
    via their ``*_mask.png`` companions; a fraction *seg_image_ratio* of
    the training set keeps its mask path, while every test image does.

    Parameters
    ----------
    select_class_name : list[str] or None
        Explicit class sub-directories to use. If None, classes are
        sampled at random using *select_class_count*.
    select_class_count : int or None
        Number of classes to sample when *select_class_name* is None.
        (Bug fix: this was previously referenced without being a
        parameter, raising NameError.)

    Raises
    ------
    ValueError
        If neither *select_class_name* nor an integer
        *select_class_count* is provided.
    """
    os.makedirs(save_dir, exist_ok=True)
    class_dirs = os.listdir(image_dir)
    train_file = os.path.join(save_dir, save_name_prefix+"_train.txt")
    test_file = os.path.join(save_dir, save_name_prefix+"_test.txt")
    # Randomly select n classes when no explicit class list is given.
    if select_class_name is None:
        if isinstance(select_class_count, int):
            random.seed(42)
            random.shuffle(class_dirs)
            select_class_name = class_dirs[:select_class_count]
        else:
            raise ValueError(
                'either select_class_name or an integer select_class_count '
                'must be provided')
    print("selected_classes: ", sorted(select_class_name))
    images = []
    for c in select_class_name:
        # Enumerate masks, then cap per-class count before deriving images.
        c_images = glob.glob(os.path.join(image_dir, c, "*_mask.png"), recursive=True)
        if len(c_images) > max_count:
            random.shuffle(c_images)
            c_images = c_images[:max_count]
        images += c_images
    # Derive the raw image path from each mask path.
    images = [image.replace("_mask", "") for image in images]
    train_images, test_images = train_test_split(images, test_size=test_ratio, shuffle=True)
    train_img_count = len(train_images)
    test_img_count = len(test_images)
    # Class label is the parent directory name of each image path.
    train_labels = [os.path.dirname(x).split("/")[-1] for x in train_images]
    test_labels = [os.path.dirname(x).split("/")[-1] for x in test_images]
    # Only a sampled subset of training images keeps segmentation masks.
    train_seg_idx = random.sample(list(range(train_img_count)), int(train_img_count * seg_image_ratio))
    train_segs = ["none" for _ in range(train_img_count)]
    test_segs = ["none" for _ in range(test_img_count)]
    for idx in train_seg_idx:
        seg_name = train_images[idx].replace(".png", "_mask.png")
        train_segs[idx] = seg_name
    # Every test image keeps its mask path for evaluation.
    for idx in range(test_img_count):
        seg_name = test_images[idx].replace(".png", "_mask.png")
        test_segs[idx] = seg_name
    print("=======write=========")
    print("Num of train images: {}".format(train_img_count))
    print("Num of test images: {}".format(test_img_count))
    with open(train_file, "w") as f:
        for img, lbl, seg in zip(train_images, train_labels, train_segs):
            f.write(img + "," + lbl + "," + seg + "\n")
    with open(test_file, "w") as f:
        for img, lbl, seg in zip(test_images, test_labels, test_segs):
            f.write(img + "," + lbl + "," + seg + "\n")
if __name__ == "__main__":
    # Example invocation for the bird dataset (kept for reference):
    #image_dir="/home/zongfan2/Documents/ECE549_project/CUB_200_2011/CUB_200_2011/images"
    #seg_dir="/home/zongfan2/Documents/ECE549_project/bird_seg"
    #save_dir="/home/zongfan2/Documents/ECE549_project/ECE549_project/data"
    #save_name_prefix="bird"
    #max_count=100
    #select_class_name=None
    #select_class_count=8
    #test_ratio=0.2
    #seg_image_ratio=0.25
    #filter_bird_dataset(image_dir, seg_dir, save_dir, save_name_prefix, max_count, select_class_name, select_class_count, test_ratio, seg_image_ratio)
    # Active configuration: BUSI breast-ultrasound dataset split.
    image_dir="/shared/anastasio5/COVID19/data/Dataset_BUSI_with_GT"
    save_dir="/home/zongfan2/Documents/ECE549_project/ECE549_project/data"
    save_name_prefix="busi"
    max_count=250
    select_class_name=["malignant", "benign"]
    test_ratio=0.2
    seg_image_ratio=0.25
    filter_busi_dataset(image_dir, save_dir, save_name_prefix, max_count, select_class_name, test_ratio, seg_image_ratio)
| StarcoderdataPython |
1826746 | <filename>blog/urls.py<gh_stars>10-100
from django.contrib.sitemaps.views import sitemap
from django.urls import (
path, re_path
)
from .sitemaps import PostSitemap
from .views import (
PostListView, PostDetailView, PostMyListView, PostMyDetailView, PostCreateView, PostUpdateView, PostDeleteView,
PostCategoryView, PostTagView, TagView, RssView,
PostArchiveIndexView, PostYearArchiveView, PostMonthArchiveView, PostDayArchiveView,
PostAttachmentUploadView, PostAttachmentDeleteView,
)
# Sitemap registry served at sitemap-posts.xml (see urlpatterns below).
sitemaps_post = {
    'sitemap': PostSitemap(),
}

# URL namespace used for reversing, e.g. reverse('blog:post-list').
app_name = 'blog'

urlpatterns = [
    # Public post listing and detail pages.
    path('<str:blog>/posts/',
         PostListView.as_view(), name='post-list'),
    re_path(r'^(?P<blog>[-\w]+)/(?P<pk>\d+)/(?P<slug>[-\w]+)/$',
            PostDetailView.as_view(), name='post-detail'),
    # Author-only ("me") views: list, detail, create, edit, delete.
    path('<str:blog>/posts/me/',
         PostMyListView.as_view(), name='post-my-list'),
    re_path(r'^(?P<blog>[-\w]+)/posts/me/(?P<pk>\d+)/(?P<slug>[-\w]+)/$',
            PostMyDetailView.as_view(), name='post-my-detail'),
    path('<str:blog>/posts/me/new/',
         PostCreateView.as_view(), name='post-create'),
    re_path(r'^(?P<blog>[-\w]+)/posts/me/(?P<pk>\d+)/(?P<slug>[-\w]+)/edit$',
            PostUpdateView.as_view(), name='post-edit'),
    re_path(r'^(?P<blog>[-\w]+)/posts/me/(?P<pk>\d+)/(?P<slug>[-\w]+)/delete$',
            PostDeleteView.as_view(), name='post-delete'),
    # Category and tag filtering.
    re_path(r'^(?P<blog>[-\w]+)/category/(?P<slug>[-\w]+)/$',
            PostCategoryView.as_view(), name='post-category'),
    re_path(r'^(?P<blog>[-\w]+)/tags/(?P<slug>[-\w]+)/$',
            PostTagView.as_view(), name='post-tag'),
    path('<str:blog>/tags/',
         TagView.as_view(), name='tag'),
    # RSS feed.
    path('<str:blog>/rss/',
         RssView(), name='rss'),
    # Date-based archives (index, year, month, day).
    path('<str:blog>/archive/',
         PostArchiveIndexView.as_view(), name='post-archive'),
    path('<str:blog>/archive/<int:year>/',
         PostYearArchiveView.as_view(), name='post-archive-year'),
    path('<str:blog>/archive/<int:year>/<int:month>/',
         PostMonthArchiveView.as_view(month_format='%m'), name='post-archive-month'),
    path('<str:blog>/archive/<int:year>/<int:month>/<int:day>/',
         PostDayArchiveView.as_view(month_format='%m'), name='post-archive-day'),
    # Post sitemap endpoint.
    path('sitemap-posts.xml',
         sitemap, {'sitemaps': sitemaps_post}, name='django.contrib.sitemaps.views.sitemap'),
    # Attachment upload/delete endpoints.
    path('upload-file',
         PostAttachmentUploadView.as_view(), name='post-file-upload'),
    path('delete-file',
         PostAttachmentDeleteView.as_view(), name='post-file-delete'),
]
| StarcoderdataPython |
8180097 | <reponame>amadavan/PhasorPy
import numpy as np
import scipy as sp
import scipy.sparse
import stukapy as st
def constructLP(network, formulation, alphap):
    """Assemble the block-structured LP for a security-constrained DC-OPF.

    For the 'ISF' formulation, builds one subproblem per line outage
    (contingency) plus one base-case block, each described by objective
    vector ``c``, inequality data ``Aub/bub`` (with coupling ``Cub``),
    equality data ``Aeq/beq`` (coupling ``Ceq``), and bounds ``lb/ub``.
    The lists are returned in that order; the base-case block is appended
    last (and carries no ``Cub``/``Ceq`` entry).

    Raises NotImplementedError for any other *formulation*.
    """
    if formulation == 'ISF':
        c, Aub, bub, Cub, Aeq, beq, Ceq, lb, ub = [], [], [], [], [], [], [], [], []
        # p0 accumulates the probability of the no-outage base case.
        p0 = 1.
        for lineOutage in network.lineOutages:
            p0 -= lineOutage['prob']
            # State: yk, dgk+ (positive post-generation), dgk- (negative post-generation), ddkp (load shed), slack (flexibility)
            H = sp.sparse.csc_matrix(lineOutage['ISF'])
            # Columns of the ISF restricted to generator buses.
            Hg = sp.sparse.csc_matrix(lineOutage['ISF'][:, network.gen['GEN_BUS'] - 1])
            # Delta formulation
            ck = alphap * np.concatenate((lineOutage['prob'] * np.ones((1,)),
                                          np.zeros((network.n_g,)),
                                          np.zeros((network.n_g,)),
                                          np.zeros((network.n_b,)),
                                          999999 * np.ones((network.n_g,)),
                                          999999 * np.ones((network.n_l - 1,))))
            # Constraints
            # yk >= c^T dgk+ + c^T dgk- + v^T ddkp + c^T g - z
            # H(g - d) - slack <= f_dal
            # dgk+ - dgk- + g >= 0
            # dgk+ - dgk- + g <= gmax
            # H(g + dgk+ - dgk- + ddkp - s - d) <= f_ste
            # dgk+ - dgk- <= delta g
            # -dgk+ + dgk- <= delta g
            Aubk = sp.sparse.bmat(
                [[-1, network.gencost['COST'][:, -2], network.gencost['COST'][:, -2], network.voll, None, None],
                 [None, None, None, None, None, -sp.sparse.eye(network.n_l - 1)],
                 [None, None, None, None, None, -sp.sparse.eye(network.n_l - 1)],
                 [None, -sp.sparse.eye(network.n_g), sp.sparse.eye(network.n_g), None, -sp.sparse.eye(network.n_g),
                  None],
                 [None, sp.sparse.eye(network.n_g), -sp.sparse.eye(network.n_g), None, -sp.sparse.eye(network.n_g),
                  None],
                 [None, Hg, -Hg, H, None, -sp.sparse.eye(network.n_l - 1)],
                 [None, -Hg, Hg, -H, None, -sp.sparse.eye(network.n_l - 1)],
                 [None, sp.sparse.eye(network.n_g), -sp.sparse.eye(network.n_g), None, -sp.sparse.eye(network.n_g),
                  None],
                 [None, -sp.sparse.eye(network.n_g), sp.sparse.eye(network.n_g), None, -sp.sparse.eye(network.n_g),
                  None]
                 ], 'csc')
            # Coupling of the base-case variables (yk-link, z, g) into the
            # same inequality rows.
            Cubk = sp.sparse.bmat([[-1, 0, network.gencost['COST'][:, -2]],
                                   [None, None, Hg],
                                   [None, None, -Hg],
                                   [None, None, -sp.sparse.eye(network.n_g)],
                                   [None, None, sp.sparse.eye(network.n_g)],
                                   [None, None, Hg],
                                   [None, None, -Hg],
                                   [np.zeros((network.n_g, 1)), None, None],
                                   [np.zeros((network.n_g, 1)), None, None]
                                   ], 'csc')
            bubk = np.concatenate((np.zeros((1,)),
                                   lineOutage['branch']['RATE_C'] + H.dot(network.bus['PD']),
                                   lineOutage['branch']['RATE_C'] - H.dot(network.bus['PD']),
                                   np.zeros((network.n_g,)),
                                   network.gen['PMAX'],
                                   lineOutage['branch']['RATE_B'] + H.dot(network.bus['PD']),
                                   lineOutage['branch']['RATE_B'] - H.dot(network.bus['PD']),
                                   network.gen['RAMP_AGC'] * 5,
                                   network.gen['RAMP_AGC'] * 5
                                   ))
            # Power balance: post-contingency adjustments plus shed load
            # must cover total demand.
            Aeqk = sp.sparse.bmat([[np.zeros((1, 1)), np.ones((network.n_g,)), -np.ones((network.n_g,)),
                                    np.ones((network.n_b,)), np.zeros((network.n_g,)), np.zeros((network.n_l - 1,))]],
                                  'csc')
            Ceqk = sp.sparse.bmat([[np.zeros((1, 1)), np.zeros((1, 1)), np.ones((network.n_g,))]], 'csc')
            beqk = np.array([np.sum(network.bus['PD'])])
            lbk = np.concatenate((np.zeros((1,)), np.zeros((network.n_g,)), np.zeros((network.n_g,)),
                                  np.zeros((network.n_b,)), np.zeros((network.n_g,)), np.zeros((network.n_l - 1,))))
            ubk = np.concatenate((st.inf * np.ones((1,)), st.inf * np.ones((network.n_g,)),
                                  st.inf * np.ones((network.n_g,)), network.bus['PD'], st.inf * np.ones((network.n_g,)),
                                  st.inf * np.ones((network.n_l - 1,))))
            c.append(ck)
            Aub.append(Aubk)
            bub.append(bubk)
            Cub.append(Cubk)
            Aeq.append(Aeqk)
            beq.append(beqk)
            Ceq.append(Ceqk)
            lb.append(lbk)
            ub.append(ubk)
        # Base-case block: variables (z, y0, g) with probability weight p0.
        c0 = np.concatenate((np.ones((1,)), alphap * p0 * np.ones((1,)), np.zeros((network.n_g,))))
        H = sp.sparse.csc_matrix(network.ISF)
        Hg = sp.sparse.csc_matrix(network.ISF[:, network.gen['GEN_BUS'] - 1])
        Aub0 = sp.sparse.bmat([[-np.ones((1, 1)), -np.ones((1, 1)), network.gencost['COST'][:, -2]],
                               [None, None, Hg],
                               [None, None, -Hg]], 'csc')
        bub0 = np.concatenate((np.zeros((1,)),
                               network.branch['RATE_A'] + H.dot(network.bus['PD']),
                               network.branch['RATE_A'] - H.dot(network.bus['PD'])))
        Aeq0 = sp.sparse.bmat([[np.zeros((1, 1)), np.zeros((1, 1)), np.ones((network.n_g,))]], 'csc')
        beq0 = np.array([np.sum(network.bus['PD'])])
        lb0 = np.concatenate((np.zeros((2,)), np.zeros((network.n_g,))))
        ub0 = np.concatenate((st.inf * np.ones((2,)), network.gen['PMAX']))
        c.append(c0)
        Aub.append(Aub0)
        bub.append(bub0)
        Aeq.append(Aeq0)
        beq.append(beq0)
        lb.append(lb0)
        ub.append(ub0)
    else:
        raise NotImplementedError
    return c, Aub, bub, Cub, Aeq, beq, Ceq, lb, ub
def interpretState(network, x):
    """Map an LP solution vector *x* back onto *network* quantities.

    Placeholder: not yet implemented; currently does nothing and
    returns None.
    """
    pass
| StarcoderdataPython |
12859223 | <gh_stars>1-10
import torch
import numpy as np
import copy
def remove(path, out_dir="/home/seungjae/Desktop/lunarlander"):
    """Split a replay buffer into per-action horizontal/vertical subsets.

    Loads *path* (a torch file holding an iterable of ``(state, action)``
    pairs, where ``state`` reshapes to 8 values and ``action`` is an int
    in 0..3), filters the samples per action id:

    * "horizontal": keep samples whose first state component is > 0.1
    * "vertical":   keep samples whose second state component is < 0.8

    and saves one ``[locations, actions]`` pair per action id to
    ``replay_buffer_vertical_<i>.pt`` / ``replay_buffer_horizontal_<i>.pt``
    inside *out_dir*.

    Parameters
    ----------
    path : str
        Path of the replay buffer file to load.
    out_dir : str, optional
        Output directory for the filtered buffers. Defaults to the
        previously hard-coded location for backward compatibility.
    """
    try:
        data = torch.load(path)
    except Exception:
        # Newer torch defaults to weights_only=True, which rejects the
        # pickled (ndarray, int) pairs produced by the collection script.
        data = torch.load(path, weights_only=False)
    locations = np.concatenate([np.reshape(st[0], (1, 8)) for st in data], axis=0)
    actions = np.asarray([st[1] for st in data])

    # Partition samples by action id (0..3).
    per_action = []
    for action_id in range(4):
        mask = actions == action_id
        per_action.append((locations[mask, :], actions[mask]))

    # Horizontal subset: first state component above 0.1.
    a_hori, l_hori = [], []
    for loc, act in per_action:
        keep = loc[:, 0] > 0.1
        a_hori.append(act[keep])
        l_hori.append(loc[keep])
    location_hori = np.concatenate(l_hori, axis=0)
    action_hori = np.concatenate(a_hori, axis=0)
    print("horizontal : ", location_hori.shape, action_hori.shape)

    # Vertical subset: second state component below 0.8.
    a_verti, l_verti = [], []
    for loc, act in per_action:
        keep = loc[:, 1] < 0.8
        a_verti.append(act[keep])
        l_verti.append(loc[keep])
    location_verti = np.concatenate(l_verti, axis=0)
    action_verti = np.concatenate(a_verti, axis=0)
    print("vertical : ", location_verti.shape, action_verti.shape)

    # Save one file per action id for each subset.
    for i, (act, loc) in enumerate(zip(a_verti, l_verti)):
        torch.save([loc, act], f"{out_dir}/replay_buffer_vertical_{i}.pt")
    for i, (act, loc) in enumerate(zip(a_hori, l_hori)):
        torch.save([loc, act], f"{out_dir}/replay_buffer_horizontal_{i}.pt")
if __name__ == "__main__":
    # Default buffer produced by the data-collection run.
    path = "/home/seungjae/Desktop/lunarlander/replay_buffer.pt"
    remove(path)
3428752 | <filename>ncskos/test/test_ld_functions.py
"""
Unit tests for ld_functions against a test URI
Created on 5Oct.,2016
@author: <NAME>
"""
import unittest
from pprint import pprint
from ncskos import ld_functions # ConceptFetcher, CliValuesValidator
SHOW_DEBUG_OUTPUT = False
TEST_SKOS_PARAMS = {
'lang': 'pl',
'altLabels': True,
'narrower': True,
'broader': True,
}
INVALID_SKOS_PARAMS = {
'name': '<NAME>',
'altLabels': 'True',
'narrower': 'False',
'broader': True,
}
TEST_URI = 'http://pid.geoscience.gov.au/def/voc/netCDF-LD-eg-ToS/sea_surface_temperature'
INVALID_URI = 'This is not a URI'
EXPECTED_RESULT = {
'skos__broader': 'http://pid.geoscience.gov.au/def/voc/netCDF-LD-eg-ToS/surface_temperature',
'skos__narrower': 'http://pid.geoscience.gov.au/def/voc/netCDF-LD-eg-ToS/sea_surface_skin_temperature, \
http://pid.geoscience.gov.au/def/voc/netCDF-LD-eg-ToS/sea_surface_subskin_temperature, \
http://pid.geoscience.gov.au/def/voc/netCDF-LD-eg-ToS/square_of_sea_surface_temperature',
'skos__prefLabel_pl': 'temperatura powierzchni morza',
'skos__altLabels': 'SST'
}
VALID_MIMETYPES = {
'text/turtle': 'turtle',
'text/ntriples': 'nt',
'text/nt': 'nt',
'text/n3': 'nt',
'application/rdf+xml': 'rdf',
'application/rdf+json': 'json-ld'
}
INVALID_MIMETYPE = 'nothing'
# Shared instance so we only invoke the constructor once
concept_fetcher_object = None
class TestCliValuesValidator(unittest.TestCase):
    """Unit tests for CliValuesValidator class"""
    # NOTE: this module uses Python 2 print statements throughout.

    def test_is_a_uri(self):
        """
        Perform test of CliValuesValidator.is_a_uri
        """
        print 'Testing CliValuesValidator.is_a_uri function'
        assert ld_functions.CliValuesValidator.is_a_uri(
            TEST_URI), 'CliValuesValidator.is_a_uri() function failed'
        assert not ld_functions.CliValuesValidator.is_a_uri(INVALID_URI), \
            'Negative CliValuesValidator.is_a_uri() function failed'
class TestConceptFetcherConstructor(unittest.TestCase):
    """Unit tests for ConceptFetcher constructor - RUN THIS BEFORE ANY OTHER ConceptFetcher TESTS"""

    def test_constructor(self):
        """
        Perform test of constructor
        """
        print 'Testing ConceptFetcher constructor'
        global concept_fetcher_object
        # Store the fetcher in the shared module-level slot so the later
        # test classes can reuse it without re-running the constructor.
        concept_fetcher_object = ld_functions.ConceptFetcher(
            TEST_SKOS_PARAMS, debug=SHOW_DEBUG_OUTPUT)
        assert concept_fetcher_object, 'NCLDDump constructor failed'
class TestConceptFetcherLowLevel(unittest.TestCase):
"""Lowest-level unit tests for ConceptFetcher class"""
def test_valid_command_line_args(self):
print 'Testing valid_command_line_args function'
global concept_fetcher_object
assert concept_fetcher_object.valid_command_line_args(TEST_SKOS_PARAMS), \
'Failed valid_command_line_args test with %s' % TEST_SKOS_PARAMS
try:
assert not concept_fetcher_object.valid_command_line_args(TEST_SKOS_PARAMS), \
'Failed negative valid_command_line_args test with %s' % INVALID_SKOS_PARAMS
except:
pass
def test_valid_skos_concept_uri(self):
print 'Testing valid_skos_concept_uri function'
global concept_fetcher_object
assert concept_fetcher_object.valid_skos_concept_uri(TEST_URI), \
'Failed valid_skos_concept_uri test with %s' % TEST_URI
try:
assert not concept_fetcher_object.valid_skos_concept_uri(INVALID_URI), \
'Failed negative valid_skos_concept_uri test with %s' % INVALID_URI
except:
pass
def test_dereference_uri(self):
print 'Testing dereference_uri function'
global concept_fetcher_object
assert '<Response [200]>' in str(concept_fetcher_object.dereference_uri(TEST_URI)), \
'Failed dereference_uri test with %s' % TEST_URI
try:
assert '<Response [200]>' not in str(concept_fetcher_object.dereference_uri(INVALID_URI)), \
'Failed negative dereference_uri test with %s' % INVALID_URI
except:
pass
def test_get_rdflib_rdf_format(self):
print 'Testing get_rdflib_rdf_format function'
global concept_fetcher_object
for mimetype, rdf_format in VALID_MIMETYPES.items():
assert concept_fetcher_object.get_rdflib_rdf_format(
mimetype + ';charset=utf-8') == rdf_format
try:
assert concept_fetcher_object.get_rdflib_rdf_format(INVALID_MIMETYPE) not in VALID_MIMETYPES.values(), \
'Failed negative get_rdflib_rdf_format test with "%s"' % INVALID_MIMETYPE
except:
pass
def test_valid_skos(self):
print 'Testing test_valid_skos function'
global concept_fetcher_object
concept_fetcher_object.parse_rdf(
concept_fetcher_object.dereference_uri(TEST_URI)) # Need self.g graph object
assert concept_fetcher_object.valid_skos(
TEST_URI), 'Failed valid_skos test with "%s"' % TEST_URI
try:
concept_fetcher_object.parse_rdf(
concept_fetcher_object.dereference_uri(INVALID_URI)) # This will prob fail
assert not concept_fetcher_object.valid_skos(INVALID_URI), \
'Failed negative valid_skos test with "%s"' % INVALID_URI
except:
pass
class TestConceptFetcherMidLevel(unittest.TestCase):
    """Mid-level unit tests for ConceptFetcher class"""

    def test_get_prefLabel(self):
        print 'Testing get_prefLabel function'
        global concept_fetcher_object
        # Test default language: result should look something like ('sea
        # surface temperature', 'en')
        get_prefLabel_result = concept_fetcher_object.get_prefLabel(TEST_URI)
        assert get_prefLabel_result[1] == 'en', \
            'Default prefLabel language "%s" does not match "%s"' % (
                get_prefLabel_result[1], 'en')
        assert get_prefLabel_result[0] == 'sea surface temperature', \
            'Failed default language get_prefLabel test with "%s"' % TEST_URI
        # Result should look something like ('temperatura powierzchni morza',
        # 'pl')
        get_prefLabel_result = concept_fetcher_object.get_prefLabel(
            TEST_URI, lang=TEST_SKOS_PARAMS['lang'])
        assert get_prefLabel_result[1] == TEST_SKOS_PARAMS['lang'], \
            'prefLabel language "%s" does not match "%s"' % (
                get_prefLabel_result[1], TEST_SKOS_PARAMS['lang'])
        assert get_prefLabel_result[0] == EXPECTED_RESULT['skos__prefLabel_%s' % get_prefLabel_result[1]], \
            'Failed "%s" language get_prefLabel test with "%s"' % (
                TEST_SKOS_PARAMS['lang'], TEST_URI)

    def test_get_altLabels(self):
        print 'Testing get_altLabels function'
        global concept_fetcher_object
        # Compare as sorted lists so ordering of labels does not matter.
        get_altLabels_result = [
            str(altlabel) for altlabel in concept_fetcher_object.get_altLabels(TEST_URI)]
        assert sorted(get_altLabels_result) == [
            item for item in sorted(EXPECTED_RESULT['skos__altLabels'].split(', ')) if item
        ]

    def test_get_narrower(self):
        print 'Testing get_narrower function'
        global concept_fetcher_object
        get_narrower_result = [
            str(altlabel) for altlabel in concept_fetcher_object.get_narrower(TEST_URI)]
        assert sorted(get_narrower_result) == [
            item for item in sorted(EXPECTED_RESULT['skos__narrower'].split(', ')) if item
        ]

    def test_get_broader(self):
        print 'Testing get_broader function'
        global concept_fetcher_object
        get_broader_result = [
            str(altlabel) for altlabel in concept_fetcher_object.get_broader(TEST_URI)]
        assert sorted(get_broader_result) == [
            item for item in sorted(EXPECTED_RESULT['skos__broader'].split(', ')) if item
        ]
class TestConceptFetcherSystem(unittest.TestCase):
    """Top-level unit test for ConceptFetcher class."""

    def test_get_results(self):
        """get_results returns the full expected result dictionary."""
        print('Testing get_results function')
        result_dict = concept_fetcher_object.get_results(TEST_URI)
        if SHOW_DEBUG_OUTPUT:
            print('Result for %s:' % TEST_URI)
            pprint(result_dict)
        assert result_dict == EXPECTED_RESULT, \
            'Failed get_results test with "%s"' % TEST_URI
# Define test suites
def test_suite():
    """Returns a test suite of all the tests in this module."""
    case_classes = [
        TestCliValuesValidator,
        TestConceptFetcherConstructor,
        TestConceptFetcherLowLevel,
        TestConceptFetcherMidLevel,
        TestConceptFetcherSystem,
    ]
    loader = unittest.defaultTestLoader
    return unittest.TestSuite(
        loader.loadTestsFromTestCase(case) for case in case_classes)
# Define main function
def main():
    """Run every test in this module with verbose output."""
    runner = unittest.TextTestRunner(verbosity=4)
    runner.run(test_suite())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3563712 | from copy import deepcopy
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem.AtomPairs import Pairs
from rdkit.Chem.Scaffolds import MurckoScaffold
from reinvent_scoring.scoring.diversity_filters.reinvent_core.base_diversity_filter import BaseDiversityFilter
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_parameters import DiversityFilterParameters
from reinvent_scoring.scoring.score_summary import FinalSummary
class ScaffoldSimilarity(BaseDiversityFilter):
    """Penalizes compounds based on atom pair Tanimoto similarity to previously generated Murcko Scaffolds."""

    def __init__(self, parameters: DiversityFilterParameters):
        super().__init__(parameters)
        # Maps scaffold SMILES -> atom-pair fingerprint for that scaffold.
        self._scaffold_fingerprints = {}

    def update_score(self, score_summary: FinalSummary, step=0) -> np.array:
        """Zero duplicate SMILES and penalize scores by scaffold-bucket occupancy.

        Works on a deep copy of *score_summary* so the caller's object is
        left untouched; returns the adjusted per-compound scores.
        """
        score_summary = deepcopy(score_summary)
        scores = score_summary.total_score
        smiles = score_summary.scored_smiles

        for i in score_summary.valid_idxs:
            smile = self._chemistry.convert_to_rdkit_smiles(smiles[i])
            scaffold = self._calculate_scaffold(smile)

            # check, if another scaffold should be used as "bucket", because it is very similar as
            # defined by the "minsimilarity" threshold; if not, this call is a no-op and the
            # smiles' normal Murcko scaffold will be used
            # -> usage of the "murcko scaffold filter" is actually a special case, where
            # "minsimilarity" is 1.0
            scaffold = self._find_similar_scaffold(scaffold)

            scores[i] = 0 if self._smiles_exists(smile) else scores[i]
            if scores[i] >= self.parameters.minscore:
                self._add_to_memory(i, scores[i], smile, scaffold, score_summary.scaffold_log, step)
                scores[i] = self._penalize_score(scaffold, scores[i])
        return scores

    def _calculate_scaffold(self, smile):
        """Return the non-isomeric Murcko scaffold SMILES for *smile*, or '' on failure."""
        mol = Chem.MolFromSmiles(smile)
        if mol is None:
            return ''
        try:
            scaffold = MurckoScaffold.GetScaffoldForMol(mol)
            return Chem.MolToSmiles(scaffold, isomericSmiles=False)
        except ValueError:
            return ''

    def _find_similar_scaffold(self, scaffold):
        """
        this function tries to find a "similar" scaffold (according to the threshold set by
        parameter "minsimilarity") and if at least one scaffold satisfies this criteria, it will
        replace the smiles' scaffold with the most similar one
        -> in effect, this reduces the number of scaffold buckets in the memory (the lower
        parameter "minsimilarity", the more pronounced the reduction)

        generate a "mol" scaffold from the smile and calculate an atom pair fingerprint

        :param scaffold: scaffold represented by a smiles string
        :return: closest scaffold given a certain similarity threshold
        """
        # BUGFIX: the original used `scaffold is not ''`, an identity comparison
        # against a string literal (implementation-dependent, SyntaxWarning on
        # Python >= 3.8).  Equality is the intended check.
        if scaffold != '':
            fp = Pairs.GetAtomPairFingerprint(Chem.MolFromSmiles(scaffold))

            # make a list of the stored fingerprints for similarity calculations
            fps = list(self._scaffold_fingerprints.values())

            # check, if a similar scaffold entry already exists and if so, use this one instead
            if len(fps) > 0:
                similarity_scores = DataStructs.BulkDiceSimilarity(fp, fps)
                closest = np.argmax(similarity_scores)
                if similarity_scores[closest] >= self.parameters.minsimilarity:
                    scaffold = list(self._scaffold_fingerprints.keys())[closest]
                    fp = self._scaffold_fingerprints[scaffold]

            self._scaffold_fingerprints[scaffold] = fp
        return scaffold
| StarcoderdataPython |
1889435 | <filename>login/admin.py
# -*- coding: utf-8 -*-
""" Add the login module to the Django admin """
from __future__ import unicode_literals
# Register your models here.
| StarcoderdataPython |
5008481 | <filename>sudokuer.py
#!/usr/bin/env python
# coding: utf-8
# default background color: #300a24
# default text color: #839496
from collections import namedtuple

# One placed guess: cell index (0-80) and the value tried there.
Entry = namedtuple('Entry', 'pos val')

# Peer index lists for the flat 81-cell board (index = 9 * row + column).
# Lists (not ranges) because the solver concatenates/combines them later,
# and integer division (//) keeps this working on both Python 2 and 3 --
# the original Python-2 "/" produces floats under Python 3 and breaks
# range().
ROWS = [list(range(9 * i, 9 * (i + 1))) for i in range(9)]
COLUMNS = [list(range(i, i + 80, 9)) for i in range(9)]
BOXES = []
for i in range(9):
    b = []
    for l in range((i // 3) * 3, (i // 3) * 3 + 3):
        for c in range((i % 3) * 3, (i % 3) * 3 + 3):
            b.append(9 * l + c)
    BOXES.append(b)
def make_sudoku_solver(puzzle):
    """Build a SudokuSolver for *puzzle* after validating the given clues."""
    solver = SudokuSolver()
    solver.set_sudoku(puzzle)
    if not solver.check_sudoku():
        raise Exception("invalid input puzzle")
    return solver
class SudokuSolver(object):
    """Backtracking sudoku solver over a flat 81-cell list (0 = empty cell).

    Fixes versus the original: integer division uses ``//`` and the printed
    solution uses ``print()`` so the class runs under Python 3 as well as 2,
    peer sets are built with set unions (the original concatenated what are
    range objects on Python 3), and large blocks of commented-out dead code
    were removed.
    """

    def __init__(self):
        self.puzzle = []      # flat list of 81 ints, 0 for an empty cell
        self.stack = []       # Entry history of guesses, for backtracking
        self.peercache = {}   # pos -> set of peer indices (row + column + box)

    def set_sudoku(self, puzzle):
        """Install the puzzle to solve (flat list of 81 ints)."""
        self.puzzle = puzzle

    def check_sudoku(self):
        """Return True when every given clue is consistent with its peers."""
        for pos in range(81):
            v = self.puzzle[pos]
            if v != 0 and not self.__check_value(pos, v, initial_check=True):
                return False
        return True

    def __check_value(self, pos, x, initial_check=False):
        """Return True when value *x* may legally be placed at cell *pos*.

        With initial_check=True the cell itself is skipped, so pre-filled
        clues do not conflict with themselves.
        """
        if pos not in self.peercache:
            line = pos // 9
            col = pos % 9
            box = (line // 3) * 3 + col // 3
            self.peercache[pos] = (
                set(ROWS[line]) | set(COLUMNS[col]) | set(BOXES[box])
            )
        for i in self.peercache[pos]:
            if initial_check and i == pos:
                continue
            if self.puzzle[i] == x:
                return False
        return True

    def make_guess(self, pos, b):
        """Return the smallest legal value greater than *b* for cell *pos*.

        Returns 10 when no legal value remains (the backtracking sentinel).
        """
        for guess in range(b + 1, 10):
            if self.__check_value(pos, guess):
                return guess
        return 10

    def solve(self):
        """Solve the puzzle in place by depth-first search with backtracking.

        Prints "done!" on success; raises Exception when no solution exists.
        """
        while True:
            # Find the first empty cell; None means the board is complete.
            pos = next((i for i in range(81) if self.puzzle[i] == 0), None)
            if pos is None:
                print("done!")
                break
            value = self.make_guess(pos, 0)
            if value <= 9:
                self.puzzle[pos] = value
                self.stack.append(Entry(pos, value))
                continue
            # Dead end: unwind the guess stack, bumping earlier guesses.
            while self.stack:
                entry = self.stack.pop()
                value = self.make_guess(entry.pos, entry.val)
                if value <= 9:
                    self.puzzle[entry.pos] = value
                    self.stack.append(Entry(entry.pos, value))
                    break
                # This cell is exhausted too; clear it and keep unwinding.
                self.puzzle[entry.pos] = 0
            else:
                raise Exception("bad sudoku, no solution")

    def show_solution(self):
        """Print the 9x9 grid, one space-separated row per line."""
        rows = []
        for r in range(9):
            rows.append(' '.join(str(v) for v in self.puzzle[9 * r:9 * r + 9]))
        print('\n'.join(rows))
if __name__ == '__main__':
    # Demo puzzle: 0 marks an empty cell, rows listed top to bottom.
    demo_puzzle = [
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        9, 1, 0, 7, 0, 0, 0, 0, 0,
        7, 0, 0, 0, 0, 8, 0, 0, 0,
        0, 0, 0, 0, 0, 5, 0, 0, 0,
        0, 0, 4, 0, 8, 2, 0, 0, 0,
        6, 0, 0, 0, 0, 0, 9, 1, 0,
        0, 0, 5, 0, 0, 0, 0, 0, 8,
        0, 0, 0, 0, 0, 0, 0, 0, 2,
        0, 0, 0, 1, 0, 0, 0, 7, 0,
    ]
    sdksolver = make_sudoku_solver(demo_puzzle)
    sdksolver.solve()
    sdksolver.show_solution()
| StarcoderdataPython |
394931 | <reponame>MosyMosy/ivadomed<filename>lab/pre_encoder_to_unet.py<gh_stars>0
from ivadomed.models import Unet
import torch

# Assemble a U-Net whose decoder is reused from a pretrained ivadomed
# axon/myelin segmentation model and whose encoder weights come from an
# ImageNet-pretrained checkpoint, then save the stitched-together model.
device = torch.device("cpu")
model = Unet(depth=4, in_channel=1)

# Take the decoder, as-is, from the pretrained segmentation model.
model.decoder = torch.load('./pretrained/model_seg_rat_axon-myelin_sem.pt',
                           map_location=torch.device(device)).decoder

# The ImageNet checkpoint is a training checkpoint dict; the encoder weights
# live under its 'state_dict' key.
state_encoder = torch.load('/mnt/d/Work/Globus/ImageNet_1channel/checkpoint_best.pkl',
                           map_location=torch.device(device))['state_dict']

from collections import OrderedDict
new_state = OrderedDict()
for k, v in state_encoder.items():
    # Strip the leading 7 characters -- removes the `module.` prefix
    # (presumably added by nn.DataParallel wrapping; TODO confirm).
    name = k[7:]
    new_state[name] = v
state_encoder = new_state

model.encoder.load_state_dict(state_encoder)
torch.save(model, './pretrained/model_seg_unet_pre_encoder_1ch_ImageNet.pt')
| StarcoderdataPython |
12803899 | <gh_stars>0
# Generated by Django 3.2 on 2022-02-03 14:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the policy app: creates the
    # DcPolicy and DcPolicyHistory tables.  Do not hand-edit field
    # definitions here; generate a follow-up migration instead.

    initial = True

    dependencies = [
        ('customer', '0002_alter_dccustomer_email'),
    ]

    operations = [
        migrations.CreateModel(
            name='DcPolicy',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('type', models.CharField(db_index=True, max_length=200, verbose_name='Policy Type')),
                ('premium', models.DecimalField(decimal_places=3, default=200, max_digits=200, verbose_name='Premium')),
                ('cover', models.DecimalField(decimal_places=3, default=200000, max_digits=200, verbose_name='Cover')),
                ('state', models.IntegerField(blank=True, choices=[(0, 'New'), (1, 'Quoted'), (2, 'Active')], default=0, null=True, verbose_name='State')),
                ('customer_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_policies', to='customer.dccustomer')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='DcPolicyHistory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('state', models.IntegerField(blank=True, choices=[(0, 'New'), (1, 'Quoted'), (2, 'Active')], default=0, null=True, verbose_name='State')),
                ('policy', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='histories', to='policy.dcpolicy')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
5061207 | <filename>blaze/mahimahi/__init__.py
""" This module defines classes and methods for interacting with Mahimahi """
from .mahimahi import MahiMahiConfig
| StarcoderdataPython |
4809244 | <reponame>krontzo/nume.py
## module goldSearch
''' a,b = bracket(f,xStart,h)
Finds the brackets (a,b) of a minimum point of the
user-supplied scalar function f(x).
The search starts downhill from xStart with a step
length h.
x,fMin = search(f,a,b,tol=1.0e-6)
Golden section method for determining x that minimizes
the user-supplied scalar function f(x).
The minimum must be bracketed in (a,b).
'''
import math
def bracket(f, x1, h):
    """Bracket a minimum of *f*, searching downhill from *x1* with step *h*.

    Returns a tuple (a, b) bracketing a minimum.  If no bracket is found
    within 100 step expansions, prints a message and returns None (original
    behaviour, preserved for compatibility).
    """
    c = 1.618033989  # golden ratio: step-length growth factor
    f1 = f(x1)
    x2 = x1 + h
    f2 = f(x2)
    # Determine downhill direction and change sign of h if needed.
    if f2 > f1:
        h = -h
        x2 = x1 + h
        f2 = f(x2)
        # Uphill in both directions: minimum lies between x1 - h and x1 + h.
        if f2 > f1:
            return x2, x1 - h
    # Search loop: grow the step until the function starts increasing again.
    for i in range(100):
        h = c * h
        x3 = x2 + h
        f3 = f(x3)
        if f3 > f2:
            return x1, x3
        x1 = x2
        x2 = x3
        f1 = f2
        f2 = f3
    # Fixed typo in the original message ("mimimum").
    print("Bracket did not find a minimum")
def search(f, a, b, tol=1.0e-9):
    """Golden-section minimisation of *f* over the bracketing interval (a, b).

    Returns (x, f(x)) for the x minimising f to within tolerance *tol*.
    """
    # Number of telescopings needed to shrink |b - a| below tol;
    # 2.078087 ~= 1/ln(1/R) for the golden ratio conjugate R.
    n_iter = int(math.ceil(-2.078087 * math.log(tol / abs(b - a))))
    ratio = 0.618033989          # golden ratio conjugate
    comp = 1.0 - ratio

    # First telescoping: two interior probe points.
    inner_lo = ratio * a + comp * b
    inner_hi = comp * a + ratio * b
    f_lo = f(inner_lo)
    f_hi = f(inner_hi)

    # Main loop: discard the sub-interval that cannot contain the minimum,
    # reusing the surviving probe point each iteration.
    for _ in range(n_iter):
        if f_lo > f_hi:
            a = inner_lo
            inner_lo, f_lo = inner_hi, f_hi
            inner_hi = comp * a + ratio * b
            f_hi = f(inner_hi)
        else:
            b = inner_hi
            inner_hi, f_hi = inner_lo, f_lo
            inner_lo = ratio * a + comp * b
            f_lo = f(inner_lo)

    if f_lo < f_hi:
        return inner_lo, f_lo
    return inner_hi, f_hi
| StarcoderdataPython |
5149503 | <gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a compiler integration that uses an externally-supplied Zephyr project."""
import collections
import logging
import multiprocessing
import os
import re
import tempfile
import textwrap
import shlex
import shutil
import subprocess
import sys
import yaml
import tvm.micro
from . import base
from .. import compiler
from .. import debugger
from ..transport import debug
from ..transport import file_descriptor
from ..transport import serial
from ..transport import Transport, TransportClosedError, TransportTimeouts
from ..transport import wakeup
_LOG = logging.getLogger(__name__)
class SubprocessEnv(object):
    """Runs subprocesses with a fixed set of environment overrides applied."""

    def __init__(self, default_overrides):
        self.default_overrides = default_overrides

    def run(self, cmd, **kw):
        """Run *cmd* and return its stdout, as subprocess.check_output does.

        The stored overrides are merged on top of the current os.environ.
        """
        merged_env = dict(os.environ)
        merged_env.update(self.default_overrides)
        return subprocess.check_output(cmd, env=merged_env, **kw)
class ProjectNotFoundError(Exception):
    """Raised when the project_dir supplied to ZephyrCompiler does not exist."""
class FlashRunnerNotSupported(Exception):
    """Raised when the FLASH_RUNNER for a project isn't supported by this Zephyr adapter."""
class ZephyrCompiler(tvm.micro.Compiler):
    """A Compiler instance that builds against a pre-existing zephyr project."""

    def __init__(
        self,
        project_dir=None,
        board=None,
        west_cmd=None,
        zephyr_base=None,
        zephyr_toolchain_variant=None,
        env_vars=None,
    ):
        """Configure the compiler for use.

        Parameters
        ----------
        project_dir : str
            Path to the pre-existing Zephyr project.
        board : str
            Name of the Zephyr board to build for (i.e. passed to `west build -b`)
        west_cmd : Optional[list]
            If given, argv that invoke the west build tool. Used only for flashing.
        zephyr_base : Optional[str]
            If given, path to Zephyr, as would normally be present in the ZEPHYR_BASE environment
            variable. If not given, consults this environment variable. This value must be set in
            one of those two places.
        zephyr_toolchain_variant: Optional[str]
            If given, overrides the toolchain used by Zephyr. If not given, uses the default
            zephyr toolchain. When running on OS X outside of docker, you need to specify this.
        env_vars : Optional[Dict[str,str]]
            If given, additional environment variables present when invoking west, cmake, or make.
        """
        self._project_dir = project_dir
        if not os.path.exists(project_dir):
            # Raise this error instead of a potentially-more-cryptic compiler error due to a missing
            # prj.conf.
            raise ProjectNotFoundError(
                f"project_dir supplied to ZephyrCompiler does not exist: {project_dir}"
            )
        self._board = board
        if west_cmd is None:
            # Default: invoke west as a module of the current interpreter.
            self._west_cmd = [sys.executable, "-mwest.app.main"]
        elif isinstance(west_cmd, str):
            self._west_cmd = [west_cmd]
        elif isinstance(west_cmd, list):
            self._west_cmd = west_cmd
        else:
            raise TypeError("west_cmd: expected string, list, or None; got %r" % (west_cmd,))
        # Environment overrides applied to every cmake/make/west invocation.
        env = {}
        if zephyr_toolchain_variant is not None:
            env["ZEPHYR_TOOLCHAIN_VARIANT"] = zephyr_toolchain_variant
        self._zephyr_base = zephyr_base or os.environ["ZEPHYR_BASE"]
        assert (
            self._zephyr_base is not None
        ), f"Must specify zephyr_base=, or ZEPHYR_BASE must be in environment variables"
        env["ZEPHYR_BASE"] = self._zephyr_base
        if env_vars:
            env.update(env_vars)
        self._subprocess_env = SubprocessEnv(env)

    # Maps compiler-options keys onto the EXTRA_<define> CMake variable that carries them.
    OPT_KEY_TO_CMAKE_DEFINE = {
        "cflags": "CFLAGS",
        "ccflags": "CXXFLAGS",
        "ldflags": "LDFLAGS",
    }

    @classmethod
    def _options_to_cmake_args(cls, options):
        """Translate an options dict into a list of cmake -D arguments."""
        args = []
        for key, define in cls.OPT_KEY_TO_CMAKE_DEFINE.items():
            if key in options:
                # Shell-quote each flag and escape ";" (the CMake list separator).
                quoted_opts = [shlex.quote(o).replace(";", "\\;") for o in options[key]]
                args.append(f'-DEXTRA_{define}={" ".join(quoted_opts)}')
        if "cmake_args" in options:
            args.extend(options["cmake_args"])
        return args

    def library(self, output, sources, options=None):
        """Build *sources* as a named Zephyr static library under *output*.

        Generates a minimal wrapper CMake project in *output*, configures it
        with cmake, builds with make, and returns the resulting MicroLibrary.
        """
        project_name = os.path.basename(output)
        if project_name.startswith("lib"):
            project_name = project_name[3:]
        # Reuse the user project's prj.conf when available; otherwise write a
        # minimal one enabling C++.
        lib_prj_conf = os.path.join(output, "prj.conf")
        if self._project_dir is not None:
            project_dir_conf = os.path.join(self._project_dir, "prj.conf")
            if os.path.exists(project_dir_conf):
                shutil.copy(project_dir_conf, lib_prj_conf)
        else:
            with open(lib_prj_conf, "w") as prj_conf_f:
                prj_conf_f.write("CONFIG_CPLUSPLUS=y\n")
        cmakelists_path = os.path.join(output, "CMakeLists.txt")
        with open(cmakelists_path, "w") as cmake_f:
            sources = " ".join(f'"{o}"' for o in sources)
            cmake_f.write(
                textwrap.dedent(
                    f"""\
                    cmake_minimum_required(VERSION 3.13.1)
                    find_package(Zephyr HINTS $ENV{{ZEPHYR_BASE}})
                    project({project_name}_prj)
                    target_sources(app PRIVATE)
                    zephyr_library_named({project_name})
                    target_sources({project_name} PRIVATE {sources})
                    target_sources(app PRIVATE main.c)
                    target_link_libraries(app PUBLIC {project_name})
                    """
                )
            )
            if "include_dirs" in options:
                cmake_f.write(
                    f"target_include_directories({project_name} PRIVATE "
                    f'{" ".join(os.path.abspath(d) for d in options["include_dirs"])})\n'
                )
        # Zephyr requires an app target; give it an empty main.c.
        with open(os.path.join(output, "main.c"), "w"):
            pass
        # expected not to exist after populate_tvm_libs
        build_dir = os.path.join(output, "__tvm_build")
        os.mkdir(build_dir)
        self._subprocess_env.run(
            ["cmake", "..", f"-DBOARD={self._board}"] + self._options_to_cmake_args(options),
            cwd=build_dir,
        )
        num_cpus = multiprocessing.cpu_count()
        self._subprocess_env.run(
            ["make", f"-j{num_cpus}", "VERBOSE=1", project_name], cwd=build_dir
        )
        return tvm.micro.MicroLibrary(build_dir, [f"lib{project_name}.a"])

    def binary(self, output, objects, options=None, link_main=True, main_options=None):
        """Link previously-built objects into firmware using the user project.

        Runs cmake + make in *output* against self._project_dir and returns a
        MicroBinary describing the produced zephyr.elf.
        """
        assert link_main, "Must pass link_main=True"
        assert self._project_dir is not None, "Must supply project_dir= to build binaries"
        copied_libs = base.populate_tvm_objs(self._project_dir, objects)
        # expected not to exist after populate_tvm_objs
        cmake_args = [
            "cmake",
            os.path.abspath(self._project_dir),
            f"-DBOARD={self._board}",
        ] + self._options_to_cmake_args(options)
        if "include_dirs" in options:
            cmake_args.append(
                "-DTVM_INCLUDE_DIRS="
                f'{";".join(os.path.abspath(d) for d in options["include_dirs"])}'
            )
        cmake_args.append(f'-DTVM_LIBS={";".join(copied_libs)}')
        self._subprocess_env.run(cmake_args, cwd=output)
        self._subprocess_env.run(["make"], cwd=output)
        return tvm.micro.MicroBinary(
            output,
            binary_file=os.path.join("zephyr", "zephyr.elf"),
            debug_files=[os.path.join("zephyr", "zephyr.elf")],
            labelled_files={
                "cmake_cache": ["CMakeCache.txt"],
                "device_tree": [os.path.join("zephyr", "zephyr.dts")],
            },
            # QEMU "binaries" must stay in place; the build dir is the launch dir.
            immobile="qemu" in self._board,
        )

    @property
    def flasher_factory(self):
        """Factory producing a ZephyrFlasher configured like this compiler."""
        return compiler.FlasherFactory(
            ZephyrFlasher,
            (self._board,),
            dict(
                zephyr_base=self._zephyr_base,
                project_dir=self._project_dir,
                subprocess_env=self._subprocess_env.default_overrides,
                west_cmd=self._west_cmd,
            ),
        )
# One CMakeCache.txt entry of the form NAME:TYPE=VALUE.
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")

# CMake's accepted spellings of boolean cache values, upper-cased.
CMAKE_BOOL_MAP = {k: True for k in ("1", "ON", "YES", "TRUE", "Y")}
CMAKE_BOOL_MAP.update(
    {k: False for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")}
)


def read_cmake_cache(file_name):
    """Read a CMakeCache.txt-like file and return a dictionary of values."""
    entries = collections.OrderedDict()
    with open(file_name, encoding="utf-8") as cache_file:
        for raw_line in cache_file:
            match = CACHE_ENTRY_RE.match(raw_line.rstrip("\n"))
            if match is None:
                # Comment/blank lines don't match the NAME:TYPE=VALUE shape.
                continue
            value = match.group("value")
            if match.group("type") == "BOOL":
                value = CMAKE_BOOL_MAP[value.upper()]
            entries[match.group("name")] = value
    return entries
class BoardError(Exception):
    """Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""
class BoardAutodetectFailed(Exception):
    """Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
class ZephyrFlasher(tvm.micro.compiler.Flasher):
    """A Flasher implementation that delegates to Zephyr/west."""

    def __init__(
        self,
        board,
        zephyr_base=None,
        project_dir=None,
        subprocess_env=None,
        nrfjprog_snr=None,
        openocd_serial=None,
        flash_args=None,
        debug_rpc_session=None,
        serial_timeouts=None,
        west_cmd=None,
    ):
        zephyr_base = zephyr_base or os.environ["ZEPHYR_BASE"]
        # Borrow Zephyr's devicetree parser (dtlib) from the Zephyr tree,
        # restoring sys.path afterwards.
        sys.path.insert(0, os.path.join(zephyr_base, "scripts", "dts"))
        try:
            import dtlib  # pylint: disable=import-outside-toplevel
            self._dtlib = dtlib
        finally:
            sys.path.pop(0)
        self._board = board
        self._zephyr_base = zephyr_base
        self._project_dir = project_dir
        self._west_cmd = west_cmd
        self._flash_args = flash_args
        self._openocd_serial = openocd_serial
        # Cached result of USB-based autodetection; see openocd_serial().
        self._autodetected_openocd_serial = None
        self._subprocess_env = SubprocessEnv(subprocess_env)
        self._debug_rpc_session = debug_rpc_session
        self._nrfjprog_snr = nrfjprog_snr
        self._serial_timeouts = serial_timeouts

    def _get_nrf_device_args(self):
        # Build ["--snr", <serial>] args selecting the attached nRF board,
        # or [] when nrfjprog reports no boards to disambiguate.
        nrfjprog_args = ["nrfjprog", "--ids"]
        nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
        if not nrfjprog_ids.strip("\n"):
            raise BoardAutodetectFailed(
                f'No attached boards recognized by {" ".join(nrfjprog_args)}'
            )
        boards = nrfjprog_ids.split("\n")[:-1]
        if len(boards) > 1:
            # Multiple boards: the caller-supplied serial number must pick one.
            if self._nrfjprog_snr is None:
                raise BoardError(
                    "Multiple boards connected; specify one with nrfjprog_snr=: "
                    f'{", ".join(boards)}'
                )
            if str(self._nrfjprog_snr) not in boards:
                raise BoardError(
                    f"nrfjprog_snr ({self._nrfjprog_snr}) not found in {nrfjprog_args}: {boards}"
                )
            return ["--snr", str(self._nrfjprog_snr)]
        if not boards:
            return []
        return ["--snr", boards[0]]

    # kwargs passed to usb.core.find to find attached boards for the openocd flash runner.
    BOARD_USB_FIND_KW = {
        "nucleo_f746zg": {"idVendor": 0x0483, "idProduct": 0x374B},
        "stm32f746g_disco": {"idVendor": 0x0483, "idProduct": 0x374B},
    }

    def openocd_serial(self, cmake_entries):
        """Find the serial port to use for a board with OpenOCD flash strategy."""
        if self._openocd_serial is not None:
            return self._openocd_serial
        if self._autodetected_openocd_serial is None:
            import usb  # pylint: disable=import-outside-toplevel
            find_kw = self.BOARD_USB_FIND_KW[cmake_entries["BOARD"]]
            boards = usb.core.find(find_all=True, **find_kw)
            serials = []
            for b in boards:
                serials.append(b.serial_number)
            if len(serials) == 0:
                raise BoardAutodetectFailed(f"No attached USB devices matching: {find_kw!r}")
            # Sort so the choice is deterministic across invocations.
            serials.sort()
            self._autodetected_openocd_serial = serials[0]
            _LOG.debug("zephyr openocd driver: autodetected serial %s", serials[0])
        return self._autodetected_openocd_serial

    def _get_openocd_device_args(self, cmake_entries):
        return ["--serial", self.openocd_serial(cmake_entries)]

    @classmethod
    def _get_flash_runner(cls, cmake_entries):
        # Flash runner name comes from the CMake cache when present, else
        # from the build's runners.yaml.
        flash_runner = cmake_entries.get("ZEPHYR_BOARD_FLASH_RUNNER")
        if flash_runner is not None:
            return flash_runner
        with open(cmake_entries["ZEPHYR_RUNNERS_YAML"]) as f:
            doc = yaml.load(f, Loader=yaml.FullLoader)
        return doc["flash-runner"]

    def _get_device_args(self, cmake_entries):
        # Runner-specific argv additions selecting the attached device.
        flash_runner = self._get_flash_runner(cmake_entries)
        if flash_runner == "nrfjprog":
            return self._get_nrf_device_args()
        if flash_runner == "openocd":
            return self._get_openocd_device_args(cmake_entries)
        raise BoardError(
            f"Don't know how to find serial terminal for board {cmake_entries['BOARD']} with flash "
            f"runner {flash_runner}"
        )

    def flash(self, micro_binary):
        """Flash *micro_binary* (or launch QEMU) and return a live transport."""
        cmake_entries = read_cmake_cache(
            micro_binary.abspath(micro_binary.labelled_files["cmake_cache"][0])
        )
        # QEMU boards are never flashed; just launch the emulator transport.
        if "qemu" in cmake_entries["BOARD"]:
            return ZephyrQemuTransport(micro_binary.base_dir, startup_timeout_sec=30.0)
        build_dir = os.path.dirname(
            micro_binary.abspath(micro_binary.labelled_files["cmake_cache"][0])
        )
        # The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
        # This is because readback protection is enabled by default when this device is flashed.
        # Otherwise, flashing may fail with an error such as the following:
        #  ERROR: The operation attempted is unavailable due to readback protection in
        #  ERROR: your device. Please use --recover to unlock the device.
        if (
            self._board.startswith("nrf5340dk")
            and self._get_flash_runner(cmake_entries) == "nrfjprog"
        ):
            recover_args = ["nrfjprog", "--recover"]
            recover_args.extend(self._get_nrf_device_args())
            self._subprocess_env.run(recover_args, cwd=build_dir)
        west_args = (
            self._west_cmd
            + ["flash", "--build-dir", build_dir, "--skip-rebuild"]
            + self._get_device_args(cmake_entries)
        )
        if self._flash_args is not None:
            west_args.extend(self._flash_args)
        self._subprocess_env.run(west_args, cwd=build_dir)
        return self.transport(micro_binary)

    def _find_nrf_serial_port(self, cmake_entries):
        # `nrfjprog --com` output is parsed as "<snr> <port> <VCOMn>" lines.
        # NOTE(review): selecting VCOM2 as the console port appears to be
        # board-specific -- confirm for other nRF boards.
        com_ports = subprocess.check_output(
            ["nrfjprog", "--com"] + self._get_device_args(cmake_entries), encoding="utf-8"
        )
        ports_by_vcom = {}
        for line in com_ports.split("\n")[:-1]:
            parts = line.split()
            ports_by_vcom[parts[2]] = parts[1]
        return {"port_path": ports_by_vcom["VCOM2"]}

    def _find_openocd_serial_port(self, cmake_entries):
        return {"grep": self.openocd_serial(cmake_entries)}

    def _find_serial_port(self, micro_binary):
        # Return SerialTransport kwargs locating the device console, chosen
        # by the build's flash runner.
        cmake_entries = read_cmake_cache(
            micro_binary.abspath(micro_binary.labelled_files["cmake_cache"][0])
        )
        flash_runner = self._get_flash_runner(cmake_entries)
        if flash_runner == "nrfjprog":
            return self._find_nrf_serial_port(cmake_entries)
        if flash_runner == "openocd":
            return self._find_openocd_serial_port(cmake_entries)
        raise FlashRunnerNotSupported(
            f"Don't know how to deduce serial port for flash runner {flash_runner}"
        )

    def transport(self, micro_binary):
        """Instantiate the transport for use with non-QEMU Zephyr."""
        # Read the console baudrate from the build's devicetree.
        dt_inst = self._dtlib.DT(
            micro_binary.abspath(micro_binary.labelled_files["device_tree"][0])
        )
        uart_baud = (
            dt_inst.get_node("/chosen")
            .props["zephyr,console"]
            .to_path()
            .props["current-speed"]
            .to_num()
        )
        _LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
        port_kwargs = self._find_serial_port(micro_binary)
        serial_transport = serial.SerialTransport(
            timeouts=self._serial_timeouts, baudrate=uart_baud, **port_kwargs
        )
        if self._debug_rpc_session is None:
            return serial_transport
        # Wrap the serial transport so a GDB session can be driven over RPC.
        return debug.DebugWrapperTransport(
            debugger.RpcDebugger(
                self._debug_rpc_session,
                debugger.DebuggerFactory(
                    ZephyrDebugger,
                    (
                        " ".join(shlex.quote(x) for x in self._west_cmd),
                        os.path.dirname(micro_binary.abspath(micro_binary.label("cmake_cache")[0])),
                        micro_binary.abspath(micro_binary.debug_files[0]),
                        self._zephyr_base,
                    ),
                    {},
                ),
            ),
            serial_transport,
        )
class QemuStartupFailureError(Exception):
    """Raised when the qemu pipe is not present within startup_timeout_sec."""
class QemuFdTransport(file_descriptor.FdTransport):
    """An FdTransport subclass that escapes written data to accommodate the QEMU monitor.

    It's supposedly possible to disable the monitor, but Zephyr controls most of the command-line
    arguments for QEMU and there are too many options which implictly enable the monitor, so this
    approach seems more robust.
    """

    def write_monitor_quit(self):
        # 0x01 ('x') is the monitor escape sequence that terminates QEMU.
        file_descriptor.FdTransport.write(self, b"\x01x", 1.0)

    def close(self):
        file_descriptor.FdTransport.close(self)

    def timeouts(self):
        # Timeouts are supplied by the wrapping transport; this must not be called.
        assert False, "should not get here"

    def write(self, data, timeout_sec):
        """Write data, escaping for QEMU monitor."""
        # 0x01 is the QEMU monitor escape byte; doubling it makes QEMU pass a
        # literal 0x01 through to the guest instead of entering the monitor.
        to_write = bytearray()
        escape_pos = []
        for i, b in enumerate(data):
            if b == 0x01:
                to_write.append(b)
                escape_pos.append(i)
            to_write.append(b)
        num_written = file_descriptor.FdTransport.write(self, to_write, timeout_sec)
        # Report progress in terms of the caller's unescaped data: discount
        # each extra escape byte that made it onto the wire.
        num_written -= sum(1 if x < num_written else 0 for x in escape_pos)
        return num_written
class ZephyrQemuTransport(Transport):
    """The user-facing Zephyr QEMU transport class."""

    def __init__(self, base_dir, startup_timeout_sec=5.0, **kwargs):
        self.base_dir = base_dir                      # build dir with the `make run` target
        self.startup_timeout_sec = startup_timeout_sec
        self.kwargs = kwargs                          # extra kwargs for subprocess.Popen
        self.proc = None
        self.fd_transport = None
        self.pipe_dir = None

    def timeouts(self):
        # Session-start timeout is stretched to cover QEMU's startup time.
        return TransportTimeouts(
            session_start_retry_timeout_sec=2.0,
            session_start_timeout_sec=self.startup_timeout_sec,
            session_established_timeout_sec=5.0,
        )

    def open(self):
        # Launch QEMU via `make run`, talking to it through a pair of named
        # FIFOs derived from the QEMU_PIPE base path (<base>.in / <base>.out).
        self.pipe_dir = tempfile.mkdtemp()
        self.pipe = os.path.join(self.pipe_dir, "fifo")
        self.write_pipe = os.path.join(self.pipe_dir, "fifo.in")
        self.read_pipe = os.path.join(self.pipe_dir, "fifo.out")
        os.mkfifo(self.write_pipe)
        os.mkfifo(self.read_pipe)
        self.proc = subprocess.Popen(
            ["make", "run", f"QEMU_PIPE={self.pipe}"],
            cwd=self.base_dir,
            **self.kwargs,
        )
        # NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
        # limitation on linux. Without this, non-blocking I/O can't use timeouts because named
        # FIFO are always considered ready to read when no one has opened them for writing.
        self.fd_transport = wakeup.WakeupTransport(
            QemuFdTransport(
                os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK),
                os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK),
                self.timeouts(),
            ),
            b"\xfe\xff\xfd\x03\0\0\0\0\0\x02" b"fw",
        )
        self.fd_transport.open()

    def close(self):
        # Ask QEMU to quit via the monitor escape, wait for exit, then
        # release the transport and the temporary FIFO directory.
        if self.fd_transport is not None:
            self.fd_transport.child_transport.write_monitor_quit()
            self.proc.wait()
            self.fd_transport.close()
            self.fd_transport = None
        if self.proc is not None:
            self.proc = None
        if self.pipe_dir is not None:
            shutil.rmtree(self.pipe_dir)
            self.pipe_dir = None

    def read(self, n, timeout_sec):
        if self.fd_transport is None:
            raise TransportClosedError()
        return self.fd_transport.read(n, timeout_sec)

    def write(self, data, timeout_sec):
        if self.fd_transport is None:
            raise TransportClosedError()
        return self.fd_transport.write(data, timeout_sec)
class ZephyrDebugger(debugger.GdbDebugger):
    """A Zephyr debugger implementation."""

    def __init__(self, west_cmd, build_dir, elf_path, zephyr_base):
        super(ZephyrDebugger, self).__init__()
        self._west_cmd = shlex.split(west_cmd)
        self._build_dir = build_dir
        self._elf_path = elf_path
        self._zephyr_base = zephyr_base

    def popen_kwargs(self):
        """Return subprocess.Popen kwargs that launch `west debug` for this build."""
        env = dict(os.environ)
        env["ZEPHYR_BASE"] = self._zephyr_base
        argv = list(self._west_cmd)
        argv += [
            "debug",
            "--skip-rebuild",
            "--build-dir",
            self._build_dir,
            "--elf-file",
            self._elf_path,
        ]
        return {"args": argv, "env": env}
| StarcoderdataPython |
5079048 | #!/usr/bin/env python3
"""
MiSTer SNES Controller display based on retrospy
(c) 2021 <NAME>
License: MIT
To be honest, this script is really shitty and I just did it quick and
dirty on a free afternoon to simply show inputs on my SNES compatible
controller from my MiSTer FPGA. The client viewer from the
suggested application retrospy does not work properly on Linux based
operating systems to my knowledge.
Perhaps this script is useful for anybody out there, yet I cannot
imagine somebody would like to use it without a lot of refactoring etc.
However, this script requires a working SSH connection to the MiSTer and
the retrospy binary for MiSTer found at:
https://github.com/retrospy/RetroSpy/releases/latest/download/retrospy
Put this binary to: /media/fat/retrospy/retrospy
Just as you would with the retrospy installer.
This script will simply run the retrospy binary on the device using
a SSH subprocess to fetch the button state information. I suggest using
key-file authentication and the SSH agent to simplify the fetching
process.
Please run the retrospy binary directly on your device to test which
bits are switched depending on your controller. Most probably you will
have to adapt the self.buttons object in the MisterClient class based
on that information.
"""
import sys
import subprocess
import threading
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
from PyQt5.QtGui import QIcon, QPixmap
# adapt following parameters if needed
ssh_host = "mister"
ssh_port = 22
username = "root"
command = "/media/fat/retrospy/retrospy /dev/input/js0"
class MisterClient(threading.Thread):
    """Background thread that streams retrospy output from the MiSTer over
    SSH and toggles the visibility of the per-button overlay labels."""

    def __init__(self, app):
        threading.Thread.__init__(self)
        self.killed = False
        # mapping of bit offset in a retrospy output line to the GUI label
        # shown while that position reads "1";
        # adapt according to your output in retrospy
        self.buttons = {
            16: app.a,
            17: app.b,
            19: app.x,
            20: app.y,
            22: app.l1,
            23: app.r1,
            26: app.select,
            27: app.start,
            160: app.r,
            174: app.l,
            192: app.d,
            206: app.u,
        }
        # SSH subprocess handle; populated by run().
        self.p = None

    def run(self):
        """Spawn the SSH subprocess and update labels until it exits."""
        self.p = subprocess.Popen(
            ["ssh", "%s" % ssh_host, command],
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        while self.p.poll() is None:
            for line_raw in self.p.stdout:
                line = line_raw.decode("ascii")
                # Skip truncated lines once per line (the highest button
                # offset is 206, so a full state line is >= 224 chars);
                # the original re-checked this for every button.
                if len(line) < 224:
                    continue
                for key, label in self.buttons.items():
                    label.setHidden(line[key] == "0")

    def kill(self):
        """Terminate the SSH subprocess.

        Guarded so that calling kill() before run() has started (or before
        it created the subprocess) is a safe no-op instead of an
        AttributeError on ``None``.
        """
        if self.p is not None:
            self.p.kill()
class App(QWidget):
    """Main window: draws the SNES controller background plus one hidden
    "pressed" overlay label per button; MisterClient toggles visibility."""

    def __init__(self):
        super().__init__()
        self.title = "Mister SNES Controller"
        self.left = 10
        self.top = 10
        # NOTE(review): these shadow QWidget.width()/height(); they are only
        # read as plain values in setGeometry() below, so behavior is fine.
        self.width = 640
        self.height = 480
        self.setStyleSheet("background-color: #00ffff;")
        self.initUI()

    def _overlay(self, image_path, x, y):
        """Create a hidden overlay label showing *image_path* at (x, y)."""
        pixmap = QPixmap(image_path)
        label = QLabel(self)
        label.move(x, y)
        label.setPixmap(pixmap)
        label.setHidden(True)
        return label

    def initUI(self):
        """Build the controller background and all button overlays."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        bgLabel = QLabel(self)
        bgImage = QPixmap("images/snes_controller.png")
        bgLabel.setPixmap(bgImage)
        # One hidden overlay per physical button; positions are pixel
        # coordinates on the background image.
        self.a = self._overlay("images/a_pressed.png", 810, 209)
        self.b = self._overlay("images/b_pressed.png", 718, 282)
        self.x = self._overlay("images/x_pressed.png", 718, 139)
        self.y = self._overlay("images/y_pressed.png", 625, 211)
        self.start = self._overlay("images/s_pressed.png", 461, 247)
        self.select = self._overlay("images/s_pressed.png", 355, 246)
        self.l1 = self._overlay("images/l_pressed.png", 102, 31)
        self.r1 = self._overlay("images/r_pressed.png", 648, 31)
        self.u = self._overlay("images/up_pressed.png", 174, 164)
        self.d = self._overlay("images/down_pressed.png", 174, 277)
        self.l = self._overlay("images/left_pressed.png", 120, 219)
        self.r = self._overlay("images/right_pressed.png", 232, 219)
        # Size the window to the background image and display it.
        self.resize(bgImage.width(), bgImage.height())
        self.show()
if __name__ == "__main__":
    # Build the Qt application and the controller window, then stream button
    # state from the MiSTer in a background thread until the window closes.
    app = QApplication(sys.argv)
    ex = App()
    client = MisterClient(ex)
    client.start()
    app.exec_()
    # Window closed: tear down the SSH subprocess before exiting.
    client.kill()
    sys.exit()
| StarcoderdataPython |
3372555 | <reponame>drewilson23/xzceb-flask_eng_fr
from .. import translator
| StarcoderdataPython |
12832518 | <filename>ray/adaptdl_ray/adaptdl/utils.py
# Copyright 2021 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from collections import Counter, defaultdict
from copy import deepcopy
from ray import tune
from ray.util.placement_group import get_current_placement_group
from adaptdl_ray.adaptdl import config
def pgf_to_allocation(pgf) -> List[str]:
    """ Convert a Placement Groups Factory to AdaptDL allocation"""
    device_key = config.default_device()
    node_names = []
    device_counts = []
    # Skip the first (head) bundle; worker bundles carry node/device info.
    for bundle in pgf._bundles[1:]:
        for key, value in bundle.items():
            if key.startswith("node"):
                node_names.append(key.split(":")[1])
            if key == device_key:
                device_counts.append(int(value))
    allocation = []
    for node, count in zip(node_names, device_counts):
        allocation.extend([node] * count)
    return allocation
def allocation_to_pgf(alloc: List[str], resources_per_node=None):
    """ Convert AdaptDL allocation to a Placement Group Factory"""
    if not resources_per_node:
        # Default to one CPU per replica, plus one device when the cluster
        # default device is GPU.
        resources_per_node = {"CPU": 1.0}
        if config.default_device() == "GPU":
            resources_per_node["GPU"] = 1.0

    def _bundle_for(node, instance_count):
        """Build the resource bundle for *instance_count* replicas on *node*."""
        bundle = deepcopy(resources_per_node)
        bundle["CPU"] *= instance_count
        if "GPU" in bundle:
            bundle["GPU"] *= instance_count
        if "adaptdl_virtual" not in node:
            # Pin the bundle to the physical node via a tiny node resource.
            bundle[f"node:{node}"] = 0.01
        return bundle

    assert len(alloc) > 0
    # Head bundle first, then one bundle per distinct node.
    bundles = [{"CPU": 0.001}]
    for node, count in Counter(alloc).items():
        bundles.append(_bundle_for(node, count))
    return tune.PlacementGroupFactory(bundles)
def pgf_to_num_replicas(pgf) -> int:
    """ Extract the number of replicas of the trial from its PGF"""
    device_key = config.default_device()
    total = 0
    # Skip the head bundle; worker bundles carry the device counts.
    for bundle in pgf._bundles[1:]:
        total += int(bundle.get(device_key, 0))
    return total
def pgs_to_resources(pgs: List[Dict]) -> Dict:
    """ Return node-level resource usage by all PGs in pgs."""
    # Note that every bundle is tagged with the node resource
    usage = defaultdict(Counter)
    for pg in pgs:
        # Skip the head bundle; every worker bundle carries a "node:<ip>" key.
        for bundle in pg["bundle_cache"][1:]:
            node_ip = [key.split(":")[1] for key in bundle.keys()
                       if key.startswith("node")][0]
            for resource_name, amount in bundle.items():
                usage[node_ip][resource_name] += amount
    return usage
def unique_nodes_pg() -> int:
    """Return the number of distinct node resources referenced by the current
    placement group, or 0 when no placement group is active.

    Fixes two small defects in the original: it called
    ``get_current_placement_group()`` twice (once for the None check, once for
    the bundles), and it accumulated a list only to deduplicate it at the end.
    """
    pg = get_current_placement_group()
    if pg is None:
        return 0
    nodes = set()
    for bundle in pg.bundle_specs:
        for resource in bundle:
            if "node" in resource:
                nodes.add(resource)
    return len(nodes)
| StarcoderdataPython |
11254953 | <filename>tests/test_session.py<gh_stars>0
from LSP.plugin.core.protocol import WorkspaceFolder
from LSP.plugin.core.sessions import create_session, Session, InitializeError, ACQUIRE_READY_LOCK_TIMEOUT
from LSP.plugin.core.types import ClientConfig
from LSP.plugin.core.types import Settings
from test_mocks import MockClient
from test_mocks import TEST_CONFIG
from test_mocks import TEST_LANGUAGE
import unittest
import unittest.mock
import sublime
try:
from typing import Any, List, Dict, Tuple, Callable, Optional
assert Any and List and Dict and Tuple and Callable and Optional and Session
except ImportError:
pass
class SessionTest(unittest.TestCase):
    """Unit tests for creating, initializing and shutting down LSP sessions."""

    def assert_if_none(self, session: 'Optional[Session]') -> 'Session':
        # Narrow Optional[Session] to Session for both the test and mypy.
        self.assertIsNotNone(session)
        assert session  # mypy
        return session

    def assert_initialized(self, session: Session) -> None:
        """Fail the test unless the session completed its initialize handshake."""
        # acquire_timeout() raises InitializeError when the session never
        # became ready within the timeout.
        try:
            with session.acquire_timeout():
                return
        except InitializeError:
            pass
        self.fail("session failed to initialize")

    def make_session(self, bootstrap_client, on_pre_initialize=None, on_post_initialize=None,
                     on_post_exit=None) -> Session:
        """Create a session rooted at "/" using the shared test config."""
        project_path = "/"
        folders = [WorkspaceFolder.from_path(project_path)]
        return self.assert_if_none(
            create_session(
                config=TEST_CONFIG,
                workspace_folders=folders,
                env=dict(),
                settings=Settings(),
                bootstrap_client=bootstrap_client,
                on_pre_initialize=on_pre_initialize,
                on_post_initialize=on_post_initialize,
                on_post_exit=on_post_exit))

    # @unittest.skip("need an example config")
    def test_can_create_session(self):
        """A session can be created around a real (trivial) subprocess."""
        config = ClientConfig(
            "test",
            ["cmd.exe"] if sublime.platform() == "windows" else ["ls"],
            None, [], [], None, [TEST_LANGUAGE])
        project_path = "/"
        folders = [WorkspaceFolder.from_path(project_path)]
        session = self.assert_if_none(
            create_session(config, folders, dict(), Settings()))
        session.client.transport.close()

    def test_can_get_started_session(self):
        """A mock-backed session initializes and exposes its capabilities."""
        post_initialize_callback = unittest.mock.Mock()
        session = self.make_session(
            MockClient(),
            on_post_initialize=post_initialize_callback)
        self.assert_initialized(session)
        self.assertIsNotNone(session.client)
        self.assertTrue(session.has_capability("testing"))
        self.assertTrue(session.get_capability("testing"))
        assert post_initialize_callback.call_count == 1

    def test_pre_initialize_callback_is_invoked(self):
        """Both pre- and post-initialize callbacks fire exactly once."""
        pre_initialize_callback = unittest.mock.Mock()
        post_initialize_callback = unittest.mock.Mock()
        session = self.make_session(
            MockClient(),
            on_pre_initialize=pre_initialize_callback,
            on_post_initialize=post_initialize_callback)
        self.assert_initialized(session)
        self.assertIsNotNone(session.client)
        self.assertTrue(session.has_capability("testing"))
        self.assertTrue(session.get_capability("testing"))
        assert pre_initialize_callback.call_count == 1
        assert post_initialize_callback.call_count == 1

    def test_can_shutdown_session(self):
        """Ending a session clears its client/capabilities and fires on_post_exit."""
        post_initialize_callback = unittest.mock.Mock()
        post_exit_callback = unittest.mock.Mock()
        session = self.make_session(
            MockClient(),
            on_post_initialize=post_initialize_callback,
            on_post_exit=post_exit_callback)
        self.assert_initialized(session)
        self.assertIsNotNone(session.client)
        self.assertTrue(session.has_capability("testing"))
        assert post_initialize_callback.call_count == 1
        session.end()
        self.assertIsNone(session.client)
        self.assertFalse(session.has_capability("testing"))
        self.assertIsNone(session.get_capability("testing"))
        assert post_exit_callback.call_count == 1

    def test_initialize_failure(self):
        """Using the session raises InitializeError when the server responds too late."""
        def async_response(f: 'Callable') -> None:
            # resolve the request one second after the timeout triggers (so it's always too late).
            timeout_ms = 1000 * (ACQUIRE_READY_LOCK_TIMEOUT + 1)
            sublime.set_timeout(f, timeout_ms=timeout_ms)
        client = MockClient(async_response=async_response)
        session = self.make_session(client)
        with self.assertRaises(InitializeError):
            session.handles_path("foo")
| StarcoderdataPython |
11356357 | <filename>src/Interview_exp/goog/subarray_sums/AllSubstriSubArrayProbs.py
'''LC3: Longest Substring Without Repeating Characters
https://leetcode.com/problems/longest-substring-without-repeating-characters/
Given a string, find the length of the longest substring without repeating characters.
Example 1:
Input: "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3'''
class Solution:
    def lengthOfLongestSubstring(self, s):
        """
        Return the length of the longest substring of *s* that contains no
        repeating characters (LeetCode 3).

        Sliding window: ``last_seen`` maps each character to the index of its
        most recent occurrence; when the incoming character already appears
        inside the window, jump the left edge past that occurrence. O(n) time.

        Fix: the original referenced ``collections`` without importing it in
        this snippet (NameError when used standalone); this version is
        self-contained and uses only builtins.

        :type s: str
        :rtype: int
        """
        last_seen = {}
        best = 0
        left = 0
        for right, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= left:
                # Character repeats inside the window: move left past it.
                left = last_seen[ch] + 1
            last_seen[ch] = right
            best = max(best, right - left + 1)
        return best
'''LC159:Longest substring with atmost 2 distinct chars
https://leetcode.com/problems/longest-substring-with-at-most-two-distinct-characters/
Given a string s , find the length of the longest substring t that contains at most 2 distinct characters.
Example 1:
Input: "eceba"
Output: 3
Explanation: t is "ece" which its length is 3.
Example 2:
Input: "ccaabbb"
Output: 5
Explanation: t is "aabbb" which its length is 5'''
class Solution(object):
    def lengthOfLongestSubstringTwoDistinct(self, s):
        """
        Return the length of the longest substring of *s* containing at most
        two distinct characters (sliding window over character counts).

        :type s: str
        :rtype: int
        """
        counts = {}
        left = right = 0
        distinct = 0
        best = 0
        while right < len(s):
            ch = s[right]
            counts[ch] = counts.get(ch, 0) + 1
            if counts[ch] == 1:
                distinct += 1
            right += 1
            # Shrink from the left until at most two distinct chars remain.
            while left < right and distinct > 2:
                counts[s[left]] -= 1
                if counts[s[left]] == 0:
                    distinct -= 1
                left += 1
            best = max(best, right - left)
        return best
'''LC340: Longest Substring with At Most K Distinct Characters:
Given a string, find the length of the longest substring T that contains at most k distinct characters.
Example 1:
Input: s = "eceba", k = 2
Output: 3
Explanation: T is "ece" which its length is 3.
Example 2:
Input: s = "aa", k = 1
Output: 2
Explanation: T is "aa" which its length is 2'''
class Solution(object):
    def lengthOfLongestSubstringKDistinct(self, s, k):
        """
        Return the length of the longest substring of *s* containing at most
        *k* distinct characters (sliding window over character counts).

        :type s: str
        :type k: int
        :rtype: int
        """
        counts = {}
        left = right = 0
        distinct = 0
        best = 0
        while right < len(s):
            ch = s[right]
            counts[ch] = counts.get(ch, 0) + 1
            if counts[ch] == 1:
                distinct += 1
            right += 1
            # Shrink from the left until at most k distinct chars remain.
            while left < right and distinct > k:
                counts[s[left]] -= 1
                if counts[s[left]] == 0:
                    distinct -= 1
                left += 1
            best = max(best, right - left)
        return best
'''LC992: Subarrays with K Different Integers
Given an array A of positive integers,
call a (contiguous, not necessarily distinct)
subarray of A good if the number of different
integers in that subarray is exactly K.
(For example, [1,2,3,1,2] has 3 different integers: 1, 2, and 3.)
Example 1:
Input: A = [1,2,1,2,3], K = 2
Output: 7
Explanation: Subarrays formed with exactly 2 different integers: [1,2], [2,1], [1,2], [2,3], [1,2,1], [2,1,2], [1,2,1,2].
Example 2:
Input: A = [1,2,1,3,4], K = 3
Output: 3
Explanation: Subarrays formed with exactly 3
different integers: [1,2,1,3], [2,1,3], [1,3,4]'''
import collections
class Solution:
    def subarraysWithKDistinct(self, A: 'List[int]', K: 'int') -> 'int':
        """Count subarrays of A containing exactly K distinct integers.

        Uses the identity exactly(K) == atMost(K) - atMost(K - 1).
        """
        return (self.subarraysWithAtMostKDistinct(A, K)
                - self.subarraysWithAtMostKDistinct(A, K - 1))

    def subarraysWithAtMostKDistinct(self, s, k):
        """Count subarrays of *s* containing at most *k* distinct values."""
        counts = {}
        left = right = 0
        distinct = 0
        total = 0
        while right < len(s):
            value = s[right]
            counts[value] = counts.get(value, 0) + 1
            if counts[value] == 1:
                distinct += 1
            right += 1
            while left < right and distinct > k:
                counts[s[left]] -= 1
                if counts[s[left]] == 0:
                    distinct -= 1
                left += 1
            # Every subarray ending at right-1 whose left edge lies in
            # [left, right) has at most k distinct values.
            total += right - left
        return total
'''
LC1248: Count Number of Nice Subarrays
Given an array of integers nums and an integer
k. A subarray is called nice if there are k
odd numbers on it.
Return the number of nice sub-arrays.
Example 1:
Input: nums = [1,1,2,1,1], k = 3
Output: 2
Explanation: The only sub-arrays with 3 odd numbers are [1,1,2,1] and [1,2,1,1].
Example 2:
Input: nums = [2,4,6], k = 1
Output: 0
Explanation: There is no odd numbers in the array.
Example 3:
Input: nums = [2,2,2,1,2,2,1,2,2,2], k = 2
Output: 16'''
class Solution(object):
    def numberOfSubarrays(self, nums, k):
        """
        Count "nice" subarrays: contiguous subarrays of *nums* containing
        exactly *k* odd numbers (LeetCode 1248).

        Uses exactly(k) = atMost(k) - atMost(k - 1), which runs in O(n).
        The original re-scanned the even prefix starting at the left edge for
        every right edge, which degrades to O(n^2) on inputs with long even
        runs; results are identical on all the documented examples.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        def at_most(limit):
            # Number of subarrays containing at most `limit` odd values.
            if limit < 0:
                return 0
            left = 0
            odd_count = 0
            total = 0
            for right, value in enumerate(nums):
                odd_count += value & 1
                while odd_count > limit:
                    odd_count -= nums[left] & 1
                    left += 1
                total += right - left + 1
            return total

        return at_most(k) - at_most(k - 1)
6545561 | <filename>func_without_wsgi_middleware.py
# 素のWSGIアプリ
# middlewareの作り方は以下を参照
# http://gihyo.jp/dev/feature/01/wsgi/0003
# python func_without_wsgi_middleware.py
from wsgiref.simple_server import make_server
def hello_app(environ, start_response):
    """Minimal WSGI application: always respond 200 OK with a plain-text body."""
    headers = [('Content-Type', 'text/plain')]
    start_response('200 OK', headers)
    return [b"Hello, world."]
if __name__ == "__main__":
    # Serve the WSGI app on all interfaces, port 8000, until interrupted.
    httpd = make_server('', 8000, hello_app)
    print("Serving on port 8000...")
    httpd.serve_forever()
# Simple smoke-test prints.
print("hello python")
print("TPP")
# Fixed: the original line ended with a full-width quote (U+201C) in place of
# the closing ASCII quote, leaving the string literal unterminated
# (SyntaxError for the whole file).
print("如果我是DJ,你会爱我吗")
| StarcoderdataPython |
11252371 |
##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds as cmds
import maya.OpenMaya
import IECore
import IECoreMaya
## \todo Reimplement this in terms of the new Panel base class.
## \todo Prefix methods which aren't intended to be public to make them either private or protected.
class ParameterPanel :
	"""A Maya scripted-panel backend that hosts drag'n'drop-able Cortex
	parameter UIs. The static methods are the scripted-panel lifecycle hooks;
	the per-panel state lives in a nested ParameterUIContainer instance keyed
	by panel name in `panels`."""

	# Maps scripted-panel name -> ParameterUIContainer for that panel.
	panels = {}

	@staticmethod
	def trashDropCallback( dragControl, dropControl, messages, x, y, dragType, container ) :
		"""Drop handler for the trash icon: removes the dragged parameter UI."""
		# The drag payload is ["ParameterUI", "<repr of an args dict>"].
		# NOTE(review): eval() of drag payload — trusted UI-internal data only.
		if len(messages) == 2 and messages.pop(0) == "ParameterUI" :

			argsDictStr = messages.pop()
			argsDict = eval( argsDictStr )

			container.removeControl( argsDict['nodeName'], argsDict['longParameterName'], deferredUIDeletion = True )

	@staticmethod
	def newControlDropCallback( dragControl, dropControl, messages, x, y, dragType, container ) :
		"""Drop handler for the scroll pane: adds a UI for the dragged parameter."""
		if len(messages) == 2 and messages.pop(0) == "ParameterUI" :

			argsDictStr = messages.pop()
			argsDict = eval( argsDictStr )

			container.addControl( argsDict['nodeName'], argsDict['longParameterName'] )

	class ParameterUIContainer :
		"""Holds the UI state for one panel instance: which (node, parameter)
		pairs are shown and the Maya layout each one lives in."""

		def __init__( self ) :

			self.formLayout = None
			self.containerLayout = maya.cmds.setParent( query = True )
			# List of (nodeName, longParameterName) tuples currently shown.
			self.parameters = []
			# (nodeName, longParameterName) -> Maya layout name.
			self.parameterLayouts = {}
			# Parameters queued for re-creation after a restore/tear-off.
			self.restoreParameters = []

		def addControl( self, node, longParameterName ) :
			"""Create (if absent) the UI for the given parameter of `node`."""

			fnPH = IECoreMaya.FnParameterisedHolder( node )
			parameterised = fnPH.getParameterised()[0]
			parameter = parameterised.parameters()

			# Walk down the dotted parameter path to the target parameter.
			for p in longParameterName.split( '.' ) :

				if p :

					parameter = getattr( parameter, p )

			assert( self.containerLayout )
			maya.cmds.setParent( self.containerLayout )

			if not ( node, longParameterName ) in self.parameterLayouts:

				n = longParameterName.split(".")

				if type( n ) is list:

					# Take off the last element (to retrieve the parent parameter name),
					# because ParameterUI.create will add it again.
					n.pop()
					parentParameterName = ".".join( n )

				else :

					parentParameterName = longParameterName

				newLayout = IECoreMaya.ParameterUI.create(
					node,
					parameter,
					labelWithNodeName = True,
					longParameterName = parentParameterName,
					withCompoundFrame = True
				).layout()

				self.parameters.append( ( node, longParameterName ) )
				self.parameterLayouts[ ( node, longParameterName ) ] = newLayout

				# Mark the scene dirty so the panel state gets saved with it.
				maya.cmds.file(
					modified = True
				)

		def removeControl( self, node, longParameterName, deferredUIDeletion = False ) :
			"""Remove the UI for the given parameter, if this panel owns one."""

			# In case another panel's control has been dropped onto our trash can
			if not ( node, longParameterName ) in self.parameterLayouts :

				return

			layoutToDelete = self.parameterLayouts[ ( node, longParameterName ) ]

			if maya.cmds.layout( layoutToDelete, query = True, exists = True ) :

				if deferredUIDeletion :

					# We don't want to delete the UI in the middle of a drag'n'drop event, it crashes Maya
					maya.cmds.evalDeferred( "import maya.cmds as cmds; cmds.deleteUI( '%s', layout = True)" % ( layoutToDelete ) )

				else :

					maya.cmds.deleteUI( '%s' % ( layoutToDelete ), layout = True)

			del self.parameterLayouts[ ( node, longParameterName ) ]
			self.parameters.remove( ( node, longParameterName ) )

			maya.cmds.file(
				modified = True
			)

		# We have the "args" argument to allow use in a Maya UI callback, which passes us extra arguments that we don't need
		def removeAllControls( self, args = None ) :
			"""Remove every parameter UI currently shown in this panel."""

			# Iterate a copy because removeControl mutates self.parameters.
			toRemove = list( self.parameters )

			for i in toRemove :

				self.removeControl( i[0], i[1] )

			assert( len( self.parameters ) == 0 )
			assert( len( self.parameterLayouts ) == 0 )

		def add( self, panel ):
			"""Scripted-panel "add" hook: (re)build the panel UI chrome and
			re-create any controls queued in restoreParameters."""

			try :

				self.parameterLayouts = {}

				# If "add" has been called, and we already have some parameters, it means we're been torn-off and should
				# recreate all our controls in the new window.
				if not self.restoreParameters and self.parameters :

					self.restoreParameters = list( self.parameters )

				self.parameters = []

				maya.cmds.waitCursor( state = True )

				menuBar = maya.cmds.scriptedPanel(
					panel,
					query = True,
					control = True
				)

				self.formLayout = maya.cmds.formLayout(
					numberOfDivisions = 100
				)

				maya.cmds.setParent(
					menuBar
				)

				editMenu = maya.cmds.menu(
					label = "Edit"
				)

				maya.cmds.menuItem(
					label = "Remove All",
					parent = editMenu,
					command = IECore.curry( ParameterPanel.ParameterUIContainer.removeAllControls, self )
				)

				maya.cmds.setParent( self.formLayout )

				# Scroll pane accepts new-control drops; trash icon removals.
				scrollPane = maya.cmds.scrollLayout(
					dropCallback = IECore.curry( ParameterPanel.newControlDropCallback, container = self ),
					parent = self.formLayout
				)

				trashCan = maya.cmds.iconTextStaticLabel(
					image = "smallTrash.xpm",
					label = "",
					height = 20,
					dropCallback = IECore.curry( ParameterPanel.trashDropCallback, container = self ),
					parent = self.formLayout
				)

				maya.cmds.rowLayout( parent = scrollPane )

				self.containerLayout = maya.cmds.columnLayout(
				)

				maya.cmds.formLayout(
					self.formLayout,
					edit=True,
					attachForm = [
						( trashCan, 'bottom', 5 ),
						( trashCan, 'right', 5 ),
						( scrollPane, 'top', 5 ),
						( scrollPane, 'left', 5 ),
						( scrollPane, 'right', 5 ),
						( scrollPane, 'bottom', 25 )
					],

					attachNone = [
						( trashCan, 'left' ),
						( trashCan, 'top' )
					]
				)

				for i in self.restoreParameters :

					self.addControl( i[0], i[1] )

				self.restoreParameters = []

			except :

				raise

			finally :

				maya.cmds.waitCursor( state = False )

		def delete( self ) :
			"""Scripted-panel "delete" hook: tear down all controls."""

			self.removeAllControls()

		def init( self ) :
			"""Scripted-panel "init" hook (no per-panel work needed)."""

			# Called after a scene has been loaded (or after a "file | new" operation), and "add" has already been called.
			pass

		def remove( self ) :
			"""Scripted-panel "remove" hook (no per-panel work needed)."""

			# Called before tearing-off
			pass

		def restoreData( self ) :
			"""Return a repr() string encoding this panel's shown parameters."""

			version = 1

			return repr( ( version, self.parameters ) )

		def restore( self, data ) :
			"""Rebuild the panel's controls from a restoreData() string."""

			self.removeAllControls()

			# NOTE(review): eval() of scene-embedded restore data.
			dataTuple = eval( data )

			version = dataTuple[0]
			self.restoreParameters = dataTuple[1]

			# Only rebuild immediately if the UI chrome already exists;
			# otherwise add() will pick up restoreParameters later.
			if self.formLayout :

				for i in self.restoreParameters :

					self.addControl( i[0], i[1] )

				self.restoreParameters = []

	# The static methods below are the scripted-panel entry points Maya calls;
	# each dispatches to the per-panel container.

	@staticmethod
	def create( panel ) :

		ParameterPanel.panels[ panel ] = ParameterPanel.ParameterUIContainer()

	@staticmethod
	def init( panel ) :

		assert( panel in ParameterPanel.panels )

		ParameterPanel.panels[ panel ].init()

	@staticmethod
	def add( panel ) :

		assert( panel in ParameterPanel.panels )

		ParameterPanel.panels[ panel ].add( panel )

	@staticmethod
	def remove( panel ) :

		assert( panel in ParameterPanel.panels )

		ParameterPanel.panels[ panel ].remove()

	@staticmethod
	def delete( panel ) :

		assert( panel in ParameterPanel.panels )

		ParameterPanel.panels[ panel ].delete()

	@staticmethod
	def save( panel ) :

		assert( panel in ParameterPanel.panels )

		return 'ieParameterPanelRestore("%s", "%s")' % ( panel, ParameterPanel.panels[ panel ].restoreData() )

	@staticmethod
	def restore( panel, data ) :

		assert( panel in ParameterPanel.panels )

		ParameterPanel.panels[ panel ].restore( data )
| StarcoderdataPython |
9607146 | <reponame>multipaths/diffuPy<filename>src/diffupy/cli.py
# -*- coding: utf-8 -*-
"""Command line interface for diffuPy."""
import json
import logging
import os
import pickle
import time
from typing import Optional, Callable, Union
import click
from .constants import CSV, EMOJI, JSON, METHODS, OUTPUT, RAW, Z
from .diffuse import diffuse as run_diffusion
from .kernels import regularised_laplacian_kernel
from .process_input import process_map_and_format_input_data_for_diff
from .process_network import get_kernel_from_network_path
from .process_network import process_graph_from_file
logger = logging.getLogger(__name__)
@click.group(help='DiffuPy')
def main():
    """Command line interface for diffuPy."""
    # Root command group: configures the shared log-line format once for all
    # subcommands before any of them run.
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
@main.command()
@click.option(
    '-g', '--graph',
    help='Input network',
    required=True,
    type=click.Path(exists=True, dir_okay=False)
)
@click.option(
    '-o', '--output',
    help='Output path to store the generated kernel pickle',
    default=os.path.join(OUTPUT, 'kernel.json'),
    show_default=True,
    type=click.Path(file_okay=True)
)
@click.option('-l', '--log', is_flag=True, help='Activate debug mode')
def kernel(
        graph: str,
        output: Optional[str] = os.path.join(OUTPUT, 'kernel.json'),
        log: bool = False
):
    """Generate a kernel for a given network.

    :param graph: Path to the network as a (NetworkX) graph to be transformed to kernel.
    :param output: Path (with file name) for the generated kernel pickle. By default '$OUTPUT/kernel.json'
    :param log: Logging profiling option.
    """
    # Configure logging level
    if log:
        logging.basicConfig(level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
        logger.setLevel(logging.INFO)

    click.secho(f'{EMOJI} Loading graph from {graph} {EMOJI}')

    click.secho(f'{EMOJI} Generating regularized Laplacian kernel from graph. This might take a while... {EMOJI}')

    exe_t_0 = time.time()
    # Local variable shadows this command's function name; harmless here.
    kernel = regularised_laplacian_kernel(process_graph_from_file(graph))
    exe_t_f = time.time()

    # Export numpy array
    # NOTE(review): the default filename ends in .json but the payload is a
    # pickle — confirm the intended extension.
    with open(output, 'wb') as file:
        pickle.dump(kernel, file, protocol=4)

    running_time = exe_t_f - exe_t_0

    click.secho(f'{EMOJI} Kernel exported to: {output} in {running_time} seconds {EMOJI}')
@main.command()
@click.option(
    '-i', '--input',
    help='Input data',
    required=True,
    type=click.Path(exists=True, dir_okay=False)
)
@click.option(
    '-n', '--network',
    help='Path to the network graph or kernel',
    required=True,
    type=click.Path(exists=True, dir_okay=False)
)
@click.option(
    '-o', '--output',
    type=click.File('w'),
    help="Output file",
    default=os.path.join(OUTPUT, 'diffusion_scores.csv'),
)
@click.option(
    '-m', '--method',
    help='Diffusion method',
    type=click.Choice(METHODS),
    default=Z,
)
@click.option(
    '-b', '--binarize',
    help='If logFC provided in dataset, convert logFC to binary (e.g., up-regulated entities to 1, down-regulated to '
         '-1). For scoring methods that accept quantitative values (i.e., raw & z), node labels can also be codified '
         'with LogFC (in this case, set binarize==False).',
    type=bool,
    default=False,
    show_default=False,
)
@click.option(
    '-t', '--threshold',
    help='Codify node labels by applying a threshold to logFC in input.',
    default=None,
    type=float,
)
@click.option(
    '-a', '--absolute_value',
    help='Codify node labels by applying threshold to | logFC | in input. If absolute_value is set to False,'
         'node labels will be signed.',
    type=bool,
    default=False,
    show_default=False,
)
@click.option(
    '-p', '--p_value',
    help='Statistical significance (p-value).',
    type=float,
    default=0.05,
    show_default=True,
)
@click.option(
    '-f', '--format_output',
    help='Choose CSV or JSON output scores file format.',
    type=str,
    default=CSV,
    show_default=True,
)
def diffuse(
        input: str,
        network: str,
        output: Optional[str] = os.path.join(OUTPUT, 'diffusion_scores.csv'),
        method: Union[str, Callable] = Z,
        binarize: Optional[bool] = False,
        threshold: Optional[float] = None,
        absolute_value: Optional[bool] = False,
        p_value: Optional[float] = 0.05,
        format_output: Optional[str] = CSV,
        kernel_method: Optional[Callable] = regularised_laplacian_kernel
):
    """Run a diffusion method for the provided input_scores over a given network.

    :param input: Path to a (miscellaneous format) data input to be processed/formatted.
    :param network: Path to the network as a (NetworkX) graph or as a (diffuPy.Matrix) kernel.
    :param output: Path (with file name) for the generated scores output file. By default '$OUTPUT/diffusion_scores.csv'
    :param method: Elected method ["raw", "ml", "gm", "ber_s", "ber_p", "mc", "z"] or custom method FUNCTION(network, scores, kargs). By default 'raw'
    :param binarize: If logFC provided in dataset, convert logFC to binary. By default False
    :param threshold: Codify node labels by applying a threshold to logFC in input. By default None
    :param absolute_value: Codify node labels by applying threshold to | logFC | in input. By default False
    :param p_value: Statistical significance. By default 0.05
    :param format_output: Elected output format ["CSV", "JSON"]. By default 'CSV'
    :param kernel_method: Callable method for kernel computation.
    """
    # NOTE(review): the CLI parameter name `input` shadows the builtin; it is
    # fixed by the click option name, so it cannot be renamed locally.
    click.secho(f'{EMOJI} Loading graph from {network} {EMOJI}')

    kernel = get_kernel_from_network_path(network, False, kernel_method=kernel_method)

    click.secho(f'{EMOJI} Processing data input from {input}. {EMOJI}')

    formated_input_scores = process_map_and_format_input_data_for_diff(input,
                                                                       kernel,
                                                                       method,
                                                                       binarize,
                                                                       absolute_value,
                                                                       p_value,
                                                                       threshold,
                                                                       )

    click.secho(f'{EMOJI} Computing the diffusion algorithm. {EMOJI}')

    results = run_diffusion(
        formated_input_scores,
        method,
        k=kernel
    )

    # `output` is an open writable file handle supplied by click.File('w').
    if format_output == CSV:
        results.as_csv(output)

    if format_output == JSON:
        json.dump(results, output, indent=2)

    click.secho(f'{EMOJI} Diffusion performed with success. Output located at {output} {EMOJI}\n')
if __name__ == '__main__':
    # Script entry point: dispatch to the click command group.
    main()
| StarcoderdataPython |
6684787 | from sqlalchemy import Column, Integer, String, ForeignKey, BigInteger, DateTime, Boolean
from ..util.db import Base
class Message(Base):
    """ORM model for a bot-tracked message that awaits a reaction/response."""

    __tablename__ = "messages"

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Chat-platform id of the tracked message (BigInteger — presumably a
    # Discord-style snowflake; confirm against callers).
    message_id = Column(BigInteger)
    # Id of the message author — semantics not visible here; TODO confirm.
    author = Column(BigInteger)
    # What the tracked message is waiting for (free-form string).
    awaiting = Column(String)
    # Emote/reaction associated with the message.
    emote = Column(String)
    # Chapter number this message relates to — semantics not visible here.
    chapter = Column(Integer)
    # Creation timestamp; NOTE(review): never assigned in __init__ — confirm
    # it is populated elsewhere before relying on it.
    created_on = Column(DateTime)
    # Whether a reminder has been issued for this message.
    reminder = Column(Boolean)

    def __init__(self, message_id, awaiting, emote):
        # Only the tracking essentials are populated at construction time;
        # author, chapter and created_on are not set by this constructor.
        self.message_id = message_id
        self.awaiting = awaiting
        self.emote = emote
        self.reminder = False
| StarcoderdataPython |
3252829 | <gh_stars>0
"""Contains methods and classes to collect data from
Yahoo Finance API
"""
import pandas as pd
import baostock as bs
import yfinance as yf
from .lxcUrl import hsDownloadData
class YahooDownloader:
    """Provides methods for retrieving daily stock data from
    Yahoo Finance API (and, alternatively, from baostock / local CSVs).

    Attributes
    ----------
    start_date : str
        start date of the data (modified from config.py)
    end_date : str
        end date of the data (modified from config.py)
    ticker_list : list
        a list of stock tickers (modified from config.py)

    Methods
    -------
    fetch_data()
        Fetches data from yahoo API
    """

    def __init__(self, start_date: str, end_date: str, ticker_list: list):
        self.start_date = start_date
        self.end_date = end_date
        self.ticker_list = ticker_list

    def lxcDownload(self, tic):
        """Load one ticker's pre-downloaded CSV from ./lxcData and return it
        indexed (and sorted) by its parsed 'date' column."""
        df = pd.read_csv("./" + "lxcData" + "/" + str(tic) + ".csv", index_col=0)
        date = df['date']
        df = df.drop("date", axis=1)
        print(df)
        df.index = pd.to_datetime(date)
        df.sort_index(inplace=True)
        df.index.name = "date"
        return df

    def fetch_data(self) -> pd.DataFrame:
        """Fetches data from Yahoo API

        Returns
        -------
        `pd.DataFrame`
            7 columns: A date, open, high, low, close, volume and tick symbol
            for the specified stock ticker
        """
        # Download and collect per-ticker frames, then concatenate once.
        # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
        # accumulating frames and calling pd.concat is the supported form.
        print('lxc:', len(self.ticker_list))
        frames = []
        for tic in self.ticker_list:
            temp_df = yf.download(tic, start=self.start_date, end=self.end_date)
            temp_df["tic"] = tic
            frames.append(temp_df)
        data_df = pd.concat(frames) if frames else pd.DataFrame()
        # reset the index, we want to use numbers as index instead of dates
        data_df = data_df.reset_index()
        try:
            # convert the column names to standardized names
            # NOTE(review): a column-count mismatch raises ValueError, not
            # NotImplementedError -- confirm which failure this meant to guard.
            data_df.columns = [
                "date",
                "open",
                "high",
                "low",
                "close",
                "adjcp",
                "volume",
                "tic",
            ]
            # use adjusted close price instead of close price
            data_df["close"] = data_df["adjcp"]
            # drop the adjusted close price column
            # BUG FIX: drop(..., 1) (positional axis) is removed in pandas 2.0.
            data_df = data_df.drop(columns=["adjcp"])
        except NotImplementedError:
            print("the features are not supported currently")
        # create day of the week column (monday = 0)
        data_df["day"] = data_df["date"].dt.dayofweek
        # convert date to standard string format, easy to filter
        data_df["date"] = data_df.date.apply(lambda x: x.strftime("%Y-%m-%d"))
        # drop missing data
        data_df = data_df.dropna()
        data_df = data_df.reset_index(drop=True)
        print("Shape of DataFrame: ", data_df.shape)
        data_df = data_df.sort_values(by=['date', 'tic']).reset_index(drop=True)
        return data_df

    def lxc_fetch_data(self) -> pd.DataFrame:
        """Fetches daily data from baostock for the food & beverage sector.

        Returns
        -------
        `pd.DataFrame`
            7 columns: A date, open, high, low, close, volume and tick symbol
            for the specified stock ticker
        """
        def round_amount(vol):
            # baostock returns string fields; coerce to float, 2 decimals.
            return round(float(vol), 2)

        # Log into the baostock service and echo the response status.
        lg = bs.login()
        print('login respond error_code:' + lg.error_code)
        print('login respond error_msg:' + lg.error_msg)
        # Query the industry classification to select the target sector.
        rs = bs.query_stock_industry()
        print('query_stock_industry error_code:' + rs.error_code)
        print('query_stock_industry respond error_msg:' + rs.error_msg)
        lxc_list = []
        frames = []
        while (rs.error_code == '0') & rs.next():
            # Fetch one classification record at a time.
            temp = rs.get_row_data()
            if (temp[3] == "食品饮料"):  # keep only the food & beverage sector
                lxc_list.append(temp[1])
                temp_df = bs.query_history_k_data_plus(temp[1], "date,open,high,low,close,volume", self.start_date, self.end_date).get_data()
                if (len(temp_df) < 1):
                    continue
                temp_df["tic"] = str(temp[1])
                for col in ("open", "high", "low", "close", "volume"):
                    temp_df[col] = temp_df[col].apply(round_amount)
                frames.append(temp_df)
        # BUG FIX: DataFrame.append removed in pandas 2.0 -> concat once.
        data_df = pd.concat(frames) if frames else pd.DataFrame()
        date = data_df["date"]
        data_df = data_df.drop("date", axis=1)
        data_df.index = pd.to_datetime(date)
        data_df.index.name = "date"
        print("data_df is:", data_df)
        data_df = data_df.reset_index()
        try:
            # convert the column names to standardized names
            data_df.columns = [
                "date",
                "open",
                "high",
                "low",
                "close",
                "volume",
                "tic",
            ]
        except NotImplementedError:
            print("the features are not supported currently")
        # create day of the week column (monday = 0)
        data_df["day"] = data_df["date"].dt.dayofweek
        # convert date to standard string format, easy to filter
        data_df["date"] = data_df.date.apply(lambda x: x.strftime("%Y-%m-%d"))
        # drop missing data
        data_df = data_df.dropna()
        data_df = data_df.reset_index(drop=True)
        print("Shape of DataFrame: ", data_df.shape)
        data_df = data_df.sort_values(by=['date', 'tic']).reset_index(drop=True)
        return data_df

    def select_equal_rows_stock(self, df):
        """Keep only tickers with at least the mean number of rows,
        i.e. drop tickers whose history is shorter than average."""
        df_check = df.tic.value_counts()
        df_check = pd.DataFrame(df_check).reset_index()
        df_check.columns = ["tic", "counts"]
        mean_df = df_check.counts.mean()
        equal_list = list(df.tic.value_counts() >= mean_df)
        names = df.tic.value_counts().index
        select_stocks_list = list(names[equal_list])
        df = df[df.tic.isin(select_stocks_list)]
        return df
| StarcoderdataPython |
3479057 | from polygon import *
import pytest
def test_polygon():
    """Exercise Polygon construction, geometry properties and ordering."""
    abs_tol = 0.001
    rel_tol = 0.001
    # A polygon needs at least 3 vertices: expect ValueError for n=2.
    try:
        p = Polygon(2, 10)
        assert False, ('Creating a Polygon with 2 sides: '
                       ' Exception expected, not received')
    except ValueError:
        pass
    n = 3
    R = 1
    p = Polygon(n, R)
    assert str(p) == 'Polygon(n=3, R=1)', f'actual: {str(p)}'
    assert p.count_vertices == n, (f'actual: {p.count_vertices},'
                                   f' expected: {n}')
    assert p.count_edges == n, f'actual: {p.count_edges}, expected: {n}'
    # BUG FIX: the failure message reported `expected: {n}` for a
    # circumradius comparison against R.
    assert p.circumradius == R, f'actual: {p.circumradius}, expected: {R}'
    assert p.interior_angle == 60, (f'actual: {p.interior_angle},'
                                    ' expected: 60')
    n = 4
    R = 1
    p = Polygon(n, R)
    assert p.interior_angle == 90, (f'actual: {p.interior_angle}, '
                                    ' expected: 90')
    assert math.isclose(p.area, 2,
                        rel_tol=abs_tol,
                        abs_tol=abs_tol), (f'actual: {p.area},'
                                           ' expected: 2.0')
    assert math.isclose(p.side_length, math.sqrt(2),
                        rel_tol=rel_tol,
                        abs_tol=abs_tol), (f'actual: {p.side_length},'
                                           f' expected: {math.sqrt(2)}')
    assert math.isclose(p.perimeter, 4 * math.sqrt(2),
                        rel_tol=rel_tol,
                        abs_tol=abs_tol), (f'actual: {p.perimeter},'
                                           f' expected: {4 * math.sqrt(2)}')
    # BUG FIX: the failure message previously printed p.perimeter for an
    # apothem assertion.
    assert math.isclose(p.apothem, 0.707,
                        rel_tol=rel_tol,
                        abs_tol=abs_tol), (f'actual: {p.apothem},'
                                           ' expected: 0.707')
    p = Polygon(6, 2)
    assert math.isclose(p.side_length, 2,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    assert math.isclose(p.apothem, 1.73205,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    assert math.isclose(p.area, 10.3923,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    assert math.isclose(p.perimeter, 12,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    assert math.isclose(p.interior_angle, 120,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    p = Polygon(12, 3)
    assert math.isclose(p.side_length, 1.55291,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    assert math.isclose(p.apothem, 2.89778,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    assert math.isclose(p.area, 27,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    assert math.isclose(p.perimeter, 18.635,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    assert math.isclose(p.interior_angle, 150,
                        rel_tol=rel_tol, abs_tol=abs_tol)
    # Ordering / equality are defined by vertex count and circumradius.
    p1 = Polygon(3, 10)
    p2 = Polygon(10, 10)
    p3 = Polygon(15, 10)
    p4 = Polygon(15, 100)
    p5 = Polygon(15, 100)
    assert p2 > p1
    assert p2 < p3
    assert p3 != p4
    assert p1 != p4
    assert p4 == p5
4991000 | from progressbar import Bar, ETA, Percentage, ProgressBar, RotatingMarker
import jieba
import pickle
from pymongo import MongoClient
class SimpleVocab:
    """Frequency-based vocabulary built from a MongoDB message collection.

    Holds at most `vocabulary_size` entries: a fixed block of reserved
    tokens (indices 0..4) followed by corpus words sorted by descending
    frequency. Each entry is a dict {'word': str, 'freq': int}.
    """

    def __init__(self):
        self.word_to_idx = {}          # word -> index into self.words
        self.words = []                # list of {'word', 'freq'} records
        self.vocabulary_size = 10000   # hard cap, reserved tokens included
        self.reserved = ['PAD', 'UNK_0', 'UNK_1', 'UNK_2', 'UNK_3']

    def add_word(self, word):
        """Insert `word` with frequency 1, or bump its count if known."""
        if word not in self.word_to_idx:
            self.word_to_idx[word] = len(self.words)
            self.words.append({'word': word, 'freq': 1})
        else:
            self.words[self.word_to_idx[word]]['freq'] += 1

    def add_reserved_words(self):
        """Seed the vocabulary with the reserved tokens."""
        for word in self.reserved:
            self.add_word(word)

    def scan(self, storage):
        """Build the vocabulary from every message in `storage` (a MongoDB
        collection), segmenting Chinese text with jieba, then truncate to
        the `vocabulary_size` most frequent words (reserved block kept)."""
        self.add_reserved_words()
        print('Fitting storage...')
        widgets = ['Progress: ', Percentage(), ' ', Bar(), ' ', ETA()]
        # Step #1 find a list of most 'vocabulary_size' frequent words
        cursor = storage.find()
        # NOTE(review): Cursor.count() is removed in PyMongo 4; consider
        # storage.count_documents({}) -- confirm driver version in use.
        num_session = cursor.count()
        count_session = 0
        pbar = ProgressBar(widgets=widgets, maxval=num_session).start()
        for message in cursor:
            for word in " ".join(jieba.cut(message['content'])).split():
                self.add_word(word)
            count_session += 1
            pbar.update(count_session)
        pbar.finish()
        print('Sorting vocab...')
        # Sort only the non-reserved tail by frequency, truncate, and
        # rebuild the word -> index mapping to match the new order.
        self.words[len(self.reserved):] = sorted(self.words[len(self.reserved):], key=lambda item: item['freq'], reverse=True)
        self.vocabulary_size = min(self.vocabulary_size, len(self.words))
        self.words = self.words[0:self.vocabulary_size]
        self.word_to_idx.clear()
        for i in range(len(self.words)):
            self.word_to_idx[self.words[i]['word']] = i
        print(self.words[0:100])
        print('Done!')

    def save(self, fname):
        """Pickle the vocabulary to `fname`."""
        # BUG FIX: the file handle was opened inline and never closed.
        with open(fname, "wb") as f:
            pickle.dump([self.word_to_idx, self.words], f)

    def load(self, fname):
        """Restore a vocabulary previously written by save()."""
        # BUG FIX: the file handle was opened inline and never closed.
        with open(fname, "rb") as f:
            l = pickle.load(f)
        self.word_to_idx = l[0]
        self.words = l[1]

    def to_idx(self, word):
        """Return the index of `word`, or None if out of vocabulary."""
        if word in self.word_to_idx:
            return self.word_to_idx[word]
        return None

    def to_word(self, idx):
        """Return the {'word', 'freq'} record at `idx`, or None if the
        index is out of range (callers receive the record, not the word)."""
        if idx < len(self.words):
            return self.words[idx]
        return None
'''
Tests
db = MongoClient().sonny['dataset']
sv = SimpleVocab()
sv.scan(db)
sv.save('/root/source/Sonny/experiments/data/simple_vocab.dat')
''' | StarcoderdataPython |
1728732 | import pandas as pd
import os
import numpy as np
def restrict_variable_to_possible_ranges(df, variable_name, possible_value_ranges, verbose=False):
    """Mask out-of-range scores for one variable and split the frame.

    Looks up the [Min, Max] bounds for `variable_name` in
    `possible_value_ranges`, blanks every score of that variable that
    falls outside them, and returns (retained rows, excluded rows).
    """
    bounds = possible_value_ranges.loc[
        possible_value_ranges['variable_label'] == variable_name].iloc[0]
    result = df.copy()
    is_variable = df['scale'] == variable_name
    too_low = is_variable & (df['score'] < bounds['Min'])
    too_high = is_variable & (df['score'] > bounds['Max'])
    # Out-of-range scores become NaN, then the rows are dropped below.
    result.loc[too_low | too_high, 'score'] = np.nan
    if verbose:
        print(f'Excluding {result.score.isna().sum()} observations because out of range')
    dropped = df[result.score.isna()]
    result = result.dropna()
    return result, dropped
def preprocess_scales(scales_df, verbose=False):
    """Clean a raw clinical-scales extract and return the cleaned frame.

    NOTE: `scales_df` is mutated in place (columns dropped, rows removed,
    a new column added); callers should not reuse it afterwards.
    """
    # Composite admission key: "<patient_id>_<yyyymmdd of begin_date>".
    scales_df['patient_admission_id'] = scales_df['patient_id'].astype(str) + '_' + scales_df['begin_date'].apply(
        lambda bd: ''.join(bd.split(' ')[0].split('.')))
    # Identifier / administrative columns not needed downstream.
    columns_to_drop = ['nr', 'patient_id', 'eds_end_4digit', 'eds_manual', 'DOB', 'begin_date',
                       'end_date', 'death_date', 'death_hosp', 'eds_final_id',
                       'eds_final_begin', 'eds_final_end', 'eds_final_patient_id',
                       'eds_final_birth', 'eds_final_death', 'eds_final_birth_str',
                       'date_from', 'date_to']
    scales_df.drop(columns_to_drop, axis=1, inplace=True)
    # Allowed [Min, Max] per variable, shipped two directories above this file.
    possible_value_ranges_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
                                              'possible_ranges_for_variables.xlsx')
    possible_value_ranges = pd.read_excel(possible_value_ranges_file)
    # Collapse label variants onto canonical scale names.
    glasgow_equivalents = ['Glasgow + pupilles', 'Glasgow + pupilles + sensibilité/motricité', 'Glasgow',
                           'Glasgow urgence',
                           'Neurologie - Glasgow']
    scales_df.loc[scales_df['scale'].isin(glasgow_equivalents), 'scale'] = 'Glasgow Coma Scale'
    NIHSS_equivalents = ['NIHSS - National Institute oh Health Stroke Scale',
                         'NIHSS - National Institute of Health Stroke Scale']
    scales_df.loc[scales_df['scale'].isin(NIHSS_equivalents), 'scale'] = 'NIHSS'
    pain_scale_equivalents = ['Douleur - b - Echelle numérique', 'Douleur - a - EVA', 'Douleur - c - Echelle verbale']
    scales_df.loc[scales_df['scale'].isin(pain_scale_equivalents), 'scale'] = 'pain scale'
    # drop rows with scale = 'Douleur - h - CPOT' as not comparable with other scales
    scales_df.drop(scales_df[scales_df['scale'].str.contains('CPOT')].index, inplace=True)
    if verbose:
        print('Preprocessing NIHSS')
    # Blank and drop out-of-range scores for each canonical scale.
    cleaned_scales_df, _ = restrict_variable_to_possible_ranges(scales_df, 'NIHSS',
                                                                possible_value_ranges, verbose=verbose)
    if verbose:
        print('Glasgow Coma Scale')
    cleaned_scales_df, _ = restrict_variable_to_possible_ranges(cleaned_scales_df,
                                                                'Glasgow Coma Scale',
                                                                possible_value_ranges, verbose=verbose)
    return cleaned_scales_df
| StarcoderdataPython |
8074691 | import arcade
from arcade.gui import *
import random
import math
import CONST
from moviepy.editor import *
import pygame
import time
from Player import Player
from Supporter import Supporter
from Bullets import Bullets
from ProTrump import ProTrump
from Redneck import Redneck
from Boss import Boss
from Capitol import Capitol
from Coequipier import Coequipier
from Tweet import Tweet
from Gui import Gui
from Strike import Strike
class Manager(arcade.Window):
    """Main arcade game window: owns all game objects, the update/draw
    loop, input handling, music playback and the win/lose flow."""
    # Background track shared by all instances (class attribute).
    music = arcade.Sound("audios/background_music.mp3")
    def __init__(self):
        """Play the intro video, then initialise game state and the window."""
        # show history
        pygame.display.set_caption('Redneck Rumble')
        clip = VideoFileClip("video/begin_cut.mp4")
        clip.preview()
        pygame.quit()
        # Objects
        self.player = None
        self.supporters = []
        self.bullets = []
        self.capitol = None
        self.coequipier = None
        self.gui = None
        # Game parameters
        self.score = 0
        self.time = 0                  # frame counter (incremented in on_update)
        self.spawn_interval = 5        # seconds-ish between supporter spawns
        self.boost_speed = 1
        self.win_state = 0             # 1 = won, 0 = lost (checked in end_game)
        self.off = 0                   # 1 once the game has ended
        self.retry = 0
        self.weapon_count = 0
        self.boss = False              # boss spawned yet?
        # Interaction parameters
        self.dirkey_change = False
        self.up_pressed = False
        self.down_pressed = False
        self.left_pressed = False
        self.right_pressed = False
        self.leftclick_pressed = False
        self.leftclick_x = 0
        self.leftclick_y = 0
        self.mouse_x = 0
        self.mouse_y = 0
        self.background = None
        self.ui_manager = None
        self.music_list = []
        self.current_song_index = 0
        self.current_player = None
        self.strike_button = None
        self.button_normal = arcade.load_texture("sprites/gui/strike2.png")
        self.button_hovered_texture = arcade.load_texture("sprites/gui/strike_over2.png")
        super().__init__(CONST.SCREEN_WIDTH, CONST.SCREEN_HEIGHT, CONST.SCREEN_TITLE)
    def setup(self):
        """Create/reset all game objects, UI elements and start the music."""
        self.player = Player()
        self.capitol = Capitol()
        self.coequipier = None
        self.gui = Gui(0,CONST.MAX_VOTES)
        self.supporters = []
        self.bullets = []
        self.ui_manager = UIManager()
        self.tweet = Tweet()
        self.strike_button = Strike(
            center_x = CONST.STRIKE_BUTTON_X,
            center_y = CONST.STRIKE_BUTTON_Y,
            normal_texture=self.button_normal,
            hover_texture=self.button_hovered_texture,
            text=''
        )
        self.ui_manager.add_ui_element(self.strike_button)
        arcade.set_background_color(arcade.color.AMAZON)
        self.background = arcade.load_texture("tileset/background.png")
        self.music_list = ["audios/background_music.mp3"]
        self.current_song_index = 0
        self.music = arcade.Sound(self.music_list[self.current_song_index], streaming=True)
        self.play_song()
    def end_game(self):
        """Stop audio, close the window, play the outcome video and exit.

        NOTE(review): calls exit() directly, so nothing after end_game()
        ever runs -- confirm this hard-exit is intentional.
        """
        if self.strike_button.already_clicked == "True_all" :
            self.strike_button.sound.stop(self.strike_button.manager)
        else :
            Manager.music.stop(self.current_player)
        arcade.close_window()
        self.off = 1
        if self.win_state:
            clip = VideoFileClip("video/win_cut.mp4")
            clip.preview()
        else:
            clip = VideoFileClip("video/game_over_cut.mp4")
            clip.preview()
        pygame.quit()
        exit()
    def advance_song(self):
        """Advance to the next track, wrapping around the playlist."""
        self.current_song_index += 1
        if self.current_song_index >= len(self.music_list):
            self.current_song_index = 0
    def play_song(self):
        """Start the current track and keep its player handle."""
        self.current_player = self.music.play(CONST.MUSIC_VOLUME)
        time.sleep(0.03)
    def on_draw(self):
        """Render the scene (skipped once the game is over)."""
        if not self.off:
            arcade.start_render()
            arcade.draw_lrwh_rectangle_textured(0, 0,CONST.SCREEN_WIDTH, CONST.SCREEN_HEIGHT,self.background)
            self.capitol.draw()
            self.player.draw()
            self.gui.draw()
            self.tweet.draw()
            self.strike_button.draw()
            #self.coequipier.draw()
            for b in self.bullets:
                b.draw()
            for s in self.supporters:
                s.draw()
    def check_sound (self):
        """React to the strike button's tri-state flag, pausing/resuming music."""
        if self.strike_button.already_clicked == "True" :
            # NOTE(review): the next line is a bare attribute access with no
            # effect -- looks like leftover code; confirm and remove.
            self.strike_button.already_clicked
            Manager.music.stop(self.current_player)
            self.strike_button.already_clicked = "True_all"
        if self.strike_button.already_clicked == "False":
            self.current_player = Manager.music.play(CONST.MUSIC_VOLUME)
            self.strike_button.already_clicked = "None"
    def on_update(self, delta_time):
        """Per-frame game logic: spawning, movement, collisions, win/lose."""
        if not self.off:
            self.time = self.time + 1
            self.gui.votes_count = int(CONST.MAX_VOTES - (self.time/60*2))
            # Create supporter
            if self.time % (self.spawn_interval * 30) == 0:
                r = random.random()
                if r < CONST.REDNECK_PROBABILITY:
                    s = Redneck(1)
                else:
                    s = ProTrump(1)
                self.supporters.append(s)
            if self.gui.votes_count <= 60 and not self.boss:
                self.supporters.append(Boss(1))
                self.boss = True
            # Distribute events
            self.distribute_events()
            self.player.update()
            self.tweet.update()
            # Tweet power-up boosts every supporter's (and the global) speed.
            for s in self.supporters:
                s.boost_speed = max(1,self.tweet.activated * CONST.TWEET_SPEED_BOOST)
            self.boost_speed = max(1,self.tweet.activated * CONST.TWEET_SPEED_BOOST)
            for b in self.bullets:
                b.update()
            for s in self.supporters:
                if s.type == "Redneck":
                    # Rednecks home in on the player's position.
                    s.update(self.player.sprite.center_x, self.player.sprite.center_y)
                else:
                    s.update()
            # Fire a bullet
            bullet = self.player.fire(self.mouse_x,self.mouse_y)
            if bullet != None:
                self.bullets.append(bullet)
            # Teammate auto-targets the supporter nearest the screen centre.
            if self.coequipier is not None:
                nearest = None
                dist = 1e9
                for s in self.supporters:
                    d = math.sqrt((s.sprite.center_x-CONST.SCREEN_WIDTH/2)**2 + (s.sprite.center_y-CONST.SCREEN_HEIGHT/2)**2)
                    if d < dist and d < self.coequipier.range :
                        dist = d
                        nearest = s
                if nearest is not None:
                    bullet = self.coequipier.fire(nearest.sprite.center_x,nearest.sprite.center_y)
                    if bullet != None:
                        self.bullets.append(bullet)
            # NOTE(review): bullets were already updated above, so each
            # bullet moves twice per frame -- confirm this is intended.
            for b in self.bullets:
                b.update()
                for s in self.supporters:
                    if arcade.check_for_collision(b.sprite, s.sprite) and b.last_touch != s:
                        s.hit_points -= b.damage
                        if s.hit_points <= 0:
                            self.gui.dollars_count += s.cashprize
                        b.last_touch = s
                        b.hit_points -= 1
                        break
            self.bullets = [b for b in self.bullets if b.hit_points > 0]
            self.supporters = [s for s in self.supporters if s.hit_points > 0]
            # Remove bullets
            self.bullets = [b for b in self.bullets if b.sprite.right > 0 and b.sprite.left < (CONST.SCREEN_WIDTH - 1) and b.sprite.bottom > 0 and b.sprite.top < (CONST.SCREEN_HEIGHT - 1)]
            # Collisions player <-> supporters
            stunned = False
            for s in self.supporters:
                if arcade.check_for_collision(self.player.sprite, s.sprite):
                    if s.type == "Redneck":
                        s.hit_points -= CONST.REDNECK_HP_DECREASE
                        s.is_on_player = True
                        self.player.stun = True
                        stunned = True
            if not stunned:
                self.player.stun = False
            self.supporters = [s for s in self.supporters if s.hit_points > 0]
            # Collisions capitol <-> supporters
            for s in self.supporters:
                if arcade.check_for_collision(self.capitol.sprite, s.sprite):
                    self.capitol.hit(s.damage)
                    s.hit_points = 0
            self.supporters = [s for s in self.supporters if s.hit_points > 0]
            # Collisions capitol <-> bullets
            for b in self.bullets:
                if arcade.check_for_collision(self.capitol.sprite, b.sprite) and b.sender != "Coequipier":
                    b.hit_points = 0
            self.bullets = [b for b in self.bullets if b.hit_points > 0]
            self.check_sound()
            """ ENDING CONDITIONS """
            if self.capitol.hit_point <= 0:
                self.win_state = 0
                self.end_game()
            """ WIN CONDITIONS """
            if self.gui.votes_count <= 0:
                self.win_state = 1
                self.end_game()
            # Restart the track when the stream position wraps to 0.
            if self.music.get_stream_position(self.current_player) == 0.0:
                # self.advance_song()
                self.play_song()
    def upgrade(self, action):
        """Apply a purchased upgrade; every purchase also speeds up spawns."""
        if self.spawn_interval > 2:
            self.spawn_interval += -1
        if action == "PL_ATK_2X":
            self.player.weapon.ammo_dmg *= 2
        elif action == "PL_SPD_2X":
            self.weapon_count +=1
            self.player.weapon.rate /= 2
            self.gui.weapon_sprite = arcade.Sprite(CONST.WEAPON_SPRITE[self.weapon_count], CONST.SPRITE_SCALING_WEAPON)
        elif action == "PL_PT":
            self.player.weapon.ammo_hit_point += 1
        elif action == "SUPPORT":
            self.coequipier = Coequipier()
        elif action == "SUPPORT_ATK_2X":
            self.coequipier.weapon.ammo_dmg *= 2
        elif action == "SUPPORT_SPD_2X":
            self.coequipier.weapon.rate /= 2
        elif action == "SUPPORT_RNG_2X":
            self.coequipier.range *= 2
        elif action == "SHIELD":
            self.capitol.shield += 30
    """ EVENTS """
    def on_mouse_press(self, x, y, button, modifiers):
        """Record left-click state and resolve clicks on upgrade sprites."""
        if button == arcade.MOUSE_BUTTON_LEFT:
            self.leftclick_pressed = True
            self.leftclick_x = x
            self.leftclick_y = y
            a = arcade.SpriteList()
            a.append(self.gui.col1_upgrade_sprite)
            a.append(self.gui.col2_upgrade_sprite)
            a.append(self.gui.col3_upgrade_sprite)
            upgrade = arcade.get_sprites_at_point((x,y), a)
            if len(upgrade) > 0:
                upgrade = upgrade[-1]
                action = self.gui.select_upgrade(upgrade)
                self.upgrade(action)
    def on_mouse_motion(self, x, y, dx, dy):
        """Track the cursor for aiming."""
        self.mouse_x = x
        self.mouse_y = y
    def on_mouse_release(self, x, y, button, modifiers):
        """Stop auto-fire when the left button is released."""
        if button == arcade.MOUSE_BUTTON_LEFT:
            self.leftclick_pressed = False
    def on_key_press(self, key, modifiers):
        """ZQSD (AZERTY) movement keys: set pressed flags."""
        if key == arcade.key.Z:
            self.up_pressed = True
            self.dirkey_change = True
        elif key == arcade.key.S:
            self.down_pressed = True
            self.dirkey_change = True
        elif key == arcade.key.Q:
            self.left_pressed = True
            self.dirkey_change = True
        elif key == arcade.key.D:
            self.right_pressed = True
            self.dirkey_change = True
    def on_key_release(self, key, modifiers):
        """ZQSD movement keys: clear pressed flags."""
        if key == arcade.key.Z:
            self.up_pressed = False
            self.dirkey_change = True
        elif key == arcade.key.S:
            self.down_pressed = False
            self.dirkey_change = True
        elif key == arcade.key.Q:
            self.left_pressed = False
            self.dirkey_change = True
        elif key == arcade.key.D:
            self.right_pressed = False
            self.dirkey_change = True
    def distribute_events(self):
        """Forward buffered input state to the player each frame."""
        # Player
        if self.dirkey_change:
            self.player.update_keys(self.up_pressed, self.down_pressed, self.left_pressed, self.right_pressed)
        self.player.auto_fire = self.leftclick_pressed
| StarcoderdataPython |
3407176 | <reponame>hawkhai/pyinstaller
from . import mod1
from .mod2 import *
| StarcoderdataPython |
162638 | <gh_stars>0
"""
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ("DockerAdapter", )
from ..logger import getLogger
from ..configuration import dm_conf
from .interface import Interface, ContainerState, EngineAPIError, NotFound, CEAdapterError
import docker
import docker.errors
import docker.types
import typing
# Logger named after the last component of this module's dotted path.
logger = getLogger(__name__.split(".", 1)[-1])
# Maps docker SDK exception *classes* to the adapter's error types.
error_map = {
    docker.errors.APIError: EngineAPIError,
    docker.errors.NotFound: NotFound
}
# Maps a docker container status string to the adapter's ContainerState.
container_state_map = {
    "created": ContainerState.stopped,
    "restarting": ContainerState.running,
    "running": ContainerState.running,
    "removing": ContainerState.running,
    "paused": ContainerState.stopped,
    "exited": ContainerState.stopped,
    "dead": ContainerState.stopped
}
class DockerAdapter(Interface):
    """Container-engine adapter implemented on top of the Docker SDK.

    Translates the generic Interface operations (list / create / start /
    stop / remove containers, plus per-container volume management) into
    docker-py calls, mapping docker exceptions onto the adapter's error
    hierarchy via `error_map`.
    """

    def __init__(self):
        self.__client = docker.DockerClient(base_url=dm_conf.CE.socket)

    def __getVolName(self, c_name, v_name) -> str:
        """Volume name namespaced by its owning container."""
        return "{}_{}".format(c_name, v_name)

    def __createVolume(self, c_name, v_name):
        """Create a volume labelled with its owning container's name."""
        try:
            self.__client.volumes.create(name=v_name, labels={c_name: None})
        except Exception as ex:
            logger.error("can't create volume '{}' - {}".format(v_name, ex))
            # BUG FIX: was `error_map.setdefault(ex, CEAdapterError)` --
            # keyed by the exception *instance* it never matched the class
            # keys, always raised CEAdapterError and grew error_map on every
            # failure. `get(type(ex), ...)` performs the intended lookup.
            raise error_map.get(type(ex), CEAdapterError)(ex)

    def __removeVolume(self, name):
        """Remove a single volume by name."""
        try:
            volume = self.__client.volumes.get(name)
            volume.remove()
        except Exception as ex:
            logger.error("can't remove volume '{}' - {}".format(name, ex))
            raise error_map.get(type(ex), CEAdapterError)(ex)

    def __initVolumes(self, c_name, volumes):
        """Reconcile a container's volumes: create the missing ones and
        remove those no longer referenced."""
        volumes = [self.__getVolName(c_name, vol) for vol in volumes]
        existing_volumes = self.__client.volumes.list(filters={"label": c_name})
        if existing_volumes:
            existing_volumes = [vol.name for vol in existing_volumes]
            new_volumes = set(volumes) - set(existing_volumes)
            missing_volumes = set(existing_volumes) - set(volumes)
            for volume in new_volumes:
                self.__createVolume(c_name, volume)
            for volume in missing_volumes:
                self.__removeVolume(volume)

    def __purgeVolumes(self, c_name):
        """Force-remove every volume labelled for `c_name` (best effort:
        individual failures are logged, not raised)."""
        volumes = self.__client.volumes.list(filters={"label": c_name})
        for volume in volumes:
            try:
                volume.remove(force=True)
            except Exception as ex:
                logger.error("can't purge volume '{}' - {}".format(volume.name, ex))
                # raise error_map.setdefault(ex, CEAdapterError)(ex)

    def __purgeImages(self):
        """Prune unused images (best effort)."""
        try:
            self.__client.images.prune(filters={"dangling": False})
        except Exception as ex:
            # BUG FIX: "{]" was an invalid format spec and raised ValueError
            # inside this handler instead of logging the real error.
            logger.error("can't remove images - {}".format(ex))

    def listContainers(self) -> dict:
        """Return {container_name: {image, hash, state}} for all containers.

        NOTE(review): `container.image.tags[0]` raises IndexError for
        untagged images -- confirm every deployed image is tagged.
        """
        try:
            container_objs = self.__client.containers.list(all=True)
            deployments = dict()
            for container in container_objs:
                deployments[container.name] = {
                    "image": container.image.tags[0],
                    "hash": container.image.id,
                    "state": container_state_map[container.status]
                }
            return deployments
        except Exception as ex:
            logger.error("can't list deployments - {}".format(ex))
            raise error_map.get(type(ex), CEAdapterError)(ex)

    def startContainer(self, name: str) -> None:
        """Start the container `name`."""
        try:
            container_obj = self.__client.containers.get(name)
            container_obj.start()
        except Exception as ex:
            logger.error("can't start deployment '{}' - {}".format(name, ex))
            raise error_map.get(type(ex), CEAdapterError)(ex)

    def stopContainer(self, name: str) -> None:
        """Stop the container `name`."""
        try:
            container_obj = self.__client.containers.get(name)
            container_obj.stop()
            # container_obj.wait()
        except Exception as ex:
            logger.error("can't stop deployment '{}' - {}".format(name, ex))
            raise error_map.get(type(ex), CEAdapterError)(ex)

    def createContainer(self, name: str, dpy_conf: dict, srv_conf: typing.Optional[dict] = None, env_conf: typing.Optional[dict] = None) -> None:
        """Pull the image and create (but not start) a container.

        :param dpy_conf: deployment config with 'image', 'volumes',
            'devices' and 'ports' keys.
        :param srv_conf: optional service environment variables.
        :param env_conf: optional extra environment variables (merged over
            srv_conf when both are given).
        """
        try:
            self.__client.images.pull(repository=dpy_conf["image"])
            params = dict()
            params["name"] = name
            params["network"] = dm_conf.CE.network_name
            params["image"] = dpy_conf["image"]
            params["detach"] = True
            if dpy_conf["volumes"]:
                self.__initVolumes(name, dpy_conf["volumes"])
                params["volumes"] = {self.__getVolName(name, volume): {"bind": target, "mode": "rw"} for volume, target in dpy_conf["volumes"].items()}
            if dpy_conf["devices"]:
                # NOTE(review): this builds a *set*; docker-py documents a
                # list for `devices` -- confirm a set is accepted.
                params["devices"] = {"{}:{}:rwm".format(device, target) for device, target in dpy_conf["devices"].items()}
            if dpy_conf["ports"]:
                params["ports"] = {"{}/{}".format(port["container"], port["protocol"] or "tcp"): port["host"] for port in dpy_conf["ports"]}
            if all((srv_conf, env_conf)):
                params["environment"] = {**srv_conf, **env_conf}
            else:
                if env_conf:
                    params["environment"] = env_conf
                if srv_conf:
                    params["environment"] = srv_conf
            self.__client.containers.create(**params)
        except Exception as ex:
            logger.error("can't create container '{}' - {}".format(name, ex))
            raise error_map.get(type(ex), CEAdapterError)(ex)

    def removeContainer(self, name: str, purge=False) -> None:
        """Remove a container; with purge=True also delete its volumes and
        prune unused images."""
        try:
            container_obj = self.__client.containers.get(name)
            container_obj.remove()
            if purge:
                self.__purgeVolumes(name)
                self.__purgeImages()
        except Exception as ex:
            logger.error("can't remove deployment '{}' - {}".format(name, ex))
            raise error_map.get(type(ex), CEAdapterError)(ex)
| StarcoderdataPython |
8065592 | from datetime import datetime
from anubis.models import TheiaSession
def mark_session_ended(theia_session: TheiaSession):
    """Flag a Theia session as finished in its database record.

    Marks the session inactive, sets its state to "Ended" and stamps the
    end time with the current local time. The session object is mutated
    in place; nothing is returned.

    :param theia_session: session row to update
    :return:
    """
    theia_session.ended = datetime.now()
    theia_session.state = "Ended"
    theia_session.active = False
9609265 | <gh_stars>1-10
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['AUTOGRAPH_VERBOSITY'] = '10'
import tensorflow as tf
tf.compat.v1.logging.info('TensorFlow')
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.logging.info('TensorFlow')
import numpy as np
import scipy.spatial.distance as ds
import pandas as pd
from bilm import Batcher, BidirectionalLanguageModel, weight_layers

# Script: probe contextual ELMo embeddings of the ambiguous Portuguese word
# "manga" (fruit vs. sleeve). Teste=1/2 compares a few fixed sentences;
# Teste=3 dumps normalized vectors for every "manga" sentence in a corpus.
# python bin/dump_weights.py --save_dir swb/checkpoint --outfile swb/model/manga_weights.hdf5
Teste = 3
# Clear any graph left over from a previous run.
tf.reset_default_graph()
# Location of pretrained LM. Here we use the test fixtures.
modeldir = os.path.join('swb', 'model')
vocab_file = os.path.join(modeldir, 'manga_vocab.txt' if (Teste==1 or Teste==3) else 'ombra_vocab.txt')
options_file = os.path.join(modeldir, 'manga_options.json' if (Teste==1 or Teste==3) else 'ombra_options.json')
weight_file = os.path.join(modeldir, 'manga_weights.hdf5' if (Teste==1 or Teste==3) else 'ombra_weights.hdf5')
# Create a Batcher to map text to character ids.
batcher = Batcher(vocab_file, 50)
# Input placeholders to the biLM.
context_character_ids = tf.compat.v1.placeholder('int32', shape=(None, None, 50))
# Build the biLM graph.
bilm = BidirectionalLanguageModel(options_file, weight_file)
# Get ops to compute the LM embeddings.
context_embeddings_op = bilm(context_character_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
elmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)
# Now we can compute embeddings for some sents.
raw_context1 = ['manga longas começam do ombro até o pulso .',
                'manga curtas são mais apertadas em camisa e em vestido .',
                'a manga tem polpa doce e suculenta .',
                'o sumo da manga é muito apreciado , uma delícia .']
raw_context2 = ['ombra longas começam do ombro até o pulso .',
                'ombra curtas são mais apertadas em camisa e em vestido .',
                'a manga tem polpa doce e suculenta .',
                'o sumo da manga é muito apreciado , uma delícia .']
if Teste==1:
    raw_context = raw_context1
elif Teste==2:
    raw_context = raw_context2
else:
    # C:\Projetos\ELMo\bilm-tf\bin\swb\train\mangas
    traindir = os.path.join('swb', 'train', 'mangas', 'pre_Manga_mix4.txt')
    with open(traindir, "r", encoding = "utf-8") as f:
        lines = f.readlines()
    raw_context = [manga_sent for manga_sent in lines if 'manga' in manga_sent]
tokenized_context = [sentence.split() for sentence in raw_context]
##################################
# Print the sentences per token
##################################
if (Teste==1 or Teste==2):
    with tf.Session() as sess:
        # It is necessary to initialize variables once before running inference.
        sess.run(tf.global_variables_initializer())
        # Create batches of data.
        context_ids = batcher.batch_sentences(tokenized_context)
        print("Shape of context ids = ", context_ids.shape)
        # Compute ELMo representations (here for the input only, for simplicity).
        elmo_context_input_ = sess.run(
            elmo_context_input['weighted_op'],
            feed_dict={context_character_ids: context_ids}
        )
        print("Shape of generated embeddings = ",elmo_context_input_.shape)
        print(tokenized_context)
        #########################################################################################
        # Computing euclidean distance between words embedding
        print("Euclidean Distance Comparison Manga Fruit (1 & 2) and Manga Vestment (3 & 4)) - T", Teste )
        # 1
        euc_dist_manga_s0_s1 = np.linalg.norm(elmo_context_input_[0,0,:]
                                              - elmo_context_input_[1,0,:])
        print("\nSentence 1 x Sentence 2 = ", tokenized_context[0][0],
              np.round(euc_dist_manga_s0_s1, 2), tokenized_context[1][0])
        euc_dist_manga_s0_s2 = np.linalg.norm(elmo_context_input_[0,0,:]
                                              - elmo_context_input_[2,1,:])
        print("\nSentence 1 x Sentence 3 = ", tokenized_context[0][0],
              np.round(euc_dist_manga_s0_s2, 2), tokenized_context[2][1])
        euc_dist_manga_s0_s3 = np.linalg.norm(elmo_context_input_[0,0,:]
                                              - elmo_context_input_[3,3,:])
        print("\nSentence 1 x Sentence 4 = ", tokenized_context[0][0],
              np.round(euc_dist_manga_s0_s3, 2), tokenized_context[3][3],)
        # 2
        euc_dist_manga_s1_s2 = np.linalg.norm(elmo_context_input_[1,0,:]
                                              - elmo_context_input_[2,1,:])
        print("\nSentence 2 x Sentence 3 = ", tokenized_context[1][0],
              np.round(euc_dist_manga_s1_s2, 2), tokenized_context[2][1],)
        euc_dist_manga_s1_s3 = np.linalg.norm(elmo_context_input_[1,0,:]
                                              - elmo_context_input_[3,3,:])
        print("\nSentence 2 x Sentence 4 = ", tokenized_context[1][0],
              np.round(euc_dist_manga_s1_s3, 2), tokenized_context[3][3],)
        # 3
        euc_dist_manga_s2_s3 = np.linalg.norm(elmo_context_input_[2,1,:]
                                              - elmo_context_input_[3,3,:])
        print("\nSentence 3 x Sentence 4 = ", tokenized_context[2][1],
              np.round(euc_dist_manga_s2_s3, 2), tokenized_context[3][3],)
        # Computing cosine distance between words embedding
        print("\n\nCosine Distance Comparison Manga (Fruit) and Manga (Clothing) - T", Teste)
        # 1
        cos_dist_manga_s0_s1 = ds.cosine(elmo_context_input_[0,0,:]
                                         ,elmo_context_input_[1,0,:])
        print("\nSentence 1 x Sentence 2 = ", tokenized_context[0][0]
              , np.round(cos_dist_manga_s0_s1, 3), tokenized_context[1][0])
        cos_dist_manga_s0_s2 = ds.cosine(elmo_context_input_[0,0,:]
                                         ,elmo_context_input_[2,1,:])
        print("\nSentence 1 x Sentence 3 = ", tokenized_context[0][0]
              , np.round(cos_dist_manga_s0_s2, 3), tokenized_context[2][1])
        cos_dist_manga_s0_s3 = ds.cosine(elmo_context_input_[0,0,:]
                                         ,elmo_context_input_[3,3,:])
        print("\nSentence 1 x Sentence 4 = ", tokenized_context[0][0]
              , np.round(cos_dist_manga_s0_s3, 3), tokenized_context[3][3])
        # 2
        # BUG FIX: this value was stored in `cos_dist_manga_s0w0_s2w0` while
        # the print below reported `cos_dist_manga_s0_s2` (sentence 1 vs 3),
        # so the "Sentence 2 x Sentence 3" line showed the wrong distance.
        cos_dist_manga_s1_s2 = ds.cosine(elmo_context_input_[1,0,:]
                                         ,elmo_context_input_[2,1,:])
        print("\nSentence 2 x Sentence 3 = ", tokenized_context[1][0]
              , np.round(cos_dist_manga_s1_s2, 3), tokenized_context[2][1])
        cos_dist_manga_s1_s3 = ds.cosine(elmo_context_input_[1,0,:]
                                         ,elmo_context_input_[3,3,:])
        print("\nSentence 2 x Sentence 4 = ", tokenized_context[1][0]
              , np.round(cos_dist_manga_s1_s3, 3), tokenized_context[3][3])
        # 3
        cos_dist_manga_s2_s3 = ds.cosine(elmo_context_input_[2,1,:]
                                         ,elmo_context_input_[3,3,:])
        print("\nSentence 3 x Sentence 4 = ", tokenized_context[2][1]
              , np.round(cos_dist_manga_s2_s3, 3), tokenized_context[3][3])
    ######################################################################
else:
    lbl = open('manga_vec_lbl.tsv', "w", encoding = "utf-8")
    prime = True
    with tf.Session() as sess:
        # It is necessary to initialize variables once before running inference.
        sess.run(tf.global_variables_initializer())
        # Create loop of data (memory constraint).
        size = len(tokenized_context)
        valores = []
        for i in range(size):
            if 'manga' in tokenized_context[i]:
                manga_pos = tokenized_context[i].index("manga")
                if prime:
                    linha = " ".join(tokenized_context[i])
                    prime = False
                else:
                    linha = "\n" + " ".join(tokenized_context[i])
                lbl.write(linha)
                tokens = [tokenized_context[i]]
                context_ids = batcher.batch_sentences(tokens)
                # Compute ELMo representations (here for the input only, for simplicity).
                elmo_context_input_ = sess.run(
                    elmo_context_input['weighted_op'],
                    feed_dict={context_character_ids: context_ids}
                )
                # L2-normalize the "manga" token vector before collecting it.
                vetor = elmo_context_input_[0,manga_pos,:]
                inorm = 1 / np.linalg.norm(vetor)
                valores.append(vetor * inorm)
    # BUG FIX: the label file handle was never closed; close it so the
    # buffered labels are actually flushed to disk.
    lbl.close()
    #norm = np.linalg.norm(valores)
    # save
    df_manga = pd.DataFrame(valores)
    df_manga.to_csv('manga_vec.tsv', sep = '\t', header=False, index=None)
| StarcoderdataPython |
191176 | <filename>tankmonitor.py
from threading import Lock, Thread
from tornado.web import Application, RequestHandler, HTTPError
from tornado.httpserver import HTTPServer
from tornado.template import Template
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.gen import coroutine
from tornado.concurrent import run_on_executor
from sockjs.tornado import SockJSRouter, SockJSConnection
import logging
from tanklogger import TankLogger, TankLogRecord, TankAlert
from functools import partial
from datetime import datetime
from time import time
from serial import Serial
from email.mime.text import MIMEText
from concurrent.futures import ThreadPoolExecutor
import smtplib
import base64
import settings as appconfig
from PIL import Image, ImageDraw, ImageFont
import pcd8544.lcd as lcd
import netifaces as ni
import wiringpi2 as wiringpi
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
listen_port = 4242
disp_contrast_on = 0xB0
disp_contrast_off = 0x80
disp_font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf", 34)
disp_font_sm = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf", 9)
BTN_IN = 2 # wiringpi pin ID
BTN_OUT = 3 # wiringpi pin ID
VALVE_GPIO = 6 # wiringpi pin ID
thread_pool = ThreadPoolExecutor(2)
class EventConnection(SockJSConnection):
    """Pushes server-side events (new log values) to every connected
    SockJS client as JSON messages."""

    # Class-level registry shared by all connection instances.
    event_listeners = set()

    def on_open(self, request):
        self.event_listeners.add(self)

    def on_close(self):
        # discard() tolerates a listener that was already removed,
        # where the original remove() raised KeyError.
        self.event_listeners.discard(self)

    @classmethod
    def notify_all(cls, msg_dict):
        import json
        # Serialize once instead of once per listener.
        payload = json.dumps(msg_dict)
        # Iterate a snapshot: a failing send() may trigger on_close(),
        # which mutates the set during iteration.
        for event_listener in tuple(cls.event_listeners):
            event_listener.send(payload)
class MainPageHandler(RequestHandler):
    """Serves the single-page UI (templates/main.html) for GET /."""
    def get(self, *args, **kwargs):
        self.render('main.html')
logger_map = {
'10': 'tensec_logger',
'60': 'minute_logger',
'3600': 'hour_logger'
}
class LogDownloadHandler(RequestHandler):
    """Serves logged tank-level records.

    GET /logger/<interval>, where <interval> is a key of ``logger_map``
    ('10', '60' or '3600' seconds).  Query parameters:
      * format: 'nvd3' (default, JSON series) or 'tsv'
      * deltas: truthy to return rate-of-change records instead of levels
    """

    def get(self, logger_interval):
        fmt = self.get_argument('format', 'nvd3')  # or tsv
        deltas = self.get_argument('deltas', False)
        # dict.get avoids a KeyError (HTTP 500) for unknown interval paths.
        logger = getattr(self.application, logger_map.get(logger_interval, ''), None)
        if logger is None:
            # The original fell through without finishing the request,
            # leaving the client hanging; report the problem instead.
            raise HTTPError(404, reason="Unknown log interval")
        records = logger.deltas if deltas else logger.records
        if fmt == 'nvd3':
            self.finish({'key': 'Tank Level',
                         'values': list(records)})
        elif fmt == 'tsv':
            self.set_header('Content-Type', 'text/plain')
            if deltas:
                self.write('"Timestamp"\t"Rate of Change (%s/min)"\n' % appconfig.LOG_UNIT)
            else:
                self.write('"Timestamp"\t"%s"\n' % appconfig.LOG_UNIT)
            self.write_tsv(records)
            self.finish()
        else:
            # Unknown format also used to hang the request.
            raise HTTPError(400, reason="Unknown format")

    def write_tsv(self, records):
        """Write each record as a 'timestamp<TAB>depth' line."""
        for record in records:
            timestamp = datetime.fromtimestamp(record.timestamp).strftime('%Y-%m-%d %H:%M:%S')
            self.write('%s\t%s\n' % (timestamp, record.depth))
class ValveHandler(RequestHandler):
    """Callers can use the GET method to get the status of the creek intake valve and use the
    POST method to toggle the status of the creek intake valve.
    In both cases the response is a json dict like so:
    {
        "valve": 0,
        "transition_time": "2015-03-18T12:00:12"
    }
    Indicating the current status of the valve: 0 means that the IO pin is low (the valve is
    normally-open, so the valve will be open). 1 means that the IO pin is high and the valve is
    closed. transition_time is the time of the most recent state change, in the server's time
    zone, or null if the transition time is not known."""

    # Class-level state: one physical valve shared by all requests.
    _valve_state = False
    _transition_time = None

    def get(self, *args, **kwargs):
        self.finish(ValveHandler.get_state())

    def post(self, *args, **kwargs):
        auth_header = self.request.headers.get('Authorization')
        if auth_header is None or not auth_header.startswith('Basic '):
            self.set_status(401, reason="Valve control requires authentication")
            self.set_header('WWW-Authenticate', 'Basic realm=Restricted')
            self.finish()
            return
        else:
            # b64decode replaces the deprecated base64.decodestring alias.
            # NOTE(review): this file is Python 2 (print statement elsewhere);
            # under Python 3 the result would be bytes and need .decode().
            auth_decoded = base64.b64decode(auth_header[6:])
            hdr_auth = dict()
            # partition(':') instead of split(':', 2): RFC 7617 permits ':'
            # inside the password, and the original two-element unpacking
            # raised ValueError (HTTP 500) for such credentials.
            hdr_auth['username'], _, hdr_auth['password'] = auth_decoded.partition(':')
            if hdr_auth != appconfig.CREDENTIALS:
                raise HTTPError(403, reason="Valve control credentials invalid")
        ValveHandler._valve_state = not ValveHandler._valve_state
        ValveHandler._transition_time = datetime.now().isoformat()[:19]
        wiringpi.digitalWrite(VALVE_GPIO, int(ValveHandler._valve_state))
        self.finish(ValveHandler.get_state())

    @staticmethod
    def get_state():
        """Return the JSON-serializable valve status dict."""
        return {
            'valve': ValveHandler._valve_state,
            'transition_time': ValveHandler._transition_time
        }
class TankMonitor(Application):
    """Tornado application that aggregates tank-depth readings into three
    TankLogger buckets (10 s, 60 s, 3600 s), raises e-mail alerts, pushes
    values to SockJS clients and drives the PCD8544 LCD."""

    def __init__(self, handlers=None, **settings):
        super(TankMonitor, self).__init__(handlers, **settings)
        rate_threshold = appconfig.ALERT_RATE_THRESHOLD
        self.level_threshold = appconfig.ALERT_LEVEL_THRESHOLD
        # Three rolling logs at 10-second, 1-minute and 1-hour resolution.
        self.tensec_logger = TankLogger(10, alert_rate_threshold=rate_threshold)
        self.minute_logger = TankLogger(60, alert_rate_threshold=rate_threshold)
        self.hour_logger = TankLogger(3600, alert_rate_threshold=rate_threshold)
        # Most recent raw sensor string, shown on the LCD.
        self.latest_raw_val = None
        # time() after which the LCD backlight/contrast drops again.
        self.display_expiry = 0
    def log_tank_depth(self, tank_depth):
        """This method can be called from outside the app's IOLoop. It's the
        only method that can be called like that"""
        log.debug("Logging depth: " + str(tank_depth))
        # Hop onto the IOLoop thread before touching any shared state.
        IOLoop.current().add_callback(partial(self._offer_log_record, time(),
                                              tank_depth))
    @coroutine
    def _offer_log_record(self, timestamp, depth):
        """Fan a reading out to all loggers; mail alerts on low level or
        on any rate-of-change alert a logger reports."""
        log_record = TankLogRecord(timestamp=timestamp, depth=depth)
        if depth < self.level_threshold:
            yield AlertMailer.offer(TankAlert(timestamp=timestamp, depth=depth, delta=None))
        for logger in self.tensec_logger, self.minute_logger, self.hour_logger:
            alert = logger.offer(log_record)
            if alert:
                yield AlertMailer.offer(alert)
        # Push the new value to all connected browsers.
        EventConnection.notify_all({
            'event': 'log_value',
            'timestamp': timestamp,
            'value': depth
        })
    def update_display(self):
        """Periodic callback: redraw the LCD while the display is active,
        otherwise dim it and clear the screen."""
        # assumes the board's primary NIC is eth0 -- TODO confirm
        ip_addr = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']
        now = time()
        if now < self.display_expiry:
            im = Image.new('1', (84, 48))
            draw = ImageDraw.Draw(im)
            draw.text((0, 5), self.latest_raw_val, font=disp_font, fill=1)
            draw.text((0, 0), ip_addr, font=disp_font_sm, fill=1)
            draw.text((5, 36), "mm to surface", font=disp_font_sm, fill=1)
            lcd.show_image(im)
            # clean up
            del draw
            del im
            lcd.set_contrast(disp_contrast_on)
        else:
            lcd.set_contrast(disp_contrast_off)
            lcd.cls()
    def poll_display_button(self):
        """Periodic callback: pressing the button keeps the display lit
        for another 60 seconds."""
        btn_down = wiringpi.digitalRead(BTN_IN)
        if btn_down:
            self.display_expiry = time() + 60
    def _set_latest_raw_val(self, val):
        # IOLoop-thread half of set_latest_raw_val().
        self.latest_raw_val = val
    def set_latest_raw_val(self, val):
        """This method can be called from any thread."""
        IOLoop.instance().add_callback(self._set_latest_raw_val, val)
class MaxbotixHandler():
    """Reads range frames from a Maxbotix ultrasonic sensor on a serial
    port, applies a linear calibration and feeds the result to the
    TankMonitor application."""

    def __init__(self, tank_monitor, **kwargs):
        """kwargs will be passed through to the serial port constructor"""
        self.port_lock = Lock()
        self.serial_port = None
        self.set_serial_port(**kwargs)
        self.stop_reading = False
        self.tank_monitor = tank_monitor
        # Identity calibration (y = x) until calibrate() is called.
        self.calibrate_m = 1
        self.calibrate_b = 0

    def read(self):
        """Blocking read loop; intended to run in a dedicated thread.
        Loops until shutdown() sets stop_reading."""
        log.info("Starting MaxbotixHandler read")
        val = None
        while not self.stop_reading:
            try:
                with self.port_lock:
                    val = self.serial_port.read()
                    # A Maxbotix frame is 'R' followed by a 4-char range.
                    if val == 'R':
                        val = self.serial_port.read(4)
                        self.tank_monitor.set_latest_raw_val(val)
                        self.tank_monitor.log_tank_depth(self.convert(val))
            except Exception:
                # Narrowed from a bare except (which also swallowed
                # SystemExit/KeyboardInterrupt); log with traceback and
                # keep the loop alive.  Replaces the Python-2 print
                # statement + manual traceback.print_exc().
                log.exception("Unable to convert value '%s'", str(val))

    def calibrate(self, m, b):
        """ Defines the parameters for a linear equation y=mx+b, which is used
        to convert the output of the sensor to whatever units are specified in the settings file.
        """
        log.info("Calibrating Maxbotix interface with m=%2.4f, b=%2.4f" % (m, b))
        self.calibrate_m = float(m)
        self.calibrate_b = float(b)

    def convert(self, val):
        """Apply the linear calibration to a raw sensor reading."""
        converted = self.calibrate_m * float(val) + self.calibrate_b
        if log.isEnabledFor(logging.DEBUG):
            log.debug("Raw value %2.4f converted to %2.4f" % (float(val), converted))
        return converted

    def shutdown(self):
        """Ask the read loop to exit after its current iteration."""
        self.stop_reading = True

    def set_serial_port(self, **kwargs):
        """(Re)open the serial port; kwargs go to the Serial constructor."""
        with self.port_lock:
            self.serial_port = Serial(**kwargs)
class AlertMailer(object):
    """Sends alert e-mails, rate-limited to one per EMAIL['period'] seconds."""
    # Timestamp of the most recently sent alert (None = never sent).
    last_alert = None
    # NOTE: the template is read at import time; a missing
    # templates/tanklevel.txt makes importing this module fail.
    alert_mail = Template(open('templates/tanklevel.txt', 'rb').read())
    @staticmethod
    def send_message(alert_text, tank_alert):
        """Build and send the alert e-mail synchronously over SMTP.
        Runs on the thread pool (see offer()); blocking is acceptable there."""
        msg = MIMEText(alert_text)
        # Subject depends on whether this is a level alert or a delta alert.
        msg[
            'Subject'] = "[TWUC Alert] Tank Level Warning" if not tank_alert.delta else "[TWUC Alert] Tank Delta Warning"
        msg['From'] = appconfig.EMAIL['sending_address']
        msg['To'] = ', '.join(appconfig.EMAIL['distribution'])
        conn = None
        try:
            conn = smtplib.SMTP(
                "%s:%d" % (appconfig.EMAIL['smtp_server'], appconfig.EMAIL['smtp_port']))
            if appconfig.EMAIL['smtp_tls']:
                conn.starttls()
            conn.login(appconfig.EMAIL['sending_address'], appconfig.EMAIL['sending_password'])
            conn.sendmail(appconfig.EMAIL['sending_address'], appconfig.EMAIL['distribution'],
                          msg.as_string())
        finally:
            # Always close the SMTP connection, even if sending failed.
            if conn:
                conn.quit()
    @staticmethod
    @coroutine
    def offer(tank_alert):
        """Offer an alert for mailing; dropped silently if one was already
        sent within the last EMAIL['period'] seconds."""
        offer_time = time()
        if AlertMailer.last_alert is None or \
                (offer_time - AlertMailer.last_alert) > appconfig.EMAIL['period']:
            alert_text = AlertMailer.alert_mail.generate(alert=tank_alert)
            log.warn("Sending e-mail alert due to " + str(tank_alert))
            log.warn(alert_text)
            AlertMailer.last_alert = offer_time
            # Blocking SMTP work is pushed onto the thread pool.
            yield thread_pool.submit(lambda: AlertMailer.send_message(alert_text, tank_alert))
if __name__ == "__main__":
    # --- HTTP routing -------------------------------------------------
    event_router = SockJSRouter(EventConnection, '/event')
    handlers = [
        (r'/', MainPageHandler),
        (r'/logger/(.*)', LogDownloadHandler),  # arg is log interval
        (r'/valve', ValveHandler)
    ]
    handlers += event_router.urls
    tornado_settings = {
        'static_path': 'static',
        'template_path': 'templates',
        'debug': True
    }
    # --- Hardware init: LCD and GPIO pins -----------------------------
    lcd.init()
    lcd.gotoxy(0, 0)
    lcd.set_contrast(disp_contrast_on)
    lcd.cls()
    lcd.text("LCD Init")
    wiringpi.pinMode(BTN_OUT, 1)
    wiringpi.digitalWrite(BTN_OUT, 1)
    wiringpi.pinMode(VALVE_GPIO, 1)
    wiringpi.digitalWrite(VALVE_GPIO, 0)
    wiringpi.pinMode(BTN_IN, 0)
    # --- Application, sensor and periodic callbacks -------------------
    app = TankMonitor(handlers, **tornado_settings)
    maxbotix = MaxbotixHandler(tank_monitor=app, port='/dev/ttyAMA0', timeout=10)
    maxbotix.calibrate(appconfig.MAXBOTICS['calibrate_m'],
                       appconfig.MAXBOTICS['calibrate_b'])
    ioloop = IOLoop.instance()
    # Redraw the LCD twice a second; poll the button at 10 Hz.
    disp_print_cb = PeriodicCallback(app.update_display, callback_time=500, io_loop=ioloop)
    disp_print_cb.start()
    button_poll_cb = PeriodicCallback(app.poll_display_button, callback_time=100, io_loop=ioloop)
    button_poll_cb.start()
    http_server = HTTPServer(app)
    http_server.listen(listen_port)
    log.info("Listening on port " + str(listen_port))
    # Sensor reads happen on a daemon thread so they don't block the IOLoop.
    maxbotix_thread = Thread(target=maxbotix.read)
    maxbotix_thread.daemon = True
    maxbotix_thread.start()
    ioloop.start()
| StarcoderdataPython |
3491076 | # <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
# Source unit: kilobytes (value * 8000 -> bits, value / 1000 -> megabytes).
def to_bits(value):
    """Kilobytes -> bits (1 kB = 8000 bits)."""
    return value * 8000.0


def to_kilobits(value):
    """Kilobytes -> kilobits."""
    return value * 8.0


def to_megabits(value):
    """Kilobytes -> megabits."""
    return value / 125.0


def to_gigabits(value):
    """Kilobytes -> gigabits."""
    return value / 125000.0


def to_terabits(value):
    """Kilobytes -> terabits."""
    return value / 1.25e+8


def to_megabytes(value):
    """Kilobytes -> megabytes."""
    return value / 1000.0


def to_gigabytes(value):
    """Kilobytes -> gigabytes."""
    return value / 1e+6


def to_terabytes(value):
    """Kilobytes -> terabytes."""
    return value / 1e+9


def to_kibibits(value):
    """Kilobytes -> kibibits (8000 / 1024 = 7.8125 exactly)."""
    return value * 7.8125


def to_mebibits(value):
    """Kilobytes -> mebibits.

    Uses the exact ratio 8000 / 1048576; the original constant 0.00762939
    was truncated (exact value 0.00762939453125), introducing a relative
    error of ~6e-7 into every conversion.
    """
    return value * (8000.0 / 1048576.0)
| StarcoderdataPython |
278621 | <reponame>Akshat-Mantri/Rock-Paper_Scissor<filename>Rock Paper Scissor.py
#########################################################################
# Importing a random library
import random
# getting the player Input
def player_choose():
    """Prompt repeatedly until the user enters Stone, Paper or Scissor
    (any capitalization); return the move title-cased."""
    valid_moves = ('Stone', 'Paper', 'Scissor')
    choice = ''
    while choice.capitalize() not in valid_moves:
        choice = input("Enter your character (Stone, Paper, Scissor):- ")
    return choice.capitalize()
# Getting a computer Input with random Lib
def computer_choose():
    """Return the computer's move, chosen uniformly at random.

    random.choice replaces the original randint -> if/elif mapping; the
    distribution over the three moves is the same (uniform).
    """
    return random.choice(('Stone', 'Paper', 'Scissor'))
# Checking for win!
def check_win(player, computer):
    """Return the round's result string, or None for unrecognized moves."""
    # Full (player, computer) -> result table replaces the 9-branch
    # if/elif ladder.  Result strings (including the original spelling
    # of 'Loose') are kept byte-for-byte.
    outcomes = {
        ('Stone', 'Stone'): '!! Tie !!',
        ('Stone', 'Paper'): 'You Loose :(',
        ('Stone', 'Scissor'): '!! You Won !!',
        ('Paper', 'Paper'): '!! Tie !!',
        ('Paper', 'Stone'): '!! You Won !!',
        ('Paper', 'Scissor'): 'You Loose :(',
        ('Scissor', 'Scissor'): '!! Tie !!',
        ('Scissor', 'Stone'): 'You Loose :(',
        ('Scissor', 'Paper'): '!! You Won !!',
    }
    # .get -> None mirrors the original's implicit None fall-through.
    return outcomes.get((player, computer))
# Asking for want to play more
def replay():
    """Ask whether to play another round; True iff the answer is 'Yes'."""
    answer = ''
    while answer.capitalize() not in ('Yes', 'No'):
        answer = input('Do you want to play (Yes, No):- ')
    return answer.capitalize() == 'Yes'
# Main Game Loop
# NOTE: game_on is never set to False; the loop exits only via the
# break when replay() returns False.
game_on = True
while game_on:
    p_move = player_choose()
    c_move = computer_choose()
    win = check_win(p_move, c_move)
    print(f'Your Move:- {p_move}')
    print(f'Computer Move:- {c_move}')
    print(win)
    if not replay():
        break
#############################################################################
| StarcoderdataPython |
1602172 | <gh_stars>0
######importe######
#pip3 install colorama
import colorama #mögliche Farben: Magenta, Green, Red, Cyan, Yellow, White, Black
from colorama import init, Fore, Style
init(autoreset=True)
import spacy
nlp=spacy.load("de")
from mitsatzstrukturDEF import *
######code#######
print (Fore.CYAN+"GRAMMARCHECK")
print ("Gib einen Satz ein:")
while True:
eingabe= nlp(input(">>> "))
###funktionenzuweisung###
for token in eingabe:
# print(token.pos_)
if "DET" in token.pos_:
ARTIKEL(token.text)
PLURAL_A(token.text)
if "ADJ" in token.pos_:
ADJEKTIV(token.text)
if "NOUN" in token.pos_:
NOMEN(token.text)
PLURAL_N(token.text)
if "VERB" in token.pos_:
VERB(token.text)
if "AUX" in token.pos_:
HILFSVERB(token.text)
HILFSVERB_P(token.text)
if "PRON" in token.pos_:
PRONOMEN(token.text)
# print (artikel)
# print (adjektiv)
# print (nomen)
# print (verb)
# print (hilfsverb)
# print (pronomen)
cool_list = []
###regeln#####
switch=0
for token in eingabe:
cool_list.append(token.pos_)
try:
#regel für doppelte satzelemente
if cool_list.count("VERB")>=2 or cool_list.count("NOUN")>=2 or cool_list.count("DET")>=2 or cool_list.count("PRON")>=2 or cool_list.count("PROPN")>=2 or cool_list.count("ADJ")>=2:
print (Fore.RED+"Der Satz ist inkorrekt, weil min. ein Satzelement zu oft vorkommt!")
switch=1
# regel für doppelte subjekte
elif ("PROPN" in cool_list and "PRON" in cool_list) or ("NOUN" in cool_list and "PROPN" in cool_list) or ("NOUN" in cool_list and "PRON" in cool_list):
print (Fore.RED+ "Der Satz ist inkorrekt, weil er mehr als ein Subjekt enthält!")
switch=1
#regel für satzstellung
elif "PRON" in cool_list or "PROPN" in cool_list or "DET" in cool_list or "NOUN" in cool_list or "ADJ" in cool_list:
cool_string = " ".join(cool_list)
# print(cool_string)
if "PROPN DET" in cool_string or "NOUN ADJ" in cool_string or "VERB NOUN" in cool_string or "ADJ DET" in cool_string or "VERB PRON" in cool_string or "DET VERB NOUN" in cool_string:
print(Fore.RED+"Der Satz ist inkorrekt, weil etwas mit der Satzstruktur nicht stimmt!")
switch=1
#regel für sätze mit hilfsverb
if "AUX" in cool_list and "PROPN" in cool_list and "ADJ" in cool_list and switch==0:
if adjektiv[0]=="prädikativ" and hilfsverb[0]=="S3":
print (Fore.GREEN+"Der Satz ist korrekt!")
switch=1
else:
print(Fore.RED+"Der Satz ist inkorrekt, weil die Wörter nicht kongruent sind!")
elif "AUX" in cool_list and "PRON" in cool_list and "ADJ" in cool_list and switch==0:
if adjektiv[0]=="prädikativ" and (pronomen[0]==hilfsverb[0]) or (pronomen[0]=="S3/P2" and hilfsverb[0]=="S3") or (pronomen[0]=="S3/P2" and hilfsverb[0]=="P2"):
print (Fore.GREEN+"Der Satz ist korrekt!")
switch=1
else:
print(Fore.RED + "Der Satz ist inkorrekt, weil die Wörter nicht kongruent sind!")
elif "AUX" in cool_list and "ADJ" in cool_list and "DET" in cool_list and "NOUN" in cool_list and switch==0:
if artikel[0]=="D":
if adjektiv[0]=="prädikativ" and artikel[1] == nomen[0] and artikel[2]=="F/P" and nomen[1]==hilfsverb[1]:
print (Fore.GREEN +"Der Satz ist korrekt!")
switch=1
elif adjektiv[0]=="prädikativ" and artikel[1] == nomen[0] and artikel[2]== nomen[1] and nomen[1]==hilfsverb[1]:
print (Fore.GREEN +"Der Satz ist korrekt!")
switch=1
else:
print (Fore.RED + "Der Satz ist inkorrekt, weil die Wörter nicht kongruent sind!")
if artikel[0]=="I":
if adjektiv[0]=="prädikativ" and artikel[1] == nomen[0] and artikel[2]== nomen[1] and nomen[1] == hilfsverb[1]:
print (Fore.GREEN +"Der Satz ist korrekt!")
switch=1
elif adjektiv[0]=="prädikativ" and artikel[1]=="M/N" and nomen[0]=="M" or nomen[0]=="N" and artikel[2]== nomen[1] and nomen[1] == hilfsverb[1]:
print (Fore.GREEN +"Der Satz ist korrekt!")
switch=1
elif adjektiv[0]=="prädikativ"and artikel[1] == nomen[0] and artikel[2]== "F/P" and nomen[1] == hilfsverb[1]:
print (Fore.GREEN +"Der Satz ist korrekt!")
switch=1
else:
print (Fore.RED + "Der Satz ist inkorrekt, weil die Wörter nicht kongruent sind!")
#regel für sätze mit adjektiv
elif "ADJ" in cool_list and "DET" in cool_list and "NOUN" in cool_list and "VERB" in cool_list and switch==0:
if artikel[0]=="D":
if adjektiv[0]== nomen[1] and adjektiv[1] == nomen[1] and artikel[1] == nomen[0] and artikel[2]== "F/P" and nomen[1] == verb[0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
elif adjektiv[0]== artikel[0] and adjektiv[1] == nomen[1] and artikel[1] == nomen[0] and artikel[2]== "F/P" and nomen[1] == verb[0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
elif artikel[0]==adjektiv[0] and adjektiv[1] == nomen[1] and artikel[1] == nomen[0] and artikel[2]== nomen[1] and nomen[1] == verb[0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
else:
print (Fore.RED +"Der Satz ist inkorrekt, weil die Wörter nicht kongruent sind!")
if artikel[0]=="I":
if adjektiv[0]==nomen[0] and artikel[1] == nomen[0] and artikel[2]== nomen[1] and nomen[1] == verb [0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
elif adjektiv[0]==nomen[0] and artikel[1]=="M/N" and nomen[0]=="M" or nomen[0]=="N" and artikel[2]== nomen[1] and nomen[1] == verb[0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
elif adjektiv[0]==nomen[0] and artikel[1] == nomen[0] and artikel[2]== "F/P" and nomen[1] == verb [0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
else:
print (Fore.RED + "Der Satz ist inkorrekt, weil die Wörter nicht kongruent sind!")
#regel für pronomen
elif "PRON" in cool_list:
if pronomen[0] == verb [0] and switch==0:
print (Fore.GREEN +"Der Satz ist korrekt!")
elif switch==0:
print(Fore.RED + "Der Satz ist inkorrekt, weil keine Kongruenz zwischen Pronomen und Verb besteht!")
#regel für eigennamen
elif "PROPN" in cool_list and switch==0:
if verb[0]=="S3/P2":
print (Fore.GREEN +"Der Satz ist korrekt!")
else:
print(Fore.RED + "Der Satz ist inkorrekt, weil keine Kongruenz mit dem Verb besteht!")
#regel für sätze ohne adjektiv
elif "DET" in cool_list and "NOUN" in cool_list and "VERB" in cool_list and switch==0:
if artikel[0]=="D":
if artikel[1] == nomen[0] and artikel[2]== "F/P" and nomen[1] == verb [0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
elif artikel[1] == nomen[0] and artikel[2]== nomen[1] and nomen[1] == verb [0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
else:
print (Fore.RED + "Der Satz ist inkorrekt, weil die Wörter nicht kongruent sind!")
if artikel[0]=="I":
if artikel[1] == nomen[0] and artikel[2]== nomen[1] and nomen[1] == verb [0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
elif artikel[1]=="M/N" and nomen[0]=="M" or nomen[0]=="N" and artikel[2]== nomen[1] and nomen[1] == verb [0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
elif artikel[1] == nomen[0] and artikel[2]== "F/P" and nomen[1] == verb [0]:
print (Fore.GREEN +"Der Satz ist korrekt!")
else:
print (Fore.RED + "Der Satz ist inkorrekt, weil die Wörter nicht kongruent sind!")
#unbekannte satzstruktur
elif switch==0:
print (Fore.YELLOW+"Diese Satzstruktur kennen wir nicht.")
#regel für indexfehler
except IndexError:
if switch==0:
print (Fore.YELLOW+"Oopps...Da gab es einen Fehler!")
pass
print ("Willst du einen weiteren Satz testen?")
eingabe= input(">>> ")
if eingabe == "Ja":
del artikel[:]
del adjektiv[:]
del nomen[:]
del verb[:]
del hilfsverb[:]
del pronomen[:]
print ("Dann gib einen weiteren Satz ein:")
continue
if eingabe == "Nein":
print ("OK!")
break
| StarcoderdataPython |
5014810 | <reponame>rit-bikeshare/backend
from django.contrib import admin
class BikeshareAdminSite(admin.AdminSite):
    """Django admin site with bikeshare branding and the 'view site'
    link removed (site_url = None)."""
    index_title = site_header = 'Bikeshare administration'
    site_title = 'Bikeshare admin'
    site_url = None
    # NOTE(review): the original chained assignment assigned
    # ``logout_template`` twice; one occurrence was presumably meant to be
    # ``login_template`` -- confirm intent.  All are explicitly reset to
    # None here (one per line instead of one chained statement).
    login_form = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None
1988704 | <filename>kitty/themes/color_setter.py
from itertools import starmap
def get_numbered(coolors_export):
    """Turn a coolors.co CSS-variable export into kitty ``colorN <hex>``
    lines, one per input line, numbered from 0."""
    numbered_lines = []
    for index, prop in enumerate(coolors_export.splitlines()):
        # '--name: #rrggbbaa;' -> '#rrggbb' (drop the ';' and the alpha byte).
        hex_value = prop.rstrip(';').split(': ')[1][:-2]
        numbered_lines.append(f'color{index} {hex_value}')
    return '\n'.join(numbered_lines)
colors = '''\
--space-cadet: #24283bff;
--ultra-red: #f7768eff;
--pistachio: #9ece6aff;
--earth-yellow: #e0af68ff;
--cornflower-blue: #7aa2f7ff;
--bright-lilac: #bb9af7ff;
--light-sky-blue: #7dcfffff;
--lavender-web: #dce1f9ff;
--raisin-black: #1a1b26ff;'''
print(get_numbered(colors))
| StarcoderdataPython |
3302618 | import discord
from discord.ext import commands
from .. import __version__, __author__
class Basics(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(help=_("calculates bot latency"))
async def ping(self, ctx):
latency = int(round(self.client.latency * 1000, 0))
await ctx.send(f"Pong! {latency}ms")
@commands.command(help=_("Bot info"))
async def info(self, ctx):
msg = _(
"This bot was created to simulate an anarcho-capitalist economy on Discord"
)
embed = discord.Embed(
title=_("An Anarcho-capitalist Bot"),
description=(msg + "\n[GitHub](https://github.com/Erogue-Lord/ancap-bot)"),
color=0xFAFF00,
)
embed.set_author(name=f"Ancap Bot {__version__}")
embed.set_footer(text=_("Created by {}").format(__author__))
await ctx.send(embed=embed)
def setup(client):
client.add_cog(Basics(client))
| StarcoderdataPython |
4936889 | from __future__ import print_function
from builtins import str
from past.builtins import basestring
from builtins import object
import socket, sys, time, uuid, json, inspect, collections
from xml.sax.saxutils import escape
from nltk.corpus import wordnet
from xml.etree.ElementTree import Element
from EHR.APIConstants import APIConstants
from EHR.APIVariables import APIVariables
class Utilities(object):
MODELS_PATH = "models_subset";
@staticmethod
def classLengthSort(classA, classB): return len(str(classA)) - len(str(classB));
@staticmethod
def mergeListOfLists(list): return [item for sublist in list for item in sublist];
@staticmethod
def dictKeyFromValue(dictionary, searchValue):
for key, value in list(dictionary.items()): # items() in Python 3+
if value == searchValue:
return key;
@staticmethod
def removeLastCharIfNumber(string):
if ( Utilities.isNumber(string[len(string) - 1]) ):
return string[:-1];
else:
return string;
@staticmethod
def isNumber(string):
try:
float(string)
return True
except ValueError:
return False
@staticmethod
def mergeDicts(dictionaries):
superDictionary = collections.defaultdict(set)
for dictionary in dictionaries:
for key, value in list(dictionary.items()): # items() in Python 3+
superDictionary[key].update(value)
return superDictionary;
# Find different grammatical forms of words in camelcase string.
@staticmethod
def differentWordForms(wordsInString):
replacements = set();
for word in Utilities.listFromCapitals(wordsInString):
for lemma in Utilities.lemmas(word):
replacement = wordsInString.replace(word, lemma.title());
replacements.add(replacement);
return replacements;
@staticmethod
def lemmas(word):
lemmas = set();
for lemma in wordnet.lemmas(word):
for related_lemma in lemma.derivationally_related_forms():
lemmas.add(related_lemma.name());
return lemmas;
@staticmethod
def getXMLElements(root, depthToElement={}, children=True, parents=True, duplicates=True, recurse=True, attributes=False, depth=0):
if (attributes):
for attributeKey in list(root.attrib.keys()):
root.append(Element(attributeKey));
for elem in root.getchildren():
if children: # if is child
if len(elem.getchildren()) == 0 and len(list(elem.attrib.keys())) == 0: #TODO: Make elements optional
depthToElement.setdefault(depth, []).append(elem);
if parents: # if is parent
if len(elem.getchildren()) > 0 or len(list(elem.attrib.keys())): #TODO: Make elements optional
depthToElement.setdefault(depth, []).append(elem);
if ( recurse ):
# Record depth allowing us to order ehrClasses by tree position, so we look at most nested first.
Utilities.getXMLElements(elem, depthToElement, children, parents, duplicates, recurse, attributes, depth+1);
if ( not duplicates ):
for depth in depthToElement:
replacementElements = [];
for element in depthToElement[depth]:
#print element;
if element.tag not in [replacementElement.tag for replacementElement in replacementElements]:
replacementElements.append(element);
depthToElement[depth] = replacementElements;
return depthToElement
@staticmethod
def capitalToSeparation(word):
index = 0;
for letter in list(word):
if index > 0 and letter.isupper():
word = word[0:index] + "_" + word[index:len(word)]
index += 1
index += 1
return word
def splitOnCharacter(word):
# If no separator is supplied (e.g. for TPP), assume compound words are defined by capitals.
if TranslationUtilities.SEPARATOR == "": return listFromCapitals(word);
if ( TranslationUtilities.SEPARATOR not in word ):
return word;
else:
return word.spit( TranslationUtilities.SEPARATOR );
@staticmethod
def listFromCapitals(word):
withSeparators = Utilities.capitalToSeparation(word);
if "_" not in withSeparators:
return [word];
else:
return Utilities.capitalToSeparation(word).split("_");
@staticmethod
def separationToCapital(word):
full_word = "";
for section in word.split("_"):
full_word += section.capitalize();
return full_word;
# NB. FHIR is not a hierarchy.
@staticmethod
def JSONfromFHIRClass(FHIRClass, nullValues):
# Create new object to represent this class.
FHIRObject = FHIRClass();
for attribute in FHIRClass.elementProperties(FHIRClass()):
invert_op = getattr(attribute[2], "elementProperties", None)
# Don't expand from within FHIRReferences, as it has a recursive reference to identifier (also doesn't appear to be captured correctly by the parser, e.g. organisation from Patient).
# Extensions classes appear in every class so don't show anything unique.
if callable(invert_op) and "FHIRReference" not in str(FHIRClass) and "Extension" not in str(attribute[2]):
subJSON = Utilities.JSONfromFHIRClass(attribute[2], nullValues)
setattr(FHIRObject, str(attribute[0]), subJSON)
else:
if (nullValues):
setattr(FHIRObject, str(attribute[0]), None)
else:
setattr(FHIRObject, str(attribute[0]), str(attribute[2]))
return FHIRObject.__dict__;
# Can include parent keys (e.g. "name": { "family": ... }, becomes family_name) as this helps with similarity checks.
@staticmethod
def getReplaceJSONKeys(data, parents=None, keys=list(), search=None, replace=None):
if isinstance(data, dict):
for k, v in list(data.items()):
if parents != None and len(parents) > 0:
k = k + "_" + parents
keys.append(k);
if (k == search):
data[search] = replace
if not isinstance(v, basestring) and not v is None:
if parents != None:
parents = k;
keys = Utilities.getReplaceJSONKeys(v, parents, keys, search, replace)
if parents != None:
parents = ""
return keys
@staticmethod
def printJSONValues(data):
if isinstance(data, dict):
for k, v in list(data.items()):
if isinstance(v, basestring):
print(k)
else:
Utilities.printJSONValues(v)
elif isinstance(data, list):
for v in data:
if not isinstance(v, str):
Utilities.printJSONValues(v)
else:
print(data);
@staticmethod
def ehrRequest(data):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (APIVariables.ADDRESS, APIVariables.PORT)
sock.connect(server_address);
response=""
try:
request = '<?xml version="1.0" encoding="utf-8"?>' + \
'<ClientIntegrationRequest>' + \
'<RequestUID>' + str(uuid.uuid4()) + '</RequestUID>' + \
data + \
'</ClientIntegrationRequest>'
print(request);
sock.sendall(request.encode('utf-8'))
sock.settimeout(20);
time.sleep(2)
BUFF_SIZE = 4096
response = ""
while True:
part = sock.recv(BUFF_SIZE)
response += part
if len(part) < BUFF_SIZE:
break
try:
formatted = xml.dom.minidom.parseString(response)
pretty_xml_as_string = formatted.toprettyxml()
return pretty_xml_as_string
except xml.parsers.expat.ExpatError:
return "Cannot parse response. Is the EHR running?"
finally:
sock.close()
def cmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
| StarcoderdataPython |
247556 | <filename>test/test_proto_inspect.py<gh_stars>0
# coding=utf-8
import pytest
from proto_inspect import (
ProtoMessage,
signed_to_uint,
uint_to_signed,
read_varint,
write_varint,
bytes_to_encode_varint,
)
# suppress 'not found' linting
pytest.raises = pytest.raises
def test_parse_empty_message():
ProtoMessage.parse(b'')
def test_zig_zag():
for i in range(1000):
assert signed_to_uint(uint_to_signed(i)) == i
def test_varint_parsing():
for i in range(1000):
serialized = write_varint(i)
assert len(serialized) == bytes_to_encode_varint(i)
embedded = b'foo' + serialized + b'bar'
assert read_varint(embedded, offset=3) == (i, len(serialized))
def test_varint_no_negatives():
with pytest.raises(ValueError):
write_varint(-1)
with pytest.raises(ValueError):
bytes_to_encode_varint(-1)
def test_truncated_varint():
serialized = write_varint(999999999)
assert read_varint(serialized) == (999999999, len(serialized))
with pytest.raises(ValueError):
read_varint(serialized[:-1])
| StarcoderdataPython |
1693558 | <filename>tests/sdk/queries/alerts/filters/test_alert_filter.py
from datetime import datetime
from time import time
from tests.sdk.queries.conftest import CONTAINS
from tests.sdk.queries.conftest import IN_RANGE
from tests.sdk.queries.conftest import IS
from tests.sdk.queries.conftest import IS_IN
from tests.sdk.queries.conftest import IS_NOT
from tests.sdk.queries.conftest import NOT_CONTAINS
from tests.sdk.queries.conftest import NOT_IN
from tests.sdk.queries.conftest import ON_OR_AFTER
from tests.sdk.queries.conftest import ON_OR_BEFORE
from py42.sdk.queries.alerts.filters import Actor
from py42.sdk.queries.alerts.filters import AlertState
from py42.sdk.queries.alerts.filters import DateObserved
from py42.sdk.queries.alerts.filters import Description
from py42.sdk.queries.alerts.filters import RuleId
from py42.sdk.queries.alerts.filters import RuleName
from py42.sdk.queries.alerts.filters import RuleSource
from py42.sdk.queries.alerts.filters import RuleType
from py42.sdk.queries.alerts.filters import Severity
from py42.sdk.queries.alerts.filters.alert_filter import create_contains_filter_group
from py42.sdk.queries.alerts.filters.alert_filter import (
create_not_contains_filter_group,
)
from py42.util import MICROSECOND_FORMAT
def format_timestamp_with_microseconds(test_time):
test_date = datetime.utcfromtimestamp(test_time)
return format_datetime_with_microseconds(test_date)
def format_datetime_with_microseconds(test_date):
    """Format a datetime with the shared microsecond-precision format
    expected by the alert filters under test."""
    # The original's prefix/timestamp_str intermediates added nothing.
    return test_date.strftime(MICROSECOND_FORMAT)
def test_create_contains_filter_group_returns_filter_group_with_correct_json_representation():
    # The group should serialize to a single-clause AND filter using CONTAINS.
    group = create_contains_filter_group("test_eq_term", "string_to_contain")
    expected = (
        '{"filterClause":"AND", "filters":[{"operator":"CONTAINS", '
        '"term":"test_eq_term", "value":"string_to_contain"}]}'
    )
    assert str(group) == expected


def test_create_not_contains_filter_group_returns_filter_group_with_correct_json_representation():
    # Same shape as above, but with the DOES_NOT_CONTAIN operator.
    group = create_not_contains_filter_group("test_eq_term", "string_to_not_contain")
    expected = (
        '{"filterClause":"AND", "filters":[{"operator":"DOES_NOT_CONTAIN", '
        '"term":"test_eq_term", "value":"string_to_not_contain"}]}'
    )
    assert str(group) == expected
def test_date_observed_on_or_after_str_gives_correct_json_representation():
    now = time()
    stamp = format_timestamp_with_microseconds(now)
    assert str(DateObserved.on_or_after(now)) == ON_OR_AFTER.format("createdAt", stamp)


def test_date_observed_on_or_before_str_gives_correct_json_representation():
    now = time()
    stamp = format_timestamp_with_microseconds(now)
    assert str(DateObserved.on_or_before(now)) == ON_OR_BEFORE.format("createdAt", stamp)


def test_date_observed_does_not_have_within_the_last_option():
    assert hasattr(DateObserved(), "within_the_last") is False


def test_date_observed_in_range_str_gives_correct_json_representation():
    start = time()
    end = start + 30  # make sure timestamps are actually different
    expected = IN_RANGE.format(
        "createdAt",
        format_timestamp_with_microseconds(start),
        format_timestamp_with_microseconds(end),
    )
    assert str(DateObserved.in_range(start, end)) == expected


def test_date_observed_on_same_day_str_gives_correct_json_representation():
    now = time()
    day = datetime.utcfromtimestamp(now)
    # Bounds of the UTC day containing `now`, with microseconds zeroed.
    day_start = day.replace(hour=0, minute=0, second=0, microsecond=0)
    day_end = day.replace(hour=23, minute=59, second=59, microsecond=0)
    expected = IN_RANGE.format(
        "createdAt",
        format_datetime_with_microseconds(day_start),
        format_datetime_with_microseconds(day_end),
    )
    assert str(DateObserved.on_same_day(now)) == expected
def test_actor_eq_str_gives_correct_json_representation():
    query_filter = Actor.eq("test.testerson")
    assert str(query_filter) == IS.format("actor", "test.testerson")


def test_actor_not_eq_str_gives_correct_json_representation():
    query_filter = Actor.not_eq("test.testerson")
    assert str(query_filter) == IS_NOT.format("actor", "test.testerson")


def test_actor_is_in_str_gives_correct_json_representation():
    names = ["test.testerson", "flag.flagerson", "mock.mockerson"]
    query_filter = Actor.is_in(names)
    assert str(query_filter) == IS_IN.format("actor", *sorted(names))


def test_actor_not_in_str_gives_correct_json_representation():
    names = ["test.testerson", "flag.flagerson", "mock.mockerson"]
    query_filter = Actor.not_in(names)
    assert str(query_filter) == NOT_IN.format("actor", *sorted(names))


def test_actor_contains_str_gives_correct_json_representation():
    query_filter = Actor.contains("test")
    assert str(query_filter) == CONTAINS.format("actor", "test")


def test_actor_not_contains_str_gives_correct_json_representation():
    query_filter = Actor.not_contains("test")
    assert str(query_filter) == NOT_CONTAINS.format("actor", "test")
def test_severity_eq_str_gives_correct_json_representation():
    query_filter = Severity.eq("HIGH")
    assert str(query_filter) == IS.format("severity", "HIGH")


def test_severity_not_eq_str_gives_correct_json_representation():
    query_filter = Severity.not_eq("HIGH")
    assert str(query_filter) == IS_NOT.format("severity", "HIGH")


def test_severity_is_in_str_gives_correct_json_representation():
    levels = ["HIGH", "MEDIUM", "LOW"]
    query_filter = Severity.is_in(levels)
    assert str(query_filter) == IS_IN.format("severity", *sorted(levels))


def test_severity_not_in_str_gives_correct_json_representation():
    levels = ["HIGH", "MEDIUM", "LOW"]
    query_filter = Severity.not_in(levels)
    assert str(query_filter) == NOT_IN.format("severity", *sorted(levels))
def test_rule_name_eq_str_gives_correct_json_representation():
    query_filter = RuleName.eq("Departing Employee")
    assert str(query_filter) == IS.format("name", "Departing Employee")


def test_rule_name_not_eq_str_gives_correct_json_representation():
    query_filter = RuleName.not_eq("Departing Employee")
    assert str(query_filter) == IS_NOT.format("name", "Departing Employee")


def test_rule_name_is_in_str_gives_correct_json_representation():
    names = ["rule 1", "rule 2", "rule 3"]
    query_filter = RuleName.is_in(names)
    assert str(query_filter) == IS_IN.format("name", *sorted(names))


def test_rule_name_not_in_str_gives_correct_json_representation():
    names = ["rule 1", "rule 2", "rule 3"]
    query_filter = RuleName.not_in(names)
    assert str(query_filter) == NOT_IN.format("name", *sorted(names))


def test_rule_name_contains_str_gives_correct_json_representation():
    query_filter = RuleName.contains("test")
    assert str(query_filter) == CONTAINS.format("name", "test")


def test_rule_name_not_contains_str_gives_correct_json_representation():
    query_filter = RuleName.not_contains("test")
    assert str(query_filter) == NOT_CONTAINS.format("name", "test")
def test_rule_id_eq_str_gives_correct_json_representation():
    query_filter = RuleId.eq("rule123")
    assert str(query_filter) == IS.format("ruleId", "rule123")


def test_rule_id_not_eq_str_gives_correct_json_representation():
    query_filter = RuleId.not_eq("rule123")
    assert str(query_filter) == IS_NOT.format("ruleId", "rule123")


def test_rule_id_is_in_str_gives_correct_json_representation():
    ids = ["rule1", "rule2", "rule3"]
    query_filter = RuleId.is_in(ids)
    assert str(query_filter) == IS_IN.format("ruleId", *sorted(ids))


def test_rule_id_not_in_str_gives_correct_json_representation():
    ids = ["rule 1", "rule 2", "rule 3"]
    query_filter = RuleId.not_in(ids)
    assert str(query_filter) == NOT_IN.format("ruleId", *sorted(ids))
def test_rule_type_eq_str_gives_correct_json_representation():
    query_filter = RuleType.eq("rule123")
    assert str(query_filter) == IS.format("type", "rule123")


def test_rule_type_not_eq_str_gives_correct_json_representation():
    query_filter = RuleType.not_eq("rule123")
    assert str(query_filter) == IS_NOT.format("type", "rule123")


def test_rule_type_is_in_str_gives_correct_json_representation():
    types = ["rule1", "rule2", "rule3"]
    query_filter = RuleType.is_in(types)
    assert str(query_filter) == IS_IN.format("type", *sorted(types))


def test_rule_type_not_in_str_gives_correct_json_representation():
    types = ["rule 1", "rule 2", "rule 3"]
    query_filter = RuleType.not_in(types)
    assert str(query_filter) == NOT_IN.format("type", *sorted(types))
def test_rule_source_eq_str_gives_correct_json_representation():
    query_filter = RuleSource.eq("rule123")
    assert str(query_filter) == IS.format("ruleSource", "rule123")


def test_rule_source_not_eq_str_gives_correct_json_representation():
    query_filter = RuleSource.not_eq("rule123")
    assert str(query_filter) == IS_NOT.format("ruleSource", "rule123")


def test_rule_source_is_in_str_gives_correct_json_representation():
    sources = ["rule1", "rule2", "rule3"]
    query_filter = RuleSource.is_in(sources)
    assert str(query_filter) == IS_IN.format("ruleSource", *sorted(sources))


def test_rule_source_not_in_str_gives_correct_json_representation():
    sources = ["rule 1", "rule 2", "rule 3"]
    query_filter = RuleSource.not_in(sources)
    assert str(query_filter) == NOT_IN.format("ruleSource", *sorted(sources))
def test_description_eq_str_gives_correct_json_representation():
    query_filter = Description.eq("Departing Employee")
    assert str(query_filter) == IS.format("description", "Departing Employee")


def test_description_not_eq_str_gives_correct_json_representation():
    query_filter = Description.not_eq("Departing Employee")
    assert str(query_filter) == IS_NOT.format("description", "Departing Employee")


def test_description_is_in_str_gives_correct_json_representation():
    descriptions = ["desc1", "desc2", "desc3"]
    query_filter = Description.is_in(descriptions)
    assert str(query_filter) == IS_IN.format("description", *sorted(descriptions))


def test_description_not_in_str_gives_correct_json_representation():
    descriptions = ["desc1", "desc2", "desc3"]
    query_filter = Description.not_in(descriptions)
    assert str(query_filter) == NOT_IN.format("description", *sorted(descriptions))


def test_description_contains_str_gives_correct_json_representation():
    query_filter = Description.contains("test")
    assert str(query_filter) == CONTAINS.format("description", "test")


def test_description_not_contains_str_gives_correct_json_representation():
    query_filter = Description.not_contains("test")
    assert str(query_filter) == NOT_CONTAINS.format("description", "test")
def test_alert_state_eq_str_gives_correct_json_representation():
    query_filter = AlertState.eq("OPEN")
    assert str(query_filter) == IS.format("state", "OPEN")


def test_alert_state_not_eq_str_gives_correct_json_representation():
    query_filter = AlertState.not_eq("OPEN")
    assert str(query_filter) == IS_NOT.format("state", "OPEN")


def test_alert_state_is_in_str_gives_correct_json_representation():
    states = ["OPEN", "DISMISSED", "OTHER"]
    query_filter = AlertState.is_in(states)
    assert str(query_filter) == IS_IN.format("state", *sorted(states))


def test_alert_state_not_in_str_gives_correct_json_representation():
    states = ["OPEN", "DISMISSED", "other"]
    query_filter = AlertState.not_in(states)
    assert str(query_filter) == NOT_IN.format("state", *sorted(states))
def test_rule_source_choices_returns_set():
    assert set(RuleSource.choices()) == {
        "Alerting",
        "Departing Employee",
        "High Risk Employee",
    }


def test_rule_type_choices_returns_set():
    assert set(RuleType.choices()) == {
        "FedEndpointExfiltration",
        "FedCloudSharePermissions",
        "FedFileTypeMismatch",
    }


def test_severity_choices_returns_set():
    assert set(Severity.choices()) == {"HIGH", "MEDIUM", "LOW"}


def test_alert_state_choices_returns_set():
    assert set(AlertState.choices()) == {"OPEN", "RESOLVED", "PENDING", "IN_PROGRESS"}
| StarcoderdataPython |
1630203 | """
Exchange information.
This information should be filled in when connecting to a service.
Some of this should be filled from
Note that the host used for job management and status updates is
going to be different from that used for mapping operations within
the job.
*CLIENT_HOST* | "user:password@host:port/virtual_host"
Host for messages to and from the job manager and computation monitor.
The user supplies this when they make a connection.
*SERVICE_HOST* | "user:password@host:port/virtual_host"
Host for messages within the computation. The administrator supplies
this when the configure the compute cluster.
*EXCHANGE* | string
Exchange name to use for the system.
*MAX_QUEUE* | int
The maximum number of messages any process should have outstanding. This
should be somewhat greater than the number of computers in the cluster,
but not so large that the computation saturates the exchange.
"""
CLIENT_HOST = "guest:<EMAIL>:5672/"
SERVICE_HOST = "guest:<EMAIL>:5672/"
EXCHANGE = "park"
MAX_QUEUE = 1000
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.