text
string
size
int64
token_count
int64
from django.contrib import admin

from .models import Book, Favorite

# Make both application models manageable through the Django admin site.
for model in (Book, Favorite):
    admin.site.register(model)
126
40
from flask import Flask, jsonify, request

# Port the development server listens on.
PORT = 5000

app = Flask(__name__)


@app.route('/')
def index():
    """Serve a plain-text greeting at the site root."""
    return 'Hello World'


@app.route('/data')
def data():
    """Return a small fixed JSON payload."""
    payload = {'error': False, 'data': 123}
    return jsonify(payload)


if __name__ == "__main__":
    # Announce the local URL, then listen on all interfaces.
    print(f'Running on http://127.0.0.1:{PORT}')
    app.run('0.0.0.0', PORT)
329
136
from django.core import exceptions
from django.http import FileResponse
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.parsers import MultiPartParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from shared.audit_log.viewsets import AuditLoggingModelViewSet
from shared.oidc.auth import EAuthRestAuthentication

from applications.api.v1.auth import StaffAuthentication
from applications.api.v1.permissions import (
    ALLOWED_APPLICATION_UPDATE_STATUSES,
    ALLOWED_APPLICATION_VIEW_STATUSES,
    ApplicationPermission,
    get_user_company,
    StaffPermission,
    SummerVoucherPermission,
)
from applications.api.v1.serializers import (
    ApplicationSerializer,
    AttachmentSerializer,
    SummerVoucherSerializer,
)
from applications.enums import ApplicationStatus
from applications.models import Application, SummerVoucher


class ApplicationViewSet(AuditLoggingModelViewSet):
    # CRUD API for Application objects; all access is audit-logged via the
    # AuditLoggingModelViewSet base class.
    queryset = Application.objects.all()
    serializer_class = ApplicationSerializer
    permission_classes = [IsAuthenticated, ApplicationPermission]

    def get_queryset(self):
        """
        Fetch all DRAFT status applications of the user & company.
        Should include only 1 application since we don't allow creation of
        multiple DRAFT applications per user & company.
        """
        # select_related/prefetch_related avoid N+1 queries for the related
        # company and summer voucher rows serialized with each application.
        queryset = (
            super()
            .get_queryset()
            .select_related("company")
            .prefetch_related("summer_vouchers")
        )
        user = self.request.user
        # Anonymous users see nothing rather than getting a 403 here;
        # permission classes handle outright rejection.
        if user.is_anonymous:
            return queryset.none()

        user_company = get_user_company(self.request)

        return queryset.filter(
            company=user_company,
            user=user,
            status__in=ALLOWED_APPLICATION_VIEW_STATUSES,
        )

    def create(self, request, *args, **kwargs):
        """
        Allow only 1 (DRAFT) application per user & company.
        """
        if self.get_queryset().filter(status=ApplicationStatus.DRAFT).exists():
            raise ValidationError("Company & user can have only one draft application")
        return super().create(request, *args, **kwargs)

    def update(self, request, *args, **kwargs):
        """
        Allow to update only DRAFT status applications.
        """
        instance = self.get_object()
        if instance.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:
            raise ValidationError("Only DRAFT applications can be updated")
        return super().update(request, *args, **kwargs)

    def destroy(self, request, *args, **kwargs):
        # Deleting applications through the API is deliberately unsupported.
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)


class SummerVoucherViewSet(AuditLoggingModelViewSet):
    # Summer vouchers are only manipulated through the attachment
    # sub-endpoints below; every standard CRUD verb returns 405.
    queryset = SummerVoucher.objects.all()
    serializer_class = SummerVoucherSerializer
    authentication_classes = [EAuthRestAuthentication, StaffAuthentication]
    permission_classes = [IsAuthenticated, SummerVoucherPermission | StaffPermission]

    def get_queryset(self):
        """
        Fetch summer vouchers of DRAFT status applications of the user & company.
        """
        queryset = (
            super()
            .get_queryset()
            .select_related("application")
            .prefetch_related("attachments")
        )
        user = self.request.user
        # Staff users may operate on any voucher; anonymous users on none.
        if user.is_staff:
            return queryset
        elif user.is_anonymous:
            return queryset.none()

        user_company = get_user_company(self.request)

        return queryset.filter(
            application__company=user_company,
            application__user=user,
            application__status__in=ALLOWED_APPLICATION_VIEW_STATUSES,
        )

    def create(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)

    def update(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)

    def retrieve(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)

    def list(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)

    def destroy(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)

    @action(
        methods=("POST",),
        detail=True,
        url_path="attachments",
        parser_classes=(MultiPartParser,),
    )
    def post_attachment(self, request, *args, **kwargs):
        """
        Upload a single file as attachment
        """
        obj = self.get_object()

        if obj.application.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:
            raise ValidationError(
                "Attachments can be uploaded only for DRAFT applications"
            )

        # Validate request data
        # NOTE(review): request.data["attachment_file"] raises KeyError (500)
        # if the field is missing — presumably the client always sends it;
        # verify against callers.
        serializer = AttachmentSerializer(
            data={
                "summer_voucher": obj.id,
                "attachment_file": request.data["attachment_file"],
                "content_type": request.data["attachment_file"].content_type,
                "attachment_type": request.data["attachment_type"],
            }
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    @action(
        methods=(
            "GET",
            "DELETE",
        ),
        detail=True,
        url_path="attachments/(?P<attachment_pk>[^/.]+)",
    )
    def handle_attachment(self, request, attachment_pk, *args, **kwargs):
        # Dispatches on HTTP verb: GET streams the file, DELETE removes it.
        obj = self.get_object()

        if request.method == "GET":
            """
            Read a single attachment as file
            """
            attachment = obj.attachments.filter(pk=attachment_pk).first()
            if not attachment or not attachment.attachment_file:
                return Response(
                    {
                        "detail": format_lazy(
                            _("File not found."),
                        )
                    },
                    status=status.HTTP_404_NOT_FOUND,
                )
            return FileResponse(attachment.attachment_file)

        elif request.method == "DELETE":
            """
            Delete a single attachment as file
            """
            # NOTE(review): the two status checks below look redundant if
            # ALLOWED_APPLICATION_UPDATE_STATUSES equals
            # ATTACHMENT_MODIFICATION_ALLOWED_STATUSES — confirm whether both
            # are intentional (they return different error responses).
            if obj.application.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:
                raise ValidationError(
                    "Attachments can be deleted only for DRAFT applications"
                )

            if (
                obj.application.status
                not in AttachmentSerializer.ATTACHMENT_MODIFICATION_ALLOWED_STATUSES
            ):
                return Response(
                    {"detail": _("Operation not allowed for this application status.")},
                    status=status.HTTP_403_FORBIDDEN,
                )
            try:
                instance = obj.attachments.get(id=attachment_pk)
            except exceptions.ObjectDoesNotExist:
                return Response(
                    {"detail": _("File not found.")}, status=status.HTTP_404_NOT_FOUND
                )
            instance.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
7,407
1,999
#!/usr/bin/env python3
"""Development entry point: run the Flask application locally.

BUG FIX: the original shebang ``#!env/bin/python3`` only works when the
script is executed from the one directory containing an ``env/`` virtualenv;
``/usr/bin/env python3`` resolves the interpreter from PATH as intended.
"""
from app import app

# debug=True enables the reloader/debugger; bind to localhost only.
app.run(debug=True, host="localhost", port=8202)
88
36
from bs4 import BeautifulSoup as bs
import requests
import sys
import os
import platform

# CLI scraper: downloads post images/attachments for one or more creator
# pages. Usage: python <script> [start page] [last page] url [url ...]
amountOfLinks = len(sys.argv)-1
urlCounter = 0
urlList = []
missingFiles = []  # URLs/pages that failed; written to SkippedLinks.txt
userAgent = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36"
dirSep = ""
system = platform.system()
cLastPageFlag = False  # True when the user supplied an explicit last page

# Pick the platform-specific path separator (os.path.join would do this).
if(system == 'Windows'):
    dirSep = "\\"
else:
    dirSep = "/"

print("\n======Starting Scraper========")

#Checks if there are links present and puts then in a list if they are
if amountOfLinks <= 0:
    print("\nPlease enter at least 1 link as argument.\ne.g. https://yiff.party/patreon/1\n")
    print("============0/0===============\n")
    sys.exit()

for n in range(amountOfLinks):
    urlList.append(sys.argv[n+1])

# Optional leading numeric arguments: start page, then last page. Each one
# found is popped off the URL list. A non-numeric first argument simply
# falls through to the default (start at page 0).
try:
    startPage = int(sys.argv[1])-1
    urlList.pop(0)
    amountOfLinks -= 1
except:
    startPage = 0

try:
    cLastPage = int(sys.argv[2])
    cLastPageFlag = True
    urlList.pop(0)
    amountOfLinks -= 1
    if cLastPage < startPage:
        sys.exit()
except SystemExit:
    # NOTE(review): BUG — `(startPage)` is an int concatenated to a str,
    # so this error path raises TypeError instead of printing the message;
    # should be str(startPage).
    sys.exit("Please choose a lower starting page. Your current pagenumbers are: Starting Page: " + (startPage) + ", Last Page: " + str(cLastPage))
except:
    pass

#Creates Image Directory
if not os.path.isdir("."+ dirSep +"Images"+ dirSep +""):
    os.mkdir("."+ dirSep +"Images"+ dirSep +"")


def getFlag():
    # Read the module-level "explicit last page given" flag.
    return cLastPageFlag


def setFlag(boolean):
    # NOTE(review): BUG — without `global cLastPageFlag` this assignment
    # only creates a local variable; the module-level flag is never cleared.
    cLastPageFlag = boolean


def accountForDuplicates(aDict):
    """Disambiguate duplicate file names by prefixing a running counter.

    NOTE(review): only compares *adjacent* entries after sorting by value,
    so among N identical names the last one keeps its unprefixed name.
    """
    counter = 0
    bList = []
    cList = []
    newDict = {}
    # Sort (key, name) pairs by name so duplicates become adjacent.
    aDict = sorted(aDict.items(), key=lambda item: item[1])
    #print(aDict)
    for i1 in range(len(aDict)):
        #print(aDict[i1][1])
        bList.append(aDict[i1][1])
    for i2 in range(len(aDict)):
        cList.append(aDict[i2][0])
    # Sentinel entries so the pairwise comparison below can look one ahead.
    bList.append("buffer")
    cList.append("buffer")
    for h in range(len(bList)-1):
        if bList[h] == bList[h+1]:
            #print(bList[h])
            #updatedItem = {cList[h]:}
            newDict[cList[h]] = (str(counter) + " " + bList[h])
            counter += 1
        else:
            newDict[cList[h]] = bList[h]
    return newDict


def makeConformUrl(aList):
    # Turn site-relative hrefs ("/...") into absolute URLs.
    # NOTE(review): range(len(aList)-1) skips the final list element.
    for k in range(len(aList)-1):
        if(str(aList[k]).startswith("/")):
            aList[k] = "https://yiff.party" + str(aList[k])
    return aList


def downloader(myUrl, myImageName, myPatreonAuthor):
    #recursively tries to download the images - in the case of the site not accepting anymore requests
    # (NOTE(review): despite the comment above there is no retry/recursion;
    # any failure is recorded in missingFiles and skipped.)
    try:
        r = requests.get(myUrl, headers = {'User-Agent': userAgent}, timeout=(2,5), stream=True)
        if r.status_code == 200:
            with open("."+ dirSep +"Images"+ dirSep +"" + myPatreonAuthor + ""+ dirSep +"" + myImageName, 'wb') as f:
                for chunk in r:
                    f.write(chunk)
        else:
            print("beep -- file skipped: " + myUrl)
    except:
        print("Skipped " + myUrl)
        missingFiles.append(myUrl)
    return


def downloadImages(url, urlCounter):
    """Scrape one creator URL: enumerate pages, collect links, download."""
    imageNameDict = {}
    linkList = []
    imgContainerUrls = []
    imageCounter = 0
    #Gets the Patreon Author's number. Fails if link is shorter than https://yiff.party/patreon/1.
    #Also Creates a directory for the images.
    try:
        patreonAuthor = url.split("/")[4]
    except IndexError:
        print("\nThe given url might not be valid.\nSkipping url: " + url + "\n")
        print("============" + str(urlCounter) + "/" + str(amountOfLinks) + "===============\n")
        return
    else:
        if not os.path.isdir("."+ dirSep +"Images"+ dirSep +"" + patreonAuthor + ""+ dirSep +""):
            os.mkdir("."+ dirSep +"Images"+ dirSep +"" + patreonAuthor + ""+ dirSep +"")
    #Gets the page and converts/reads it.
    response = requests.get(url, headers = {'User-Agent': userAgent})
    soup = bs(response.text, "html.parser")
    newUrl = "https://yiff.party/render_posts?s=patreon&c=" + patreonAuthor + "&p="
    #searches for the highest page number
    lastPage = soup.find_all('a', {'class':'btn pag-btn'})
    try:
        lastPage = int(lastPage[1]["data-pag"])
        #print(lastPage)
        cLPFlag = getFlag()
        if cLPFlag:
            if cLastPage > lastPage:
                sys.exit()
            lastPage = cLastPage
            # NOTE(review): BUG — `startPage` is assigned in this function,
            # making it local; `startPage = startPage` therefore raises
            # UnboundLocalError, which the bare `except` below silently
            # swallows, collapsing the run to page 1 only.
            startPage = startPage
            setFlag(False)
        else:
            startPage = 0
        for i in range(startPage, lastPage):
            imgContainerUrls.append(newUrl + str(i+1)) #appends the page number to the url
    except SystemExit:
        sys.exit("Last Page Number is too high. Please choose a number lower or equal than: " + str(lastPage))
    except:
        # Fallback: single-page creator (or the bug noted above fired).
        lastPage = 1
        imgContainerUrls.append(newUrl + str(1))
    #print(imgContainerUrls)
    for containerUrl in imgContainerUrls:
        #print(containerUrl)
        response = requests.get(containerUrl, headers = {'User-Agent': userAgent})
        soup = bs(response.text, "html.parser")
        containersPart1 = soup.find_all('div', {'class': 'card-action'})
        containersPart2 = soup.find_all('div', {'class': 'post-body'})
        containersPart3 = soup.find_all('div', {'class': 'card-attachments'})
        containers = containersPart1 + containersPart2 + containersPart3
        #Checks if there are any images and returns an error if not. Also skips the url.
        try:
            containers[0]
        except IndexError:
            page = containerUrl.split("p=")[1]
            print("\nCould not find Images. The cause might be a invalid url or there just aren't any Images.")
            missingFiles.append("Page " + page + " was skipped. You can retry scraping this page with: python " + sys.argv[0] + " " + page + " " + page + " urls")
            #print("Skipping url: " + url + "\n")
            #print("============" + str(urlCounter) + "/" + str(amountOfLinks) + "===============\n")
            continue
        containerCounter1 = len(containersPart1) #amount of containers with class 'card-action'
        containerCounter2 = len(containersPart2) #amount of containers with class 'post-body'
        i = 0
        #Searches for Image-Boxes.
        # NOTE(review): the elif bound below compares against
        # containerCounter2 alone rather than counter1+counter2 — verify the
        # intended partition of the concatenated `containers` list.
        for container in containers:
            i += 1
            if i <= containerCounter1:
                try:
                    shortLink = container.a['href']
                except:
                    continue
            elif i <= containerCounter2 and i > containerCounter1:
                try:
                    shortLink = container.p.a['href']
                except:
                    continue
            else:
                try:
                    subContainer = container.p
                    subContainer = subContainer.find_all('a')
                    for subCont in subContainer:
                        linkList.append(subCont['href'])
                except:
                    continue
            linkList.append(shortLink)
    # Normalize relative links, then deduplicate preserving order.
    linkList = makeConformUrl(sorted(linkList))
    linkList = list(dict.fromkeys(linkList))
    # Map index -> file name (last URL path segment).
    # NOTE(review): range(len(...)-1) drops the final link here and in the
    # download loop below, so the last file is never fetched.
    for h in range(0, len(linkList)-1):
        updatedValue = {str(h):str(linkList[h].split("/")[len(linkList[h].split("/"))-1])}
        imageNameDict.update(updatedValue)
    imageNameDict = accountForDuplicates(imageNameDict)
    #print(len(linkList))
    #print(imageNameDict)
    #print(imageCounter)
    #print('\n'.join(map(str, sorted(linkList))))
    #Loops through the Image Urls amd downloads them.
    for i in range(len(linkList)-1):
        imageName = imageNameDict[str(i)]
        urlI = linkList[i]
        print("Downloading " + imageName) #Shows the name of the current downloading image
        downloader(urlI, imageName, patreonAuthor)
        imageCounter += 1
    #Just a finishing message.
    if imageCounter == 0:
        print("No files downloaded. Maybe there are no files or you messed up the order of the arguments: python " + sys.argv[0] + " [start page] [last page] urls")
    else:
        print("\nSuccessfully downloaded " + str(imageCounter) + " Images/Files!\n")
    print("============" + str(urlCounter) + "/" + str(amountOfLinks) + "===============\n")
    # Rewrite the skipped-links log after every creator URL processed.
    f = open("SkippedLinks.txt", "w+")
    for files in missingFiles:
        f.write(files + "\n")
    f.close()


#Loops through all Yiff.party-Urls and downloads the images.
for url in urlList:
    urlCounter += 1
    downloadImages(url, urlCounter)
8,647
2,886
import json
import requests
import time
from discord_webhook import DiscordWebhook, DiscordEmbed

# Script: look up a shoe on GOAT via its Algolia search index and post the
# per-size price/condition listing to a Discord channel via webhook.
# NOTE(review): SECURITY — this webhook URL embeds a live secret token;
# it should be revoked and moved to an environment variable or config file.
webhook_url = 'https://discordapp.com/api/webhooks/672159508675690497/4UtaClAc7rKMJsEvbR4iYf-Razv4M3ZWtkYDOxBzLfiDzJhI7RSFpoLn6iijBiRcaNOR'
webhook = DiscordWebhook(webhook_url)

# Product style code to search for.
pid = '508214-660'

# Browser-like headers so the requests pass the site's filtering.
headers = {
    'Connection': 'keep-alive',
    'accept': 'application/json',
    'Origin': 'https://www.goat.com',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
    'content-type': 'application/x-www-form-urlencoded',
    'Sec-Fetch-Site': 'cross-site',
    'Sec-Fetch-Mode': 'cors',
    'Referer': 'https://www.goat.com/search?query='+ pid,
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.9',
}

# Algolia application id / search-only API key taken from the public site.
params = {
    'x-algolia-agent': 'Algolia for vanilla JavaScript 3.25.1',
    'x-algolia-application-id': '2FWOTDVM2O',
    'x-algolia-api-key': 'ac96de6fef0e02bb95d433d8d5c7038a',
}

# Algolia query body: shoes only, first page, 48 hits.
data = {
    "distinct": 'true',
    'facetFilters': 'product_category: shoes',
    'facets': 'size',
    'hitsPerPage': '48',
    'numericFilters': '[]',
    'page': '0',
    'query': pid,
    'clickAnalytics': "true"
}

# NOTE(review): ['hits'][0] raises IndexError when the search has no
# results — there is no error handling on this path.
response = requests.post('https://2fwotdvm2o-dsn.algolia.net/1/indexes/product_variants_v2/query', headers=headers, params=params,json=data)
response_json = response.json()
response_json_dict = response_json['hits'][0]
product_id = response_json_dict['product_template_id']
print(product_id)


def obtainBasicInfo():
    """Fetch all variants of the found product and post one Discord embed
    per size (price in USD, shoe and box condition)."""
    webhook = DiscordWebhook(url=webhook_url)
    r_api = requests.get('https://www.goat.com/web-api/v1/product_variants?productTemplateId='+ str(product_id),headers=headers)
    data = r_api.json()
    embed = DiscordEmbed(title=response_json_dict['name'], url=headers['Referer'], color=242424)
    embed.set_thumbnail(url=response_json_dict['main_picture_url'])
    sizes = []
    shoe_conditions = []
    box_conditions = []
    prices = []
    for i in data:
        sizes.append(str(i['size']))
        shoe_conditions.append(i['shoeCondition'])
        box_conditions.append(i['boxCondition'])
        # Prices arrive in US cents; divide by 100 for dollars.
        prices.append(str(int(i['lowestPriceCents']['amountUsdCents'])/100))
        print(' Size: ' + str(i['size']) + '\n' + ' Shoe condition: ' + i['shoeCondition'] + '\n' + ' Box condition: ' + i['boxCondition'] + '\n' + ' $' + str(int(i['lowestPriceCents']['amountUsdCents'])/100) + '\n' + '-----------------')
        embed.add_embed_field(name='Size', value=(str(i['size'])))
        embed.add_embed_field(name='Shoe Condition', value=str(i['shoeCondition']))
        embed.add_embed_field(name='Box Condition', value=str(i['boxCondition']))
        embed.add_embed_field(name='Price', value='$' + str(int(i['lowestPriceCents']['amountUsdCents'])/100))
        webhook.add_embed(embed)
        # One webhook post per variant; sleep to stay under Discord rate limits,
        # then clear the fields for the next variant.
        send_hook = webhook.execute()
        time.sleep(2)
        embed.fields = []
    print(sizes)
    print(shoe_conditions)
    print(box_conditions)
    print(prices)


obtainBasicInfo()
3,031
1,188
"""Unit tests for the pytai application.

License:
    MIT License

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
"""
import unittest
import xml.etree.ElementTree as ET

from typing import Union, Callable
from unittest.mock import patch, MagicMock
from pathlib import Path

# The module only works as part of the package; give a helpful exit message
# when it is executed directly from the wrong working directory.
try:
    from .. import application
    from .xml_utils import *
except ImportError:
    if __name__ == "__main__":
        import sys
        sys.exit(f'This script needs to be run from the root folder:\n'
                 f'python -m pytai.tests.{Path(sys.argv[0]).stem}\n'
                 f'python -m unittest pytai.tests.{Path(sys.argv[0]).stem}')
    else:
        raise


class MockView(MagicMock):
    """Mock class to mock the application's View"""

    def __init__(self, *args, **kwargs):
        # Swallow the real View's constructor arguments.
        super().__init__()

    def add_tree_item(self, parent_handle: Union[ET.Element, str], **kwargs) -> ET.ElementTree:
        """Build an XML tree using the provided input."""
        # An empty parent handle marks the root of a new tree; keep it on
        # self.root so tests can compare it to the expected XML.
        if parent_handle == "":
            self.root = ET.Element("root")
            return self.root
        # All extra attributes are stringified into XML attributes.
        d = {k: str(v) for k, v in kwargs.items()}
        return ET.SubElement(parent_handle, "node", **d)

    def schedule_function(self, time_ms: int, callback: Callable[[], None]) -> None:
        # Run "scheduled" callbacks synchronously in tests.
        callback()

    def start_worker(self, callback: Callable[[], bool]) -> None:
        # Drive the worker loop to completion synchronously: the callback
        # returns True while it wants to be rescheduled.
        reschedule = True
        while reschedule:
            reschedule = callback()


class TestOffsets(unittest.TestCase):
    # End-to-end parse tests: each resource file is parsed and the resulting
    # tree is compared against a stored expected-XML file.

    @classmethod
    def setUpClass(cls):
        # Scratch directory for the actual-output dump (kept for debugging).
        cls.tmp_path = Path(__file__).resolve().parent / "tmp"
        cls.tmp_path.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def get_resource_path(file_name: str):
        # Resolve a file under the tests' resources/ folder.
        return Path(__file__).resolve().parent / "resources" / file_name

    def generic_test(self, file_type):
        # Parse resources/<type>.<type> with the matching Kaitai format and
        # compare the produced tree with resources/<type>.xml.
        path = self.get_resource_path(f"{file_type}.{file_type}")
        format = {"kaitai_format": file_type}
        with patch(__name__ + '.application.v.View', MockView()):
            app = application.Application(file = path, format = format)
            with open(self.tmp_path / "actual_output.xml", "w") as o:
                o.write(xml_to_str(app.view.root))
            expected_xml = xml_from_file(self.get_resource_path(f"{file_type}.xml"))
            try:
                xml_compare(app.view.root, expected_xml)
            except RuntimeError as e:
                self.fail(str(e))

    def test_png(self):
        self.generic_test("png")

    def test_bmp(self):
        self.generic_test("bmp")

    def test_zip(self):
        self.generic_test("zip")

    def test_elf(self):
        self.generic_test("elf")

    def test_wav(self):
        self.generic_test("wav")


if __name__ == "__main__":
    unittest.main()
3,898
1,294
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import numpy as np
from PIL import Image
from pose_engine import PoseEngine
import cv2
import argparse
import common

from edgetpu.detection.engine import DetectionEngine

# Keypoint name -> index within the 17-point PoseNet skeleton.
BODY_PARTS = {"nose": 0, "left eye": 1, "right eye": 2, "left ear": 3,
              "right ear": 4, "left shoulder": 5, "right shoulder": 6,
              "left elbow": 7, "right elbow": 8, "left wrist": 9,
              "right wrist": 10, "left hip": 11, "right hip": 12,
              "left knee": 13, "right knee": 14, "left ankle": 15,
              "right ankle": 16}

# Keypoint pairs joined by a line when the skeleton is drawn.
EDGES = (
    ('nose', 'left eye'),
    ('nose', 'right eye'),
    ('nose', 'left ear'),
    ('nose', 'right ear'),
    ('left ear', 'left eye'),
    ('right ear', 'right eye'),
    ('left eye', 'right eye'),
    ('left shoulder', 'right shoulder'),
    ('left shoulder', 'left elbow'),
    ('left shoulder', 'left hip'),
    ('right shoulder', 'right elbow'),
    ('right shoulder', 'right hip'),
    ('left elbow', 'left wrist'),
    ('right elbow', 'right wrist'),
    ('left hip', 'right hip'),
    ('left hip', 'left knee'),
    ('right hip', 'right knee'),
    ('left knee', 'left ankle'),
    ('right knee', 'right ankle'),
)

import zmq
from datetime import datetime


def main():
    """Capture video frames, run PoseNet (and optionally a person detector)
    on the Edge TPU, draw the results, and optionally publish the keypoint
    array over a ZeroMQ PUB socket on port 5555."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--camera_idx', type=str,
                        help='Index of which video source to use. ', default = 1)
    parser.add_argument('--model', type=str, help='Pose model to use. ', default = '')
    parser.add_argument('--detect', action='store_true', help='Detect person', default = False)
    parser.add_argument('--filtered_labels', type=str, help='Filtered labels. ', default = '0')
    parser.add_argument('--zmq', action='store_true', help='Send via ZeroMQ', default = False)
    args = parser.parse_args()

    #engine = PoseEngine('models/posenet_mobilenet_v1_075_481_641_quant_decoder_edgetpu.tflite')
    engine = PoseEngine(args.model)
    _, image_height, image_width, _ = engine.get_input_tensor_shape()
    if args.detect:
        detect_engine = DetectionEngine('../examples-camera/all_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
    print("Load all models done!")

    if args.zmq:
        # PUB/SUB so subscribers can attach/detach without blocking capture.
        context = zmq.Context()
        socket = context.socket(zmq.PUB)
        socket.bind("tcp://*:5555")

    # BUG FIX: --camera_idx is declared type=str, and cv2.VideoCapture treats
    # a string as a file/URL path, so a user-supplied "0"/"1" silently failed
    # to open the camera device. Convert purely numeric values to int (the
    # unparsed default stays the int 1).
    camera_source = args.camera_idx
    if isinstance(camera_source, str) and camera_source.isdigit():
        camera_source = int(camera_source)
    cap = cv2.VideoCapture(camera_source)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        cv2_im = frame

        cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
        pil_image = Image.fromarray(cv2_im_rgb)
        # BUG FIX: Image.resize returns a new image and does not modify in
        # place; the original discarded the result and fed the full-size
        # frame to the engines instead of the model's input resolution.
        pil_image = pil_image.resize((image_width, image_height), Image.NEAREST)

        detect_objs = []
        if args.detect:
            detect_objs = detect_engine.detect_with_image(
                pil_image, threshold=0.5, keep_aspect_ratio=True,
                relative_coord=True, top_k=10)
            if args.filtered_labels:
                # NOTE(review): substring membership — label id "1" also
                # matches filter string "10"; confirm intended semantics.
                detect_objs = [obj for obj in detect_objs
                               if str(obj.label_id) in args.filtered_labels]

        poses, inference_time = engine.DetectPosesInImage(np.uint8(pil_image))
        cv2_im, all_points = draw_skel_and_kp(cv2_im, poses, detect_objs)

        if args.zmq:
            # zmq send points
            timestamp = datetime.timestamp(datetime.now())
            # BUG FIX: the deprecated alias np.float was removed in
            # NumPy 1.24; the builtin float is the documented replacement.
            send_array(socket, np.array(all_points).astype(float), timestamp)

        cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow('frame', cv2_im)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def draw_skel_and_kp(img, poses, detect_objs,
                     min_pose_score=0.3, min_part_score=0.2):
    """Draw detection boxes, keypoints and skeleton edges onto *img*.

    Returns the annotated image and an array of shape (num_poses, 17, 2)
    with (x, y) per keypoint; undetected keypoints are (-1, -1).
    """
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    all_points = []
    for pose in poses:
        if pose.score < min_pose_score:
            continue
        xys = {}
        # Pre-fill so every pose yields a complete 17-point row.
        points = [(-1., -1.)] * 17
        for label, keypoint in pose.keypoints.items():
            if keypoint.score < min_part_score:
                continue
            # Coord
            kp_y = keypoint.yx[0]
            kp_x = keypoint.yx[1]
            xys[label] = (kp_x, kp_y)
            cv_keypoints.append(cv2.KeyPoint(int(kp_x), int(kp_y),
                                             10. * keypoint.score))
            points[BODY_PARTS[label]] = (int(kp_x), int(kp_y))
        all_points.append(np.array(np.stack([p for p in points], axis=0)))

        # Collect line segments between detected keypoint pairs.
        results = []
        for a, b in EDGES:
            if a not in xys or b not in xys:
                continue
            ax, ay = xys[a]
            bx, by = xys[b]
            results.append(np.array([[ax, ay], [bx, by]]).astype(np.int32),)
        adjacent_keypoints.extend(results)

    if len(all_points) > 0:
        all_points = np.stack([points for points in all_points], axis=0)

    # Detection boxes arrive in relative coordinates; scale to pixels.
    height, width, channels = img.shape
    for obj in detect_objs:
        x0, y0, x1, y1 = obj.bounding_box.flatten().tolist()
        x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
        out_img = cv2.rectangle(out_img, (x0, y0), (x1, y1), (0, 255, 0), 1)

    # fill color
    out_img = cv2.drawKeypoints(
        out_img, cv_keypoints, outImage=np.array([]), color=(0, 0, 0),
        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False,
                            color=(0, 255, 255), thickness=2)
    return out_img, np.array(all_points)


def send_array(socket, A, msg='None', flags=0, copy=True, track=False):
    """send a numpy array with metadata"""
    # Metadata frame (dtype/shape) first, then the raw buffer, so the
    # receiver can reconstruct the array without pickling.
    md = dict(
        msg = msg,
        dtype = str(A.dtype),
        shape = A.shape,
    )
    socket.send_json(md, flags|zmq.SNDMORE)
    return socket.send(A, flags, copy=copy, track=track)


if __name__ == '__main__':
    main()
6,898
2,427
# Copyright (c) 2019 Science and Technology Facilities Council # All rights reserved. # Modifications made as part of the fparser project are distributed # under the following license: # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''Test Fortran Include Statement: This file tests the parsing of an include statement. Whilst include is not part of the standard Fortran rules (the include should include code as the code is being parsed) there are cases where users might like to keep the include statement in the Fortran parse tree and output it again. 
'''
import pytest
from fparser.api import get_reader
from fparser.two.Fortran2003 import Include_Stmt, InternalError
from fparser.two.utils import NoMatchError


def test_include_stmt(f2003_create):
    '''Check that a basic include statement is parsed correctly. Input
    separately as a string and as a reader object

    '''
    def check_include(reader):
        '''Internal helper function to avoid code replication.'''
        ast = Include_Stmt(reader)
        assert "INCLUDE 'my-non-existant-file.inc'" in str(ast)
        # .replace("u'", "'") keeps the check stable across Python 2/3
        # unicode repr differences.
        assert repr(ast).replace("u'", "'") == \
            ("Include_Stmt(Include_Filename("
             "'my-non-existant-file.inc'))")

    line = "include 'my-non-existant-file.inc'"
    check_include(line)

    reader = get_reader(line)
    check_include(reader)


def test_spaces(f2003_create):
    '''Check that spaces are allowed before and after an include keyword
    as well as after the file string.

    '''
    line = " include 'my-non-existant-file.inc' "
    ast = Include_Stmt(line)
    assert "INCLUDE 'my-non-existant-file.inc'" in str(ast)


def test_no_space(f2003_create):
    '''Check that no space is required between the include keyword and the
    file string.

    '''
    line = "include'my-non-existant-file.inc'"
    ast = Include_Stmt(line)
    assert "INCLUDE 'my-non-existant-file.inc'" in str(ast)


def test_case(f2003_create):
    '''Check that different case is allowed for the include keyword.'''
    line = "InClUdE 'my-non-existant-file.inc'"
    ast = Include_Stmt(line)
    assert "INCLUDE 'my-non-existant-file.inc'" in str(ast)


def test_double_quotes(f2003_create):
    '''Check that double quotes are allowed for the file string.'''
    line = 'include "my-non-existant-file.inc"'
    ast = Include_Stmt(line)
    assert "INCLUDE 'my-non-existant-file.inc'" in str(ast)


def test_errors(f2003_create):
    '''Check that syntax errors produce a NoMatchError exception.'''
    # Each entry is a malformed include line (bad keyword, missing or
    # mismatched quotes, trailing/leading junk) that must fail to match.
    for line in [None, "", "  ", "includ", "includ 'x'", "include",
                 "include ''", "include \"x'", "include 'x\"",
                 "include 'xxx", "include \"xxx", "include xxx'",
                 "include xxx\"", "include x'x'", "include 'x'x",
                 "x include 'x'"]:
        with pytest.raises(NoMatchError) as excinfo:
            _ = Include_Stmt(line)
        assert "Include_Stmt: '{0}'".format(line) in str(excinfo.value)


def test_include_filename_error(f2003_create, monkeypatch):
    '''Check that we raise an InternalError if a return from
    Include_Filename is None or an empty string. This should never happen
    as any matching errors would cause this class to raise an exception.

    '''
    # Force Include_Filename to misbehave so the defensive check fires.
    monkeypatch.setattr("fparser.two.Fortran2003.Include_Filename",
                        lambda file_name: None)
    line = "include ' '"
    with pytest.raises(InternalError) as excinfo:
        _ = Include_Stmt(line)
    assert ("Include_Filename should never return None or an empty "
            "name") in str(excinfo.value)
4,989
1,613
import sublime import os import textwrap import hyperhelpcore from hyperhelpcore.common import log, hh_syntax from hyperhelpcore.core import help_index_list ###---------------------------------------------------------------------------- def loaded(): """ Do package setup at package load time. """ hha_setting.obj = sublime.load_settings("HyperHelpAuthor.sublime-settings") hha_setting.default = { "update_header_on_save": True, "reload_index_on_save": True, "lint_output_to_view": False, "author_view_settings": { "rulers": [80], "match_selection": True, "draw_indent_guides": True } } hyperhelpcore.initialize() def unloaded(): """ Do package cleanup at unload time. """ pass ###---------------------------------------------------------------------------- def hha_setting(key): """ Get a HyperHelpAuthor setting from a cached settings object. """ default = hha_setting.default.get(key, None) return hha_setting.obj.get(key, default) def is_authoring_source(view): """ Given a view object, tells you if that view represents a help source file. """ if view.match_selector(0, "text.hyperhelp.help"): return not view.is_read_only() return False def package_for_view(view): """ Given a view object, provides you back the help index tuple for the help package that contains this file. This may be None if this file is not a Sublime package file, or if it doesn't correspond to a loaded help package. This does not verify that the file is actually a part of the provided help package, only that it is in the document root for said package. """ if view.file_name() is not None: spp = sublime.packages_path() if view.file_name().startswith(spp): file_name = view.file_name()[len(spp)+1:] for pkg_name, pkg_info in help_index_list().items(): if file_name.startswith(pkg_info.doc_root): return pkg_info return None def local_help_filename(pkg_info, help_file): """ Determine what the full file name of a help file from a given package would be if it was stored locally. 
""" return os.path.normpath(os.path.join(sublime.packages_path(), pkg_info.doc_root, help_file)) def local_help_index(pkg_info): """ Determine what the full file name of the help index file for the given package would be if it was stored locally. """ return os.path.normpath(os.path.join(sublime.packages_path(), pkg_info.index_file[len("Packages/"):])) def format_template(template, *args): """ Given incoming text, remove all common indent, then strip away the leading and trailing whitespace from it. This is a modified version of code from Default/new_templates.py from the core Sublime code. """ return textwrap.dedent(template % args).strip() def open_local_help(pkg_info, help_file, window=None): """ Attempt to open the provided help file locally for editing. """ window = window if window is not None else sublime.active_window() local_path = local_help_filename(pkg_info, help_file) if not os.path.exists(local_path): return log(format_template( """ Specified help file does not exist; cannot open. Note: HyperHelpAuthor can not currently open help files from packed packages for editing. """), dialog=True) view = window.open_file(local_path) view.settings().set("_hh_auth", True) if not view.is_loading(): apply_authoring_settings(view) def open_help_index(pkg_info, window=None): """ Attempt to open the provided help index file localy for editing. """ window = window if window is not None else sublime.active_window() # The index file is stored as a resource file spec, so strip the prefix local_path = local_help_index(pkg_info) if not os.path.exists(local_path): return log(format_template( """ Specified help index does not exist; cannot open. Note: HyperHelpAuthor can not currently open help indexes from packed packages for editing. """), dialog=True) window.open_file(local_path) def apply_authoring_settings(view): """ Given a view, apply the appropriate settings to it to ensure that it is set up properly for editing. 
""" # Ensure help files with no header get the appropriate syntax set view.assign_syntax(hh_syntax("HyperHelp-Help.sublime-syntax")) author_view_settings = hha_setting("author_view_settings") settings = view.settings() for option in author_view_settings: settings.set(option, author_view_settings[option]) ###----------------------------------------------------------------------------
4,992
1,395
from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('player', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='music', name='thumbnail', ), ]
318
93
import os, csv, time, shutil from bin import cardbank from bin import builder def add_build(add_cards): '''Build card bank by specifically adding new cards.''' # process new cards to add if not add_cards: return add_card_map = {} for card in add_cards: add_card_map[card["inf"]] = card # read in existing card bank card_bank = [] if os.path.exists("bank/card-bank-built.csv"): card_bank = cardbank.read("bank/card-bank-built.csv", build_forms=False) # start HTTP session for reuse session = builder.session() print("Building new card bank..") try: # rebuild card bank, starting with populating from existing new_cards = [] updated_cards = [] new_card_bank = [] errored = [] for card in card_bank: if card["inf"] not in add_card_map: # no change, just add existing card new_card_bank.append(card) else: # changed, replace with new card definition add_card = add_card_map[card["inf"]] del add_card_map[card["inf"]] _build_card_and_add(add_card, new_card_bank, new_cards, errored, session) # add all brand new cards for inf, card in add_card_map.items(): _build_card_and_add(card, new_card_bank, new_cards, errored, session) finally: session.close() finish_build(new_card_bank, new_cards, updated_cards, errored) def build_from_difference(force_rebuild=[]): '''Build card bank by rectifying differences in card bank basic and built.''' # read card bank basic (card bank with all basic definitions but not built out) card_bank_basic = cardbank.read("bank/card-bank-basic.csv", build_forms=False) # read existing, card bank built existing = [] if os.path.exists("bank/card-bank-built.csv"): existing = cardbank.read("bank/card-bank-built.csv", build_forms=False) existing_map = {} for card in existing: existing_map[card["inf"]] = card # start HTTP session for reuse session = builder.session() print("Building new card bank..") # build card bank from card bank basic new_cards = [] updated_cards = [] new_card_bank = [] errored = [] for card in card_bank_basic: # if already exists, check if requiring 
update only (unless in list of force rebuild) if (card["inf"] not in force_rebuild) and (card["inf"] in existing_map): existing_card = existing_map[card["inf"]] # if any of the built fields are different, something's wrong, rebuilt it entirely rebuild = False for field in builder.BUILT_FIELDS: if not field in existing_card or not existing_card[field]: rebuild = True break # if not rebuilding, just update the supplied fields, which doesn't affect build fields update = False for field in builder.SUPPLIED_FIELDS: if rebuild: break if field not in existing_card or existing_card[field] != card[field]: update = True existing_card[field] = card[field] # if no rebuild needed, append existing [and updated] card and continue if not rebuild: new_card_bank.append(existing_card) if update: updated_cards.append(existing_card) continue # if doesn't exist or need rebuilding, rebuild card _build_card_and_add(card, new_card_bank, new_cards, errored, session) finish_build(new_card_bank, new_cards, updated_cards, errored) def _build_card_and_add(card, new_card_bank, new_cards, errored, session=None): # get verb tenses tense_map = builder.get(card["inf"], session=session) # if warning returned, then invalid somehow if isinstance(tense_map, Warning): errored.append((card, str(tense_map))) # otherwise build card and append to card bank else: builder.build(card, tense_map) new_card_bank.append(card) new_cards.append(card) # don't spam the website time.sleep(1) def finish_build(new_card_bank, new_cards, updated_cards, errored): '''Finish build, save card bank, and print information about build.''' print("") if not new_cards and not updated_cards and not errored: print("No changes") return if new_cards or updated_cards: # backup if os.path.exists("bank/card-bank-built.csv"): shutil.copyfile("bank/card-bank-built.csv", "bank/card-bank-built.bkp.csv") print("Old card bank backed up as: bank/card-bank-built.bkp.csv") # write new with open("bank/card-bank-built.csv", "w", newline="", 
encoding="utf-8") as csvf: writer = csv.DictWriter(csvf, fieldnames=builder.FIELDS) writer.writeheader() writer.writerows(new_card_bank) print("New card bank written to: bank/card-bank-built.csv") if new_cards: print("\nNew cards created:") for card in new_cards: print(" {0}".format(card["inf"])) if updated_cards: print("\nCards updated:") for card in updated_cards: print(" {0}".format(card["inf"])) if errored: print("\nError building card(s) for:") for pair in errored: print(" {0} : {1}".format(pair[0]["inf"], pair[1])) # if called straight-up, build from difference between basic and build card bank if __name__ == "__main__": build_from_difference()
5,717
1,672
from mongoengine import StringField, EmailField, BooleanField from flask.ext.login import UserMixin import requests import json from mongoengine import Document from social.apps.flask_app.me.models import FlaskStorage class User(Document, UserMixin): username = StringField(max_length=200) password = StringField(max_length=200, default='') name = StringField(max_length=100) fullname = StringField(max_length=100) first_name = StringField(max_length=100) last_name = StringField(max_length=100) email = EmailField() active = BooleanField(default=True) def facebook_api(self, url, fields=None): params = { 'access_token': self.get_social_auth("facebook").extra_data['access_token'] } if fields: params["fields"] = ",".join(fields) res = requests.get(url, params=params) if res.status_code != 200: raise Exception("Status was %s" % res.status_code) return json.loads(res.content) def get_facebook_albums(self): return self.facebook_api("https://graph.facebook.com/v2.2/me/albums", fields=["id", "name"])["data"] def get_facebook_photos(self, album_id): photos = [] url = "https://graph.facebook.com/v2.2/%s/photos" % album_id while url: ret = self.facebook_api(url, fields=[ "id", "created_time", "from", "height", "width", "name", "source" ]) photos += ret["data"] url = ret.get("paging", {}).get("next") return photos def get_social_auth(self, provider): return FlaskStorage.user.get_social_auth_for_user(self, provider=provider).get() def is_active(self): return self.active
1,749
550
""" ******************************************************************************** compas_blender.geometry ******************************************************************************** .. currentmodule:: compas_blender.geometry Object-oriented convenience wrappers for native Blender geometry. .. autosummary:: :toctree: generated/ BlenderCurve BlenderMesh BlenderPoint BlenderSurface """ try: import bpy except ImportError: pass class BlenderGeometry(object): def __init__(self, obj): self.object = obj self.name = obj.name self.geometry = obj.data self.otype = obj.type self.attributes = {} @property def location(self): return list(self.object.location) @classmethod def from_selection(cls): raise NotImplementedError @classmethod def from_name(cls, name): return BlenderGeometry(obj=bpy.data.objects[name]) @staticmethod def find(guid): raise NotImplementedError @staticmethod def refresh(): bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) def delete(self): raise NotImplementedError def purge(self): raise NotImplementedError def hide(self): raise NotImplementedError def show(self): raise NotImplementedError def select(self): raise NotImplementedError def unselect(self): raise NotImplementedError def closest_point(self, *args, **kwargs): raise NotImplementedError def closest_points(self, *args, **kwargs): raise NotImplementedError from .point import BlenderPoint from .curve import BlenderCurve from .mesh import BlenderMesh from .surface import BlenderSurface __all__ = [ 'BlenderGeometry', 'BlenderPoint', 'BlenderCurve', 'BlenderMesh', 'BlenderSurface', ]
1,924
587
from twisted.internet import reactor, task class CounterManager(object): counters = [] @classmethod def add_counter(cls, counter): cls.counters.append(counter) @classmethod def has_active_counters(cls): return all([not c.is_active for c in cls.counters]) class Counter(object): def __init__(self, name, between_time, counter=5): self.name = name self.between_time = between_time self.counter = counter self.is_active = True CounterManager.add_counter(self) def start(self): self.loop_handler = task.LoopingCall(self.count) self.loop_handler.start(self.between_time) def count(self): if self.counter == 0: self.is_active = False self.loop_handler.stop() if CounterManager.has_active_counters(): print 'No counters active. Stopping!' reactor.stop() else: print self.name + ':', self.counter self.counter -= 1 print 'Start' Counter('1', 0.5).start() Counter('2', 1).start() Counter('3', 0.1).start() reactor.run()
1,136
360
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy import pytest from data_validation import consts from data_validation.config_manager import ConfigManager COLUMN_VALIDATION_CONFIG = { # BigQuery Specific Connection Config "source_conn": None, "target_conn": None, # Validation Type consts.CONFIG_TYPE: "Column", # Configuration Required Depending on Validator Type consts.CONFIG_SCHEMA_NAME: "bigquery-public-data.new_york_citibike", consts.CONFIG_TABLE_NAME: "citibike_trips", consts.CONFIG_CALCULATED_FIELDS: [], consts.CONFIG_GROUPED_COLUMNS: [], consts.CONFIG_FILTERS: [ { consts.CONFIG_TYPE: consts.FILTER_TYPE_CUSTOM, consts.CONFIG_FILTER_SOURCE: "column_name > 100", consts.CONFIG_FILTER_TARGET: "column_name_target > 100", } ], } QUERY_LIMIT = 100 COLUMN_VALIDATION_CONFIG_LIMIT = deepcopy(COLUMN_VALIDATION_CONFIG) COLUMN_VALIDATION_CONFIG_LIMIT[consts.CONFIG_LIMIT] = QUERY_LIMIT QUERY_GROUPS_TEST = [ { consts.CONFIG_FIELD_ALIAS: "start_alias", consts.CONFIG_SOURCE_COLUMN: "starttime", consts.CONFIG_TARGET_COLUMN: "starttime", consts.CONFIG_CAST: "date", } ] AGGREGATES_TEST = [ { consts.CONFIG_FIELD_ALIAS: "sum_starttime", consts.CONFIG_SOURCE_COLUMN: "starttime", consts.CONFIG_TARGET_COLUMN: "starttime", consts.CONFIG_TYPE: "sum", } ] CALCULATED_MULTIPLE_TEST = [ { consts.CONFIG_FIELD_ALIAS: "concat_start_station_name_end_station_name", consts.CONFIG_CALCULATED_SOURCE_COLUMNS: [ "start_station_name", "end_station_name", ], 
consts.CONFIG_CALCULATED_TARGET_COLUMNS: [ "start_station_name", "end_station_name", ], consts.CONFIG_TYPE: "concat", }, { consts.CONFIG_FIELD_ALIAS: "concat_calcs", consts.CONFIG_CALCULATED_SOURCE_COLUMNS: [ "ifnull_start_station_name", "rstrip_start_station_name", "upper_start_station_name", ], consts.CONFIG_CALCULATED_TARGET_COLUMNS: [ "ifnull_start_station_name", "rstrip_start_station_name", "upper_start_station_name", ], consts.CONFIG_TYPE: "concat", "depth": 1, }, { consts.CONFIG_FIELD_ALIAS: "ifnull_start_station_name", consts.CONFIG_CALCULATED_SOURCE_COLUMNS: ["start_station_name"], consts.CONFIG_CALCULATED_TARGET_COLUMNS: ["start_station_name"], consts.CONFIG_TYPE: "ifnull", }, { consts.CONFIG_FIELD_ALIAS: "length_start_station_name", consts.CONFIG_CALCULATED_SOURCE_COLUMNS: ["start_station_name"], consts.CONFIG_CALCULATED_TARGET_COLUMNS: ["start_station_name"], consts.CONFIG_TYPE: "length", }, { consts.CONFIG_FIELD_ALIAS: "rstrip_start_station_name", consts.CONFIG_CALCULATED_SOURCE_COLUMNS: ["start_station_name"], consts.CONFIG_CALCULATED_TARGET_COLUMNS: ["start_station_name"], consts.CONFIG_TYPE: "rstrip", }, { consts.CONFIG_FIELD_ALIAS: "upper_start_station_name", consts.CONFIG_CALCULATED_SOURCE_COLUMNS: ["start_station_name"], consts.CONFIG_CALCULATED_TARGET_COLUMNS: ["start_station_name"], consts.CONFIG_TYPE: "upper", }, ] class MockIbisClient(object): pass @pytest.fixture def module_under_test(): import data_validation.validation_builder return data_validation.validation_builder def test_import(module_under_test): assert module_under_test is not None def test_column_validation(module_under_test): mock_config_manager = ConfigManager( COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False ) builder = module_under_test.ValidationBuilder(mock_config_manager) assert not builder.verbose assert builder.config_manager.query_limit is None def test_column_validation_aggregates(module_under_test): mock_config_manager = ConfigManager( 
COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False ) builder = module_under_test.ValidationBuilder(mock_config_manager) mock_config_manager.append_aggregates(AGGREGATES_TEST) builder.add_config_aggregates() assert list(builder.get_metadata().keys()) == ["sum_starttime"] def test_validation_add_groups(module_under_test): mock_config_manager = ConfigManager( COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False ) builder = module_under_test.ValidationBuilder(mock_config_manager) mock_config_manager.append_query_groups(QUERY_GROUPS_TEST) builder.add_config_query_groups() assert list(builder.get_group_aliases()) == ["start_alias"] def test_column_validation_calculate(module_under_test): mock_config_manager = ConfigManager( COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False ) builder = module_under_test.ValidationBuilder(mock_config_manager) mock_config_manager.append_calculated_fields(CALCULATED_MULTIPLE_TEST) builder.add_config_calculated_fields() print(sorted(list(builder.get_calculated_aliases()))) assert sorted(list(builder.get_calculated_aliases())) == [ "concat_calcs", "concat_start_station_name_end_station_name", "ifnull_start_station_name", "length_start_station_name", "rstrip_start_station_name", "upper_start_station_name", ] def test_column_validation_limit(module_under_test): mock_config_manager = ConfigManager( COLUMN_VALIDATION_CONFIG_LIMIT, MockIbisClient(), MockIbisClient(), verbose=False, ) builder = module_under_test.ValidationBuilder(mock_config_manager) builder.add_query_limit() assert builder.source_builder.limit == QUERY_LIMIT def test_validation_add_filters(module_under_test): mock_config_manager = ConfigManager( COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False ) builder = module_under_test.ValidationBuilder(mock_config_manager) builder.add_config_filters() filter_field = builder.source_builder.filters[0] assert filter_field.left == "column_name > 
100"
6,889
2,292
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0 # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class PrivateEndpointConnection(Model): """The Private Endpoint Connection resource. :param private_endpoint: The resource of private end point. :type private_endpoint: ~_restclient.models.PrivateEndpoint :param private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider. :type private_link_service_connection_state: ~_restclient.models.PrivateLinkServiceConnectionState :param provisioning_state: The provisioning state of the private endpoint connection resource. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Failed' :type provisioning_state: str or ~_restclient.models.PrivateEndpointConnectionProvisioningState """ _validation = { 'private_link_service_connection_state': {'required': True}, } _attribute_map = { 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__(self, private_link_service_connection_state, private_endpoint=None, provisioning_state=None): super(PrivateEndpointConnection, self).__init__() self.private_endpoint = private_endpoint self.private_link_service_connection_state = private_link_service_connection_state self.provisioning_state = provisioning_state
2,163
561
import traceback from typing import Any, Callable, Dict, List, Optional, Set, Tuple from chainalytic_icon.common import config, util class ApiBundle(object): """ The interface to external consumers/applications """ def __init__(self, working_dir: str): super(ApiBundle, self).__init__() self.working_dir = working_dir self.collator = None self.logger = util.get_child_logger('provider.api_bundle') def set_collator(self, collator: 'Collator'): self.collator = collator async def call_api(self, api_id: str, api_params: dict) -> Dict: ret = {'status': 0, 'result': None} func = getattr(self, api_id) if hasattr(self, api_id) else None try: if func: self.logger.debug(f'Found API: {api_id}, calling...') ret['result'] = await func(api_params) ret['status'] = 1 else: self.logger.warning(f'API not found: {api_id}') ret['status'] = -1 ret['result'] = f'API not found: {api_id}' except Exception as e: ret['status'] = 0 ret['result'] = f'{str(e)}\n{traceback.format_exc()}' self.logger.error(f'ERROR when calling API: {api_id}') self.logger.error(f'{str(e)}\n{traceback.format_exc()}') return ret # ################# # APIs to be called # async def last_block_height(self, api_params: dict) -> Optional[int]: if 'transform_id' in api_params: return await self.collator.last_block_height(api_params['transform_id']) async def latest_upstream_block_height(self, api_params: dict) -> Optional[int]: return await self.collator.latest_upstream_block_height() async def get_block(self, api_params: dict) -> Optional[dict]: if 'transform_id' in api_params: return await self.collator.get_block(api_params['height'], api_params['transform_id']) # ######################## # For `stake_history` only # async def latest_unstake_state(self, api_params: dict) -> Optional[dict]: return await self.collator.latest_unstake_state() # ########################### # For `contract_history` only # async def contract_transaction(self, api_params: dict) -> Optional[dict]: return await self.collator.contract_transaction( 
api_params['address'], int(api_params['size']) ) async def contract_internal_transaction(self, api_params: dict) -> Optional[dict]: return await self.collator.contract_internal_transaction( api_params['address'], int(api_params['size']) ) async def contract_stats(self, api_params: dict) -> Optional[dict]: return await self.collator.contract_stats(api_params['address']) async def contract_list(self, api_params: dict) -> Optional[dict]: return await self.collator.contract_list() async def max_tx_per_contract(self, api_params: dict) -> Optional[dict]: return await self.collator.max_tx_per_contract()
3,100
929
# date: 2019.05.05 # author: Bartłomiej 'furas' Burek import robobrowser br = robobrowser.RoboBrowser(user_agent='Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0') br.parser = 'lxml' br.open("https://www.just-eat.fr") print(br.get_forms()) iframe_src = br.select('iframe')[0]['src'] print(iframe_src) br.open("https://www.just-eat.fr"+iframe_src) print(br.parsed) br.open("https://www.just-eat.fr") print(br.get_forms())
447
212
import copy import torch from utils import helpers from utils.layers import conv, linear, batch_norm def ticketfy(model, split_rate, split_mode="kels"): conv_layers, linear_layers, bn_layers = helpers.get_layers(model) for n, _ in conv_layers: cur_conv = helpers.rgetattr(model, n) helpers.rsetattr( model, n, conv.SplitConv(cur_conv.in_channels, cur_conv.out_channels, kernel_size=cur_conv.kernel_size, stride=cur_conv.stride, padding=cur_conv.padding, dilation=cur_conv.dilation, groups=cur_conv.groups, bias=cur_conv.bias != None, padding_mode=cur_conv.padding_mode, split_rate=split_rate, split_mode=split_mode)) for i, (n, _) in enumerate(linear_layers): cur_linear = helpers.rgetattr(model, n) helpers.rsetattr( model, n, linear.SplitLinear(cur_linear.in_features, cur_linear.out_features, bias=cur_linear.bias != None, split_rate=split_rate, split_mode=split_mode, last_layer=i == len(linear_layers) - 1)) for n, _ in bn_layers: cur_bn = helpers.rgetattr(model, n) helpers.rsetattr( model, n, batch_norm.SplitBatchNorm( cur_bn.num_features, eps=cur_bn.eps, momentum=cur_bn.momentum, track_running_stats=cur_bn.track_running_stats, split_rate=split_rate)) def regenerate(model, evolve_mode="rand", device="cpu"): for _, m in model.named_modules(): if hasattr(m, "weight") and m.weight is not None: if hasattr(m, "mask"): ## Conv and Linear but not BN assert m.split_rate < 1.0 if m.__class__ == conv.SplitConv or m.__class__ == linear.SplitLinear: m.split_reinitialize(evolve_mode, device) else: raise NotImplemented('Invalid layer {}'.format( m.__class__)) def extract_ticket(model, split_rate): split_model = copy.deepcopy(model) for n, m in split_model.named_modules(): if hasattr(m, "weight") and m.weight is not None: if hasattr(m, "mask"): m.extract_slim() # if src_m.__class__ == conv_type.SplitConv: # elif src_m.__class__ == linear_type.SplitLinear: elif m.__class__ == batch_norm.SplitBatchNorm: ## BatchNorm has bn_maks not mask m.extract_slim() return split_model
2,880
834
from utils import scrape_helper url = "http://www.investopedia.com/terms/1/" links = scrape_helper.get_term_links_from_page(url) print(links)
145
58
#!/usr/bin/env python # # Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Drop hints from a font.""" __author__ = "roozbeh@google.com (Roozbeh Pournader)" import array import sys from fontTools import ttLib def drop_hints_from_glyphs(font): """Drops the hints from a font's glyphs.""" glyf_table = font["glyf"] for glyph_index in range(len(glyf_table.glyphOrder)): glyph_name = glyf_table.glyphOrder[glyph_index] glyph = glyf_table[glyph_name] if glyph.numberOfContours > 0: if glyph.program.bytecode: glyph.program.bytecode = array.array("B") def drop_tables(font, tables): """Drops the listed tables from a font.""" for table in tables: if table in font: del font[table] def main(argv): """Drop the hints from the first file specified and save as second.""" font = ttLib.TTFont(argv[1]) drop_hints_from_glyphs(font) drop_tables(font, ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"]) font.save(argv[2]) if __name__ == "__main__": main(sys.argv)
1,626
550
# Copyright (c) 2014, Menno Smits # Released subject to the New BSD License # Please see http://en.wikipedia.org/wiki/BSD_licenses """ A lexical analyzer class for IMAP responses. Although Lexer does all the work, TokenSource is the class to use for external callers. """ from __future__ import unicode_literals from . import six __all__ = ["TokenSource"] CTRL_CHARS = frozenset(c for c in range(32)) ALL_CHARS = frozenset(c for c in range(256)) SPECIALS = frozenset(c for c in six.iterbytes(b' ()%"[')) NON_SPECIALS = ALL_CHARS - SPECIALS - CTRL_CHARS WHITESPACE = frozenset(c for c in six.iterbytes(b' \t\r\n')) BACKSLASH = ord('\\') OPEN_SQUARE = ord('[') CLOSE_SQUARE = ord(']') DOUBLE_QUOTE = ord('"') class TokenSource(object): """ A simple iterator for the Lexer class that also provides access to the current IMAP literal. """ def __init__(self, text): self.lex = Lexer(text) self.src = iter(self.lex) @property def current_literal(self): return self.lex.current_source.literal def __iter__(self): return self.src class Lexer(object): """ A lexical analyzer class for IMAP """ def __init__(self, text): self.sources = (LiteralHandlingIter(self, chunk) for chunk in text) self.current_source = None def read_until(self, stream_i, end_char, escape=True): token = bytearray() try: for nextchar in stream_i: if escape and nextchar == BACKSLASH: escaper = nextchar nextchar = six.next(stream_i) if nextchar != escaper and nextchar != end_char: token.append(escaper) # Don't touch invalid escaping elif nextchar == end_char: break token.append(nextchar) else: raise ValueError("No closing '%s'" % chr(end_char)) except StopIteration: raise ValueError("No closing '%s'" % chr(end_char)) token.append(end_char) return token def read_token_stream(self, stream_i): whitespace = WHITESPACE wordchars = NON_SPECIALS read_until = self.read_until while True: # Whitespace for nextchar in stream_i: if nextchar not in whitespace: stream_i.push(nextchar) break # done skipping over the whitespace # 
Non-whitespace token = bytearray() for nextchar in stream_i: if nextchar in wordchars: token.append(nextchar) elif nextchar == OPEN_SQUARE: token.append(nextchar) token.extend(read_until(stream_i, CLOSE_SQUARE, escape=False)) else: if nextchar in whitespace: yield token elif nextchar == DOUBLE_QUOTE: assert not token token.append(nextchar) token.extend(read_until(stream_i, nextchar)) yield token else: # Other punctuation, eg. "(". This ends the current token. if token: yield token yield bytearray([nextchar]) break else: if token: yield token break def __iter__(self): for source in self.sources: self.current_source = source for tok in self.read_token_stream(iter(source)): yield bytes(tok) # imaplib has poor handling of 'literals' - it both fails to remove the # {size} marker, and fails to keep responses grouped into the same logical # 'line'. What we end up with is a list of response 'records', where each # record is either a simple string, or tuple of (str_with_lit, literal) - # where str_with_lit is a string with the {xxx} marker at its end. Note # that each element of this list does *not* correspond 1:1 with the # untagged responses. # (http://bugs.python.org/issue5045 also has comments about this) # So: we have a special object for each of these records. When a # string literal is processed, we peek into this object to grab the # literal. class LiteralHandlingIter: def __init__(self, lexer, resp_record): self.lexer = lexer if isinstance(resp_record, tuple): # A 'record' with a string which includes a literal marker, and # the literal itself. self.src_text = resp_record[0] assert self.src_text.endswith(b"}"), self.src_text self.literal = resp_record[1] else: # just a line with no literals. 
self.src_text = resp_record self.literal = None def __iter__(self): return PushableIterator(six.iterbytes(self.src_text)) class PushableIterator(object): NO_MORE = object() def __init__(self, it): self.it = iter(it) self.pushed = [] def __iter__(self): return self def __next__(self): if self.pushed: return self.pushed.pop() return six.next(self.it) # For Python 2 compatibility next = __next__ def push(self, item): self.pushed.append(item)
5,438
1,582
from .data_collator import ( DataCollatorWithPaddingWithAdditionalFeatures, EncoderDecoderDataCollatorWithPadding, ) from .datasets import *
149
46
# _*_ coding: utf-8 _*_ """ Created by lr on 2019/08/29. 此模块用来编写flasgger中api列表下的详细操作信息 """ from app.api_docs.v1 import user, client, token, \ banner, theme, product, category, \ address, order, pay from app.api_docs.cms import cms_user, file __author__ = 'lr'
274
142
# Copyright (C) 2017 Mandiant, Inc. All Rights Reserved. import logging from typing import List, Tuple from dataclasses import dataclass import viv_utils import envi.memory import viv_utils.emulator_drivers from envi import Emulator from . import api_hooks logger = logging.getLogger("floss") MAX_MAPS_SIZE = 1024 * 1024 * 100 # 100MB max memory allocated in an emulator instance def is_import(emu, va): """ Return True if the given VA is that of an imported function. """ # TODO: also check location type t = emu.getVivTaint(va) if t is None: return False return t[1] == "import" # type aliases for envi.memory map MemoryMapDescriptor = Tuple[ # va int, # size int, # perms int, # name str, ] # type aliases for envi.memory map MemoryMap = Tuple[ # start int, # end int, # descriptor MemoryMapDescriptor, # content bytes, ] # type aliases for envi.memory map Memory = List[MemoryMap] @dataclass class Snapshot: """ A snapshot of the state of the CPU and memory. Attributes: memory: a snapshot of the memory contents sp: the stack counter pc: the instruction pointer """ memory: Memory sp: int pc: int def get_map_size(emu): size = 0 for mapva, mapsize, mperm, mfname in emu.getMemoryMaps(): mapsize += size return size class MapsTooLargeError(Exception): pass def make_snapshot(emu: Emulator) -> Snapshot: """ Create a snapshot of the current CPU and memory. """ if get_map_size(emu) > MAX_MAPS_SIZE: logger.debug("emulator mapped too much memory: 0x%x", get_map_size(emu)) raise MapsTooLargeError() return Snapshot(emu.getMemorySnap(), emu.getStackCounter(), emu.getProgramCounter()) @dataclass class Delta: """ a pair of snapshots from before and after an operation. facilitates diffing the state of an emulator. """ pre: Snapshot post: Snapshot class DeltaCollectorHook(viv_utils.emulator_drivers.Hook): """ hook that collects Deltas at each imported API call. 
""" def __init__(self, pre_snap: Snapshot): super(DeltaCollectorHook, self).__init__() self._pre_snap = pre_snap self.deltas: List[Delta] = [] def hook(self, callname, driver, callconv, api, argv): if is_import(driver._emu, driver._emu.getProgramCounter()): try: self.deltas.append(Delta(self._pre_snap, make_snapshot(driver._emu))) except MapsTooLargeError: logger.debug("despite call to import %s, maps too large, not extracting strings", callname) pass def emulate_function( emu: Emulator, function_index, fva: int, return_address: int, max_instruction_count: int ) -> List[Delta]: """ Emulate a function and collect snapshots at each interesting place. These interesting places include calls to imported API functions and the final state of the emulator. Emulation continues until the return address is hit, or the given max_instruction_count is hit. Some library functions are shimmed, such as memory allocation routines. This helps "normal" routines emulate correct using standard library function. These include: - GetProcessHeap - RtlAllocateHeap - AllocateHeap - malloc :type function_index: viv_utils.FunctionIndex :param fva: The start address of the function to emulate. :param return_address: The expected return address of the function. Emulation stops here. :param max_instruction_count: The max number of instructions to emulate. This helps avoid unexpected infinite loops. 
""" try: pre_snap = make_snapshot(emu) except MapsTooLargeError: logger.warn("initial snapshot mapped too much memory, can't extract strings") return [] delta_collector = DeltaCollectorHook(pre_snap) try: logger.debug("Emulating function at 0x%08X", fva) driver = viv_utils.emulator_drivers.DebuggerEmulatorDriver(emu) monitor = api_hooks.ApiMonitor(emu.vw, function_index) driver.add_monitor(monitor) driver.add_hook(delta_collector) with api_hooks.defaultHooks(driver): driver.runToVa(return_address, max_instruction_count) except viv_utils.emulator_drivers.InstructionRangeExceededError: logger.debug("Halting as emulation has escaped!") except envi.InvalidInstruction: logger.debug("vivisect encountered an invalid instruction. will continue processing.", exc_info=True) except envi.UnsupportedInstruction: logger.debug("vivisect encountered an unsupported instruction. will continue processing.", exc_info=True) except envi.BreakpointHit: logger.debug( "vivisect encountered an unexpected emulation breakpoint. will continue processing.", exc_info=True ) except viv_utils.emulator_drivers.StopEmulation: pass except Exception: logger.debug("vivisect encountered an unexpected exception. will continue processing.", exc_info=True) logger.debug("Ended emulation at 0x%08X", emu.getProgramCounter()) deltas = delta_collector.deltas try: deltas.append(Delta(pre_snap, make_snapshot(emu))) except MapsTooLargeError: logger.debug("failed to create final snapshot, emulator mapped too much memory, skipping") pass return deltas
5,512
1,658
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 18-5-21 3:33 PM
# @Author  : Luo Yao
# @Site    : http://icode.baidu.com/repos/baidu/personal-code/Luoyao
# @File    : lanenet_hnet_data_processor.py
# @IDE: PyCharm Community Edition
"""
Training data pipeline for the HNet component of LaneNet.
"""
import os.path as ops
import json

import cv2
import numpy as np

try:
    from cv2 import cv2
except ImportError:
    pass


class DataSet(object):
    """
    Dataset wrapper: loads TuSimple-style JSON annotations and serves
    shuffled batches of (image, lane ground-truth points).
    """
    def __init__(self, dataset_info_file):
        """
        :param dataset_info_file: list of JSON annotation file paths
        """
        self._label_image_path, self._label_gt_pts = self._init_dataset(dataset_info_file)
        self._random_dataset()
        self._next_batch_loop_count = 0

    def _init_dataset(self, dataset_info_file):
        """
        Parse annotation samples from the JSON label files.

        Each line of each file is one JSON record: 'raw_file' (image path
        relative to the annotation dir), 'h_samples' (row coordinates) and
        'lanes' (one column list per lane; -2 marks "no lane at this row").

        :param dataset_info_file: list of JSON annotation file paths
        :return: (image path array, ground-truth points array)
        """
        label_image_path = []
        label_gt_pts = []

        for json_file_path in dataset_info_file:
            assert ops.exists(json_file_path), '{:s} not exist'.format(json_file_path)

            src_dir = ops.split(json_file_path)[0]

            with open(json_file_path, 'r') as file:
                for line in file:
                    info_dict = json.loads(line)

                    # NOTE(review): image_dir_split is computed but never used
                    # below — image_path is built from src_dir directly.
                    image_dir = ops.split(info_dict['raw_file'])[0]
                    image_dir_split = image_dir.split('/')[1:]
                    image_dir_split.append(ops.split(info_dict['raw_file'])[1])
                    image_path = ops.join(src_dir, info_dict['raw_file'])
                    assert ops.exists(image_path), '{:s} not exist'.format(image_path)

                    label_image_path.append(image_path)

                    h_samples = info_dict['h_samples']
                    lanes = info_dict['lanes']

                    gt_pts = []
                    for lane in lanes:
                        assert len(h_samples) == len(lane)
                        lane_pts = []
                        for index in range(len(lane)):
                            if lane[index] == -2:
                                # -2 means the lane does not cross this row.
                                continue
                            else:
                                ptx = lane[index]
                                pty = h_samples[index]
                                lane_pts.append([ptx, pty])
                        # Drop lanes with too few points to fit a curve.
                        # (The emptiness check is subsumed by the <= 3 check.)
                        if not lane_pts:
                            continue
                        if len(lane_pts) <= 3:
                            continue
                        gt_pts.append(lane_pts)
                    label_gt_pts.append(gt_pts)
        # NOTE(review): label_gt_pts is ragged; np.array on ragged input makes
        # an object array (deprecated/erroring on newer numpy) — verify
        # against the numpy version in use.
        return np.array(label_image_path), np.array(label_gt_pts)

    def _random_dataset(self):
        """
        Shuffle paths and ground truth with the same permutation.
        :return:
        """
        assert self._label_image_path.shape[0] == self._label_gt_pts.shape[0]

        random_idx = np.random.permutation(self._label_image_path.shape[0])
        self._label_image_path = self._label_image_path[random_idx]
        self._label_gt_pts = self._label_gt_pts[random_idx]

    def next_batch(self, batch_size):
        """
        Return the next batch as (list of BGR images, ground-truth points).
        Reshuffles and restarts from index 0 when the remaining samples
        cannot fill a whole batch.

        :param batch_size:
        :return:
        """
        assert self._label_gt_pts.shape[0] == self._label_image_path.shape[0]

        idx_start = batch_size * self._next_batch_loop_count
        idx_end = batch_size * self._next_batch_loop_count + batch_size

        if idx_end > self._label_image_path.shape[0]:
            # Epoch exhausted: reshuffle and recurse exactly once from 0.
            self._random_dataset()
            self._next_batch_loop_count = 0
            return self.next_batch(batch_size)
        else:
            gt_img_list = self._label_image_path[idx_start:idx_end]
            gt_pts_list = self._label_gt_pts[idx_start:idx_end]

            gt_imgs = []

            for gt_img_path in gt_img_list:
                gt_imgs.append(cv2.imread(gt_img_path, cv2.IMREAD_COLOR))

            self._next_batch_loop_count += 1
            return gt_imgs, gt_pts_list


if __name__ == '__main__':
    # Quick smoke test against a local TuSimple dataset layout.
    import glob
    json_file_list = glob.glob('{:s}/*.json'.format('/home/baidu/DataBase/Semantic_Segmentation/'
                                                    'TUSimple_Lane_Detection/training'))
    json_file_list = [tmp for tmp in json_file_list if 'test' not in tmp]
    val = DataSet(json_file_list)
    a1, a2 = val.next_batch(1)
    print(a1)
    print(a2)
    # NOTE(review): a1[0] is already a decoded ndarray (next_batch returns
    # cv2.imread results), yet it is passed to cv2.imread which expects a
    # path — confirm intent.
    src_image = cv2.imread(a1[0], cv2.IMREAD_COLOR)
    image = np.zeros(shape=[src_image.shape[0], src_image.shape[1]], dtype=np.uint8)
    for pt in a2[0]:
        ptx = pt[0]
        pty = pt[1]
        image[pty, ptx] = 255
    import matplotlib.pyplot as plt
    plt.imshow(image, cmap='gray')
    plt.show()
4,575
1,535
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com)    #
# Permission given to modify the code as long as you keep this        #
# declaration at the top                                              #
#######################################################################


class ConstantSchedule:
    """Schedule that returns the same value on every call."""

    def __init__(self, val):
        self.val = val

    def __call__(self, steps=1):
        return self.val


class LinearSchedule:
    """Linearly interpolates from ``start`` to ``end`` over ``steps`` calls,
    then stays clamped at ``end``.

    Each call returns the current value, then advances it by
    ``steps * increment`` (the call argument scales the advance).
    """

    def __init__(self, start, end=None, steps=None):
        # A missing end point degenerates to a constant schedule.
        if end is None:
            end, steps = start, 1
        self.inc = (end - start) / float(steps)
        self.current = start
        self.end = end
        # Clamp toward the end point from whichever side we approach it.
        self.bound = min if end > start else max

    def __call__(self, steps=1):
        val, self.current = self.current, self.bound(self.current + self.inc * steps, self.end)
        return val
994
265
# Scrape the Bitcoin historical-data table from CoinMarketCap.
import requests
import bs4

# Accumulators for table columns.
dateList = []
# NOTE(review): higlist (sic — likely "highlist") and lowlist are declared
# but never populated in this chunk; confirm they are filled further down.
higlist = []
lowlist = []
r = requests.get(
    'https://coinmarketcap.com/currencies/bitcoin/historical-data/')
soup = bs4.BeautifulSoup(r.text, "lxml")
# Each data row of the table carries class "text-right".
tr = soup.find_all('tr', {'class': 'text-right'})
for item in tr:
    # The date cell is the only left-aligned cell in the row.
    dateList.append(item.find('td', {'class': 'text-left'}).text)
327
130
import os
from colorama import Fore, Style
from pathlib import Path

# NOTE(review): `os` appears unused in this chunk — confirm before removing.

# Metadata format identifiers used to build schema filenames.
DIF = "dif10"
ECHO10 = "echo10"
UMM_JSON = "umm-json"

ROOT_DIR = (
    # go up one directory
    Path(__file__).resolve().parents[1]
)

SCHEMAS_BASE_PATH = f"{ROOT_DIR}/schemas"

# Schema names grouped by the file extension of their schema file.
SCHEMAS = {
    "json": [
        "checks",
        "check_messages",
        "check_messages_override",
        "checks_override",
        "rule_mapping",
        "rules_override",
        UMM_JSON
    ],
    "csv": [
        "granuledataformat",
        "instruments",
        "locations",
        "projects",
        "providers",
        "platforms",
        "sciencekeywords",
        "rucontenttype"
    ],
    "xsd": [
        f"{DIF}_xml",
        f"{ECHO10}_xml"
    ],
    "xml": [
        "catalog"
    ]
}

# schema name -> absolute path of its schema file.
SCHEMA_PATHS = {
    schema: f"{SCHEMAS_BASE_PATH}/{schema}.{filetype}"
    for filetype, schemas in SCHEMAS.items()
    for schema in schemas
}

VERSION_FILE = f"{SCHEMAS_BASE_PATH}/version.txt"

# Terminal color/style shortcuts for console output.
COLOR = {
    "title": Fore.GREEN,
    "info": Fore.BLUE,
    "error": Fore.RED,
    "warning": Fore.YELLOW,
    "reset": Style.RESET_ALL,
    "bright": Style.BRIGHT
}

# GCMD keyword vocabularies, downloadable as CSV per scheme.
GCMD_BASIC_URL = "https://gcmdservices.gsfc.nasa.gov/kms/concepts/concept_scheme/"

GCMD_KEYWORDS = [
    "granuledataformat",
    "instruments",
    "locations",
    "platforms",
    "projects",
    "providers",
    "rucontenttype",
    "sciencekeywords"
]

# keyword scheme -> CSV download URL.
GCMD_LINKS = {
    keyword: f"{GCMD_BASIC_URL}{keyword}?format=csv"
    for keyword in GCMD_KEYWORDS
}

CMR_URL = "https://cmr.earthdata.nasa.gov"
1,517
595
""" Author: Narinder Singh Project: Cilia Segmentation Date: 27 Feb 2019 Course: CSCI 8360 @ UGA Semester: Spring 2019 Module: Utilities.py Description: This module contains methods and classes that make life easier. """ import os import sys import numpy as np import matplotlib.pyplot as matplot from scipy.misc import imsave from PIL import Image from Config import * MASKS_PATH = os.path.join(DATA_FILES_PATH, "masks/") LIT_MASKS_PATH = os.path.join(MASKS_PATH, "lit/") FRAMES_PATH = os.path.join(DATA_FILES_PATH, "data/frames") # Stretching constant for masks to scale the range of grayscales from [0, 2] to [0, 255] MASK_STRETCHING_CONSTANT = 127.5 class UtilitiesError(Exception): pass class BadHashError(UtilitiesError): pass class ProgressBar: """ A handrolled implementation of a progress bar. The bar displays the progress as a ratio like this: (1/360). """ def __init__(self, max = 100, message = "Initiating ....."): """ Initialize the bar with the total number of units (scale). """ self.max = max self.current = 0 print message + '\n' def update(self, add = 1): """ Record progress. """ self.current += add self._clear() self._display() def _display(self): """ Print the completion ratio on the screen. """ print "(" + str(self.current) + "/" + str(self.max) + ")" def _clear(self): """ Erase the old ratio from the console. """ sys.stdout.write("\033[F") sys.stdout.flush() def flen(filename): """ File LENgth computes and returns the number of lines in a file. @filename <string> is path to a file. This is an epensive method to call for the whole file is read to determine the number of lines. returns: <integer> line count """ # Read and count lines. with open(filename, 'r') as infile: return sum((1 for line in infile)) def isImageFile(fpath): """ Returns whether or not the given path or filename is for an image file. The method is crude at the moment and just checks for some popular formats. 
""" path, fname = os.path.split(fpath) if fname.endswith(("png", "jpeg", "gif", "tiff", "bmp")): return True else: return False def invertMask(mask): """ Inverts a numpy binary mask. """ return mask == False def readMask(hash, binarize=True): """ Reads the mask for the given hash and if binarize flag is set, makes the mask binary (True/False : Cilia/Not-cilia) """ fpath = os.path.join(MASKS_PATH, hash + ".png") if not os.path.isfile(fpath): raise BadHashError("Hash: " + hash + " does not exist OR does not have a mask against it.") img = Image.open(fpath) mat = np.asarray(img, np.int32) mat.setflags(write=1) if binarize: ciliaMask = mat == CILIA_GRAYSCALE backgroundMask = invertMask(ciliaMask) mat[ciliaMask] = True mat[backgroundMask] = False return mat def displayMask(hash, binarize=True): """ Displays the cilia mask against the given hash value. """ mask = readMask(hash, binarize) if binarize: im = Image.fromarray(mask * 255) else: im = Image.fromarray(mask * MASK_STRETCHING_CONSTANT) im.show() def displayHeatMap(mat): """ Dispalys the heat map for the given matrix. """ matplot.imshow(mat, cmap='hot') matplot.show() def readLines(filepath): """ Reads and returns the lines of the given file as a list. """ lines = [] with open(filepath, 'r') as infile: for line in infile: lines.append(line.strip()) return lines def getVideoFramesDirectory(hash): """ Returns the video frames directory for the given hash. """ dir = os.path.join(FRAMES_PATH, hash) if not os.path.isdir(dir): raise BadHashError("No frame directory found against the hash: " + hash) else: return dir def mean(collection): """ Mean for a numeric collection """ return sum(collection) / (len(collection) or 1) def stretchAndSaveMasks(hashes): """ This method stretches the contrast for the masks by rescaling them to 0-255 grayscale making the white regions in the masks cilia cells. 
""" # Read each mask and hash for hash in hashes: mask = readMask(hash, binarize=False) result = mask * MASK_STRETCHING_CONSTANT imsave(os.path.join(LIT_MASKS_PATH, hash + ".png"), result) if __name__ == '__main__': # Quick testing etc. hashes = readLines(TRAIN_FILE) stretchAndSaveMasks(hashes)
4,294
1,627
# This example requires the 'members' and 'message_content' privileged intents import re import os import discord from discord.ext import commands from embed_messages.SH_Embed import ScribbleHubEmbed from embed_messages.AO3_Embed import ArchiveOfOurOwnEmbed from embed_messages.FF_Embed import FanFictionDotNetEmbed from embed_messages.FL_Embed import FictionDotLiveEmbed from dotenv import load_dotenv load_dotenv() BOT_TOKEN = os.getenv('TOKEN') description = """An example bot to showcase the discord.ext.commands extension module. There are a number of utility commands being showcased here.""" intents = discord.Intents.default() intents.members = True # intents.message_content = True """ This worked perfectly about an hour ago and now it throws the following error: (virtualenv) nonso@HPEnvy:~/Documents/Code/Projects/Summarium$ python3 main.py Traceback (most recent call last): File "main.py", line 25, in <module> intents.message_content = True AttributeError: 'Intents' object has no attribute 'message_content' (virtualenv) nonso@HPEnvy:~/Documents/Code/Projects/Summarium$ So I commented that line out and ran my code again and it worked somehow even though it shouldn't. Putting this comment here incase it causes chaos later on. 
""" bot = commands.Bot(command_prefix="?", description=description, intents=intents) @bot.event async def on_ready(): print(f"Logged in as {bot.user} (ID: {bot.user.id})") print("____________________________________________") @bot.event async def on_message(message): if message.author.id == bot.user.id: return if message.author.bot: return # Do not reply to other bots # Pulling out all URLs URLs = re.findall( r""" \b((?:https?://)?(?:(?:www\.)?(?:[\da-z\.-]+)\.(?:[a-z]{2,6}) |(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5] |2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:(?:[0-9a-fA-F]{1,4}:){7,7} [0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F] {1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?:: [0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F] {1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4} |(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F] {1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:) |fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}) {0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3} (?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:) {1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25 [0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))(?::[0-9]{1,4}|[1-5][0-9] {4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])? 
(?:/[\w\.-]*)*/?)\b """, message.content, re.VERBOSE) for i in URLs: if re.search(r"(^https://www\.scribblehub\.com/(series|read|profile))/\d+", i, re.IGNORECASE): await message.reply(embed=ScribbleHubEmbed(i)) elif re.search(r"^https://archiveofourown\.org/(\bseries\b|\bworks\b|\bcollections\b)/", i, re.IGNORECASE): await message.reply(embed=ArchiveOfOurOwnEmbed(i)) elif re.search(r"^https://(www|m)\.(\bfanfiction\b\.\bnet\b)/s/\d+/\d+/\w*", i, re.IGNORECASE): await message.reply(file=FanFictionDotNetEmbed(i)[0], embed=FanFictionDotNetEmbed(i)[1]) elif re.search(r'^https?://fiction\.live/(?:stories|Sci-fi)/[^\/]+/([0-9a-zA-Z\-]+)/?.*', i, re.IGNORECASE): await message.reply(embed=FictionDotLiveEmbed(i)) bot.run(BOT_TOKEN)
3,467
1,739
""" This module contains utility classes and methods to be used in tests """
76
18
#
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2010, 2012, 2013, 2014, 2015, 2016 Synacor, Inc.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software Foundation,
# version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with this program.
# If not, see <https://www.gnu.org/licenses/>.
# ***** END LICENSE BLOCK *****
#
# NOTE: Jython/Python 2 source (old `except E, e` syntax; Java class imports).

import conf

from com.zimbra.cs.ldap.LdapServerConfig import GenericLdapConfig
from com.zimbra.cs.ldap import LdapClient
from com.zimbra.cs.ldap import LdapUsage
from com.zimbra.cs.ldap import ZAttributes
from com.zimbra.cs.ldap import ZLdapContext
from com.zimbra.cs.ldap import ZLdapFilter
from com.zimbra.cs.ldap import ZLdapFilterFactory
from com.zimbra.cs.ldap.ZLdapFilterFactory import FilterId
from com.zimbra.cs.ldap import ZSearchControls
from com.zimbra.cs.ldap import ZSearchResultEntry;
from com.zimbra.cs.ldap import ZMutableEntry
from com.zimbra.cs.ldap import ZSearchResultEnumeration
from com.zimbra.cs.ldap import ZSearchScope
from com.zimbra.cs.ldap.LdapException import LdapSizeLimitExceededException

from logmsg import *

# (Key, DN, requires_master)
# Maps a local-config key to the cn=config attribute that backs it, the DN
# holding that attribute, and whether modifying it requires a master.
keymap = {
    "ldap_common_loglevel" : ("olcLogLevel", "cn=config", False),
    "ldap_common_threads" : ("olcThreads", "cn=config", False),
    "ldap_common_toolthreads" : ("olcToolThreads", "cn=config", False),
    "ldap_common_require_tls" : ("olcSecurity", "cn=config", False),
    "ldap_common_writetimeout" : ("olcWriteTimeout", "cn=config", False),
    "ldap_common_tlsdhparamfile" : ("olcTLSDHParamFile", "cn=config", False),
    "ldap_common_tlsprotocolmin" : ("olcTLSProtocolMin", "cn=config", False),
    "ldap_common_tlsciphersuite" : ("olcTLSCipherSuite", "cn=config", False),
    "ldap_db_maxsize" : ("olcDbMaxsize", "olcDatabase={3}mdb,cn=config", False),
    "ldap_db_envflags" : ("olcDbEnvFlags", "olcDatabase={3}mdb,cn=config", False),
    "ldap_db_rtxnsize" : ("olcDbRtxnSize", "olcDatabase={3}mdb,cn=config", False),
    "ldap_accesslog_maxsize" : ("olcDbMaxsize", "olcDatabase={2}mdb,cn=config", True),
    "ldap_accesslog_envflags" : ("olcDbEnvFlags", "olcDatabase={2}mdb,cn=config", True),
    "ldap_overlay_syncprov_checkpoint" : ("olcSpCheckpoint", "olcOverlay={0}syncprov,olcDatabase={3}mdb,cn=config", True),
    "ldap_overlay_syncprov_sessionlog" : ("olcSpSessionlog", "olcOverlay={0}syncprov,olcDatabase={3}mdb,cn=config", True),
    "ldap_overlay_accesslog_logpurge" : ("olcAccessLogPurge", "olcOverlay={1}accesslog,olcDatabase={3}mdb,cn=config", True)
}


class Ldap:
    # Class-level state shared by the classmethod-only API.
    master = False        # whether the local server proved to be a replication master
    mLdapConfig = None    # GenericLdapConfig bound to ldapi:/// as cn=config

    @classmethod
    def initLdap(cls, c = None):
        """Initialise the LDAP context. `c` is an optional pre-built config."""
        if c:
            cls.cf = c
            Log.logMsg(5, "Creating ldap context")
            ldapUrl = "ldapi:///"
            bindDN = "cn=config"
            try:
                cls.mLdapConfig = GenericLdapConfig(ldapUrl, cls.cf.ldap_starttls_required, bindDN, cls.cf.ldap_root_password)
            except Exception, e:
                Log.logMsg(1, "LDAP CONFIG FAILURE (%s)" % e)
        else:
            cls.cf = conf.Config()

    @classmethod
    def modify_attribute(cls, key, value):
        """Set the cn=config attribute backing `key` to `value` if it differs."""
        if cls.cf.ldap_is_master:
            # Probe cn=accesslog: a successful base search means this node
            # actually serves the accesslog DB, i.e. it is a master.
            atbase = "cn=accesslog"
            atfilter = "(objectClass=*)"
            atreturn = ['1.1']
            zfilter = ZLdapFilterFactory.getInstance().fromFilterString(FilterId.ZMCONFIGD, atfilter)
            searchControls = ZSearchControls.createSearchControls(ZSearchScope.SEARCH_SCOPE_BASE, ZSearchControls.SIZE_UNLIMITED, atreturn)
            mLdapContext = LdapClient.getContext(cls.mLdapConfig, LdapUsage.SEARCH)
            try:
                ne = mLdapContext.searchDir(atbase, zfilter, searchControls)
            except:
                cls.master = False
            else:
                cls.master = True
                Log.logMsg(5, "Ldap config is master")
            LdapClient.closeContext(mLdapContext)
        (attr, dn, xform) = Ldap.lookupKey(key)
        if attr is not None:
            v = xform % (value,)
            atreturn = [attr]
            searchControls = ZSearchControls.createSearchControls(ZSearchScope.SEARCH_SCOPE_BASE, ZSearchControls.SIZE_UNLIMITED, atreturn)
            mLdapContext = LdapClient.getContext(cls.mLdapConfig, LdapUsage.SEARCH)
            # NOTE(review): zfilter is only bound inside the ldap_is_master
            # branch above; this line raises NameError otherwise — confirm.
            ne = mLdapContext.searchDir(dn, zfilter, searchControls)
            entry = ne.next()
            entryAttrs = entry.getAttributes()
            origValue = entryAttrs.getAttrString(attr)
            attrPresent = entryAttrs.hasAttribute(attr)
            LdapClient.closeContext(mLdapContext)
            if origValue != v:
                if attr == "olcSpSessionlog" and not attrPresent:
                    Log.logMsg(4, "olcSpSessionlog attribute is not present and can't replace it")
                else:
                    Log.logMsg(4, "Setting %s to %s" % (key, v))
                    mLdapContext = LdapClient.getContext(cls.mLdapConfig, LdapUsage.MOD)
                    mEntry = LdapClient.createMutableEntry()
                    mEntry.setAttr(attr, v)
                    try:
                        mLdapContext.replaceAttributes(dn, mEntry.getAttributes())
                        LdapClient.closeContext(mLdapContext)
                    except:
                        return 1;

    @classmethod
    def lookupKey(cls, key):
        """Return (attribute, dn, format-string) for `key`, or (None,)*3 when
        the key needs a master and this node is not one."""
        if key in keymap:
            (attr, dn, requires_master) = keymap[key]
            # NOTE(review): `re` is used here but not imported in this chunk;
            # presumably provided by `from logmsg import *` — verify.
            if re.match("ldap_db_", key) and not cls.master:
                # Non-masters have no accesslog DB, so the data DB is {2}.
                dn = "olcDatabase={2}mdb,cn=config"
            xform = "%s"
            if key == "ldap_common_require_tls":
                xform = "ssf=%s"
            if requires_master and not cls.master:
                Log.logMsg(5, "LDAP: Trying to modify key: %s when not a master" % (key,))
                return (None, None, None)
            else:
                Log.logMsg(5, "Found key %s and dn %s for %s (%s)" % (attr, dn, key, cls.master))
            return (attr, dn, xform)
        else:
            Log.logMsg(1, "UNKNOWN KEY %s" % (key,))
            raise Exception("Key error")


Ldap.initLdap()
5,895
2,442
""" Generic publisher for graphana """ import abc import six from decisionengine.framework.modules import Publisher import decisionengine_modules.graphite_client as graphite DEFAULT_GRAPHITE_HOST = 'fermicloud399.fnal.gov' DEFAULT_GRAPHITE_PORT = 2004 DEFAULT_GRAPHITE_CONTEXT = "" @six.add_metaclass(abc.ABCMeta) class GenericPublisher(Publisher.Publisher): def __init__(self, config): self.graphite_host = config.get('graphite_host', DEFAULT_GRAPHITE_HOST) self.graphite_port = config.get('graphite_port', DEFAULT_GRAPHITE_PORT) self.graphite_context_header = config.get( 'graphite_context', DEFAULT_GRAPHITE_CONTEXT) self.publush_to_graphite = config.get('publish_to_graphite') self.output_file = config.get('output_file') @abc.abstractmethod def consumes(self): # this must be implemented by the inherited class return None @abc.abstractmethod # this must be implemented by the inherited class def graphite_context(self, data_block): return None def publish(self, data_block): """ Publish data :type data_block: :obj:`~datablock.DataBlock` :arg data_block: data block """ if not self.consumes(): return data = data_block[self.consumes()[0]] if self.graphite_host and self.publush_to_graphite: end_point = graphite.Graphite( host=self.graphite_host, pickle_port=self.graphite_port) end_point.send_dict(self.graphite_context(data)[0], self.graphite_context( data)[1], debug_print=False, send_data=True) csv_data = data.to_csv(self.output_file, index=False) if not self.output_file: print(csv_data)
1,768
558
# (C) Datadog, Inc. 2020-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import pytest from datadog_checks.base import OpenMetricsBaseCheckV2 from datadog_checks.base.constants import ServiceCheck from datadog_checks.dev.testing import requires_py3 from .utils import get_check pytestmark = [requires_py3, pytest.mark.openmetrics, pytest.mark.openmetrics_interface] def test_default_config(aggregator, dd_run_check, mock_http_response): class Check(OpenMetricsBaseCheckV2): __NAMESPACE__ = 'test' def get_default_config(self): return {'metrics': ['.+'], 'rename_labels': {'foo': 'bar'}} mock_http_response( """ # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge go_memstats_alloc_bytes{foo="baz"} 6.396288e+06 """ ) check = Check('test', {}, [{'openmetrics_endpoint': 'test'}]) dd_run_check(check) aggregator.assert_metric( 'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'bar:baz'] ) aggregator.assert_all_metrics_covered() def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response): mock_http_response( """ # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
# TYPE go_memstats_alloc_bytes gauge go_memstats_alloc_bytes{foo="baz"} 6.396288e+06 # HELP state Node state # TYPE state gauge state{bar="baz"} 3 """ ) check = get_check( {'metrics': ['.+', {'state': {'type': 'service_check', 'status_map': {'3': 'ok'}}}], 'tags': ['foo:bar']} ) dd_run_check(check) aggregator.assert_metric( 'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'foo:bar', 'foo:baz'], ) aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar']) aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar']) aggregator.assert_all_metrics_covered() assert len(aggregator.service_check_names) == 2 aggregator.reset() check.set_dynamic_tags('baz:foo') dd_run_check(check) aggregator.assert_metric( 'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'foo:bar', 'foo:baz', 'baz:foo'], ) aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar']) aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar']) aggregator.assert_all_metrics_covered() assert len(aggregator.service_check_names) == 2
2,864
999
"""Set of configrations.""" _CONFIGS = { "postgres": { "host": "0.0.0.0", "port": "5432", "user": <USER>, "pwd": <USER>, "db": "postgres", }, "mongodb": { "host": "0.0.0.0", "port": "27017", "user": <USER>, "pwd": <PASS>, }, "clickhouse": { "host": "0.0.0.0", "port": "8123", "user": <USER>, "pwd": <PASS>, "db": "db_live", }, "aws-s3": { "url": <URL>, "login": <USER>, "password": <PASS>, }, }
566
232
# coding: utf-8
from __future__ import unicode_literals

from yargy import (
    rule,
    and_, or_, not_,
)
from yargy.interpretation import fact
from yargy.predicates import (
    eq, length_eq,
    gram, tag,
    is_single, is_capitalized
)
from yargy.predicates.bank import DictionaryPredicate as dictionary
from yargy.relations import gnc_relation

from natasha.data import load_dict

from yargy.rule.transformators import RuleTransformator
from yargy.rule.constructors import Rule
from yargy.predicates.constructors import AndPredicate

# Interpretation record for a personal name and its optional components.
Name = fact(
    'Name',
    ['first', 'middle', 'last', 'nick']
)

FIRST_DICT = set(load_dict('first.txt'))
MAYBE_FIRST_DICT = set(load_dict('maybe_first.txt'))
LAST_DICT = set(load_dict('last.txt'))


##########
#
#   COMPONENTS
#
###########

IN_FIRST = dictionary(FIRST_DICT)
IN_MAYBE_FIRST = dictionary(MAYBE_FIRST_DICT)
IN_LAST = dictionary(LAST_DICT)

# gender/number/case agreement shared by name parts.
gnc = gnc_relation()


########
#
#  FIRST
#
########

TITLE = is_capitalized()
NOUN = gram('NOUN')
# NOTE(review): tag('I') presumably matches tokens the upstream CRF tagger
# marked as inside a name span — confirm against the tagger's tag set.
NAME_CRF = tag('I')

ABBR = gram('Abbr')
SURN = gram('Surn')
# NOTE: this predicate is later shadowed by the rule NAME = or_(...) below;
# FIRST captures it before the rebinding.
NAME = and_(
    gram('Name'),
    not_(ABBR)
)
PATR = and_(
    gram('Patr'),
    not_(ABBR)
)

FIRST = and_(
    NAME_CRF,
    or_(
        NAME,
        IN_MAYBE_FIRST,
        IN_FIRST
    )
).interpretation(
    Name.first.inflected()
).match(gnc)

FIRST_ABBR = and_(
    ABBR,
    TITLE
).interpretation(
    Name.first
).match(gnc)


##########
#
#   LAST
#
#########

LAST = and_(
    NAME_CRF,
    or_(
        SURN,
        IN_LAST
    )
).interpretation(
    Name.last.inflected()
).match(gnc)


########
#
#   MIDDLE
#
#########

MIDDLE = PATR.interpretation(
    Name.middle.inflected()
).match(gnc)

MIDDLE_ABBR = and_(
    ABBR,
    TITLE
).interpretation(
    Name.middle
).match(gnc)


#########
#
#   FI IF
#
#########

FIRST_LAST = rule(
    FIRST,
    LAST
)

LAST_FIRST = rule(
    LAST,
    FIRST
)


###########
#
#   ABBR
#
###########

ABBR_FIRST_LAST = rule(
    FIRST_ABBR,
    '.',
    LAST
)

LAST_ABBR_FIRST = rule(
    LAST,
    FIRST_ABBR,
    '.',
)

ABBR_FIRST_MIDDLE_LAST = rule(
    FIRST_ABBR,
    '.',
    MIDDLE_ABBR,
    '.',
    LAST
)

LAST_ABBR_FIRST_MIDDLE = rule(
    LAST,
    FIRST_ABBR,
    '.',
    MIDDLE_ABBR,
    '.'
)


##############
#
#   MIDDLE
#
#############

FIRST_MIDDLE = rule(
    FIRST,
    MIDDLE
)

FIRST_MIDDLE_LAST = rule(
    FIRST,
    MIDDLE,
    LAST
)

LAST_FIRST_MIDDLE = rule(
    LAST,
    FIRST,
    MIDDLE
)


##############
#
#   SINGLE
#
#############

JUST_FIRST = FIRST

JUST_LAST = LAST


########
#
#   FULL
#
########

# Union of all supported name orderings; rebinds NAME (see note above).
NAME = or_(
    FIRST_LAST,
    LAST_FIRST,
    ABBR_FIRST_LAST,
    LAST_ABBR_FIRST,
    ABBR_FIRST_MIDDLE_LAST,
    LAST_ABBR_FIRST_MIDDLE,
    FIRST_MIDDLE,
    FIRST_MIDDLE_LAST,
    LAST_FIRST_MIDDLE,
    JUST_FIRST,
    JUST_LAST,
).interpretation(
    Name
)


class StripCrfTransformator(RuleTransformator):
    """Rule transformator that drops the NAME_CRF predicate from AND
    predicates, yielding a grammar usable without CRF tagging."""

    def visit_term(self, item):
        if isinstance(item, Rule):
            return self.visit(item)
        elif isinstance(item, AndPredicate):
            predicates = [_ for _ in item.predicates if _ != NAME_CRF]
            return AndPredicate(predicates)
        else:
            return item


SIMPLE_NAME = NAME.transform(
    StripCrfTransformator
)
3,256
1,446
#!/usr/bin/env python37 # -*- encoding: utf-8 -*- ''' @File : json2yaml.py @Time : 2020/01/12 16:44:48 @Author : BenzenPenxil @Version : 1.0 @Contact : lunzhipenxil@gmail.com @License : (C)Copyright 2017-2020, Penx.Studio @Desc : None ''' # here put the import lib import json import yaml import codecs import base64 import os import re import tkinter from tkinter import filedialog from tkinter import messagebox from tkinter import ttk import webbrowser import pyperclip from j2y_data import * j2y_version = "1.0.9.20200605.1" project_site = "http://benzenpenxil.xyz/json2yaml-for-dice/" #class type_system_info: # def __init__(self, name): # self.name = name # #system_info = type_system_info(os.name) class type_deck: def __init__(self, name, author, version, command, desc, includes, info, default, import_list): self.name = name self.author = author self.version = version self.command = command self.desc = desc self.includes = includes self.info = info self.default = default self.import_list = import_list deck = type_deck("","","","","",[],"","",[]) #测试用代码 #deck.name = "彩六干员" #deck.author = "仑质" #deck.version = "191230" #deck.command = "彩六干员" #deck.desc = "抽取彩六干员" #deck.includes = ["干员档案","干员性别"] #deck.info = "牌堆转换器测试用" #deck.default = "干员档案" input_file_name = "" output_file_name = "" output_file_name += deck.command giveback_flag = 0 versiontran_flag = 0 tabtran_flag = 0 infoadd_flag = 1 import_flag = 1 dict_import_default = {"性别": ["男", "女", "不明"]} dict_import_default.update(dict_from_shiki) dict_for_import = {} dict_for_import.update(dict_import_default) #dict_for_import.update({"测试": ["测试"]}) list_for_import_record = [] def filter_emoji(desstr, restr="[EMOJI]"): try: co = re.compile(u'[\U00010000-\U0010ffff]') except re.error: co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') return co.sub(restr, desstr) def item_get_import_list(dict_this): output_list = [] for key_this in list(dict_this.keys()): for item_this in dict_this.get(key_this): for in_item_key_this in 
re.finditer("\{%{0,1}(.*?)\}", item_this): in_item_key_this_str = in_item_key_this.group().lstrip("{").lstrip("%").rstrip("}") if in_item_key_this_str.find("{") < 0 and in_item_key_this_str.find("}") < 0: if in_item_key_this_str in list(dict_this.keys()): pass else: if in_item_key_this_str in output_list: pass else: output_list.append(in_item_key_this_str) return output_list def str_get_import_list(str_this, dict_this): global list_for_import_record output_list = [] for in_item_key_this in re.finditer("\{%{0,1}(.*?)\}", str_this): in_item_key_this_str = in_item_key_this.group().lstrip("{").lstrip("%").rstrip("}") if in_item_key_this_str not in list_for_import_record: if in_item_key_this_str.find("{") < 0 and in_item_key_this_str.find("}") < 0: if in_item_key_this_str in list(dict_this.keys()): pass else: if in_item_key_this_str in output_list: pass else: output_list.append(in_item_key_this_str) list_for_import_record.append(in_item_key_this_str) return output_list def item_tran(item_this, flag): dice_flag = 0 item_this_new="" for i in range(0, len(item_this)): if item_this[i] == "{": if i + 1 <= len(item_this): if item_this[i + 1] != "%": if flag == 0: item_this_new += "{%" else: item_this_new += "{$" else: item_this_new += "{$" elif item_this[i] == "%": if i - 1 >= 0: if item_this[i - 1] == "{": pass else: item_this_new += item_this[i] else: item_this_new += item_this[i] elif item_this[i] == "\n": item_this_new += "\\n" elif item_this[i] == ",": item_this_new += "," elif item_this[i] == "D": if dice_flag == 0: item_this_new += item_this[i] else: item_this_new += "d" elif item_this[i] == "[": dice_flag += 1 item_this_new += item_this[i] elif item_this[i] == "]": dice_flag -= 1 item_this_new += item_this[i] else: item_this_new += item_this[i] return item_this_new def item_tran2show(item_this): item_this_new = "" for i in range(0, len(item_this)): if item_this[i] == "\n": item_this_new += "\\n" elif item_this[i] == ",": item_this_new += "," else: item_this_new += 
item_this[i] item_this_new = filter_emoji(item_this_new) return item_this_new def add_import_work(dict_need_import, dict_for_import, output_str, dict_this): global tabtran_flag global giveback_flag global list_for_import_record dict_need_import_next = [] for key_this in dict_need_import: if key_this in list(dict_for_import.keys()): output_str += key_this + ":\n" if tabtran_flag != 0: output_tran_flag_tmp = 1 for item_this in dict_for_import.get(key_this): dict_need_import_next += str_get_import_list(item_this, dict_this) item_this = item_tran(item_this, giveback_flag) if output_tran_flag_tmp == 0: output_str += " - \"" + item_this + "\"\n" else: output_tran_flag_tmp = 0 output_str += " - \" " + item_this + "\"\n" if dict_need_import_next != []: output_str = add_import_work(dict_need_import_next, dict_for_import, output_str, dict_this) return output_str def json2yaml_work(): global giveback_flag global versiontran_flag global infoadd_flag global import_flag global deck global input_file_name global output_file_name global root global progress_obj global list_for_import_record progress_obj["value"] = 0 root.update() output_str = "#必要信息\nname: " + deck.name output_str += "\nauthor: " + deck.author output_str += "(使用Json2Yaml转换生成)" output_str += "\nversion: " if versiontran_flag == 0: output_str += deck.version else: output_str += deck.version.replace(".", "_") output_str += "\ncommand: " + deck.command output_str += "\ndesc: " + deck.desc if deck.includes != [""]: deck.includes_str = "" for str_now in deck.includes: deck.includes_str += " - \"" + str_now + "\"\n" output_str += "\nincludes:\n" + deck.includes_str else: output_str += "\n" if infoadd_flag != 0: output_str += "\n#作者信息\ninfo:\n - \"" + "本牌堆使用Json2Yaml(By BenzenPenxil)自动转换生成\\n转换器版本号:" + j2y_version + "\\n牌堆原作者:" + deck.author + "\"\n" output_str += "\n#牌堆部分\n" progress_obj["value"] = 5 root.update() try: with open(input_file_name,"r",encoding="utf-8") as input_file_obj: input_str = input_file_obj.read() if 
input_str.startswith(codecs.BOM_UTF8.decode("UTF-8")): input_dict = json.loads(input_str[1:], encoding="utf-8") else: input_dict = json.loads(input_str, encoding="utf-8") progress_obj["value"] = 10 root.update() except json.decoder.JSONDecodeError as error_info: input_file_name = "" file_name_str.set(file_name_head + "请确保文件的Json格式没有错误") tkinter.messagebox.showerror("json.decoder.JSONDecodeError", error_info) progress_obj["value"] = 0 root.update() except UnicodeDecodeError as error_info: input_file_name = "" file_name_str.set(file_name_head + "请确保文件编码格式是UTF-8") tkinter.messagebox.showerror("UnicodeDecodeError", error_info) progress_obj["value"] = 0 root.update() else: output_tran_flag_tmp = 0 if deck.default in input_dict: output_str += "default:\n" if tabtran_flag != 0: output_tran_flag_tmp = 1 for item_this in input_dict.get(deck.default): item_this = item_tran(item_this, giveback_flag) if output_tran_flag_tmp == 0: output_str += " - \"" + item_this + "\"\n" else: output_tran_flag_tmp = 0 output_str += " - \" " + item_this + "\"\n" progress_obj["value"] = 15 root.update() count_work = 0 for key_this in list(input_dict.keys()): count_work += len(input_dict.get(key_this)) + 1 if count_work <= 75: count_step = 1 else: count_step = int(count_work / 75) id_count_all = 0 for key_this in list(input_dict.keys()): output_str += key_this + ":\n" id_count_all += 1 progress_obj["value"] = int(id_count_all * 75 / count_work + 15) root.update() if tabtran_flag != 0: output_tran_flag_tmp = 1 for item_this in input_dict.get(key_this): item_this = item_tran(item_this, giveback_flag) if output_tran_flag_tmp == 0: output_str += " - \"" + item_this + "\"\n" else: output_tran_flag_tmp = 0 output_str += " - \" " + item_this + "\"\n" id_count_all += 1 if id_count_all % (count_step) == 0: progress_obj["value"] = int(id_count_all * 75 / count_work + 15) root.update() if import_flag != 0: list_for_import_record = [] dict_need_import_next = [] deck.import_list = 
item_get_import_list(input_dict) deck_import_list = deck.import_list.copy() for key_this in deck_import_list: if key_this in list(dict_for_import.keys()): deck.import_list.remove(key_this) output_str += key_this + ":\n" if tabtran_flag != 0: output_tran_flag_tmp = 1 for item_this in dict_for_import.get(key_this): dict_need_import_next += str_get_import_list(item_this, input_dict) item_this = item_tran(item_this, giveback_flag) if output_tran_flag_tmp == 0: output_str += " - \"" + item_this + "\"\n" else: output_tran_flag_tmp = 0 output_str += " - \" " + item_this + "\"\n" if dict_need_import_next != []: output_str = add_import_work(dict_need_import_next, dict_for_import, output_str, input_dict) if len(deck.import_list) != 0: tkinter.messagebox.showwarning("未解决的依赖项", "以下依赖项未找到:\n - " + "\n - ".join(deck.import_list) + "\n将会导致抽取时无法正常调用。") with open(output_file_name, "w", encoding="utf-8") as output_file_obj: output_file_obj.write(output_str) progress_obj["value"] = 100 root.update() def select_file(): global file_name_str global tree global input_file_name global root global progress_obj progress_obj["value"] = 0 root.update() file_name = tkinter.filedialog.askopenfilenames(title="请选择Json文件", filetypes=[("Json", "*.json"), ("All Files", "*")]) progress_obj["value"] = 5 root.update() if len(file_name) == 1: for file_name_now in file_name: try: with open(file_name_now,"r",encoding="utf-8") as input_file_obj: input_str = input_file_obj.read() if input_str.startswith(codecs.BOM_UTF8.decode("UTF-8")): input_dict = json.loads(input_str[1:], encoding="utf-8") else: input_dict = json.loads(input_str, encoding="utf-8") progress_obj["value"] = 25 root.update() except json.decoder.JSONDecodeError as error_info: input_file_name = "" file_name_str.set(file_name_head + "请确保文件的Json格式没有错误") tkinter.messagebox.showerror("json.decoder.JSONDecodeError", error_info) progress_obj["value"] = 0 root.update() except UnicodeDecodeError as error_info: input_file_name = "" 
file_name_str.set(file_name_head + "请确保文件编码格式是UTF-8") tkinter.messagebox.showerror("UnicodeDecodeError", error_info) progress_obj["value"] = 0 root.update() else: count_work = 0 for key_this in list(input_dict.keys()): count_work += len(input_dict.get(key_this)) + 1 input_file_name = file_name_now file_name_str.set(file_name_head + "\"" + file_name_now + "\"") if len(tree.get_children()) != 0: for tree_children_now in tree.get_children(): tree.delete(tree_children_now) if count_work <= 75: count_step = 1 else: count_step = int(count_work / 75) tree_id_0_count = 0 tree_id_all_count = 0 for key_this in list(input_dict.keys()): tree_id_0 = tree.insert("", tree_id_0_count, key_this + "#" + str(tree_id_all_count), text=key_this, value=str(tree_id_all_count)) tree_id_all_count += 1 tree_id_1_count = 0 progress_obj["value"] = int(tree_id_all_count * 75 / count_work + 25) root.update() for item_this in input_dict.get(key_this): item_this = item_tran2show(item_this) tree_id_1 = tree.insert(tree_id_0, tree_id_1_count, item_this + "#" + str(tree_id_all_count), text=item_this, value=str(tree_id_all_count)) tree_id_all_count += 1 tree_id_1_count += 1 if tree_id_all_count % (count_step) == 0: progress_obj["value"] = int(tree_id_all_count * 75 / count_work + 25) root.update() #print(tree_id_all_count) tree_id_0_count += 1 if import_flag != 0: import_list_tmp = item_get_import_list(input_dict) #print("|".join(import_list_tmp)) if len(import_list_tmp) != 0: tkinter.messagebox.showwarning("存在依赖项", "扫描中发现以下被引用项不包含于导入文件中:\n - " + "\n - ".join(import_list_tmp) + "\n请确保可以提供这些依赖项。" ) progress_obj["value"] = 100 root.update() #print(tree_id_all_count) #print(tree.get_children()) elif len(file_name) == 0: if len(input_file_name) == 0: file_name_str.set(file_name_head + "请选择文件!") tkinter.messagebox.showwarning("警告", "请选择文件!") progress_obj["value"] = 0 root.update() else: if len(input_file_name) == 0: file_name_str.set(file_name_head + "请一次只选择一个文件!") tkinter.messagebox.showwarning("警告", 
"请一次只选择一个文件!") progress_obj["value"] = 0 root.update() def tran_save(): global t1 global t2 global t3 global t4 global t5 global t6 global deck global input_file_name global output_file_name global root #file_name = tkinter.filedialog.asksaveasfilename(initialdir = "./test") if len(input_file_name) != 0: file_path = tkinter.filedialog.askdirectory(title="选择保存路径") if len(file_path) != 0: #print(file_path) try: deck.name = t1.get() deck.author = t2.get() deck.version = t3.get() deck.command = t1.get() deck.desc = t4.get() deck.includes = t5.get().split(",") deck.default = t6.get() except UnicodeDecodeError as error_info: tkinter.messagebox.showerror("UnicodeDecodeError", error_info) progress_obj["value"] = 0 root.update() else: output_file_name = file_path + "/" + deck.name #print(deck.includes) json2yaml_work() tkinter.messagebox.showinfo("完成","已保存到 " + output_file_name) else: tkinter.messagebox.showwarning("警告", "请选择保存路径!") progress_obj["value"] = 0 root.update() else: tkinter.messagebox.showwarning("警告", "请先选择要转换的Json文件!") progress_obj["value"] = 0 root.update() def load_import(): global dict_for_import global dict_import_default input_file_name = "" progress_obj["value"] = 0 root.update() file_name = tkinter.filedialog.askopenfilenames(title="请选择Json文件", filetypes=[("Json", "*.json"), ("All Files", "*")]) progress_obj["value"] = 5 root.update() if len(file_name) > 0: count_error = 0 for file_name_now in file_name: try: with open(file_name_now,"r",encoding="utf-8") as input_file_obj: input_str = input_file_obj.read() if input_str.startswith(codecs.BOM_UTF8.decode("UTF-8")): input_dict = json.loads(input_str[1:], encoding="utf-8") else: input_dict = json.loads(input_str, encoding="utf-8") progress_obj["value"] = 25 root.update() except json.decoder.JSONDecodeError as error_info: input_file_name = "" file_name_str.set(file_name_head + "请确保文件的Json格式没有错误") #tkinter.messagebox.showerror("json.decoder.JSONDecodeError", error_info) count_error += 1 progress_obj["value"] 
= 0 root.update() except UnicodeDecodeError as error_info: input_file_name = "" file_name_str.set(file_name_head + "请确保文件编码格式是UTF-8") #tkinter.messagebox.showerror("UnicodeDecodeError", error_info) count_error += 1 progress_obj["value"] = 0 root.update() else: dict_for_import.update(**input_dict) dict_for_import.update(**dict_import_default) #dict_for_import = {**dict_for_import, **input_dict} #print(len(dict_for_import)) progress_obj["value"] = 100 root.update() tkinter.messagebox.showinfo("依赖项已更新", "当前已载入" + str(len(dict_for_import)) + "个备用依赖项\n尝试载入" + str(len(file_name)) + "个文件\n其中共有" + str(count_error) + "个加载失败") elif len(file_name) == 0: if len(input_file_name) == 0: file_name_str.set(file_name_head + "请选择文件!") tkinter.messagebox.showwarning("警告", "请选择文件!") progress_obj["value"] = 0 root.update() else: if len(input_file_name) == 0: file_name_str.set(file_name_head + "请一次只选择一个文件!") tkinter.messagebox.showwarning("警告", "请一次只选择一个文件!") progress_obj["value"] = 0 root.update() def clear_conf(): global t1 global t2 global t3 global t4 global t5 global t6 t1.set("") t2.set("") t3.set("") t4.set("") t5.set("") t6.set("") def giveback_switch(): global giveback_flag global root global set_menu if giveback_flag == 0: giveback_flag = 1 set_menu.entryconfig(1, label="忽略不放回[√]") else: giveback_flag = 0 set_menu.entryconfig(1, label="忽略不放回[×]") #print(str(giveback_flag)) def versiontran_switch(): global versiontran_flag global root global set_menu if versiontran_flag == 0: versiontran_flag = 1 set_menu.entryconfig(2, label="版本号优化[√]") else: versiontran_flag = 0 set_menu.entryconfig(2, label="版本号优化[×]") def tabtran_switch(): global tabtran_flag global root global set_menu if tabtran_flag == 0: tabtran_flag = 1 set_menu.entryconfig(3, label="排版格式优化[√]") else: tabtran_flag = 0 set_menu.entryconfig(3, label="排版格式优化[×]") def infoadd_switch(): global infoadd_flag global root global set_menu if infoadd_flag == 0: infoadd_flag = 1 set_menu.entryconfig(4, label="附加Info项[√]") else: 
infoadd_flag = 0 set_menu.entryconfig(4, label="附加Info项[×]") def import_switch(): global import_flag global root global set_menu if import_flag == 0: import_flag = 1 set_menu.entryconfig(5, label="尝试解决依赖项[√]") else: import_flag = 0 set_menu.entryconfig(5, label="尝试解决依赖项[×]") def show_info(): tkinter.messagebox.showinfo("Json2Yaml By BenzenPenxil","Json2Yaml基于Python\n\n项目主页:\n" + project_site + "\n\n作者:仑质(BenzenPenxil)\n版本:" + j2y_version + "\n有问题请联系QQ:137334701") def show_project_site(): tkinter.messagebox.showinfo("提示", "将通过浏览器访问 " + project_site) try: webbrowser.open(project_site) except webbrowser.Error as error_info: tkinter.messagebox.showerror("webbrowser.Error", error_info) def tree_copy(obj, event=None): length_select = len(obj.selection()) if length_select != 0: str_select = obj.selection()[0] str_select_len = len(str_select) for i in range(1, str_select_len): if str_select[-i] == "#": pyperclip.copy(str_select[-str_select_len: - i]) break def tree_set_name(obj, event=None): global t1 length_select = len(obj.selection()) if length_select != 0: str_select = obj.selection()[0] str_select_len = len(str_select) for i in range(1, str_select_len): if str_select[-i] == "#": t1.set(str_select[-str_select_len: - i]) break def tree_add_includes(obj, event=None): global t5 length_select = len(obj.selection()) if length_select != 0: str_select = obj.selection()[0] str_select_len = len(str_select) for i in range(1, str_select_len): if str_select[-i] == "#": if t5.get().split(",") != [""]: if str_select[-str_select_len: - i] in t5.get().split(","): pass else: tmp_t5_str = t5.get().split(",") tmp_t5_str.append(str_select[-str_select_len: - i]) t5.set(",".join(tmp_t5_str)) else: t5.set(str_select[-str_select_len: - i]) break def tree_set_default(obj, event=None): global t5 global t6 length_select = len(obj.selection()) if length_select != 0: str_select = obj.selection()[0] str_select_len = len(str_select) for i in range(1, str_select_len): if str_select[-i] == "#": 
t6.set(str_select[-str_select_len: - i]) if t5.get().split(",") != [""]: if "default" in t5.get().split(","): pass else: tmp_t5_str = t5.get().split(",") tmp_t5_str.append("default") t5.set(",".join(tmp_t5_str)) else: t5.set("default") break def tree_rightKey(event, obj): tree_rightkey_menu.delete(0, tkinter.END) tree_rightkey_menu.add_command(label="复制", command=lambda: tree_copy(obj, event)) tree_rightkey_menu.add_command(label="设为名称/指令", command=lambda: tree_set_name(obj, event)) tree_rightkey_menu.add_command(label="加入子指令", command=lambda: tree_add_includes(obj, event)) tree_rightkey_menu.add_command(label="设为default", command=lambda: tree_set_default(obj, event)) tree_rightkey_menu.post(event.x_root, event.y_root) def entry_cut(editor, event=None): editor.event_generate("<<Cut>>") def entry_copy(editor, event=None): editor.event_generate("<<Copy>>") def entry_paste(editor, event=None): editor.event_generate('<<Paste>>') def entry_clear(editor, event=None): global root root.globalsetvar(editor["textvariable"], "") def entry_rightKey(event, editor): entry_rightkey_menu.delete(0, tkinter.END) entry_rightkey_menu.add_command(label='剪切', command=lambda: entry_cut(editor)) entry_rightkey_menu.add_command(label='复制', command=lambda: entry_copy(editor)) entry_rightkey_menu.add_command(label='粘贴', command=lambda: entry_paste(editor)) entry_rightkey_menu.add_command(label='清空', command=lambda: entry_clear(editor)) entry_rightkey_menu.post(event.x_root,event.y_root) if __name__ == "__main__": root = tkinter.Tk() root.title("Json2Yaml By BenzenPenxil") root.geometry("560x624") root.resizable(width=False, height=False) file_name_head = "Json文件路径:" file_name_str = tkinter.StringVar() file_name_str.set(file_name_head + "请选择文件!") L1 = tkinter.Label(root, textvariable = file_name_str) L1.place(x=0, y=0, width=560,height=32) entry_rightkey_menu = tkinter.Menu(root,tearoff=False) EtL1 = tkinter.Label(root, text = "名称/指令") EtL1.place(x=0, y=432, width=60,height=32) t1 = 
tkinter.StringVar() t1.set("名称与指令") Et1 = tkinter.Entry(root, textvariable=t1) Et1.place(x=60, y=432, width=500, height=32) Et1.bind("<Button-3>", lambda x: entry_rightKey(x, Et1)) EtL2 = tkinter.Label(root, text = "作者") EtL2.place(x=0, y=464, width=60,height=32) t2 = tkinter.StringVar() t2.set("作者") Et2 = tkinter.Entry(root, textvariable=t2) Et2.place(x=60, y=464, width=500, height=32) Et2.bind("<Button-3>", lambda x: entry_rightKey(x, Et2)) EtL3 = tkinter.Label(root, text = "版本") EtL3.place(x=0, y=496, width=60,height=32) t3 = tkinter.StringVar() t3.set("版本") Et3 = tkinter.Entry(root, textvariable=t3) Et3.place(x=60, y=496, width=500, height=32) Et3.bind("<Button-3>", lambda x: entry_rightKey(x, Et3)) EtL4 = tkinter.Label(root, text = "描述") EtL4.place(x=0, y=528, width=60,height=32) t4 = tkinter.StringVar() t4.set("描述") Et4 = tkinter.Entry(root, textvariable=t4) Et4.place(x=60, y=528, width=500, height=32) Et4.bind("<Button-3>", lambda x: entry_rightKey(x, Et4)) EtL5 = tkinter.Label(root, text = "子指令") EtL5.place(x=0, y=560, width=60,height=32) t5 = tkinter.StringVar() t5.set("子指令") Et5 = tkinter.Entry(root, textvariable=t5) Et5.place(x=60, y=560, width=500, height=32) Et5.bind("<Button-3>", lambda x: entry_rightKey(x, Et5)) EtL6 = tkinter.Label(root, text = "Default") EtL6.place(x=0, y=592, width=60,height=32) t6 = tkinter.StringVar() t6.set("Default") Et6 = tkinter.Entry(root, textvariable=t6) Et6.place(x=60, y=592, width=500, height=32) Et6.bind("<Button-3>", lambda x: entry_rightKey(x, Et6)) #Btn1 = tkinter.Button(root, text = "选择文件", command = select_file) #Btn1.place(x=500, y=0, width=60, height=32) #Btn2 = tkinter.Button(root, text="开始转换", command = tran_save) #Btn2.place(x=500, y=592, width=60, height=32) #Btn3 = tkinter.Button(root, text="i", command = show_info) #Btn3.place(x=528, y=432, width=32, height=32) menu_bar = tkinter.Menu(root) file_menu = tkinter.Menu(menu_bar, tearoff=0) set_menu = tkinter.Menu(menu_bar, tearoff=0) info_menu = 
tkinter.Menu(menu_bar, tearoff=0) menu_bar.add_cascade(label="文件", menu=file_menu) menu_bar.add_cascade(label="操作", menu=set_menu) menu_bar.add_cascade(label="关于", menu=info_menu) file_menu.add_command(label="导入文件", command=select_file) file_menu.add_command(label="开始转换", command=tran_save) file_menu.add_command(label="加载依赖项", command=load_import) set_menu.add_command(label="清空所有设置栏", command=clear_conf) set_menu.add_command(label="忽略不放回[×]", command=giveback_switch) set_menu.add_command(label="版本号优化[√]", command=versiontran_switch) set_menu.add_command(label="排版格式优化[×]", command=tabtran_switch) set_menu.add_command(label="附加Info项[√]", command=infoadd_switch) set_menu.add_command(label="尝试解决依赖项[√]", command=import_switch) info_menu.add_command(label="关于", command=show_info) info_menu.add_command(label="查看项目", command=show_project_site) root.config(menu=menu_bar) if giveback_flag != 0: set_menu.entryconfig(1, label="忽略不放回[√]") else: set_menu.entryconfig(1, label="忽略不放回[×]") if versiontran_flag != 0: set_menu.entryconfig(2, label="版本号优化[√]") else: set_menu.entryconfig(2, label="版本号优化[×]") if tabtran_flag != 0: set_menu.entryconfig(3, label="排版格式优化[√]") else: set_menu.entryconfig(3, label="排版格式优化[×]") if infoadd_flag != 0: set_menu.entryconfig(4, label="附加Info项[√]") else: set_menu.entryconfig(4, label="附加Info项[×]") if import_flag != 0: set_menu.entryconfig(5, label="尝试解决依赖项[√]") else: set_menu.entryconfig(5, label="尝试解决依赖项[×]") t1.set("填入牌堆名,这同时也将是该牌堆的对应指令") t2.set("填入作者") t3.set("填入版本号") t4.set("填入对于该牌堆的描述") t5.set(",".join(["填入子指令并用半角逗号隔开"])) t6.set("要设置子指令缺省时的调用项请设置此项") #测试用 #t1.set(deck.name) #t2.set(deck.author) #t3.set(deck.version) #t4.set(deck.desc) #t5.set(",".join(deck.includes)) #t6.set(deck.default) tree = ttk.Treeview(root) tree.place(x=0, y=32, width=545, height=401) tree_rightkey_menu = tkinter.Menu(root,tearoff=False) tree.bind("<Button-3>", lambda x: tree_rightKey(x, tree)) progress_obj = ttk.Progressbar(root, orient="horizontal", length=560, 
mode="determinate") progress_obj.place(x=0, y=32, width=560, height=25) progress_obj["maximum"] = 100 progress_obj["value"] = 0 tree.columnconfigure(0, weight=1) tree_yscroll = ttk.Scrollbar(root, orient="vertical", command=tree.yview) tree_yscroll.place(x=544, y=57, width=16, height=375) tree.configure(yscrollcommand=tree_yscroll.set) with open("tmp.ico", "wb+") as tmp: tmp.write(base64.b64decode(favicon_ico)) root.iconbitmap("tmp.ico") os.remove("tmp.ico") root.mainloop() #json2yaml_work()
31,144
11,474
import logging import os import math from metagen.helpers.exceptions import ValidationError logger = logging.getLogger(__name__) cli_log = logging.getLogger("metagen.cli") def key_len(value, type_="ApiKey"): """Ensure an API Key or ID has valid length.""" if value is not None and len(value) < 36: l = len(value) raise ValidationError("{} must be 36 characters long, not {}".format(type_.upper(), str(l))) else: return value def collapse_path(path): """Convert a path back to ~/ from expanduser().""" home_dir = os.path.expanduser("~") abs_path = os.path.abspath(path) return abs_path.replace(home_dir, "~") def is_file(string): if os.path.exists(string): return True else: return False def convert_size(size): if (size == 0): return '0B' size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") index = int(math.floor(math.log(size, 1024))) p = math.pow(1024, index) s = round(size/p, 2) return '{}{}'.format(s, size_name[index])
1,058
383
from behave import * from hamcrest import * import subprocess import shlex import os import tempfile import utils @given('a local copy of the repo on the {branch} branch') def step_impl(context, branch): context.mock_developer_dir = tempfile.mkdtemp(prefix='kevlar') utils.shell_command('git -C {0} clone -q file:///{1} . -b {2}'.format(context.mock_developer_dir, context.mock_github_dir, branch)) utils.shell_command('git -C {0} checkout -q {1}'.format(context.mock_developer_dir, branch)) utils.shell_command('git -C {0} config --local user.signingkey 794267AC'.format(context.mock_developer_dir)) utils.shell_command('git -C {0} config --local user.name "Local Test"'.format(context.mock_developer_dir)) utils.shell_command('git -C {0} config --local user.email "donut-reply@polysync.io"'.format(context.mock_developer_dir)) utils.shell_command('git -C {0} config --local gpg.program gpg2'.format(context.mock_developer_dir)) @given('I create a new {branch} branch') def step_impl(context, branch): command = 'git -C {0} checkout -b {1}'.format(context.mock_developer_dir, branch) utils.run_with_project_in_path(command, context) @given('the {release_tag} release tag already exists') def step_impl(context, release_tag): command = 'git -C {0} tag -s {1} -m {1}'.format(context.mock_github_dir, release_tag) utils.run_with_project_in_path(command, context) @given('the GPG signing key is not available') def step_impl(context): command = 'git -C {0} config --local user.signingkey 00000000'.format(context.mock_developer_dir) utils.run_with_project_in_path(command, context) @given('I have done some work on the repo') def step_impl(context): utils.shell_command('cp -a {0}/features/test_file.txt {1}/test_file.txt'.format(os.getcwd(), context.mock_developer_dir)) @given('the project contains subdirectory {directory}') def step_impl(context, directory): wd = '{0}/{1}'.format(context.mock_developer_dir, directory) utils.run_with_project_in_path('mkdir 
{0}/{1}'.format(context.mock_developer_dir, directory), context) context.wd = wd @given('the {branch} branch contains unsigned commits') def step_impl(context, branch): utils.run_with_project_in_path('git -C {0} commit --allow-empty --no-gpg-sign -m "creating an unsigned commit"'.format(context.mock_developer_dir), context) @given('the {tag} tag is unsigned') def step_impl(context, tag): utils.run_with_project_in_path('git -C {0} tag -a {1} -m {1}'.format(context.mock_developer_dir, tag), context) @given('the {tag} tag contains unsigned commits') def step_impl(context, tag): utils.run_with_project_in_path('git -C {0} commit --allow-empty --no-gpg-sign -m "creating an unsigned commit"'.format(context.mock_developer_dir), context) utils.run_with_project_in_path('git -C {0} tag -s {1} -m {1}'.format(context.mock_developer_dir, tag), context) @when('the {command} command is run with the -h flag') def step_impl(context, command): cmd = 'git -C {0} {1} -h'.format(context.mock_developer_dir, command) context.out, context.err, context.rc = utils.run_with_project_in_path(cmd, context) @when('I run git {action} from the {directory} directory') def step_impl(context, action, directory): command = 'git -C {0} {1}'.format(context.wd, action) context.out, context.err, context.rc = utils.run_with_project_in_path(command, context) @when('I run git-{action}') def step_impl(context, action): command = 'git -C {0} {1}'.format(context.mock_developer_dir, action) context.out, context.err, context.rc = utils.run_with_project_in_path(command, context) @then('the script should return {exit_code}') def step_impl(context, exit_code): assert_that(context.rc, equal_to(int(exit_code))) @then('the merge commit should be signed') def step_impl(context): command = "git -C {0} verify-commit {1}".format(context.mock_github_dir, context.sha_hash) unused, verify_output, rc = utils.run_with_project_in_path(command, context) assert_that(verify_output, contains_string('Signature made')) @then('the repo 
should be returned to the state it was in before I ran the script') def step_impl(context): exists = False original_string = 'A working file with some text' with open('{0}/test_file.txt'.format(context.mock_developer_dir), 'r') as check_file: for line in check_file: if original_string in line: exists = True assert_that(exists, True) @then('the repo should be returned to the {branch} branch when I am done') def step_impl(context, branch): out, err, rc = utils.run_with_project_in_path('git -C {0} branch'.format(context.mock_developer_dir), context) assert_that(out, contains_string(branch)) @then('the {directory} directory should exist when I am done') def step_impl(context, directory): out, err, rc = utils.shell_command('ls {0}/{1}'.format(context.mock_developer_dir, directory)) assert_that(context.rc, equal_to(0)) @then('the terminal displays usage options for the {command} command') def step_impl(context, command): assert_that(context.out, contains_string('usage:')) @then('the terminal prints an error') def step_impl(context): assert_that(context.out, contains_string('ERROR:')) @then('the script exits with status 0') def step_impl(context): assert_that(context.rc, equal_to(0))
5,387
1,813
import math def obs2state(obs, multiplier=1000): x_pos = int(math.floor(obs[0]*multiplier)) y_pos = int(math.floor(obs[1]*multiplier)) y_vel = y_vel = int(obs[2]) state_string = str(x_pos) + '_' + str(y_pos) + '_' + str(y_vel) return state_string
268
113
"""Version details for python-marketman This file shamelessly taken from the requests library""" __title__ = 'python-marketman' __description__ = 'A basic Marketman.com REST API client.' __url__ = 'https://github.com/LukasKlement/python-marketman' __version__ = '0.1' __author__ = 'Lukas Klement' __author_email__ = 'lukas.klement@me.com' __license__ = 'Apache 2.0' __maintainer__ = 'Lukas Klement' __maintainer_email__ = 'lukas.klement@me.com' __keywords__ = 'python marketman marketman.com'
494
180
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Plots exceedance frequency and the relationships between discharge and drainage area for gauged basins Written by Adam M. Forte for "Low variability runoff inhibits coupling of climate, tectonics, and topography in the Greater Caucasus" If you use this code or derivatives, please cite the original paper. """ import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib import colors from cmcrameri import cm def survive(Q): Qstar=Q/np.mean(Q) Qstar_sort=np.sort(Qstar) Qn=len(Qstar) Qrank=np.arange(1,Qn+1,1) Q_freq_excd=(Qn+1-Qrank)/Qn return Qstar_sort,Q_freq_excd # Load data from gauged basins qdf=pd.read_csv('data_tables/grdc_summary_values.csv') mR=qdf['mean_runoff_mm_day'].to_numpy() da=qdf['DA_km2'].to_numpy() mRain=qdf['mnTRMM_mm_day'].to_numpy() ID=qdf['ID'].to_numpy() N=len(ID) f1=plt.figure(num=1,figsize=(14,5)) ax1=plt.subplot(1,2,1) ax1.set_ylim((10**-4,1)) ax1.set_xlim((0.01,500)) ax1.set_yscale('log') ax1.set_xscale('log') ax1.set_xlabel('Runoff [mm/day]') ax1.set_ylabel('Exceedance Frequency') rain_norm=colors.Normalize(vmin=1,vmax=6) mQ=np.zeros(ID.shape) for i in range(N): df=pd.read_csv('data_tables/grdc_discharge_time_series/GRDC_'+str(ID[i])+'.csv') Q=df['Q'].to_numpy() mQ[i]=np.mean(Q)/(60*60*24) [Qstar_sort,Q_freq_excd]=survive(Q) Rainv=np.ones(Q.shape)*mRain[i] ax1.scatter(Qstar_sort*mR[i],Q_freq_excd,c=Rainv,norm=rain_norm,cmap=cm.batlow_r,s=2) ax2=plt.subplot(1,2,2) ax2.set_ylim((10**-1,10**3)) ax2.set_xlim((50,5000)) ax2.set_yscale('log') ax2.set_xscale('log') ax2.set_xlabel('Catchment Area [km]') ax2.set_ylabel('Mean Q [m3/s]') runs=np.arange(1,7,1) inter=np.linspace(0,1,len(runs)) colors=[cm.batlow_r(x) for x in inter] sc1=ax2.scatter(da,mQ,c=mRain,marker='o',s=40,norm=rain_norm,cmap=cm.batlow_r) xx=np.linspace(50,5000,100) con=(1000**2)/(1000*24*60*60) for i, color in enumerate(colors): ax2.plot(xx,xx*runs[i]*con,c=color,zorder=0,linestyle=':') 
cbar1=plt.colorbar(sc1,ax=ax2) cbar1.ax.set_ylabel('Mean Rainfall [mm/day]') f1.savefig('dischage.pdf') f1.savefig('dischage.tif',dpi=300)
2,191
1,071
from flask import Blueprint api = Blueprint('api', __name__) from . import authentication, videos, shows, users, comments, errors
131
35
from openatom.UNIVERSAL_CONSTANTS import *
from openatom.azimuthal_quantum_number import AzimuthalQNum


class PrincipalQNum():
    """Electron shell for principal quantum number n = shellIdx + 1.

    Holds the spectroscopic shell label ('K', 'L', 'M', ...) and one
    AzimuthalQNum for each allowed azimuthal quantum number l = 0 .. n-1.
    """

    def __init__(self, shellIdx):
        self.label = self.assignShellLabel(shellIdx)
        self.principalQuantumNumVal = shellIdx + 1
        # BUG FIX: the previous comprehension passed len(self.azimuthalArray),
        # evaluated against the just-emptied list, so it was always 0 and
        # every subshell was created with l = 0.  For principal quantum
        # number n the azimuthal quantum number must run over 0 .. n-1.
        self.azimuthalArray = [AzimuthalQNum(l)
                               for l in range(self.principalQuantumNumVal)]
        # shellArray.append(PrincipalQNum(len(shellArray)))

    def assignShellLabel(self, shellIdx):
        """Map a 0-based shell index to its spectroscopic letter.

        Raises KeyError for indices beyond 5 ('P').
        """
        shellMap = {
            0: 'K',
            1: 'L',
            2: 'M',
            3: 'N',
            4: 'O',
            5: 'P'
        }
        return shellMap[shellIdx]
706
238
""" Build SSW scripts from Jinja 2 templates """ import os import datetime import subprocess import tempfile from jinja2 import (Environment as Env, FileSystemLoader, PackageLoader) from scipy.io import readsav from .read_config import defaults from .util import SSWIDLError, IDLLicenseError class Environment(object): """ Environment for running SSW and IDL scripts Parameters ---------- ssw_packages : list, optional List of SSW packages to load, e.g. 'sdo/aia', 'chianti' ssw_paths : list, optional List of SSW paths to pass to `ssw_path` extra_paths : list, optional Additional paths to add to the IDL namespace ssw_home : str, optional Root of SSW tree idl_home : str, optional Path to IDL executable """ def __init__(self, ssw_packages=None, ssw_paths=None, extra_paths=None, ssw_home=None, idl_home=None,): self.ssw_packages = ssw_packages if ssw_packages is not None else [] self.ssw_paths = ssw_paths if ssw_paths is not None else [] self.extra_paths = extra_paths if extra_paths is not None else [] self.env = Env(loader=PackageLoader('hissw', 'templates')) self._setup_home(ssw_home, idl_home,) def _setup_home(self, ssw_home, idl_home,): """ Setup SSW and IDL home locations """ self.ssw_home = defaults['ssw_home'] if ssw_home is None else ssw_home if self.ssw_home is None: raise ValueError('''ssw_home must be set at instantiation or in the hisswrc file.''') self.idl_home = defaults['idl_home'] if idl_home is None else idl_home if self.idl_home is None: raise ValueError('''idl_home must be set at instantiation or in the hisswrc file.''') def custom_script(self, script, args): """ Generate custom IDL scripts from templates """ if os.path.isfile(script): env = Env(loader=FileSystemLoader(os.path.dirname(script))) idl_script = env.get_template(os.path.basename(script)).render(**args) else: env = Env() idl_script = env.from_string(script).render(**args) return idl_script def procedure_script(self, script, save_vars, save_filename): """ Render inner procedure file """ if 
save_vars is None: save_vars = [] params = {'script': script, 'save_vars': save_vars, 'save_filename': save_filename} return self.env.get_template('procedure.pro').render(**params) def command_script(self, procedure_filename): """ Generate parent IDL script """ params = {'ssw_paths': self.ssw_paths, 'extra_paths': self.extra_paths, 'procedure_filename': procedure_filename} return self.env.get_template('parent.pro').render(**params) def shell_script(self, command_filename): """ Generate shell script for starting up SSWIDL """ params = {'ssw_home': self.ssw_home, 'ssw_packages': self.ssw_packages, 'idl_home': self.idl_home, 'command_filename': command_filename} return self.env.get_template('startup.sh').render(**params) def run(self, script, args=None, save_vars=None, verbose=True): """ Set up the SSWIDL environment and run the supplied scripts. Parameters ---------- script : str Literal script or path to script file args : dict, optional Input arguments to script save_vars : list, optional Variables to save and return from the IDL namespace verbose : bool, optional """ args = {} if args is None else args with tempfile.TemporaryDirectory() as tmpdir: # Get filenames fn_template = os.path.join( tmpdir, '{name}_'+datetime.datetime.now().strftime('%Y%m%d-%H%M%S')+'.{ext}') save_filename = fn_template.format(name='idl_vars', ext='sav') procedure_filename = fn_template.format(name='idl_procedure', ext='pro') command_filename = fn_template.format(name='idl_script', ext='pro') shell_filename = fn_template.format(name='ssw_shell', ext='sh') # Render and save scripts idl_script = self.custom_script(script, args) with open(procedure_filename, 'w') as f: f.write(self.procedure_script(idl_script, save_vars, save_filename)) with open(command_filename, 'w') as f: f.write(self.command_script(procedure_filename)) with open(shell_filename, 'w') as f: f.write(self.shell_script(command_filename,)) # Execute subprocess.call(['chmod', 'u+x', shell_filename]) cmd_output = 
subprocess.run([shell_filename], shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE) self._check_for_errors(cmd_output, verbose) results = readsav(save_filename) return results def _check_for_errors(self, output, verbose): """ Check IDL output to try and decide if an error has occurred """ stdout = output.stdout.decode('utf-8') stderr = output.stderr.decode('utf-8') # NOTE: For some reason, not only errors are output to stderr so we # have to check it for certain keywords to see if an error occurred if 'execution halted' in stderr.lower(): raise SSWIDLError(stderr) if 'failed to acquire license' in stderr.lower(): raise IDLLicenseError(stderr) if verbose: print(f'{stderr}\n{stdout}')
5,853
1,690
""" Entity-relation diagram (ERD) GraphViz dot-file generator. Usage: python -m erd db.json -o db.dot Then pass the result to the GraphViz `dot` tool: dot db.dot -T png -o db.png Inspired by: https://github.com/ehne/ERDot """ import argparse import json import re from pathlib import Path from typing import Dict, List import pydantic class Spec(pydantic.BaseModel): tables: Dict[str, Dict[str, str]] = {} enums: Dict[str, List[str]] = {} relations: List[str] = [] # NOTE: this would be simpler with a declarative templating tool (e.g. Jinja2), # but we don't have one in the project at the moment of writing this tool. # So, imperative we go... FONT = "Arial" COLUMN_TYPE_COLOR = "gray40" # See: https://graphviz.org/doc/info/colors.html GRAPHVIZ_TEMPLATE = """ digraph G {{ graph [ nodesep=0.5; rankdir="LR"; cencentrate=true; splines="spline"; fontname="{font}"; pad="0.2,0.2" ]; node [shape=plain, fontname="{font}"]; edge [ dir=both, fontsize=12, arrowsize=0.9, penwidth=1.0, labelangle=32, labeldistance=1.8, fontname="{font}]" ]; {tables} {enums} {relations} }} """ def render_table(name: str, columns: Dict[str, str]) -> str: label_lines = [ '<table border="0" cellborder="1" cellspacing="0">', f"<tr><td><i>{name}</i></td></tr>", ] for key, type_ in columns.items(): port = key.replace("+", "").replace("*", "") display_name = key.replace("*", "PK ").replace("+", "FK ") label_lines += [ f'<tr><td port="{port}" align="left" cellpadding="5">{display_name}' " " f'<font color="{COLUMN_TYPE_COLOR}">{type_}</font></td></tr>' ] label_lines += ["</table>"] label = "\n".join(label_lines) return f'"{name}" [label=<{label}>];' def render_enum(name: str, items: List[str]) -> str: label_lines = [ '<table border="0" cellborder="1" cellspacing="0">', f"<tr><td><i>{name}</i></td></tr>", ] label_lines += [ f'<tr><td align="left" cellpadding="5">{item}</td></tr>' for item in items ] label_lines += ["</table>"] label = "\n".join(label_lines) return f'"{name}" [label=<{label}>];' def 
render_relation(relation: str) -> str: # Example: src:dest_id *--1 dest:id m = re.match( r"^(?P<source_name>\w+):(?P<source_fk>\w+) (?P<left_cardinality>[\d\+\*])--(?P<right_cardinality>[\d\+\*]) (?P<dest_name>\w+):(?P<dest_pk>\w+)$", # noqa: E501 relation, ) assert m is not None, f"Invalid relation format: {relation!r}" ( source_name, source_fk, left_cardinality, right_cardinality, dest_name, dest_pk, ) = m.groups() left_props = { "*": "arrowtail=ocrow", "+": "arrowtail=ocrowtee", }.get(left_cardinality, "arrowtail=noneotee") right_props = { "*": "arrowtail=ocrow", "+": "arrowtail=ocrowtee", }.get(right_cardinality, "arrowtail=noneotee") return "\n".join( ( f'"{source_name}":"{source_fk}"->"{dest_name}":"{dest_pk}" [', f"{right_props},", f"{left_props},", "];", ) ) def render(content: str) -> str: spec = Spec(**json.loads(content)) tables = "\n".join( render_table(name, columns) for name, columns in spec.tables.items() ) enums = "\n".join(render_enum(name, items) for name, items in spec.enums.items()) relations = "\n".join(render_relation(relation) for relation in spec.relations) return GRAPHVIZ_TEMPLATE.format( font=FONT, tables=tables, enums=enums, relations=relations ) def main() -> None: parser = argparse.ArgumentParser() parser.add_argument("input_file", type=Path) parser.add_argument("-o", "--output-file", type=Path) args = parser.parse_args() content = args.input_file.read_text() result = render(content) args.output_file.write_text(result) if __name__ == "__main__": main()
4,129
1,505
# pylint: disable=no-self-use,invalid-name import unittest import spacy from scispacy.hyponym_detector import HyponymDetector class TestHyponymDetector(unittest.TestCase): def setUp(self): super().setUp() self.nlp = spacy.load("en_core_sci_sm") self.detector = HyponymDetector(self.nlp, extended=True) self.nlp.add_pipe("hyponym_detector", config={"extended": True}, last=True) def test_sentences(self): text = ( "Recognizing that the preferred habitats for the species " "are in the valleys, systematic planting of keystone plant " "species such as fig trees (Ficus) creates the best microhabitats." ) doc = self.nlp(text) fig_trees = doc[21:23] plant_species = doc[17:19] assert doc._.hearst_patterns == [("such_as", plant_species, fig_trees)] doc = self.nlp("SARS, or other coronaviruses, are bad.") assert doc._.hearst_patterns == [("other", doc[4:5], doc[0:1])] doc = self.nlp("Coronaviruses, including SARS and MERS, are bad.") assert doc._.hearst_patterns == [ ("include", doc[0:1], doc[3:4]), ("include", doc[0:1], doc[5:6]), ] def test_find_noun_compound_head(self): doc = self.nlp("The potassium channel is good.") head = self.detector.find_noun_compound_head(doc[1]) assert head == doc[2] doc = self.nlp("Planting of large plants.") head = self.detector.find_noun_compound_head(doc[3]) # Planting is a noun, but not a compound with 'plants'. assert head != doc[0] assert head == doc[3] def test_expand_noun_phrase(self): doc = self.nlp("Keystone plant habitats are good.") chunk = self.detector.expand_to_noun_compound(doc[1], doc) assert chunk == doc[0:3]
1,862
634
import os
from contextlib import contextmanager

import pytest
from django import setup as django_setup
from django.core.cache import caches
from django.test import TransactionTestCase

# Transaction rollback emulation
# http://docs.djangoproject.com/en/2.0/topics/testing/overview/#rollback-emulation
TransactionTestCase.serialized_rollback = True


@pytest.fixture
def api_user():
    """Create, save, and return an active user with a known password."""
    # Imported lazily so Django settings are configured first.
    from django.contrib.auth import get_user_model
    user_model = get_user_model()
    user = user_model(username='test', email='test@test.ru', is_active=True)
    user.set_password('test_password')
    user.save()
    return user


def pytest_configure():
    """Point Django at the project settings and initialize it before collection."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "_project_.settings")
    django_setup()


@pytest.fixture(scope='session')
def base_url(live_server):
    """Base URL of the live test server, for browser/HTTP-level tests."""
    return live_server.url


@pytest.fixture(autouse=True)
def clear_caches():
    """Clear every configured Django cache before each test."""
    for cache in caches.all():
        cache.clear()


# HELPERS

@pytest.fixture(scope='function')
def assert_num_queries_lte(pytestconfig):
    """Context-manager factory asserting at most `num` SQL queries were run.

    Usage::

        with assert_num_queries_lte(3):
            ...code under test...

    With pytest -v, the failure message includes the captured SQL.
    """
    from django.db import connection
    from django.test.utils import CaptureQueriesContext

    @contextmanager
    def _assert_num_queries(num):
        with CaptureQueriesContext(connection) as context:
            yield
        queries = len(context)
        if queries > num:
            msg = f"Expected to perform less then {num} queries" \
                  f" but {queries} were done"
            if pytestconfig.getoption('verbose') > 0:
                sqls = (q['sql'] for q in context.captured_queries)
                msg += '\n\nQueries:\n========\n\n%s' % '\n\n'.join(sqls)
            else:
                msg += " (add -v option to show queries)"
            pytest.fail(msg)

    return _assert_num_queries
1,787
552
"""Steps up and down""" import calendar import numpy as np from pandas.io.sql import read_sql from pyiem import network from pyiem.plot.use_agg import plt from pyiem.util import get_autoplot_context, get_dbconn PDICT = {'spring': '1 January - 30 June', 'fall': '1 July - 31 December'} def get_description(): """ Return a dict describing how to call this plotter """ desc = dict() desc['data'] = True desc['description'] = """This plot analyzes the number of steps down in low temperature during the fall season and the number of steps up in high temperature during the spring season. These steps are simply having a newer colder low or warmer high for the season to date period. """ desc['arguments'] = [ dict(type='station', name='station', default='IA2203', label='Select Station', network='IACLIMATE'), dict(type='select', name='season', options=PDICT, label='Select which half of year', default='fall'), ] return desc def plotter(fdict): """ Go """ pgconn = get_dbconn('coop') ctx = get_autoplot_context(fdict, get_description()) station = ctx['station'] season = ctx['season'] table = "alldata_%s" % (station[:2],) nt = network.Table("%sCLIMATE" % (station[:2],)) df = read_sql(""" WITH obs as ( SELECT day, year, month, high, low, case when month > 6 then 'fall' else 'spring' end as season from """ + table + """ WHERE station = %s), data as ( SELECT year, day, season, max(high) OVER (PARTITION by year, season ORDER by day ASC ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as mh, min(low) OVER (PARTITION by year, season ORDER by day ASC ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as ml from obs), lows as ( SELECT year, day, ml as level, season, rank() OVER (PARTITION by year, ml ORDER by day ASC) from data WHERE season = 'fall'), highs as ( SELECT year, day, mh as level, season, rank() OVER (PARTITION by year, mh ORDER by day ASC) from data WHERE season = 'spring') (SELECT year, day, extract(doy from day) as doy, level, season from lows WHERE rank = 1) UNION (SELECT year, day, 
extract(doy from day) as doy, level, season from highs WHERE rank = 1) """, pgconn, params=[station]) df2 = df[df['season'] == season] (fig, ax) = plt.subplots(3, 1, figsize=(7, 10)) dyear = df2.groupby(['year']).count() ax[0].bar(dyear.index, dyear['level'], facecolor='tan', edgecolor='tan') ax[0].axhline(dyear['level'].mean(), lw=2) ax[0].set_ylabel("Yearly Events Avg: %.1f" % (dyear['level'].mean(), )) ax[0].set_xlim(dyear.index.min()-1, dyear.index.max()+1) title = "%s Steps %s" % (PDICT[season], "Down" if season == 'fall' else 'Up') ax[0].set_title("%s [%s]\n%s in Temperature" % (nt.sts[station]['name'], station, title)) ax[0].grid(True) ax[1].hist(np.array(df2['level'], 'f'), bins=np.arange(df2['level'].min(), df2['level'].max()+1, 2), normed=True, facecolor='tan') ax[1].set_ylabel("Probability Density") ax[1].axvline(32, lw=2) ax[1].grid(True) ax[1].set_xlabel(r"Temperature $^\circ$F, 32 degrees highlighted") ax[2].hist(np.array(df2['doy'], 'f'), bins=np.arange(df2['doy'].min(), df2['doy'].max()+1, 3), normed=True, facecolor='tan') ax[2].set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 365)) ax[2].set_xticklabels(calendar.month_abbr[1:]) ax[2].set_xlim(df2['doy'].min() - 3, df2['doy'].max() + 3) ax[2].set_ylabel("Probability Density") ax[2].grid(True) ax[2].set_xlabel("Day of Year, 3 Day Bins") return fig, df if __name__ == '__main__': plotter(dict())
4,063
1,420
# Develop an IterInt class that inherits the functionality of the standard
# int type but adds the ability to iterate over the digits of the number.


class IterInt(int):
    """int subclass whose iteration yields its decimal digits.

    Iterating produces each digit as an ``int``, most significant first.
    All other int behavior (arithmetic, comparisons, ...) is inherited.
    """

    def __iter__(self):
        # BUG FIX: the class body was `pass`, so iterating raised TypeError.
        # str() of the absolute value gives the decimal digits in order;
        # abs() skips the '-' sign for negative numbers.
        for char in str(abs(int(self))):
            yield int(char)


n = IterInt(12346)

for digit in n:
    print("digit = ", digit)

# Prints:
# digit = 1
# digit = 2
# digit = 3
# digit = 4
# digit = 6
310
124
def forward(w, s, b, y):
    """Forward pass of a linear model with squared-error loss.

    Parameters: w weight, s input sample, b bias, y target value.
    Returns (loss, Yhat) where Yhat = w*s + b and loss = (Yhat - y)**2.
    """
    Yhat = w * s + b
    output = (Yhat - y)**2
    return output, Yhat


def derivative_W(x, output, Yhat, y):
    """Gradient of the loss with respect to the weight w.

    For loss = (Yhat - y)**2 with Yhat = w*x + b, dL/dw = 2*(Yhat - y)*x.
    BUG FIX: the old code also multiplied by the loss value (`output`),
    which is not the derivative.  `output` is kept in the signature for
    backward compatibility but is unused.
    """
    return 2 * (Yhat - y) * x


def derivative_B(b, output, Yhat, y):
    """Gradient of the loss with respect to the bias b.

    dL/db = 2*(Yhat - y).  BUG FIX: the old code multiplied by both the
    loss value and the bias, which is not the derivative.  `b` and
    `output` stay in the signature for backward compatibility.
    """
    return 2 * (Yhat - y)


def main():
    """Run a few gradient-descent steps on y = 2x and print the trace."""
    w = 1.0  # weight
    x = 2.0  # sample
    b = 1.0  # bias
    y = 2.0 * x  # rule the model should learn
    learning = 1e-1  # learning rate
    epoch = 3

    for i in range(epoch + 1):
        output, Yhat = forward(w, x, b, y)
        print("-----------------------------------------------------------------------------")
        print("w:", w)
        print("\tw*b:", w * x)
        print("x:", x, "\t\tsum:", w * x + b)
        print("\tb:", b, "\t\t\tg1:", abs(Yhat - y), "\tg2:", abs(Yhat - y)**2, "\tloss:", output)
        print("\t\tY=2*x:", y)
        print("-----------------------------------------------------------------------------")
        if output == 0.0:
            break
        gw = derivative_W(x, output, Yhat, y)
        gb = derivative_B(b, output, Yhat, y)
        # Step against the gradient.
        w -= learning * gw
        b -= learning * gb


if __name__ == '__main__':
    main()
1,100
429
import struct
import os

# NOTE(review): `err`, `warn`, `get_filepath` and `lru_cache` are provided by
# the enclosing GEF script context — confirm when moving this code elsewhere.

current_pe = None


class PE:
    """Basic PE parsing.
    Ref:
    - https://hshrzd.wordpress.com/pe-bear/
    - https://blog.kowalczyk.info/articles/pefileformat.html
    """

    # IMAGE_FILE_MACHINE_* values from the COFF file header.
    X86_64 = 0x8664
    X86_32 = 0x14c
    ARM = 0x1c0
    ARM64 = 0xaa64
    ARMNT = 0x1c4
    AM33 = 0x1d3
    IA64 = 0x200
    EFI = 0xebc
    MIPS = 0x166
    MIPS16 = 0x266
    MIPSFPU = 0x366
    MIPSFPU16 = 0x466
    WCEMIPSV2 = 0x169
    POWERPC = 0x1f0
    POWERPCFP = 0x1f1
    SH3 = 0x1a2
    SH3DSP = 0x1a3
    SH4 = 0x1a6
    SH5 = 0x1a8
    THUMP = 0x1c2  # NOTE(review): likely a typo for THUMB; kept for compatibility
    RISCV32 = 0x5032
    RISCV64 = 0x5064
    RISCV128 = 0x5128
    M32R = 0x9041

    # Defaults; overwritten per instance while parsing.
    dos_magic = b'MZ'
    ptr_to_pe_header = None
    pe_magic = b'PE'
    machine = X86_32
    num_of_sections = None
    size_of_opt_header = None
    dll_charac = None
    opt_magic = b'\x02\x0b'
    entry_point = None
    base_of_code = None
    image_base = None

    def __init__(self, pe=""):
        """Parse the DOS, COFF and (partial) optional headers of `pe`."""
        if not os.access(pe, os.R_OK):
            err("'{0}' not found/readable".format(pe))
            err("Failed to get file debug information, most of gef features will not work")
            return
        with open(pe, "rb") as fd:
            # off 0x0
            self.dos_magic = fd.read(2)
            if self.dos_magic != PE.dos_magic:
                # Not a PE file; invalidate the machine field.
                self.machine = None
                return

            # off 0x3c: e_lfanew, file offset of the PE signature
            fd.seek(0x3c)
            self.ptr_to_pe_header, = struct.unpack("<I", fd.read(4))

            # off_pe + 0x0
            fd.seek(self.ptr_to_pe_header)
            self.pe_magic = fd.read(2)

            # off_pe + 0x4
            fd.seek(self.ptr_to_pe_header + 0x4)
            self.machine, self.num_of_sections = struct.unpack("<HH", fd.read(4))

            # off_pe + 0x14
            fd.seek(self.ptr_to_pe_header + 0x14)
            self.size_of_opt_header, self.dll_charac = struct.unpack("<HH", fd.read(4))

            # off_pe + 0x18
            self.opt_magic = fd.read(2)

            # off_pe + 0x28
            fd.seek(self.ptr_to_pe_header + 0x28)
            self.entry_point, self.base_of_code = struct.unpack("<II", fd.read(8))

            # off_pe + 0x30
            self.image_base, = struct.unpack("<I", fd.read(4))
        return

    def is_valid(self):
        """Return True when both the DOS and PE magic values match.

        BUG FIX: this previously referenced ``PE.DOS_MAGIC``, which does not
        exist (the class attribute is ``dos_magic``), so every call raised
        AttributeError.
        """
        return self.dos_magic == PE.dos_magic and self.pe_magic == PE.pe_magic

    def get_machine_name(self):
        """Map the COFF machine value to a human-readable architecture name."""
        return {
            0x14c: "X86",
            0x166: "MIPS",
            0x169: "WCEMIPSV2",
            0x1a2: "SH3",
            0x1a3: "SH3DSP",
            0x1a6: "SH4",
            0x1a8: "SH5",
            0x1c0: "ARM",
            0x1c2: "THUMP",
            0x1c4: "ARMNT",
            0x1d3: "AM33",
            0x1f0: "PowerPC",
            0x1f1: "PowerPCFP",
            0x200: "IA64",
            0x266: "MIPS16",
            0x366: "MIPSFPU",
            0x466: "MIPSFPU16",
            0xebc: "EFI",
            0x5032: "RISCV32",
            0x5064: "RISCV64",
            0x5128: "RISCV128",
            0x8664: "X86_64",
            0x9041: "M32R",
            0xaa64: "ARM64",
            None: None
        }[self.machine]


@lru_cache()
def get_pe_headers(filename=None):
    """Return an PE object with info from `filename`.
    If not provided, will return the currently debugged file."""
    if filename is None:
        filename = get_filepath()
    if filename.startswith("target:"):
        warn("Your file is remote, you should try using `gef-remote` instead")
        return
    return PE(filename)


@lru_cache()
def is_pe64(filename=None):
    """Checks if `filename` is an PE64."""
    pe = current_pe or get_pe_headers(filename)
    return pe.machine == PE.X86_64


@lru_cache()
def is_pe32(filename=None):
    """Checks if `filename` is an PE32."""
    pe = current_pe or get_pe_headers(filename)
    return pe.machine == PE.X86_32
4,273
1,694
import pytest
from decimal import Decimal, getcontext  # NOTE(review): getcontext appears unused

from ecommerce.factories import ProductFactory, DiscountFactory
from ecommerce.discounts import (
    DiscountType,
    PercentDiscount,
    FixedPriceDiscount,
    DollarsOffDiscount,
)

pytestmark = [pytest.mark.django_db]


@pytest.fixture()
def products():
    """Five factory-generated products with random prices."""
    return ProductFactory.create_batch(5)


@pytest.fixture()
def discounts():
    """Ten factory-generated discounts of assorted types."""
    return DiscountFactory.create_batch(10)


def test_discount_factory_generation(discounts):
    """
    Runs through discounts and makes sure all the ones that come out of the
    factory are recognizable by the test suite. (This is a sort of sanity
    check - if a new discount type gets added and the tests aren't updated,
    this test will fail.)
    """
    for discount in discounts:
        discount_logic = DiscountType.for_discount(discount)

        what_type = (
            type(discount_logic) is DollarsOffDiscount,
            type(discount_logic) is FixedPriceDiscount,
            type(discount_logic) is PercentDiscount,
        )

        assert any(what_type)


def test_discount_factory_adjustment(discounts, products):
    """
    Tests discounting products. This runs through each factory-generated
    product and applies all of the discounts that have been generated, then
    compares the discounted price to the discount generated in the test.
    """
    for product in products:
        for discount in discounts:
            discount_logic = DiscountType.for_discount(discount)

            if type(discount_logic) is DollarsOffDiscount:
                # Dollars-off floors at zero rather than going negative.
                discounted_price = product.price - discount.amount
                if discounted_price < 0:
                    discounted_price = 0
            elif type(discount_logic) is FixedPriceDiscount:
                discounted_price = discount.amount
            elif type(discount_logic) is PercentDiscount:
                discounted_price = round(
                    product.price * Decimal(discount.amount / 100), 2
                )
            else:
                discounted_price = None

            # NOTE(review): if a DollarsOffDiscount is >= the product price,
            # discounted_price is clamped to 0 above and the `> 0` check here
            # fails — this probably should be `>= 0`; confirm the factory's
            # amount range cannot reach the product price.
            assert (
                discounted_price > 0
                and discounted_price == discount_logic.get_product_price(product)
            )
2,230
636
""" This module is the custom resource used by the MSAM's CloudFormation templates to populate the web bucket with contents of the MSAM web archive. """ # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import json import os from subprocess import call import boto3 from botocore.exceptions import ClientError import resource_tools WEB_FOLDER = "/tmp/msam" def lambda_handler(event, context): """ Lambda entry point. Print the event first. """ print("Event Input: %s" % json.dumps(event)) bucket_name = event["ResourceProperties"]["BucketName"] result = {'Status': 'SUCCESS', "StackId": event["StackId"], "RequestId": event["RequestId"], "LogicalResourceId": event["LogicalResourceId"], 'Data': {}, 'ResourceId': bucket_name} if event.get("PhysicalResourceId", False): result["PhysicalResourceId"] = event["PhysicalResourceId"] else: result["PhysicalResourceId"] = "{}-{}".format(resource_tools.stack_name(event), event["LogicalResourceId"]) try: if event["RequestType"] == "Create" or event["RequestType"] == "Update": print(event["RequestType"]) replace_bucket_contents(bucket_name) elif event["RequestType"] == "Delete": print(event["RequestType"]) delete_bucket_contents(bucket_name) except ClientError as client_error: print("Exception: %s" % client_error) result = { 'Status': 'FAILED', "StackId": event["StackId"], "RequestId": event["RequestId"], "LogicalResourceId": event["LogicalResourceId"], 'Data': { "Exception": str(client_error) }, 'ResourceId': None } resource_tools.send(event, context, result['Status'], result['Data'], result["PhysicalResourceId"]) def replace_bucket_contents(bucket_name): """ This function is responsible for removing any existing contents in the specified bucket, and adding contents from the zip archive. 
""" client = boto3.client("s3") region = os.environ["AWS_REGION"] stamp = os.environ["BUILD_STAMP"] code_bucket = os.environ["BUCKET_BASENAME"] source = "https://{code_bucket}-{region}.s3.amazonaws.com/msam/msam-web-{stamp}.zip".format(code_bucket=code_bucket, region=region, stamp=stamp) # empty the bucket delete_bucket_contents(bucket_name) # execute these commands to download the zip and extract it locally command_list = [ "rm -f /tmp/msam-web-{stamp}.zip".format(stamp=stamp), "rm -rf {folder}".format(folder=WEB_FOLDER), "mkdir {folder}".format(folder=WEB_FOLDER), "unzip msam-web-{stamp}.zip -d {folder}".format(stamp=stamp, folder=WEB_FOLDER), "ls -l {folder}".format(folder=WEB_FOLDER) ] for command in command_list: print(call(command, shell=True)) # upload each local file to the bucket, preserve folders for dirpath, _, filenames in os.walk(WEB_FOLDER): for name in filenames: local = "{}/{}".format(dirpath, name) remote = local.replace("{}/".format(WEB_FOLDER), "") content_type = None if remote.endswith(".js"): content_type = "application/javascript" elif remote.endswith(".html"): content_type = "text/html" else: content_type = "binary/octet-stream" client.put_object(Bucket=bucket_name, Key=remote, Body=open(local, 'rb'), ContentType=content_type) def delete_bucket_contents(bucket_name): """ This function is responsible for removing all contents from the specified bucket. """ client = boto3.client("s3") response = client.list_objects_v2(Bucket=bucket_name) if "Contents" in response: for item in response["Contents"]: client.delete_object(Bucket=bucket_name, Key=item["Key"])
3,952
1,179
from tailow.operators.base import Operator


class SizeOperator(Operator):
    """Operator that matches array fields by their element count ($size)."""

    def to_query(self, field_name, value):
        """Return the MongoDB ``$size`` query fragment for *value* elements.

        ``field_name`` is accepted for interface uniformity; the caller
        applies it when assembling the full query.
        """
        query_fragment = {"$size": value}
        return query_fragment

    def get_value(self, field, value):
        """Serialize *value* by delegating to the field's ``to_son``."""
        son_value = field.to_son(value)
        return son_value
304
92
# Generated by Django 2.0.7 on 2020-05-18 05:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cart', '0004_auto_20200518_1139'), ] operations = [ migrations.AddField( model_name='orderitem', name='quantity', field=models.IntegerField(default=1), ), ]
385
139
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import api, fields, models, _ class BaseLanguageInstall(models.TransientModel): _name = "base.language.install" _description = "Install Language" @api.model def _default_language(self): """ Display the selected language when using the 'Update Terms' action from the language list view """ if self._context.get('active_model') == 'res.lang': lang = self.env['res.lang'].browse(self._context.get('active_id')) return lang.code return False @api.model def _get_languages(self): return [[code, name] for code, _, name in self.env['res.lang'].get_available()] lang = fields.Selection(_get_languages, string='Language', required=True, default=_default_language) overwrite = fields.Boolean('Overwrite Existing Terms', default=True, help="If you check this box, your customized translations will be overwritten and replaced by the official ones.") state = fields.Selection([('init', 'init'), ('done', 'done')], string='Status', readonly=True, default='init') def lang_install(self): self.ensure_one() mods = self.env['ir.module.module'].search([('state', '=', 'installed')]) mods.with_context(overwrite=self.overwrite)._update_translations(self.lang) self.state = 'done' self.env.cr.execute('ANALYZE ir_translation') return { 'name': _('Language Pack'), 'view_mode': 'form', 'view_id': False, 'res_model': 'base.language.install', 'domain': [], 'context': dict(self._context, active_ids=self.ids), 'type': 'ir.actions.act_window', 'target': 'new', 'res_id': self.id, } def reload(self): return { 'type': 'ir.actions.client', 'tag': 'reload', }
2,097
591
import urllib2 import threading from bs4 import BeautifulSoup import re import json import sys import os import django from stock_list import getlist, getLSEList from extract_stock_info import get_info, getLSEInfo from extract_stock_history import get_historical_info from extract_sector_history import get_sector_history, get_sector_dict from extract_stock_news import get_stock_news from extract_NT_transactions import get_NT_transactions import time from pymongo import MongoClient import warnings import exceptions warnings.filterwarnings("ignore", category=exceptions.RuntimeWarning, module='django.db.backends.sqlite3.base', lineno=53) if __name__ == '__main__': path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../MADjangoProject')) if not path in sys.path: sys.path.insert(1, path) del path os.environ['DJANGO_SETTINGS_MODULE'] = 'MADjangoProject.settings' django.setup() from market.models import Stock, StockHistory, SectorHistory sec_dict = get_sector_dict() print 'Fethcing Indices...' ALL_Stocks = getLSEList(collection=Stock) def get_share_info(): for share in ALL_Stocks: print 'Fetching info of ' + share['name'] info = getLSEInfo(share['query'], share['symbol'],collection=Stock, sector_dict=sec_dict) import threading print 'Distributing Jobs ...' threads = [] # callables = [get_nt] callables = [get_share_info] for f in callables: t = threading.Thread(target=f) t.setDaemon(True) threads.append(t) t.start() for t in threads: t.join()
1,627
517
#!/usr/bin/pypy from sys import * from random import * n, m, CMAX, d1, d2 = map(int, argv[1:]) #print randint(0, n) print 1 x0, y0 = randint(-20, -10), randint(-20, -10) dx, dy = randint(-d1, -1), randint(1, d1) x1, y1 = x0 + dx, y0 + dy dx, dy = -dy, dx print x0, y0 print x1, y1 print x1 + dx, y1 + dy print x0 + dx, y0 + dy print n for i in xrange(n): print m x, y = randint(0, CMAX), randint(0, CMAX) for j in xrange(m): print x, y, 0 x += randint(-d2, d2) y += randint(-d2, d2) x = max(0, x) y = max(0, y)
566
274
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------

"""Implementation of the 'aea run' subcommand."""

import sys
from pathlib import Path
from typing import List, Optional

import click

from aea import __version__
from aea.aea import AEA
from aea.aea_builder import AEABuilder
from aea.cli.common import (
    AEA_LOGO,
    ConnectionsOption,
    check_aea_project,
    logger,
)
from aea.cli.install import install
from aea.configurations.base import PublicId
from aea.helpers.base import load_env_file

AEA_DIR = str(Path("."))


def _prepare_environment(click_context, env_file: str, is_install_deps: bool) -> None:
    """
    Prepare the AEA project environment.

    :param click_context: the click context
    :param env_file: the path to the environment file.
    :param is_install_deps: whether to install the dependencies
    """
    load_env_file(env_file)
    if is_install_deps:
        # Prefer the project's requirements file when present; otherwise
        # install only the declared package dependencies.
        if Path("requirements.txt").exists():
            click_context.invoke(install, requirement="requirements.txt")
        else:
            click_context.invoke(install)


def _build_aea(
    connection_ids: Optional[List[PublicId]], skip_consistency_check: bool
) -> AEA:
    """
    Build the AEA from the project in the current working directory.

    :param connection_ids: connections to enable, or None for the defaults.
    :param skip_consistency_check: skip the project consistency check.
    :return: the built AEA instance; exits the process on failure.
    """
    try:
        builder = AEABuilder.from_aea_project(
            Path("."), skip_consistency_check=skip_consistency_check
        )
        aea = builder.build(connection_ids=connection_ids)
        return aea
    except Exception as e:
        # TODO use an ad-hoc exception class for predictable errors
        #  all the other exceptions should be logged with logger.exception
        logger.error(str(e))
        sys.exit(1)


def _run_aea(aea: AEA) -> None:
    """Start the agent and block until it stops or is interrupted."""
    click.echo(AEA_LOGO + "v" + __version__ + "\n")
    click.echo("{} starting ...".format(aea.name))
    try:
        aea.start()
    except KeyboardInterrupt:
        click.echo(" {} interrupted!".format(aea.name))  # pragma: no cover
    except Exception as e:
        logger.exception(e)
        sys.exit(1)
    finally:
        # Always attempt a clean shutdown, whatever stopped the loop.
        click.echo("{} stopping ...".format(aea.name))
        aea.stop()


@click.command()
@click.option(
    "--connections",
    "connection_ids",
    cls=ConnectionsOption,
    required=False,
    default=None,
    help="The connection names to use for running the agent. Must be declared in the agent's configuration file.",
)
@click.option(
    "--env",
    "env_file",
    type=click.Path(),
    required=False,
    default=".env",
    help="Specify an environment file (default: .env)",
)
@click.option(
    "--install-deps",
    "is_install_deps",
    is_flag=True,
    required=False,
    default=False,
    help="Install all the dependencies before running the agent.",
)
@click.pass_context
@check_aea_project
def run(
    click_context, connection_ids: List[PublicId], env_file: str, is_install_deps: bool
):
    """Run the agent."""
    skip_consistency_check = click_context.obj.config["skip_consistency_check"]
    _prepare_environment(click_context, env_file, is_install_deps)
    aea = _build_aea(connection_ids, skip_consistency_check)
    _run_aea(aea)
3,787
1,193
#!/usr/bin/env python3 import logging import functools import argparse import collections import fileinput import json import csv import re import requests import box TAX = 0.13 SPECIALIZATION_COLUMNS_INDEXES = { 'salary.average': 1, 'salary.minimum': 2, 'salary.maximum': 3, 'number': 4, } def save_stats(args, stats, filename_parts, headers, row_handler, sort_key_getter): stats_rows = [] for number, stats_row in enumerate(stats.items()): logging.info('process the stats row #%d', number) name, data = stats_row stats_rows.append(row_handler(name, data)) stats_rows.sort(key=sort_key_getter) if args.query: filename_parts.append(args.query.pattern) if args.handicapped: filename_parts.append('handicapped') if args.remote: filename_parts.append('remote') if args.no_experience: filename_parts.append('no_experience') filename = '.'.join(filename_parts + ['csv']) with open(filename, 'w', newline='') as csv_file: csv_writer = csv.writer(csv_file) csv_writer.writerow(headers) for number, stats_row in enumerate(stats_rows): logging.info('output the stats row #%d', number) csv_writer.writerow(stats_row) logging.basicConfig( format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO, ) parser = argparse.ArgumentParser() parser.add_argument('-q', '--query', type=lambda query: re.compile(query, re.I)) parser.add_argument('-H', '--handicapped', action='store_true') parser.add_argument('-r', '--remote', action='store_true') parser.add_argument('-e', '--no_experience', action='store_true') parser.add_argument('--data_1', required=True, choices=['profareas', 'specializations']) parser.add_argument('-s', '--sort', required=True, choices=SPECIALIZATION_COLUMNS_INDEXES.keys()) parser.add_argument('--data_2', choices=['areas', 'cities']) args = parser.parse_args() currencies = {} dictionaries = box.Box(requests.get('https://api.hh.ru/dictionaries').json()) for currency in dictionaries.currency: currencies[currency.code] = currency.rate specializations = 
collections.defaultdict(list) cities = collections.Counter() for number, line in enumerate(fileinput.input(files=[])): logging.info('process the vacancy #%d', number) try: if args.query and args.query.search(line) is None: continue vacancy = box.Box(json.loads(line)) if args.handicapped and not vacancy.accept_handicapped: continue if args.remote and vacancy.schedule.id != 'remote': continue if args.no_experience and vacancy.experience.id != 'noExperience': continue if args.data_2 == 'areas': cities[vacancy.area.name] += 1 elif args.data_2 == 'cities' and vacancy.address and vacancy.address.city: cities[vacancy.address.city] += 1 salary = None if vacancy.salary: if vacancy.salary.to: salary = vacancy.salary.to # override the salary with its minimum, if there are specified both limits if vacancy.salary['from']: salary = vacancy.salary['from'] if not salary: continue salary /= currencies[vacancy.salary.currency] if vacancy.salary.gross: salary -= salary * TAX for specialization in vacancy.specializations: if args.data_1 == 'profareas': name = specialization.profarea_name else: name = '{} / {}'.format(specialization.profarea_name, specialization.name) specializations[name].append(salary) except Exception as exception: logging.error('error: %s', exception) save_stats = functools.partial(save_stats, args) if specializations: logging.info('save the %s stats', args.data_1) save_stats( stats=specializations, filename_parts=[args.data_1, args.sort], headers=['Name', 'Salary, average', 'Salary, minimum', 'Salary, maximum', 'Number'], row_handler=lambda name, salaries: [ name, round(sum(salaries) / len(salaries)), round(min(salaries)), round(max(salaries)), len(salaries), ], sort_key_getter=lambda specialization_row: [ -specialization_row[SPECIALIZATION_COLUMNS_INDEXES[args.sort]], specialization_row[0].lower(), ], ) if cities: logging.info('save the %s stats', args.data_2) save_stats( stats=cities, filename_parts=[args.data_2], headers=['Name', 'Number'], row_handler=lambda 
name, number: [name, number], sort_key_getter=lambda city_row: [-city_row[1], city_row[0].lower()], )
4,817
1,498
"""An environment to skip k frames and return a max between the last two.""" import gym import numpy as np class MaxFrameskipEnv(gym.Wrapper): """An environment to skip k frames and return a max between the last two.""" def __init__(self, env, skip: int=4) -> None: """ Initialize a new max frame skip env around an existing environment. Args: env: the environment to wrap around skip: the number of frames to skip (i.e. hold an action for) Returns: None """ gym.Wrapper.__init__(self, env) # most recent raw observations (for max pooling across time steps) self._obs_buffer = np.zeros((2, *env.observation_space.shape), dtype=np.uint8) self._skip = skip def step(self, action): """Repeat action, sum reward, and max over last observations.""" # the total reward from `skip` frames having `action` held on them total_reward = 0.0 done = None # perform the action `skip` times for i in range(self._skip): obs, reward, done, info = self.env.step(action) total_reward += reward # assign the buffer with the last two frames if i == self._skip - 2: self._obs_buffer[0] = obs if i == self._skip - 1: self._obs_buffer[1] = obs # break the loop if the game terminated if done: break # Note that the observation on the done=True frame doesn't matter # (because the next state isn't evaluated when done is true) max_frame = self._obs_buffer.max(axis=0) return max_frame, total_reward, done, info def reset(self, **kwargs): return self.env.reset(**kwargs) # explicitly define the outward facing API of this module __all__ = [MaxFrameskipEnv.__name__]
1,895
521
"""Package facade: re-export the public energy-management classes."""
from .provider import EnergyProvider
from .pwr_source import PowerSource
from .consumption_manager import ConsumptionManager

# Declare the public API explicitly; matches the names star-imports
# already exposed, so this is backward compatible.
__all__ = ["EnergyProvider", "PowerSource", "ConsumptionManager"]
125
29
"""Script containing the DeepLoco environments.""" import gym import numpy as np import os import sys import cv2 try: sys.path.append(os.path.join(os.environ["TERRAINRL_PATH"], "simAdapter")) import terrainRLSim # noqa: F401 except (KeyError, ImportError, ModuleNotFoundError): pass class BipedalSoccer(gym.Env): """Bipedal Soccer environment. In this environment, a bipedal agent is placed in an open field with a soccer ball. The agent is rewarded for moving to the ball, and additionally dribbling the ball to the target. The reward function is a weighted sum of the agent's distance from the ball and the distance of the ball from a desired goal position. This reward is positive to discourage the agent from falling prematurely. Attributes ---------- wrapped_env : gym.Env the original environment, which add more dimensions than wanted here """ def __init__(self): """Instantiate the environment.""" self.wrapped_env = terrainRLSim.getEnv( "PD-Biped3D-HLC-Soccer-v1", render=False) # Add the time horizon. self.horizon = 512 @property def observation_space(self): """See parent class.""" return self.wrapped_env.observation_space @property def action_space(self): """See parent class.""" return self.wrapped_env.action_space def step(self, action): """See parent class.""" obs, rew, done, info = self.wrapped_env.step(np.array([action])) return obs[0], rew[0][0], done, info def reset(self): """See parent class.""" return self.wrapped_env.reset()[0] def render(self, mode='human'): """See parent class.""" return self.wrapped_env.render(mode=mode) class BipedalObstacles(gym.Env): """Bipedal Obstacles environment. In this environment, a bipedal agent is placed in an open field with obstacles scattered throughout the world. The goal of the agent is to walk around the world and reach a goal position. Attributes ---------- wrapped_env : gym.Env the original environment, which add more dimensions than wanted here """ def __init__(self, render): """Instantiate the environment. 
Parameters ---------- render : bool whether to render the environment """ self.t = 0 if render: self.wrapped_env = gym.make("PD-Biped3D-HLC-Obstacles-render-v2") else: self.wrapped_env = gym.make("PD-Biped3D-HLC-Obstacles-v2") # Add the time horizon. self.horizon = 2000 @property def observation_space(self): """See parent class.""" return gym.spaces.Box( low=20 * self.wrapped_env.observation_space.low[:-2], high=20 * self.wrapped_env.observation_space.high[:-2], dtype=np.float32) @property def context_space(self): """See parent class.""" return gym.spaces.Box( low=20 * self.wrapped_env.observation_space.low[-2:], high=20 * self.wrapped_env.observation_space.high[-2:], dtype=np.float32) @property def action_space(self): """See parent class.""" return self.wrapped_env.action_space @property def current_context(self): """See parent class.""" return self.wrapped_env.env.getObservation()[-2:] def step(self, action): """See parent class.""" self.t += 1 obs, rew, done, info = self.wrapped_env.step(action) done = done or self.t >= self.horizon return obs[:-2], rew, done, info def reset(self): """See parent class.""" self.t = 0 return self.wrapped_env.reset()[:-2] def render(self, mode='human'): """See parent class.""" image = self.wrapped_env.env.render( headless_step=True) if mode == 'human': f = np.flip(image.astype(np.float32) / 255.0, axis=0) f = np.flip(f, axis=2) cv2.imshow("PD-Biped3D-HLC-Obstacles-v2", f) cv2.waitKey(1) elif mode == 'rgb_array': return image
4,236
1,344
# -*- coding: utf-8 -*- """Top-level package for Test.""" __author__ = """Lana Maidenbaum""" __email__ = 'lana.maidenbaum@zeel.com' __version__ = '0.1.1'
156
70
# -*- coding:utf-8 -*- ''' Created on 2015年3月2日 @author: wanhao01 ''' import sys from crawler.minispider import logerror import main reload(sys) sys.setdefaultencoding('utf-8') if __name__ == '__main__': try: main.main() except Exception as exception: logerror("error during running, details: " + str(exception)) pass
372
145
# Distributed recursive Fibonacci demo using TensorFlow 1.x graph functions.
# Requires two local gRPC workers (localhost:2222 / localhost:2223) already
# running as jobs of the "local" cluster.
import tensorflow as tf
from tensorflow.python.framework import function

cluster = tf.train.ClusterSpec({"local": ["localhost:2222", "localhost:2223"]})

# Forward-declare Fib so the Defun body below can call itself recursively.
fib = function.Declare("Fib", [("n", tf.int32)], [("ret", tf.int32)])


@function.Defun(tf.int32, func_name="Fib", out_names=["ret"])
def FibImpl(n):
    # Base case (n <= 1): constant 1, pinned to task 0.
    def f1():
        with tf.device("/job:local/replica:0/task:0/device:CPU:0"):
            ret = tf.constant(1)
        return ret

    # Recursive case: the two subcalls are pinned to different tasks so the
    # recursion fans out across both workers.
    def f2():
        with tf.device("/job:local/replica:0/task:0/device:CPU:0"):
            fib1 = fib(n-1)
        with tf.device("/job:local/replica:0/task:1/device:CPU:0"):
            fib2 = fib(n-2)
        return fib1 + fib2

    return tf.cond(tf.less_equal(n, 1), f1, f2)


# Register the function body with the default graph before building calls.
FibImpl.add_to_graph(tf.get_default_graph())

n = tf.placeholder(tf.int32, shape=[])
x = fib(n)
res = tf.add(x, 1)

#print(tf.get_default_graph().as_graph_def())
writer = tf.summary.FileWriter('./graphs', tf.get_default_graph())

# Run against the first worker; fib(20) evaluates the whole recursion tree.
with tf.Session("grpc://localhost:2222") as sess:
    print(sess.run(res, feed_dict={n: 20}))

writer.close()
1,001
449
import collections import functools import io import math import typing from abc import ABC, abstractmethod from river import base from river.utils.skmultiflow_utils import ( calculate_object_size, normalize_values_in_dict, ) from .nodes.branch import ( DTBranch, NominalBinaryBranch, NominalMultiwayBranch, NumericBinaryBranch, NumericMultiwayBranch, ) from .nodes.leaf import HTLeaf try: import graphviz GRAPHVIZ_INSTALLED = True except ImportError: GRAPHVIZ_INSTALLED = False class HoeffdingTree(ABC): """Base class for Hoeffding Decision Trees. This is an **abstract class**, so it cannot be used directly. It defines base operations and properties that all the Hoeffding decision trees must inherit or implement according to their own design. Parameters ---------- max_depth The maximum depth a tree can reach. If `None`, the tree will grow indefinitely. binary_split If True, only allow binary splits. max_size The max size of the tree, in Megabytes (MB). memory_estimate_period Interval (number of processed instances) between memory consumption checks. stop_mem_management If True, stop growing as soon as memory limit is hit. remove_poor_attrs If True, disable poor attributes to reduce memory usage. merit_preprune If True, enable merit-based tree pre-pruning. 
""" def __init__( self, max_depth: int = None, binary_split: bool = False, max_size: float = 100.0, memory_estimate_period: int = 1000000, stop_mem_management: bool = False, remove_poor_attrs: bool = False, merit_preprune: bool = True, ): # Properties common to all the Hoeffding trees self._split_criterion: str = "" self._leaf_prediction: str = "" self.max_depth: float = max_depth if max_depth is not None else math.inf self.binary_split: bool = binary_split self._max_size: float = max_size self._max_byte_size: float = self._max_size * (2**20) # convert to byte self.memory_estimate_period: int = memory_estimate_period self.stop_mem_management: bool = stop_mem_management self.remove_poor_attrs: bool = remove_poor_attrs self.merit_preprune: bool = merit_preprune self._root: typing.Union[DTBranch, HTLeaf, None] = None self._n_active_leaves: int = 0 self._n_inactive_leaves: int = 0 self._inactive_leaf_size_estimate: float = 0.0 self._active_leaf_size_estimate: float = 0.0 self._size_estimate_overhead_fraction: float = 1.0 self._growth_allowed = True self._train_weight_seen_by_model: float = 0.0 @staticmethod def _hoeffding_bound(range_val, confidence, n): r"""Compute the Hoeffding bound, used to decide how many samples are necessary at each node. Notes ----- The Hoeffding bound is defined as: $\\epsilon = \\sqrt{\\frac{R^2\\ln(1/\\delta))}{2n}}$ where: $\\epsilon$: Hoeffding bound. $R$: Range of a random variable. For a probability the range is 1, and for an information gain the range is log *c*, where *c* is the number of classes. $\\delta$: Confidence. 1 minus the desired probability of choosing the correct attribute at any given node. $n$: Number of samples. Parameters ---------- range_val Range value. confidence Confidence of choosing the correct attribute. n Number of processed samples. 
""" return math.sqrt( (range_val * range_val * math.log(1.0 / confidence)) / (2.0 * n) ) @property def max_size(self): """Max allowed size tree can reach (in MB).""" return self._max_size @max_size.setter def max_size(self, size): self._max_size = size self._max_byte_size = self._max_size * (2**20) @property def height(self) -> int: if self._root: return self._root.height @property def n_nodes(self): if self._root: return self._root.n_nodes @property def n_branches(self): if self._root: return self._root.n_branches @property def n_leaves(self): if self._root: return self._root.n_leaves @property def n_active_leaves(self): return self._n_active_leaves @property def n_inactive_leaves(self): return self._n_inactive_leaves @property def summary(self): """Collect metrics corresponding to the current status of the tree in a string buffer. """ summary = { "n_nodes": self.n_nodes, "n_branches": self.n_branches, "n_leaves": self.n_leaves, "n_active_leaves": self.n_active_leaves, "n_inactive_leaves": self.n_inactive_leaves, "height": self.height, "total_observed_weight": self._train_weight_seen_by_model, } return summary def to_dataframe(self): """Return a representation of the current tree structure organized in a `pandas.DataFrame` object. In case the tree is empty or it only contains a single node (a leaf), `None` is returned. Returns ------- df A `pandas.DataFrame` depicting the tree structure. """ if self._root is not None and isinstance(self._root, DTBranch): return self._root.to_dataframe() def _branch_selector( self, numerical_feature=True, multiway_split=False ) -> typing.Type[DTBranch]: """Create a new split node.""" if numerical_feature: if not multiway_split: return NumericBinaryBranch else: return NumericMultiwayBranch else: if not multiway_split: return NominalBinaryBranch else: return NominalMultiwayBranch @abstractmethod def _new_leaf( self, initial_stats: dict = None, parent: typing.Union[HTLeaf, DTBranch] = None ) -> HTLeaf: """Create a new learning node. 
The characteristics of the learning node depends on the tree algorithm. Parameters ---------- initial_stats Target statistics set from the parent node. parent Parent node to inherit from. Returns ------- A new learning node. """ @property def split_criterion(self) -> str: """Return a string with the name of the split criterion being used by the tree.""" return self._split_criterion @split_criterion.setter @abstractmethod def split_criterion(self, split_criterion): """Define the split criterion to be used by the tree.""" @property def leaf_prediction(self) -> str: """Return the prediction strategy used by the tree at its leaves.""" return self._leaf_prediction @leaf_prediction.setter @abstractmethod def leaf_prediction(self, leaf_prediction): """Define the prediction strategy used by the tree in its leaves.""" def _enforce_size_limit(self): """Track the size of the tree and disable/enable nodes if required. This memory-management routine shared by all the Hoeffding Trees is based on [^1]. References ---------- [^1]: Kirkby, R.B., 2007. Improving hoeffding trees (Doctoral dissertation, The University of Waikato). 
""" tree_size = self._size_estimate_overhead_fraction * ( self._active_leaf_size_estimate + self._n_inactive_leaves * self._inactive_leaf_size_estimate ) if self._n_inactive_leaves > 0 or tree_size > self._max_byte_size: if self.stop_mem_management: self._growth_allowed = False return leaves = self._find_leaves() leaves.sort(key=lambda leaf: leaf.calculate_promise()) max_active = 0 while max_active < len(leaves): max_active += 1 if ( ( max_active * self._active_leaf_size_estimate + (len(leaves) - max_active) * self._inactive_leaf_size_estimate ) * self._size_estimate_overhead_fraction ) > self._max_byte_size: max_active -= 1 break cutoff = len(leaves) - max_active for i in range(cutoff): if leaves[i].is_active(): leaves[i].deactivate() self._n_inactive_leaves += 1 self._n_active_leaves -= 1 for i in range(cutoff, len(leaves)): if not leaves[i].is_active() and leaves[i].depth < self.max_depth: leaves[i].activate() self._n_active_leaves += 1 self._n_inactive_leaves -= 1 def _estimate_model_size(self): """Calculate the size of the model and trigger tracker function if the actual model size exceeds the max size in the configuration. This memory-management routine shared by all the Hoeffding Trees is based on [^1]. References ---------- [^1]: Kirkby, R.B., 2007. Improving hoeffding trees (Doctoral dissertation, The University of Waikato). 
""" leaves = self._find_leaves() total_active_size = 0 total_inactive_size = 0 for leaf in leaves: if leaf.is_active(): total_active_size += calculate_object_size(leaf) else: total_inactive_size += calculate_object_size(leaf) if total_active_size > 0: self._active_leaf_size_estimate = total_active_size / self._n_active_leaves if total_inactive_size > 0: self._inactive_leaf_size_estimate = ( total_inactive_size / self._n_inactive_leaves ) actual_model_size = calculate_object_size(self) estimated_model_size = ( self._n_active_leaves * self._active_leaf_size_estimate + self._n_inactive_leaves * self._inactive_leaf_size_estimate ) self._size_estimate_overhead_fraction = actual_model_size / estimated_model_size if actual_model_size > self._max_byte_size: self._enforce_size_limit() def _deactivate_all_leaves(self): """Deactivate all leaves.""" leaves = self._find_leaves() for leaf in leaves: leaf.deactivate() self._n_inactive_leaves += 1 self._n_active_leaves -= 1 def _find_leaves(self) -> typing.List[HTLeaf]: """Find learning nodes in the tree. Returns ------- List of learning nodes in the tree. """ return [leaf for leaf in self._root.iter_leaves()] # Adapted from creme's original implementation def debug_one(self, x: dict) -> typing.Union[str, None]: """Print an explanation of how `x` is predicted. Parameters ---------- x A dictionary of features. Returns ------- A representation of the path followed by the tree to predict `x`; `None` if the tree is empty. Notes ----- Currently, Label Combination Hoeffding Tree Classifier (for multi-label classification) is not supported. 
""" if self._root is None: return # We'll redirect all the print statement to a buffer, we'll return the content of the # buffer at the end buffer = io.StringIO() _print = functools.partial(print, file=buffer) for node in self._root.walk(x, until_leaf=True): if isinstance(node, HTLeaf): _print(repr(node)) else: try: child_index = node.branch_no(x) # noqa except KeyError: child_index, _ = node.most_common_path() _print(node.repr_branch(child_index)) # noqa return buffer.getvalue() def draw(self, max_depth: int = None): """Draw the tree using the `graphviz` library. Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean. Parameters ---------- max_depth Only the root will be drawn when set to `0`. Every node will be drawn when set to `None`. Notes ----- Currently, Label Combination Hoeffding Tree Classifier (for multi-label classification) is not supported. Examples -------- >>> from river import datasets >>> from river import tree >>> model = tree.HoeffdingTreeClassifier( ... grace_period=5, ... split_confidence=1e-5, ... split_criterion='gini', ... max_depth=10, ... tie_threshold=0.05, ... ) >>> for x, y in datasets.Phishing(): ... model = model.learn_one(x, y) >>> dot = model.draw() .. 
image:: ../../docs/img/dtree_draw.svg :align: center """ counter = 0 def iterate(node=None): if node is None: yield None, None, self._root, 0, None yield from iterate(self._root) nonlocal counter parent_no = counter if isinstance(node, DTBranch): for branch_index, child in enumerate(node.children): counter += 1 yield parent_no, node, child, counter, branch_index if isinstance(child, DTBranch): yield from iterate(child) if max_depth is None: max_depth = math.inf dot = graphviz.Digraph( graph_attr={"splines": "ortho", "forcelabels": "true", "overlap": "false"}, node_attr={ "shape": "box", "penwidth": "1.2", "fontname": "trebuchet", "fontsize": "11", "margin": "0.1,0.0", }, edge_attr={"penwidth": "0.6", "center": "true", "fontsize": "7 "}, ) if isinstance(self, base.Classifier): n_colors = len(self.classes) # noqa else: n_colors = 1 # Pick a color palette which maps classes to colors new_color = functools.partial(next, iter(_color_brew(n_colors))) palette = collections.defaultdict(new_color) for parent_no, parent, child, child_no, branch_index in iterate(): if child.depth > max_depth: continue if isinstance(child, DTBranch): text = f"{child.feature}" # noqa else: text = f"{repr(child)}\nsamples: {int(child.total_weight)}" # Pick a color, the hue depends on the class and the transparency on the distribution if isinstance(self, base.Classifier): class_proba = normalize_values_in_dict(child.stats, inplace=False) mode = max(class_proba, key=class_proba.get) p_mode = class_proba[mode] try: alpha = (p_mode - 1 / n_colors) / (1 - 1 / n_colors) fillcolor = str(transparency_hex(color=palette[mode], alpha=alpha)) except ZeroDivisionError: fillcolor = "#FFFFFF" else: fillcolor = "#FFFFFF" dot.node(f"{child_no}", text, fillcolor=fillcolor, style="filled") if parent_no is not None: dot.edge( f"{parent_no}", f"{child_no}", xlabel=parent.repr_branch(branch_index, shorten=True), ) return dot # Utility adapted from the original creme's implementation def _color_brew(n: int) -> 
typing.List[typing.Tuple[int, int, int]]: """Generate n colors with equally spaced hues. Parameters ---------- n The number of required colors. Returns ------- List of n tuples of form (R, G, B) being the components of each color. References ---------- https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_export.py """ colors = [] # Initialize saturation & value; calculate chroma & value shift s, v = 0.75, 0.9 c = s * v m = v - c for h in [i for i in range(25, 385, int(360 / n))]: # Calculate some intermediate values h_bar = h / 60.0 x = c * (1 - abs((h_bar % 2) - 1)) # Initialize RGB with same hue & chroma as our color rgb = [ (c, x, 0), (x, c, 0), (0, c, x), (0, x, c), (x, 0, c), (c, 0, x), (c, x, 0), ] r, g, b = rgb[int(h_bar)] # Shift the initial RGB values to match value and store colors.append( ((int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m)))) ) return colors # Utility adapted from the original creme's implementation def transparency_hex(color: typing.Tuple[int, int, int], alpha: float) -> str: """Apply alpha coefficient on hexadecimal color.""" return "#%02x%02x%02x" % tuple( [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color] )
17,777
5,209
import types from tf.advanced.app import App MODIFIERS = """ remark folio note ref emph und super special q num den """.strip().split() def fmt_layoutFull(app, n, **kwargs): return app._wrapHtml(n, ("",)) def fmt_layoutRemarks(app, n, **kwargs): return app._wrapHtml(n, ("r",)) def fmt_layoutNotes(app, n, **kwargs): return app._wrapHtml(n, ("n",)) def fmt_layoutOrig(app, n, **kwargs): return app._wrapHtml(n, ("o",)) def fmt_layoutNoRemarks(app, n, **kwargs): return app._wrapHtml(n, ("o", "n")) def fmt_layoutNoNotes(app, n, **kwargs): return app._wrapHtml(n, ("o", "r")) def fmt_layoutNonOrig(app, n, **kwargs): return app._wrapHtml(n, ("r", "n")) NOTE = "note" WORD = "word" class TfApp(App): def __init__(app, *args, **kwargs): app.fmt_layoutFull = types.MethodType(fmt_layoutFull, app) app.fmt_layoutRemarks = types.MethodType(fmt_layoutRemarks, app) app.fmt_layoutNotes = types.MethodType(fmt_layoutNotes, app) app.fmt_layoutOrig = types.MethodType(fmt_layoutOrig, app) app.fmt_layoutNoRemarks = types.MethodType(fmt_layoutNoRemarks, app) app.fmt_layoutNoNotes = types.MethodType(fmt_layoutNoNotes, app) app.fmt_layoutNonOrig = types.MethodType(fmt_layoutNonOrig, app) super().__init__(*args, **kwargs) def _wrapHtml(app, n, kinds): api = app.api F = api.F Fs = api.Fs L = api.L preNote = "" postNote = "" if "" in kinds or "n" in kinds: notes = L.u(n, otype=NOTE) if notes: note = notes[0] mark = F.mark.v(note) noteWords = L.d(note, otype=WORD) firstWord = noteWords[0] lastWord = noteWords[-1] if firstWord == n: preNote = f"«{mark}= " if lastWord == n: postNote = f" ={mark}»" material = "".join(Fs(f"trans{kind}").v(n) or "" for kind in kinds) after = "".join(Fs(f"punc{kind}").v(n) or "" for kind in kinds) material = f"{preNote}{material}{after}{postNote}" clses = " ".join( cf for cf in MODIFIERS if (fscf := Fs(f"is{cf}")) and fscf.v(n) ) if clses: material = f'<span class="{clses}">{material}</span>' return material
2,376
837
import hashlib
import errno
import logging
import os
import shutil
import traceback

from contextlib import closing

from pywb.utils.loaders import BlockLoader

from webrecorder.rec.storage.base import BaseStorage
from webrecorder.rec.storage.storagepaths import add_local_store_prefix, strip_prefix


logger = logging.getLogger('wr.io')


# ============================================================================
class DirectLocalFileStorage(BaseStorage):
    """Webrecorder storage (local files)."""

    def __init__(self):
        """Initialize Webrecorder storage."""
        super(DirectLocalFileStorage, self).__init__(os.environ['STORAGE_ROOT'])

    def delete_collection_dir(self, dir_path):
        """Delete collection directory.

        :param str dir_path: directory path

        :returns: whether successful or not
        :rtype: bool
        """
        local_dir = os.path.join(self.storage_root, dir_path)
        try:
            logger.debug('Local Store: Deleting Directory: ' + local_dir)
            parent_dir = os.path.dirname(local_dir)
            shutil.rmtree(local_dir)
            # Also prune now-empty parent directories.
            os.removedirs(parent_dir)
            return True
        except Exception as e:
            # BUG FIX: only OSError carries an errno attribute; getattr keeps
            # this handler from raising AttributeError for other exceptions.
            # errno.ENOENT replaces the magic number 2 ("no such file"):
            # a missing directory is an expected, silent outcome.
            if getattr(e, 'errno', None) != errno.ENOENT:
                logger.error(str(e))
            return False

    def do_upload(self, target_url, full_filename):
        """Upload file into local file storage.

        :param str target_url: target URL
        :param str full_filename: path

        :returns: whether successful or not
        :rtype: bool
        """
        try:
            # Creating the directory can fail too (e.g. permissions), so it
            # belongs inside the try block for a consistent bool result.
            os.makedirs(os.path.dirname(target_url), exist_ok=True)
            if full_filename != target_url:
                shutil.copyfile(full_filename, target_url)
            else:
                logger.debug('Local Store: Same File, No Upload')
            return True
        except Exception as e:
            logger.error(str(e))
            return False

    def is_valid_url(self, target_url):
        """Return whether given target URL is an existing file.

        :param str target_url: target URL

        :returns: whether given target URL is an existing file
        :rtype: bool
        """
        return os.path.isfile(target_url)

    def get_client_url(self, target_url):
        """Get client URL.

        :param str target_url: target URL

        :returns: client URL
        :rtype: str
        """
        return add_local_store_prefix(target_url.replace(os.path.sep, '/'))

    def client_url_to_target_url(self, client_url):
        """Get target URL (from client URL).

        :param str client_url: client URL

        :returns: target URL
        :rtype: str
        """
        return strip_prefix(client_url)

    def do_delete(self, target_url, client_url):
        """Delete file from storage.

        :param str target_url: target URL
        :param str client_url: client URL (unused argument)

        :returns: whether successful or not
        :rtype: bool
        """
        try:
            logger.debug('Local Store: Deleting: ' + target_url)
            os.remove(target_url)
            # if target_url.startswith(self.storage_root):
            #    os.removedirs(os.path.dirname(target_url))
            return True
        except Exception as e:
            # BUG FIX: see delete_collection_dir -- not every exception has
            # an errno; a missing file (ENOENT) is not worth logging.
            if getattr(e, 'errno', None) != errno.ENOENT:
                logger.error(str(e))
            return False


# ============================================================================
class LocalFileStorage(DirectLocalFileStorage):
    """Webrecorder storage w/ Redis interface (local files).

    :ivar StrictRedis redis: Redis interface
    """

    def __init__(self, redis):
        """Initialize Webrecorder storage w/ Redis interface.

        :param StrictRedis redis: Redis interface
        """
        self.redis = redis
        super(LocalFileStorage, self).__init__()

    ### BEGIN PERMA CUSTOMIZATIONS
    ### First pass at https://github.com/harvard-lil/perma/issues/2614
    def delete_collection(self, collection):
        """Delete collection.

        :param collection: collection
        :type: n.s.

        :returns: whether successful or not
        :rtype: bool
        """
        path = collection.get_dir_path()
        if path:
            try:
                dirpath = os.path.join(self.storage_root, path)
                # Deletion is delegated to a worker via Redis pub/sub; a
                # positive publish count means someone received the message.
                return (self.redis.publish('handle_delete_dir', dirpath) > 0)
            except Exception:
                logger.error("Failed attempt to delete collection {}".format(collection), exc_info=True)
                return False
        return False
    ### END PERMA CUSTOMIZATIONS

    def do_delete(self, target_url, client_url):
        """Delete file.

        :param str target_url: target URL
        :param str client_url: client URL (unused argument)

        :returns: whether successful or not
        :rtype: bool
        """
        return self.redis.publish('handle_delete_file', target_url) > 0

    def get_checksum_and_size(self, filepath_or_url):
        """Returns the checksum of the supplied URL or filepath and the size of the resource

        :param str filepath_or_url: The URL or filepath to the resource that the checksum and size is desired for

        :return: A three tuple containing the kind of checksum, the checksum itself, and size
        :rtype: tuple[str|None, str|None, int|None]
        """
        m = hashlib.md5()
        amount = 1024 * 1024
        total_size = 0
        # Stream in 1 MiB chunks so large WARCs do not load into memory.
        with closing(BlockLoader().load(filepath_or_url)) as f:
            while True:
                chunk = f.read(amount)
                chunk_size = len(chunk)
                if chunk_size == 0:
                    break
                total_size += chunk_size
                m.update(chunk)
        return 'md5', m.hexdigest(), total_size
5,693
1,582
#!/usr/bin/env python3
"""Read words from stdin and emit every leet-speak substitution variant."""
import sys
from string import ascii_letters
import itertools


def includeDefault(charSet):
    """Map every byte value to the set containing only itself."""
    for b in range(256):
        charSet[b] = {b}


def includeInvertedCases(charSet):
    """Allow every ASCII letter to be substituted by its opposite case."""
    for c in ascii_letters:
        swapped = c.lower() if c.isupper() else c.upper()
        charSet[ord(c)] |= {ord(swapped)}


# Leet substitution table: each letter (either case) may also be written
# as any of the characters in the mapped string.
_LEET_SUBSTITUTIONS = {
    "aA": "@4",
    "eE": "3",
    "sS": "$5",
    "lL": "1",
    "iI": "!1",
}


def includeLeetSpeak(charSet):
    """Add the classic leet-speak substitutions to *charSet*.

    Data-driven replacement for the previous copy-pasted per-letter lines.
    """
    for letters, substitutes in _LEET_SUBSTITUTIONS.items():
        extra = {ord(s) for s in substitutes}
        for letter in letters:
            charSet[ord(letter)] |= extra


def findCombinations(word, charSet):
    """Write every substitution variant of *word* (a bytes object) to stdout.

    Each byte of *word* is replaced by every member of its substitution set;
    the Cartesian product of those sets yields all variants.
    """
    # One substitution pool per byte (the unused enumerate index is gone).
    pools = [charSet[b] for b in word]
    out = sys.stdout.buffer
    for variant in itertools.product(*pools):
        out.write(bytearray(variant))
        sys.stdout.flush()


if __name__ == "__main__":
    charSet = dict()
    includeDefault(charSet)
    #includeInvertedCases(charSet)
    includeLeetSpeak(charSet)

    # Lines keep their trailing newline byte; it maps to itself, so every
    # emitted variant is newline-terminated.
    for line in sys.stdin.buffer:
        findCombinations(line, charSet)
1,424
531
# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot)
# Copyright (c) 2021 Drakkar-Software, All rights reserved.
#
# OctoBot is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# OctoBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>.
import pytest
import time

import octobot.community as community

# Shared constants for the error fixtures below.
ERROR_TITLE = "An error happened"
ERROR_METRICS_ID = "1254xyz"
ERROR_TIME = time.time()
UPLOADER_URL = "http://upload_url"


@pytest.fixture
def basic_error():
    """An Error built without an underlying exception."""
    return community.Error(None, ERROR_TITLE, ERROR_TIME, ERROR_METRICS_ID)


@pytest.fixture
def exception_error():
    """An Error wrapping a real exception that carries a traceback."""
    return community.Error(_get_exception(), ERROR_TITLE, ERROR_TIME, ERROR_METRICS_ID)


@pytest.fixture
def error_uploader():
    """An ErrorsUploader pointing at the test upload URL."""
    return community.ErrorsUploader(UPLOADER_URL)


def _get_exception():
    # Raise through a few nested frames so the returned exception carries a
    # multi-level traceback.
    def _innermost():
        1 / 0

    def _middle():
        _innermost()

    def _outermost():
        _middle()

    try:
        _outermost()
    except ZeroDivisionError as err:
        return err
1,621
544
import collections


def SCCSearch(G):
    """Return the strongly connected components of directed graph ``G``.

    ``G`` maps each vertex to a dict holding an ``'adj'`` list of successors;
    the bookkeeping keys ``'n'`` (DFS number) and ``'p'`` (low-point) are
    (re)initialized here.

    :returns: list of components, each a list of vertices, in the order the
        DFS completes them (reverse topological order of components).
    """
    for v in G:
        G[v]['n'] = G[v]['p'] = 0
    # BUG FIX: numbering starts at 1 because 0 is the "unvisited" marker;
    # starting at 0 made the first root look unvisited to its own
    # descendants and caused infinite recursion on any cycle through it.
    step = 1
    S = collections.deque()
    res = []
    for u in G:
        if G[u]['n'] == 0:
            # BUG FIX: the counter must be threaded through the recursion.
            # Previously `step` was passed by value and never advanced across
            # calls, so every vertex received the same DFS number.
            step = SCCSearch_r(G, u, step, S, res)
    return res


def SCCSearch_r(G, u, step, S, res):
    """DFS helper for :func:`SCCSearch` (Tarjan's algorithm).

    Numbers ``u``, recurses into its successors, and pops a completed
    component off stack ``S`` when ``u`` turns out to be its root.

    :returns: the next unused DFS number.
    """
    G[u]['p'] = G[u]['n'] = step
    step += 1
    S.append(u)
    for v in G[u]['adj']:
        if G[v]['n'] == 0:
            # Tree edge: recurse, then propagate the child's low-point.
            step = SCCSearch_r(G, v, step, S, res)
            G[u]['p'] = min(G[u]['p'], G[v]['p'])
        elif v in S and G[v]['n'] < G[u]['n']:
            # BUG FIX: only vertices still on the stack may lower the
            # low-point; cross edges into already-emitted components must
            # not, or their root test would never succeed.
            G[u]['p'] = min(G[u]['p'], G[v]['n'])
    if G[u]['p'] == G[u]['n']:
        # u is the root of a component: pop the stack down to u inclusive.
        scc = []
        while True:
            w = S.pop()
            scc.append(w)
            if w == u:
                break
        res.append(scc)
    return step
794
346
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import pytest

from utils_nlp.common.pytorch_utils import dataloader_from_dataset
from utils_nlp.models.transformers.named_entity_recognition import TokenClassificationProcessor, TokenClassifier


@pytest.mark.cpu
def test_token_classifier_fit_predict(tmpdir, ner_test_data):
    """Smoke-test fit and predict for the BERT-based token classifier."""
    model_name = "bert-base-uncased"
    classifier = TokenClassifier(model_name=model_name, num_labels=6, cache_dir=tmpdir)
    preprocessor = TokenClassificationProcessor(model_name=model_name, cache_dir=tmpdir)

    # Preprocess the fixture data, then fit without warmup.
    dataset = preprocessor.preprocess_for_bert(
        text=ner_test_data["INPUT_TEXT"],
        labels=ner_test_data["INPUT_LABELS"],
        label_map=ner_test_data["LABEL_MAP"],
    )
    loader = dataloader_from_dataset(dataset)
    classifier.fit(loader)

    # Predict on the same loader; no labels are required for inference.
    _ = classifier.predict(loader, verbose=False)
973
317
""" =============================================================================== Finding local maxima =============================================================================== The ``peak_local_max`` function returns the coordinates of local peaks (maxima) in an image. A maximum filter is used for finding local maxima. This operation dilates the original image and merges neighboring local maxima closer than the size of the dilation. Locations where the original image is equal to the dilated image are returned as local maxima. """ from scipy import ndimage import matplotlib.pyplot as plt from skimage.feature import peak_local_max from skimage import data, img_as_float im = img_as_float(data.coins()) # image_max is the dilation of im with a 20*20 structuring element # It is used within peak_local_max function image_max = ndimage.maximum_filter(im, size=20, mode='constant') # Comparison between image_max and im to find the coordinates of local maxima coordinates = peak_local_max(im, min_distance=20) # display results plt.figure(figsize=(8, 3)) plt.subplot(131) plt.imshow(im, cmap=plt.cm.gray) plt.axis('off') plt.title('Original') plt.subplot(132) plt.imshow(image_max, cmap=plt.cm.gray) plt.axis('off') plt.title('Maximum filter') plt.subplot(133) plt.imshow(im, cmap=plt.cm.gray) plt.autoscale(False) plt.plot([p[1] for p in coordinates], [p[0] for p in coordinates], 'r.') plt.axis('off') plt.title('Peak local max') plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0.02, left=0.02, right=0.98) plt.show()
1,575
540
import random

from Relium import calcurate, classes, parser, constant

# Displays each line at a random vertical position, one line at a time.

source_file = r""
target_start_offset = 31999
target_end_offset = 34666
avgbpm = 180

# Maximum note height (raising this too far makes the notes invisible).
max_laneheight = 370

beat = 4
sample_set = 1
sample_index = 0
volume = 64
effects = 1

## Main ##
parsed_map = parser.parsefile(source_file)

target_hitobjects = [output for output in parsed_map.HitObjects
                     if target_start_offset <= output.offset <= target_end_offset]

# BUG FIX: the original removed items from target_hitobjects while iterating
# over it, which silently skips elements. Build a new list instead, keeping
# the first object of every run of equal offsets.
deduped_hitobjects = []
last_process_offset = None
for hitobject in target_hitobjects:
    if hitobject.offset != last_process_offset:
        deduped_hitobjects.append(hitobject)
    last_process_offset = hitobject.offset
target_hitobjects = deduped_hitobjects

result_object = classes.ParsedBeatmap([], [])

last_index = len(target_hitobjects) - 1
for i, hitobject in enumerate(target_hitobjects):
    if i == 0:
        # First object: freeze scrolling right at the note, release just after.
        result_object.TimingPoints.append(classes.TimingPoint(
            hitobject.offset, constant.inf_bpm,
            beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(
            hitobject.offset + 1, constant.zero_bpm,
            beat, sample_set, sample_index, volume, False, effects))
    elif i == last_index:
        # Last object: random height just before it, then restore the average
        # BPM. (The dead `if i == 0` branch here was unreachable: index 0 is
        # always handled by the first branch, including the single-object case.)
        # The hard-coded 370 now uses max_laneheight.
        result_object.TimingPoints.append(classes.TimingPoint(
            hitobject.offset - 1,
            calcurate.line_notesposition(avgbpm, random.uniform(1, max_laneheight)),
            beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(
            hitobject.offset, calcurate.timingpoint(avgbpm),
            beat, sample_set, sample_index, volume, False, effects))
    else:
        # Middle objects: random height just before, freeze at the note,
        # release just after. (Here too the `if i == 0` sub-branch was dead.)
        result_object.TimingPoints.append(classes.TimingPoint(
            hitobject.offset - 1,
            calcurate.line_notesposition(avgbpm, random.uniform(1, max_laneheight)),
            beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(
            hitobject.offset, constant.inf_bpm,
            beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(
            hitobject.offset + 1, constant.zero_bpm,
            beat, sample_set, sample_index, volume, False, effects))

# NOTE: the output path appears to be resolved relative to the console's
# working directory.
parser.parsesave(result_object, "export.txt")
2,662
977
import unittest

"""
graphe_test.py
Created by lwi19
Copyright © 2020 Louis Plouffe. All rights reserved.
"""

"""
Test module for graph_theory
Call method:
python3 -m unittest discover -p "*test.py" -s ./tests -v

Graphs are imported in this file,
"""

# import util.graph_lib as glb
from data.some_of_them import g_01, g_02, g_03, g_06, g_07, g_08


class Testfunctions(unittest.TestCase):

    def test_vertices(self):
        """
        Test the presence of a vertices list in a graph.
        :return: True if the list match.
        """
        stuff_to_find = {5, 1, 2, 4, 0, 3}
        stuff_calculated = g_01.vertices
        self.assertSetEqual(stuff_to_find, stuff_calculated,
                            f"error: vertices(), {stuff_to_find} is not expected")

    def test_edges(self):
        """
        Test the presence of a list of edges in a graph.
        :return: True if the list match
        """
        edges_tuple = (0, 3), (1, 2), (2, 3), (2, 4)
        stuff_to_find = set(edges_tuple)
        stuff_calculated = g_01.edges()
        self.assertSetEqual(stuff_to_find, stuff_calculated,
                            f"error: edges(), {stuff_to_find} is not expected")

    def test_is_edge(self):
        """
        Test several vertices in a graph.
        :return: True if they exist in a graph.
        """
        stuff_to_find_1 = (18, 19)
        stuff_to_find_2 = (19, 18)
        stuff_to_find_3 = (88, 151)  # this vertex must not pass the test
        self.assertTrue(g_02.is_edge(*stuff_to_find_1),
                        f"error: is_edges() {stuff_to_find_1} is not expected")
        self.assertTrue(g_02.is_edge(*stuff_to_find_2),
                        f"error: is_edges() {stuff_to_find_2} is not expected")
        self.assertFalse(g_02.is_edge(*stuff_to_find_3),
                         f"error: is_edges() {stuff_to_find_3} is not expected")

    def test_breadth_or_depth(self):
        """
        Test two different methods of searching: BFS and DFS.
        They must return the same extraction path.
        :return:
        """
        # "ATTENTION: DFS is very long to calculate for a large graph"
        dfs_components = g_06.extract_components(style="DFS")[0]
        bfs_components = g_06.extract_components(style="BFS")[0]
        self.assertTrue([set(i) for i in dfs_components.values()] ==
                        [set(j) for j in bfs_components.values()],
                        "error: DFS and BFS mismatch")

    def test_find_shortest_path(self):
        """
        Test shortest path between two random vertices.
        The calculation is verified in the two possible directions:
        both must describe the same path once one of them is reversed.
        :return: True if the same result.
        """
        # Do the calculation on two distinct random numbers.
        # TODO: ensure that the vertices are connected (in the same components)
        if rep := g_06.two_random_vertices():
            vertex_one, vertex_two = rep
            if path_in_order := g_06.find_shortest_path(vertex_one, vertex_two):
                # BUG FIX: list.reverse() mutates in place and returns None,
                # so the previous walrus assignment was always falsy and the
                # equality assertion below never executed.  Use reversed()
                # to obtain a comparable list.
                reverse_path = g_06.find_shortest_path(vertex_two, vertex_one)
                self.assertTrue(reverse_path,
                                f"error: no possible path between vertices {vertex_two} and {vertex_one}")
                path_in_reverse_order = list(reversed(reverse_path))
                print(path_in_order)
                print(path_in_reverse_order)
                self.assertEqual(path_in_order, path_in_reverse_order,
                                 "error : shortest path miscalculation ")
            else:
                # path_in_order is falsy when no path exists.
                self.assertTrue(path_in_order,
                                f"error: no possible path between vertices {vertex_one} and {vertex_two}")
        else:
            # rep is falsy when no valid vertices could be drawn.
            self.assertTrue(rep, "error: no valid vertices found")

    # Starting now, all methods with pattern test*z could modify graphs.
    # The 'z' is used to ensure execution after the other methods
    # because graphs are modified.

    def test_z_add_edge(self):
        """
        Test the possibility to add an edge
        :return: True if it is added correctly
        """
        # g_01 is modified here
        node_to_add = (5, 1)
        g_01.add_edge(*node_to_add)
        self.assertTrue(g_01.is_edge(*node_to_add), "edge cannot be added")
        node_to_add = (0, "X")
        g_01.add_edge(*node_to_add)
        self.assertFalse(g_01.is_edge(*node_to_add), "ERROR: impossible edge are added ?")

    def test_z_delete_edge(self):
        """
        Test the possibility to delete an edge
        :return: True if it is deleted correctly
        """
        # g_03 is modified here
        edge_to_delete = (4, 5)
        edge_exist = g_03.is_edge(*edge_to_delete)
        g_03.delete_edge(*edge_to_delete)
        self.assertTrue(edge_exist, "cannot delete edge")
        self.assertFalse(g_03.is_edge(*edge_to_delete))

    def test_z_delete_vertex(self):
        """
        Test the possibility to delete one vertex
        :return: True if it is deleted correctly
        """
        # g_03 is modified here.
        # Not only the vertex is deleted, but also all edges connected to it.
        vertex_to_delete = g_03.one_random_vertex()
        self.assertTrue(vertex_to_delete in g_03.graph_dict.keys(),
                        f"{vertex_to_delete} vertex is not part of the graph.")
        g_03.delete_vertex(vertex_to_delete)
        self.assertFalse(vertex_to_delete in g_03.graph_dict.keys())
        # When deleted, a vertex must not appear in any adjacency list.
        self.assertFalse(any(vertex_to_delete in neighbours
                             for neighbours in g_03.graph_dict.values()))


if __name__ == "__main__":
    unittest.main()
5,527
1,791
import json

from facebook_scraper import get_profile

# pass through name from webpage once issue is fixed
profile = get_profile("passparam", cookies="./fbcookies.json")

# Pretty-print to the console, then persist the same JSON to disk.
formatted = json.dumps(profile, indent=2)
print(formatted)

with open("fb_info.json", "w") as outfile:
    json.dump(profile, outfile, indent=2)
375
130
import connexion
import six

from ga4ghtest.models import Plugin  # noqa: E501
from ga4ghtest import util
from ga4ghtest.core.controllers import plugins_controller as controller


def create_plugin(body):  # noqa: E501
    """Create a test plugin

    Add a plugin for testing functionality of an API. # noqa: E501

    :param body:
    :type body: dict | bytes

    :rtype: str
    """
    # Re-hydrate the model from the raw JSON payload when one was sent.
    if connexion.request.is_json:
        body = Plugin.from_dict(connexion.request.get_json())  # noqa: E501
    return controller.create_plugin(body=body)


def get_plugins(sort_by='created_at', order='desc', limit=3):  # noqa: E501
    """Get test plugins

    Get the list of available test plugins. # noqa: E501

    :param sort_by: logic by which to sort matched records
    :type sort_by: str
    :param order: sort order (ascending or descending)
    :type order: str
    :param limit: maximum number of records to return
    :type limit: int

    :rtype: str
    """
    # Thin shim: delegate straight to the core controller.
    return controller.get_plugins(sort_by=sort_by, order=order, limit=limit)
1,106
372
import os

from split_settings.tools import include, optional

# `or` (rather than a getenv default) also falls back when DJANGO_ENV is
# set but empty.
ENVIRONMENT = os.getenv('DJANGO_ENV') or 'development'

_SETTINGS_FILES = (
    # Environment settings first; local/env.py may "patch" any of them.
    'base/env.py',
    optional('local/env.py'),

    # These three are listed explicitly because of load-order dependencies.
    'base/paths.py',
    'base/apps.py',
    'base/middleware.py',

    # Everything else under base/.
    'base/*.py',

    # Settings for the selected environment.
    'environments/%s.py' % ENVIRONMENT,

    # Optional local overrides come last.
    optional('local/*.py'),
)

include(*_SETTINGS_FILES)
597
189
# flake8: noqa from cereal import car from selfdrive.car import dbc_dict from common.params import Params Ecu = car.CarParams.Ecu # Steer torque limits class SteerLimitParams: params = Params() STEER_MAX = int(params.get('SteerMaxAdj')) # 409 is the max, 255 is stock STEER_DELTA_UP = int(params.get('SteerDeltaUpAdj')) STEER_DELTA_DOWN = int(params.get('SteerDeltaDownAdj')) STEER_DRIVER_ALLOWANCE = 50 STEER_DRIVER_MULTIPLIER = 2 STEER_DRIVER_FACTOR = 1 class CAR: # genesis GENESIS = "GENESIS 2015-2016" GENESIS_G70 = "GENESIS G70 2018" GENESIS_G80 = "GENESIS G80 2017" GENESIS_G90 = "GENESIS G90 2017" # hyundai ELANTRA = "HYUNDAI ELANTRA LIMITED ULTIMATE 2017" ELANTRA_GT_I30 = "HYUNDAI I30 N LINE 2019 & GT 2018 DCT" SONATA = "HYUNDAI SONATA 2020" SONATA_HEV = "HYUNDAI SONATA HEV 2020" SONATA19 = "HYUNDAI SONATA 2019" SONATA19_HEV = "HYUNDAI SONATA 2019 HEV" KONA = "HYUNDAI KONA 2019" KONA_EV = "HYUNDAI KONA EV 2019" KONA_HEV = "HYUNDAI KONA HEV 2019" IONIQ_EV = "HYUNDAI IONIQ ELECTRIC LIMITED 2019" IONIQ_HEV = "HYUNDAI IONIQ HYBRID PREMIUM 2017" SANTA_FE = "HYUNDAI SANTA FE LIMITED 2019" PALISADE = "HYUNDAI PALISADE 2020" VELOSTER = "HYUNDAI VELOSTER 2019" GRANDEUR = "GRANDEUR IG 2017-2020" GRANDEUR_HEV = "GRANDEUR IG HEV 2019-2020" NEXO = "HYUNDAI NEXO" # kia FORTE = "KIA FORTE E 2018" OPTIMA = "KIA OPTIMA SX 2019 & 2016" OPTIMA_HEV = "KIA OPTIMA HYBRID 2017 & SPORTS 2019" SPORTAGE = "KIA SPORTAGE S 2020" SORENTO = "KIA SORENTO GT LINE 2018" STINGER = "KIA STINGER GT2 2018" NIRO_EV = "KIA NIRO EV 2020 PLATINUM" NIRO_HEV = "KIA NIRO HEV 2018" CEED = "KIA CEED 2019" CADENZA = "KIA K7 2016-2019" CADENZA_HEV = "KIA K7 HEV 2016-2019" class Buttons: NONE = 0 RES_ACCEL = 1 SET_DECEL = 2 GAP_DIST = 3 CANCEL = 4 params = Params() fingerprint_issued_fix = params.get('FingerprintIssuedFix') == "1" if fingerprint_issued_fix: # 핑거인식문제 혹은 다른차량과 핑거프린트 충돌이 나는경우 여기다가 핑거를 넣으시고 개발자 메뉴에서 핑거프린트 이슈차량 전용을 켜면 적용됩니다. 
FINGERPRINTS = { # genesis CAR.GENESIS: [{}], CAR.GENESIS_G70: [{}], CAR.GENESIS_G80: [{}], CAR.GENESIS_G90: [{}], # hyundai CAR.ELANTRA: [{}], CAR.ELANTRA_GT_I30: [{}], CAR.SONATA: [{}], CAR.SONATA_HEV: [{}], CAR.SONATA19: [{}], CAR.SONATA19_HEV: [{}], CAR.KONA: [{}], CAR.KONA_EV: [{}], CAR.KONA_HEV: [{}], CAR.IONIQ_HEV: [{}], CAR.IONIQ_EV: [{}], CAR.SANTA_FE: [{}], CAR.PALISADE: [{}], CAR.VELOSTER: [{}], CAR.GRANDEUR: [{}], CAR.GRANDEUR_HEV: [{}], CAR.NEXO: [{}], # kia CAR.FORTE: [{}], CAR.OPTIMA: [{}], CAR.OPTIMA_HEV: [{}], CAR.SPORTAGE: [{}], CAR.SORENTO: [{}], CAR.STINGER: [{}], CAR.NIRO_EV: [{}], CAR.NIRO_HEV: [{}], CAR.CEED: [{}], CAR.CADENZA: [{}], CAR.CADENZA_HEV: [{}] } else: # 핑거 프린트 이슈 없는 차량은 이곳에 넣으세요. FINGERPRINTS = { # genesis CAR.GENESIS: [{}], CAR.GENESIS_G70: [{}], CAR.GENESIS_G80: [{}], CAR.GENESIS_G90: [{}], # hyundai CAR.ELANTRA: [{}], CAR.ELANTRA_GT_I30: [{}], CAR.SONATA: [{}], CAR.SONATA_HEV: [{}], CAR.SONATA19: [{}], CAR.SONATA19_HEV: [{}], CAR.KONA: [{}], CAR.KONA_EV: [{}], CAR.KONA_HEV: [{}], CAR.IONIQ_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470:8, 1476: 8, 1535: 8}, {68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576:8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}, {68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 
1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}], CAR.IONIQ_EV: [{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8}, {127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 545: 8, 546: 8, 548: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8}, {127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 546: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8}], CAR.SANTA_FE: [{}], CAR.PALISADE: [{}], CAR.VELOSTER: [{}], CAR.GRANDEUR: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 516: 8, 524: 8, 528: 8, 532: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 
1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8},{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 854 : 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8 , 1151: 6, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312 : 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6 , 1456: 4, 1470: 8},{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 549: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8},{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8},{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 
1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8},{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8}], CAR.GRANDEUR_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 
1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},{ 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}], CAR.NEXO: [{}], # kia CAR.FORTE: [{}], CAR.OPTIMA: [{}], CAR.OPTIMA_HEV: [{}], CAR.SPORTAGE: [{}], CAR.SORENTO: [{}], CAR.STINGER: [{}], CAR.NIRO_EV: [{}], CAR.NIRO_HEV: [{}], CAR.CEED: [{}], CAR.CADENZA: [{}], CAR.CADENZA_HEV: [{}] } # Don't use these fingerprints for fingerprinting, they are still used for ECU detection IGNORED_FINGERPRINTS = [CAR.VELOSTER, CAR.GENESIS_G70, CAR.KONA] CHECKSUM = { "crc8": [CAR.SANTA_FE, CAR.SONATA, CAR.PALISADE, CAR.SONATA_HEV], "6B": [CAR.SORENTO, CAR.GENESIS], } FEATURES = { # 캔오류 관련, 오류가 발생하는 경우는 본인 차종에 맞지 않는 캔신호가 들어오기때문입니다. 대부분 이곳을 수정하면 해결되나, 부득이 판다코드를 수정해야 될수도 있습니다. # debug 코드가 포함되어 있으면, /data/openpilot/selfdrive/debug 안에 몇가지 툴이 들어있습니다. 실행하시면 디버그에 도움이 되실겁니다. 팟팅!!! 
# Use Cluster for Gear Selection, rather than Transmission "use_cluster_gears": {CAR.ELANTRA, CAR.KONA, CAR.ELANTRA_GT_I30, CAR.CADENZA, CAR.GRANDEUR}, # Use TCU Message for Gear Selection "use_tcu_gears": {CAR.OPTIMA, CAR.SONATA19, CAR.VELOSTER}, # Use E_GEAR Message for Gear Selection "use_elect_gears": {CAR.SONATA_HEV, CAR.SONATA19_HEV, CAR.KONA_EV, CAR.KONA_HEV, CAR.IONIQ_EV, CAR.IONIQ_HEV, CAR.GRANDEUR_HEV, CAR.NEXO, CAR.OPTIMA_HEV, CAR.CADENZA_HEV, CAR.NIRO_EV, CAR.NIRO_HEV}, # 전기차 or 하이브리드 기어인식 부분 # Use E_EMS11 Message for Gas and Brake for Hybrid/ELectric "use_elect_ems": {CAR.SONATA_HEV, CAR.SONATA19_HEV, CAR.KONA_EV, CAR.KONA_HEV, CAR.IONIQ_EV, CAR.IONIQ_HEV, CAR.GRANDEUR_HEV, CAR.NEXO, CAR.OPTIMA_HEV, CAR.CADENZA_HEV, CAR.NIRO_EV, CAR.NIRO_HEV}, # 전기차 or 하이브리드 차량 넣어주세요.(가속페달관련) # send LFA MFA message for new HKG models "send_lfa_mfa": {CAR.GRANDEUR_HEV, CAR.GRANDEUR, CAR.KONA_HEV}, #차량의 LFA아이콘(핸들모양 아이콘)을 켜지게 하려면 여기다가 본인 차종을 넣으세요. "has_scc13": set([]), "has_scc14": set([]), # these cars use the FCA11 message for the AEB and FCW signals, all others use SCC12 "use_fca": {CAR.SONATA, CAR.ELANTRA, CAR.ELANTRA_GT_I30, CAR.PALISADE, CAR.GENESIS_G70, CAR.GRANDEUR_HEV, CAR.KONA_HEV}, # 전방추돌관련 계기판 오류가 발생할 경우 여기다 본인 차종을 넣어보세요. "use_bsm": {CAR.SONATA, CAR.PALISADE, CAR.GENESIS, CAR.GENESIS_G70, CAR.GENESIS_G80, CAR.GENESIS_G90, CAR.NEXO, CAR.KONA, CAR.SONATA_HEV, CAR.SONATA19_HEV, CAR.KONA_EV, CAR.KONA_HEV, CAR.IONIQ_EV, CAR.IONIQ_HEV, CAR.GRANDEUR_HEV, CAR.OPTIMA_HEV, CAR.CADENZA_HEV, CAR.NIRO_EV, CAR.NIRO_HEV, CAR.ELANTRA, CAR.KONA, CAR.ELANTRA_GT_I30, CAR.CADENZA, CAR.GRANDEUR, CAR.OPTIMA, CAR.SONATA19, CAR.VELOSTER}, #후측방 감지 BSM 옵션이 있는 차량의 경우 넣어주세요. 
}

# DBC (CAN message database) selection per platform.  Every supported
# Hyundai / Kia / Genesis model here shares the single generic DBC; the
# second argument (presumably a radar-specific DBC — confirm against
# dbc_dict's definition) is None for all of them.
DBC = {
  # genesis
  CAR.GENESIS: dbc_dict('hyundai_kia_generic', None),
  CAR.GENESIS_G70: dbc_dict('hyundai_kia_generic', None),
  CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None),
  CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None),
  # hyundai
  CAR.ELANTRA: dbc_dict('hyundai_kia_generic', None),
  CAR.ELANTRA_GT_I30: dbc_dict('hyundai_kia_generic', None),
  CAR.SONATA: dbc_dict('hyundai_kia_generic', None),
  CAR.SONATA_HEV: dbc_dict('hyundai_kia_generic', None),
  CAR.SONATA19: dbc_dict('hyundai_kia_generic', None),
  CAR.SONATA19_HEV: dbc_dict('hyundai_kia_generic', None),
  CAR.KONA: dbc_dict('hyundai_kia_generic', None),
  CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
  CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None),
  CAR.IONIQ_EV: dbc_dict('hyundai_kia_generic', None),
  CAR.IONIQ_HEV: dbc_dict('hyundai_kia_generic', None),
  CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', None),
  CAR.PALISADE: dbc_dict('hyundai_kia_generic', None),
  CAR.VELOSTER: dbc_dict('hyundai_kia_generic', None),
  CAR.GRANDEUR: dbc_dict('hyundai_kia_generic', None),
  CAR.GRANDEUR_HEV: dbc_dict('hyundai_kia_generic', None),
  CAR.NEXO: dbc_dict('hyundai_kia_generic', None),
  # kia
  CAR.FORTE: dbc_dict('hyundai_kia_generic', None),
  CAR.OPTIMA: dbc_dict('hyundai_kia_generic', None),
  CAR.OPTIMA_HEV: dbc_dict('hyundai_kia_generic', None),
  CAR.SPORTAGE: dbc_dict('hyundai_kia_generic', None),
  CAR.SORENTO: dbc_dict('hyundai_kia_generic', None),
  CAR.STINGER: dbc_dict('hyundai_kia_generic', None),
  CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', None),
  CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', None),
  CAR.CEED: dbc_dict('hyundai_kia_generic', None),
  CAR.CADENZA: dbc_dict('hyundai_kia_generic', None),
  CAR.CADENZA_HEV: dbc_dict('hyundai_kia_generic', None),
}

# NOTE(review): presumably the minimum driver steering-torque magnitude that
# counts as an intentional override — confirm against the carstate code that
# consumes it.
STEER_THRESHOLD = 150
15,854
12,391
# -*- coding: utf-8 -*-
'''
Utility methods for dictionary
==============================
'''
__all__ = (
        'calling_dict_from',
        'combine_dict',
        'dict_sorted')


from itertools import chain
from typing import Tuple, Union

from builder.utils import assertion


def calling_dict_from(calling: Union[str, dict], name: str) -> dict:
    ''' Construct a calling dictionary for Person class.

    Args:
        calling: either a ready-made calling dict, or a string parsed
                 with ':' separators via dict_from_string.
        name: the person's name, stored under the 'S' key.

    Returns:
        The calling dict extended with 'S' (name) and 'M' (first-person
        pronoun, defaulting to '私' when no 'me' entry exists).
    '''
    # Imported lazily to avoid a circular import at module load time.
    from builder.utils.util_str import dict_from_string
    # BUGFIX: the annotation was the tuple `(str, dict)`, which is not a
    # valid type hint; Union[str, dict] expresses the intended contract.
    if isinstance(calling, dict):
        tmp = calling
    else:
        tmp = dict_from_string(assertion.is_str(calling), ':')
    me = tmp.get('me', '私')
    return combine_dict(tmp, {'S': name, 'M': me})


def combine_dict(a: dict, b: dict) -> dict:
    ''' Combine one dictionary from two dictionaries.

    Entries of ``b`` win on key collisions.
    '''
    return {**assertion.is_dict(a), **assertion.is_dict(b)}


def dict_sorted(origin: dict, is_reverse: bool = False) -> dict:
    ''' Return a new dict with items sorted by key.

    Args:
        origin: the dictionary to sort.
        is_reverse: sort keys in descending order when True.
    '''
    return dict(
            sorted(assertion.is_dict(origin).items(),
                key=lambda x: x[0],
                reverse=assertion.is_bool(is_reverse)))
1,104
358
# Guarded imports: pyspark may be missing on a dev box; log and continue so
# the error is visible (the script will still fail on first use below).
try:
    from zipfile import ZipFile
    from pyspark import SparkContext, SparkConf
    from pyspark.sql import SparkSession
    import pyspark.sql.functions as f
    import os
except Exception as e:
    print(e)

## http://www.hongyusu.com/imt/technology/spark-via-python-basic-setup-count-lines-and-word-counts.html


def push_acc():
    """Read the accidents CSV data from the shared volume and write it to HDFS
    as parquet via a Spark job submitted to the standalone cluster master."""
    spark = SparkSession.builder \
        .master('spark://master:7077') \
        .appName("Push Accidents data to HDFS") \
        .getOrCreate()
    sc = spark.sparkContext
    sc.setLogLevel('WARN')

    # unzip the file (one-time step, kept for reference)
    # with ZipFile("/volume/data/accidents_2012_2018.zip", 'r') as zipObj:
    #     zipObj.extractall('/volume/data')

    # read the data from the volume
    acc_data = spark.read.csv("/volume/data/")

    # push the data on HDFS as parquet
    acc_data.write.parquet("hdfs://hadoop/acc_data_parquet")


if __name__ == "__main__":
    push_acc()
924
326
""" DANK MEMER IMGEN API CLIENT --------------------------- Copyright: Copyright 2019 Melms Media LLC License: MIT """ from discord import Client from imgen import AsyncClient bot = Client() memegen = AsyncClient(token='tokengoeshere') @bot.event async def on_ready(): print('Logged in as %s' % bot.user) @bot.event async def on_message(msg): if msg.content.lower().startswith('!magik'): magik = await memegen.magik.get_as_discord(avatar1=msg.author.avatar_url) return await msg.channel.send(file=magik) elif msg.content.lower().startswith('!crab'): parsed = msg.content.replace('!crab ', '') parsed = parsed.replace(', ', ',') if len(parsed.split(',')) != 2: return await msg.channel.send('Please split the text with a comma, e.g. !crab upper, bottom') crab = await memegen.crab.get_as_discord(text=parsed) return await msg.channel.send(file=crab) bot.run('bottokengoeshere')
966
328
from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import Correlation
89
28
"""PPO training loop for a CNN policy/value pair on a gym image environment."""
import torch
import torch.nn.functional as F
from torch.utils import tensorboard
import argparse
import numpy as np
import os
from statistics import mean, stdev
from tqdm import tqdm
import json

import gym
import simple_discrete_game
from models.cnn_agent import cnn_value_net, cnn_policy_net
from utils.memory import MainMemory
from utils.reproducibility import set_seed, log_params
from algo.ppo_step import calc_ppo_loss_gae

parser = argparse.ArgumentParser()
parser.add_argument("--env-name", default="GoalGrid-v0")
parser.add_argument("--exp-name", default="goalgrid_seed_1")
parser.add_argument("--batch-size", type=int, default=1000, help="batch_size")
parser.add_argument("--full-ppo-iters", type=int, default=500, help="num times whole thing is run")
parser.add_argument("--seed", type=int, default=1, help="set random seed for reproducibility ")
parser.add_argument("--num-value-updates", type=int, default=4, help="update critic per epoch")
parser.add_argument("--num-policy-updates", type=int, default=4, help="update agent per epoch")
parser.add_argument("--num-evaluate", type=int, default=20, help="eval per epoch")
# NOTE: the "lenght" spelling is kept so existing command lines keep working.
parser.add_argument(
    "--episode-max-lenght", type=int, default=100, help="max lenght to run an episode"
)
parser.add_argument("--save-interval", type=int, default=100, help="save weights every x episodes")
# BUGFIX: learning rates are floats; with type=int, passing e.g. "--agent-lr 0.002"
# on the command line raised an argparse error.
parser.add_argument("--agent-lr", type=float, default=0.002, help="agent learning rate")
parser.add_argument("--critic-lr", type=float, default=0.001, help="critic learing rate")
args = parser.parse_args()
json_log = log_params(args)

cuda = torch.device("cuda")
cpu = torch.device("cpu")


## Helper functions
def flat_tensor(t):
    """Convert a numpy array to a flat 1-D float32 tensor."""
    return torch.from_numpy(t).float().view(-1)


def preprocess_obs_img(obs):
    """HWC uint8 image -> CHW float tensor scaled to [0, 1]."""
    obs = np.moveaxis(obs, 2, 0)  # channels first
    obs = obs / 255.0             # normalize
    return torch.from_numpy(obs).float()


def calculate_gae(memory, gamma=0.99, lmbda=0.95):
    """Fill memory.returns and memory.advantages using GAE(lambda).

    Walks the rollout backwards; terminal steps cut the bootstrap chain.
    Advantages are standardized before being stored.
    """
    gae = 0
    for i in reversed(range(len(memory.rewards))):
        delta = (
            memory.rewards[i]
            + gamma * memory.values[i + 1] * (not memory.is_terminals[i])
            - memory.values[i]
        )
        gae = delta + gamma * lmbda * (not memory.is_terminals[i]) * gae
        memory.returns.insert(0, gae + memory.values[i])
    adv = np.array(memory.returns) - memory.values[:-1]
    # normalize advantages
    memory.advantages = (adv - np.mean(adv)) / (np.std(adv) + 1e-10)


def collect_exp_single_actor(env, actor, memory, iters):
    """Roll out `actor` in `env` for `iters` steps, appending transitions to `memory`.

    Episodes are truncated at args.episode_max_lenght.  Rewards are standardized
    in place before returning the memory.
    """
    obs = env.reset()
    time_step = 0
    for _ in range(iters):
        obs = preprocess_obs_img(obs)
        memory.states.append(np.array(obs))
        action, log_prob = actor.act(obs.unsqueeze(0))
        next_obs, reward, done, info = env.step(action.item())
        memory.is_terminals.append(done)
        memory.actions.append(action.item())
        memory.logprobs.append(log_prob.item())
        memory.rewards.append(reward)
        obs = next_obs
        time_step += 1
        if done or time_step >= args.episode_max_lenght:
            obs = env.reset()
            # BUGFIX: the step counter was never reset, so after the first
            # truncation the env was reset on every subsequent step.
            time_step = 0
    # BUGFIX: standardize rewards by subtracting the mean; the original
    # computed (i + m) / std, which shifts rewards instead of centering them.
    m = mean(memory.rewards)
    std = stdev(memory.rewards) + 1e-5
    memory.rewards = [(i - m) / std for i in memory.rewards]
    return memory


def save_episode_as_gif(agent, env, episode_max_lenght, gif_name):
    """Run one episode with `agent` and dump the rendered frames as a gif."""
    obs_to_vis = []
    obs = env.reset()
    for timestep in range(0, episode_max_lenght):
        obs = preprocess_obs_img(obs)
        # BUGFIX: use the `agent` argument; the original ignored it and read
        # the global main_actor.
        action, _ = agent.act(obs.unsqueeze(0))
        obs, _, done, _ = env.step(action.item())
        obs_to_vis.append(env.render(mode="rgb_array"))
        if done:
            break
    # NOTE(review): write_gif is not imported anywhere in this file
    # (presumably from the array2gif package) — add the import before using
    # this helper.
    write_gif(obs_to_vis, gif_name + ".gif", fps=30)


if __name__ == "__main__":
    # creating environment
    env = gym.make(args.env_name)
    set_seed(args.seed, env)
    n_actions = env.action_space.n
    n_channels = 3

    # create nn's
    main_actor = cnn_policy_net(n_channels, n_actions)
    critic = cnn_value_net(n_channels)
    optim_actor = torch.optim.Adam(main_actor.parameters(), lr=args.agent_lr, betas=(0.9, 0.999))
    # BUGFIX: the critic optimizer used args.agent_lr; it now honours --critic-lr.
    optim_critic = torch.optim.Adam(critic.parameters(), lr=args.critic_lr, betas=(0.9, 0.999))

    # create memory
    main_memory = MainMemory(batch_size=args.batch_size)

    # logging
    tb_summary = tensorboard.SummaryWriter()

    for ppo_iter in tqdm(range(args.full_ppo_iters + 1)):
        # collect a fresh batch of on-policy experience
        main_memory = collect_exp_single_actor(env, main_actor, main_memory, args.batch_size)
        critic.to(cuda)
        main_actor.to(cuda)
        main_memory.critic_values(critic, cuda)
        calculate_gae(main_memory)

        # policy updates
        for k in range(args.num_policy_updates):
            optim_actor.zero_grad()
            ppo_loss = calc_ppo_loss_gae(main_actor, main_memory)
            ppo_loss.backward()
            optim_actor.step()

        # value loss
        value_loss_list = []
        for j in range(args.num_value_updates):
            batch_states, batch_returns = main_memory.get_value_batch()
            batch_states, batch_returns = batch_states.to(cuda), batch_returns.to(cuda)
            optim_critic.zero_grad()
            pred_returns = critic(batch_states)
            value_loss = F.mse_loss(pred_returns.view(-1), batch_returns.view(-1))
            value_loss.backward()
            optim_critic.step()
            value_loss_list.append(value_loss.item())
        tb_summary.add_scalar("loss/value_loss", mean(value_loss_list), global_step=ppo_iter)

        main_actor.to(cpu)
        main_memory.clear_memory()

        # evaluation: run args.num_evaluate episodes greedily and log stats
        eval_ep = 0
        obs = env.reset()
        eval_rewards = []
        eval_timesteps = []
        ep_reward = 0
        ep_timestep = 0
        num_done = 0
        while args.num_evaluate > eval_ep:
            ep_timestep += 1
            obs = preprocess_obs_img(obs)
            action, log_prob = main_actor.act(obs.unsqueeze(0))
            obs, reward, done, info = env.step(action.item())
            ep_reward += reward
            if done or ep_timestep >= args.episode_max_lenght:
                if done:
                    num_done += 1
                obs = env.reset()
                eval_ep += 1
                eval_timesteps.append(ep_timestep)
                eval_rewards.append(ep_reward)
                ep_reward = 0
                ep_timestep = 0
        tb_summary.add_scalar("reward/eval_reward", mean(eval_rewards), global_step=ppo_iter)
        tb_summary.add_scalar("time/eval_traj_len", mean(eval_timesteps), global_step=ppo_iter)
        tb_summary.add_scalar(
            "reward/prob_done", num_done / args.num_evaluate, global_step=ppo_iter
        )
        json_log["rewards list"].append(mean(eval_rewards))
        json_log["avg episode timesteps"].append(mean(eval_timesteps))
        json_log["prob done"].append(num_done / args.num_evaluate)
        print("eval_reward ", mean(eval_rewards), " eval_timesteps ", mean(eval_timesteps),
              "prob_done", num_done / args.num_evaluate)

        # periodic checkpointing
        if ppo_iter % args.save_interval == 0 and ppo_iter > 0:
            torch.save(
                main_actor.state_dict(), "ppo_" + args.exp_name + "_actor" + str(ppo_iter) + ".pth"
            )
            torch.save(critic.state_dict(), "ppo_" + args.exp_name + "_critic" + str(ppo_iter) + ".pth")

    # final experiment log (assumes a ./jsons directory exists)
    os.chdir(os.path.join(os.getcwd(), "jsons"))
    with open(str(args.exp_name) + ".json", "w") as fp:
        json.dump(json_log, fp, sort_keys=True, indent=4)
7,552
2,658
from flask import Flask
from flask_socketio import SocketIO
from flask_sqlalchemy import SQLAlchemy

# File-upload settings consumed elsewhere in the package.
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/dev.db'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# SECURITY NOTE(review): the session secret is hard-coded in source; for
# anything beyond local development it should come from the environment.
app.secret_key = "eusehuccuhosn23981pcgid1xth4dn"

socketio = SocketIO(app)
db = SQLAlchemy(app)

# Imported for its side effects: route handlers register themselves on `app`.
from tinder.routes import *
435
178
from pose.poseClass import pose
from pose import helpers


class dumbbell_lateral_raises(pose):
    """Joint-angle template for the dumbbell lateral raise exercise."""

    # Each key is a (point, vertex, point) triple; the tracked angle is
    # filled in by loadInfo().
    angles = {
        ('left_hip', 'left_shoulder', 'left_elbow'): None,
        ('right_hip', 'right_shoulder', 'right_elbow'): None,
        ('left_shoulder', 'left_elbow', 'left_wrist'): None,
        ('right_shoulder', 'right_elbow', 'right_wrist'): None,
    }

    def __init__(self, coordinates, conf):
        super().__init__(coordinates, conf)
        self.loadInfo()


class squats(pose):
    """Joint-angle template for the squat exercise."""

    angles = {
        ('nose', 'shoulder', 'hip'): None,
        ('shoulder', 'hip', 'knee'): None,
        ('hip', 'knee', 'ankle'): None,
    }

    def __init__(self, coordinates, conf):
        super().__init__(coordinates, conf)
        self.loadInfo()


class dumbbell_upper_head(pose):
    """Joint-angle template for the overhead dumbbell press."""

    angles = {
        ('right_shoulder', 'left_shoulder', 'left_elbow'): None,
        ('left_shoulder', 'right_shoulder', 'right_elbow'): None,
        ('right_shoulder', 'right_elbow', 'left_wrist'): None,
        ('right_shoulder', 'right_elbow', 'right_wrist'): None,
    }

    def __init__(self, coordinates, conf):
        super().__init__(coordinates, conf)
        self.loadInfo()


class push_up(pose):
    """Joint-angle template for the push-up exercise."""

    # Earlier candidate triples kept for reference:
    # ('nose', 'shoulder', 'hip'), ('shoulder', 'hip', 'knee'),
    # ('hip', 'knee', 'ankle')
    angles = {
        ('shoulder', 'ankle', 'wrist'): None,
    }

    def __init__(self, coordinates, conf):
        super().__init__(coordinates, conf)
        self.loadInfo()


class sit_up(pose):
    """Joint-angle template for the sit-up exercise."""

    angles = {
        ('nose', 'shoulder', 'hip'): None,
        ('hip', 'knee', 'ankle'): None,
        ('shoulder', 'hip', 'knee'): None,
    }

    def __init__(self, coordinates, conf):
        super().__init__(coordinates, conf)
        self.loadInfo()


class bicep_press(pose):
    """Joint-angle template for the bicep press exercise."""

    angles = {
        ('shoulder', 'elbow', 'wrist'): None,
    }

    def __init__(self, coordinates, conf):
        super().__init__(coordinates, conf)
        self.loadInfo()
1,910
629
"""Capacitive-touch MIDI controller for Raspberry Pi GPIO (wiringpi2 + rtmidi)."""
import wiringpi2
import rtmidi

from defines import *
from config import *
#import config # config.TIMEOUT


def Setup(outPin, inPin, ledPin):
    """Configure the GPIO pins for one capacitive sensor and its LED."""
    # set Send Pin Register
    wiringpi2.pinMode(outPin, OUTPUT)
    # set receivePin Register low to make sure pullups are off
    wiringpi2.pinMode(inPin, OUTPUT)
    wiringpi2.digitalWrite(inPin, LOW)
    wiringpi2.pinMode(inPin, INPUT)
    # set ledPin
    wiringpi2.pinMode(ledPin, OUTPUT)
    wiringpi2.digitalWrite(ledPin, LOW)


def CapRead(outPin, inPin, total=0, cycles=CYCLES):
    """Measure the charge/discharge time of the sensor pair over `cycles` passes.

    Returns 1 when the averaged count reaches TRIGGER (touched), 0 otherwise,
    or -2 when the count exceeds TIMEOUT (sensor fault / nothing connected).
    Recurses with the accumulated `total` until `cycles` reaches zero.
    """
    # set Send Pin Register low
    wiringpi2.digitalWrite(outPin, LOW)
    # set send Pin High
    wiringpi2.digitalWrite(outPin, HIGH)
    # while receive pin is LOW AND total is positive value
    while (wiringpi2.digitalRead(inPin) == LOW and total < TIMEOUT):
        total += 1
    # BUGFIX: the loop above can leave total == TIMEOUT at most, so the
    # original "total > TIMEOUT" test was unreachable; use >= like the
    # identical check further down.
    if (total >= TIMEOUT):
        return -2  # total variable over TIMEOUT
    # set receive pin HIGH briefly to charge up fully - because the while
    # loop above will exit when pin is ~ 2.5V
    wiringpi2.digitalWrite(inPin, HIGH)
    # set send Pin LOW
    wiringpi2.digitalWrite(outPin, LOW)
    # while receive pin is HIGH AND total is less than TIMEOUT
    while (wiringpi2.digitalRead(inPin) == HIGH and total < TIMEOUT):
        total += 1
    if (total >= TIMEOUT):
        return -2
    # decrement cycles counting
    cycles -= 1
    # if we reach the end of cycles, then...
    if (cycles == 0):
        if DEBUG:
            print("total unit count: %d" % total)
        # get the average of values over the cycles
        total = round(total / CYCLES)
        # if the average total is greater of equal to TRIGGER value
        if (total >= TRIGGER):
            return 1
        else:
            return 0
    return CapRead(outPin, inPin, total, cycles)


def ChangeBank():
    """Advance the global bank number, wrapping back to 1 past BANK_MAX."""
    global BANK
    # increment bank number
    BANK += 1
    # check for bank boundaries
    if (BANK > BANK_MAX):
        BANK = 1
    print("BANK: %d selected" % BANK)
    # blink the led x times followed by BANK number
    #i = 0
    #while ( i < BANK ):
    #    wiringpi2.delay(500)
    #    wiringpi2.digitalWrite(SENSORS[7]['led'], HIGH)
    #    wiringpi2.delay(500)
    #    wiringpi2.digitalWrite(SENSORS[7]['led'], LOW)
    #    i += 1


# Initial definitions
note = 0

# setup sensor input and output pins
for sensor in SENSORS:
    Setup(sensor['output'], sensor['input'], sensor['led'])

# Init virtual midi port
midi_out = rtmidi.MidiOut()
midi_out.open_virtual_port()

# Main polling loop: translate touch transitions into MIDI note on/off.
while True:
    for sensor in SENSORS:
        if (DEBUG):
            print("############ %s" % sensor['name'])
        value = CapRead(sensor['output'], sensor['input'])
        # BUGFIX: CapRead's timeout value (-2) is truthy, so the original
        # treated a timed-out read as a touch and sent a spurious note-on.
        # Skip the sensor and keep its previous state instead.
        if (value == -2):
            continue
        if (value and (value != sensor['last_value'])):
            # touch started
            print("############ %s" % sensor['name'])
            #if (sensor['note'][BANK-1] == 0):
            if (sensor['type'] == BANK_CHANGE):
                # change bank request
                ChangeBank()
                note = 0
            elif (sensor['type'] == BANK_SELECT):
                note = sensor['note'][BANK - 1]
            elif (sensor['type'] == RANDOM):
                # stuff for ramdom type (not implemented yet)
                note = note
            elif (sensor['type'] == SEQUENTIAL):
                # advance through the sensor's note list, wrapping around
                sensor['seq_next'] += 1
                if (sensor['seq_next'] >= len(sensor['note'])):
                    sensor['seq_next'] = 0
                note = sensor['note'][sensor['seq_next']]
            print('Send Note ON: %d' % note)
            midi_out.send_message([0x90, note, 100])  # Note on
            # set sensor led ON
            wiringpi2.digitalWrite(sensor['led'], HIGH)
        elif (value != sensor['last_value']):
            # touch released: send note-off for the matching note
            #if (sensor['note'][BANK-1] != 0):
            print("############ %s" % sensor['name'])
            if (sensor['type'] == BANK_SELECT):
                note = sensor['note'][BANK - 1]
            elif (sensor['type'] == RANDOM):
                # stuff for ramdom type (not implemented yet)
                note = note
            elif (sensor['type'] == SEQUENTIAL):
                note = sensor['note'][sensor['seq_next']]
            print('Send Note OFF: %d' % note)
            midi_out.send_message([0x80, note, 100])  # Note off
            # set sensor led Off
            wiringpi2.digitalWrite(sensor['led'], LOW)
        sensor['last_value'] = value
3,850
1,633
import numpy as np


class Loss():
    """Base class for elementwise loss functions.

    Subclasses implement __call__(predicted, labels) returning the
    elementwise loss, and output_gradient(predicted, labels) returning
    the derivative of the loss with respect to the predictions.
    """

    def __call__(self, predicted, labels):
        raise NotImplementedError

    def output_gradient(self, predicted, labels):
        # BUGFIX: the original took no arguments and silently returned None,
        # masking unimplemented subclasses; fail loudly instead.
        raise NotImplementedError


class MSE(Loss):
    """Mean squared error: 0.5 * (y_hat - y)**2, elementwise."""

    def __call__(self, predicted, labels):
        return 0.5 * np.square(predicted - labels)

    def output_gradient(self, predicted, labels):
        # d/dy_hat of 0.5*(y_hat - y)^2
        return predicted - labels


class BinaryCrossEntropy(Loss):
    """Binary cross-entropy; nan_to_num guards the log(0) edge cases."""

    def __call__(self, predicted, labels):
        return - np.nan_to_num((labels*np.log(predicted) + (1-labels)*np.log(1-predicted)))

    def output_gradient(self, predicted, labels):
        return np.nan_to_num(-(labels/predicted) + (1-labels)/(1-predicted))


class CategoricalCrossEntropy(Loss):
    """Categorical cross-entropy; classes are summed over axis 0."""

    def __call__(self, predicted, labels):
        return -np.nan_to_num(np.sum(labels*np.log(predicted), axis=0, keepdims=True))

    def output_gradient(self, predicted, labels):
        return -np.nan_to_num(labels/predicted)
841
278
import pandas as pd


class PlusN:
    """A sample transform that adds n to a specific field.

    Attributes:
        field: The field that this transform will be applied to.
        n: The value to add to the field.
    """

    identifier = "plusN"
    type_signature = "col->col"

    def __init__(self, n: int = 1) -> None:
        self.n = n

    def __call__(self, column: pd.Series) -> pd.Series:
        # Elementwise addition of the configured constant.
        return column.add(self.n)
436
137
#/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import datetime
import traceback
import pickle

'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "nsaleaks"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subs, use "sub1+sub2+sub3+...". For all use "all"
# BUGFIX: a missing comma made "NSA " and "Snowden" silently concatenate into
# the single keyword "NSA Snowden"; they are separate entries now.
KEYWORDS = [" NSA", "NSA ", "Snowden", "Greenwald"]
#Words to look for
KEYDOMAINS = []
#Domains to look for
KEYNAMES = []
#Names to look for
IGNORESELF = False
#Do you want the bot to dump selfposts? Use True or False (Use capitals! No quotations!)
TIMESTAMP = '%A %d %B %Y'
#The time format. "%A %d %B %Y" = "Wendesday 04 June 2014"
#http://docs.python.org/2/library/time.html#time.strftime
HEADER = ""
#Put this at the top of the .txt file
FORMAT = "_timestamp_: [_title_](_url_) - [r/_subreddit_](_nplink_)"
#USE THESE INJECTORS TO CREATE CUSTOM OUTPUT
#_timestamp_ which follows the TIMESTAMP format
#_title_
#_url_
#_subreddit_
#_nplink_
#_author_
PRINTFILE = "nsa"
#Name of the file that will be produced. Do not type the file extension
MAXPOSTS = 1000
#This is how many posts you want to retrieve all at once.
'''All done!'''

# Touch the four output files so the later opens cannot fail.
for m in ["_date", "_author", "_subreddit", "_title"]:
    clistfile = open(PRINTFILE + m + '.txt', "a+")
    clistfile.close()

MAXS = str(MAXPOSTS)

# Optional local override of the user agent.
try:
    import bot
    USERAGENT = bot.getaG()
except ImportError:
    pass

print('Logging in.')
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)


def work(lista):
    """Write one FORMAT-expanded line per post in `lista` to the global listfile."""
    global listfile
    if HEADER != "":
        print(HEADER, file=listfile)
    for post in lista:
        timestamp = post.created_utc
        timestamp = datetime.datetime.fromtimestamp(int(timestamp)).strftime(TIMESTAMP)
        final = FORMAT
        final = final.replace('_timestamp_', timestamp)
        final = final.replace('_title_', post.title)
        try:
            final = final.replace('_author_', post.author.name)
        except Exception:
            final = final.replace('_author_', '[DELETED]')
        final = final.replace('_subreddit_', post.subreddit.display_name)
        url = post.url
        url = url.replace('http://www.reddit.com', 'http://np.reddit.com')
        final = final.replace('_url_', url)
        slink = post.short_link
        slink = slink.replace('http://', 'http://np.')
        final = final.replace('_nplink_', slink)
        try:
            print(final, file=listfile)
        except Exception:
            # Fall back to char-by-char output so one unencodable character
            # does not lose the whole line.
            print('\t' + post.id + ': Charstepping')
            for char in final:
                try:
                    print(char, file=listfile, end='')
                except Exception:
                    pass
            print('', file=listfile)


lista = []
count = 0
counta = 0
try:
    print('Scanning.')
    subreddit = r.get_subreddit(SUBREDDIT)
    posts = subreddit.get_new(limit=MAXPOSTS)
    for post in posts:
        # Keep the post unless it is a selfpost and IGNORESELF is True.
        if not post.is_self or IGNORESELF is False:
            try:
                author = post.author.name
            except Exception:
                author = '[DELETED]'
            if any(m.lower() in post.title.lower() for m in KEYWORDS) \
                    or any(m.lower() in post.url.lower() for m in KEYDOMAINS) \
                    or any(m.lower() == author.lower() for m in KEYNAMES):
                lista.append(post)
                counta += 1
        count += 1
        print(str(count) + ' / ' + MAXS + ' | ' + str(counta))
    # Deleted accounts come back as None; replace with a sentinel string.
    for item in lista:
        if item.author is None:
            item.author = '[DELETED]'
except Exception:
    print('EMERGENCY')

print('Collected ' + str(counta) + ' items.')


def _author_key(post):
    """Sort key tolerating the '[DELETED]' string substituted above."""
    author = post.author
    return (author if isinstance(author, str) else author.name).lower()


try:
    print('Writing Date file')
    lista.sort(key=lambda x: x.created_utc, reverse=False)
    listfile = open(PRINTFILE + '_date.txt', 'w')
    work(lista)
    listfile.close()

    print('Writing Subreddit file')
    lista.sort(key=lambda x: x.subreddit.display_name.lower(), reverse=False)
    listfile = open(PRINTFILE + '_subreddit.txt', 'w')
    work(lista)
    listfile.close()

    print('Writing Title file')
    lista.sort(key=lambda x: x.title.lower(), reverse=False)
    listfile = open(PRINTFILE + '_title.txt', 'w')
    work(lista)
    listfile.close()

    print('Writing Author file')
    # BUGFIX: the original read x.author.name unconditionally, which raised
    # AttributeError for the posts whose author was replaced by '[DELETED]'.
    lista.sort(key=_author_key, reverse=False)
    listfile = open(PRINTFILE + '_author.txt', 'w')
    work(lista)
    listfile.close()
except Exception:
    # BUGFIX: traceback.print_tb() requires a traceback argument, and the
    # EMERGENCY message was a bare string expression (never printed).
    traceback.print_exc()
    print('EMERGENCY: txt writing failed')

print('Saving to Pickle.')


class Posted(object):
    """Plain attribute container for the pickled snapshot of a submission."""
    pass


listc = []
for item in lista:
    obj = Posted()
    obj.id = item.id
    obj.fullname = item.fullname
    obj.created_utc = item.created_utc
    obj.title = item.title
    obj.subreddit = item.subreddit.display_name
    obj.url = item.url
    obj.short_link = item.short_link
    try:
        obj.author = item.author.name
    except Exception:
        obj.author = '[DELETED]'
    if item.is_self is True:
        obj.is_self = True
        obj.selftext = item.selftext
    else:
        obj.is_self = False
    listc.append(obj.__dict__)

with open(PRINTFILE + '.p', 'wb') as filec:
    pickle.dump(listc, filec)
print('Done.')
5,003
1,992
""" This file is part of pynadc https://github.com/rmvanhees/pynadc Methods to query the NADC Sciamachy SQLite database Copyright (c) 2012-2021 SRON - Netherlands Institute for Space Research All Rights Reserved License: BSD-3-Clause """ from pathlib import Path import sqlite3 # -------------------------------------------------- def get_product_by_name(args=None, dbname=None, product=None, to_screen=False, dump=False, debug=False): """ Query NADC Sciamachy SQLite database on product name Input ----- args : dictionary with keys dbname, product, to_screen, dump, debug dbname : full path to Sciamachy SQLite database product : name of product [value required] to_screen : print query result to standard output [default: False] dump : return database content about product, instead of full-path debug : do not query data base, but display SQL query [default: False] Output ------ return full-path to product [default] or show database content about product """ if args: dbname = args.dbname product = args.product dump = args.dump debug = args.debug if dbname is None: print('Fatal, SQLite database is not specified') return [] if not Path(dbname).is_file(): print('Fatal, can not find SQLite database: %s' % dbname) return [] if product[0:10] == 'SCI_NL__0P': table = 'meta__0P' elif product[0:10] == 'SCI_NL__1P': table = 'meta__1P' else: table = 'meta__2P' if dump: select_str = '*' else: select_str = 'path,name,compression' query_str = 'select {} from {} where name=\'{}\''.format(select_str, table, product) # pylint: disable=no-member conn = sqlite3.connect(dbname) if dump: conn.row_factory = sqlite3.Row cur = conn.cursor() if debug: print(query_str) conn.close() return [] cur.execute(query_str) row = cur.fetchone() if row is None: conn.close() return [] if to_screen: if dump: for name in row.keys(): print(name, '\t', row[name]) else: if row[2] == 0: print(Path(*row[:-1])) else: print(Path(*row[:-1]).with_suffix('.gz')) if dump: return row if row[2] == 0: return 
str(Path(*row[:-1])) return str(Path(*row[:-1]).with_suffix('.gz')) # -------------------------------------------------- def get_product_by_type(args=None, dbname=None, prod_type=None, proc_stage=None, proc_best=None, orbits=None, date=None, rtime=None, to_screen=False, dump=False, debug=False): """ Query NADC Sciamachy SQLite database on product type with data selections Input ----- args : dictionary with keys dbname, type, proc, best, orbit, date, rtime, to_screen, dump, debug dbname : full path to Sciamachy SQLite database prod_type : level of product, available 0, 1, 2 [value required] prod_stage ; baseline of product (PROC_STAGE): N, R, P, R, U, W, ... [default: None] prod_best ; select highest available baseline [default: None] orbit : select on absolute orbit number [default: None] date : select on dateTimeStart [default: None] rtime : select on receiveTime [default: None] to_screen : print query result to standard output [default: False] debug : do not query data base, but display SQL query [default: False] Output ------ return full-path to selected products [default] """ if args: dbname = args.dbname prod_type = args.type proc_stage = args.proc proc_best = args.best orbits = args.orbit date = args.date rtime = args.rtime dump = args.dump debug = args.debug if dbname is None: print('Fatal, SQLite database is not specified') return [] if not Path(dbname).is_file(): print('Fatal, can not find SQLite database: %s' % dbname) return [] if dump: query_str = ['select * from meta__%sP' % prod_type] else: query_str = ['select path,name,compression from meta__%sP' % prod_type] if proc_best: if prod_type == '0': query_str.append(' as s1 join (select absOrbit,MAX(q_flag)') query_str.append(' as qflag from meta__%sP' % prod_type) else: query_str.append(' as s1 join (select absOrbit,MAX(procStage)') query_str.append(' as proc from meta__%sP' % prod_type) if orbits: if ' where' not in query_str: query_str.append(' where') else: query_str.append(' and') if len(orbits) == 
1: mystr = ' absOrbit=%-d' % orbits[0] else: mystr = ' absOrbit between %-d and %-d' % (orbits[0], orbits[1]) query_str.append(mystr) if proc_stage: if ' where' not in query_str: query_str.append(' where') else: query_str.append(' and') mystr = ' procStage in (' for _c in proc_stage: if mystr[-1] != '(': mystr += ',' mystr += '\'' + _c + '\'' mystr += ')' query_str.append(mystr) if date: if ' where' not in query_str: query_str.append(' where') else: query_str.append(' and') dtime = '+1 second' year = int(date[0:4]) dtime = '+1 year' if len(date) >= 6: month = int(date[4:6]) dtime = '+1 month' else: month = 1 if len(date) >= 8: day = int(date[6:8]) dtime = '+1 day' else: day = 1 if len(date) >= 10: hour = int(date[8:10]) dtime = '+1 hour' else: hour = 0 if len(date) >= 12: minu = int(date[10:12]) dtime = '+1 minute' else: minu = 0 _d1 = '{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}'.format( year, month, day, hour, minu, 0) mystr = ' dateTimeStart between \'%s\' and datetime(\'%s\',\'%s\')' query_str.append(mystr % (_d1, _d1, dtime)) if rtime: if ' where' not in query_str: query_str.append(' where') else: query_str.append(' and') mystr = ' receiveDate between datetime(\'now\',\'-%-d %s\')' \ + ' and datetime(\'now\')' if rtime[-1] == 'h': query_str.append(mystr % (int(rtime[0:-1]), 'hour')) else: query_str.append(mystr % (int(rtime[0:-1]), 'day')) if proc_best: query_str.append(' GROUP by absOrbit) as s2 on') query_str.append(' s1.absOrbit=s2.absOrbit') if prod_type == '0': query_str.append(' and s1.q_flag=s2.qflag') else: query_str.append(' and s1.procStage=s2.proc') else: query_str.append(' order by absOrbit ASC, procStage DESC') if debug: print(''.join(query_str)) return [] # pylint: disable=no-member row_list = [] conn = sqlite3.connect(dbname) if dump: conn.row_factory = sqlite3.Row cur = conn.cursor() cur.execute(''.join(query_str)) for row in cur: if to_screen: if dump: print(row) else: if row[2] == 0: print(Path(*row[:-1])) else: 
print(Path(*row[:-1]).with_suffix('.gz')) else: if dump: row_list.append(row) else: if row[2] == 0: row_list.append(str(Path(*row[:-1]))) else: row_list.append(str(Path(*row[:-1]).with_suffix('.gz'))) conn.close() return row_list
8,222
2,602
#!/usr/bin/env python3 import argparse import collections import enum import itertools import numpy as np import pickle import random import sys from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtSvg import * from PyQt5.QtWidgets import * from PyQt5.QtPrintSupport import * class Action(enum.IntEnum): MOVE_LEFT = 0 MOVE_FORWARD = 1 MOVE_RIGHT = 2 class Direction(enum.IntEnum): LEFT = 0 FORWARD = 1 RIGHT = 2 class Entity(enum.IntEnum): EMPTY = 0 WALL = 1 FOOD = 2 POISON = 3 # These entities are mapped to EMPTY by masking two lower bits: FOOD_EATEN = 4 POISON_EATEN = 8 class Heading(enum.IntEnum): NORTH = 0 EAST = 1 SOUTH = 2 WEST = 3 REWARDS = { Entity.EMPTY: 0, Entity.WALL: -100, Entity.FOOD: 1, Entity.FOOD_EATEN: 0, Entity.POISON: -4, Entity.POISON_EATEN: 0 } class BaselineAgent(object): def __init__(self, args): self.baseline_go_sideways = args.baseline_go_sideways self.baseline_prefer_avoid_wall = args.baseline_prefer_avoid_wall self.baseline_prefer_right = args.baseline_prefer_right self.baseline_take_food_near_wall = args.baseline_take_food_near_wall def act(self, percepts): ip = list(percepts[:,0]) # L/F/R ambiguity: if ip[Direction.LEFT] == ip[Direction.FORWARD] and ip[Direction.FORWARD] == ip[Direction.RIGHT]: return (Action.MOVE_FORWARD if not self.baseline_go_sideways else Action.MOVE_RIGHT if self.baseline_prefer_right else Action.MOVE_LEFT) # Single side wall ambiguity: if ip.count(Entity.WALL) == 1 and ip[Direction.FORWARD] == Entity.FOOD: if ip.count(Entity.FOOD) == 2: return (Action.MOVE_FORWARD if self.baseline_take_food_near_wall else Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT) elif self.baseline_prefer_avoid_wall and ip.count(Entity.EMPTY) == 1: return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT else: return Action.MOVE_FORWARD # Single wall: if ip.count(Entity.WALL) == 1 and ip[Direction.FORWARD] != Entity.WALL: if ip.count(Entity.FOOD) == 2: return 
(Action.MOVE_FORWARD if self.baseline_take_food_near_wall else Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT) elif ip.count(Entity.FOOD) == 1: if ip[Direction.FORWARD] == Entity.FOOD: if self.baseline_prefer_avoid_wall and ip.count(Entity.EMPTY) == 1: return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT else: return Action.MOVE_FORWARD else: return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT elif ip.count(Entity.EMPTY) == 2: return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT elif ip.count(Entity.EMPTY) == 1: return (Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.EMPTY else Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT) elif ip.count(Entity.POISON) == 2: return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT elif ip.count(Entity.POISON) == 1: return (Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.POISON else Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT) # Prefer food: if ip.count(Entity.FOOD) == 1: return (Action.MOVE_LEFT if ip[Direction.LEFT] == Entity.FOOD else Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.FOOD else Action.MOVE_RIGHT) elif ip.count(Entity.FOOD) == 2: if ip[Direction.FORWARD] != Entity.FOOD: # L/R ambiguity: return Action.MOVE_RIGHT if self.baseline_prefer_right else Action.MOVE_LEFT else: # S/F ambiguity: return (Action.MOVE_FORWARD if not self.baseline_go_sideways else Action.MOVE_RIGHT if ip[Direction.RIGHT] == Entity.FOOD else Action.MOVE_LEFT) # Prefer empty: if ip.count(Entity.EMPTY) == 1: return (Action.MOVE_LEFT if ip[Direction.LEFT] == Entity.EMPTY else Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.EMPTY else Action.MOVE_RIGHT) elif ip.count(Entity.EMPTY) == 2: if ip[Direction.FORWARD] != Entity.EMPTY: # L/R ambiguity: return Action.MOVE_RIGHT if self.baseline_prefer_right else 
Action.MOVE_LEFT else: # S/F ambiguity: return (Action.MOVE_FORWARD if not self.baseline_go_sideways else Action.MOVE_RIGHT if ip[Direction.RIGHT] == Entity.EMPTY else Action.MOVE_LEFT) # Prefer poison: if ip.count(Entity.POISON) == 1: return (Action.MOVE_LEFT if ip[Direction.LEFT] == Entity.POISON else Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.POISON else Action.MOVE_RIGHT) elif ip.count(Entity.POISON) == 2: if ip[Direction.FORWARD] != Entity.POISON: # L/R ambiguity: return Action.MOVE_RIGHT if self.baseline_prefer_right else Action.MOVE_LEFT else: return (Action.MOVE_FORWARD if not self.baseline_go_sideways else Action.MOVE_RIGHT if ip[Direction.RIGHT] == Entity.POISON else Action.MOVE_LEFT) raise Exception('unknown scenario: {}'.format(ip)) class RandomAgent(object): def __init__(self, args): pass def act(self, percepts): return np.random.choice(list(Action)) class LearningAgent(object): def __init__(self, args): self.weights = np.random.randn(3, 3 * 4 * args.sensor_range) * 0.001 def act(self, percepts): return np.argmax(self.evaluate(percepts)[1]) def evaluate(self, percepts): inputs = encode_percepts_as_one_hot(percepts) outputs = np.dot(self.weights, inputs) return inputs, outputs def update_weights(self, learning_rate, delta, inputs): self.weights += learning_rate * np.dot(delta.reshape((-1, 1)), inputs.reshape((1, -1))) class ReinforcementAgent(LearningAgent): def __init__(self, args): super().__init__(args) def train(self, percepts, percepts_next, learning_rate, discount_factor, reward): inputs, outputs = self.evaluate(percepts) action = np.argmax(outputs) q_current = outputs[action] q_next = np.amax(self.evaluate(percepts_next)[1]) delta = (encode_int_as_one_hot(action, 3) * (reward + discount_factor * q_next - q_current)) self.update_weights(learning_rate, delta, inputs) class SupervisedAgent(LearningAgent): def __init__(self, args): super().__init__(args) def train(self, percepts, learning_rate, target_action): inputs, outputs = 
self.evaluate(percepts) outputs -= np.max(outputs) # Shift values for numerical stability softmax = np.exp(outputs) / np.sum(np.exp(outputs)) correct_choice = encode_int_as_one_hot(target_action, 3) delta = correct_choice - softmax self.update_weights(learning_rate, delta, inputs) def apply_action(world, agent_position, agent_heading, action): # Update agent heading if action == Action.MOVE_LEFT: agent_heading = (agent_heading + 4 - 1) % 4 elif action == Action.MOVE_RIGHT: agent_heading = (agent_heading + 1) % 4 # Update agent position if agent_heading == Heading.NORTH: agent_position = (agent_position[0] - 1, agent_position[1]) elif agent_heading == Heading.EAST: agent_position = (agent_position[0], agent_position[1] + 1) elif agent_heading == Heading.SOUTH: agent_position = (agent_position[0] + 1, agent_position[1]) elif agent_heading == Heading.WEST: agent_position = (agent_position[0], agent_position[1] - 1) agent_position = (np.clip(agent_position[0], 0, world.shape[0] - 1), np.clip(agent_position[1], 0, world.shape[1] - 1)) entity = world[agent_position] reward = REWARDS[entity] done = entity == Entity.WALL if entity == Entity.FOOD: world[agent_position] = Entity.FOOD_EATEN elif entity == Entity.POISON: world[agent_position] = Entity.POISON_EATEN return reward, done, agent_position, agent_heading def benchmark_agent(agent, iterations, args): total_points = 0 for _ in range(iterations): world, agent_position, agent_heading = create_world( args.world_width, args.world_height, args.food_ratio, args.poison_ratio) _, _, _, _, points = evaluate_agent( world, args.max_steps, args.sensor_range, agent, agent_position, agent_heading) total_points += points return total_points / iterations def create_world(width, height, food_ratio, poison_ratio): world = np.full((width + 2, height + 2), Entity.EMPTY, dtype=int) # Add wall border world[ 0, :] = Entity.WALL world[-1, :] = Entity.WALL world[ :, 0] = Entity.WALL world[ :,-1] = Entity.WALL world[np.where(np.logical_and( 
world == Entity.EMPTY, np.random.choice([True, False], world.shape, p=[food_ratio, 1.0 - food_ratio])))] \ = Entity.FOOD world[np.where(np.logical_and( world == Entity.EMPTY, np.random.choice([True, False], world.shape, p=[poison_ratio, 1.0 - poison_ratio])))] \ = Entity.POISON agent_position = list(zip(*np.where(world == Entity.EMPTY)))[0] agent_heading = np.random.choice(list(Heading)) return world, agent_position, agent_heading def encode_int_as_one_hot(value, k): one_hot_encoding = np.zeros(k) one_hot_encoding[value] = 1 return one_hot_encoding def encode_percepts_as_one_hot(percepts): one_hot_percepts = np.zeros((3 * len(percepts[0]), 4)) one_hot_percepts[np.arange(one_hot_percepts.shape[0]), np.concatenate(percepts)] = 1 return one_hot_percepts.flatten() def evaluate_agent(world, steps, sensor_range, agent, agent_position, agent_heading): world = np.copy(world) done = False points = 0 position_history = [agent_position] percepts_history = [] action_history = [] while not done and steps > 0: percepts = get_percepts(world, sensor_range, agent_position, agent_heading) action = agent.act(percepts) reward, done, agent_position, agent_heading = apply_action( world, agent_position, agent_heading, action) percepts_history.append(np.copy(percepts)) action_history.append(action) position_history.append(agent_position) points += reward steps -= 1 return world, position_history, percepts_history, action_history, points def get_percepts(world, sensor_range, agent_position, agent_heading): # Construct perception padded_world = np.full( (world.shape[0] + 2 * sensor_range, world.shape[1] + 2 * sensor_range), Entity.WALL, dtype=int) padded_world[sensor_range:sensor_range + world.shape[0], sensor_range:sensor_range + world.shape[1]] = world # Rotate perception according to agent heading and mask two lower bits # such that FOOD_EATEN and POISON_EATEN are mapped to EMPTY. 
agent_percepts = np.rot90( padded_world[agent_position[0]:agent_position[0] + 2 * sensor_range + 1, agent_position[1]:agent_position[1] + 2 * sensor_range + 1], agent_heading) & 0x3 # Get agent action percepts = np.stack(( agent_percepts[sensor_range, sensor_range - 1::-1], # Left agent_percepts[sensor_range - 1::-1, sensor_range], # Forward agent_percepts[sensor_range, sensor_range + 1:])) # Right return percepts def render(output_filename, world, agent_path): app = QApplication([ '-platform', 'offscreen']) cell_size = 50 margin_size = 5 symbol_size = 0.35 colors = { 'line': QColor( 51, 51, 51), 'path': QColor( 51, 51, 51), Entity.WALL: QColor( 88, 89, 91), Entity.FOOD: QColor( 28, 150, 32), Entity.FOOD_EATEN: QColor(135, 243, 132), Entity.POISON: QColor(255, 153, 0), Entity.POISON_EATEN: QColor(204, 51, 102) } printer = QPrinter() printer.setOutputFormat(QPrinter.PdfFormat) printer.setOutputFileName(output_filename) printer.setPageMargins(0, 0, 0, 0, QPrinter.Inch) printer.setPageSize(QPageSize( QSizeF(float(world.shape[1] * cell_size + 2 * margin_size) / printer.resolution(), float(world.shape[0] * cell_size + 2 * margin_size) / printer.resolution()), QPageSize.Inch)) painter = QPainter(printer) painter.translate(margin_size, margin_size) painter.setPen(QPen(colors['line'], 0)) for y in range(world.shape[0] + 1): painter.drawLine(0, cell_size * y, cell_size * world.shape[1], cell_size * y) for x in range(world.shape[1] + 1): painter.drawLine(cell_size * x, 0, cell_size * x, cell_size * world.shape[0]) # Draw X marking starting location painter.drawLine(cell_size * agent_path[0][1], cell_size * agent_path[0][0], cell_size * (agent_path[0][1] + 1), cell_size * (agent_path[0][0] + 1)) painter.drawLine(cell_size * (agent_path[0][1] + 1), cell_size * agent_path[0][0], cell_size * agent_path[0][1], cell_size * (agent_path[0][0] + 1)) for row in range(world.shape[0]): for column in range(world.shape[1]): entity = world[row, column] if entity != Entity.EMPTY: 
painter.setBrush(QBrush(colors[entity])) painter.drawEllipse( QPointF(cell_size * (column + 0.5), cell_size * (row + 0.5)), symbol_size * float(cell_size), symbol_size * float(cell_size)) pen_thickness_increment = 0.2 pen_thickness = 2.0 for i, (first, second) in enumerate(zip(agent_path, agent_path[1:])): pen_thickness += pen_thickness_increment painter.setPen(QPen(colors['path'], pen_thickness, Qt.SolidLine, Qt.RoundCap)) painter.drawLine( QPointF(cell_size * (first[1] + 0.5), cell_size * (first[0] + 0.5)), QPointF(cell_size * (second[1] + 0.5), cell_size * (second[0] + 0.5))) painter.end() def main(): parser = argparse.ArgumentParser() parser.add_argument('--agent', choices=['baseline', 'random', 'supervised', 'reinforcement']) parser.add_argument('--baseline_go_sideways', action='store_true') parser.add_argument('--baseline_prefer_avoid_wall', action='store_true') parser.add_argument('--baseline_prefer_right', action='store_true') parser.add_argument('--baseline_take_food_near_wall', action='store_true') parser.add_argument('--compare', action='store_true') parser.add_argument('--discount_factor', type=float, default=0.9) parser.add_argument('--evaluate', type=int) parser.add_argument('--food_ratio', type=float, default=0.5) parser.add_argument('--learning_rate', type=float, default=0.01) parser.add_argument('--load', type=str) parser.add_argument('--max_steps', type=int, default=50) parser.add_argument('--poison_ratio', type=float, default=0.5) parser.add_argument('--render', action='store_true') parser.add_argument('--render_filename', type=str, default='flatland.pdf') parser.add_argument('--report_output', action='store_true') parser.add_argument('--report_weights', action='store_true') parser.add_argument('--save', type=str) parser.add_argument('--sensor_range', type=int, default=1) parser.add_argument('--train', action='store_true') parser.add_argument('--training_round_repetitions', type=int, default=1) parser.add_argument('--training_round_size', 
type=int, default=100) parser.add_argument('--training_rounds', type=int, default=25) parser.add_argument('--world_height', type=int, default=10) parser.add_argument('--world_width', type=int, default=10) args = parser.parse_args() if args.load: agent = pickle.load(open(args.load, 'rb')) else: if not args.agent: print('Agent type must be specified') sys.exit(1) agent = globals()[args.agent.title() + 'Agent'](args) if args.train: if not issubclass(agent.__class__, LearningAgent): print('Agent class cannot be trained') sys.exit(1) if args.agent == 'supervised': baseline_agent = BaselineAgent(args) mean_agent_scores = np.zeros((args.training_round_repetitions, args.training_rounds)) for training_round_repetition in range(args.training_round_repetitions): agent = globals()[args.agent.title() + 'Agent'](args) for training_round in range(args.training_rounds): total_points = 0 for iteration in range(args.training_round_size): world, agent_position, agent_heading = create_world( args.world_width, args.world_height, args.food_ratio, args.poison_ratio) done = False steps = args.max_steps points = 0 while not done and steps > 0: percepts = get_percepts( world, args.sensor_range, agent_position, agent_heading) action = agent.act(percepts) reward, done, agent_position, agent_heading = apply_action( world, agent_position, agent_heading, action) if args.agent == 'supervised': target_action = baseline_agent.act(percepts) agent.train(percepts, args.learning_rate, target_action) elif args.agent == 'reinforcement': updated_percepts = get_percepts( world, args.sensor_range, agent_position, agent_heading) agent.train(percepts, updated_percepts, args.learning_rate, args.discount_factor, reward) points += reward steps -= 1 total_points += points mean_agent_scores[training_round_repetition, training_round] += \ total_points / args.training_round_size print('\n'.join('{} {} {}'.format(training_round + 1, mean, std) for training_round, mean, std in zip( range(args.training_rounds), 
np.mean(mean_agent_scores, axis=0), np.std(mean_agent_scores, axis=0)))) if args.save: pickle.dump(agent, open(args.save, 'wb')) if args.evaluate: mean_agent_score = benchmark_agent(agent, args.evaluate, args) print(mean_agent_score) if args.report_output: for i, scenario in enumerate(itertools.product([Entity.EMPTY, Entity.WALL, Entity.FOOD, Entity.POISON], repeat=3)): percepts = np.array(scenario).reshape(3, 1) inputs, outputs = agent.evaluate(percepts) action = np.argmax(outputs) print(' & '.join( [str(i + 1)] + ['\\textsc{{{}}}'.format(str(Entity(entity))) for entity in scenario] + ['${:.5f}$'.format(float(output)) for output in outputs] + ['\\textsc{{{}}}'.format(str(Action(action))), '~ \\\\'])) if args.report_weights: print('&{}\\\\'.format('&'.join(' \\textsc{{{}}} '.format( ''.join(x)) for x in itertools.product('LFR', 'EWFP')))) for i, action in enumerate(['Left', 'Forward', 'Right']): print('\\textsc{{{}}} &{}\\\\'.format( action, '&'.join(' \\textsc{{{:.5f}}} '.format(weight) for weight in list(agent.weights[i, :])))) if args.compare: baseline_agent = BaselineAgent(args) for scenario in itertools.product([Entity.EMPTY, Entity.WALL, Entity.FOOD, Entity.POISON], repeat=3): percepts = np.array(scenario).reshape(3, 1) action = agent.act(percepts) baseline_action = baseline_agent.act(percepts) if action != baseline_action: print('{} -> Agent: {} Baseline: {}'.format(scenario, str(Action(action)), str(Action(baseline_action)))) if args.render: world, agent_position, agent_heading = create_world( args.world_width, args.world_height, args.food_ratio, args.poison_ratio) world, position_history, percepts_history, action_history, points = evaluate_agent( world, args.max_steps, args.sensor_range, agent, agent_position, agent_heading) render(args.render_filename, world, position_history) print(points) if __name__ == '__main__': main()
22,889
7,310
from pycord.discord.ext import commands import pycord.discord as discord from pycord.discord import Embed import requests import json from discord import Embed class Data(commands.Cog): def __init__(self, bot) -> None: self.bot: commands.Bot = bot @commands.command() async def ping(self, ctx): await ctx.send(f"My Ping: {round(self.bot.latency * 1000)}ws") @commands.command() async def discordstatus(self, ctx): res = requests.get('https://discordstatus.com/metrics-display/5k2rt9f7pmny/day.json') data = json.loads(res.text) latency = round(data['summary']['mean']) embed = Embed( description=f"Current: {latency}", ) await ctx.send(embed=embed) def setup(bot): bot.add_cog(Data(bot))
745
270
#!/usr/bin/python3 from SetupRunDirectory import verifyDirectoryFiles, setupRunDirectory from CleanupRunDirectory import cleanUpRunDirectory from RunAssembly import verifyConfigFiles, verifyFastaFiles, runAssembly, initializeAssembler from SaveRun import saveRun import configparser from datetime import datetime from shutil import copyfile import subprocess import signal import traceback import argparse import sys import gc import os def getDatetimeString(): """ Generate a datetime string. Useful for making output folders names that never conflict. """ now = datetime.now() now = [now.year, now.month, now.day, now.hour, now.minute, now.second, now.microsecond] datetimeString = "_".join(list(map(str, now))) return datetimeString def ensureDirectoryExists(directoryPath, i=0): """ Recursively test directories in a directory path and generate missing directories as needed :param directoryPath: :return: """ if i > 3: print("WARNING: generating subdirectories of depth %d, please verify path is correct: %s" % (i, directoryPath)) if not os.path.exists(directoryPath): try: os.mkdir(directoryPath) except FileNotFoundError: ensureDirectoryExists(os.path.dirname(directoryPath), i=i + 1) if not os.path.exists(directoryPath): os.mkdir(directoryPath) def overrideDefaultConfig(config, args): """ Check all the possible params to see if the user provided an override value, and add any overrides to their appropriate location in the config dictionary """ if args.minReadLength is not None: config["Reads"]["minReadLength"] = str(args.minReadLength) if args.k is not None: config["Kmers"]["k"] = str(args.k) if args.probability is not None: config["Kmers"]["probability"] = str(args.probability) if args.m is not None: config["MinHash"]["m"] = str(args.m) if args.minHashIterationCount is not None: config["MinHash"]["minHashIterationCount"] = str(args.minHashIterationCount) if args.maxBucketSize is not None: config["MinHash"]["maxBucketSize"] = str(args.maxBucketSize) if args.minFrequency is 
not None: config["MinHash"]["minFrequency"] = str(args.minFrequency) if args.maxSkip is not None: config["Align"]["maxSkip"] = str(args.maxSkip) if args.maxMarkerFrequency is not None: config["Align"]["maxMarkerFrequency"] = str(args.maxMarkerFrequency) if args.minAlignedMarkerCount is not None: config["Align"]["minAlignedMarkerCount"] = str(args.minAlignedMarkerCount) if args.maxTrim is not None: config["Align"]["maxTrim"] = str(args.maxTrim) if args.minComponentSize is not None: config["ReadGraph"]["minComponentSize"] = str(args.minComponentSize) if args.maxChimericReadDistance is not None: config["ReadGraph"]["maxChimericReadDistance"] = str(args.maxChimericReadDistance) if args.minCoverage is not None: config["MarkerGraph"]["minCoverage"] = str(args.minCoverage) if args.maxCoverage is not None: config["MarkerGraph"]["maxCoverage"] = str(args.maxCoverage) if args.lowCoverageThreshold is not None: config["MarkerGraph"]["lowCoverageThreshold"] = str(args.lowCoverageThreshold) if args.highCoverageThreshold is not None: config["MarkerGraph"]["highCoverageThreshold"] = str(args.highCoverageThreshold) if args.maxDistance is not None: config["MarkerGraph"]["maxDistance"] = str(args.maxDistance) if args.pruneIterationCount is not None: config["MarkerGraph"]["pruneIterationCount"] = str(args.pruneIterationCount) if args.markerGraphEdgeLengthThresholdForConsensus is not None: config["Assembly"]["markerGraphEdgeLengthThresholdForConsensus"] = str( args.markerGraphEdgeLengthThresholdForConsensus) if args.consensusCaller is not None: config["Assembly"]["consensusCaller"] = str(args.consensusCaller) + "ConsensusCaller" if args.useMarginPhase is not None: config["Assembly"]["useMarginPhase"] = str(args.useMarginPhase) if args.storeCoverageData is not None: config["Assembly"]["storeCoverageData"] = str(args.storeCoverageData) return config def main(readsSequencePath, outputParentDirectory, Data, largePagesMountPoint, processHandler, savePageMemory, performPageCleanUp, args): if 
not os.path.exists(readsSequencePath): raise Exception("ERROR: input file not found: %s" % readsSequencePath) # Make sure given sequence file path is absolute, because CWD will be changed later readsSequencePath = os.path.abspath(readsSequencePath) # Generate output directory to run shasta in outputDirectoryName = "run_" + getDatetimeString() outputDirectory = os.path.abspath(os.path.join(outputParentDirectory, outputDirectoryName)) ensureDirectoryExists(outputDirectory) # Locate path of default configuration files relative to this script's "binary" file. # Use of realpath is needed to make sure symbolic links are resolved. scriptPath = os.path.dirname(os.path.realpath(__file__)) confDirectory = os.path.join(os.path.dirname(scriptPath), "conf") defaultConfFilename = "shasta.conf" defaultConfPath = os.path.join(confDirectory, defaultConfFilename) localConfPath = os.path.join(outputDirectory, "shasta.conf") # Parse config file to fill in default parameters config = configparser.ConfigParser() if not config.read(defaultConfPath): raise Exception("Error reading config file %s." 
% defaultConfPath) # Check if any params were specified by user and override the default config config = overrideDefaultConfig(config, args) # Write updated config file to output directory so RunAssembly.py can be called as a separate process with open(localConfPath, "w") as file: config.write(file) # Add bayesian params file to the output directory if needed if args.consensusCaller == "SimpleBayesian": defaultMatrixPath = os.path.join(confDirectory, "SimpleBayesianConsensusCaller-1.csv") localMatrixPath = os.path.join(outputDirectory, "SimpleBayesianConsensusCaller.csv") copyfile(defaultMatrixPath, localMatrixPath) # Add marginphase params file to the output directory if needed if args.useMarginPhase: defaultParamsPath = os.path.join(confDirectory, "MarginPhase-allParams.np.json") localParamsPath = os.path.join(outputDirectory, "MarginPhase.json") copyfile(defaultParamsPath, localParamsPath) # Setup run directory according to SetupRunDirectory.py verifyDirectoryFiles(runDirectory=outputDirectory) setupRunDirectory(runDirectory=outputDirectory) # Ensure prerequisite files are present verifyConfigFiles(parentDirectory=outputDirectory) verifyFastaFiles(fastaFileNames=[readsSequencePath]) # Set current working directory to the output dir os.chdir(outputDirectory) # Launch assembler as a separate process using the saved (updated) config file executablePath = os.path.join(scriptPath, "RunAssembly.py") arguments = [executablePath, readsSequencePath] processHandler.launchProcess(arguments=arguments, working_directory=outputDirectory, wait=True) # Save page memory to disk so it can be reused during RunServerFromDisk if savePageMemory: saveRun(outputDirectory) if performPageCleanUp: sys.stderr.write("Cleaning up page memory...") cleanUpRunDirectory(requireUserInput=False) sys.stderr.write("\rCleaning up page memory... 
Done\n") class ProcessHandler: def __init__(self, Data, largePagesMountPoint, process=None): self.process = process self.Data = Data self.largePagesMountPoint = largePagesMountPoint def launchProcess(self, arguments, working_directory, wait): if self.process is None: self.process = subprocess.Popen(arguments, cwd=working_directory) if wait: self.process.wait() else: exit("ERROR: process already launched") def handleExit(self, signum, frame): """ Method to be called at (early) termination. By default, the native "signal" handler passes 2 arguments signum and frame :param signum: :param frame: :return: """ pass if self.process is not None: self.process.kill() # kill or terminate? gc.collect() self.cleanup() def cleanup(self): sys.stderr.write("\nERROR: script terminated or interrupted\n") sys.stderr.write("Cleaning up page memory...") cleanUpRunDirectory(requireUserInput=False) sys.stderr.write("\rCleaning up page memory... Done\n") exit(1) def stringAsBool(s): s = s.lower() boolean = None if s in {"t", "true", "1", "y", "yes"}: boolean = True elif s in {"f", "false", "0", "n", "no"}: boolean = False else: exit("Error: invalid argument specified for boolean flag: %s"%s) return boolean if __name__ == "__main__": parser = argparse.ArgumentParser() parser.register("type", "bool", stringAsBool) # add type keyword to registries parser.add_argument( "--inputSequences", type=str, required=True, help="File path of FASTQ or FASTA sequence file containing sequences for assembly" ) parser.add_argument( "--savePageMemory", type="bool", # default=10, required=False, help="Save page memory to disk before clearing the ephemeral page data. \n \ Convenient for post-assembly analysis using RunServerFromDisk.py. \n\n \ Any case insensitive variant of the following is accepted: \n \ t, true, 1, y, yes, f, false, 0, n, no" ) parser.add_argument( "--performPageCleanUp", type="bool", default="True", required=False, help="Whether to perform post-assembly cleanup of page files. 
\n \ Any case insensitive variant of the following is accepted: \n \ t, true, 1, y, yes, f, false, 0, n, no" ) parser.add_argument( "--storeCoverageData", type="bool", # default=10, required=False, help="Whether to store read-level data: observed bases and run lengths. \n \ Any case insensitive variant of the following is accepted: \n \ t, true, 1, y, yes, f, false, 0, n, no" ) parser.add_argument( "--outputDir", type=str, default="./output/", required=False, help="Desired output directory path (will be created during run time if doesn't exist)" ) parser.add_argument( "--minReadLength", type=int, # default=1000, required=False, help="The minimum read length. Reads shorter than this are skipped on input." ) parser.add_argument( "--k", type=int, # default=10, required=False, help="The length of the k-mers used as markers. \n" ) parser.add_argument( "--probability", type=float, # default=0.1, required=False, help="The probability that a k-mer is a marker. \n \ This is approximately equal to the fraction\n \ of k-mers that will be used as markers." ) parser.add_argument( "--m", type=int, # default=4, required=False, help="The number of consecutive markers that define a MinHash feature." ) parser.add_argument( "--minHashIterationCount", type=int, # default=100, required=False, help="The number of MinHash iterations." ) parser.add_argument( "--maxBucketSize", type=int, # default=30, required=False, help="The maximum bucket size to be used by the MinHash algoritm. \n \ Buckets larger than this are ignored." ) parser.add_argument( "--minFrequency", type=int, # default=1, required=False, help="The minimum number of times a pair of oriented reads \n \ is found by the MinHash algorithm for the pair to \n \ generate an overlap." ) parser.add_argument( "--maxSkip", type=int, # default=30, required=False, help="The maximum number of markers that an alignment is allowed\n \ to skip on either of the oriented reads being aligned." 
) parser.add_argument( "--maxMarkerFrequency", type=int, # default=10, required=False, help="Marker frequency threshold. \n \ When computing an alignment between two oriented reads, \n \ marker kmers that appear more than this number of times \n \ in either of the two oriented reads are discarded \n \ (in both oriented reads)." ) parser.add_argument( "--minAlignedMarkerCount", type=int, # default=100, required=False, help="The minimum number of aligned markers in an alignment \n \ in order for the alignment to be considered good and usable." ) parser.add_argument( "--maxTrim", type=int, # default=30, required=False, help="The maximum number of trim markers tolerated at the \n \ beginning and end of an alignment. There can be \n \ up this number of markers between the first/last aligned marker \n \ and the beginning/end of either oriented read \n \ for an alignment to be considered good and usable." ) parser.add_argument( "--minComponentSize", type=int, # default=100, required=False, help="The minimum size (number of oriented reads) of \n \ a connected component to be kept." ) parser.add_argument( "--maxChimericReadDistance", type=int, # default=2, required=False, help="Argument maxChimericReadDistance for flagChimericReads." ) parser.add_argument( "--minCoverage", type=int, # default=10, required=False, help="The minimum and maximum coverage (number of markers) \n \ for a vertex of the marker graph. \n \ Vertices with coverage outside this range are collapsed \n \ away and not generated by computeMarkerGraphVertices." ) parser.add_argument( "--maxCoverage", type=int, # default=100, required=False, help="The minimum and maximum coverage (number of markers) \n \ for a vertex of the marker graph. \n \ Vertices with coverage outside this range are collapsed \n \ away and not generated by computeMarkerGraphVertices." ) parser.add_argument( "--lowCoverageThreshold", type=int, # default=1, required=False, help="Parameters for flagMarkerGraphWeakEdges." 
) parser.add_argument( "--highCoverageThreshold", type=int, # default=1000, required=False, help="Parameters for flagMarkerGraphWeakEdges." ) parser.add_argument( "--maxDistance", type=int, # default=30, required=False, help="Parameters for flagMarkerGraphWeakEdges." ) parser.add_argument( "--pruneIterationCount", type=int, # default=6, required=False, help="Number of iterations for pruneMarkerGraphStrongSubgraph." ) parser.add_argument( "--markerGraphEdgeLengthThresholdForConsensus", type=int, # default=100, required=False, help="Used during sequence assembly." ) parser.add_argument( "--consensusCaller", type=str, required=False, choices=["Simple", "SimpleBayesian", "Median"], help="Whether to use Bayesian inference on read lengths during consensus calling" ) parser.add_argument( "--useMarginPhase", type="bool", # default=True, required=False, help="Use margin polisher during consensus. \n\n \ Any case insensitive variant of the following is accepted: \n \ t, true, 1, y, yes, f, false, 0, n, no" ) args = parser.parse_args() # Assign default paths for page data largePagesMountPoint = "/hugepages" Data = os.path.join(largePagesMountPoint, "Data") # Initialize a class to deal with the subprocess that is opened for the assembler processHandler = ProcessHandler(Data=Data, largePagesMountPoint=largePagesMountPoint) # Setup termination handling to deallocate large page memory, unmount on-disk page data, and delete disk data # This is done by mapping the signal handler to the member function of an instance of ProcessHandler signal.signal(signal.SIGTERM, processHandler.handleExit) signal.signal(signal.SIGINT, processHandler.handleExit) main(readsSequencePath=args.inputSequences, outputParentDirectory=args.outputDir, largePagesMountPoint=largePagesMountPoint, Data=Data, args=args, processHandler=processHandler, savePageMemory=args.savePageMemory, performPageCleanUp=args.performPageCleanUp)
17,743
4,993
import typing as t
import typing_extensions as tx
import sys
import logging
import os.path
import dataclasses
import filecmp
from io import StringIO

from .minifs import MiniFS, File, T, DefaultT
from .utils import reify

logger = logging.getLogger(__name__)

# What a write does to a target path: overwrite an existing file or make a new one.
ActionType = tx.Literal["update", "create"]


class Writer(tx.Protocol):
    """Structural interface shared by the concrete *Writer classes below."""

    def write(self, name: str, file: File[T], *, _retry: bool = False) -> None:
        ...


def cleanup_all(output: "output[DefaultT]") -> None:
    """Remove the whole output tree (intended as an `output.cleanup` callback)."""
    # Imported lazily so the module does not pay for shutil unless cleanup runs.
    import shutil

    logger.info("cleanup %s", output.root)
    shutil.rmtree(output.root, ignore_errors=True)


# todo: dryrun
@dataclasses.dataclass(frozen=False, unsafe_hash=False)
class output(t.Generic[DefaultT]):
    """Context manager that collects files in an in-memory MiniFS and flushes
    them (to disk or to the console) on exit.

    NOTE(review): lowercase class name is unconventional but part of the
    public interface — do not rename.
    """

    root: str        # directory all emitted files are placed under
    prefix: str = ""  # prepended to each basename
    suffix: str = ""  # appended to each basename

    # for MiniFS
    opener: t.Optional[t.Callable[[], DefaultT]] = None  # file-object factory; StringIO when None
    sep: str = "/"
    store: t.Dict[str, t.Any] = dataclasses.field(default_factory=dict)

    # Optional hook invoked before writing (e.g. cleanup_all above).
    cleanup: t.Optional[t.Callable[["output[DefaultT]"], None]] = None
    # Behavior toggles, driven by environment variables at class-definition time.
    verbose: bool = os.environ.get("VERBOSE", "") != ""
    use_console: bool = os.environ.get("CONSOLE", "") != ""
    nocheck: bool = os.environ.get("NOCHECK", "") != ""

    def fullpath(self, name: str) -> str:
        """Map a MiniFS-relative name to the on-disk path, applying prefix/suffix
        to the basename only (directory components are kept as-is)."""
        dirname, basename = os.path.split(name)
        fname = "{}{}{}".format(self.prefix, basename, self.suffix)
        return os.path.join(self.root, os.path.join(dirname, fname))

    def guess_action(self, fullpath: str) -> ActionType:
        """Classify the pending write by whether the target already exists."""
        if os.path.exists(fullpath):
            return "update"
        else:
            return "create"

    # @reify comes from .utils — presumably a cache-on-first-access property
    # (the value is computed once per instance); verify against .utils.
    @reify
    def fs(self) -> MiniFS[DefaultT]:
        opener = self.opener or StringIO
        return MiniFS(opener=opener, sep=self.sep)  # type: ignore # xxx

    @reify
    def writer(self) -> Writer:
        """Choose the concrete writer: console dump vs. actual file writes."""
        setup_logging(level=logging.INFO)  # xxx
        if self.use_console:
            return _ConsoleWriter(self)
        else:
            return _ActualWriter(self)

    def __enter__(self) -> MiniFS[DefaultT]:
        # The `with` body works directly against the in-memory filesystem.
        return self.fs

    def __exit__(
        self,
        exc: t.Optional[t.Type[BaseException]],
        value: t.Optional[BaseException],
        tb: t.Any,
    ) -> None:
        # Flush every collected file on exit.  NOTE(review): runs even when an
        # exception is propagating (exc is not inspected) — confirm intended.
        writer = self.writer
        # cleanup is skipped in console mode so a dry-run never deletes output.
        if not self.use_console and self.cleanup is not None:
            self.cleanup(self)

        for name, f in self.fs.walk():
            if
name is None:
                raise RuntimeError("something wrong, name is None")
            writer.write(name, f)


class _ActualWriter:
    """Writer that persists MiniFS files to disk, avoiding rewrites of
    unchanged files so their mtimes are preserved."""

    # Suffix for the scratch file used during change detection.
    TMP_SUFFIX = "_TMP"

    def __init__(self, output: output[DefaultT]):
        self.output = output

    def write(self, name: str, file: File[T], *, _retry: bool = False) -> None:
        # NOCHECK env toggle: skip the compare-before-replace dance entirely.
        if self.output.nocheck:
            self._write_without_check(name, file)
        else:
            self._write_with_check(name, file)

    def _write_with_check(self, name: str, file: File[T]) -> None:
        """Write to `<target>_TMP` first, then replace the target only when the
        content actually differs; identical content leaves the target untouched."""
        fullpath = self.output.fullpath(name)
        if not os.path.exists(fullpath):
            self._write_without_check(name, file, action="create")
        else:
            tmppath = fullpath + self.TMP_SUFFIX
            with open(tmppath, "w") as wf:
                file.write(wf)

            # shallow=True compares os.stat signatures first and falls back to
            # content comparison when they differ; since the tmp file was just
            # written its mtime differs, so this effectively compares contents.
            not_changed = filecmp.cmp(fullpath, tmppath, shallow=True)
            if not_changed:
                action = "no change"
                os.remove(tmppath)
                # "no change" is only logged in verbose mode to keep output quiet.
                if self.output.verbose:
                    logger.info("[F]\t%s\t%s", action, fullpath)
            else:
                action = "update"
                # Atomic on POSIX: readers never observe a half-written target.
                os.replace(tmppath, fullpath)
                logger.info("[F]\t%s\t%s", action, fullpath)

    def _write_without_check(
        self,
        name: str,
        file: File[T],
        *,
        action: t.Optional[ActionType] = None,
        _retry: bool = False,
    ) -> None:
        """Write the file directly; on a missing parent directory, create it
        and retry exactly once (guarded by `_retry` to prevent recursion)."""
        fullpath = self.output.fullpath(name)
        action = action or self.output.guess_action(fullpath)
        try:
            with open(fullpath, "w") as wf:
                file.write(wf)
            logger.info("[F]\t%s\t%s", action, fullpath)
        except FileNotFoundError:
            if _retry:
                raise
            logger.info("[D]\tcreate\t%s", os.path.dirname(fullpath))
            os.makedirs(os.path.dirname(fullpath), exist_ok=True)
            self._write_without_check(name, file, action="create", _retry=True)


class _ConsoleWriter:
    """Writer for dry runs: prints what would be written instead of touching disk."""

    def __init__(
        self,
        output: output[DefaultT],
        *,
        stdout: t.IO[str] = sys.stdout,
        stderr: t.IO[str] = sys.stderr,
    ) -> None:
        self.output = output
        self.stdout = stdout
        self.stderr = stderr

    def write(self, name: str, f: File[T], *, _retry: bool = False) -> None:
        fullpath = self.output.fullpath(name)
        # Non-verbose: log only the action that *would* happen, no content dump.
        if not self.output.verbose:
            logger.info("[F]\t%s\t%s", self.output.guess_action(fullpath), fullpath)
            return

        print(f"# {fullpath}", file=self.stdout)
        # (this print call is completed at the start of the next chunk)
        print(
"\x1b[90m----------------------------------------\x1b[0m", file=self.stderr
        )
        self.stderr.flush()
        # Render the in-memory file into a string, then indent every line by
        # one space so the content is visually set off from the path header.
        o = StringIO()
        f.write(o)
        print(
            " ",
            o.getvalue().rstrip().replace("\n", "\n "),
            file=self.stdout,
            sep="",
        )
        self.stdout.flush()
        print("\n", file=self.stderr)
        self.stderr.flush()


class _MarkdownWriter:
    """Writer that dumps each file as a Markdown section with a fenced code
    block, wrapped in <details> tags (structure on stderr, content on stdout).

    NOTE(review): defined but not selected by `output.writer` above — it
    appears to be wired up elsewhere, or is currently unused.
    """

    def __init__(
        self,
        output: output[DefaultT],
        *,
        stdout: t.IO[str] = sys.stdout,
        stderr: t.IO[str] = sys.stderr,
    ) -> None:
        self.output = output
        self.stdout = stdout
        self.stderr = stderr

    def write(self, name: str, f: File[T], *, _retry: bool = False) -> None:
        fullpath = self.output.fullpath(name)

        o = StringIO()
        f.write(o)
        content = o.getvalue().strip()

        print(f"## {fullpath}\n", file=self.stdout)
        self.stdout.flush()
        print("<details>\n", file=self.stderr)
        self.stderr.flush()
        print("```", file=self.stdout)
        print(content, file=self.stdout)
        print("```\n", file=self.stdout)
        self.stdout.flush()
        print("</details>\n", file=self.stderr)
        self.stderr.flush()
        self.stdout.flush()


def setup_logging(
    *, _logger: t.Optional[logging.Logger] = None, level: int = logging.INFO
) -> None:
    """Attach a bare-message stderr handler to the module logger (idempotent).

    Also calls logging.basicConfig, which configures the root logger as a
    process-wide side effect.
    """
    _logger = _logger or logger
    # Already configured — do not stack duplicate handlers on repeated calls.
    if _logger.handlers:
        return
    h = logging.StreamHandler(sys.stderr)
    h.setFormatter(logging.Formatter(fmt="%(message)s"))
    _logger.addHandler(h)
    # propagate=False keeps messages from being emitted twice via the root logger.
    _logger.propagate = False
    logging.basicConfig(level=level)
6,651
2,162
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from itertools import repeat import os import numpy as np import json from flask import Flask,render_template,url_for,request import joblib import traceback import requests from bs4 import BeautifulSoup import re from onmt.utils.logging import init_logger from onmt.utils.misc import split_corpus from onmt.translate.translator import build_translator import onmt.opts as opts from onmt.utils.parse import ArgumentParser app = Flask(__name__) Categories = ["相机", "内存卡", "三脚架","麦克风","行车记录仪","充电器","笔记本电脑","遥控器", "音响","手机","智能手表","体脂秤","键盘","鼠标","显示器","打印机","平板电脑","电子书阅读器"] @app.route('/index.html') def index(): return render_template('index.html') @app.route('/about.html') def about(): return render_template('about.html') @app.route('/contact.html') def contact(): return render_template('contact.html') def constraint_iter_func(f_iter): all_tags = [] for json_line in f_iter: data = json.loads(json_line) words = data['words'] probs = [p[1] for p in data['class_probabilities'][:len(words)]] tags = [1 if p > opt.bu_threshold else 0 for p in probs] all_tags.append(tags) #print(len(words), len(data['class_probabilities'])) #all_tags.append(words) return all_tags def _get_parser(): parser = ArgumentParser(description='translate.py') opts.config_opts(parser) opts.translate_opts(parser) return parser def extract_category(title): min_start = len(title) target = None for i, category in enumerate(Categories): result = re.search(category, title) if result is not None: result = result.span() if result[0] < min_start and result[0] >= 0: min_start = result[0] target = category if target is None: target = "不知道" return target parser = _get_parser() opt = parser.parse_args() model_path = opt.models[0] step = os.path.basename(model_path)[:-3].split('step_')[-1] temp = opt.random_sampling_temp if opt.extra_output_str: opt.extra_output_str = '_'+opt.extra_output_str if opt.output is None: output_path = 
'/'.join(model_path.split('/')[:-2])+'/output_%s_%s%s.encoded' % (step, temp, opt.extra_output_str)
    opt.output = output_path
ArgumentParser.validate_translate_opts(opt)
logger = init_logger(opt.log_file)
# Heavy: loads the model checkpoint once at import time so requests are fast.
translator = build_translator(opt, report_score=True)

BASE_LIB='html5lib'
# Fixed desktop Chrome user-agent so product pages serve the full HTML.
UA='Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
HEADERS={'user-agent':UA}
print("prepared")


@app.route('/index.html',methods=['POST'])
def main():
    """Handle a submitted product URL: scrape the page title, detect the
    product category, generate candidate comments with the translator, and
    render the best-scoring one back into the index template."""
    if request.method == 'POST':
        url= str(request.form['message']).strip()
        # Reject empty input and anything that is not an https:// URL.
        if len(url) == 0 or not url.startswith("https://"):
            return render_template('index.html',prediction = "请输入正确的网址")
        resp = requests.get(url, headers=HEADERS)
        text = resp.text
        soup = BeautifulSoup(text, 'lxml')
        # NOTE(review): assumes the page has a <title> and that its last 16
        # characters are site boilerplate to strip — fragile; raises
        # AttributeError when <title> is missing. TODO confirm/harden.
        title=soup.title.string[:-16]
        print(title)
        src_shard = extract_category(title)
        print(src_shard)
        try:
            # extract_category may return the "unknown" fallback, which the
            # assert routes into the except branch below.
            assert src_shard in Categories
            # Feed the same category bytes batch_size times; the model then
            # samples batch_size candidate comments.
            predictions = translator.translate(
                src=[src_shard.encode(encoding = "utf-8")]*opt.batch_size,
                tgt=None,
                src_dir=opt.src_dir,
                batch_size=opt.batch_size,
                attn_debug=opt.attn_debug,
                tag_shard=None
            )
            # predictions[1]: token strings — strip spaces, keep first sentence;
            # predictions[0]: torch scores — negate so larger means better.
            pred_comments = [prediction[0].replace(" ", "").split("。")[0] for prediction in predictions[1]]
            scores = [-torch_score[0].cpu().item() for torch_score in predictions[0]]
            pred_comment = pred_comments[scores.index(max(scores))]
            print(pred_comments)
            print(scores)
        except:
            # NOTE(review): bare except deliberately converts *any* failure
            # (including the assert above) into the fallback message, but it
            # also hides real bugs — consider narrowing.
            traceback.print_exc()
            pred_comment = "不好意思,此类商品暂不支持"
        return render_template('index.html',prediction = pred_comment)


if __name__ == "__main__":
    app.run(host='127.0.0.1',port=5500,debug=True)
4,158
1,466