id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3299936 | # terrascript/provider/sethvargo/berglas.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:13:20 UTC)
import terrascript
class berglas(terrascript.Provider):
    """A Terraform provider for Berglas (sethvargo/terraform-provider-berglas).

    Auto-generated provider stub: the dunder class attributes below carry
    the Terraform registry metadata consumed by the terrascript framework.
    """
    __description__ = "A Terraform provider for Berglas"
    __namespace__ = "sethvargo"
    # NOTE: deliberately shadows the class's own __name__; terrascript uses
    # this attribute as the provider name in generated configuration.
    __name__ = "berglas"
    __source__ = "https://github.com/sethvargo/terraform-provider-berglas"
    __version__ = "0.2.0"
    __published__ = "2021-08-23T19:47:39Z"
    __tier__ = "community"


# Public API of this generated module.
__all__ = ["berglas"]
| StarcoderdataPython |
11244617 | #
# compas_hpc_input.py
#
# This is the script that should be modified by the user to give your inputs
# to compas_hpc.py
#
# The names of the variables in this script should not be changed as they
# are expected by compas_hpc.py
#
# Once you have edited this script, run
#
# python compas_hpc.py
#
import numpy as np
import os
import time
from subprocess import Popen, PIPE
import sys
import pickle
################################################
#
# The user should modify the following options
#
#################################################

nBatches = 3  #-- Choose how many batches to split the run into

# Largest value accepted for a random seed (2^32 - 1).
maxRandomSeed = int(2**32-1)

venvActivatePath = None  # path to source or None

send_email = False  # Whether you want to receive an email when your jobs are done
user_email = '<EMAIL>'

#-- Path where the output will go
#   Does not have to exist, compas_hpc will attempt to create it
rootOutputDir = '/fred/oz101/user_name/folder_name'
#-- on G2 this should be something like
#   /lustre/projects/p027/user_name/folder_name
#-- on OzSTAR this should be something like
#   /fred/oz003/user_name/folder_name
#-- on tsunami this should be something like
#   /home/user_name/folder_name

#-- Whether to use your own list of nBatches random seeds or generate a new list
generate_random_seeds = True
seedsFile = "/path/to/seeds/file"

#-- Which cluster to use. Current possible clusters are tsunami, g2, ozstar, helios and local
cluster = 'ozstar'

#-- Requested walltime in HH:MM:SS -- used on ozstar and g2
walltime = "1:00:00"
#-- Requested memory in MB -- used on ozstar and g2
memory = '4000'
#-- Set maximum number of jobs to run at any one time
maxNumJobsRun = '100'

#
# Example of how to set up the grid dictionary.
# Modify to what you want
#
# To see available options to change do
# $COMPAS_ROOT_DIR/COMPAS/COMPAS --help
#
# Example of a grid of common envelope alphas
# gridDictionary = {}
# gridDictionary['--common-envelope-alpha'] = np.linspace(0.,2.,10)
#
# Example of a metallicity grid
# gridDictionary = {}
# n_metallicities = 50
# gridDictionary['--metallicity'] = np.logspace(log_metallicity_lower_limit,log_metallicity_upper_limit,n_metallicities)
#
# These are the SSE (Hurley et al 2000) limits for the
# range of metallicities
#
metallicity_lower_limit = 1E-4
metallicity_upper_limit = 3E-2
log_metallicity_lower_limit = np.log10(metallicity_lower_limit)
log_metallicity_upper_limit = np.log10(metallicity_upper_limit)

# Sampling limits for other COMPAS parameters (common-envelope efficiency,
# black-hole natal kick dispersion, LBV wind multiplier).
common_envelope_alpha_lower_limit = 0.01
common_envelope_alpha_upper_limit = 2.0
sigma_kick_black_hole_lower_limit = 0.0
sigma_kick_black_hole_upper_limit = 400.0
flbv_lower_limit = 0.0
flbv_upper_limit = 10.0

# One [lower, upper] row per sampled parameter, in the order listed above.
ranges = np.array([[log_metallicity_lower_limit, log_metallicity_upper_limit],
                   [common_envelope_alpha_lower_limit, common_envelope_alpha_upper_limit],
                   [sigma_kick_black_hole_lower_limit, sigma_kick_black_hole_upper_limit],
                   [flbv_lower_limit, flbv_upper_limit]])

# None -> no parameter grid; otherwise a dict of {COMPAS option: value array}
# as in the commented examples above.
gridDictionary = None
| StarcoderdataPython |
4923166 | from playeranalyze import get_player_data
import matplotlib.pyplot as plt
def organize_data_by_over(player_data):
    """Aggregate a player's ball-by-ball data into mean runs per over.

    Args:
        player_data: DataFrame with "over", "ball" and "total_runs" columns.

    Returns:
        DataFrame with two columns: a fractional "over" value
        (completed overs + balls bowled in the current over / 6) and the
        mean "total_runs" for each distinct over value, with NaN rows
        dropped.
    """
    # Use .assign (a copy) instead of mutating the caller's DataFrame in
    # place, and return the groupby result directly rather than assigning
    # it back into the original frame (which only worked by accident of
    # index alignment followed by dropna).
    frame = player_data.assign(over=player_data["over"] + player_data["ball"] / 6)
    grouped = frame[["over", "total_runs"]].groupby("over", as_index=False).mean()
    return grouped.dropna()
def plot_organized_data(player_data, player_name):
    """Plot a player's mean runs per over as a line chart.

    Args:
        player_data: DataFrame with "over" and "total_runs" columns.
        player_name: name used in the chart title.

    Returns:
        The (unchanged) player_data, so calls can be chained.
    """
    # The figure handle and the return value of ax.plot were previously
    # bound to unused variables; they are discarded here.
    _, ax = plt.subplots()
    ax.plot(player_data["over"], player_data["total_runs"])
    ax.set_title(f"{player_name}'s runs over a T20 game")
    ax.set_xlabel('Overs')
    ax.set_ylabel('Runs')
    # Blocks until the figure window is closed.
    plt.show()
    return player_data
def plot_player_trends(player_name):
    """Fetch, aggregate and plot a player's per-over runs.

    Args:
        player_name: the player to look up.

    Returns:
        The player_name, unchanged.
    """
    raw_data = get_player_data(player_name)
    per_over = organize_data_by_over(raw_data)
    plot_organized_data(per_over, player_name)
    return player_name
6503217 | <reponame>OpenITI/oipy
"""Converter that converts HTML files from the Ghbook library
to OpenITI mARkdown.
The converter has two main functions:
* convert_file: convert a single html file.
* convert_files_in_folder: convert all html files in a given folder
Usage examples:
>>> from html_converter_Ghbook import convert_file
>>> folder = r"test/Ghbook/"
>>> convert_file(folder+"10584.html", dest_fp=folder+"converted/10584")
>>> from html_converter_Ghbook import convert_files_in_folder
>>> convert_files_in_folder(folder, dest_folder=folder+"converted")
Both functions use the GhbookHtmlConverter class to do the heavy lifting.
The GhbookHtmlConverter is a subclass of GenericHtmlConverter,
which in its turn inherits many functions from the GenericConverter.
GenericConverter
\_ GenericHtmlConverter
\_ EShiaHtmlConverter
\_ GhbookHtmlConverter
\_ ...
Overview of the methods of these classes:
(methods of GenericConverter are inherited by GenericHtmlConverter;
and methods of GenericHtmlConverter are inherited by GhbookHtmlConverter.
Methods of the superclass with the same name
in the subclass are overwritten by the latter)
================================== ========================== ==========================
GenericConverter GenericHtmlConverter GhbookHtmlConverter
================================== ========================== ==========================
__init__ __init__ (inherited)
convert_files_in_folder (inherited) (inherited)
convert file (inherited) (inherited)
make_dest_fp (inherited - generic!) (inherited - generic!)
get_metadata (dummy) (inherited - dummy!) get_metadata
get_data get_data (inherited)
pre_process (inherited) pre_process
add_page_numbers (dummy) (inherited - dummy!) add_page_numbers
add_structural_annotations (dummy) add_structural_annotations add_structural_annotations
remove_notes (dummy) remove_notes remove_notes
reflow (inherited) (inherited)
add_milestones (dummy) (inherited - dummy!) (inherited - dummy!)
post_process (inherited - generic!) post_process
compose (inherited) (inherited)
save_file (inherited) (inherited)
inspect_tags_in_html (inherited)
inspect_tags_in_folder (inherited)
find_example_of_tag (inherited)
================================== ========================== ==========================
The GhbookHtmlConverter's add_structural_annotations method uses html2md_Ghbook,
an adaptation of the generic html2md (based on markdownify)
to convert the html tags to OpenITI annotations.
Examples:
>>> from html_converter_Ghbook import GhbookHtmlConverter
>>> conv = GhbookHtmlConverter()
>>> conv.dest_folder = r"test/Ghbook/converted"
>>> conv.VERBOSE = False
>>> folder = r"test/Ghbook/"
>>> conv.convert_file(folder+"10584.html")
>>> conv.convert_files_in_folder(folder, ["html"])
"""
from bs4 import BeautifulSoup
import re
# When run as a script (for the doctests below), put the repository root on
# sys.path so the absolute `openiti` imports resolve without installation.
if __name__ == '__main__':
    # NOTE(review): "from os import sys, path" works (os re-exports sys) but
    # is unconventional; plain "import sys" / "from os import path" is clearer.
    from os import sys, path
    root_folder = path.dirname(path.dirname(path.abspath(__file__)))
    root_folder = path.dirname(path.dirname(root_folder))
    sys.path.append(root_folder)
from openiti.new_books.convert.html_converter_generic import GenericHtmlConverter
from openiti.new_books.convert.helper import html2md_Ghbook
def convert_file(fp, dest_fp=None):
    """Convert a single html file to OpenITI mARkdown format.

    Args:
        fp (str): path of the html file to convert.
        dest_fp (str): path for the converted file. Defaults to None,
            in which case the converted file is written to a folder
            named "converted" in the same folder as the source file.

    Returns:
        None
    """
    converter = GhbookHtmlConverter()
    converter.convert_file(fp, dest_fp=dest_fp)
def convert_files_in_folder(src_folder, dest_folder=None,
                            extensions=["html"], exclude_extensions=["yml"],
                            fn_regex=None):
    r"""Convert all files in a folder to OpenITI format.

    Use the `extensions` and `exclude_extensions` lists to filter
    the files to be converted.

    Args:
        src_folder (str): path to the folder that contains
            the files that must be converted.
        dest_folder (str): path to the folder where converted files
            will be stored.
        extensions (list): list of extensions; if this list is not empty,
            only files with an extension in the list will be converted.
        exclude_extensions (list): list of extensions;
            if this list is not empty,
            only files whose extension is not in the list will be converted.
        fn_regex (str): regular expression defining the filename pattern
            e.g., "-(ara|per)\d". If `fn_regex` is defined,
            only files whose filename matches the pattern will be converted.

    Returns:
        None
    """
    # NOTE: the docstring is now a raw string; the "\d" it contains was an
    # invalid escape sequence that raises a SyntaxWarning on modern Python.
    # NOTE(review): the mutable list defaults are kept for interface
    # compatibility; they are only read here, never mutated.
    conv = GhbookHtmlConverter()
    conv.convert_files_in_folder(src_folder, dest_folder=dest_folder,
                                 extensions=extensions,
                                 exclude_extensions=exclude_extensions,
                                 fn_regex=fn_regex)
################################################################################
class GhbookHtmlConverter(GenericHtmlConverter):
    """Convert html files from the Ghbook library to OpenITI mARkdown.

    Overrides the generic converter's pre-/post-processing, page numbering,
    footnote extraction and structural annotation steps with Ghbook-specific
    logic (see the module docstring for the full method-resolution table).
    """

    def pre_process(self, text):
        """Remove superfluous elements from the html file before processing.

        Args:
            text (str): raw html of the source file.

        Returns:
            str: prettified html with STYLE/TITLE elements removed and a
            "FOOTNOTES" marker inserted after every footnote separator line.
        """
        def remove_html_elements(soup, tag_name, class_=None, contains_str=None):
            """Remove all html elements with tag `tag_name` and class `class_` \
            if they contain `contains_str`

            Args:
                soup (BeautifulSoup object): BeautifulSoup representation
                    of the html file
                tag_name (str): name of the tag that needs to be removed
                    (e.g., "p", "div", "span").
                class_ (str): class of the tag that needs to be removed.
                    Defaults to None. If None, all `tag_name` elements
                    will be removed, regardless of their class.
                contains_str (str): defaults to None. If not None,
                    `tag_name` tags will only be removed if the text within them
                    contains the `contains_str` string.
            """
            if class_:
                elements = soup.find_all(tag_name, class_=class_)
            else:
                elements = soup.find_all(tag_name)
            for el in elements:
                if contains_str:
                    if contains_str in el.text:
                        el.extract()
                else:
                    el.extract()

        text = super().pre_process(text)
        # attach separated wa- and a- prefixes to the following word:
        text = re.sub(r"\b([وأ])[\s~]+", r"\1", text)
        # remove superfluous html elements:
        # NOTE(review): no parser is passed to BeautifulSoup, and common
        # parsers normalize tag names to lowercase, in which case the
        # upper-case "STYLE"/"TITLE" searches below would match nothing --
        # confirm against a sample Ghbook file.
        soup = BeautifulSoup(text)
        remove_html_elements(soup, "STYLE")
        remove_html_elements(soup, "TITLE")
        # mark the start of the footnote section on every page:
        for fn_line in soup.find_all("hr", class_="content_hr"):
            fn_line.insert_after("FOOTNOTES")
        text = soup.prettify()
        return text

    def add_page_numbers(self, text, source_fp):
        """Convert the page numbers in the text into OpenITI mARkdown format

        In Ghbook texts, the page numbers are in the page header
        (<div class="PageHead">).
        Volume numbers are not mentioned in the html files,
        but every volume is a different html file
        and volume numbers should be marked in the file names as VOLxxx.
        The script gets the volume number from the file name
        and the page number from the page header,
        joins these together in the OpenITI page number format PageVxxPxxx
        and adds this into the html at the end of the page.
        It also deletes the page header after extracting the page number.
        """
        # try to get the volume number from the filename:
        try:
            vol_no = int(re.findall("VOL(\d+)", source_fp)[0])
            # template: volume filled in now, page number filled in later
            vol_no = "PageV{:02d}P{}".format(vol_no, "{:03d}")
        except:
            # bare except: any failure to parse a volume number (no VOLxxx
            # in the filename) falls back to volume 1
            vol_no = "PageV01P{:03d}"
        # add the page number
        # NOTE(review): as in pre_process, the upper-case "SPAN" search
        # depends on the parser preserving tag-name case -- verify.
        soup = BeautifulSoup(text)
        for span in soup.find_all("SPAN", class_="content_text"):
            span_text = span.text.strip()
            # page headers look like "ص:45" (Arabic "page: 45")
            if span_text.startswith("ص:"):
                page_no = re.findall("\d+", span_text)[0]
                page_no = vol_no.format(int(page_no))
                span.insert_after(page_no)
                span.extract()
        return soup.prettify()

    def remove_notes(self, text):
        """Remove footnotes from text, and format them as endnotes.

        Footnotes in Ghbook html files are below a horizontal line
        (<HR class=content_hr>), located just below the page number
        (<P class=content_paragraph><SPAN class=content_text>ص:45</SPAN></P></DIV></DIV>)
        each footnote in a <DIV id=content_note_PAGE_NOTENUMBER class=content_note>(footnote text)</DIV>

        This function extracts the footnotes from the texts
        and turns them into endnotes.
        The markers that indicate the location of the notes
        within the text are not removed.
        """
        # split on OpenITI page numbers (inserted by add_page_numbers), so
        # each chunk of text can be associated with its page:
        split_text = re.split("(PageV\d+P\d+)", text)
        text = []
        footnotes = []
        for i, t in enumerate(split_text):
            if re.match("PageV\d+P\d+", t):
                text.append(t)
            else:
                notes = re.findall("content_note>([^<]+)", t)
                if notes:
                    try:
                        # label the notes with the preceding page number:
                        notes = "\n".join(notes) + "\n" + split_text[i-1] + "\n\n"
                    except:
                        # fall back to a dummy page number if none precedes
                        notes = "\n".join(notes) + "\n" + "PageV00P000\n\n"
                    footnotes.append(notes)
                # strip the footnote divs from the running text:
                text.append(re.sub("<DIV [^>]+? class=content_note>[^<]+?</DIV>", "", t))
        text = "\n\n".join(text)
        notes = "\n\n".join(footnotes)
        # collapse empty paragraph markers between notes:
        notes = re.sub("\n+#* *\n+", "\n\n", notes)
        # endnote_splitter is provided by the Generic converter superclass
        notes = self.endnote_splitter + notes
        return text, notes

    def add_structural_annotations(self, html):
        """Convert html to mARkdown text using a html2md converter."""
        text = html2md_Ghbook.markdownify(html)
        return text

    def post_process(self, text):
        """Deal with formatting problems introduced during the conversion process."""
        text = super().post_process(text)
        # remove page numbers of empty pages:
        text = re.sub("(PageV\d+P\d+)\s*PageV\d+P\d+", r"\1", text)
        # remove empty paragraphs:
        text = re.sub(r"[\r\n]+# *[\r\n]+", "\n", text)
        # adjust spacing after closing brackets and punctuation
        # (fmt = closing punctuation; fmt2 adds digits/whitespace, which
        # should NOT trigger the insertion of a space):
        fmt = ")»،؛:.!؟\-"
        fmt2 = fmt + "\d\s"
        text = re.sub("([{}]+)([^{}])".format(fmt, fmt2), r"\1 \2", text)
        text = re.sub("\) ([{}])".format(fmt), r")\1", text)
        # adjust spacing before opening brackets:
        text = re.sub("(\w)([(«])", r"\1 \2", text)
        # remove superfluous new lines before a new paragraph/page number
        text = re.sub("[\r\n]+(# |Page)", r"\n\1", text)
        return text
# Run the module's doctests (see the module docstring) when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Pause so the doctest output can be read before the process exits.
    input("Passed all tests. Continue?")
| StarcoderdataPython |
9660914 | import json
import requests
import time
from urllib import parse
from datetime import datetime
import ac_utility
from ac_constants import *
"""
# The json from Airtable is processed first in build_icandi_json, parsed then saved locally to icandi.json
# icandi.json is then loaded into get_venue_list for processing.
# Benefits: local data is persistent and doesn't rely on internet connection
# Airtable data can be parsed, filtered and then well formed before being written local
"""
def load_sims(url, options):
    """Fetch room-booking data from sims.

    Args:
        url (str): base sims query url.
        options (str): url-encoded query string appended to `url`.

    Returns:
        tuple: (response, status_code). On any failure the response is an
        empty dict and the status code is 408 (request timeout).
    """
    try:
        # Time-box the request so a hung server cannot stall the caller
        # (mirrors the 8-second timeout used by load_airtable below).
        pulljson = requests.get(url + options, timeout=8)
        status = pulljson.status_code
    except Exception as e:
        # Broad by design: the original caught
        # (Timeout, ConnectionError, KeyError, Exception), which Exception
        # subsumes anyway; any failure is reported to the caller as a 408.
        print(f'Exception raised: {e}')
        pulljson = {}
        status = 408
    return pulljson, status
def build_sims_json(sims_id):
    """Pull this ISO week's room bookings for one venue from sims.

    Args:
        sims_id (str): the sims venue identifier,
            e.g. 'ARTS: [ G59] Fox Lecture Ha'.

    Returns:
        list: booking dicts for the current week, each with "title", "day",
        "duration", "weeks", "g_start" and "g_span" keys; implicitly
        returns None when the http request fails.
    """
    # todo set this as a pref key and with advanced settings option
    sims_root = "https://applicant.sims.uwa.edu.au/connect/webconnect?" \
                "pagecd=UPTMTBL&dataname=%7Cp_mode%7Cp_ret%7Cp_draft_ind%7Cp_uoos&datavalue=%7CVENUE%7CDTL%7CY%7C"
    # sims_query = parse.quote_plus('ARTS: [ G59] Fox Lecture Ha', safe='/&=')
    sims_query = parse.quote_plus(sims_id, safe='/=')  # removed '&' from safe list
    # the return from the query is a mish mash of stuff, not json, no consistency in formatting/validation, etc X:(
    req, json_request_status = load_sims(sims_root, sims_query)
    if json_request_status == requests.codes.ok:
        today_iso = datetime.now().date()
        current_year, current_week, _ = today_iso.isocalendar()
        # current_week = 4
        bookings_list = []
        clean_response = ""
        trimmed_response = (req.text[1:-11])  # remove cruft from top and tail of response
        # dropping 'othdtl' field as it passes illegal characters on data entry -> ** slack validation by callista!
        for line in trimmed_response.splitlines():
            if "othdtl" not in line:
                clean_response += line + "\n"
            else:
                # replace the whole othdtl line with an empty value
                clean_response += '\t\t"othdtl" : ""\n'
        loaded = json.loads(clean_response, strict=False)
        for booking in loaded[1:]:  # dump the first record - just structure info
            bookingdetail = {}
            bookingdetail["title"] = booking.get("actLongFrm", "MISSING").replace("_", " ")
            bookingdetail["day"] = booking.get("day", "MISSING")
            bookingdetail["duration"] = booking.get("sttoend", "MISSING")
            # "wknos" is a comma-separated week-number list, possibly with
            # empty entries that must be stripped before int conversion:
            weeks = booking.get("wknos", "MISSING").split(',')
            while "" in weeks:
                weeks.remove("")
            bookingdetail["weeks"] = list(map(int, weeks))
            b_start, b_end = bookingdetail["duration"].split(" - ")  # split "duration" into start and end time strings
            g_start = int(b_start.split(":")[0])  # convert the start string into int of hour value
            bookingdetail["g_start"] = g_start if g_start > 7 else 7  # grid start time is no less than 7
            g_end = int(b_end.split(":")[0])  # convert the end string into int of hour value
            if g_end == 0:
                g_end = 24  # make booking times finish at 24:00 hours (not 00:00)
            bookingdetail["g_span"] = g_end - g_start
            # keep only bookings that occur in the current ISO week:
            if current_week in bookingdetail["weeks"]:
                bookings_list.append(bookingdetail)
        # write to disk - just for testing
        # temp_json_file = DATA_DIR / "sims.json"
        # with temp_json_file.open("w") as file:
        #     json.dump(bookings_list, file, indent=2)
        return bookings_list
    else:
        # NOTE(review): this branch returns None implicitly; callers must
        # handle a None result -- confirm.
        print(f'Failed to connect to sims\n\n({json_request_status})')
        print(ERROR_CODES.get(str(json_request_status), 'Error code not found'))
def load_airtable(bearer_key=None, url=None, options=None, offset=""):
    """Fetch one page (up to 100 records) of venue data from AirTable.

    AirTable returns a json object of up to 100 records; if there are more
    records, AirTable passes an offset attribute. This offset can be used as
    a parameter in the query string to get the next 100 records. Once there
    are less than 100 records returned, the offset attribute is no longer
    passed.

    Args:
        bearer_key (str): AirTable API bearer token.
        url (str): base AirTable query url.
        options (str): additional query-string options.
        offset (str): pagination offset returned by the previous call.

    Returns:
        tuple: (response, status_code). On any failure the response is an
        empty dict and the status code is 408 (request timeout).
    """
    header = {'authorization': 'Bearer ' + bearer_key}
    timeout = 8  # in seconds
    try:
        pulljson = requests.get(url + options + "&offset=" + offset,
                                headers=header, timeout=timeout)
        status = pulljson.status_code
    except Exception as e:
        # Broad by design: the original caught
        # (Timeout, ConnectionError, KeyError, Exception), which Exception
        # subsumes anyway; any failure is reported to the caller as a 408.
        print(f'Exception raised: {e}')
        pulljson = {}
        status = 408
    return pulljson, status
def build_icandi_json():
    """Pull all venue records from AirTable and rebuild the local icandi.json.

    Pages through the AirTable API (100 records per request), extracts only
    the fields used by ArseCandi/iCandi, writes the result to icandi.tmp and
    swaps it in for icandi.json if the content changed.

    Returns:
        tuple: (has_new_data, fail_msg) where has_new_data is True when the
        local database file was replaced, and fail_msg is None on success or
        a user-facing error message on failure.
    """
    # venue_records = None
    # venue_pages = None
    morepages = True
    offset = ""
    json_request_status = 0
    venue_list = []
    has_new_data = False
    fail_msg = None
    # API key and query url come from the persisted preferences file:
    tmp_prefs = ac_utility.preferences(DATA_DIR)
    bearer_key = tmp_prefs["bearer_key"]
    # bearer_key = "BAD_KEY_FOR_TESTING"
    # bearer_key = "keyfppFBhdYli2nSr"
    # # Do Not use the url below - to get json from old iturd run icandi_json_iTurd.py
    # # url = "https://api.airtable.com/v0/appfybfS11FLtnOH8/Venue?view=XojoView" # also from preferences - iTURD - old
    # url = "https://api.airtable.com/v0/appXOyM6EA9QQpWU0/Venue?view=iCandi" # also from preferences - iTARD - new
    url = tmp_prefs["airtable_url"]
    options = ""
    # options = "&maxRecords=11&filterByFormula=(Group='Business School')" # just for testing
    del tmp_prefs
    while morepages:
        req, json_request_status = load_airtable(bearer_key, url, options, offset)
        if json_request_status == requests.codes.ok:
            if "offset" in json.loads(req.content):  # offset is the second outermost key in the json dictionary
                offset = json.loads(req.content).get("offset")
            else:
                offset = ""
            venue_records = json.loads(req.content)["records"]
            # venue_records is a list of venue dictionaries {id:text, fields{dictionary}, createdTime:text}
            # Iterate through dictionary, extract only fields for use with ArseCandi
            for index, venue_record in enumerate(venue_records):
                venue = {"id": venue_record.get("id")}
                # print(venue_record)
                for venuekey in venue_record:
                    # Remove unused fields and rename used fields as needed
                    if "fields" in venuekey:
                        # Remember: AirTable returns no key if bool is False, we have to force a False value on booleans
                        # <Returned records do not include any fields with "empty" values, e.g. "", [], or false'>
                        fields = venue_records[index][venuekey]
                        venue["name"] = fields.get("Venue Name", "MISSING")
                        venue["code"] = fields.get("Room code", "MISSING")
                        venue["building"] = fields.get("_Building", "Unknown")
                        venue["bookingid"] = fields.get("Booking ID", "")  # Callista code
                        venue["aka"] = fields.get("AKA", "")
                        venue["capacity"] = int(fields.get("Capacity", 0))
                        venue["group"] = fields.get("Group", "Unknown")
                        venue["phone"] = str(fields.get("Phone", "Unknown"))
                        venue["ctf"] = "Yes" if fields.get("CTF") else "No"  # AirTable stored as boolean
                        venue["cardax"] = "Yes" if fields.get("Cardax") else "No"  # AirTable stored as boolean
                        venue["notes"] = fields.get("_Notes", "")
                        venue["pc"] = fields.get("_PC", "")
                        # venue["webcam"] = fields.get("_WebCam", "")
                        # venue["webcamtype"] = fields.get("_WebCamType", "")
                        venue["echo360"] = fields.get("Venue Echo Link", "")
                        venue["websis"] = fields.get("Venue WebSIS Link", "")
                        venue["projection"] = fields.get("Projection", "")
                        venue["projector"] = fields.get("Projector", "")
                        venue["asana"] = fields.get("Asana tag", "")
                        venue["sdc"] = fields.get("_SDC_String", "")
                        # Construct a new key ["networkdevice"] by iterating through ["_Device Data"] - a list of
                        # semicolon separated device strings and splitting into list
                        # "ip; device name; extension; notes, model, Button Flags"
                        devicedata = fields.pop("_Device Data", None)  #
                        devicelist = []
                        if devicedata:
                            for d in devicedata:
                                datalist = d.split(' ; ')
                                if datalist[0]:  # if there's an ip address in the first position...
                                    devicelist.append(datalist)
                            devicelist.sort(key=lambda x: (int(x[0].split(".")[0]),
                                                           int(x[0].split(".")[1]),
                                                           int(x[0].split(".")[2]),
                                                           int(x[0].split(".")[3])))
                            # Sorts by first element (an IP address) of inner list item
                            # Ip sorting is done by splitting each section of the address - pretty clear
                        # add pc name to the device/ip collection - after IP sorting
                        pc = fields.get("_PC", "")
                        # webcam = fields.get("_WebCam", "")
                        # echo360 = fields.get("_Echo360", "")
                        # TODO make allowances for more than one webcam and pc per venue - requires Airtable adjustment
                        # if webcam:
                        #     devicelist.append((webcam, "[WebCam]", "0", "", "Webcam", 4))
                        # if echo360:
                        #     devicelist.append((echo360, "Echo 360", "0", "", "Echo 360", 4))
                        if pc:
                            for item in pc.split(', '):
                                devicelist.append((item, "[Lectern PC]", "0", "", "", "HP PC", "16", "", ""))
                        venue["networkdevice"] = devicelist
                venue_list.append(venue)
            if not offset:  # if offset has no value
                morepages = False
            print("Pages retrieved from Airtable; Offset = " + offset)
        else:
            print(f'Failed to connect to Airtable\n\n({json_request_status})')
            print(ERROR_CODES.get(str(json_request_status), 'Error code not found'))
            break
    ###################################
    # Write to new json file icandi.json - if needed
    if json_request_status == requests.codes.ok:
        temp_json_file = DATA_DIR / "icandi.tmp"
        # with open(temp_json_file, "w") as file: # Raises a Pycharm warning -> open() expects a str not a Path object
        # writing in the style below circumvents raising a warning
        with temp_json_file.open("w") as file:
            json.dump(venue_list, file, indent=2)
        update_success, file_datetime = ac_utility.replace_with_updated(DATA_DIR / "icandi.json",
                                                                       DATA_DIR / "icandi.tmp",
                                                                       DATA_DIR / "icandi.bak")
        if file_datetime:
            date_response = datetime.fromtimestamp(file_datetime).strftime('%d %b %Y %H:%M:%S')
            if update_success:
                has_new_data = True
                print(f"Database updated: {date_response}")
            else:
                print(f"Database is up to date. Last modified: {date_response}")
        else:
            print("Unable to update database.")
            fail_msg = "Unable to update database.\nTry manual update"
            # Manual update could require deleting all icandi.* json files from data directory and restarting iCandi
    else:
        print("Problems with Airtable prevented any updating")
        http_err_msg = ERROR_CODES.get(str(json_request_status), 'Check AirTable API codes')
        fail_msg = f'Failed to connect to Airtable\n\n{json_request_status}: {http_err_msg}'
    # record when we last tried to refresh, regardless of outcome:
    ac_utility.preferences(DATA_DIR, "update", "last_data_refresh", time.localtime())
    return has_new_data, fail_msg
# Manual test drivers, kept for reference; the module is normally imported.
if __name__ == '__main__':
    # is_rebuilt, msg = build_icandi_json()
    # print(f'Database updated by build_icandi_json: {is_rebuilt}\nPassed Message: {msg}')
    # build_sims_json()
    pass
| StarcoderdataPython |
6444252 | <filename>pyannote/metrics/detection.py<gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2012-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
from .base import BaseMetric
from .utils import UEMSupportMixin
# Metric name and component keys used in the details dictionary returned by
# DetectionErrorRate.compute_components.
DER_NAME = 'detection error rate'
DER_TOTAL = 'total'
DER_FALSE_ALARM = 'false alarm'
DER_MISS = 'miss'
class DetectionErrorRate(UEMSupportMixin, BaseMetric):
    """Detection error rate

    Evaluates binary detection tasks such as speech activity detection.
    Inputs are expected to contain only segments of the positive class
    (e.g. speech regions); gaps in the inputs are treated as the negative
    class (e.g. non-speech regions).

    The rate is (fa + miss) / total, where fa is the duration of false
    alarm (e.g. non-speech classified as speech), miss is the duration of
    missed detection (e.g. speech classified as non-speech), and total is
    the total duration of the positive class in the reference.

    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments (one half before, one half after).
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    """

    @classmethod
    def metric_name(cls):
        return DER_NAME

    @classmethod
    def metric_components(cls):
        return [DER_TOTAL, DER_FALSE_ALARM, DER_MISS]

    def __init__(self, collar=0.0, skip_overlap=False, **kwargs):
        super(DetectionErrorRate, self).__init__(**kwargs)
        self.collar = collar
        self.skip_overlap = skip_overlap

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        reference, hypothesis, uem = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap,
            returns_uem=True)

        # collapse both annotations to their supporting timelines
        ref_timeline = reference.get_timeline(copy=False).support()
        hyp_timeline = hypothesis.get_timeline(copy=False).support()

        # complements of each timeline within the evaluated extent
        ref_gaps = ref_timeline.gaps(support=uem)
        hyp_gaps = hyp_timeline.gaps(support=uem)

        # false alarm: hypothesis overlapping reference gaps
        false_alarm = sum(((gap & seg).duration
                           for gap, seg in ref_gaps.co_iter(hyp_timeline)), 0.)

        # missed detection: reference overlapping hypothesis gaps
        missed = sum(((seg & gap).duration
                      for seg, gap in ref_timeline.co_iter(hyp_gaps)), 0.)

        return {DER_MISS: missed,
                DER_FALSE_ALARM: false_alarm,
                DER_TOTAL: ref_timeline.duration()}

    def compute_metric(self, detail):
        error = 1. * (detail[DER_FALSE_ALARM] + detail[DER_MISS])
        total = 1. * detail[DER_TOTAL]
        # empty reference: perfect score iff no false alarm either
        if total == 0.:
            return 0. if error == 0 else 1.
        return error / total
# Metric name and component keys used in the details dictionary returned by
# DetectionAccuracy.compute_components.
ACCURACY_NAME = 'detection accuracy'
ACCURACY_TRUE_POSITIVE = 'true positive'
ACCURACY_TRUE_NEGATIVE = 'true negative'
ACCURACY_FALSE_POSITIVE = 'false positive'
ACCURACY_FALSE_NEGATIVE = 'false negative'
class DetectionAccuracy(DetectionErrorRate):
    """Detection accuracy

    Evaluates binary detection tasks such as speech activity detection.
    Inputs are expected to contain only segments of the positive class
    (e.g. speech regions); gaps in the inputs are treated as the negative
    class (e.g. non-speech regions).

    Accuracy is (tp + tn) / total, where tp is the duration of true
    positive (e.g. speech classified as speech), tn is the duration of true
    negative (e.g. non-speech classified as non-speech), and total is the
    total duration of the input signal.

    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments (one half before, one half after).
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    """

    @classmethod
    def metric_name(cls):
        return ACCURACY_NAME

    @classmethod
    def metric_components(cls):
        return [ACCURACY_TRUE_POSITIVE, ACCURACY_TRUE_NEGATIVE,
                ACCURACY_FALSE_POSITIVE, ACCURACY_FALSE_NEGATIVE]

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        reference, hypothesis, uem = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap,
            returns_uem=True)

        # collapse both annotations to their supporting timelines
        ref_timeline = reference.get_timeline(copy=False).support()
        hyp_timeline = hypothesis.get_timeline(copy=False).support()

        # complements of each timeline within the evaluated extent
        ref_gaps = ref_timeline.gaps(support=uem)
        hyp_gaps = hyp_timeline.gaps(support=uem)

        # durations of agreement / disagreement between the two segmentations
        tp = sum(((r & h).duration
                  for r, h in ref_timeline.co_iter(hyp_timeline)), 0.)
        tn = sum(((r & h).duration
                  for r, h in ref_gaps.co_iter(hyp_gaps)), 0.)
        fp = sum(((r & h).duration
                  for r, h in ref_gaps.co_iter(hyp_timeline)), 0.)
        fn = sum(((r & h).duration
                  for r, h in ref_timeline.co_iter(hyp_gaps)), 0.)

        return {ACCURACY_TRUE_NEGATIVE: tn,
                ACCURACY_TRUE_POSITIVE: tp,
                ACCURACY_FALSE_NEGATIVE: fn,
                ACCURACY_FALSE_POSITIVE: fp}

    def compute_metric(self, detail):
        numerator = 1. * (detail[ACCURACY_TRUE_NEGATIVE] +
                          detail[ACCURACY_TRUE_POSITIVE])
        denominator = 1. * (detail[ACCURACY_TRUE_NEGATIVE] +
                            detail[ACCURACY_TRUE_POSITIVE] +
                            detail[ACCURACY_FALSE_NEGATIVE] +
                            detail[ACCURACY_FALSE_POSITIVE])
        # empty evaluation extent counts as a perfect score
        return 1. if denominator == 0. else numerator / denominator
# Metric name and component keys used in the details dictionary returned by
# DetectionPrecision.compute_components.
PRECISION_NAME = 'detection precision'
PRECISION_RETRIEVED = 'retrieved'
PRECISION_RELEVANT_RETRIEVED = 'relevant retrieved'
class DetectionPrecision(DetectionErrorRate):
    """Detection precision

    Evaluates binary detection tasks such as speech activity detection.
    Inputs are expected to contain only segments of the positive class
    (e.g. speech regions); gaps in the inputs are treated as the negative
    class (e.g. non-speech regions).

    Precision is tp / (tp + fp), where tp is the duration of true positive
    (e.g. speech classified as speech), and fp is the duration of false
    positive (e.g. non-speech classified as speech).

    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments (one half before, one half after).
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    """

    @classmethod
    def metric_name(cls):
        return PRECISION_NAME

    @classmethod
    def metric_components(cls):
        return [PRECISION_RETRIEVED, PRECISION_RELEVANT_RETRIEVED]

    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        reference, hypothesis, uem = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap,
            returns_uem=True)

        # collapse both annotations to their supporting timelines
        ref_timeline = reference.get_timeline(copy=False).support()
        hyp_timeline = hypothesis.get_timeline(copy=False).support()

        # complement of the reference within the evaluated extent
        ref_gaps = ref_timeline.gaps(support=uem)

        # true positive: hypothesis overlapping the reference
        tp = sum(((r & h).duration
                  for r, h in ref_timeline.co_iter(hyp_timeline)), 0.)

        # false positive: hypothesis overlapping reference gaps
        fp = sum(((r & h).duration
                  for r, h in ref_gaps.co_iter(hyp_timeline)), 0.)

        return {PRECISION_RETRIEVED: tp + fp,
                PRECISION_RELEVANT_RETRIEVED: tp}

    def compute_metric(self, detail):
        relevant_retrieved = 1. * detail[PRECISION_RELEVANT_RETRIEVED]
        retrieved = 1. * detail[PRECISION_RETRIEVED]
        # nothing retrieved counts as perfect precision
        return 1. if retrieved == 0. else relevant_retrieved / retrieved
# Metric/component names reported by DetectionRecall below.
RECALL_NAME = 'detection recall'
RECALL_RELEVANT = 'relevant'                        # tp + fn duration
RECALL_RELEVANT_RETRIEVED = 'relevant retrieved'    # tp duration
class DetectionRecall(DetectionErrorRate):
    """Detection recall

    Evaluates binary detection tasks (e.g. speech activity detection) where
    inputs contain only positive-class segments; gaps in the inputs stand for
    the negative class.

    Recall = tp / (tp + fn), where tp is the duration of true positives
    (e.g. speech classified as speech) and fn the duration of false negatives
    (e.g. speech classified as non-speech).

    Parameters
    ----------
    collar : float, optional
        Duration (in seconds) of collars removed from evaluation around
        boundaries of reference segments (one half before, one half after).
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    """
    @classmethod
    def metric_name(cls):
        return RECALL_NAME
    @classmethod
    def metric_components(cls):
        return [RECALL_RELEVANT, RECALL_RELEVANT_RETRIEVED]
    def compute_components(self, reference, hypothesis, uem=None, **kwargs):
        reference, hypothesis, uem = self.uemify(
            reference, hypothesis, uem=uem,
            collar=self.collar, skip_overlap=self.skip_overlap,
            returns_uem=True)
        reference = reference.get_timeline(copy=False).support()
        hypothesis = hypothesis.get_timeline(copy=False).support()
        # Regions the hypothesis labelled as negative.
        non_hypothesis = hypothesis.gaps(support=uem)
        true_positive = sum(
            ((r & h).duration for r, h in reference.co_iter(hypothesis)), 0.)
        false_negative = sum(
            ((r & g).duration for r, g in reference.co_iter(non_hypothesis)), 0.)
        return {
            RECALL_RELEVANT: true_positive + false_negative,
            RECALL_RELEVANT_RETRIEVED: true_positive,
        }
    def compute_metric(self, detail):
        relevant_retrieved = 1. * detail[RECALL_RELEVANT_RETRIEVED]
        relevant = 1. * detail[RECALL_RELEVANT]
        if relevant == 0.:
            # No relevant material: perfect recall only if nothing retrieved.
            return 1. if relevant_retrieved == 0 else 0.
        return relevant_retrieved / relevant
| StarcoderdataPython |
3478257 | import schedule
import time
def two_seconds():
    """Print 'tick', sleep for two seconds, then print 'tock'."""
    for label in ("tick", "tock"):
        print(label)
        if label == "tick":
            time.sleep(2)
# Register the job to run every second.  NOTE(review): the job itself sleeps
# for two seconds, so executions will lag behind the one-second schedule.
schedule.every().second.do(two_seconds)
while True:
    # Poll for due jobs once per second; run_pending() executes them inline.
    schedule.run_pending()
    time.sleep(1)
| StarcoderdataPython |
127817 | <filename>mp_server/_dash/server_dash_.py
import flask
from dash import Dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
## NOTE: https://community.plotly.com/t/dash-exceptions-nolayoutexception-the-layout-was-none-at-the-time-that-run-server/34798/3
# Create the underlying Flask server and the Dash application.
server = flask.Flask(__name__)
# Bug fix: only `Dash` is imported (`from dash import Dash`); the original
# `dash.Dash(__name__)` raised NameError because the `dash` module itself is
# never imported.  NOTE(review): `server` looks intended to be passed as
# Dash(__name__, server=server) -- confirm before wiring it up.
app = Dash(__name__)
app.config.suppress_callback_exceptions = True
# App Layout: a header row, a tab bar (Market / Portfolio / Reporting), and a
# placeholder div whose children are filled in by the render_content callback.
app.layout = html.Div([
    # header
    html.Div([
        html.Div(
            html.Img(src='logo',height="100%")
        ,style={"float":"right","width":"170px","height":"100px","margin-top":"-14px"})
    ],
        className="row header"
    ),
    # tabs
    html.Div([
        dcc.Tabs(
            id="tabs",
            style={"height":"60","verticalAlign":"middle"},
            children=[
                dcc.Tab(label="Market", value="market_tab"),
                dcc.Tab(label="Portfolio", value="portfolio_tab"),
                dcc.Tab(label="Reporting", value="reporting_tab"),
            ],
            # The market tab is selected by default.
            value="market_tab",
        )
    ],
        className="row tabs_div"
    ),
    # Tab content -- populated by the "tabs" callback below.
    html.Div(id="tab_content", style={"margin": "2% 3%"})
])
@app.callback(Output("tab_content", "children"),
              [
                  Input("tabs", "value")
              ]
              )
def render_content(tab):
    """
    Return the layout for the currently selected tab.

    `market_tab` and any unrecognised value both resolve to the market view
    (the original duplicated that return in an elif/else pair).
    """
    # NOTE(review): `portfolio`, `reporting` and `market` are never imported
    # in this module -- confirm which modules provide these layouts.
    if tab == "portfolio_tab":
        return portfolio.layout
    if tab == "reporting_tab":
        return reporting.layout
    return market.layout
if __name__ == '__main__':
    # Launch the Dash development server (not suitable for production).
    app.run_server(debug=True)
## NOTE: https://kibua20.tistory.com/216
# application = flask.Flask(__name__)
# dash_app1 = Dash(__name__, server = application, url_base_pathname='/dashapp1/')
# dash_app2 = Dash(__name__, server = application, url_base_pathname='/dashapp2/')
# dash_app3 = Dash(__name__, server = application, url_base_pathname='/dashapp3/')
# dash_app4 = Dash(__name__, server = application, url_base_pathname='/dashapp4/')
# dash_app5 = Dash(__name__, server = application, url_base_pathname='/dashapp5/')
# # flask app
# @application.route('/')
# def index():
# print ('flask index()')
# return 'index'
# # run the app.
# if __name__ == "__main__":
# application.debug=True
# application.run(host='127.0.0.1', port=6868)
# # app.run(debug=True, host='127.0.0.1', port=6868)
| StarcoderdataPython |
8077249 | <gh_stars>0
from beir.datasets.data_loader import GenericDataLoader
from beir.configs import dataset_stored_loc
from beir.custom_logging import setup_logger, log_map
from sentence_transformers import InputExample
from typing import List, Set
from tqdm import tqdm
import requests
import json
import os
import argparse
import random
import logging
# Module-level logger, configured through the project's logging helper.
logger = logging.getLogger(__name__)
setup_logger(logger)
def collect_training_data(number_positives: int = 1, number_random_negatives: int = 2, number_hard_negatives: int = 2):
    """Build a JSONL file of (query, document, label) training examples.

    For each query (up to the module-global ``limit``): take up to
    ``number_positives`` relevant documents as positives, mine up to
    ``number_hard_negatives`` hard negatives from a BM25 search against a
    pyserini service, and sample ``number_random_negatives`` random negatives
    from the corpus.

    NOTE(review): relies on module globals set in ``__main__`` (``dataset``,
    ``split``, ``limit``, ``output_ds_name``, ``docker_beir_pyserini``), so it
    can only be called from this script.
    """
    data_folder: str = os.path.join(dataset_stored_loc, dataset)
    corpus, queries, qrels = GenericDataLoader(data_folder).load(split=split)
    query_ids: List[str] = list(qrels)
    corpus_ids: List[str] = list(corpus)
    positive_examples: List[InputExample] = []
    negative_examples: List[InputExample] = []
    for query_id in tqdm(query_ids[:limit], desc="Collecting training data...", total=limit, leave=True):
        positive_document_ids: List[str] = list(qrels[query_id])
        query_text: str = queries[query_id]
        # collect positive examples
        for positive_document_id in positive_document_ids[:number_positives]:
            positive_examples.append(InputExample(
                texts=[query_text, corpus[positive_document_id]["text"]], label=1
            ))
        # collect negative examples (pyserini)
        # use query/document to retrieve relative documents
        # One search query per positive: "<positive title> <query text>".
        queries_for_search = [f"{corpus[pos_id]['title']} {query_text}" for pos_id in
                              positive_document_ids[:number_positives]]
        payload = {
            "queries": queries_for_search,
            "qids": [f"{query_id}_{idx + 1}" for idx in range(len(queries_for_search))],
            "k": 100
        }
        # NOTE(review): assumes the pyserini service returns
        # {"results": {qid: {doc_id: score, ...}}} -- verify against the server.
        hits = json.loads(
            requests.post(docker_beir_pyserini + "/lexical/batch_search/", json=payload).text)["results"]
        cnt_hard_negatives: int = 0
        cnt_random_negatives: int = 0
        record_hit_ids: List[str] = []
        # Block to add hard negatives
        # NOTE(review): iterates over *all* positive ids although only the
        # first `number_positives` were searched; the `in hits` guard makes
        # the extra iterations no-ops.
        for query_for_search in range(len(positive_document_ids)):
            if f"{query_id}_{query_for_search + 1}" in hits:
                hit_ids = list(hits[f"{query_id}_{query_for_search + 1}"])[:100]
                record_hit_ids.extend(hit_ids)
                for hit_id in hit_ids:
                    if hit_id not in positive_document_ids and cnt_hard_negatives < number_hard_negatives:
                        negative_document = corpus[hit_id]["title"] + " " + corpus[hit_id]["text"]
                        negative_examples.append(InputExample(
                            texts=[query_text, negative_document], label=0
                        ))
                        cnt_hard_negatives += 1
        record_hit_ids: Set[str] = set(record_hit_ids)
        # Block to add random negatives
        # NOTE(review): could loop for a long time on a tiny corpus where most
        # documents are positives/hits.
        while cnt_random_negatives < number_random_negatives:
            random_document_id = random.choice(corpus_ids)
            while random_document_id in record_hit_ids or random_document_id in positive_document_ids:
                logger.info("Overlapped with hard examples... random pick another one...")
                random_document_id = random.choice(corpus_ids)
            random_negative_document = corpus[random_document_id]["title"] + " " + corpus[random_document_id]["text"]
            negative_examples.append(InputExample(
                texts=[query_text, random_negative_document], label=0
            ))
            cnt_random_negatives += 1
    # Write all positives followed by all negatives as JSON lines.
    curr_folder = os.path.abspath(os.path.dirname(__file__))
    os.makedirs(os.path.join(curr_folder, dataset), exist_ok=True)
    with open(os.path.join(curr_folder, dataset, output_ds_name), 'w') as f:
        for example in [*positive_examples, *negative_examples]:
            f.write(json.dumps({
                "query": example.texts[0],
                "document": example.texts[1],
                "label": example.label
            }))
            f.write("\n")
if __name__ == "__main__":
    # CLI entry point: parse mining parameters and expose them as the module
    # globals that collect_training_data() reads.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str)
    parser.add_argument("--split", type=str, default="train")
    parser.add_argument("--limit", type=int, default=100)
    parser.add_argument("--port", type=int, default=8000)
    parser.add_argument("--number_positives", type=int, default=1)
    parser.add_argument("--number_random_negatives", type=int, default=2)
    parser.add_argument("--number_hard_negatives", type=int, default=2)
    params = parser.parse_args()
    log_map(logger, "Arguments", params.__dict__)
    dataset = params.dataset
    split = params.split
    limit = params.limit
    num_pos = params.number_positives
    num_rands = params.number_random_negatives
    num_hards = params.number_hard_negatives
    # Output filename encodes the mining configuration.
    output_ds_name: str = f"{dataset}_{split}_{limit}_rand_{num_rands}_hard_{num_hards}.jsonl"
    # Local pyserini BM25 service used for hard-negative mining.
    docker_beir_pyserini = f"http://localhost:{params.port}"
    collect_training_data(
        number_positives=num_pos,
        number_random_negatives=num_rands,
        number_hard_negatives=num_hards
    )
| StarcoderdataPython |
163241 | import unittest
from player import Player
class PlayerTest(unittest.TestCase):
    """Unit tests for Player input validation and win detection."""

    def setUp(self):
        # Fresh player per test so choices never leak between cases.
        self.player_1 = Player()

    def test_when_the_players_input_its_out_of_range(self):
        """An out-of-board position must not be recorded."""
        count_before = len(self.player_1.choices)
        self.player_1.add_choice('C4')
        count_after = len(self.player_1.choices)
        self.assertEqual(count_after, count_before)

    def test_when_the_player_input_its_a_string_with_len_bigger_than_2(self):
        """Inputs longer than two characters are rejected."""
        self.player_1.add_choice('C1A')
        self.assertFalse(self.player_1.choices)

    def test_when_the_player_input_its_not_a_string(self):
        """Non-string inputs are rejected."""
        self.player_1.add_choice(12.4)
        self.assertFalse(self.player_1.choices)

    def test_when_the_player_tries_to_input_the_same_position(self):
        """Repeating a position must not add a duplicate entry."""
        self.player_1.add_choice('C1')
        count_before = len(self.player_1.choices)
        self.player_1.add_choice('C1')
        count_after = len(self.player_1.choices)
        self.assertEqual(count_after, count_before)

    def test_crescent_diagonal_win(self):
        """A1-B2-C3 diagonal marks the player as winner."""
        for position in ('A1', 'A3', 'B1', 'B2', 'C3'):
            self.player_1.add_choice(position)
        self.assertEqual(self.player_1.status, "winner")

    def test_decrescent_diagonal_win(self):
        """A3-B2-C1 diagonal marks the player as winner."""
        for position in ('A1', 'A3', 'B1', 'B2', 'C1'):
            self.player_1.add_choice(position)
        self.assertEqual(self.player_1.status, "winner")

    def test_horizontal_win(self):
        """A1-B1-C1 row marks the player as winner."""
        for position in ('A1', 'C1', 'A3', 'B1'):
            self.player_1.add_choice(position)
        self.assertEqual(self.player_1.status, "winner")

    def test_vertical_win(self):
        """C1-C2-C3 column marks the player as winner."""
        for position in ('C2', 'C1', 'C3', 'A3', 'B1'):
            self.player_1.add_choice(position)
        self.assertEqual(self.player_1.status, "winner")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
3464607 | <reponame>Patrick-Star125/handgesture-recognition
import cv2 as cv
import abc
import numpy as np
class Buffer:
    """Fixed-capacity FIFO-like frame buffer used by the video cutters.

    NOTE(review): `number` is tracked separately from `len(self.q)` and the
    two can drift apart (see dequeue below) -- confirm intended semantics.
    """
    def __init__(self, length):
        # length: maximum number of frames held; q: backing list;
        # number: logical element count; count: frames handed out via index.
        self.len = length
        self.q = []
        self.number = 0
        self.count = 0
    def isempty(self):
        # Emptiness is judged from the list, not from `number`.
        if self.q == []:
            return 1
        else:
            return 0
    def isfull(self):
        # Fullness is judged from `number`, not from the list length.
        if self.number == self.len:
            return 1
        else:
            return 0
    def enqueue(self, elem):
        # Silently drops elements once the buffer is full.
        if self.isfull():
            pass
        else:
            self.number += 1
            self.q.append(elem)
    def dequeue(self, index, step=True):
        # NOTE(review): this "dequeue" never removes anything from self.q --
        # it only decrements `number`.  With step=True it always returns the
        # first element regardless of `index`; with step=False it returns
        # q[index].  Confirm whether removal was intended.
        if self.isempty():
            return 0
        else:
            self.number -= 1
            if step == True:
                return self.q[0]
            else:
                try:
                    val = self.q[index]
                    self.count += 1
                    return val
                # NOTE(review): bare except hides everything, including typos.
                except:
                    return 0
    # Read one buffered image (earliest, or by index when step=False).
    def readBuffer(self, index, step):
        val = self.dequeue(index, step)
        return val
    # Return the whole list of buffered images.
    def readBuffers(self):
        return self.q
    # Clear the buffer and reset the logical count.
    def clearBuffer(self):
        self.number = 0
        self.q.clear()
        return self.q
    # Store one image (no-op when full).
    def writeBuffer(self, elem):
        self.enqueue(elem)
class Cut(metaclass=abc.ABCMeta):
    """Abstract base for frame-stream cutters.

    Subclasses must implement videoStream().
    """

    @abc.abstractmethod
    def videoStream(self):
        ...
class Video_cut(Cut):
    """Read a video file and collect every `frameRate`-th frame via Buffer."""
    def __init__(self, video_path, frameRate=1, blen=30):
        # frameRate: keep one frame out of every `frameRate`;
        # blen: capacity of the intermediate Buffer.
        self.frameRate = frameRate
        print('[frameRate]:帧数间隔为%d' % frameRate)
        self.blen = blen
        self.buf = Buffer(self.blen)
        self.img_buf = []
        self.cap = cv.VideoCapture(video_path)
        # NOTE(review): this read consumes (and discards) the very first
        # frame of the video -- confirm that is intentional.
        ret, first = self.cap.read()
        if ret:
            if not self.buf.len == 0:
                print('[buffer]:缓冲区成功创建,长度为%d' % self.buf.len)
            else:
                print('长度不能为0')
        else:
            print('视频不存在')
    def videoStream(self, step=True):
        """Drain the whole video, returning the list of sampled frames.

        NOTE(review): with step=True, Buffer.readBuffer always returns the
        first buffered frame (see Buffer.dequeue), so each full window
        yields repeated copies of its first frame -- verify against the
        intended sampling behaviour.
        """
        while True:
            ret, frame = self.cap.read()
            if not ret:
                # End of video: flush whatever is still buffered.
                others_len = self.buf.number
                for i in range(0, others_len, self.frameRate):
                    self.img_buf.append(self.buf.readBuffer(i, step))
                self.buf.clearBuffer()
                break
            if self.buf.isfull():
                # Window full: sample every frameRate-th slot, then reset.
                for i in range(0, self.blen, self.frameRate):
                    print("取走第%d帧" % i)
                    self.img_buf.append(self.buf.readBuffer(i, step))
                if not self.img_buf == []:
                    print('取走的帧数:%d,buf剩余图片:%d' % (self.buf.count, self.buf.number))
                self.buf.clearBuffer()
            else:
                self.buf.writeBuffer(frame)
                print("存入帧数%d" % self.buf.number)
        return self.img_buf
'''
Holds a sliding window of list-typed samples.
Instantiate with the desired window length.
'''
class Cutlist(Cut):
    """Sliding-window buffer over list-shaped samples.

    Buffers incoming samples until `length` of them are held; after that,
    each call slides the window by one and returns every
    int(length / get_num)-th sample of the window.
    """
    def __init__(self, length, get_num):
        # length: size of the sliding window; get_num: number of samples to
        # return from each full window.
        self.len = length
        self.num = get_num
        self.count = 0
        self.l = []

    def is_full(self):
        """Return 1 when the window holds `len` samples, else 0."""
        if self.count == self.len:
            return 1
        return 0

    def lclear(self):
        """Empty the window and reset the counter."""
        # Bug fix: the original wrote `self.l.clear` without parentheses,
        # which never actually cleared the list.
        self.l.clear()
        self.count = 0

    def _push_and_sample(self, vlist):
        # Shared implementation for videoStream/cameraStream (they were
        # byte-for-byte duplicates).  Returns None while still buffering.
        self.re_l = []
        if not self.is_full():
            self.l.append(vlist)
            self.count += 1
            print('缓冲')
            return None
        # Slide the window, then pick every (len // num)-th element.
        self.l.pop(0)
        self.l.append(vlist)
        for i in range(0, self.len, int(self.len / self.num)):
            self.re_l.append(self.l[i])
        return self.re_l

    def videoStream(self, vlist):
        """Push one sample; return the sampled window once full, else None."""
        return self._push_and_sample(vlist)

    def cameraStream(self, vlist):
        """Identical to videoStream; kept for camera-based call sites."""
        return self._push_and_sample(vlist)
| StarcoderdataPython |
1770624 | import json
from app.api.auth import views
from app.tests import mock_objects
# Test user registration passes
def test_user_registration(test_app, monkeypatch):
    """Happy path: a new e-mail registers successfully and returns 201."""
    monkeypatch.setattr(
        views, "get_user_by_email", mock_objects.get_no_user_by_email,
    )
    monkeypatch.setattr(views, "add_user", mock_objects.add_user)
    client = test_app.test_client()
    response = client.post(
        "/auth/register",
        data=json.dumps(
            {
                "username": "test_user",
                "email": "<EMAIL>",
                "password": "<PASSWORD>",
            }
        ),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 201
    data = response.get_json()
    # The password (or its hash) must never be echoed back to the client.
    assert "password" not in data.keys()
    assert data["id"] == 1
    assert data["username"] == "test_user"
    assert data["email"] == "<EMAIL>"
# Test user registration fails due to empty data
def test_user_registration_empty_data(test_app):
    """An empty JSON payload is rejected with 400."""
    client = test_app.test_client()
    response = client.post(
        "/auth/register",
        data=json.dumps({}),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 400
    data = response.get_json()
    assert "Input payload validation failed" in data["message"]
# Test user registration fails due to invalid data
def test_user_registration_invalid_data(test_app):
    """A payload missing required fields is rejected with 400."""
    client = test_app.test_client()
    response = client.post(
        "/auth/register",
        data=json.dumps({"username": "test_user"}),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 400
    data = response.get_json()
    assert "Input payload validation failed" in data["message"]
# Test user registration fails due to duplicate entry
def test_user_registration_duplicate_entry(test_app, monkeypatch):
    """Registering an already-known e-mail is rejected with 400."""
    monkeypatch.setattr(
        views, "get_user_by_email", mock_objects.get_user_by_email
    )
    monkeypatch.setattr(views, "add_user", mock_objects.add_user)
    client = test_app.test_client()
    response = client.post(
        "/auth/register",
        data=json.dumps(
            {
                "username": "test_user",
                "email": "<EMAIL>",
                "password": "<PASSWORD>",
            }
        ),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 400
    data = response.get_json()
    assert "<EMAIL> is already registered" in data["message"]
# Test user registration fails due to invalid headers
def test_user_registration_invalid_header(test_app):
    """Missing Accept or Content-Type headers yield 415."""
    client = test_app.test_client()
    response = client.post(
        "/auth/register",
        data=json.dumps({"email": "<EMAIL>"}),
        headers={"Accept": "application/json"},
    )
    assert response.status_code == 415
    data = response.get_json()
    assert "define Content-Type header" in data["message"]
    response = client.post(
        "/auth/register",
        data=json.dumps({"email": "<EMAIL>"}),
        headers={"Content-Type": "application/json"},
    )
    assert response.status_code == 415
    data = response.get_json()
    assert "supported is application/json" in data["message"]
# Test user login passes
def test_user_login(test_app, monkeypatch):
    """Happy path: valid credentials yield both tokens with 200."""
    monkeypatch.setattr(
        views, "get_user_by_email", mock_objects.get_user_object_by_email,
    )
    monkeypatch.setattr(views, "add_token", mock_objects.add_token)
    monkeypatch.setattr(
        views, "password_matches", mock_objects.password_matches
    )
    client = test_app.test_client()
    response = client.post(
        "/auth/login",
        data=json.dumps(
            {"email": "<EMAIL>", "password": "<PASSWORD>"}
        ),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 200
    data = response.get_json()
    assert data["access_token"]
    assert data["refresh_token"]
# Test user login fails due to wrong password
def test_user_login_wrong_password(test_app, monkeypatch):
    """A password mismatch is rejected with 401."""
    monkeypatch.setattr(
        views, "get_user_by_email", mock_objects.get_user_by_email,
    )
    monkeypatch.setattr(
        views, "password_matches", mock_objects.password_not_matches
    )
    client = test_app.test_client()
    response = client.post(
        "/auth/login",
        data=json.dumps(
            {"email": "<EMAIL>", "password": "<PASSWORD>"}
        ),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 401
    data = response.get_json()
    assert "Invalid password for" in data["message"]
# Test user login fails due to unregistered user
def test_user_login_unregistered_user(test_app, monkeypatch):
    """An unknown e-mail is rejected with 404."""
    monkeypatch.setattr(
        views, "get_user_by_email", mock_objects.get_no_user_by_email,
    )
    client = test_app.test_client()
    response = client.post(
        "/auth/login",
        data=json.dumps(
            {"email": "<EMAIL>", "password": "<PASSWORD>"}
        ),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 404
    data = response.get_json()
    assert "<EMAIL> does not exists" in data["message"]
# Test user login fails due to invalid header
def test_user_login_invalid_header(test_app):
    """Missing Accept or Content-Type headers yield 415."""
    client = test_app.test_client()
    response = client.post(
        "/auth/login",
        data=json.dumps({"email": "<EMAIL>"}),
        headers={"Accept": "application/json"},
    )
    assert response.status_code == 415
    data = response.get_json()
    assert "define Content-Type header" in data["message"]
    response = client.post(
        "/auth/login",
        data=json.dumps({"email": "<EMAIL>"}),
        headers={"Content-Type": "application/json"},
    )
    assert response.status_code == 415
    data = response.get_json()
    assert "supported is application/json" in data["message"]
# Test refresh token passes
def test_refresh_token(test_app, monkeypatch):
    """Happy path: a valid refresh token yields a fresh token pair."""
    monkeypatch.setattr(
        views, "get_user_id_by_token", mock_objects.get_user_id_by_token,
    )
    monkeypatch.setattr(views, "update_token", mock_objects.update_token)
    client = test_app.test_client()
    response = client.post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": "refresh_token"}),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 200
    data = response.get_json()
    assert data["refresh_token"]
    assert data["access_token"]
# Test refresh token fails due to expired token
def test_refresh_token_expired(test_app, monkeypatch):
    """An expired refresh token is rejected with 401."""
    monkeypatch.setattr(
        views,
        "get_user_id_by_token",
        mock_objects.get_expired_token_exception,
    )
    monkeypatch.setattr(views, "update_token", mock_objects.update_token)
    client = test_app.test_client()
    response = client.post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": "refresh_token"}),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 401
    data = response.get_json()
    assert "Token expired" in data["message"]
# Test refresh token fails due to invalid token
def test_refresh_token_invalid(test_app, monkeypatch):
    """A malformed refresh token is rejected with 401."""
    monkeypatch.setattr(
        views,
        "get_user_id_by_token",
        mock_objects.get_invalid_token_exception,
    )
    monkeypatch.setattr(views, "update_token", mock_objects.update_token)
    client = test_app.test_client()
    response = client.post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": "refresh_token"}),
        headers={
            "Accept": "application/json",
            "Content-Type": "application/json",
        },
    )
    assert response.status_code == 401
    data = response.get_json()
    assert "Invalid token" in data["message"]
# Test refresh token fails due to invalid headers
def test_refresh_token_invalid_header(test_app):
    """Missing Accept or Content-Type headers yield 415."""
    client = test_app.test_client()
    response = client.post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": "refresh"}),
        headers={"Accept": "application/json"},
    )
    assert response.status_code == 415
    data = response.get_json()
    assert "define Content-Type header" in data["message"]
    response = client.post(
        "/auth/refresh",
        data=json.dumps({"email": "<EMAIL>"}),
        headers={"Content-Type": "application/json"},
    )
    assert response.status_code == 415
    data = response.get_json()
    assert "supported is application/json" in data["message"]
5065035 | #!/usr/bin/env python
# -*-coding:utf-8-*-
# File Name : t.py
# Description :
# Author :
# Creation Date : 2021-10-24
# Last Modified : 2021年10月24日 星期日 20时57分01秒
# Created By : lsl
def f1(s):
    """Parse a chemical-formula character list into an element->count dict.

    *s* is a list of single characters, consumed destructively.  Handles
    nested parenthesised groups recursively, e.g.
    f1(list("Mg(OH)2")) -> {'Mg': 1, 'O': 2, 'H': 2}.
    """
    def _flush(ret, name, num):
        # Fold the pending element (str) or parsed group (dict) into ret.
        # num == 0 means "no digit seen", i.e. an implicit count of 1.
        if isinstance(name, str):
            if name:
                ret[name] = num if num != 0 else 1
        else:
            if num == 0:
                num = 1
            for k, v in name.items():
                ret[k] = ret.get(k, 0) + v * num

    name = ""
    num = 0
    ret = {}
    while s:
        c = s.pop(0)
        if c == ")":
            # Bug fix: the original returned here without flushing the
            # pending element, dropping the last symbol of every group
            # (e.g. "Mg(OH)2" lost the H).
            _flush(ret, name, num)
            return ret
        elif c == "(" or c.isupper():
            _flush(ret, name, num)
            # A '(' starts a nested group, parsed recursively.
            name = f1(s) if c == "(" else c
            num = 0
        elif c.islower():
            name += c
        elif c.isdigit():
            num = num * 10 + int(c)
        # Any other character is ignored, as in the original.
    # Bug fix: removed the stray debug `print(name)` that ran here.
    _flush(ret, name, num)
    return ret
def f(s):
    """Render the parsed composition of formula *s* as a flat string.

    Counts of 1 are omitted, e.g. "Mg(OH)2" -> "MgO2H2".
    """
    counts = f1(list(s))
    return "".join(
        "{}{}".format(element, count if count > 1 else "")
        for element, count in counts.items()
    )
# Demo: print the flattened composition of magnesium hydroxide.
print(f("Mg(OH)2"))
| StarcoderdataPython |
5166086 | import numpy as np
import pytest
import numpy.testing as npt
from pulse2percept.implants.base import ProsthesisSystem
from pulse2percept.implants.bvt import BVT24, BVT44
@pytest.mark.parametrize('x', (-100, 200))
@pytest.mark.parametrize('y', (-200, 400))
@pytest.mark.parametrize('rot', (-45, 60))
@pytest.mark.parametrize('eye', ('LE', 'RE'))
def test_BVT24(x, y, rot, eye):
    """Check BVT24 electrode placement under translation/rotation."""
    # Create a BVT24 and make sure location is correct
    bva = BVT24(x=x, y=y, rot=rot, eye=eye)
    # Slots:
    npt.assert_equal(hasattr(bva, '__slots__'), True)
    npt.assert_equal(hasattr(bva, '__dict__'), False)
    # Make sure rotation + translation is applied correctly:
    bva0 = BVT24(eye=eye)  # centered
    xy = np.array([bva0['C1'].x, bva0['C1'].y]).T
    xy2 = np.array([bva0['C21m'].x, bva0['C21m'].y]).T
    # Rotate:
    rot_rad = np.deg2rad(rot)
    R = np.array([np.cos(rot_rad), -np.sin(rot_rad),
                  np.sin(rot_rad), np.cos(rot_rad)]).reshape((2, 2))
    xy = np.matmul(R, xy)
    xy2 = np.matmul(R, xy2)
    # Translate:
    npt.assert_almost_equal(bva['C1'].x, xy[0] + x)
    npt.assert_almost_equal(bva['C1'].y, xy[1] + y)
    npt.assert_almost_equal(bva['C21m'].x, xy2[0] + x)
    npt.assert_almost_equal(bva['C21m'].y, xy2[1] + y)
    # Check radii of electrodes
    for e in ['C1', 'C5', 'C8', 'C15', 'C20']:
        npt.assert_almost_equal(bva[e].r, 300.0)
    for e in ['C9', 'C17', 'C19']:
        npt.assert_almost_equal(bva[e].r, 200.0)
    for e in ['R1', 'R2']:
        npt.assert_almost_equal(bva[e].r, 1000.0)
    # Check the center is still at (x,y)
    y_center = (bva['C8'].y + bva['C13'].y) / 2
    npt.assert_almost_equal(y_center, y)
    x_center = (bva['C8'].x + bva['C13'].x) / 2
    npt.assert_almost_equal(x_center, x)
    # Right-eye implant:
    xc, yc = -500, -500
    bva_re = BVT24(eye='RE', x=xc, y=yc)
    npt.assert_equal(bva_re['C1'].x > bva_re['C6'].x, True)
    # NOTE(review): tautological assertion -- compares C1.y with itself.
    # The analogous BVT44 test compares two different electrodes; confirm
    # the intended pair here (likely C1 vs C6).
    npt.assert_equal(bva_re['C1'].y, bva_re['C1'].y)
    # Left-eye implant:
    xc, yc = -500, -500
    bva_le = BVT24(eye='LE', x=xc, y=yc)
    npt.assert_equal(bva_le['C1'].x < bva_le['C6'].x, True)
    # NOTE(review): tautological assertion, same issue as above.
    npt.assert_equal(bva_le['C1'].y, bva_le['C1'].y)
def test_BVT24_stim():
    """Stimulus assignment via attribute, constructor, and raw array."""
    # Assign a stimulus:
    implant = BVT24()
    implant.stim = {'C1': 1}
    npt.assert_equal(implant.stim.electrodes, ['C1'])
    npt.assert_equal(implant.stim.time, None)
    npt.assert_equal(implant.stim.data, [[1]])
    # You can also assign the stimulus in the constructor:
    # Bug fix: the constructed implant was previously discarded, so the
    # assertions below silently re-checked the implant created above.
    implant = BVT24(stim={'C1': 1})
    npt.assert_equal(implant.stim.electrodes, ['C1'])
    npt.assert_equal(implant.stim.time, None)
    npt.assert_equal(implant.stim.data, [[1]])
    # Set a stimulus via array:
    implant = BVT24(stim=np.ones(35))
    npt.assert_equal(implant.stim.shape, (35, 1))
    npt.assert_almost_equal(implant.stim.data, 1)
@pytest.mark.parametrize('x', (-100, 200))
@pytest.mark.parametrize('y', (-200, 400))
@pytest.mark.parametrize('rot', (-45, 60))
@pytest.mark.parametrize('eye', ('LE', 'RE'))
def test_BVT44(x, y, rot, eye):
    """Check BVT44 electrode placement under translation/rotation."""
    # Create a BVT44 and make sure location is correct
    bva = BVT44(x=x, y=y, rot=rot, eye=eye)
    # Slots:
    npt.assert_equal(hasattr(bva, '__slots__'), True)
    npt.assert_equal(hasattr(bva, '__dict__'), False)
    # Make sure array is rotated + translated correctly:
    bva0 = BVT44(eye=eye)
    xy = np.array([bva0['A1'].x, bva0['A1'].y]).T
    xy2 = np.array([bva0['G6'].x, bva0['G6'].y]).T
    # Rotate:
    rot_rad = np.deg2rad(rot)
    R = np.array([np.cos(rot_rad), -np.sin(rot_rad),
                  np.sin(rot_rad), np.cos(rot_rad)]).reshape((2, 2))
    xy = np.matmul(R, xy)
    xy2 = np.matmul(R, xy2)
    # Translate:
    npt.assert_almost_equal(bva['A1'].x, xy[0] + x)
    npt.assert_almost_equal(bva['A1'].y, xy[1] + y)
    npt.assert_almost_equal(bva['G6'].x, xy2[0] + x)
    npt.assert_almost_equal(bva['G6'].y, xy2[1] + y)
    # Check radii of electrodes
    for e in ['A1', 'A5', 'B3', 'C5', 'D2']:
        npt.assert_almost_equal(bva[e].r, 500.0)
    for e in ['R1', 'R2']:
        npt.assert_almost_equal(bva[e].r, 1000.0)
    # Check the center is still at (x,y)
    npt.assert_almost_equal((bva['D4'].x + bva['D5'].x) / 2.0, x)
    npt.assert_almost_equal((bva['E4'].y + bva['C4'].y) / 2.0, y)
    # Right-eye implant:
    xc, yc = -500, -500
    bva_re = BVT44(eye='RE', x=xc, y=yc)
    npt.assert_equal(bva_re['A6'].x > bva_re['A1'].x, True)
    npt.assert_equal(bva_re['A6'].y, bva_re['A1'].y)
    # Left-eye implant:
    xc, yc = -500, -500
    bva_le = BVT44(eye='LE', x=xc, y=yc)
    npt.assert_equal(bva_le['A6'].x < bva_le['A1'].x, True)
    npt.assert_equal(bva_le['A6'].y, bva_le['A1'].y)
def test_BVT44_stim():
    """Stimulus assignment via attribute, constructor, and raw array."""
    # Assign a stimulus:
    implant = BVT44()
    implant.stim = {'A1': 1}
    npt.assert_equal(implant.stim.electrodes, ['A1'])
    npt.assert_equal(implant.stim.time, None)
    npt.assert_equal(implant.stim.data, [[1]])
    # You can also assign the stimulus in the constructor:
    # Bug fix: the constructed implant was previously discarded, so the
    # assertions below silently re-checked the implant created above.
    implant = BVT44(stim={'A1': 1})
    npt.assert_equal(implant.stim.electrodes, ['A1'])
    npt.assert_equal(implant.stim.time, None)
    npt.assert_equal(implant.stim.data, [[1]])
    # Set a stimulus via array:
    implant = BVT44(stim=np.ones(46))
    npt.assert_equal(implant.stim.shape, (46, 1))
    npt.assert_almost_equal(implant.stim.data, 1)
| StarcoderdataPython |
9722512 | <filename>test/test_static_runtime.py
# Owner(s): ["module: unknown"]
import unittest
from typing import Dict, Optional
import numpy as np
import torch
from torch import nn
from torch.testing._internal.common_utils import TestCase, run_tests
from typing import List
class StaticModule:
    """Thin wrapper around torch's C++ static-runtime module builder."""

    def __init__(self, scripted):
        # `scripted` is either a scripted nn.Module (exposes `_c`) or a
        # raw graph object.
        target = scripted._c if hasattr(scripted, "_c") else scripted.graph
        self.static_module = torch._C._jit_to_static_module(target)

    def __call__(self, *args, **kwargs):
        """Delegate the call straight to the static runtime."""
        return self.static_module(*args, **kwargs)

    def benchmark(self, args, kwargs, warmup_runs, main_runs):
        """Run the whole-module benchmark (prints its own report)."""
        self.static_module.benchmark(args, kwargs, warmup_runs, main_runs)

    def benchmark_individual_ops(self, args, kwargs, warmup_runs, main_runs):
        """Return per-op benchmark statistics."""
        return self.static_module.benchmark_individual_ops(
            args, kwargs, warmup_runs, main_runs
        )
def linear_shim(
    input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None
) -> torch.Tensor:
    """Functional linear layer: input @ weight.T, plus bias when given."""
    result = input.matmul(weight.t())
    if bias is None:
        return result
    result += bias
    return result
# Globally replace F.linear with the shim above so scripted modules in this
# file use the matmul-based implementation.
torch.nn.functional.linear = linear_shim
class MultiHeadAttentionLayer(nn.Module):
    """Scaled dot-product multi-head attention used as a test fixture."""
    def __init__(self, hid_dim, n_heads, dropout, device):
        super().__init__()
        assert hid_dim % n_heads == 0
        self.hid_dim = hid_dim
        self.n_heads = n_heads
        self.head_dim = hid_dim // n_heads
        # Separate projections for queries, keys, values and the output.
        self.fc_q = nn.Linear(hid_dim, hid_dim)
        self.fc_k = nn.Linear(hid_dim, hid_dim)
        self.fc_v = nn.Linear(hid_dim, hid_dim)
        self.fc_o = nn.Linear(hid_dim, hid_dim)
        # self.dropout = nn.Dropout(dropout)
        # Scalar normaliser sqrt(head_dim); plain attribute, not a buffer.
        self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)
    def forward(self, query, key, value, mask):
        # NOTE(review): `mask` is accepted but unused -- the masked_fill line
        # below is commented out, as is dropout.
        batch_size = query.shape[0]
        Q = self.fc_q(query)
        K = self.fc_k(key)
        V = self.fc_v(value)
        # Reshape to (batch, n_heads, seq_len, head_dim).
        Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
        # energy = energy.masked_fill(mask == 0, -1e10)
        attention = torch.softmax(energy, dim=-1)
        # x = torch.matmul(self.dropout(attention), V)
        x = torch.matmul(attention, V)
        # Back to (batch, seq_len, hid_dim) before the output projection.
        x = x.permute(0, 2, 1, 3).contiguous()
        x = x.view(batch_size, -1, self.hid_dim)
        x = self.fc_o(x)
        return x, attention
# Taken from https://github.com/facebookresearch/dlrm/blob/master/dlrm_s_pytorch.py
def create_mlp(ln, sigmoid_layer):
    """Build a scripted MLP from the layer-size sequence *ln*.

    Each Linear layer gets normal-initialised weights (std sqrt(2/(m+n)))
    and biases (std sqrt(1/m)); the layer at index *sigmoid_layer* is
    followed by Sigmoid, every other layer by ReLU.
    """
    layers = nn.ModuleList()
    for idx, (n, m) in enumerate(zip(ln, ln[1:])):
        linear = nn.Linear(int(n), int(m), bias=True)
        mean = 0.0  # std_dev = np.sqrt(variance)
        std_dev = np.sqrt(2 / (m + n))  # np.sqrt(1 / m) # np.sqrt(1 / n)
        W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
        std_dev = np.sqrt(1 / m)  # np.sqrt(2 / (m + 1))
        bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
        linear.weight.data = torch.tensor(W, requires_grad=True)
        linear.bias.data = torch.tensor(bt, requires_grad=True)
        layers.append(linear)
        layers.append(nn.Sigmoid() if idx == sigmoid_layer else nn.ReLU())
    with torch.no_grad():
        scripted = torch.jit.script(torch.nn.Sequential(*layers))
    scripted.eval()
    return scripted
def trivial_graph(a, b, c):
    """Return a + b*c plus a constant 2x2 tensor of threes."""
    threes = torch.tensor([[3, 3], [3, 3]])
    product = b * c
    return a + product + threes
def elementwise_square_addition(input1, input2):
    """Return input1**2 + input2**2, elementwise."""
    return input1 ** 2 + input2 ** 2
def fork_wait_graph1(input1, input2):
    """Run elementwise_square_addition asynchronously and wait for it."""
    future = torch.jit.fork(elementwise_square_addition, input1, input2)
    result = torch.jit.wait(future)
    return result
def fork_wait_graph2(input1, input2):
    """Run loop_graph (5 iterations) asynchronously and wait for it."""
    future = torch.jit.fork(loop_graph, input1, input2, 5)
    result = torch.jit.wait(future)
    return result
def fork_wait_graph3(input):
    """Fan out 100 asynchronous negations of *input*, then sum all results."""
    pending : List[torch.jit.Future[torch.Tensor]] = []
    for _ in range(100):
        pending.append(torch.jit.fork(torch.neg, input))
    gathered = []
    for future in pending:
        gathered.append(torch.jit.wait(future))
    return torch.sum(torch.stack(gathered))
def loop_graph(a, b, iters: int):
    """Start from a + 2b, then apply acc -> (acc + b) * 2 - a, iters times."""
    acc = a + b * 2
    for _ in range(iters):
        acc = ((acc + b) * 2) - a
    return acc
def output_graph(a, b, c, iters: int):
    """Return {i: base + i for i in range(iters)} where base = a + b*c + [[3,3],[3,3]]."""
    offset = torch.tensor([[3, 3], [3, 3]])
    base = a + b * c + offset
    out: Dict[int, torch.Tensor] = {}
    for step in range(iters):
        out[step] = base + step
    return out
class SubModule(nn.Module):
    """Tiny module whose forward adds the constants a=11 and b=2 to x."""

    def __init__(self):
        super(SubModule, self).__init__()
        self.a = 11
        self.b = 2

    def forward(self, x):
        """Return a + b + x (i.e. x + 13)."""
        offset = self.a + self.b
        return offset + x
class SubModule2(nn.Module):
    """Module whose forward mutates self.b to 30 before summing with x."""

    def __init__(self):
        super(SubModule2, self).__init__()
        self.a = 12
        self.b = 2

    def forward(self, x):
        # Side effect preserved from the original: b is overwritten on
        # every call, so the result is always a + 30 + x.
        self.b = 30
        offset = self.a + self.b
        return offset + x
class TestModule(nn.Module):
    """Composite fixture: returns sub1(x) + a + b + sub2(x), with b set to 20."""

    def __init__(self):
        super(TestModule, self).__init__()
        self.sub1 = SubModule()
        self.sub2 = SubModule2()
        self.a = 3
        self.b = 4

    def forward(self, x):
        # b is overwritten on every call, mirroring SubModule2's behaviour.
        self.b = 20
        first = self.sub1(x)
        second = self.sub2(x)
        return first + self.a + self.b + second
class TestStaticModule(TestCase):
    """
    Test Case: To test simple fork/wait operation in a graph
    fork is called on simple addition operation on input tensors
    """
    def test_fork_wait_1(self):
        # Static-runtime output must match the scripted (eager) reference.
        inp1 = torch.ones(5, 5)
        inp2 = torch.randn(5, 5)
        torch_graph = torch.jit.script(fork_wait_graph1)
        output_ref = torch_graph(inp1, inp2)
        static_runtime_module = StaticModule(torch_graph)
        output_test = static_runtime_module(inp1, inp2)
        torch.testing.assert_close(output_test, output_ref)
"""
Test Case: To test fork/wait operation in a graph on
a loop subgraph performing mix of operations
"""
def test_fork_wait_2(self):
inp1 = torch.randn(5, 5)
inp2 = torch.randn(5, 5)
torch_graph = torch.jit.script(fork_wait_graph2)
output_ref = torch_graph(inp1, inp2)
static_runtime_module = StaticModule(torch_graph)
output_test = static_runtime_module(inp1, inp2)
torch.testing.assert_close(output_test, output_ref)
"""
Test Case: To test fork/wait operation in a graph on
having multiple fork/wait operations
"""
def test_fork_wait_3(self):
input = torch.ones(3, 3)
torch_graph = torch.jit.script(fork_wait_graph3)
output_ref = torch_graph(input)
static_runtime_module = StaticModule(torch_graph)
output_test = static_runtime_module(input)
torch.testing.assert_close(output_test, output_ref)
def test_multihead_attention_layer(self):
HID_DIM = 256
QUERY_LEN = 8
BATCH_SIZE = 128
LAYERS = 3
HEADS = 8
DROPOUT = 0.1
device = torch.device("cpu")
attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
with torch.no_grad():
src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
attention.eval()
attention = torch.jit.script(attention)
attention.eval()
o_ref = attention(src, src, src, src_mask)
attention_a = StaticModule(attention)
o_test = attention_a(src, src, src, src_mask)
o_test_kw = attention_a(src, src, value=src, mask=src_mask)
for a, b in zip(o_ref, o_test):
torch.testing.assert_close(a, b)
for a, b in zip(o_ref, o_test_kw):
torch.testing.assert_close(a, b)
def test_multihead_attention_layer_benchmark(self):
HID_DIM = 256
QUERY_LEN = 8
BATCH_SIZE = 128
LAYERS = 3
HEADS = 8
DROPOUT = 0.1
device = torch.device("cpu")
attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
with torch.no_grad():
src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
attention.eval()
attention = torch.jit.script(attention)
attention_a = StaticModule(attention)
attention_a.benchmark([src, src, src, src_mask], {}, 2, 2)
metrics = attention_a.benchmark_individual_ops(
[src, src, src, src_mask], {}, 2, 2
)
def test_mlp(self):
# Arguments taken from benchmark script, ./bench/dlrm_s_benchmark.sh
ln_bot = [512, 512, 64]
sigmoid_bot = -1
ln_top = [100, 1024, 1024, 1024, 1]
sigmoid_top = 3
bot_l = create_mlp(ln_bot, sigmoid_bot)
bot_l_acc = StaticModule(bot_l)
top_l = create_mlp(ln_top, sigmoid_top)
top_l_acc = StaticModule(top_l)
with torch.no_grad():
bot_inp = torch.randn(2048, 512) # torch.Size([2048, 512])
top_inp = torch.randn(2048, 100) # torch.Size([2048, 100])
ref_bot = bot_l(bot_inp)
acc_bot = bot_l_acc(bot_inp)
torch.testing.assert_close(acc_bot, ref_bot)
ref_top = top_l(top_inp)
acc_top = top_l_acc(top_inp)
torch.testing.assert_close(acc_top, ref_top)
for _ in range(5):
with torch.no_grad():
bot_inp = torch.randn(2048, 512) # torch.Size([2048, 512])
top_inp = torch.randn(2048, 100) # torch.Size([2048, 100])
ref_bot = bot_l(bot_inp)
acc_bot = bot_l_acc(bot_inp)
torch.testing.assert_close(acc_bot, ref_bot)
ref_top = top_l(top_inp)
acc_top = top_l_acc(top_inp)
torch.testing.assert_close(acc_top, ref_top)
    def test_trivial_graph(self):
        # Compare Static Runtime output against the scripted reference for a
        # minimal add/mul graph on a constant 2x2 input.
        s = torch.full((2, 2), 2)
        tg = torch.jit.script(trivial_graph)
        o_ref = tg(s, s, s)
        tg_a = StaticModule(tg)
        o_test = tg_a(s, s, s)
        torch.testing.assert_close(o_ref, o_test)
    def test_leaky_relu(self):
        # Single-op module (LeakyReLU) through Static Runtime vs. scripted ref.
        s = torch.randn(5, 5)
        tg = torch.jit.script(nn.LeakyReLU(0.1))
        o_ref = tg(s)
        tg_a = StaticModule(tg)
        o_test = tg_a(s)
        torch.testing.assert_close(o_ref, o_test)
def test_attr(self):
"""
TorchScript IR of TestModule() after freezing:
graph(%self : __torch__.test_static_runtime.___torch_mangle_0.TestModule,
%x.1 : Tensor):
%18 : int = prim::Constant[value=30]()
%30 : int = prim::Constant[value=13]()
%3 : int = prim::Constant[value=20]()
%2 : int = prim::Constant[value=1]()
%self.sub2.a : int = prim::Constant[value=12]()
%self.a : int = prim::Constant[value=3]()
= prim::SetAttr[name="b"](%self, %3)
%17 : Tensor = aten::add(%x.1, %30, %2)
%7 : Tensor = aten::add(%17, %self.a, %2)
%b.1 : int = prim::GetAttr[name="b"](%self)
%9 : Tensor = aten::add(%7, %b.1, %2)
%sub2 : __torch__.test_static_runtime.___torch_mangle_2.SubModule2 = prim::GetAttr[name="sub2"](%self)
= prim::SetAttr[name="b"](%sub2, %18)
%b : int = prim::GetAttr[name="b"](%sub2)
%22 : int = aten::add(%self.sub2.a, %b)
%23 : Tensor = aten::add(%x.1, %22, %2)
%12 : Tensor = aten::add(%9, %23, %2)
return (%12)
"""
# test prim::SetAttr and prim::GetAttr impl in Static Runtime
m = TestModule()
m.eval()
input = torch.randn(2, 2)
output_s = m.forward(input)
ms = torch.jit.script(m)
sm = StaticModule(ms)
output_sm = sm(input)
torch.testing.assert_close(output_s, output_sm)
sm.benchmark([input], {}, 2, 2)
sm.benchmark_individual_ops([input], {}, 2, 2)
sm.benchmark([], {"x": input}, 2, 2)
sm.benchmark_individual_ops([], {"x": input}, 2, 2)
@unittest.skip("Temporarily disabled")
def test_fusion_trivial_graph(self):
s = torch.full((2, 2), 2)
tg = torch.jit.script(trivial_graph)
o_ref = tg(s, s, s)
torch._C._fuse_to_static_module(tg.graph)
assert "StaticSubgraph" in str(tg.graph)
o_test = tg(s, s, s)
torch.testing.assert_close(o_ref, o_test)
@unittest.skip("Temporarily disabled")
def test_fusion_multihead_attention_layer(self):
HID_DIM = 256
QUERY_LEN = 8
BATCH_SIZE = 128
LAYERS = 3
HEADS = 8
DROPOUT = 0.1
device = torch.device("cpu")
attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
with torch.no_grad():
src = torch.randn(BATCH_SIZE, QUERY_LEN, HID_DIM).to(device)
src_mask = (src > 0)[:, :, 0].unsqueeze(1).unsqueeze(2).to(device)
attention.eval()
attention = torch.jit.script(attention)
attention.eval()
o_ref = attention(src, src, src, src_mask)
torch._C._fuse_to_static_module(attention._c)
o_test = attention(src, src, src, src_mask)
for a, b in zip(o_ref, o_test):
torch.testing.assert_close(a, b)
@unittest.skip("Temporarily disabled")
def test_fusion_loop(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5)
c = 4
lg = torch.jit.script(loop_graph)
o_ref = lg(a, b, c)
torch._C._fuse_to_static_module(lg.graph)
assert "StaticSubgraph" in str(lg.graph)
o_test = lg(a, b, c)
torch.testing.assert_close(o_ref, o_test)
@unittest.skip("Temporarily disabled")
def test_fusion_outputs(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
c = 4
og = torch.jit.script(output_graph)
o_ref = og(a, b, b, c)
torch._C._fuse_to_static_module(og.graph)
assert "StaticSubgraph" in str(og.graph)
o_test = og(a, b, b, c)
for i in o_ref.keys():
torch.testing.assert_close(o_ref[i], o_test[i])
def test_create_object(self):
class Foo: # noqa: B903
def __init__(self, x: torch.Tensor) -> None:
self.x = x
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, y: torch.Tensor) -> torch.Tensor:
foo = Foo(y)
return y * foo.x
mod = torch.jit.script(Mod()).eval()
y = torch.randn((1, ))
expected = mod(y)
static_mod = StaticModule(torch.jit.freeze(mod))
actual = static_mod(y)
self.assertEqual(expected, actual)
if __name__ == "__main__":
run_tests()
| StarcoderdataPython |
9674369 | import pathlib
import shutil
import click
from roo.console import console
@click.group(help="Commands to interact with the cache")
def cache():
    # Parent command group; concrete cache subcommands register onto it.
    pass
@cache.command(name="clear", help="Clear the cache completely")
def cache_clear():
    """Delete everything under ~/.roo/cache and recreate the empty directory."""
    root = pathlib.Path("~/.roo/cache").expanduser()
    console().print("Clearing cache")
    try:
        shutil.rmtree(root)
        root.mkdir(parents=True, exist_ok=True)
    except Exception as exc:
        raise click.ClickException(f"Unable to clear cache: {exc}")
| StarcoderdataPython |
3445715 | <filename>src/Knight.py
from Defense import Defense
from Projectile import Projectile
KNIGHTATTACK = ["Images/Defense/knight1/attack1.png", "Images/Defense/knight1/attack2.png", "Images/Defense/knight1/attack3.png", "Images/Defense/knight1/attack4.png", "Images/Defense/knight1/attack5.png"]
KNIGHTIDLE = ["Images/Defense/knight1/idle1.png", "Images/Defense/knight1/idle2.png", "Images/Defense/knight1/idle3.png", "Images/Defense/knight1/idle4.png", "Images/Defense/knight1/idle5.png"]
class WeakKnight(Defense):
    # Tier-1 knight defense unit, animated with the five-frame idle/attack
    # image lists above. Positional args 35 and 1 are presumably health and
    # attack level -- TODO confirm against Defense.__init__.
    def __init__(self, knight_id, x_coord, y_coord):
        super().__init__(knight_id, 35, 1, x_coord, y_coord, KNIGHTIDLE#"Images/Defense/knight1/idle1.png"
                         , KNIGHTATTACK)#"Images/Defense/knight1/attack2.png")
        # One projectile instance, reused for every attack.
        self.projectile = Projectile("spear", super().get_attack_damage(), "Images/Projectile/knight/basic_spear1.png")

    def get_projectile(self):
        # Accessor for the knight's (shared) projectile object.
        return self.projectile
class IntermediateKnight(Defense):
    # Tier-2 knight. NOTE(review): passes single image paths where WeakKnight
    # passes frame lists -- confirm Defense accepts both forms.
    def __init__(self, knight_id, x_coord, y_coord):
        super().__init__(knight_id, 40, 2, x_coord, y_coord, "Images/Defense/knight2/idle1.png"
                         , "Images/Defense/knight2/attack2.png")
        self.projectile = Projectile("spear", super().get_attack_damage(), "Images/Projectile/knight/level2_spear1.png")

    def get_projectile(self):
        # Accessor for the knight's (shared) projectile object.
        return self.projectile
class StrongKnight(Defense):
    # Tier-3 knight. NOTE(review): like IntermediateKnight, uses single image
    # paths instead of frame lists -- confirm Defense accepts both forms.
    def __init__(self, knight_id, x_coord, y_coord):
        super().__init__(knight_id, 50, 3, x_coord, y_coord, "Images/Defense/knight3/idle1.png"
                         , "Images/Defense/knight3/attack2.png")
        self.projectile = Projectile("spear", super().get_attack_damage(), "Images/Projectile/knight/level3_spear1.png")

    def get_projectile(self):
        # Accessor for the knight's (shared) projectile object.
        return self.projectile
| StarcoderdataPython |
6669612 | <reponame>ntaylorwss/megatron
from . import generator
from . import dataset
from . import storage
from .generator import *
from .dataset import *
from .storage import *
| StarcoderdataPython |
1773295 | <filename>flexmeasures/data/migrations/versions/e0c2f9aff251_rename_source_id_column_in_data_sources_table.py
"""rename_source_id_column_in_data_sources_table
Revision ID: e0c2f9aff251
Revises: <PASSWORD>
Create Date: 2018-07-20 16:08:50.641000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "e0c2f9aff251"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
    """Replace data_sources.source_id with user_id (FK to bvp_users.id)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("data_sources", sa.Column("user_id", sa.Integer(), nullable=True))
    op.drop_constraint(
        "data_sources_source_id_bvp_users_fkey", "data_sources", type_="foreignkey"
    )
    # Constraint name None lets the dialect auto-generate a name.
    op.create_foreign_key(None, "data_sources", "bvp_users", ["user_id"], ["id"])
    op.drop_column("data_sources", "source_id")
    # ### end Alembic commands ###
def downgrade():
    """Restore data_sources.source_id and its FK; drop user_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "data_sources",
        sa.Column("source_id", sa.INTEGER(), autoincrement=False, nullable=True),
    )
    # NOTE(review): drop_constraint with a None name is expected to fail at
    # runtime -- Alembic needs the actual (auto-generated) FK name created in
    # upgrade(). Supply it before relying on this downgrade. TODO confirm.
    op.drop_constraint(None, "data_sources", type_="foreignkey")
    op.create_foreign_key(
        "data_sources_source_id_bvp_users_fkey",
        "data_sources",
        "bvp_users",
        ["source_id"],
        ["id"],
    )
    op.drop_column("data_sources", "user_id")
    # ### end Alembic commands ###
| StarcoderdataPython |
6576928 | import multiprocessing
results = [] #Creating a Global Variable
def calc_square(numbers, q): #child-function
    """Square each number, push the squares onto *q*, and append to the
    module-global `results`.

    NOTE(review): when run as a child process, `results` is appended only in
    the child's copy of the module -- the parent's `results` stays unchanged,
    which appears to be the point this demo illustrates: data must come back
    through the Queue.
    """
    global results
    for i in numbers:
        q.put(i*i)
        print('square: ', str(i*i))
        results.append(i*i)
    # Prints the child-local view of the accumulated results.
    print('inside process : '+str(results))
def main():
    """Square a fixed list in a child process, then drain and print the queue."""
    arr = [2, 3, 8, 9]
    result_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=calc_square, args=(arr, result_queue))
    worker.start()
    worker.join()
    while not result_queue.empty():
        print(result_queue.get())
if __name__ == "__main__": #main function
main()
| StarcoderdataPython |
11283019 | <reponame>rapidpro/ureport-partners
from dash.orgs.views import OrgPermsMixin
from smartmin.views import SmartCRUDL, SmartListView
from .models import Rule
class RuleCRUDL(SmartCRUDL):
    """
    Simple CRUDL for debugging by superusers, i.e. not exposed to regular users for now
    """

    model = Rule
    # Only the list action is exposed.
    actions = ("list",)

    class List(OrgPermsMixin, SmartListView):
        # Columns rendered in the list view.
        fields = ("tests", "actions")

        def get_queryset(self, **kwargs):
            # Restrict to the requesting user's org; stable ordering by pk.
            return self.model.objects.filter(org=self.request.org).order_by("id")
| StarcoderdataPython |
3418469 | <filename>torch_glow/torch_glow/to_glow.py
import collections
import copy
from typing import List, Any
import torch
__all__ = [
"to_glow",
"to_glow_selective",
"get_submod_input_shapes",
"CompilationSpec",
"CompilationGroup",
"InputSpec",
"CompilationSpecSettings",
"FuserSettings",
"input_spec_from_tensor",
"input_specs_from_tensors",
"generate_glow_compilation_spec",
]
CompilationSpec = torch.classes.glow.CompilationSpec
CompilationGroup = torch.classes.glow.CompilationGroup
InputSpec = torch.classes.glow.InputSpec
CompilationSpecSettings = torch.classes.glow.CompilationSpecSettings
FuserSettings = torch.classes.glow.FuserSettings
def input_spec_from_tensor(tensor: torch.Tensor) -> InputSpec:
    """Build a Glow InputSpec mirroring *tensor* via InputSpec.set_same_as."""
    input_spec = InputSpec()
    input_spec.set_same_as(tensor)
    return input_spec
def input_specs_from_tensors(tensors: List[torch.Tensor]) -> List[InputSpec]:
    """Build one InputSpec per tensor in *tensors*."""
    return list(map(input_spec_from_tensor, tensors))
def generate_glow_compilation_spec(model, backend, *example_inputs):
    """Build a one-group CompilationSpec for *backend*, with input shapes
    taken from *example_inputs*.

    Note: *model* is currently unused by the body; kept for interface
    stability.
    """
    spec = CompilationSpec()
    spec.get_settings().set_glow_backend(backend)
    compilation_group = CompilationGroup()
    compilation_group.input_sets_append(input_specs_from_tensors(example_inputs))
    spec.compilation_groups_append(compilation_group)
    return spec
def to_glow(model, method_compile_spec):
    r"""Lower a model to Glow

    to_glow is a wrapper around the torch._C._jit_to_backend which lowers the
    the specified module `mod` to Glow using the the MethodCompileSpec
    `method_compile_spec`. MethodCompileSpec is a dictionary from method name
    in `mod` such as 'forward' to CompilationSpec for that method

    Args:
        model: Model to be lowered to glow
        method_compile_spec: Either a dicionary from method name to
                             CompilationSpec or just a CompilationSpec and method
                             name is assumed to be "forward"

    Return:
        A copy of the model that has been lowered to Glow and will run on
        Glow backend devices
    """
    # `collections.Mapping` was a deprecated alias removed in Python 3.10;
    # the ABC lives in collections.abc.
    from collections.abc import Mapping

    if not isinstance(method_compile_spec, Mapping):
        method_compile_spec = {"forward": method_compile_spec}
    return torch._C._jit_to_backend("glow", model, method_compile_spec)
def check_module_names(module_names):
    """Checks that module names don't overlap at all (no path is nested
    inside another lowered path)."""
    assert "" not in module_names, "Use to_glow to lower top level module"
    for path1 in module_names:
        for path2 in module_names:
            if path1 == path2:
                continue
            # Compare dotted-path prefixes. The previous plain substring test
            # ("a.b" in "a.bc") wrongly flagged sibling modules that merely
            # share a name prefix.
            assert not path2.startswith(path1 + "."), \
                f"Can't to_glow a module nested inside another to_glow module, \
found {path2} inside of {path1}"
def get_submodule(mod, path):
    """Resolve a dotted attribute *path* (e.g. "encoder.layer1") on *mod*."""
    parts = path.split(".")
    assert len(parts) > 0
    node = mod
    for part in parts:
        node = getattr(node, part)
    return node
def set_submodule(mod, path, submod):
    """Replace the attribute at dotted *path* on *mod* with *submod*."""
    parts = path.split(".")
    assert len(parts) > 0
    parent = mod
    # Walk to the parent of the final path component.
    for item in parts[:-1]:
        parent = getattr(parent, item)
    setattr(parent, parts[-1], submod)
    # (removed a stray trailing `pass` statement)
def get_submod_input_shapes(
    mod: torch.nn.Module, path: str, example_inputs: Any
) -> List[torch.Size]:
    r"""Get the input shapes of a submodule given the top-level model
    and its input.

    Registers a forward pre-hook that records the input shapes of the
    submodule and then runs the model once to trigger the hook.

    Args:
        mod: top-level model
        path: path to a submodule
        example_inputs: inputs to the top-level model

    Return:
        input shapes: List[torch.Size]
    """
    submod = get_submodule(mod, path)
    input_shapes = []

    def get_shape(self: torch.nn.Module, inputs: Any):
        # Pre-hook: record the shape of every positional input tensor.
        nonlocal input_shapes
        for i in inputs:
            assert isinstance(i, torch.Tensor), "We only support tensor inputs."
            input_shapes.append(i.size())

    handle = submod.register_forward_pre_hook(get_shape)
    # One forward pass purely to trigger the hook.
    mod(*example_inputs)
    # Detach the hook so the model is left unmodified.
    handle.remove()
    return input_shapes
def to_glow_selective(model, specs_and_examples, inplace=False):
    r"""Selectively lowers submodules of the given module to Glow.

    Instead of using to_glow to lower an entire module to Glow,
    to_glow_selective can be used to selectively find and replace submodules in
    the given module with a version of the module that is traced and lowered
    to Glow. Each specified submodule is lowered independently and so will be
    a separate compilation unit in Glow.

    Args:
        model: top-level model to be selectively lowered
        specs_and_examples: A dictionary with keys that name submodules
                            recursively from model and values that are the a
                            tuple of (CompilationSpec, example_inputs) where
                            example_inputs are inputs that are used to trace
                            the submodule.
        inplace: Carry out model transformations in-place, the original module
                 is mutated

    Return:
        Model with selectively lowered submodules
    """
    check_module_names(list(specs_and_examples.keys()))
    if not inplace:
        model = copy.deepcopy(model)
    if isinstance(model, torch.jit._script.RecursiveScriptModule):
        spec_list, path_list = [], []
        submod_idx = 0
        for path, spec in specs_and_examples.items():
            # NOTE(review): here each dict value is appended whole, while the
            # non-scripted branch below unpacks values as
            # (spec, example_inputs) -- confirm the expected value shape for
            # already-scripted modules.
            spec_list.append(spec)
            path_list.append(path)

        def to_glow_helper(submod):
            # Called by _jit_to_backend_selective once per path, in
            # path_list order; submod_idx tracks which spec applies.
            nonlocal submod_idx
            res_model = to_glow(submod, {"forward": spec_list[submod_idx]})
            submod_idx += 1
            return res_model

        model = torch._C._jit_to_backend_selective(model, to_glow_helper, path_list)
    else:
        # Eager model: trace each targeted submodule, lower it, splice it back.
        for path, (spec, example_inputs) in specs_and_examples.items():
            submod = get_submodule(model, path)
            submod = torch.jit.trace(submod, example_inputs)
            submod = to_glow(submod, {"forward": spec})
            set_submodule(model, path, submod)
    return model
| StarcoderdataPython |
4949725 | import sys
from functools import reduce
# Eager evaluation makes this easier
def lmap(fn, iterable):
    """list(map(...)) shorthand (PEP 8 discourages assigning lambdas to names)."""
    return list(map(fn, iterable))


def lfilter(fn, iterable):
    """list(filter(...)) shorthand."""
    return list(filter(fn, iterable))
def get_input():
    """Lazily yield the puzzle input, one stripped line at a time."""
    with open("input", "r") as handle:
        for raw_line in handle:
            yield raw_line.strip()
def transformed_input():
    """Each input line exploded into a list of single characters."""
    return [list(line) for line in get_input()]
maps = {'[': ']', '(': ')', '{': '}', '<': '>'}
values = {')': 1, ']': 2, '}': 3, '>': 4}
def get_malformed(lines):
    """Yield the unclosed-opener stack for each incomplete line.

    Corrupted lines (a closer that does not match the most recent opener, or
    a closer with nothing open) yield nothing; fully balanced lines also
    yield nothing.

    Rewritten without the previous `raise Exception("Hi")` / bare
    `except: pass` control flow, which would also have swallowed unrelated
    errors, and without the O(n^2) `list.pop(0)` consumption.
    """
    for line in lines:
        stack = []
        corrupted = False
        for char in line:
            if char in maps:
                stack.append(char)
            elif stack and char == maps[stack[-1]]:
                stack.pop()
            else:
                # Mismatched closer, or a closer with an empty stack.
                corrupted = True
                break
        if stack and not corrupted:
            yield stack
def reformed(stack):
    """Closing characters needed to complete *stack*, innermost first.

    Note: reverses *stack* in place, matching the original behaviour.
    """
    stack.reverse()
    closers = {'[': ']', '(': ')', '{': '}', '<': '>'}
    return [closers[opener] for opener in stack]
def solution():
    # Advent of Code 2021 day 10, part 2: score each incomplete line's
    # completion string and return the middle score.
    lines = transformed_input()
    malformeds = list(get_malformed(lines))
    corrections = lmap(reformed, malformeds)
    return sorted(lmap(score, corrections))[len(corrections) // 2]
def score(reform):
    """Fold the completion characters into a single autocomplete score
    (total = total * 5 + char value)."""
    table = {')': 1, ']': 2, '}': 3, '>': 4}
    total = 0
    for ch in reform:
        total = total * 5 + table[ch]
    return total
if __name__ == "__main__":
print(solution())
| StarcoderdataPython |
# Ask whether the user works from home. The original Python 2 version used
# input() (which eval'd the typed text, so the user had to type the Python
# literals True/False) and compared the answer with `== True` / `== False`
# in two separate ifs; modernized to Python 3 with a plain yes/no answer.
pregunta = input('trabajas desde casa? (si/no) ')
if pregunta.strip().lower() == 'si':
    print('Eres afortunado')
else:
    print('Trabajas fuera de casa')

# Commute duration in minutes; explicit int() instead of eval-style input().
tiempo = int(input('Cuantos minutos haces al trabajo: '))
if tiempo == 0:
    print('trabajas desde casa')
elif tiempo <= 20:
    print('Es poco tiempo')
elif tiempo <= 45:
    print('Es un tiempo razonable')
else:
    print('Busca otras rutas')
4898513 | <gh_stars>0
#!python
import random
def merge(items1, items2):
    """Merge two sorted lists into one sorted list.

    Stable and non-destructive: the inputs are left unchanged.
    (Removed a stray debug print of the result.)
    """
    left_index, right_index = 0, 0
    result = []
    while left_index < len(items1) and right_index < len(items2):
        if items1[left_index] < items2[right_index]:
            result.append(items1[left_index])
            left_index += 1
        else:
            result.append(items2[right_index])
            right_index += 1
    # Append whichever side still has elements left.
    result += items1[left_index:]
    result += items2[right_index:]
    return result
def merge_sort(items):
    """Sort *items* with recursive merge sort; returns a (new) sorted list."""
    if len(items) <= 1:  # base case: already sorted
        return items
    mid = len(items) // 2
    return merge(merge_sort(items[:mid]), merge_sort(items[mid:]))
def partition(items, low, high):
    """Lomuto partition of items[low..high] around pivot items[high].

    Rearranges the slice in place so every element <= pivot precedes it,
    and returns the pivot's final index.
    """
    i = low - 1
    pivot = items[high]
    for j in range(low, high):
        if items[j] <= pivot:
            i += 1
            # The original omitted this swap, so smaller elements were
            # counted but never actually moved left of the pivot.
            items[i], items[j] = items[j], items[i]
    items[i + 1], items[high] = items[high], items[i + 1]
    return i + 1
def quick_sort(items, low=None, high=None):
    """In-place quicksort of items[low..high]; returns *items*.

    The original's None defaults crashed on `low < high` when called as
    quick_sort(items); default to the full range instead.
    """
    if low is None:
        low = 0
    if high is None:
        high = len(items) - 1
    if low < high:
        pivot_index = partition(items, low, high)
        quick_sort(items, low, pivot_index - 1)
        quick_sort(items, pivot_index + 1, high)
    return items
# Demo run of both sorts on the same list.
items = [2,2,1,4,7,6]
high = len(items)-1
# Note: quick_sort sorts `items` in place, so the merge_sort call below
# operates on an already-sorted list.
print(quick_sort(items, 0, high))
print(merge_sort(items))
| StarcoderdataPython |
11386613 | <gh_stars>0
#!/usr/bin/env python
from matplotlib import pyplot as plt
import numpy as np
def read_csv(path):
    """Parse a single-header CSV of floats into per-column tuples."""
    with open(path) as handle:
        rows = [[float(cell) for cell in line.split(',')]
                for line in handle.readlines()[1:]]
    return list(zip(*rows))
# Load the first CSV column (tick counts per run, judging by the prints
# below) and visualise the spread: each value plotted against itself with
# +/- one standard deviation error bars, median marked with dashed lines.
data = read_csv("optenc_data.csv")
trips = data[0]
std = np.std(trips)
plt.errorbar(trips, trips, xerr=std, yerr=std, fmt='o')
med = np.median(trips)
plt.axhline(med, linestyle='--', color='C1')
plt.axvline(med, linestyle='--', color='C1')
plt.show()
print(f"Median number of ticks {med}")
# Calibration: median ticks over a known travel distance of 113 mm.
total_length = 113.0 # mm
ticks_per_mm = med / total_length
mm_per_tick = 1.0 / ticks_per_mm
print(f"Ticks per mm {ticks_per_mm}")
print(f"mm per tick {mm_per_tick}")
| StarcoderdataPython |
1650508 | <reponame>clojia/DTAE
import sys
import os.path
sys.path.insert(0, os.path.abspath("./simple-dnn"))
#Import the libraries we will need.
from IPython.display import display
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow.contrib.slim as slim
import scipy.misc
import scipy
import scipy.io
from sklearn import metrics, preprocessing
from sklearn.neighbors import KernelDensity
import time
import pickle
import cPickle
import matplotlib.cm as cm
import random
import statistics
from util.openworld_sim import OpenWorldSim, OpenWorldMsData
from util.visualization import visualize_dataset_2d
from simple_dnn.cnn.dcnn import DCNN
from simple_dnn.util.format import UnitPosNegScale, reshape_pad
from simple_dnn.generative.vae import VariationalAutoencoder
from simple_dnn.generative.gan import MultiClassGAN
from simple_dnn.generative.discriminator import DiscriminatorDC
from simple_dnn.generative.generator import GeneratorDC
from simple_dnn.util.sample_writer import ImageGridWriter
from open_net import OpenNetFlat, OpenNetCNN, OpenNetBase
from util.metrics import auc, open_set_classification_metric, open_classification_performance
from util.open_net_train_eval import train_eval, compare_performance, ttest
from util.visualization import visualize_dataset_nd
import argparse
import subprocess
import random
import sys
mnist = input_data.read_data_sets("data/MNIST_data/", one_hot=True)
# Sweep of the correlation-loss mixing coefficient passed to OpenNetCNN.
#zd = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
comb = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
#comb = [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
# Each line of the file is one set of known (training) class labels.
tr_classes_list = []
with open("/home/jiaj2018/code/opennet_ii/data/mini_mnist") as fin:
    for line in fin:
        if line.strip() == '':
            continue
        cols = line.strip().split()
        tr_classes_list.append([int(float(c)) for c in cols])
cor_acc_comb = []
for c in comb:
    cor_acc_mul = []
    for tr_classes in tr_classes_list:
        length = 6
        acc = 0
        # Map class index -> label text; index `length` is the open-set bucket.
        # NOTE(review): the comprehension variable `c` shadows the outer
        # coefficient loop variable (harmless in Py3 comprehension scope,
        # but worth renaming).
        label_text_lookup = {i: str(c) for i, c in enumerate(sorted(tr_classes))}
        label_text_lookup[6] = 'unknown'
        # NOTE(review): tr_classes is hard-coded here rather than using the
        # per-iteration `tr_classes` list read above -- confirm intended.
        open_mnist = OpenWorldSim(mnist.train.images, mnist.train.labels,
                                  val_data=mnist.validation.images, val_label=mnist.validation.labels,
                                  test_data=mnist.test.images, test_label=mnist.test.labels,
                                  tr_classes=[0,2,3,4,6,9],
                                  seed=None)
        print("comb coefficient: " + str(c))
        with tf.device('/GPU:0'):
            cnn_disc_ae_6_long = OpenNetCNN(
                [32, 32], # x_dim
                1, #x_ch
                length, #y_dim
                [32, 64], # conv_units,
                [256, 128], #hidden_units
                z_dim=6,
                kernel_sizes=[4, 4], strides=[1, 1], paddings='SAME',
                pooling_enable=True, pooling_kernel=[3,3],
                pooling_stride=[2,2], pooling_padding='SAME',
                pooling_type='max',
                activation_fn=tf.nn.relu,
                x_scale=UnitPosNegScale.scale,
                x_inverse_scale=UnitPosNegScale.inverse_scale,
                x_reshape=reshape_pad([28,28], [32,32], 1, pad=True, pad_value=-1),
                opt=tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5),
                recon_opt=tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5),
                c_opt=tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5),
                dist='mean_separation_spread',
                decision_dist_fn='mahalanobis',#'euclidean',
                dropout=True, keep_prob=0.2,
                # batch_norm=True,
                batch_size=256,
                iterations=5000,
                display_step=1000,
                save_step=500,
                model_directory=None, # Directory to save trained model to.
                density_estimation_factory=None,
                ce_loss=False, recon_loss=False, inter_loss=True, intra_loss=True, div_loss=False, tc_loss=False, cor_loss=True,
                contamination=0.01, comb=c)
        # Train + evaluate; last column of the one-hot labels marks "unknown".
        acc, _, _ = train_eval(cnn_disc_ae_6_long, open_mnist.train_data(), open_mnist.train_label(),
                               open_mnist.validation_data(), open_mnist.validation_label(),
                               np.logical_not(open_mnist.validation_label()[:,-1].astype(bool)),
                               open_mnist.test_data(), open_mnist.test_label(),
                               np.logical_not(open_mnist.test_label()[:,-1].astype(bool)),
                               n_scatter=1000, unique_ys=range(7), plot_recon=False,
                               # save_path='data/results/fig/mnist_cnn_z6_plot.pdf',
                               label_text_lookup=label_text_lookup, visualize=False, acc=acc
                               )
        cor_acc_mul.append(acc)
    # Mean accuracy over all class splits for this coefficient.
    cor_acc_comb.append(sum(cor_acc_mul) / len(tr_classes_list) )
    #cor_acc_comb.append(statistics.mean(cor_acc_mul))
print("pred acc under different coefficients:")
print(cor_acc_comb)
| StarcoderdataPython |
1752555 | <filename>tensorflow/save_clusters.py
import os
import cv2
import numpy as np
def save_images(images, clusters, dst, n=10):
    """Save sample images labelled by their predicted cluster.

    images - array-like [b, h, w, c]; values presumably RGB in [0, 1]
             (they are scaled by 255 and channel-reversed on write)
    clusters - np array [b, c] (softmaxed cluster scores)
    dst - str, destination directory (created if missing)
    n - int, number of images sampled per cluster (with replacement)
    """
    # Convert BEFORE the shape assert: a plain Python list has no .shape,
    # so the original assert crashed on list inputs.
    images = np.array(images)
    clusters = np.asarray(clusters)
    assert images.shape[0] == clusters.shape[0]
    clusters = np.argmax(clusters, axis=-1)
    if not os.path.exists(dst):
        print('creating {}'.format(dst))
        os.makedirs(dst)
    for c in np.unique(clusters):
        # flatnonzero always yields a 1-D index array; the previous
        # np.squeeze(np.argwhere(...)) collapsed single-member clusters to a
        # 0-d array, which crashes np.random.choice.
        idx = np.flatnonzero(clusters == c)
        print(c, idx)
        choices = np.random.choice(idx, n)
        for choice in choices:
            img = images[choice, ...]
            imgdst = os.path.join(dst, 'c{:02d}_im{:04d}_{}.jpg'.format(c, choice, np.datetime64('now')))
            print('\t', imgdst, img.shape)
            # RGB -> BGR for cv2, rescale to [0, 255].
            cv2.imwrite(imgdst, img[:, :, ::-1] * 255)
1775388 | # coding=utf-8
import random
def randomStr(length=6, timeIn=False, lowerCaseLetter=False, capitalLetter=False, number=True, specialSign=False,
              otherSignsList=None):
    '''
    Return a random string drawn from the enabled character pools.
    :param length: number of random characters
    :param timeIn: append the current timestamp to the result
    :param number: include digits 0-9
    :param lowerCaseLetter: include a-z
    :param capitalLetter: include A-Z
    :param specialSign: include the characters in otherSignsList
    :param otherSignsList: extra characters (list); only used when specialSign
    :return: the random string (empty pool yields "" plus optional timestamp)
    '''
    pool = []
    if number:
        pool.extend(chr(i) for i in range(48, 58))    # '0'-'9'
    if lowerCaseLetter:
        pool.extend(chr(i) for i in range(97, 123))   # 'a'-'z'
    if capitalLetter:
        # Upper bound was 90 before, which silently dropped 'Z' (chr(90)).
        pool.extend(chr(i) for i in range(65, 91))    # 'A'-'Z'
    if specialSign:
        if otherSignsList is not None and isinstance(otherSignsList, list):
            pool.extend(otherSignsList)
    # Renamed from `str`, which shadowed the builtin.
    result = ""
    if pool:
        result = "".join(random.choice(pool) for _ in range(length))
    if timeIn:
        from SRC.common.utils import getCurrentTime
        result = result + getCurrentTime()
    return result
355059 | from __future__ import unicode_literals
import frappe
from frappe import msgprint
from frappe.model.document import Document
from frappe.utils import flt
import erpnext.controllers.taxes_and_totals
@frappe.whitelist(allow_guest=True)
def sales_tax_series(sales_tax, company):
    """Return the highest tax_number among Sales Invoices matching the given
    sales_tax and company."""
    # Parameterized query: the previous string-concatenated SQL was an
    # injection vector, which is especially dangerous on a guest-accessible
    # (allow_guest=True) endpoint.
    query = frappe.db.sql(
        "SELECT MAX(tax_number) FROM `tabSales Invoice` "
        "WHERE sales_tax = %s AND company = %s",
        (sales_tax, company),
    )
    return query
| StarcoderdataPython |
3365046 | <reponame>red5alex/ifm_contrib<filename>contrib_lib/simulator.py<gh_stars>0
from ifm import Enum
from .simulator_pandas import SimPd
import pandas as pd
from datetime import datetime
import sys
class Simulator:
    """
    Extension child-class for IFM contributor's Extensions.
    Use this class to add functionality relating to running the FEFLOW
    simulator (progress reporting, time-step logging, time-step loading).
    """

    def __init__(self, doc):
        self.doc = doc
        # add custom child-classes here
        self.df = SimPd(doc)

    # add custom methods here

    def start(self, dac=None, save_time_steps=None, skip_time_steps=None, binary=True,
              compact_output=True, time_log_xlsx=None, auto_stop=True):
        """
        Runs the model. Similar to doc.startSimulator but adds console
        progress output and optional per-step time logging.

        :param dac: (not implemented)
        :param save_time_steps: (not implemented)
        :param skip_time_steps: (not implemented)
        :param binary: (not implemented)
        :param compact_output: write console output to a single line
        :param time_log_xlsx: file name for storing time measurement data
        :param auto_stop: auto-terminate the simulation after completion
        """
        # initialize
        clock_start = datetime.now()
        print("simulation started at {:%m/%d/%Y, %H:%M:%S}".format(clock_start))
        t_0 = self.doc.getAbsoluteSimulationTime()
        # One dict per completed step; DataFrame.append was removed in
        # pandas 2.0, and building from a list of records is O(n) per write
        # instead of O(n^2) overall.
        log_records = []
        step = 0

        # run time steps until the final simulation time is reached
        while self.doc.getAbsoluteSimulationTime() < self.doc.getFinalSimulationTime():
            self.doc.singleStep()
            step += 1

            # measure time
            clock_now = datetime.now()
            simu_time = self.doc.getAbsoluteSimulationTime()
            time_step = self.doc.getCurrentTimeIncrement()

            # get percent progress
            t_end = self.doc.getFinalSimulationTime()
            progress = (simu_time - t_0) / (t_end - t_0)

            # write time log
            log_records.append({"i": step,
                                "wall_time": clock_now,
                                "simu_time": simu_time,
                                "time_step": time_step})
            if time_log_xlsx is not None:
                pd.DataFrame(log_records).to_excel(time_log_xlsx)

            # update console
            time_elapsed = clock_now - clock_start
            sys.stdout.write(
                "\r#{:4d} {: 4d}% t={:2.2e} dt={:2.2e} clock={}".format(
                    step, int(progress * 100), simu_time, time_step, time_elapsed))
            sys.stdout.flush()
            if not compact_output:
                print("")

        # finalize
        clock_now = datetime.now()
        time_elapsed = clock_now - clock_start
        sys.stdout.write("\rmodel run complete {:%m/%d/%Y, %H:%M:%S} ({})".format(clock_now, time_elapsed))
        if auto_stop:
            self.doc.stopSimulator()
            sys.stdout.write(", simulator stopped")
        print(".")

    def getAbsoluteSimulationTimeCalendar(self):
        """
        Get the current absolute simulation time as a datetime object.
        Reference time must be set in the model.

        :return: datetime
        """
        # Only `datetime` (the class) is imported at module level, so
        # timedelta must be imported here; the original referenced the
        # nonexistent datetime.timedelta and also dropped the return value.
        from datetime import timedelta
        if self.doc.getReferenceTime() is None:
            raise ValueError("Reference Time not set in FEFLOW model.")
        return self.doc.getReferenceTime() + timedelta(days=self.doc.getAbsoluteSimulationTime())

    def load_first_ts_after(self, time):
        """
        Load the first time step after the time step provided by time.

        :param time: Simulation time to load (days)
        :type time: float or int
        :return: Information on time step loaded
        :rtype: pandas.Series
        """
        # The original check `type(time) == float or int` was always truthy
        # (it parenthesizes as `(type(time) == float) or int`), so the type
        # error below could never fire.
        if not isinstance(time, (int, float)):
            raise ValueError("parameter 'time' must be of type float (simulation time in days) ")

        # get time step list
        df_ts = self.doc.c.sim.df.time_steps()
        after = df_ts[df_ts.simulation_time > time]
        if len(after) == 0:
            raise RuntimeError("{} contains no timestep after {} d".format(self.doc.c.original_filename(),
                                                                           time))
        row = after.reset_index().iloc[0]
        self.doc.loadTimeStep(int(row.file_index))
        return row
| StarcoderdataPython |
6604721 | #!/usr/bin/env python
# encoding:utf-8
#
# Copyright 2015-2016 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from src.impl.error import IllegalMethodError
from src.impl.model.reserved_method import ReservedMethod
from src.impl.reserved.file import File
from src.impl.reserved.list import List
from src.impl.reserved.out import Out
from src.impl.reserved.system import System
from src.impl.reserved.test import Test
__Author__ = "<NAME> <<EMAIL>>"
__date__ = "2015-12-21"
class ReservedMethodCaller:
    """Resolves "reserved" (built-in) method names and dispatches calls.

    Reserved methods are the public methods of the List, Out, File, Test and
    System helper classes. Methods of Out are exposed under their bare name;
    methods of every other class are namespaced as "ClassName'method".
    """

    def __init__(self, cp, handler):
        self.cp = cp
        self.handler = handler

    @classmethod
    def __methods(cls):
        """Build the {reserved name: [class, method]} lookup table.

        BUGFIX: the original compared the string "__init__" against the whole
        (name, method) tuple returned by inspect.getmembers, which is always
        unequal — so constructors were never filtered out. Compare the name.

        NOTE(review): `predicate=inspect.ismethod` matches unbound methods on
        Python 2 but only classmethods on Python 3 — confirm the target
        interpreter before changing the predicate.
        """
        reserved = {}
        for target in (List, Out, File, Test, System):
            # Out's methods are global; every other class is name-prefixed.
            prefix = "" if target is Out else target.__name__ + "'"
            for name, method in inspect.getmembers(target, predicate=inspect.ismethod):
                if name != "__init__":
                    reserved[prefix + name] = [target, method]
        return reserved

    @classmethod
    def is_reserved_method(cls, name):
        """Return True if `name` refers to a known reserved method."""
        return name in cls.__methods()

    def core(self, name, args):
        """Resolve `name`/`args` into a ReservedMethod, validating arity.

        Raises IllegalMethodError when the name is unknown or the number of
        supplied arguments does not fit the method signature (taking default
        values into account).
        """
        reserved = self.__methods()
        if name in reserved:
            target_cls, method = reserved[name]
            args_len = len(args)
            # inspect.getargspec was removed in Python 3.11; prefer
            # getfullargspec when available, fall back otherwise.
            spec_fn = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
            arg_spec = spec_fn(method)
            class_method_args = len(arg_spec.args) - 1  # exclude self
            defaults = len(arg_spec.defaults) if arg_spec.defaults is not None else 0
            if class_method_args - defaults <= args_len <= class_method_args:
                return ReservedMethod(target_cls, method, args)
        raise IllegalMethodError(name)

    def fire(self, rm):
        """Instantiate the reserved class and invoke the resolved method.

        Generalized from the original 0/1/2-argument ladder: any arity the
        signature check in core() accepted is forwarded via *args.
        """
        ins = rm.cls(self.cp, self.handler)
        return rm.method(ins, *rm.args)
| StarcoderdataPython |
# ---------------------------------------------------------------------------
# Configuration + HTML templates for the run-time plots report page.
# ---------------------------------------------------------------------------
# Specify either 'commit' (to label the x-axis with commit sha) or 'date' (to label the x-axis with the date the test was run)
x_axis_qty = 'date'
# Plot dims in pixels
plot_height = '300'
plot_width = ''
# For grouping tests together on same axes
test_group_prefixes = ['test_C3D8R_failureEnvelope_sig11sig22',
'test_C3D8R_failureEnvelope_sig12sig22',
'test_C3D8R_failureEnvelope_sig12sig23',
'test_C3D8R_mixedModeMatrix',
'test_C3D8R_fiberCompression_DGD_wkbToTotal',
'test_C3D8R_twoElement_fiberCompression_DGD']
# Grouping charts into subsections
# Each entry maps a dashed section id to a pretty name and the chart ids it
# contains; the ids are used as HTML anchors in the templates below.
chart_groups = dict()
chart_groups['failure-envelopes-C3D8R'] = {
'name_pretty': 'Failure Envelopes, C3D8R',
'charts': ['test_C3D8R_failureEnvelope_sig11sig22', 'test_C3D8R_failureEnvelope_sig12sig22', 'test_C3D8R_failureEnvelope_sig12sig23']
}
chart_groups['single-element-C3D8R'] = {
'name_pretty': 'Single Element, C3D8R',
'charts': ['test_C3D8R_elastic_fiberTension', 'test_C3D8R_elastic_matrixTension', 'test_C3D8R_elementSize', 'test_C3D8R_fiberCompression_CDM', 'test_C3D8R_fiberLoadReversal', 'test_C3D8R_fiberTension', 'test_C3D8R_matrixCompression', 'test_C3D8R_matrixTension', 'test_C3D8R_nonlinearShear12', 'test_C3D8R_schapery12', 'test_C3D8R_simpleShear12', 'test_C3D8R_simpleShear12friction']
}
chart_groups['Fiber-Compression-DGD'] = {
'name_pretty': 'Fiber Compression, DGD',
'charts': ['test_C3D8R_fiberCompression_DGD', 'test_C3D8R_twoElement_fiberCompression_DGD']
}
# Subsection heading
subsection = """
<div id="{section_name_dashes}" class="">
<h3>{section_name}</h3>
{plots}
</div>
"""
# Subsection toc wrapper
subsection_toc_wrapper = """
<li>
<a href="#{section_name_dashes}" class="">{section_name}</a>
<ul class="nav subsection_heading">
{toc_entries}
</ul>
</li>
"""
# Formatting for each plt
plot = """
<div id="{plot_title}" class="section scrollspy">
{plot}
</div>
<br><br>
"""
# Table of contents
toc = """
<li>
<a href="#{plot_title}" class="">{plot_title}</a>
</li>
"""
# Overall page formatting
# The outer page skeleton lives in an external template file read at import.
body = ""
with open('template_run_time_plots_body.html') as f:
    body = f.read()
| StarcoderdataPython |
3402396 | <filename>main/tutors/admin.py
from django.contrib import admin
from .models import Tutor, Invitaions, PostAnAd, AboutAndQualifications, Verify,WishList, Invitaions_by_academy
# Register your models here.
class TutorAdmin(admin.ModelAdmin):
    """Django admin configuration for Tutor accounts."""
    # Columns shown in the change-list view.
    list_display = ("username", "id","gender", "email" , "verified", "verification_sent", "about_complete", "qual_complete")
    # Fields searchable from the admin search box.
    search_fields = ("username", "id", "email", "gender")
    # Right-hand sidebar filters (boolean progress/verification flags).
    list_filter = ("verified","verification_sent","about_complete", "qual_complete")
class InvitaionsAdmin(admin.ModelAdmin):
    """Django admin configuration for student-sent invitations.

    NOTE(review): "Invitaions" follows the (misspelled) model name; renaming
    would require a model migration, so it is kept as-is here.
    """
    list_display = ("inivitaion_by_student","tutor_ad", "accepted", "rejected")
    list_filter = ("accepted","rejected")
class postADAdmin(admin.ModelAdmin):
    """Django admin configuration for tutor advertisements (PostAnAd)."""
    list_display = ("tutorUser","subject","tuition_level","can_travel","estimated_fees","views")
    search_fields = ("subject","tuition_level")
# Register the tutors app models with the Django admin site; models without a
# custom ModelAdmin use the default admin options.
admin.site.register(Tutor, TutorAdmin)
admin.site.register(Invitaions, InvitaionsAdmin)
admin.site.register(PostAnAd,postADAdmin)
admin.site.register(AboutAndQualifications)
admin.site.register(WishList)
admin.site.register(Verify)
admin.site.register(Invitaions_by_academy)
| StarcoderdataPython |
4961806 | """
Should always be faithful duplicate of sequence/BioReaders.py
Duplicated here for tofu installation. This one is called via cupcake.io.BioReaders.
"""
import re, sys, pdb
from collections import namedtuple
import pysam
# (start, end) pair describing one aligned genomic segment (start is 0-based).
Interval = namedtuple('Interval', ['start', 'end'])
class SimpleSAMReader:
    """
    A simplified SAM reader meant for speed. Skips CIGAR & FLAG parsing; identity/coverage calculation.
    """
    SAMheaders = ['@HD', '@SQ', '@RG', '@PG', '@CO']

    def __init__(self, filename, has_header):
        self.filename = filename
        self.f = open(filename)
        self.header = ''
        if has_header:
            # Accumulate header lines, then rewind to the first alignment line.
            while True:
                offset = self.f.tell()
                header_line = self.f.readline()
                if header_line[:3] not in SimpleSAMReader.SAMheaders:
                    break
                self.header += header_line
            self.f.seek(offset)

    def __iter__(self):
        return self

    def __next__(self):
        alignment_line = self.f.readline().strip()
        if not alignment_line:
            raise StopIteration
        return SimpleSAMRecord(alignment_line)
class SimpleSAMRecord:
    """Bare-bones SAM record: qID, sID, sStart/sEnd, qStart/qEnd, cigar only.

    Fixed vs. original: regex patterns are now raw strings ('\\d' in a
    non-raw string is an invalid escape and a SyntaxWarning on modern
    Python), and the local loop variable no longer shadows builtin `type`.
    """
    cigar_rex = re.compile(r'(\d+)([MIDSHN])')
    SAMflag = namedtuple('SAMflag', ['is_paired', 'strand', 'PE_read_num', 'is_secondary', 'is_supplementary'])

    def __init__(self, record_line):
        """
        Simple bare bones version: only has
        qID, sID, sStart, sEnd, qStart, qEnd, cigar
        Simplified assumptions:
        -- must be end-to-end alignment (so qStart always 0)
        -- must be unspliced (no 'N' in cigar string)
        """
        self.qID = None
        self.sID = None
        self.sStart = None
        self.sEnd = None
        self.qStart = 0
        self.qEnd = None  # length of SEQ
        self.cigar = None
        self.process(record_line)

    def __str__(self):
        msg = \
"""
qID: {q}
sID: {s}
sStart-sEnd: {ss}-{se}
qStart-qEnd: {qs}-{qe}
cigar: {c}
""".format(q=self.qID, s=self.sID, \
           ss=self.sStart, se=self.sEnd, qs=self.qStart, qe=self.qEnd, c=self.cigar)
        return msg

    def parse_cigar(self, cigar, start):
        """
        M - match
        I - insertion w.r.t. to ref
        D - deletion w.r.t. to ref
        N - skipped (which means splice junction)
        S - soft clipped
        H - hard clipped (not shown in SEQ)
        = - read match
        X - read mismatch
        ex: 50M43N3D
        NOTE: sets qStart & qEnd, which are often incorrect because of different ways to write CIGAR strings
        instead rely on XS/XE flags (from blasr or pbalign.py) to overwrite this later!!!
        Returns: genomic segment locations (using <start> as offset)
        """
        cur_end = start
        q_aln_len = 0
        for (num, op) in re.findall(r'(\d+)(\S)', cigar):
            num = int(num)
            if op == 'I':
                q_aln_len += num
            elif op in ('M', '=', 'X'):
                cur_end += num
                q_aln_len += num
            elif op == 'D':
                cur_end += num
        self.qEnd = self.qStart + q_aln_len
        self.sEnd = cur_end

    def process(self, record_line):
        """
        Only process cigar to get qEnd and sEnd
        """
        raw = record_line.split('\t')
        self.qID = raw[0]
        self.sID = raw[2]
        if self.sID == '*':  # means no match! STOP here
            return
        self.sStart = int(raw[3]) - 1  # SAM POS is 1-based; store 0-based
        self.cigar = raw[5]
        self.parse_cigar(self.cigar, self.sStart)
        #self.flag = SimpleSAMRecord.parse_sam_flag(int(raw[1]))
class SAMReader:
    """Iterator over a SAM file, yielding fully parsed SAMRecord objects."""
    SAMheaders = ['@HD', '@SQ', '@RG', '@PG', '@CO']

    def __init__(self, filename, has_header, ref_len_dict=None, query_len_dict=None):
        self.filename = filename
        self.f = open(filename)
        self.header = ''
        self.ref_len_dict = ref_len_dict
        self.query_len_dict = query_len_dict
        if has_header:
            # Collect header lines, then rewind to the first alignment line.
            while True:
                offset = self.f.tell()
                raw_line = self.f.readline()
                if raw_line[:3] not in SAMReader.SAMheaders:
                    break
                self.header += raw_line
            self.f.seek(offset)

    def __iter__(self):
        return self

    def __next__(self):
        raw_line = self.f.readline().strip()
        if not raw_line:
            raise StopIteration
        return SAMRecord(raw_line, self.ref_len_dict, self.query_len_dict)
class SAMRecord:
    """Parsed SAM alignment record with coverage/identity bookkeeping.

    Fixed vs. original: regex patterns are raw strings, the local loop
    variable no longer shadows builtin `type`, and parse_sam_flag's first
    parameter is conventionally named `cls` (it is a classmethod).
    """
    SAMflag = namedtuple('SAMflag', ['is_paired', 'strand', 'PE_read_num', 'is_secondary', 'is_supplementary'])

    def __init__(self, record_line=None, ref_len_dict=None, query_len_dict=None):
        """
        Designed to handle BowTie SAM output for unaligned reads (PE read not yet supported)
        Can handle map to transfrag (no splicing) and genome (splicing)
        """
        self.qID = None
        self.sID = None
        self.sStart = None
        self.sEnd = None
        self.segments = None
        self.num_nonmatches = None
        self.num_ins = None
        self.num_del = None
        self.num_mat_or_sub = None
        self.qCoverage = None
        self.sCoverage = None
        self.sLen = None
        self.qLen = None
        self.cigar_qLen = None  # qLen based on parsing CIGAR string
        # qStart, qEnd might get changed in parse_cigar
        self.qStart = 0
        self.qEnd = None  # length of SEQ
        self.cigar = None
        self.flag = None
        self.identity = None
        self.record_line = record_line
        if record_line is not None:
            self.process(record_line, ref_len_dict, query_len_dict)

    def __str__(self):
        msg =\
"""
qID: {q}
sID: {s}
cigar: {c}
sStart-sEnd: {ss}-{se}
qStart-qEnd: {qs}-{qe}
segments: {seg}
flag: {f}
coverage (of query): {qcov}
coverage (of subject): {scov}
alignment identity: {iden}
""".format(q=self.qID, s=self.sID, seg=self.segments, c=self.cigar, f=self.flag,\
           ss=self.sStart, se=self.sEnd, qs=self.qStart, qe=self.qEnd, iden=self.identity,\
           qcov=self.qCoverage, scov=self.sCoverage)
        return msg

    def __eq__(self, other):
        return self.qID == other.qID and self.sID == other.sID and\
            self.sStart == other.sStart and self.sEnd == other.sEnd and\
            self.segments == other.segments and self.qCoverage == other.qCoverage and\
            self.sCoverage == other.sCoverage and self.qLen == other.qLen and\
            self.sLen == other.sLen and self.qStart == other.qStart and\
            self.cigar == other.cigar and self.flag == other.flag and self.identity == other.identity

    @property
    def ref_exons(self):
        # Alias: the genomic segments are the reference-side exons.
        return self.segments

    def process(self, record_line, ref_len_dict, query_len_dict):
        """
        If SAM is from pbalign.py output, then have flags:
        XS: 1-based qStart, XE: 1-based qEnd, XQ: query length, NM: number of non-matches
        ignore_XQ should be False for BLASR/pbalign.py's SAM, True for GMAP's SAM
        0. qID
        1. flag
        2. sID
        3. 1-based offset sStart
        4. mapping quality (ignore)
        5. cigar
        6. name of ref of mate alignment (ignore)
        7. 1-based offset sStart of mate (ignore)
        8. inferred fragment length (ignore)
        9. sequence (ignore)
        10. read qual (ignore)
        11. optional fields
        """
        raw = record_line.split('\t')
        self.qID = raw[0]
        self.sID = raw[2]
        if self.sID == '*':  # means no match! STOP here
            return
        self.sStart = int(raw[3]) - 1
        self.cigar = raw[5]
        self.segments = self.parse_cigar(self.cigar, self.sStart)
        self.sEnd = self.segments[-1].end
        self.flag = SAMRecord.parse_sam_flag(int(raw[1]))
        # process optional fields
        # XM: number of mismatches
        # NM: edit distance (sub/ins/del)
        for x in raw[11:]:
            if x.startswith('NM:i:'):
                self.num_nonmatches = int(x[5:])
        if ref_len_dict is not None:
            self.sCoverage = (self.sEnd - self.sStart) * 1. / ref_len_dict[self.sID]
            self.sLen = ref_len_dict[self.sID]
        # NOTE(review): self.qLen is still None here unless a subclass set it,
        # so this strand-flip is effectively skipped in this class — confirm
        # intended ordering before relying on qStart/qEnd for '-' strand.
        if self.flag.strand == '-' and self.qLen is not None:
            self.qStart, self.qEnd = self.qLen - self.qEnd, self.qLen - self.qStart
        if query_len_dict is not None:  # over write qLen and qCoverage, should be done LAST
            self.qLen = query_len_dict[self.qID]
            self.qCoverage = (self.qEnd - self.qStart) * 1. / self.qLen
        if self.num_nonmatches is not None:
            self.identity = 1. - (self.num_nonmatches * 1. / (self.num_del + self.num_ins + self.num_mat_or_sub))

    def parse_cigar(self, cigar, start):
        """
        M - match
        I - insertion w.r.t. to ref
        D - deletion w.r.t. to ref
        N - skipped (which means splice junction)
        S - soft clipped
        H - hard clipped (not shown in SEQ)
        = - read match
        X - read mismatch
        ex: 50M43N3D
        NOTE: sets qStart & qEnd, which are often incorrect because of different ways to write CIGAR strings
        Returns: genomic segment locations (using <start> as offset)
        """
        segments = []
        cur_start = start
        cur_end = start
        first_thing = True
        q_aln_len = 0
        self.num_del = 0
        self.num_ins = 0
        self.num_mat_or_sub = 0
        # NOTE(review): cigar_qLen sums ALL operation lengths (including N/D),
        # so it is not literally the query length — kept as in the original.
        self.cigar_qLen = 0
        for (num, op) in re.findall(r'(\d+)(\S)', cigar):
            num = int(num)
            self.cigar_qLen += num
            if op == 'H' or op == 'S':
                if first_thing:
                    self.qStart += num
            elif op == 'I':
                q_aln_len += num
                self.num_ins += num
            elif op in ('M', '=', 'X'):
                cur_end += num
                q_aln_len += num
                self.num_mat_or_sub += num
            elif op == 'D':
                cur_end += num
                self.num_del += num
            elif op == 'N':  # junction, make a new segment
                segments.append(Interval(cur_start, cur_end))
                cur_start = cur_end + num
                cur_end = cur_start
            else:
                raise Exception("Unrecognized cigar character {0}!".format(op))
            first_thing = False
        if cur_start != cur_end:
            segments.append(Interval(cur_start, cur_end))
        self.qEnd = self.qStart + q_aln_len
        return segments

    @classmethod
    def parse_sam_flag(cls, flag):
        """
        <NAME>'s SAM https://samtools.github.io/hts-specs/SAMv1.pdf
        1 -- read is one of a pair
        2 -- alignment is one end of proper PE alignment (IGNORE)
        4 -- read has no reported alignments (IGNORE)
        8 -- read is one of a pair and has no reported alignments (IGNORE)
        16 -- reverse ref strand
        32 -- other mate is aligned to ref strand
        64 -- first mate in pair
        128 -- second mate in pair
        256 -- not primary alignment
        512 -- not passing filters
        1024 -- PCR or optical duplicate
        2048 -- supplementary alignment
        Return: SAMflag
        """
        PE_read_num = 0
        strand = '+'
        is_supp = False
        is_secondary = False
        if flag >= 2048:  # supplementary alignment
            flag -= 2048
            is_supp = True
        if flag >= 1024:  # PCR or optical duplicate, should never see this...
            flag -= 1024
        if flag >= 512:  # not passing QC, should never see this
            flag -= 512
        if flag >= 256:  # secondary alignment, OK to see this if option given in BowTie
            flag -= 256
            is_secondary = True
        if flag >= 128:
            PE_read_num = 2
            flag -= 128
        elif flag >= 64:
            PE_read_num = 1
            flag -= 64
        if flag >= 32:
            flag -= 32
        if flag >= 16:
            strand = '-'
            flag -= 16
        if flag >= 8:
            flag -= 8
        if flag >= 4:
            flag -= 4
        if flag >= 2:
            flag -= 2
        assert flag == 0 or flag == 1
        is_paired = flag == 1
        return SAMRecord.SAMflag(is_paired, strand, PE_read_num, is_secondary, is_supp)
class GMAPSAMReader(SAMReader):
    """SAM reader variant for GMAP output; yields GMAPSAMRecord objects."""

    def __next__(self):
        # Header lines can also occur at the end of the file if the SAM was
        # sorted, so skip any '@'-prefixed line wherever it appears.
        while True:
            record_line = self.f.readline().strip()
            if not record_line:
                raise StopIteration
            if not record_line.startswith('@'):
                return GMAPSAMRecord(record_line, self.ref_len_dict, self.query_len_dict)
class GMAPSAMRecord(SAMRecord):
    """SAMRecord variant for GMAP output: honors the XS:A strand tag."""
    def process(self, record_line, ref_len_dict=None, query_len_dict=None):
        """
        SAM files from pbalign.py have following optional fields:
        XS: 1-based qStart, XE: 1-based qEnd, XQ: query length, NM: number of non-matches
        0. qID
        1. flag
        2. sID
        3. 1-based offset sStart
        4. mapping quality (ignore)
        5. cigar
        6. name of ref of mate alignment (ignore)
        7. 1-based offset sStart of mate (ignore)
        8. inferred fragment length (ignore)
        9. sequence (ignore)
        10. read qual (ignore)
        11. optional fields
        """
        raw = record_line.split('\t')
        self.qID = raw[0]
        self.sID = raw[2]
        if self.sID == '*': # means no match! STOP here
            return
        self.sStart = int(raw[3]) - 1
        self.cigar = raw[5]
        self.segments = self.parse_cigar(self.cigar, self.sStart)
        self.sEnd = self.segments[-1].end
        self.flag = SAMRecord.parse_sam_flag(int(raw[1])) # strand can be overwritten by XS:A flag
        self._flag_strand = self.flag.strand # serve as backup for debugging
        for x in raw[11:]:
            if x.startswith('NM:i:'): # number of non-matches
                self.num_nonmatches = int(x[5:])
                self.identity = 1. - (self.num_nonmatches * 1. / (self.num_del + self.num_ins + self.num_mat_or_sub))
            elif x.startswith('XS:A:'): # strand information
                _s = x[5:]
                if _s!='?':
                    self._flag_strand = self.flag.strand # serve as backup for debugging
                    # NOTE(review): rebuilding the flag here forces
                    # is_secondary/is_supplementary to False, discarding any
                    # 256/2048 bits parsed above — confirm this is intended.
                    self.flag = SAMRecord.SAMflag(self.flag.is_paired, _s, self.flag.PE_read_num, is_secondary=False, is_supplementary=False)
        if ref_len_dict is not None:
            self.sCoverage = (self.sEnd - self.sStart) * 1. / ref_len_dict[self.sID]
            self.sLen = ref_len_dict[self.sID]
        # NOTE(review): qLen is still None at this point unless set elsewhere,
        # so the strand flip below only runs for subclasses that pre-set qLen.
        if self.flag.strand == '-' and self.qLen is not None:
            self.qStart, self.qEnd = self.qLen - self.qEnd, self.qLen - self.qStart
        if self.qLen is not None:
            self.qCoverage = (self.qEnd - self.qStart) * 1. / self.qLen
        if query_len_dict is not None: # over write qLen and qCoverage, should be done LAST
            try:
                self.qLen = query_len_dict[self.qID]
            except KeyError: # HACK for blasr's extended qID
                raise Exception("Unable to find qID {0} in the input fasta/fastq!".format(self.qID))
            self.qCoverage = (self.qEnd - self.qStart) * 1. / self.qLen
class SplicedBAMReader:
    """
    The SplicedBAMReader imitates the behavior of GMAPSAMReader,
    basically accepted an aligned BAM file instead of aligned SAM file
    The returned records will have the same format as GMAPSAMRecord
    """
    def __init__(self, filename, ref_len_dict=None, query_len_dict=None):
        # :param filename: path to an aligned BAM file
        # :param ref_len_dict: optional {ref name: length} used for sLen
        # :param query_len_dict: optional {query id: length} used for qLen
        self.filename = filename
        # NOTE(review): passing a text-mode file object while requesting 'rb'
        # looks suspect — pysam also accepts a plain path; confirm.
        self.reader = pysam.AlignmentFile(open(filename), 'rb', check_sq=False)
        self.ref_len_dict = ref_len_dict
        self.query_len_dict = query_len_dict
    def __iter__(self):
        return self
    def __next__(self):
        return self.grab_next_record()
    def grab_next_record(self):
        """Convert the next pysam AlignedSegment into a SAMRecord look-alike."""
        try:
            r = next(self.reader)
            samrec = SAMRecord(None) # we initiate the record and fill it in manually
            samrec.qID = r.qname
            samrec.qStart = r.qstart
            samrec.qEnd = r.qend
            if self.query_len_dict is not None:
                samrec.qLen = self.query_len_dict[samrec.qID]
            else:
                samrec.qLen = r.qlen
            samrec.sID = '*' if r.is_unmapped else r.reference_name
            samrec.sStart = r.reference_start
            samrec.sEnd = r.reference_end
            if self.ref_len_dict is not None:
                samrec.sLen = self.ref_len_dict[samrec.sID]
            else:
                samrec.sLen = r.reference_length
            samrec.cigar = r.cigarstring
            samrec.record_line = r.tostring()
            if samrec.sID == '*': # unmapped, nothing to parse
                return samrec
            # calling parse_cigar also sets num_ins, num_del, num_mat_or_sub, cigar_qlen
            samrec.segments = samrec.parse_cigar(r.cigarstring, r.reference_start)
            samrec.flag = SAMRecord.parse_sam_flag(r.flag)
            tag_d = dict(r.tags)
            if 'NM' in tag_d: # this is used by regular minimap2
                samrec.num_nonmatches = tag_d['NM']
                samrec.identity = 1 - (samrec.num_nonmatches / samrec.qLen)
            else: # parse the cigar tuple to get the number of mismatches/insertions/deletions
                # https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment.cigartuples
                # NOTE: we rely on this being run with mismatches represented by X
                samrec.num_nonmatches = 0
                for cigartype,cigarcount in r.cigartuples:
                    # 1:I, 2:D, 8:X
                    if cigartype in [1, 2, 8]:
                        samrec.num_nonmatches += cigarcount
                samrec.identity = 1 - (samrec.num_nonmatches / samrec.qLen)
            samrec.qCoverage = (r.qend-r.qstart)/samrec.qLen
            samrec.sCoverage = (r.reference_end-r.reference_start)/samrec.sLen
            return samrec
        except StopIteration:
            raise StopIteration
class SplicedBAMReaderRegioned(SplicedBAMReader):
    """
    Extension of SpliceBAMReader, except that an upfront [start_index, end_index) is defined,
    so that it'll return the records within that region
    The returned records will have the same format as GMAPSAMRecord
    """
    def __init__(self, filename, start_index, end_index, ref_len_dict=None, query_len_dict=None):
        self.filename = filename
        # NOTE(review): see SplicedBAMReader — text-mode open handed to a
        # binary ('rb') AlignmentFile; a plain path may be safer. Confirm.
        self.reader = pysam.AlignmentFile(open(filename), 'rb', check_sq=False)
        self.ref_len_dict = ref_len_dict
        self.query_len_dict = query_len_dict
        self.start_index = start_index
        self.end_index = end_index
        self.cur_index = start_index
        if self.start_index is None or self.end_index is None or self.start_index >= self.end_index:
            raise Exception(f"SplicedBAMReaderRegioned must be given proper integer [start_index, end_index)! Instead got {start_index}, {end_index}")
        # Fast-forward past the first start_index records.
        # NOTE(review): if start_index exceeds the file size this raises
        # StopIteration from __init__ — confirm callers expect that.
        for i in range(self.start_index):
            r = next(self.reader)
    def __iter__(self):
        return self
    def __next__(self):
        # Yields exactly end_index - start_index records, then stops.
        self.cur_index += 1
        if self.cur_index > self.end_index:
            raise StopIteration
        return self.grab_next_record()
3222671 | <reponame>gdmarsh/opal
from fastapi import APIRouter, Depends, WebSocket
from fastapi_websocket_pubsub import PubSubEndpoint
from opal_common.confi.confi import load_conf_if_none
from opal_common.config import opal_common_config
from opal_common.logger import logger
from opal_common.authentication.signer import JWTSigner
from opal_common.authentication.deps import WebsocketJWTAuthenticator
from opal_server.config import opal_server_config
class PubSub:
    """
    Wrapper for the Pub/Sub channel used for both policy and data updates
    """
    def __init__(self, signer: JWTSigner, broadcaster_uri:str=None):
        """
        Args:
            signer: JWT signer used to authenticate incoming websocket clients.
            broadcaster_uri (str, optional): Which server/medium should the PubSub use for broadcasting. Defaults to BROADCAST_URI.
            None means no broadcasting.
        """
        broadcaster_uri = load_conf_if_none(broadcaster_uri, opal_server_config.BROADCAST_URI)
        self.router = APIRouter()
        # Remote-id retrieval is only needed when statistics are collected.
        self.endpoint = PubSubEndpoint(broadcaster=broadcaster_uri, rpc_channel_get_remote_id=opal_common_config.STATISTICS_ENABLED)
        authenticator = WebsocketJWTAuthenticator(signer)
        @self.router.websocket("/ws")
        async def websocket_rpc_endpoint(websocket: WebSocket, logged_in: bool = Depends(authenticator)):
            """
            this is the main websocket endpoint the sidecar uses to register on policy updates.
            as you can see, this endpoint is protected by an HTTP Authorization Bearer token.
            """
            if not logged_in:
                logger.info("Closing connection, remote address: {remote_address}", remote_address=websocket.client, reason="Authentication failed")
                await websocket.close()
                return
            # Init PubSub main-loop with or without broadcasting
            if broadcaster_uri is not None:
                async with self.endpoint.broadcaster:
                    await self.endpoint.main_loop(websocket)
            else:
                await self.endpoint.main_loop(websocket)
| StarcoderdataPython |
6401604 | #!/usr/bin/env python3
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains functions for retrieving fasta sequences from the
database directory used by run_BLAST_pipeline.py and HMMer_pipeline.py.
This will need to be updated to work with nucleotide data in the database
directory system.
"""
from Bio import SeqIO
import glob
import os
def get_seq_obj_from_db_fasta(acc_list, fa_path):
    """Takes a list of accessions and a fasta file path, and returns seq
    objects for corresponding sequences.

    Fixed vs. original:
    - no longer mutates the caller's acc_list (the old code aliased it and
      removed items in place);
    - no longer removes items from a list while iterating over it, which
      could silently skip accessions;
    - actually breaks out of the file scan once every accession is found,
      as the original comment promised.
    """
    record_list = []
    # Work on a copy of the accessions; a set gives O(1) membership tests.
    remaining = set(acc_list)
    with open(fa_path) as fa:
        for record in SeqIO.parse(fa, 'fasta'):
            if record.id in remaining:
                record_list.append(record)
                remaining.remove(record.id)
                if not remaining:
                    break  # all accessions identified; stop scanning
    # Check whether any sequences not identified.
    if len(remaining) > 0:
        print('\nThe sequences corresponding to the following accessions were \
not identified:')
        for acc in sorted(remaining):
            print(acc)
        print('\n')
    return record_list
def get_fas_from_db_dir(db_name, acc_list, dbdirpath, prot_name=None):
    """Takes a dir path (e.g., /Users/Lael/Documents/Data/Genomes_2016), a
    database name (e.g., Gintestinalis), and one or more accession numbers, and
    returns the corresponding fasta sequence(s) as a string.
    """
    # Locate the single fasta file expected under the database directory.
    pattern = os.path.join(dbdirpath, db_name + '/db_*/*.fa')  #'/db_Prot/*.fa'
    fa_paths = glob.glob(pattern)
    if len(fa_paths) > 1:
        print('\nProblem: more than one fasta file identified in the database path.')
    elif len(fa_paths) < 1:
        print('\nProblem: No fasta file identified in the database path.')
    # Check that fasta file paths were identified.
    assert len(fa_paths) >= 1, "Error: Could not identify any fasta file paths."
    # Get sequence objects from the (first) fasta file.
    records = get_seq_obj_from_db_fasta(acc_list, fa_paths[0])
    # Render each record and concatenate into one fasta-formatted string.
    pieces = [get_abbrev_fa_record_text_from_obj(rec, db_name, prot_name)
              for rec in records]
    return ''.join(pieces)
| StarcoderdataPython |
11301735 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 16:06:51 2018
@author: jguillaumes
"""
import json
import elasticsearch_dsl as dsl
from weatherLib import parseLine,connect_wait_ES,WeatherData,VERSION,FW_VERSION,SW_VERSION
from elasticsearch.helpers import bulk
from datetime import date,datetime,timedelta
# Elasticsearch cluster hosts (NOTE(review): es_hosts is defined but unused in
# this visible section — presumably intended for connect_wait_ES; confirm).
es_hosts = [ 'elastic00.jguillaumes.dyndns.org',\
             'elastic01.jguillaumes.dyndns.org',\
             'elastic02.jguillaumes.dyndns.org']
fileName = "weather-2018.04.04.dat"
bulkFile = 'bulk-insert.json'
numdocs = 0
curindex = None
# Convert each line of the raw weather data file into a pair of JSON lines
# (action + document) in Elasticsearch bulk-insert format.
with open(bulkFile,'w') as outfile:
    with open(fileName) as file:
        # Per-day sequence number used to build a unique tsa identifier.
        numtsa = 1
        for line in file:
            stamp,temp,humd,pres,light = parseLine(line)
            # print(stamp,temp,humd,pres,light)
            # tsa = YYYYMMDD * 1e6 + per-day sequence number
            tsa = stamp.year * 10000 + stamp.month * 100 + stamp.day
            tsa = tsa * 1000000 + numtsa
            numtsa += 1
            # Bulk API action line: one index per day, versioned index name.
            index = {
                'index': {
                    '_index': "weather-" + VERSION + "-" + stamp.strftime("%Y.%m.%d"),
                    '_type': "doc"
                }
            }
            w = WeatherData()
            w.time = stamp.isoformat()
            w.temperature = temp
            w.humidity = humd
            w.pressure = pres
            w.light = light
            w.version = VERSION
            w.fwVersion = FW_VERSION
            w.swVersion = SW_VERSION
            w.tsa = tsa
            json.dump(index,outfile)
            print("\r",file=outfile)
            json.dump(w.to_dict(),outfile)
            print("\r",file=outfile)
            numdocs += 1
    # NOTE(review): redundant — the enclosing `with` already closes outfile.
    outfile.close()
print("Generated {0:d} documents.".format(numdocs))
| StarcoderdataPython |
9762937 | <reponame>XinyueZ/models
"""Downloads the UCI HIGGS Dataset and prepares train data.
The details on the dataset are in https://archive.ics.uci.edu/ml/datasets/HIGGS
It takes a while as it needs to download 2.8 GB over the network, process, then
store it into the specified location as a compressed numpy file.
Usage:
$ python data_download.py --data_dir=/tmp/higgs_data
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tempfile
import numpy as np
import pandas as pd
from six.moves import urllib
import tensorflow as tf
# UCI repository location of the HIGGS dataset (2.8 GB gzipped CSV download).
URL_ROOT = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280'
INPUT_FILE = 'HIGGS.csv.gz'
NPZ_FILE = 'HIGGS.csv.gz.npz' # numpy compressed file to contain 'data' array.
def parse_args(argv=None):
  """Parses arguments and returns a tuple (known_args, unparsed_args).

  Args:
    argv: Optional list of argument strings to parse; defaults to sys.argv
      (backward-compatible generalization that also makes this testable).
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_dir', type=str, default='/tmp/higgs_data',
      help='Directory to download higgs dataset and store training/eval data.')
  return parser.parse_known_args(argv)
def _download_higgs_data_and_save_npz(data_dir):
  """Download higgs data and store as a numpy compressed file.

  Args:
    data_dir: Directory in which the compressed NPZ_FILE is written.

  Raises:
    ValueError: if the processed data file already exists in data_dir.
  """
  input_url = os.path.join(URL_ROOT, INPUT_FILE)
  np_filename = os.path.join(data_dir, NPZ_FILE)
  if tf.gfile.Exists(np_filename):
    raise ValueError('data_dir already has the processed data file: {}'.format(
        np_filename))
  if not tf.gfile.Exists(data_dir):
    tf.gfile.MkDir(data_dir)
  # 2.8 GB to download.
  # BUGFIX: initialize before the try so the finally block cannot hit an
  # unbound name (masking the real error) if urlretrieve itself fails.
  temp_filename = None
  try:
    print('Data downloading..')
    temp_filename, _ = urllib.request.urlretrieve(input_url)
    # Reading and parsing 11 million csv lines takes 2~3 minutes.
    print('Data processing.. taking multiple minutes..')
    # .values replaces .as_matrix(), which was removed in pandas 1.0.
    data = pd.read_csv(
        temp_filename,
        dtype=np.float32,
        names=['c%02d' % i for i in range(29)]  # label + 28 features.
    ).values
  finally:
    if temp_filename:
      os.remove(temp_filename)
  # Writing to temporary location then copy to the data_dir (0.8 GB).
  f = tempfile.NamedTemporaryFile()
  np.savez_compressed(f, data=data)
  # BUGFIX: flush buffered bytes so the copy below reads the complete file.
  f.flush()
  tf.gfile.Copy(f.name, np_filename)
  print('Data saved to: {}'.format(np_filename))
def main(unused_argv):
  # Ensure the target directory exists, then download and convert the data.
  if not tf.gfile.Exists(FLAGS.data_dir):
    tf.gfile.MkDir(FLAGS.data_dir)
  _download_higgs_data_and_save_npz(FLAGS.data_dir)
if __name__ == '__main__':
  # Parse known flags into the module-level FLAGS; forward anything
  # unparsed to tf.app.run so absl/tf flags still work.
  FLAGS, unparsed = parse_args()
  tf.app.run(argv=[sys.argv[0]] + unparsed)
| StarcoderdataPython |
288526 | """
Form for creating a region object
"""
import logging
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.apps import apps
from gvz_api.utils import GvzRegion
from ...models import Region, Page, PageTranslation, LanguageTreeNode
from ...utils.slug_utils import generate_unique_slug
logger = logging.getLogger(__name__)
class RegionForm(forms.ModelForm):
    """ModelForm for creating and editing Region objects.

    On creation, content (language tree, pages, media) can optionally be
    copied from an existing region via the duplicated_region field.
    """
    # Optional source region whose content is imported into a new region.
    # NOTE(review): "Do no import" looks like a typo for "Do not import" —
    # it is a translated runtime string, so it is left unchanged here.
    duplicated_region = forms.ModelChoiceField(
        queryset=Region.objects.all(),
        empty_label=_("Do no import initial content"),
        required=False,
    )
    class Meta:
        model = Region
        fields = [
            'name',
            'common_id',
            'slug',
            'events_enabled',
            'push_notifications_enabled',
            'push_notification_channels',
            'latitude',
            'longitude',
            'postal_code',
            'admin_mail',
            'statistics_enabled',
            'matomo_url',
            'matomo_token',
            'matomo_ssl_verify',
            'status',
            'page_permissions_enabled',
            'administrative_division',
            'aliases',
        ]
    # pylint: disable=arguments-differ
    def save(self, *args, **kwargs):
        """Save the region; on creation, optionally duplicate content
        (language tree, pages, media) from the selected source region."""
        logger.info(
            'RegionForm saved with args %s, kwargs %s and cleaned data %s',
            args,
            kwargs,
            self.cleaned_data
        )
        # Only duplicate content if region is created and a region was selected
        duplicate_region = not self.instance.id and self.cleaned_data['duplicated_region']
        # Save region with the default method from ModelForm
        region = super(RegionForm, self).save(*args, **kwargs)
        if duplicate_region:
            source_region = self.cleaned_data['duplicated_region']
            logger.info(
                'Duplicate content of region %s to region %s',
                source_region,
                region
            )
            # Duplicate language tree
            duplicate_language_tree(source_region, region)
            # Duplicate pages
            duplicate_pages(source_region, region)
            # Duplicate media content
            duplicate_media(source_region, region)
        return region
    def clean(self):
        """Fill empty aliases/coordinates from the GVZ API when available."""
        cleaned_data = super(RegionForm, self).clean()
        if apps.get_app_config('gvz_api').api_available:
            gvz_region = GvzRegion(region_name=cleaned_data['name'],
                                   region_key=cleaned_data['common_id'],
                                   region_type=cleaned_data['administrative_division'])
            # Only fill in values the user left empty/zero.
            if gvz_region.aliases and cleaned_data['aliases'] == '':
                cleaned_data['aliases'] = gvz_region.aliases
            if gvz_region.longitude and cleaned_data['longitude'] == 0.0:
                cleaned_data['longitude'] = gvz_region.longitude
            if gvz_region.latitude and cleaned_data['latitude'] == 0.0:
                cleaned_data['latitude'] = gvz_region.latitude
        return cleaned_data
    def clean_slug(self):
        """Ensure the slug is unique among regions."""
        return generate_unique_slug(self, 'region')
def duplicate_language_tree(source_region, target_region, source_parent_id=None, target_parent=None):
    """
    Function to duplicate the language tree of one region to another.
    Usage: duplicate_language_tree(source_region, target_region)
    This is a recursive function to walk the whole language tree. It starts at root level with the default parent None.
    The recursion is necessary because the new nodes need their correct (also duplicated) parent node.
    Args:
        :param source_region: The region from which the language tree should be duplicated
        :param target_region: The region to which the language tree should be added
        :param source_parent_id: The current parent node id of the recursion
        :param target_parent: The node of the target region which is the duplicate of the source parent node
    """
    # Children of the current source parent; None selects the root level.
    children = LanguageTreeNode.objects.filter(
        region=source_region, parent__id=source_parent_id
    ).all()
    for child in children:
        # Remember the source id before the instance is repurposed below.
        original_id = child.pk
        # Re-point the node at the target region/parent and clear the primary
        # key so that save() performs an INSERT instead of an UPDATE.
        child.region = target_region
        child.parent = target_parent
        child.pk = None
        child.full_clean()  # validate the duplicated node before saving
        child.save()
        # Recurse into the source node's children with the new duplicate as parent.
        duplicate_language_tree(source_region, target_region, original_id, child)
def duplicate_pages(source_region, target_region, source_parent_id=None, target_parent=None, level=0):
    """
    Recursively copy all pages of ``source_region`` (including their
    translations) onto ``target_region``.

    Starts at the root pages (parent id ``None``) and walks down the page
    tree, re-parenting every duplicated page under its previously duplicated
    parent page.

    :param source_region: region whose pages are copied
    :param target_region: region that receives the copied pages
    :param source_parent_id: id of the source page whose children are processed
    :param target_parent: already-duplicated counterpart of the source parent page
    :param level: current recursion depth, used only for log indentation
    """
    indent = '| ' * level + '├' + '─'
    logger.info(
        '%s Source parent %s started (target parent %s)',
        indent,
        source_parent_id,
        target_parent,
    )
    # All pages below the current source parent; for the initial call
    # (parent id None) this yields the root pages of the source region.
    for page in Page.objects.filter(region=source_region, parent__id=source_parent_id).all():
        logger.info(
            '%s Source page %s started',
            '| ' * (level + 1) + '├' + '─',
            page,
        )
        # Keep only the id of the source page: the object itself is mutated
        # below and turned into the duplicate.
        source_page_id = page.pk
        page.parent = target_parent
        page.region = target_region
        # Clearing the primary key makes save() INSERT a new row.
        page.pk = None
        page.full_clean()
        page.save()
        # Duplicate every translation of the source page as well.
        for translation in PageTranslation.objects.filter(page__id=source_page_id):
            translation.page = page
            translation.pk = None
            translation.full_clean()
            translation.save()
            logger.info(
                '%s Page translation %s finished',
                '| ' * (level + 3) + '├' + '─',
                translation,
            )
        # Descend into the children of the original source page.
        duplicate_pages(source_region, target_region, source_page_id, page, level + 2)
        logger.info(
            '%s Source page %s finished (target page %s)',
            '| ' * (level + 1) + '├' + '─',
            source_page_id,
            page,
        )
    logger.info(
        '%s Source parent %s finished (target parent %s)',
        indent,
        source_parent_id,
        target_parent,
    )
# pylint: disable=unused-argument
def duplicate_media(source_region, target_region):
    """Copy all media files of ``source_region`` to ``target_region``.

    Not implemented yet; currently a no-op placeholder.
    """
    # TODO: implement duplication of all media files
| StarcoderdataPython |
84286 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Dict, cast
from pants.engine.addresses import Address, Addresses
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import goal_rule
from pants.engine.selectors import Get
from pants.engine.target import DescriptionField, ProvidesField, Targets
class ListOptions(LineOriented, GoalSubsystem):
    """Lists all targets matching the file or target arguments."""

    name = "list-v2"

    @classmethod
    def register_options(cls, register):
        """Register the goal's command line options (order is preserved)."""
        super().register_options(register)
        provides_help = (
            "List only targets that provide an artifact, displaying the columns specified by "
            "--provides-columns."
        )
        register("--provides", type=bool, help=provides_help)
        columns_help = (
            "Display these columns when --provides is specified. Available columns are: "
            "address, artifact_id, repo_name, repo_url, push_db_basedir"
        )
        register("--provides-columns", default="address,artifact_id", help=columns_help)
        register(
            "--documented",
            type=bool,
            help="Print only targets that are documented with a description.",
        )
class List(Goal):
    # The goal is configured entirely through its options subsystem.
    subsystem_cls = ListOptions
@goal_rule
async def list_targets(addresses: Addresses, options: ListOptions, console: Console) -> List:
    """Print matched targets, optionally filtered to provided/documented ones."""
    # Nothing matched: warn on stderr but still exit successfully.
    if not addresses.dependencies:
        console.print_stderr(f"WARNING: No targets were matched in goal `{options.name}`.")
        return List(exit_code=0)

    provides_enabled = options.values.provides
    documented_enabled = options.values.documented
    # The two filtering modes are mutually exclusive.
    if provides_enabled and documented_enabled:
        raise ValueError(
            "Cannot specify both `--list-documented` and `--list-provides` at the same time. "
            "Please choose one."
        )

    if provides_enabled:
        targets = await Get[Targets](Addresses, addresses)
        # Keep only targets that actually define a `provides=` artifact.
        addresses_with_provide_artifacts = {
            tgt.address: tgt[ProvidesField].value
            for tgt in targets
            if tgt.get(ProvidesField).value is not None
        }
        # Column name -> extractor(address, artifact), driven by --provides-columns.
        extractor_funcs = {
            "address": lambda address, _: address.spec,
            "artifact_id": lambda _, artifact: str(artifact),
            "repo_name": lambda _, artifact: artifact.repo.name,
            "repo_url": lambda _, artifact: artifact.repo.url,
            "push_db_basedir": lambda _, artifact: artifact.repo.push_db_basedir,
        }
        try:
            extractors = [
                extractor_funcs[col] for col in options.values.provides_columns.split(",")
            ]
        except KeyError:
            raise ValueError(
                "Invalid columns provided for `--list-provides-columns`: "
                f"{options.values.provides_columns}. Valid columns are: "
                f"{', '.join(sorted(extractor_funcs.keys()))}."
            )
        with options.line_oriented(console) as print_stdout:
            for address, artifact in addresses_with_provide_artifacts.items():
                print_stdout(" ".join(extractor(address, artifact) for extractor in extractors))
        return List(exit_code=0)

    if documented_enabled:
        targets = await Get[Targets](Addresses, addresses)
        # Keep only targets with a non-None `description=`.
        addresses_with_descriptions = cast(
            Dict[Address, str],
            {
                tgt.address: tgt[DescriptionField].value
                for tgt in targets
                if tgt.get(DescriptionField).value is not None
            },
        )
        with options.line_oriented(console) as print_stdout:
            for address, description in addresses_with_descriptions.items():
                # Indent continuation lines of multi-line descriptions.
                formatted_description = "\n ".join(description.strip().split("\n"))
                print_stdout(f"{address.spec}\n {formatted_description}")
        return List(exit_code=0)

    # Default mode: print each matched address, sorted.
    with options.line_oriented(console) as print_stdout:
        for address in sorted(addresses):
            print_stdout(address)
    return List(exit_code=0)
def rules():
    """Return the rules that register the `list` goal with the engine."""
    return [list_targets]
| StarcoderdataPython |
3268032 | <filename>src/cobra/apps/accessgroup/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import cobra.models.fields.gzippeddict
import cobra.models.fields.bounded
class Migration(migrations.Migration):
    """Initial migration: creates the ``AccessGroup`` model with an M2M to the user model."""

    dependencies = [
        # Depends on whatever model AUTH_USER_MODEL is swapped to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AccessGroup',
            fields=[
                ('id', cobra.models.fields.bounded.BoundedBigAutoField(serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                # Role level; defaults to the plain 'User' tier (50).
                ('type', cobra.models.fields.bounded.BoundedIntegerField(default=50, choices=[(0, 'Owner'), (25, 'Admin'), (50, 'User'), (100, 'System Agent')])),
                ('managed', models.BooleanField(default=False)),
                # Arbitrary metadata stored gzipped.
                ('data', cobra.models.fields.gzippeddict.GzippedDictField(null=True, blank=True)),
                ('date_added', models.DateTimeField(default=django.utils.timezone.now)),
                ('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
                'db_table': 'cobra_accessgroup',
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
9768217 | <gh_stars>0
import sys, os, json, urllib
from collections import OrderedDict
"""
Steamspy & steam web api scraper
By <NAME>
Last Modified: 03/10/2018
Usage:
python scraper.py -f <filename.json>
"""
def get_args():
    """Return the JSON file path following the -f flag, or print usage and exit.

    NOTE: this module is Python 2 code (print statements, urllib.urlopen).
    """
    loc = ""
    for index, item in enumerate(sys.argv, start=1):
        # With start=1, `index` points at the argv element *after* `item`;
        # when that element is "-f", the one after it is taken as the path.
        if len(sys.argv) > index and sys.argv[index] == "-f":
            loc = sys.argv[index+1]
            print loc
            return loc
    print "Usage Error: 'python scraper.py -f <filename.json>'"
    print "--------------------"
    exit()
def get_json_file():
    """Load and return the app mapping (ordered) from the file given via -f."""
    filename = os.path.join(os.getcwd(), get_args())
    with open(filename) as data:
        # OrderedDict preserves the on-disk key order of the app list.
        apps = json.load(data, object_pairs_hook=OrderedDict)
        print "Loaded: ", filename
        return apps
    # NOTE(review): the three lines below are unreachable — the function
    # always returns (or raises) inside the `with` block above.
    print "Error: Failed to load JSON file"
    print "--------------------"
    exit()
def get_game_ids(apps):
    """Print an appid/name table and return the list of appids from *apps*."""
    ids = []
    print "%6s %1s" % ("appid", "name")
    for key, value in apps.items():
        print "%6s %1s" % (value["appid"],value["name"])
        ids.append(value["appid"])
    return ids
def get_steam_api_data(ids):
    """Fetch Steam store details for each appid into games/<appid>.json."""
    count = 0
    # NOTE(review): `collection` is never used and could be removed.
    collection = []
    directory = os.path.join(os.getcwd(), 'games/')
    if not os.path.exists(directory):
        os.makedirs(directory)
    for current in ids:
        count += 1
        url ="https://store.steampowered.com/api/appdetails?appids=" + str(current) + "&cc=gb&l=en"
        print count, " | ", url
        # Steam API limits may occur
        response = urllib.urlopen(url)
        data = json.loads(response.read())
        name = directory + str(current) + ".json"
        with open(name, 'w') as outfile:
            json.dump(data, outfile, indent=1)
    print "All files successfuly retrieved"
# Run Program
# Load the app list named on the command line, then fetch store data per app.
print "--------------------"
apps = get_game_ids(get_json_file())
get_steam_api_data(apps)
print "--------------------"
| StarcoderdataPython |
3407617 | <reponame>Xentrics/metaGEM
#!/usr/bin/env python
"""
Based on the checkm results, approves bins according to the leves of contamination and completeness.
Copies approved bins to output directory.
@author: alneberg
"""
from __future__ import print_function
import sys
import os
import argparse
import pandas as pd
from shutil import copyfile
def main(args):
    """Copy bins meeting the completeness/contamination thresholds to the output dir.

    Reads the CheckM QA table (tab-separated, bin id in the first column),
    keeps rows with ``Completeness >= args.min_completeness`` and
    ``Contamination <= args.max_contamination``, and copies each approved
    bin's fasta file from ``args.bin_directory`` to ``args.output_directory``.

    :param args: namespace with bin_directory, checkm_stats, output_directory,
        min_completeness, max_contamination and extension attributes
    """
    # Read in the checkm table.
    # FIX: pd.read_table is deprecated (removed in pandas 2.0); use
    # read_csv with an explicit tab separator instead.
    df = pd.read_csv(args.checkm_stats, sep="\t", index_col=0)
    # Extract the ids for all rows that meet the requirements.
    filtered_df = df[
        (df['Completeness'] >= args.min_completeness)
        & (df['Contamination'] <= args.max_contamination)
    ]
    approved_bins = list(filtered_df.index)
    # Copy the approved bins to the new output directory.
    for approved_bin_id in approved_bins:
        approved_bin = str(approved_bin_id)
        bin_source = os.path.join(args.bin_directory, approved_bin + '.' + args.extension)
        bin_destination = os.path.join(args.output_directory, os.path.basename(bin_source))
        sys.stderr.write("Copying approved bin {} from {} to {}\n".format(approved_bin, bin_source, bin_destination))
        copyfile(bin_source, bin_destination)
    sys.stderr.write("\nApproved {} bins\n\n".format(len(approved_bins)))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    # Positional arguments: where the bins live, the CheckM table, destination.
    parser.add_argument("bin_directory", help=("Input fasta files should be within directory."))
    parser.add_argument("checkm_stats", help="Checkm qa stats in tab_table format")
    parser.add_argument("output_directory", help="Directory where to put approved bins")
    # Optional thresholds and bin-file extension.
    parser.add_argument("--min_completeness", default=85, type=float, help="default=85")
    parser.add_argument("--max_contamination", default=5, type=float, help="default=5")
    parser.add_argument("--extension", default='fa')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
1702868 | <filename>torch_trainer.py
import os,time
import numpy as np
import torch
import torch.nn as nn
# import torch.optim as optim
from torch.autograd import Variable
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, cohen_kappa_score, f1_score
from tqdm import tqdm
from tensorboardX import SummaryWriter
from tools.data_loader import XY_dataset_5inOne as myDataset
from sleep_models.Net_ResV1_TwoLoss import Net_Seq_E2E as myNet
def trainer(resume = False, freq = 125):
    """Train the two-head sleep-staging model end to end.

    :param resume: resume from './weights/checkpoint' instead of starting fresh
    :param freq: sampling frequency in Hz; one input frame is 30*freq samples

    Requires CUDA (all tensors/model are moved with .cuda()).
    """
    # Make sure the output directories exist.
    path = ('./weights', './history')
    for p in path:
        if os.path.exists(p) is False:
            os.mkdir(p)
    EPOCH_NUM_MAX = 20       # total number of epochs
    EPOCH_STEP_NUM = 5000    # training mini-batches per epoch
    BATCH_NUM = 200          # training batch size
    # !!!!!!!!!!!!!!!!!
    import multiprocessing
    multiprocessing.set_start_method('spawn', True)
    # tensorboard: wipe old logs when starting a fresh run
    import shutil,random
    if not resume:
        shutil.rmtree('logs')
    writer = SummaryWriter('logs')
    # dataloader prepare
    trainSet = myDataset('train', frame_len = 30*freq)
    validSet = myDataset('valid', frame_len = 30*freq)
    testSet = myDataset('test', frame_len = 30*freq)
    trainLoader = torch.utils.data.DataLoader(trainSet, batch_size = BATCH_NUM,
                                              shuffle = True, num_workers = 6, drop_last = False)
    validLoader = torch.utils.data.DataLoader(validSet, batch_size = BATCH_NUM * 2,
                                              shuffle = False, num_workers = 6, drop_last = False)
    testLoader = torch.utils.data.DataLoader(testSet, batch_size = BATCH_NUM * 2,
                                             shuffle = False, num_workers = 6, drop_last = False)
    # options: resume full training state from checkpoint, or start fresh
    if resume:
        loadObj = torch.load('./weights/checkpoint')
        model, epoch, optim, scheduler, best_loss_val = loadObj['net'], loadObj['epoch'], loadObj['optim'], loadObj['sched'], loadObj['best_loss_val']
        epoch += 1
    else:
        model = myNet(5).cuda()
        best_loss_val, epoch = 9999, 1
        optim = torch.optim.Adam(model.parameters(), lr= 2e-3)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim,4,2e-5)
    # start epoch
    print('start epoch')
    step = 0
    trainIter = iter(trainLoader)
    for epoch in range(epoch, EPOCH_NUM_MAX + 1):
        tic = time.time()
        # Blend weight between the model's two heads/losses; starts near 1
        # and decays quadratically to 0 over the training run.
        alpha = 1 - (epoch / EPOCH_NUM_MAX) ** 2
        name = ('train', 'valid', 'test')
        epoch_loss = {i:0 for i in name}; epoch_acc = {i:0 for i in name}
        record_target = {i:torch.LongTensor([]) for i in name}; record_pred = {i:torch.LongTensor([]) for i in name}
        torch.cuda.empty_cache()
        model.train()
        tq = tqdm(range(EPOCH_STEP_NUM), desc= 'Trn', ncols=75, ascii=True)
        for i, _ in enumerate(tq):
            data, target, loc = next(trainIter)
            step += 1
            # Restart the loader when exhausted so an "epoch" can span more
            # batches than one pass over the dataset.
            if step == len(trainLoader):
                step = 0
                trainIter = iter(trainLoader)
            inputs = Variable(data.cuda())
            inputs.requires_grad = True
            targets = Variable(target.cuda())
            # forward: two predictions and two losses, blended with alpha
            x1, x2, loss1, loss2 = model(inputs, targets.view([-1]).long())
            pred = alpha * x1 + (1 - alpha) * x2
            loss = alpha * loss1 + (1 - alpha) * loss2
            # backward
            optim.zero_grad()
            loss.backward()
            optim.step()
            # record predictions/targets for end-of-epoch metrics
            pred = torch.argmax(pred,1).cpu()
            record_pred['train'] = torch.cat([record_pred['train'], pred])
            record_target['train'] = torch.cat([record_target['train'], target])
            epoch_loss['train'] += loss.item()
            epoch_acc['train'] += accuracy_score(target, pred)
            tq.set_postfix({'Loss':'{:.4f}'.format(epoch_loss['train'] / (tq.n+1)),
                            'Acc:':'{:.4f}'.format(epoch_acc['train'] / (tq.n+1))})
        epoch_loss['train'] /= (i+1)
        # eval on validation and test splits
        torch.cuda.empty_cache()
        model.eval()
        valtestLoader = {'valid':validLoader, 'test':testLoader}
        for idx in valtestLoader:
            tq = tqdm(valtestLoader[idx], desc = {'valid':'Val','test':'Tst'}[idx], ncols=75, ascii=True)
            for i, (data, target, loc) in enumerate(tq):
                inputs = Variable(data.cuda())
                # inputs.requires_grad = True
                targets = Variable(target.cuda())
                with torch.no_grad():
                    x1, x2, loss1, loss2 = model(inputs, targets.view([-1]).long())
                    # Evaluation always blends the two heads equally.
                    alpha = 0.5
                    pred = alpha * x1 + (1 - alpha) * x2
                    loss = alpha * loss1 + (1 - alpha) * loss2
                #record
                pred = torch.argmax(pred,1).cpu()
                record_pred[idx] = torch.cat([record_pred[idx], pred])
                record_target[idx] = torch.cat([record_target[idx], target])
                epoch_loss[idx] += loss.item()
                epoch_acc[idx] += accuracy_score(target, pred)
                tq.set_postfix({'Loss':'{:.4f}'.format(epoch_loss[idx] / (i+1)),
                                'Acc:':'{:.4f}'.format(epoch_acc[idx] / (i+1))})
            epoch_loss[idx] /= (i+1)
        # epoch end
        scheduler.step()
        # Final accuracies are recomputed over the full records; the running
        # averages above are only for the progress bar.
        for idx in name:
            epoch_acc[idx] = accuracy_score(record_target[idx], record_pred[idx])
        msg_epoch = 'epoch:{:02d}, time:{:2f}\n'.format(epoch, time.time() - tic)
        msg_loss = 'Trn Loss:{:.4f}, acc:{:.2f} Val Loss:{:.4f}, acc:{:.2f} Tst Loss:{:.4f}, acc:{:.2f}\n'.format(
            epoch_loss['train'], epoch_acc['train'] * 100,
            epoch_loss['valid'], epoch_acc['valid'] * 100,
            epoch_loss['test'], epoch_acc['test'] * 100)
        msg_test_detail = classification_report(record_target['test'], record_pred['test'], labels=[0,1,2,3,4]) \
            + str(confusion_matrix(record_target['test'], record_pred['test'], labels=[0,1,2,3,4])) \
            + '\nKappa:' \
            + str(cohen_kappa_score(record_target['test'], record_pred['test'], labels=[0,1,2,3,4])) \
            + '\n\n'
        print(msg_epoch + msg_loss[:-1] + msg_test_detail)
        # save: tensorboard scalars + plain-text history log
        writer.add_scalars('loss',{'train':epoch_loss['train'] , 'valid':epoch_loss['valid'], 'test':epoch_loss['test']},epoch)
        writer.add_scalars('acc',{'train':epoch_acc['train'], 'valid':epoch_acc['valid'], 'test':epoch_acc['test']},epoch)
        with open('history/log.txt','a') as f:
            f.write(msg_epoch)
            f.write(msg_loss)
            f.write(msg_test_detail)
        # Checkpoint only when validation loss improves.
        if best_loss_val > epoch_loss['valid']:
            best_loss_val = epoch_loss['valid']
            saveObj = {'net': model, 'epoch':epoch, 'optim':optim , 'sched':scheduler, 'best_loss_val':best_loss_val}
            torch.save(saveObj, 'weights/epoch_{:02d}_val_loss={:4f}_acc={:.4f}'.format(epoch, epoch_loss['valid'], epoch_acc['valid']))
            torch.save(saveObj, 'weights/checkpoint')
    writer.close()
if __name__ == "__main__":
    # Pin training to GPU 1, then run with the default settings.
    os.environ['CUDA_VISIBLE_DEVICES']= '1'
    trainer()
1923528 | <filename>utils/typecheck.py
"""
This module contains function to check the type of variables.
"""
def ensure_type(name, var, *types):
    """
    Check that a variable is an instance of one of the allowed types.

    Arguments
    ---------
    name: variable name used in the error message
    var: python object to check
    *types: allowed types

    Raises
    ------
    ValueError: if an entry of *types* (checked in order, up to the first
        matching type) is not itself a type
    TypeError: if *var* matches none of *types*
    """
    for ty in types:
        if not isinstance(ty, type):
            raise ValueError(
                "The given value {} in *types is not a type. (found {})".
                format(ty, type(ty).__name__))
        if isinstance(var, ty):
            return
    raise TypeError("{} has to be {}. (found {})".format(
        name,
        # BUG FIX: ' or'.join rendered e.g. "int orfloat"; use ' or ' so
        # the alternatives are separated correctly.
        ' or '.join(map(lambda x: x.__name__, types)),
        type(var).__name__,
    ))
def ensure_type_array(name, array, *types):
    """
    Check that every element of an iterable matches one of the allowed types.

    Arguments
    ---------
    name: variable name used in the error message
    array: iterable of python objects to check
    *types: allowed types

    Raises
    ------
    ValueError: if an entry of *types* is not itself a type
    TypeError: if any element matches none of *types*; the message lists the
        index and actual type of every offending element
    """
    for ty in types:
        if not isinstance(ty, type):
            raise ValueError(
                "The given value {} in *types is not a type. (found {})".
                format(ty, type(ty).__name__))
    errors = []
    for idx, var in enumerate(array):
        # isinstance accepts a tuple of types, which replaces the manual
        # inner loop over *types* with a skip flag.
        if not isinstance(var, types):
            errors.append((idx, type(var)))
    if errors:
        raise TypeError(
            "All elements in {} has to be {}. This does not hold for the elements:\n{}".
            format(
                name,
                # BUG FIX: ' or'.join rendered e.g. "int orfloat"; use ' or '.
                ' or '.join(map(lambda x: x.__name__, types)),
                '\n'.join(
                    map(lambda e: "\telement with index " + str(e[0]) + " has type " + str(e[1].__name__),
                        errors)),
            ))
1604933 | <filename>minibugs/middleware.py
from django.utils.encoding import force_text
from django.conf import settings
from django.template.loader import render_to_string
from django.contrib.auth import get_user
from .models import Ticket, TicketUpdate
import re
_HTML_TYPES = ('text/html', 'application/xhtml+xml')
class MinibugsMiddleware:
    """Injects the minibugs ticket modal just before </body> of HTML responses
    for authenticated users (insertion logic modelled on django-debug-toolbar)."""

    def process_response(self, request, response):
        """Return *response*, with the rendered ticket modal appended when applicable."""
        u = get_user(request)
        # Only authenticated users get the ticket UI.
        if u.is_anonymous() or not u.is_authenticated():
            return response
        # Check for responses where the toolbar can't be inserted.
        content_encoding = response.get('Content-Encoding', '')
        content_type = response.get('Content-Type', '').split(';')[0]
        if any((getattr(response, 'streaming', False),
                'gzip' in content_encoding,
                content_type not in _HTML_TYPES)):
            return response
        # Insert the toolbar in the response.
        content = force_text(response.content, encoding=settings.DEFAULT_CHARSET)
        insert_before = "</body>"
        try: # Python >= 2.7
            pattern = re.escape(insert_before)
            bits = re.split(pattern, content, flags=re.IGNORECASE)
        except TypeError: # Python < 2.7 (re.split has no flags argument there)
            pattern = '(.+?)(%s|$)' % re.escape(insert_before)
            matches = re.findall(pattern, content, flags=re.DOTALL | re.IGNORECASE)
            bits = [m[0] for m in matches if m[1] == insert_before]
            # When the body ends with a newline, there's two trailing groups.
            bits.append(''.join(m[0] for m in matches if m[1] == ''))
        # More than one piece means "</body>" was found at least once.
        if len(bits) > 1:
            vn = request.resolver_match.url_name
            ts = Ticket.objects.filter(viewname=vn).all()
            context = { 'view_name': vn, "tickets": ts }
            # Append the modal markup right before the last "</body>".
            bits[-2] += render_to_string('minibugs/modalpage.html', context)
            response.content = insert_before.join(bits)
            if response.get('Content-Length', None):
                response['Content-Length'] = len(response.content)
        return response
4946600 | from megnet.models import MEGNetModel
from megnet.data.graph import GaussianDistance
from megnet.data.crystal import CrystalGraph
from keras.callbacks import ModelCheckpoint
import numpy as np
import pandas as pd
import json
inputs = pd.read_pickle('./band_gap_data.pkl')

# First 75% of the rows are the training split, the rest the test split.
boundary = int(len(inputs) * 0.75)
epochs = 5
batch_size = 56

Xtrain = inputs.iloc[0:boundary]['structure']
ytrain = inputs.iloc[0:boundary]['band_gap']
Xtest = inputs.iloc[boundary:]['structure']
ytest = inputs.iloc[boundary:]['band_gap']

# Load the pre-fitted band-gap model.
# BUG FIX: the model was previously bound to `model_form` while the loop
# below referenced the undefined name `model`, raising a NameError.
model = MEGNetModel.from_file('./fitted_gap_model.hdf5')

# Compare predicted vs. true band gaps for the first ten training structures.
for i in range(10):
    bg = model.predict_structure(Xtrain[i])
    print(bg, ytrain[i])
6443909 | <gh_stars>10-100
from typing import Dict, Tuple
import torch
import torch.nn as nn
from torecsys.layers import BaseLayer
class FieldAwareFactorizationMachineLayer(BaseLayer):
    """
    Layer class of Field-aware Factorization Machine (FFM).

    Field-aware Factorization Machine is purposed by Yuchin Juan et al, 2016, to calculate element-wise cross feature
    interaction per field of sparse fields by using dot product between field-wise feature tensors.

    :Reference:

    #. `Yuchin Juan et al, 2016. Field-aware Factorization Machines for CTR Prediction
       <https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf>`_.
    """

    @property
    def inputs_size(self) -> Dict[str, Tuple[str, ...]]:
        # Expected input shape labels: (batch, num_fields ** 2, embed_dim).
        return {
            'inputs': ('B', 'N^2', 'E',)
        }

    @property
    def outputs_size(self) -> Dict[str, Tuple[str, ...]]:
        # Output shape labels: (batch, C(num_fields, 2), embed_dim).
        return {
            'inputs': ('B', 'NC2', 'E',)
        }

    def __init__(self,
                 num_fields: int,
                 dropout_p: float = 0.0):
        """
        Initialize FieldAwareFactorizationMachineLayer

        Args:
            num_fields (int): number of inputs' fields
            dropout_p (float, optional): probability of Dropout in FFM. Defaults to 0.0
        """
        super().__init__()
        self.num_fields = num_fields
        self.dropout = nn.Dropout(dropout_p)

    def forward(self, field_emb_inputs: torch.Tensor) -> torch.Tensor:
        """
        Forward calculation of FieldAwareFactorizationMachineLayer

        Args:
            field_emb_inputs (T), shape = (B, N * N, E), data_type = torch.float: field aware embedded features tensors

        Returns:
            T, shape = (B, NC2, E), data_type = torch.float: output of FieldAwareFactorizationMachineLayer
        """
        # Name the inputs tensor for alignment
        field_emb_inputs.names = ('B', 'N', 'E',)

        # initialize list to store tensors temporarily for output
        outputs = []

        # chunk field_emb_inputs into num_fields parts
        # inputs: field_emb_inputs, shape = (B, N * N , E)
        # output: field_emb_inputs, shape = (B, Nx = N, Ny = N, E)
        field_emb_inputs = field_emb_inputs.unflatten('N', (('Nx', self.num_fields,), ('Ny', self.num_fields,),))
        field_emb_inputs.names = None

        # calculate dot-product between e_{i, fj} and e_{j, fi}
        # (element-wise product per embedding dimension, one tensor per field pair)
        # inputs: field_emb_inputs, shape = (B, Nx = N, Ny = N, E)
        # output: output, shape = (B, N = 1, E)
        for i in range(self.num_fields - 1):
            for j in range(i + 1, self.num_fields):
                fij = field_emb_inputs[:, i, j]
                fji = field_emb_inputs[:, j, i]
                output = torch.einsum('ij,ij->ij', fij, fji)
                output.names = ('B', 'E',)
                output = output.unflatten('B', (('B', output.size('B'),), ('N', 1,),))
                outputs.append(output)

        # concat outputs into a tensor
        # inputs: output, shape = (B, N = 1, E)
        # output: outputs, shape = (B, NC2, E)
        outputs = torch.cat(outputs, dim='N')

        # apply dropout
        # inputs: outputs, shape = (B, NC2, E)
        # output: outputs, shape = (B, NC2, E)
        outputs = self.dropout(outputs)

        return outputs
| StarcoderdataPython |
4818651 | <reponame>isaac-ped/demikernel
import seaborn as sns
import pandas as pd
import argparse
import matplotlib.pyplot as plt
import os.path
import numpy as np
webserver_root = '/media/memfs/wiki/'
def plot_hist(files_list, trim_flags=False):
    """Plot the empirical CDF of served-file sizes (log2 bytes) and save it
    as '<basename(files_list)>-size-hist.pdf' in the current directory.

    :param files_list: path to a text file listing one served file per line
    :param trim_flags: if True, each line is '<flag>,<path>' and only the
        second comma-separated field is used as the path
    """
    files = []
    with open(files_list, 'r') as f:
        files = np.array(f.read().splitlines())
    if len(files) == 0:
        print('{} was likely empty?'.format(files_list))
        return
    sizes = []
    for f in files:
        try:
            if trim_flags:
                filepath = os.path.join(webserver_root, f.split(',')[1])
            else:
                filepath = os.path.join(webserver_root, f)
            size = os.path.getsize(filepath)
            # Zero-byte files are skipped so log2 below stays finite.
            if size > 0:
                sizes.append(size)
        except OSError as e:
            print('Could not get size for {}'.format(filepath))
    '''
    hplot = sns.distplot(sizes, bins=1000)
    hplot.set(xlabel='Bytes', ylabel='#files')
    hplot.xaxis.set_major_locator(plt.MaxNLocator(18))
    for item in hplot.get_xticklabels():
        item.set_rotation(45)
    hplot.figure.suptitle('Size distribution of files served')
    filename = os.path.basename(files_list) + '-size-hist.pdf'
    hplot.figure.savefig(filename, format='pdf')
    '''
    # Empirical CDF: sorted sizes on a log2 x-axis vs. cumulative fraction.
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot(np.log2(np.sort(sizes)), np.linspace(0, 1, len(sizes), endpoint=False))
    filename = os.path.basename(files_list) + '-size-hist.pdf'
    fig.savefig(filename, format='pdf')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('files', nargs='+', help='list of URIs')
    parser.add_argument('--trim-flags', action='store_true', dest='trim_flags', default=False,
                        help='remove request type flag from URI list')
    args = parser.parse_args()
    # One CDF plot (PDF file) per input list.
    for f in args.files:
        plot_hist(f, trim_flags=args.trim_flags)
| StarcoderdataPython |
12832022 | # Copyright <NAME> 2021
# Author: <NAME>
"""
Comparison of GP-interpolated X-ray and true structure functions where the GP interpolated
structure functions are computed following the introduction of gaps into lightcurves.
"""
import numpy as np
from matplotlib import pyplot as plt
from simulation_utils import load_sim_data
from structure_function_utils import compute_gp_structure_function
TIMINGS_FILE = '../processed_data/xray_simulations/x_ray_sim_times.pickle'
GAPPED_FILE = 'sim_curves/xray_lightcurves.dat'
GROUND_TRUTH_FILE = 'sim_curves/xray_lightcurves_no_gaps.dat'

resolution = 5.3
nsims = 1000 # number of simulated curves i.e length of gapped_file
kernel = 'Matern' # ['Matern', 'RQ']
f_plot = False  # set True to also write comparison figures

if __name__ == '__main__':
    # Map the kernel choice to the tag used in the GP sample filenames.
    if kernel == 'Matern':
        tag = 'Matern_12'
    else:
        tag = 'Rational Quadratic'
    # Load the times for gap points, times for full curves, count rates for gap points and count rates for full curves
    # Matrix because second dimension corresponds to nsims.
    time, test_times, gapped_count_rates_matrix, ground_truth_count_rates_matrix = load_sim_data(TIMINGS_FILE,
                                                                                                 GAPPED_FILE,
                                                                                                 GROUND_TRUTH_FILE)
    # Process the first 15 simulated lightcurves.
    for i in range(0, 15):
        # file handle for GP lightcurve
        handle = f'SF_xray_samples_{tag} Kernel_iteration_{i}.txt'
        # Column vectors: gapped, ground-truth, and GP-interpolated count rates.
        gapped_count_rates = np.reshape(gapped_count_rates_matrix[i, :], (-1, 1))
        count_rates = np.reshape(ground_truth_count_rates_matrix[i, :], (-1, 1))
        gp_count_rates = np.reshape(np.loadtxt(fname=f'SF_samples/xray/{handle}'), (-1, 1))
        # Structure functions for all three versions of the curve.
        gapped_tao_plot, gapped_structure_function_vals = compute_gp_structure_function(gapped_count_rates, time, resolution=resolution)
        ground_truth_tao_plot, ground_truth_structure_function_vals = compute_gp_structure_function(count_rates, test_times, resolution=resolution)
        gp_tao_plot, gp_structure_function_vals = compute_gp_structure_function(gp_count_rates, test_times, resolution=resolution)
        # Persist SF values for later analysis/plotting.
        np.savetxt(f'saved_sf_values/xray/_gapped_tao_plot_{i}.txt', gapped_tao_plot, fmt='%.15f')
        np.savetxt(f'saved_sf_values/xray/gapped_structure_function_vals_{i}.txt', gapped_structure_function_vals, fmt='%.15f')
        np.savetxt(f'saved_sf_values/xray/{kernel}_gp_tao_plot_{i}.txt', gp_tao_plot, fmt='%.15f')
        np.savetxt(f'saved_sf_values/xray/ground_truth_structure_function_vals_{i}.txt', ground_truth_structure_function_vals, fmt='%.15f')
        np.savetxt(f'saved_sf_values/xray/ground_truth_tao_plot_{i}.txt', ground_truth_tao_plot, fmt='%.15f')
        np.savetxt(f'saved_sf_values/xray/{kernel}_gp_structure_function_vals_{i}.txt', gp_structure_function_vals, fmt='%.15f')
        if f_plot:
            # Gapped vs. ground-truth structure functions.
            fig, ax = plt.subplots(1)
            plt.scatter(gapped_tao_plot, gapped_structure_function_vals, s=10, marker='+', label='Gapped')
            plt.scatter(ground_truth_tao_plot, ground_truth_structure_function_vals, s=10, marker='+', label='Ground Truth')
            plt.xscale('log')
            plt.yscale('log')
            plt.xlabel(r'$\tau$' + ' (days)')
            plt.ylabel('SF')
            plt.xlim([10, 700])
            plt.title('X-ray Gapped Structure Function')
            plt.tight_layout()
            plt.legend()
            plt.savefig(f'SF_sims_figures/xray/gapped_structure_function_{i}')
            plt.close()
            # GP-interpolated vs. ground-truth structure functions.
            fig, ax = plt.subplots(1)
            plt.scatter(gp_tao_plot, gp_structure_function_vals, s=10, marker='+', label='GP')
            plt.scatter(ground_truth_tao_plot, ground_truth_structure_function_vals, s=10, marker='+', label='Ground Truth')
            plt.xscale('log')
            plt.yscale('log')
            plt.xlabel(r'$\tau$' + ' (days)')
            plt.ylabel('SF')
            plt.xlim([10, 700])
            plt.title(f'X-ray GP {kernel} Structure Function')
            plt.tight_layout()
            plt.legend()
            plt.savefig(f'SF_sims_figures/xray/gp_{kernel}_structure_function_{i}')
            plt.close()
| StarcoderdataPython |
6662493 | import collections
class TreeNode:
    """Node of a tree with binary links (left/right) plus optional n-ary children."""

    def __init__(self, key, value=None):
        # BUG FIX: the original __init__ took no arguments but read the
        # undefined names `key`/`value`, and was called as TreeNode(3).
        self.key = key
        self.value = value
        self.left = None
        self.right = None
        self.children = []  # extra n-ary children, in addition to left/right


# BFS O(n): visits every node exactly once.
def levelorder(root):
    """Breadth-first traversal: print and return the node keys in level order.

    Returns an empty list for an empty tree.
    """
    if root is None:
        return []
    visited = []
    # BUG FIX: deque uses append/popleft (there is no `push`), and the
    # original `while q is not None` condition never became false.
    q = collections.deque([root])
    while q:
        node = q.popleft()
        print(node.key)
        visited.append(node.key)
        if node.left is not None:
            q.append(node.left)
        if node.right is not None:
            q.append(node.right)
        # n-ary children (empty list for purely binary trees)
        for child in node.children:
            q.append(child)
    return visited


if __name__ == '__main__':
    # root = [3,9,20,None,None,15,7]
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    # BUG FIX: the original called the undefined method preorderTraversal.
    print(levelorder(root))
3592503 | import torch
from torch import nn
class Swish(nn.Module):
    """Swish activation with a learnable per-feature slope:
    ``f(x) = x * sigmoid(scale * x)``."""

    def __init__(self, num_features):
        super().__init__()
        self.num_features = num_features
        # One learnable slope per feature, initialised to 1.
        self.scale = nn.Parameter(torch.ones(num_features))

    def forward(self, x):
        gate = torch.sigmoid(self.scale * x)
        return x * gate

    def extra_repr(self):
        return 'num_features={}'.format(self.num_features)
1656496 | """
This unit test checks SBS server API.
"""
import asyncio
import asynctest
import unittest
import unittest.mock
from unittest.mock import patch
from adsb.sbs.client import Client
from adsb.sbs.server import Server
from adsb.sbs.protocol import logger as prot_logger
from adsb.sbs.message import SBSMessage
TEST_MSG = (
b"MSG,3,1,1,7C79B7,1,2017/03/25,10:41:45.365,2017/03/25,10:41:45.384,,2850,,,-34.84658,138.67962,,,,,,\r\n"
)
class SBSServerTestCase(asynctest.TestCase):
async def setUp(self):
    # Port 0 lets the OS pick a free port, so tests never collide.
    self.server = Server(host="localhost", port=0, loop=self.loop)
    await self.server.start()
async def tearDown(self):
    # Shut the server down so the next test starts from a clean state.
    await self.server.stop()
async def test_server_send_message(self):
    """Sending with no connected peers must raise."""
    # Check exception is raised when send is called and no peers are
    # present.
    with self.assertRaises(Exception) as cm:
        self.server.send_message(TEST_MSG)
    self.assertIn("Server can't send msg, no peers available", str(cm.exception))
async def test_server_send_to_specific_peer(self):
    """ Check sending messages to specific peers """
    mock_handler = unittest.mock.Mock()
    client = Client(
        host="localhost", port=self.server.port, on_msg_callback=mock_handler
    )
    await client.start()
    # allow time for server to register connection
    await asyncio.sleep(0.01)
    self.assertEqual(len(self.server.protocols), 1)
    # check msg can be sent to a specific peer
    # (grab the address of the single connected peer)
    remote_addr = None
    for _remote_addr in self.server.protocols:
        remote_addr = _remote_addr
    self.assertIsInstance(remote_addr, tuple)
    self.server.send_message(TEST_MSG, peer=remote_addr)
    # check an exception is raised when sending to an invalid peer
    # At least one peer must be present to test this case.
    with self.assertRaises(Exception) as cm:
        self.server.send_message(TEST_MSG, peer="invalid")
    self.assertIn("Server can't send msg to non-existant peer", str(cm.exception))
    # allow time for msg to propagate to client
    await asyncio.sleep(0.01)
    self.assertEqual(mock_handler.call_count, 1)
    # the client callback should have received a parsed SBSMessage
    name, args, kwargs = mock_handler.mock_calls[0]
    self.assertIsInstance(args[0], SBSMessage)
    await client.stop()
    # allow time for server to register disconnection
    await asyncio.sleep(0.01)
    self.assertEqual(len(self.server.protocols), 0)
async def test_server_broadcast(self):
""" Check broadcasting messages to many peers """
# check msg can be broadcast to all peers
# This test requires multiple clients
mock_handler_1 = unittest.mock.Mock()
client1 = Client(
host="localhost", port=self.server.port, on_msg_callback=mock_handler_1
)
await client1.start()
mock_handler_2 = unittest.mock.Mock()
client2 = Client(
host="localhost", port=self.server.port, on_msg_callback=mock_handler_2
)
await client2.start()
# allow time client and server to register connection
await asyncio.sleep(0.01)
self.assertEqual(len(self.server.protocols), 2)
self.server.send_message(TEST_MSG)
# allow time for msg to propogate to client
await asyncio.sleep(0.02)
self.assertEqual(mock_handler_1.call_count, 1)
name, args, kwargs = mock_handler_1.mock_calls[0]
self.assertIsInstance(args[0], SBSMessage)
self.assertEqual(mock_handler_2.call_count, 1)
name, args, kwargs = mock_handler_2.mock_calls[0]
self.assertIsInstance(args[0], SBSMessage)
await client1.stop()
await client2.stop()
# allow time client and server to register disconnection
await asyncio.sleep(0.01)
self.assertEqual(len(self.server.protocols), 0)
async def test_server_receive_message(self):
""" Check unexpected messages received from peers raise a warning """
mock_handler = unittest.mock.Mock()
client = Client(
host="localhost", port=self.server.port, on_msg_callback=mock_handler
)
await client.start()
# allow time client and server to register connection
await asyncio.sleep(0.01)
self.assertEqual(len(self.server.protocols), 1)
with patch.object(prot_logger, "warning") as mock_warn:
client.protocol.transport.write(b"123")
# allow time for msg to propagate from client to server
await asyncio.sleep(0.01)
self.assertEqual(mock_warn.call_count, 1)
# confirm warning was emitted as expected
name, args, kwargs = mock_warn.mock_calls[0]
self.assertIn("Received unexpected data from client", args[0])
await client.stop()
# allow time client and server to register disconnection
await asyncio.sleep(0.01)
self.assertEqual(len(self.server.protocols), 0)
| StarcoderdataPython |
9664556 | from allauth.account.models import EmailAddress
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class Scope(object):
    """OAuth2 scope names requested from the SSO service."""
    PROFILE = 'profile'
class DitSSOAccount(ProviderAccount):
    """Social-account wrapper exposing the profile link kept in extra_data."""

    def get_profile_url(self):
        # extra_data is the raw profile payload stored at login time.
        extra = self.account.extra_data
        return extra.get('link')
class DitSSOProvider(OAuth2Provider):
    """django-allauth OAuth2 provider for the DIT single-sign-on service."""

    id = 'ditsso'
    name = 'DitSSO'
    account_class = DitSSOAccount

    def get_default_scope(self):
        return [Scope.PROFILE]

    def extract_uid(self, response):
        """Return the remote user id used as the social-account UID."""
        uid = response['id']
        return uid

    def extract_email_addresses(self, response):
        """Map the profile email (if present) onto a verified EmailAddress."""
        email = response.get('email', None)
        if not email:
            # Avoid creating an EmailAddress instance with a None address.
            return []
        return [EmailAddress(email=email, verified=True)]

    def extract_common_fields(self, data):
        """Build the dict of common user fields from the profile payload."""
        common_data = {}
        first_name = data.get('first_name')  # FIX: was data.get('first_naname')
        if first_name:
            common_data['first_name'] = first_name
        last_name = data.get('last_name')
        if last_name:
            common_data['last_name'] = last_name
        email = data.get('email')
        if email:
            common_data['email'] = email
        # Fall back to "first_last", then to the local part of the email.
        username = data.get('username', '_'.join([x for x in [first_name, last_name] if x]))
        if not username and email:
            username = email.split('@').pop(0)
        if username:
            common_data['username'] = username
        return common_data
provider_classes = [DitSSOProvider]
| StarcoderdataPython |
3277834 | <filename>gui_utils/training.py
import os
import torch
import torch.nn as nn
import torch.optim as optim
import shutil
import threading
from torch.utils.data import DataLoader
from PyQt5 import QtWidgets
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from gui_utils.auxilary_utils import GetFolderWidget
from training_utils.dataset import ROIDataset
from training_utils.training import train_model, CombinedLoss, accuracy, iou
from models.rcnn import RecurrentCNN
from models.cnn_classifier import Classifier
from models.cnn_segmentator import Segmentator
class TrainingParameterWindow(QtWidgets.QDialog):
    """
    Dialog that collects the data folders needed to start model training.

    Parameters
    ----------
    mode : str
        One of 'all in one' or 'sequential'.
    parent : MainWindow(QtWidgets.QMainWindow)
        Owning window.
    """

    def __init__(self, mode, parent=None):
        self.mode = mode
        self.parent = parent
        super().__init__(parent)
        self.setWindowTitle('peakonly: models')

        label_train = QtWidgets.QLabel()
        label_train.setText('Choose a folder with train data:')
        self.train_folder_getter = GetFolderWidget(os.path.join(os.getcwd(), 'data', 'train'), self)

        label_val = QtWidgets.QLabel()
        label_val.setText('Choose a folder with validation data:')
        self.val_folder_getter = GetFolderWidget(os.path.join(os.getcwd(), 'data', 'val'), self)

        button_continue = QtWidgets.QPushButton('Continue')
        button_continue.clicked.connect(self._continue)

        layout = QtWidgets.QVBoxLayout()
        for widget in (label_train, self.train_folder_getter,
                       label_val, self.val_folder_getter, button_continue):
            layout.addWidget(widget)
        self.setLayout(layout)

    def _continue(self):
        """Validate the selection and open the training window."""
        try:
            window = TrainingMainWindow(self.mode,
                                        self.train_folder_getter.get_folder(),
                                        self.val_folder_getter.get_folder(),
                                        self.parent)
            window.show()
            self.close()
        except ValueError:
            # GetFolderWidget raises ValueError on an invalid selection.
            warning = QtWidgets.QMessageBox(self)
            warning.setText("Check parameters. Something is wrong!")
            warning.setIcon(QtWidgets.QMessageBox.Warning)
            warning.exec_()
class TrainingMainWindow(QtWidgets.QDialog):
    """
    Training Main Window, where the training process occurs.

    Builds the data loaders, model(s), optimizers and loss functions for the
    chosen mode and embeds one TrainingMainWidget per model.

    Parameters
    ----------
    mode : str
        One of 'all in one' or 'sequential'.  NOTE(review): any other value
        produces an empty dialog -- confirm callers only pass these two.
    train_folder : str
        A path to the folder with training data.
    val_folder : str
        A path to the folder with validation data.
    parent : MainWindow(QtWidgets.QMainWindow)
        Owning window.
    """
    def __init__(self, mode, train_folder, val_folder, parent):
        self.mode = mode
        self.parent = parent
        super().__init__(parent)
        main_layout = QtWidgets.QVBoxLayout()
        # to do: device should be adjustable parameter
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        if self.mode == 'all in one':
            # create data loaders (batch_size=1: ROIs have variable length,
            # gradients are accumulated over 64 samples instead)
            train_dataset = ROIDataset(path=train_folder, device=device, balanced=True)
            train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
            val_dataset = ROIDataset(path=val_folder, device=device, balanced=False)
            val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)
            # create model
            model = RecurrentCNN().to(device)
            optimizer = optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=15, eta_min=1e-6)
            label_criterion = nn.CrossEntropyLoss()
            integration_criterion = CombinedLoss([0.4, 0.2])
            intersection_criterion = CombinedLoss([0.1, 2])
            # add training widget
            main_layout.addWidget(TrainingMainWidget(train_loader, val_loader, model, optimizer, accuracy, iou,
                                                     scheduler, label_criterion, integration_criterion,
                                                     intersection_criterion, 64, self))
        elif self.mode == 'sequential':
            # create data loaders (ROIs interpolated to a fixed length of 256,
            # so real mini-batches of 64 are possible)
            batch_size = 64
            train_dataset = ROIDataset(path=train_folder, device=device, interpolate=True, length=256, balanced=True)
            train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
            val_dataset = ROIDataset(path=val_folder, device=device, interpolate=True, length=256, balanced=False)
            val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
            # classifier (stage 1): labels only
            classifier = Classifier().to(device)
            optimizer = optim.Adam(params=classifier.parameters(), lr=1e-3)
            scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=20, eta_min=1e-6)
            label_criterion = nn.CrossEntropyLoss()
            main_layout.addWidget(TrainingMainWidget(train_loader, val_loader, classifier, optimizer, accuracy, None,
                                                     scheduler, label_criterion, None, None, 1, self))
            # segmentator (stage 2): integration/intersection masks only
            segmentator = Segmentator().to(device)
            optimizer = optim.Adam(params=segmentator.parameters(), lr=1e-2)
            scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=15, eta_min=1e-6)
            integration_criterion = CombinedLoss([0.4, 0.2])
            intersection_criterion = CombinedLoss([0.1, 2])
            main_layout.addWidget(TrainingMainWidget(train_loader, val_loader, segmentator, optimizer, None, iou,
                                                     scheduler, None, integration_criterion,
                                                     intersection_criterion, 1, self))
        self.setLayout(main_layout)
class TrainingMainWidget(QtWidgets.QWidget):
    """
    Training Main Widget, where the training process of one model occurs.

    Shows live loss / classification-score / segmentation-score plots and
    buttons to restart, save weights and launch training in a worker thread.

    Parameters
    ----------
    train_loader, val_loader : DataLoader
        Data loaders for training and validation.
    model : nn.Module
        Model to train.
    optimizer : torch.optim.Optimizer
        Optimizer bound to ``model``'s parameters.
    classification_metric, segmenatation_metric : callable or None
        Score functions (``None`` disables the respective plot).
        NOTE: the misspelled parameter name is kept for call compatibility.
    scheduler : learning-rate scheduler or None
    label_criterion, integration_criterion, intersection_criterion :
        Loss terms passed through to ``train_model`` (``None`` to disable).
    accumulation : int
        Number of batches over which gradients are accumulated.
    parent : QDialog
        Owning dialog.
    """

    def __init__(self, train_loader, val_loader, model, optimizer, classification_metric, segmenatation_metric,
                 scheduler, label_criterion, integration_criterion, intersection_criterion, accumulation, parent):
        self.parent = parent
        super().__init__(parent)
        self.setWindowTitle('peakonly: training')
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.model = model
        self.optimizer = optimizer
        self.classification_metric = classification_metric
        self.segmentation_metric = segmenatation_metric
        self.scheduler = scheduler
        self.label_criterion = label_criterion
        self.integration_criterion = integration_criterion
        self.intersection_criterion = intersection_criterion
        self.accumulation = accumulation
        self._init_ui()

    def _init_ui(self):
        """Build the plot canvas, parameter inputs and control buttons."""
        # canvas layout (with 3 subplots)
        self.figure = plt.figure()
        self.loss_ax = self.figure.add_subplot(131)
        self.loss_ax.set_title('Loss function')
        self.classification_score_ax = self.figure.add_subplot(132)
        self.classification_score_ax.set_title('Classification score')
        self.segmentation_score_ax = self.figure.add_subplot(133)
        self.segmentation_score_ax.set_title('Segmentation score')
        self.canvas = FigureCanvas(self.figure)
        toolbar = NavigationToolbar(self.canvas, self)
        canvas_layout = QtWidgets.QVBoxLayout()
        canvas_layout.addWidget(toolbar)
        canvas_layout.addWidget(self.canvas)
        self.figure.tight_layout()
        # training parameters layout
        parameters_layout = QtWidgets.QVBoxLayout()
        empty_label = QtWidgets.QLabel()  # spacer pushing inputs to the bottom
        number_of_epochs_label = QtWidgets.QLabel()
        number_of_epochs_label.setText('Number of epochs:')
        self.number_of_epochs_getter = QtWidgets.QLineEdit(self)
        self.number_of_epochs_getter.setText('100')
        learning_rate_label = QtWidgets.QLabel()
        learning_rate_label.setText('Learning rate:')
        self.learning_rate_getter = QtWidgets.QLineEdit(self)
        self.learning_rate_getter.setText('1e-3')
        parameters_layout.addWidget(empty_label, 80)
        parameters_layout.addWidget(number_of_epochs_label, 5)
        parameters_layout.addWidget(self.number_of_epochs_getter, 5)
        parameters_layout.addWidget(learning_rate_label, 5)
        parameters_layout.addWidget(self.learning_rate_getter, 5)
        # buttons layout
        buttons_layout = QtWidgets.QHBoxLayout()
        restart_button = QtWidgets.QPushButton('Restart')
        restart_button.clicked.connect(self.restart)
        buttons_layout.addWidget(restart_button)
        save_weights_button = QtWidgets.QPushButton('Save weights')
        save_weights_button.clicked.connect(self.save_weights)
        buttons_layout.addWidget(save_weights_button)
        run_training_button = QtWidgets.QPushButton('Run training')
        run_training_button.clicked.connect(self.run_training)
        buttons_layout.addWidget(run_training_button)
        # main layouts
        upper_layout = QtWidgets.QHBoxLayout()
        upper_layout.addLayout(canvas_layout, 85)
        upper_layout.addLayout(parameters_layout, 15)
        main_layout = QtWidgets.QVBoxLayout()
        main_layout.addLayout(upper_layout)
        main_layout.addLayout(buttons_layout)
        self.setLayout(main_layout)

    def restart(self):
        """Clear the plots and replace the model with a fresh instance."""
        # to do: change restart (problem with optimizer, etc.)
        # NOTE(review): the optimizer/scheduler still reference the *old*
        # model's parameters after this, and the fresh model is not moved to
        # the training device -- confirm before relying on restart.
        self.loss_ax.clear()
        self.loss_ax.set_title('Loss function')
        self.classification_score_ax.clear()
        self.classification_score_ax.set_title('Classification score')
        self.segmentation_score_ax.clear()
        # FIX: the original set this title on classification_score_ax,
        # leaving the cleared segmentation axis untitled.
        self.segmentation_score_ax.set_title('Segmentation score')
        self.figure.tight_layout()
        self.canvas.draw()
        self.model = self.model.__class__()

    def save_weights(self):
        """Open the dialog to copy the current checkpoint to disk."""
        subwindow = SaveModelWindow(self.model, self)
        subwindow.show()

    def run_training(self):
        """Parse epoch/LR inputs and launch train_model in a worker thread."""
        try:
            number_of_epoch = int(self.number_of_epochs_getter.text())
            learning_rate = float(self.learning_rate_getter.text())
            # Apply the requested learning rate to every parameter group.
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = learning_rate
            thread = threading.Thread(target=train_model, args=(self.model, self.train_loader, self.val_loader,
                                                                self.optimizer, number_of_epoch, 10,
                                                                self.classification_metric, self.segmentation_metric,
                                                                self.scheduler, self.label_criterion,
                                                                self.integration_criterion, self.intersection_criterion,
                                                                self.accumulation, self.loss_ax,
                                                                self.classification_score_ax,
                                                                self.segmentation_score_ax,
                                                                self.figure, self.canvas))
            thread.start()
        except ValueError:
            # popup window with exception (non-numeric epoch count / LR)
            msg = QtWidgets.QMessageBox(self)
            msg.setText("Check parameters. Something is wrong!")
            msg.setIcon(QtWidgets.QMessageBox.Warning)
            msg.exec_()
class SaveModelWindow(QtWidgets.QDialog):
    """Dialog for copying the freshly trained weights to a user-chosen file."""

    def __init__(self, model, parent):
        self.parent = parent
        super().__init__(parent)
        self.model = model

        folder_label = QtWidgets.QLabel()
        folder_label.setText('Choose a folder where to save:')
        self.folder_getter = GetFolderWidget(os.path.join(os.getcwd(), 'data', 'weights'), self)

        name_label = QtWidgets.QLabel()
        name_label.setText('Set a name of file: ')
        self.name_getter = QtWidgets.QLineEdit(self)
        self.name_getter.setText('model.pt')

        save_button = QtWidgets.QPushButton('Save')
        save_button.clicked.connect(self.save)

        layout = QtWidgets.QVBoxLayout()
        for widget in (folder_label, self.folder_getter,
                       name_label, self.name_getter, save_button):
            layout.addWidget(widget)
        self.setLayout(layout)

    def save(self):
        """Copy the temporary checkpoint (named after the model class) to the target."""
        destination = os.path.join(self.folder_getter.get_folder(), self.name_getter.text())
        source = os.path.join('data/tmp_weights', self.model.__class__.__name__)
        shutil.copyfile(source, destination)
| StarcoderdataPython |
4889763 | #
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import sys
import datetime
from pykafka import KafkaClient
import endorser_util
def getOrdererList(context):
    """Return the names of all composition containers that are orderers
    (i.e. whose container name contains the substring 'orderer')."""
    return [
        container.containerName
        for container in context.composition.containerDataList
        if 'orderer' in container.containerName
    ]
def getKafkaBrokerList(context, orderer):
    """Return the kafka broker list configured on the given orderer.

    Reads CONFIGTX_ORDERER_KAFKA_BROKERS (format "[host1,host2,...]") from the
    first matching container and strips the surrounding brackets.
    """
    env_value = ""
    for container in context.composition.containerDataList:
        if orderer in container.containerName:
            env_value = container.getEnv('CONFIGTX_ORDERER_KAFKA_BROKERS')
            break
    # Be sure that kafka broker list returned is not an empty string
    assert env_value != "", "There are no kafka brokers set in the orderer environment"
    return env_value[1:-1].split(',')
def getKafkaIPs(context, kafkaList):
    """Map "name:port" kafka entries to "<container-ip>:9092" endpoints."""
    composition = context.composition
    endpoints = []
    for entry in kafkaList:
        name = entry.split(':')[0]
        container = composition.getContainerFromName(name, composition.containerDataList)
        endpoints.append("{0}:9092".format(container.ipAddress))
    return endpoints
def getKafkaTopic(kafkaBrokers=["0.0.0.0:9092"], channel=endorser_util.SYS_CHANNEL_ID):
    """Look up (or implicitly create) the kafka topic backing ``channel``.

    Returns a tuple ``(topic, isr_set)`` where ``isr_set`` is the list of
    broker names ("kafkaN") currently in the in-sync-replica set of
    partition 0.
    """
    kafkas = ",".join(kafkaBrokers)
    client = KafkaClient(hosts=kafkas)
    if channel is None and client.topics != {}:
        # FIX: dict.keys() is a non-indexable view on Python 3; the original
        # `client.topics.keys()[0]` raised TypeError on this path.
        topic = client.topics[next(iter(client.topics))]
    elif channel is None:
        # No topics yet and no channel given: fall back to the test channel.
        topic = client.topics[endorser_util.TEST_CHANNEL_ID]
    else:
        # FIX: the original if/elif chain left `topic` unbound when a channel
        # was given but absent from a non-empty topic dict; pykafka's topic
        # access covers all channel-given cases uniformly.
        topic = client.topics[channel]
    # Print brokers in ISR
    isr_set = ["kafka{}".format(broker.id) for broker in topic.partitions[0].isr]
    print("ISR: {}".format(isr_set))
    return topic, isr_set
def getKafkaPartitionLeader(kafkaBrokers=["0.0.0.0:9092"], channel=endorser_util.SYS_CHANNEL_ID):
    """Return the name ("kafkaN") of the broker leading partition 0 of the
    topic that backs ``channel``."""
    topic, _isr_set = getKafkaTopic(kafkaBrokers, channel)
    leader_id = topic.partitions[0].leader.id
    leader = "kafka{0}".format(leader_id)
    print("current leader: {}".format(leader))
    return leader
def getNonISRKafkaBroker(kafkaBrokers=["0.0.0.0:9092"], channel=endorser_util.SYS_CHANNEL_ID):
    """Return the name of the last broker that is NOT in the in-sync-replica
    set of partition 0, or None when every broker is in sync."""
    topic, isr_set = getKafkaTopic(kafkaBrokers, channel)
    kafka = None
    for kafkaNum in range(len(kafkaBrokers)):
        # FIX: the original tested `str(kafkaNum) not in topic.partitions[0].isr`,
        # comparing a string against pykafka broker *objects* -- that never
        # matches, so an in-sync broker could be returned. isr_set holds the
        # "kafkaN" names built by getKafkaTopic.
        if "kafka{0}".format(kafkaNum) not in isr_set:
            kafka = "kafka{0}".format(kafkaNum)
    return kafka
def generateMessageEnvelope():
    """Build a common_pb2.Envelope wrapping an ENDORSER_TRANSACTION payload on
    the test channel, time-stamped for uniqueness.

    NOTE(review): ``common_pb2`` is not imported in this module's visible
    import block -- confirm it is provided elsewhere, otherwise this raises
    NameError at call time.
    """
    channel_header = common_pb2.ChannelHeader(channel_id=endorser_util.TEST_CHANNEL_ID,
                                              type=common_pb2.ENDORSER_TRANSACTION)
    header = common_pb2.Header(channel_header=channel_header.SerializeToString(),
                               signature_header=common_pb2.SignatureHeader().SerializeToString())
    # UTC timestamp makes every generated payload unique.
    payload = common_pb2.Payload(header=header,
                                 data=str.encode("Functional test: {0}".format(datetime.datetime.utcnow())))
    envelope = common_pb2.Envelope(payload=payload.SerializeToString())
    return envelope
| StarcoderdataPython |
#!/usr/bin/env python
# Public API of the package: the two tradfri helper modules re-exported by
# `from <package> import *`.
__all__ = ['tradfriStatus', 'tradfriActions']
| StarcoderdataPython |
3203262 | # Copyright 2018 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on 08.04.2016
@author: meinel
'''
from F2x.parser import tree
class VarDecl(tree.VarDecl):
    """
    A variable declaration.
    The following properties are available:
    - name: The symbolic name of the variable.
    - type: The C type of this variable. This might be a basic type (REAL, INTEGER, LOGICAL) or TYPE(C) for any
            other type like arrays, derived types or strings.
    - pytype, cstype: The type to be used by Python or C# respectively.
    - intent: May be 'IN', 'OUT' or 'INOUT'.
    - getter: This indicates whether the generated getter should be a 'function' or 'subroutine'.
    - setter (opt): This indicates whether a 'subroutine' should be generated as setter.
    - ftype (opt): The name of the derived type.
    - strlen (opt): The length of the string.
    - kind (opt): The kind specifier if available.
    - dynamic (opt): Indicates whether the variable is 'ALLOCATABLE' or a 'POINTER'.
    - dims (opt): For an array contains a list with the sizes per dimension.
    """

    # Mapping from Fortran intrinsic type names to ctypes / C# types.
    _PYTYPES = {
        "REAL": "ctypes.c_double",
        "INTEGER": "ctypes.c_int",
        "LOGICAL": "ctypes.c_bool",
        "TYPE(C_PTR)": "ctypes.c_void_p",
    }
    _CSTYPES = {
        "REAL": "Double",
        "INTEGER": "Int32",
        "LOGICAL": "Int32",
        "TYPE(C_PTR)": "IntPtr",
    }

    def _init_children(self):
        """Populate the property dict from the declaration's AST node.

        Uses nested try/except ValueError chains because select1() raises
        ValueError when a selector does not match exactly one node.
        """
        self["name"] = self._ast.select1("name").tail[0]
        # Identify FORTRAN type and store properties accordingly:
        # derived type -> fixed-length string -> assumed-length string ->
        # intrinsic type (in that order of preference).
        full_spec = self._ast.parent().parent()
        type_spec = full_spec.select1("declaration_type_spec")
        try:
            self["ftype"] = type_spec.select1("derived_type_spec name").tail[0]
            self["type"] = "TYPE(C_PTR)"
            self["getter"] = "function"
            self["dynamic"] = False
        except ValueError:
            try:
                # CHARACTER(LEN=<n>): fixed-length string.
                self["strlen"] = int(type_spec.select1("char_selector int_literal_constant").tail[0])
                self["intent"] = "IN"
                self["type"] = "TYPE(C_PTR)"
                self["pytype"] = "ctypes.c_char_p"
                self["cstype"] = "String"
                self["getter"] = "subroutine"
                self["setter"] = "subroutine"
            except ValueError:
                try:
                    # CHARACTER(LEN=*) or CHARACTER(LEN=:): deferred length.
                    self["strlen"] = type_spec.select1("char_selector /(\*|:)/")
                    self["intent"] = "IN"
                    self["type"] = "TYPE(C_PTR)"
                    self["pytype"] = "ctypes.c_char_p"
                    self["cstype"] = "String"
                    self["getter"] = "subroutine"
                    self["setter"] = "subroutine"
                except ValueError:
                    self["type"] = type_spec.select1("intrinsic_type_kind").tail[0]
                    self["getter"] = "function"
                    self["setter"] = "subroutine"
        # self._prefix is set by the tree.VarDecl base class (selector prefix
        # that distinguishes plain declarations from type components).
        for attr in full_spec.select(self._prefix + "attr_spec"):
            if 'ALLOCATABLE' in attr.tail:
                self["dynamic"] = 'ALLOCATABLE'
            elif 'POINTER' in attr.tail:
                self["dynamic"] = 'POINTER'
        # Identify array dimensions (the spec may sit on the entity or on the
        # full declaration).
        for ast in (self._ast, full_spec):
            dim_nodes = ast.select(self._prefix + "array_spec array_spec_element")
            if not dim_nodes:
                continue
            dims = []
            for node in dim_nodes:
                dim = node.select("int_literal_constant")
                if dim:
                    dims.append(dim[0].tail[0])
                    continue
                dim = node.select("part_ref")
                if dim:
                    dims.append(dim[0].tail[0])
                    break
                # Neither a literal nor a named extent: unknown size.
                dims.append(0)
            if dims:
                self["dims"] = dims
        # Arrays (except strings) get no setter.
        if "dims" in self \
                and "strlen" not in self:
            if "setter" in self:
                del self["setter"]
        if "pytype" not in self \
                and self["type"].upper() in self._PYTYPES:
            self["pytype"] = self._PYTYPES[self["type"].upper()]
        if "cstype" not in self \
                and self["type"].upper() in self._CSTYPES:
            self["cstype"] = self._CSTYPES[self["type"].upper()]
        # KIND may be a literal (KIND=8) or a named constant (KIND=dp).
        try:
            kind_selector = type_spec.select1("kind_selector int_literal_constant")
            self["kind"] = int(kind_selector.tail[0])
        except ValueError:
            try:
                kind_selector = type_spec.select1("kind_selector part_ref")
                self["kind"] = kind_selector.tail[0]
            except ValueError:
                pass
        try:
            intent_spec = type_spec.parent().select1("intent_spec")
            self["intent"] = intent_spec.tail[0]
        except ValueError:
            # Default intent when none is declared.
            self["intent"] = 'IN'
        # No setter for PARAMETERs
        if "setter" in self \
                and len(full_spec.select("attr_spec /PARAMETER/")) > 0:
            del self["setter"]

    def with_intent(self, intent):
        """Override the intent and return self (fluent helper)."""
        self["intent"] = intent
        return self
class TypeDef(tree.TypeDef):
    """A derived-type definition: name, PUBLIC flag and component fields."""

    def _init_children(self):
        self["name"] = self._ast.select1("derived_type_stmt name").tail[0]
        try:
            self["public"] = (self._ast.select1("access_spec").tail[0].upper() == 'PUBLIC')
        except ValueError:
            # No access_spec present: default to private.
            self["public"] = False
        self["fields"] = [
            VarDecl(decl, 'component_')  # See documentation of VarDecl.__init__
            for decl in self._ast.select("component_decl")
        ]
        # Type components carry no INTENT.
        for field in self["fields"]:
            del field["intent"]
class SubDef(tree.SubDef):
    """A SUBROUTINE definition: name plus ordered dummy-argument VarDecls."""

    _PREFIX = "subroutine"

    def _init_children(self):
        self["name"] = self._ast.select(self._PREFIX + "_stmt name")[0].tail[0]
        # Two-stage argument extraction:
        # First, identify all variables declared and the dummy argument list.
        dummy_args = [arg.tail[0] for arg in self._ast.select("dummy_arg name")]
        var_specs = dict(
            (argdecl.select1("name").tail[0], VarDecl(argdecl))
            for argdecl in self._ast.select("entity_decl")
        )
        # Fill up self["args"] based on dummy argument list order.
        self["args"] = [var_specs[argname] for argname in dummy_args]
        return var_specs  # to be re-used in child classes.
class FuncDef(SubDef):
    """A FUNCTION definition: like SubDef, plus the return-value VarDecl."""

    _PREFIX = "function"

    def _init_children(self):
        var_specs = super(FuncDef, self)._init_children()
        # Capture return type of function for return value: explicit
        # RESULT(name) first, then the <name>_VALUE convention, finally a
        # result variable named like the function itself.
        res_name = self._ast.select("result_name name")
        if res_name:
            self["ret"] = var_specs[res_name[0].tail[0]]
        else:
            try:
                self["ret"] = var_specs[self["name"] + "_VALUE"]
            except KeyError:
                self["ret"] = var_specs[self["name"]]
        # Array results are returned through an OUT argument of a wrapper
        # subroutine rather than as a function result.
        if "dims" in self["ret"]:
            self["ret"]["getter"] = "subroutine"
            self["ret"]["intent"] = "OUT"
class Module(tree.Module):
    """AST wrapper for a Fortran MODULE: collects its name, USE statements,
    public derived types / globals, and the methods selected for export."""

    def _init_children(self):
        self["name"] = self._ast.select1("module_stmt name").tail[0]
        self["uses"] = [use.tail[0] for use in self._ast.select("use_stmt name")]
        self["types"] = [
            TypeDef(typedef)
            for typedef in self._ast.select("derived_type_def")
        ]
        # Only PUBLIC module variables are exported.
        self["globals"] = [
            VarDecl(var)
            for var in self._ast.select("module > specification_part type_declaration_stmt entity_decl")
            if len(var.parent().parent().select("access_spec /PUBLIC/")) > 0
        ]

    # def export_methods(self, config):
    def export_methods(self, src):
        """Collect the FUNCTIONs/SUBROUTINEs to export, optionally filtered
        and renamed by the config's [export] section, and attach cleanup
        ("free") hooks from any "<name>:Cleanup" sections.
        """
        config = src.config
        if config.has_section("export"):
            export_items = [key for key, _ in config.items("export")]
        else:
            export_items = None
        methods = []
        for funcdef in self._ast.select("function_subprogram"):
            if export_items is None or funcdef.select("function_stmt name")[0].tail[0].lower() in export_items:
                method = FuncDef(funcdef)
                method["export_name"] = config.get("export", method["name"].lower(), fallback=f'{self["name"]}_{method["name"]}')
                if "ret" in method:
                    if "dims" in method["ret"]:
                        # Recover the extents of a dynamic array result from
                        # the (single) ALLOCATE statement in the source text.
                        l_line = [line for line in src.source_lines if method["ret"]["name"] in line and "ALLOCATE" in line]
                        if len(l_line) == 1:
                            # ok, it is a dynamic array, find the size variable of the array
                            l_aux_line = l_line[0][l_line[0].find(method["ret"]["name"]):-2]
                            l_size_var = l_aux_line[len(method["ret"]["name"])+1:-1].split(',')
                            method["ret"]["dims"] = l_size_var
                    if method["ret"]["getter"] == "subroutine":
                        if method["ret"]["name"] == method["name"]:
                            method["ret"]["name"] = method["export_name"].upper() + '_OUT'
                        method["ret"]["intent"] = "OUT"
                    else:
                        method["ret"]["name"] = method["export_name"].upper() + '_RESULT'
                        del method["ret"]["intent"]
                methods.append(method)
        for subdef in self._ast.select("subroutine_subprogram"):
            if export_items is None or subdef.select("subroutine_stmt name")[0].tail[0].lower() in export_items:
                method = SubDef(subdef)
                method["export_name"] = config.get("export", method["name"].lower(), fallback=f'{self["name"]}_{method["name"]}')
                l_array_args = [l_arg for l_arg in method["args"] if "dims" in l_arg]
                if len(l_array_args) > 0:
                    # okay, we have arguments of array type: resolve their
                    # extents by scanning the subroutine's source text.
                    sub_start, sub_end = self._get_subroutine(method["name"], src.source_lines)
                    for arg in l_array_args:
                        self._set_array_size(arg, src.source_lines[sub_start: sub_end])
                if "ret" in method:
                    method["ret"]["name"] = method["export_name"].upper() + '_OUT'
                    method["ret"]["intent"] = "OUT"
                methods.append(method)
        self["methods"] = methods
        for method in methods:
            section_key = "{0}:Cleanup".format(method["name"])
            if config.has_section(section_key):
                # FIX: removed a leftover debug print of the cleanup section.
                if "ret" in method and config.has_option(section_key, method["ret"]["name"]):
                    method["ret"]["free"] = config.get(section_key, method["ret"]["name"])
                for var in method["args"]:
                    if config.has_option(section_key, var["name"]):
                        var["free"] = config.get(section_key, var["name"])

    def _set_array_size(self, a_argument, a_src):
        """Fill a_argument["dims"] for an array dummy argument by scanning the
        subroutine's source lines: explicit extents on the declaration, an
        ALLOCATE statement in the body, or an "!@F2x=>name(...)" comment
        markup; unresolved ':' extents fall back to 0."""
        l_arg = a_argument["name"]
        l_arg_len = len(l_arg)
        l_key_len = 8  # keyword "ALLOCATE"
        for index, line in enumerate(a_src):
            # to do: skip the comments
            l_line = line[line.find("::")+2:].strip()
            # this is the declaration line
            if l_line.startswith(l_arg+'('):
                l_declare = l_line.split('!')
                l_array_var = l_declare[0].strip()
                l_size_var = l_array_var[l_arg_len+1:-1].split(',')
                if l_size_var[0] == ':':
                    # check if the array is dynamically allocated within the function/subroutine body
                    for line in a_src[index:]:
                        line = line.strip()
                        if line.startswith("ALLOCATE"):
                            # skip comment
                            l_alloc = line.split('!')[0].strip()
                            l_line = l_alloc[l_key_len:].strip()[1:-1]
                            l_alloc_list = l_line.split('),')
                            # check if more than one variables are allocated
                            if len(l_alloc_list) > 1:
                                for l_alloc in l_alloc_list:
                                    l_alloc = l_alloc.strip()
                                    if l_alloc.startswith(l_arg + '('):
                                        l_aux_line = ''
                                        if l_alloc.endswith(')'):
                                            l_aux_line = l_alloc[l_arg_len+1:-1].strip()
                                        else:
                                            l_aux_line = l_alloc[l_arg_len+1:].strip()
                                        l_size_var = l_aux_line.split(',')
                                        a_argument["dims"] = l_size_var
                                        break
                            else:
                                l_alloc = l_alloc_list[0].strip()
                                if l_alloc.startswith(l_arg + '('):
                                    l_aux_line = l_alloc[l_arg_len+1:-1].strip()
                                    l_size_var = l_aux_line.split(',')
                                    a_argument["dims"] = l_size_var
                        else:
                            # okay, no size variable is found. It could be "IN" or "INOUT" type,
                            # so try the declaration comment markup.
                            if len(l_declare) == 2:
                                l_comment = l_declare[1].strip()
                                l_f2x_markup = '@F2x=>'
                                if l_comment.startswith(l_f2x_markup):
                                    l_vars = l_comment.split(l_f2x_markup+l_arg)[1]
                                    l_size_var = l_vars[1:-1].split(',')
                                    a_argument["dims"] = l_size_var
                                else:
                                    # Attention: no information is provided, code is not reliable !!
                                    # But at least make sure the dimension is correctly set
                                    a_argument["dims"] = [0 if x == ':' else x for x in l_size_var]
                            else:
                                # Same problem as above !!
                                a_argument["dims"] = [0 if x == ':' else x for x in l_size_var]
                else:
                    # size variables are set explicitly
                    a_argument["dims"] = l_size_var
                    break

    def _get_subroutine(self, a_argument, a_src):
        """Return (start, stop) line indices of the SUBROUTINE named
        ``a_argument`` within ``a_src`` ((0, 0) when not found)."""
        startIndex = 0
        stopIndex = 0
        for i in range(len(a_src)):
            l_str = a_src[i].strip()
            if l_str.startswith("SUBROUTINE") and a_argument in l_str:
                startIndex = i
                for j, line in enumerate(a_src[i:]):
                    line = line.strip()
                    if line.startswith("END SUBROUTINE"):
                        stopIndex = i + j
                        break
                break
            else:
                # should not happen
                pass
        return (startIndex, stopIndex)
| StarcoderdataPython |
6459658 | <reponame>ajgates42/netrd
from .threshold import *

# NOTE(review): with an empty __all__, `from <package> import *` re-exports
# nothing despite the star import above -- confirm this is intentional.
__all__ = []
| StarcoderdataPython |
6621257 | #!/usr/bin/env python
import rospy #importar ros para python
from sensor_msgs.msg import Image
import cv2 as cv
from cv_bridge import CvBridge
from std_msgs.msg import String, Int32 # importar mensajes de ROS tipo String y tipo Int32
from geometry_msgs.msg import Twist # importar mensajes de ROS tipo geometry / Twist
class Template(object):
    """ROS node object: subscribes to the rectified camera stream and saves
    every 20th frame to disk as a JPEG."""

    def __init__(self, args):
        # Frame counter used to subsample the stream (one save per 20 frames).
        self.contador=0
        super(Template, self).__init__()
        self.args = args
        self.subscriber = rospy.Subscriber("/duckiebot/camera_node/image/rect",Image,self.callback)
        self.bridge = CvBridge()

    def callback(self,msg):
        """Convert the incoming ROS image to OpenCV BGR and persist it."""
        image = self.bridge.imgmsg_to_cv2(msg,"bgr8")
        # File name is the ROS timestamp at the moment of saving.
        filename = str(rospy.get_time()) + ".jpg"
        if (self.contador%20==0):
            cv.imwrite("/home/duckiebot/patos/"+filename,image)
        # NOTE(review): counter increment placed outside the if so every frame
        # advances it (the source dump lost indentation) -- confirm intent.
        self.contador+=1
#def publicar(self):
#def callback(self,msg):
def main():
    """Register the node with the ROS master and process callbacks forever."""
    rospy.init_node('test')  # node creation and registration
    node = Template('args')  # keep a reference so the subscriber stays alive
    rospy.spin()  # block until shutdown -- required for subscriber-only nodes


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3218745 | <reponame>bbhunter/takeover-1
#!/usr/bin/env python3
# takeover - subdomain takeover finder
# coded by M'hamed (@m4ll0k) Outaadi
import os
import json
import requests
import urllib.parse
import concurrent.futures as thread
import urllib3
import getopt
import sys
import re
# ANSI colour escape codes used for console output (bold and regular).
r = '\033[1;31m'
g = '\033[1;32m'
y = '\033[1;33m'
b = '\033[1;34m'
r_ = '\033[0;31m'
g_ = '\033[0;32m'
y_ = '\033[0;33m'
b_ = '\033[0;34m'
e = '\033[0m'

# Collected findings; worker functions mutate this via `global _output`.
# FIX: module-level `global` statements are no-ops and were removed.
_output = []

# Runtime configuration, filled in from the command line.
k_ = {
    'domain': None,
    'threads': 1,
    'd_list': None,
    'proxy': None,
    'output': None,
    'timeout': None,
    'process': False,
    'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.36 Safari/537.36',
    'verbose': False,
    'dict_len': 0
}

# index/length * 100
def PERCENT(x, y): return float(x)/float(y) * 100
# Fingerprints of known hosting/SaaS providers: each entry maps a service
# name to the regex its "unclaimed domain" error page contains.  find()
# scans response bodies against every one of these patterns.
services = {
    'AWS/S3': {'error': r'The specified bucket does not exist'},
    'BitBucket': {'error': r'Repository not found'},
    'Github': {'error': r'There isn\\\'t a Github Pages site here\.'},
    'Shopify': {'error': r'Sorry\, this shop is currently unavailable\.'},
    'Fastly': {'error': r'Fastly error\: unknown domain\:'},
    'Ghost': {'error': r'The thing you were looking for is no longer here\, or never was'},
    'Heroku': {'error': r'no-such-app.html|<title>no such app</title>|herokucdn.com/error-pages/no-such-app.html'},
    'Pantheon': {'error': r'The gods are wise, but do not know of the site which you seek.'},
    'Tumbler': {'error': r'Whatever you were looking for doesn\\\'t currently exist at this address.'},
    'Wordpress': {'error': r'Do you want to register'},
    'TeamWork': {'error': r'Oops - We didn\'t find your site.'},
    'Helpjuice': {'error': r'We could not find what you\'re looking for.'},
    'Helpscout': {'error': r'No settings were found for this company:'},
    'Cargo': {'error': r'<title>404 — File not found</title>'},
    'Uservoice': {'error': r'This UserVoice subdomain is currently available!'},
    'Surge': {'error': r'project not found'},
    'Intercom': {'error': r'This page is reserved for artistic dogs\.|Uh oh\. That page doesn\'t exist</h1>'},
    'Webflow': {'error': r'<p class=\"description\">The page you are looking for doesn\'t exist or has been moved.</p>'},
    'Kajabi': {'error': r'<h1>The page you were looking for doesn\'t exist.</h1>'},
    'Thinkific': {'error': r'You may have mistyped the address or the page may have moved.'},
    'Tave': {'error': r'<h1>Error 404: Page Not Found</h1>'},
    'Wishpond': {'error': r'<h1>https://www.wishpond.com/404?campaign=true'},
    'Aftership': {'error': r'Oops.</h2><p class=\"text-muted text-tight\">The page you\'re looking for doesn\'t exist.'},
    'Aha': {'error': r'There is no portal here \.\.\. sending you back to Aha!'},
    'Tictail': {'error': r'to target URL: <a href=\"https://tictail.com|Start selling on Tictail.'},
    'Brightcove': {'error': r'<p class=\"bc-gallery-error-code\">Error Code: 404</p>'},
    'Bigcartel': {'error': r'<h1>Oops! We couldn’t find that page.</h1>'},
    'ActiveCampaign': {'error': r'alt=\"LIGHTTPD - fly light.\"'},
    'Campaignmonitor': {'error': r'Double check the URL or <a href=\"mailto:<EMAIL>'},
    'Acquia': {'error': r'The site you are looking for could not be found.|If you are an Acquia Cloud customer and expect to see your site at this address'},
    'Proposify': {'error': r'If you need immediate assistance, please contact <a href=\"mailto:<EMAIL>'},
    'Simplebooklet': {'error': r'We can\'t find this <a href=\"https://simplebooklet.com'},
    'GetResponse': {'error': r'With GetResponse Landing Pages, lead generation has never been easier'},
    'Vend': {'error': r'Looks like you\'ve traveled too far into cyberspace.'},
    'Jetbrains': {'error': r'is not a registered InCloud YouTrack.'},
    'Smartling': {'error': r'Domain is not configured'},
    'Pingdom': {'error': r'pingdom'},
    'Tilda': {'error': r'Domain has been assigned'},
    'Surveygizmo': {'error': r'data-html-name'},
    'Mashery': {'error': r'Unrecognized domain <strong>'},
    'Divio': {'error': r'Application not responding'},
    'feedpress': {'error': r'The feed has not been found.'},
    'readme': {'error': r'Project doesnt exist... yet!'},
    'statuspage': {'error': r'You are being <a href=\'https>'},
    'zendesk': {'error': r'Help Center Closed'},
    'worksites.net': {'error': r'Hello! Sorry, but the webs>'}
}
def plus(string):
    """Print *string* as a green '[ + ]' success message."""
    line = '{0}[ + ]{1} {2}'.format(g, e, string)
    print(line)
def warn(string, exit=False):
    """Print *string* as a red '[ ! ]' error message.

    A truthy ``exit`` terminates the whole process via sys.exit()
    (the default, ``False``, matches the original ``not 1``).
    """
    message = '{0}[ ! ]{1} {2}'.format(r, e, string)
    print(message)
    if exit:
        sys.exit()
def info(string):
    """Print *string* as a yellow informational '[ i ]' line."""
    print('{0}[ i ]{1} {2}'.format(y, e, string))

def _info():
    """Return the yellow '[ i ]' prefix used when building progress lines."""
    return '{0}[ i ]{1} '.format(y, e)

def err(string):
    """Print the matching service fingerprint regex, indented and coloured."""
    print(r' |= [REGEX]: {0}{1}{2}'.format(y_, string, e))
def request(domain, proxy, timeout, user_agent):
    """Fetch *domain* over HTTP(S) and return ``(status_code, body)``.

    The URL is normalised through checkurl() first.  On any connection
    failure the whole program exits via ``warn(..., 1)``; in multi-target
    (-l) mode a blank line is printed first so the error message does not
    run into the in-place progress line.
    """
    url = checkurl(domain)
    proxies = {
        'http': proxy,
        'https': proxy
    }
    headers = {
        'User-Agent': user_agent
    }
    try:
        # TLS verification is intentionally disabled (we probe arbitrary,
        # possibly misconfigured subdomains), so silence the warning.
        requests.packages.urllib3.disable_warnings(
            urllib3.exceptions.InsecureRequestWarning
        )
        req = requests.get(
            url=url,
            headers=headers,
            verify=False,
            allow_redirects=True,
            timeout=int(timeout) if timeout != None else None,
            proxies=proxies
        )
        return req.status_code, req.content
    except Exception as exc:  # renamed from ``err``: it shadowed the helper
        if k_.get('d_list'):
            print("")
        warn('Failed to establish a new connection for: %s' % (domain), 1)
def find(status, content, ok):
    """Match *content* against every known service fingerprint.

    Returns ``(service_name, regex)`` for the first fingerprint whose
    error pattern appears in the response body with *status* in range, or
    ``(None, None)`` when nothing matches.  ``ok`` True (the -k flag)
    also accepts HTTP 200, which produces more false positives.

    Bug fix: the original implicitly returned ``None`` on no match, so
    the caller's ``service, error = find(...)`` unpacking raised a
    TypeError on every clean host; an explicit two-tuple is returned now.
    """
    for service in services:
        for values in services[service].items():
            if re.findall(str(values[1]), str(content), re.I) and int(status) in range(201 if ok is False else 200, 599):
                return str(service), str(values[1])
    return None, None
def banner():
    """Print the ASCII-art program banner."""
    lines = (
        "\n /~\\",
        " C oo ---------------",
        " _( ^) |T|A|K|E|O|V|E|R|",
        "/ ~\\ ----------------",
        "#> by M'hamed (@m4ll0k) Outaadi",
        "#> http://github.com/m4ll0k",
        "-"*40,
    )
    for line in lines:
        print(line)
def help(_exit_=False):
    """Print the banner and usage screen; exit when *_exit_* is truthy."""
    banner()
    print("Usage: %s [OPTION]\n" % sys.argv[0])
    options = (
        "\t-d\tSet domain URL (e.g: www.test.com)",
        "\t-t\tSet threads, default 1",
        "\t-l\tScan multiple targets in a text file",
        "\t-p\tUse a proxy to connect the target URL",
        "\t-o\tUse this settings for save a file, args=json or text",
        "\t-T\tSet a request timeout,default value is 20 seconds",
        "\t-k\tProcess 200 http code, cause more false positive",
        "\t-u\tSet custom user agent (e.g: takeover-bot)",
        "\t-v\tVerbose, print more info\n",
    )
    for option in options:
        print(option)
    if _exit_:
        sys.exit()
def checkpath(path):
    """Validate *path* and return it; exit with an error message otherwise.

    The scanner needs a wordlist *file*, so directories are rejected.

    Bug fixes: the original tested ``os.path.exists`` first, so an existing
    directory slipped through and was returned as a valid path; and the
    "is directory" message was never formatted with ``% path``.
    """
    if os.path.isdir(path):
        warn('"%s" is directory!' % path, 1)
    elif os.path.exists(path):
        return path
    elif os.path.exists(path) is False:
        warn('"%s" not exists!' % path, 1)
    else:
        warn('Error in: "%s"' % path, 1)
def readfile(path):
    """Read the wordlist at *path* and return its lines, whitespace-stripped.

    The path is validated through checkpath() first (which exits on error).
    """
    info('Read wordlist.. "%s"' % path)
    # Use a context manager so the file handle is closed deterministically;
    # the original left the handle open for the garbage collector.
    with open(checkpath(path), 'r') as handle:
        return [x.strip() for x in handle]
def checkurl(url):
    """Normalise *url* to ``scheme://host`` form.

    Bare host names get an ``http://`` prefix; any path component of a
    full URL is dropped.  A scheme other than http/https aborts the run.
    """
    o = urllib.parse.urlsplit(url)
    if o.scheme not in ['http', 'https', '']:
        warn('Scheme "%s" not supported!' % o.scheme, 1)
    if o.netloc == '':
        # No scheme given: urlsplit puts the bare host into ``path``.
        return 'http://' + o.path
    # The original's trailing ``else: return 'http://' + o.netloc`` was
    # unreachable -- netloc is either empty (handled above) or truthy.
    return o.scheme + '://' + o.netloc
def print_(string):
    """Rewrite the current terminal line in place with *string*.

    Emits the ANSI 'erase line' and 'move to column 0' sequences before
    the text so successive progress updates overwrite each other.
    """
    for chunk in ('\033[1K', '\033[0G', string):
        sys.stdout.write(chunk)
    sys.stdout.flush()
def runner(k):
    """Fan the scan out over a thread pool, one requester() task per domain.

    Progress is reported as each request completes.

    Bug fix: the original indexed ``k['domains']`` with the *completion*
    counter of ``as_completed``, which pairs the wrong domain with each
    finished request whenever futures complete out of submission order.
    A future -> domain mapping fixes the association; the ``with`` block
    also shuts the pool down, which the original never did.
    """
    if k.get('verbose'):
        info('Set %s threads..' % k.get('threads'))
    with thread.ThreadPoolExecutor(max_workers=k.get('threads')) as threadpool:
        future_to_domain = {
            threadpool.submit(
                requester, domain, k.get("proxy"), k.get("timeout"),
                k.get("user_agent"), k.get("output"), k.get('process'),
                k.get('verbose')): domain
            for domain in k.get("domains")
        }
        for i, future in enumerate(thread.as_completed(future_to_domain)):
            domain = future_to_domain[future]
            if k.get('verbose') and k.get('d_list'):
                str_ = "{i}{b:.2f}% Domain: {d}".format(
                    i=_info(),
                    b=PERCENT(int(i), int(k.get('dict_len'))),
                    d=domain
                )
                print_(str_)
            else:
                info('Domain: {}'.format(domain))
def requester(domain, proxy, timeout, user_agent, output, ok, v):
    """Scan a single *domain*: fetch it, fingerprint the body, report hits.

    ``output`` truthy queues the finding in the module-level ``_output``
    list for the report writers; ``ok`` (the -k flag) also accepts HTTP
    200 responses; ``v`` additionally prints the matching regex.  In
    multi-target mode (k_['d_list']) a blank line is printed first so the
    message does not collide with the in-place progress line.
    """
    code, html = request(domain, proxy, timeout, user_agent)
    # NOTE(review): this unpacking requires find() to return a 2-tuple
    # even when no fingerprint matches -- verify the no-match path.
    service, error = find(code, html, ok)
    if service and error:
        if output:
            _output.append((domain, service, error))
        if v and not k_.get('d_list'):
            plus('%s service found! Potential domain takeover found! - %s' %
                 (service, domain))
        elif v and k_.get('d_list'):
            print("")
            plus('%s service found! Potential domain takeover found! - %s' %
                 (service, domain))
        else:
            if k_.get('d_list'):
                print("")
                plus('%s service found! Potential domain takeover found! - %s' %
                     (service, domain))
            elif not k_.get('d_list'):
                plus('%s service found! Potential domain takeover found! - %s' %
                     (service, domain))
        if v:
            # Show which fingerprint regex matched.
            err(error)
def savejson(path, content, v):
    """Serialise findings to *path* as JSON.

    *content* is a list of ``(domain, service, error_regex)`` tuples; the
    output shape is ``{"domains": {domain: {"service": ..., "error": ...}}}``.
    In verbose multi-target mode a blank line is printed first so the
    message clears the in-place progress line.
    """
    if v and not k_.get('d_list'):
        info('Writing file..')
    elif v and k_.get('d_list'):
        print("")
        info("Writing file..")
    entries = {}
    for domain, service, error in content:
        entries[domain] = {'service': service, 'error': error}
    payload = {"domains": entries}
    # ``with`` closes the handle on exit; the explicit close() the
    # original made inside the block was redundant.
    with open(path, 'w+') as outjsonfile:
        json.dump(payload, outjsonfile, indent=4)
    info('Saved at '+path+'..')
def savetxt(path, content, v):
    """Write findings to *path* as a human-readable text report.

    *content* is a list of ``(domain, service, error_regex)`` tuples, one
    record per block, separated by ruler lines.
    """
    if v and not k_.get('d_list'):
        info('Writing file..')
    elif v and k_.get('d_list'):
        print("")
        info("Writing file..")
    br = '-'*40
    bf = '='*40
    out = ''+br+'\n'
    for domain, service, error in content:
        out += 'Domain\t: %s\n' % domain
        out += 'Service\t: %s\n' % service
        out += 'Error\t: %s\n' % error
        out += ''+bf+'\n'
    out += ''+br+'\n'
    # ``with`` closes the handle on exit; the explicit close() the
    # original made inside the block was redundant.
    with open(path, 'w+') as outtxtfile:
        outtxtfile.write(out)
    info('Saved at '+path+'..')
def main():
    """Parse command-line options, run the scan and write any report file."""
    # No arguments at all: show usage and exit.
    if len(sys.argv) < 2:
        help(1)
    try:
        opts, args = getopt.getopt(sys.argv[1:],
            'd:l:p:o:t:T::u:kv',
            ['d=', 'l=', 'p=', 'v', 'o=', 't=', 'T=', 'u=', 'k'])
    except Exception as e:
        warn(e, 1)
    # Copy recognised flags into the shared k_ configuration dict.
    for o, a in opts:
        if o == '-d':
            k_['domain'] = a
        if o == '-t':
            k_['threads'] = int(a)
        if o == '-l':
            k_['d_list'] = a
        if o == '-p':
            k_['proxy'] = a
        if o == '-o':
            k_['output'] = a
        if o == '-T':
            k_['timeout'] = int(a)
        if o == '-k':
            k_['process'] = True
        if o == '-u':
            k_['user_agent'] = a
        if o == '-v':
            k_['verbose'] = True
    if k_.get("domain") or k_.get("d_list"):
        banner()
        domains = []
        if k_.get('verbose'):
            info('Starting..')
        # -l takes a wordlist file; otherwise scan the single -d domain.
        if k_.get("d_list"):
            domains.extend(readfile(k_.get("d_list")))
        else:
            domains.append(k_.get("domain"))
        k_['domains'] = domains
        k_['dict_len'] = len(domains)
        runner(k_)
        # The -o extension selects the report format.
        if k_.get("output"):
            if '.txt' in k_.get('output'):
                savetxt(k_.get('output'), _output, k_.get('verbose'))
            elif '.json' in k_.get('output'):
                savejson(k_.get('output'), _output, k_.get('verbose'))
            else:
                warn('Output Error: %s extension not supported, only .txt or .json' % k_.get(
                    'output').split('.')[1], 1)
    elif k_.get('domain') is None and k_.get('d_list') is None:
        help(1)
# Entry point: run the scanner, exiting quietly on Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except (KeyboardInterrupt) as e:
        sys.exit(0)
| StarcoderdataPython |
181608 | # -*- coding: utf-8 -*-
u"""
Created on 2017-1-25
@author: cheng.li
"""
import unittest
import copy
import pickle
import tempfile
import os
import numpy as np
import pandas as pd
from PyFin.Analysis.SeriesValues import SeriesValues
class TestSecurityValues(unittest.TestCase):
    """Unit tests for ``PyFin.Analysis.SeriesValues.SeriesValues``.

    Covers construction, ranking (plain and grouped), L1 normalisation,
    deep copying, the arithmetic and ``^`` operators, residualisation
    and pickling round-trips.
    """

    def testSecurityValuesInit(self):
        # Construction from an explicit name -> position mapping.
        data = np.array([1, 2, 3])
        index = ['c', 'b', 'a']
        test = SeriesValues(data, dict(zip(index, range(len(index)))))
        expected = dict(zip(index, data))
        for name in test.index():
            self.assertEqual(test[name], expected[name])

    def testSecurityValuesRank(self):
        data = np.array([3, 2, np.nan, np.nan, 4, 5])
        index = [1, 2, 3, 4, 5, 6]
        data = SeriesValues(data, index)
        test = data.rank()
        expected = SeriesValues(np.array([2, 1, np.nan, np.nan, 3, 4]), dict(zip(index, range(len(index)))))
        for name in test.index():
            # NaN entries must stay NaN after ranking; NaN != NaN, so
            # they are compared separately from the numeric entries.
            if np.isnan(test[name]):
                self.assertTrue(np.isnan(expected[name]))
            else:
                self.assertEqual(test[name], expected[name])

    def testSecurityValuesRankWithGroup(self):
        data = np.random.randn(3000)
        groups = np.random.randint(0, 30, 3000)
        index = list(range(3000))
        data = SeriesValues(data, index)
        groups = SeriesValues(groups, index)
        test = data.rank(groups)
        # pandas' groupby-rank serves as the reference implementation.
        pd_series = pd.Series(data.values)
        expected = pd_series.groupby(groups.values).rank()
        np.testing.assert_array_almost_equal(test.values, expected.values)

    def testSecurityValuesUnit(self):
        data = np.array([3, -2, np.nan, np.nan, 4, 5])
        index = [1, 2, 3, 4, 5, 6]
        test = SeriesValues(data, index)
        test = test.unit()
        # unit() scales by the L1 norm, with NaNs ignored in the sum.
        expected = SeriesValues(data / np.nansum(np.abs(data)), dict(zip(index, range(len(index)))))
        for name in test.index():
            if np.isnan(test[name]):
                self.assertTrue(np.isnan(expected[name]))
            else:
                self.assertEqual(test[name], expected[name])

    def testSecurityValuesDeepCopy(self):
        data = np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test = SeriesValues(data, index)
        copied = copy.deepcopy(test)
        np.testing.assert_array_equal(test.values, copied.values)
        self.assertEqual(test.name_mapping, copied.name_mapping)

    def testSecurityValuesAdd(self):
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        # series + series
        calculated = test1 + test2
        expected = SeriesValues(data1 + data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # series + scalar
        calculated = test1 + 2.0
        expected = SeriesValues(data1 + 2.0, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # scalar + series (reflected operator)
        calculated = 2.0 + test2
        expected = SeriesValues(2.0 + data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesSub(self):
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        # series - series
        calculated = test1 - test2
        expected = SeriesValues(data1 - data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # series - scalar
        calculated = test1 - 2.0
        expected = SeriesValues(data1 - 2.0, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # scalar - series (reflected operator)
        calculated = 2.0 - test2
        expected = SeriesValues(2.0 - data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesMul(self):
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        # series * series
        calculated = test1 * test2
        expected = SeriesValues(data1 * data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # series * scalar
        calculated = test1 * 2.0
        expected = SeriesValues(data1 * 2.0, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # scalar * series (reflected operator)
        calculated = 2.0 * test2
        expected = SeriesValues(2.0 * data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesXor(self):
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        # ``^`` stacks the two series column-wise into an Nx2 result.
        calculated = test1 ^ test2
        expected = SeriesValues(np.array([data1, data2]).T, index=index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        for name in index:
            np.testing.assert_array_almost_equal(calculated[name], expected[name])

    def testSecurityValuesDiv(self):
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        # series / series
        calculated = test1 / test2
        expected = SeriesValues(data1 / data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # series / scalar
        calculated = test1 / 2.0
        expected = SeriesValues(data1 / 2.0, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # scalar / series (reflected operator)
        calculated = 2.0 / test2
        expected = SeriesValues(2.0 / data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesRes(self):
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        # Residual of a series against a scaled copy of itself is zero.
        calculated = test1.res(test2)
        expected = SeriesValues(np.zeros(len(data1)), index)
        np.testing.assert_array_almost_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        data1 = np.random.randn(100)
        data1 = data1 - data1.mean()
        data2 = np.random.randn(100)
        data2 = data2 - data2.mean()
        index = list(range(1, 101))
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        calculated = test1.res(test2)
        # res() matches the OLS residual of data1 regressed on data2.
        expected = SeriesValues(data1 - np.dot(data2, data1) / np.dot(data2, data2) * data2, index)
        np.testing.assert_array_almost_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesPickle(self):
        data = np.array([3, 2, np.nan, np.nan, 4, 5])
        index = [1, 2, 3, 4, 5, 6]
        test = SeriesValues(data, index)
        # Round-trip through a real temporary file, then clean it up.
        f = tempfile.NamedTemporaryFile('w+b', delete=False)
        pickle.dump(test, f)
        f.close()
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        np.testing.assert_array_equal(test.values, pickled.values)
        self.assertEqual(test.name_mapping, pickled.name_mapping)
        os.unlink(f.name)
| StarcoderdataPython |
12805547 | <filename>tests/filter_integration_tests/test_filters_with_mongo_storage.py
from chatterbot.storage import MongoDatabaseAdapter
from tests.base_case import ChatBotMongoTestCase
class RepetitiveResponseFilterTestCase(ChatBotMongoTestCase):
    """
    Test case for the RepetitiveResponseFilter class.
    """

    def test_filter_selection(self):
        """
        Test that repetitive responses are filtered out of the results.
        """
        from chatterbot.filters import RepetitiveResponseFilter
        from chatterbot.trainers import ListTrainer
        self.chatbot.filters = (RepetitiveResponseFilter(), )
        self.chatbot.set_trainer(ListTrainer)
        # The training list contains 'Hi' as a response to 'Hello' several
        # times, plus one alternative ('Hi, how are you?').
        self.chatbot.train([
            'Hello',
            'Hi',
            'Hello',
            'Hi',
            'Hello',
            'Hi, how are you?',
            'I am good.'
        ])
        first_response = self.chatbot.get_response('Hello')
        second_response = self.chatbot.get_response('Hello')
        # The filter should avoid repeating 'Hi' on the second query.
        self.assertEqual(first_response.text, 'Hi')
        self.assertEqual(second_response.text, 'Hi, how are you?')
| StarcoderdataPython |
1830857 | <reponame>chagaz/sfan<filename>code/synthetic_data_experiments__parallel-fold.py
import synthetic_data_experiments as sde
import argparse
import logging
if __name__ == "__main__":
# TODO : use sde.get_integrous_arg_values ???
help_str = "Validation experiments on synthetic data"
parser = argparse.ArgumentParser(description=help_str,add_help=True)
parser.add_argument("-k", "--num_tasks", help="Number of tasks", type=int)
parser.add_argument("-m", "--num_features", help="Number of features",
type=int)
parser.add_argument("-n", "--num_samples", help="Number of samples",
type=int)
parser.add_argument("-r", "--num_repeats", help="Number of repeats",
type=int)
parser.add_argument("-f", "--num_folds", help="Number of CV folds",
type=int)
parser.add_argument("-s", "--num_subsamples", help="Number of subsamples",
type=int)
parser.add_argument("data_dir", help="Simulated data directory")
parser.add_argument("resu_dir", help="Results directory")
parser.add_argument("simu_id", help="Simulation name")
parser.add_argument("hyperparam_fname_np", help="File holding hyperparam for sfan and msfan np")
# arg that differ with sde.
parser.add_argument("hyperparam_fname", help="File holding hyperparam for msfan")
# arg that differ with sde.
parser.add_argument("repeat_idx", help="Index of the current repeat",
type=int) # arg that differ with sde.
parser.add_argument("fold_idx", help="Index of the current fold",
type=int) # arg that differ with sde.
parser.add_argument("-v", "--verbose", help="Turn on detailed info log",
action='store_true')
args = parser.parse_args()
args.fold_idx = args.fold_idx -1
if args.verbose:
logging.basicConfig(format="[%(levelname)s] %(message)s",
level=logging.DEBUG)
logging.info("Verbose output.")
resu_dir = "%s/repeat_%d" % (args.resu_dir, args.repeat_idx)
data_dir = '%s/repeat_%d' % (args.data_dir, args.repeat_idx)
genotype_fname = '%s/%s.genotypes.txt' % (data_dir, args.simu_id)
network_fname = '%s/%s.network.dimacs' % (data_dir, args.simu_id)
precision_fname = '%s/%s.task_similarities.txt' % (data_dir,
args.simu_id)
causal_fname = '%s/%s.causal_features.txt' % (data_dir, args.simu_id)
phenotype_fnames = ['%s/%s.phenotype_%d.txt' % \
(data_dir, args.simu_id, task_idx) \
for task_idx in range(args.num_tasks)]
scores_fnames = ['%s/%s.scores_%d.txt' % \
(data_dir, args.simu_id, task_idx) \
for task_idx in range(args.num_tasks)]
#with open(args.hyperparam_fname) as f:
#lbd_eta_mu_values = f.readlines()
#lbd_eta_values = [" ".join(plist.split()[:-2]) \
# for plist in lbd_eta_mu_values]
lbd_eta_values = []
lbd_eta_mu_values_np = []
lbd_eta_mu_values = []
with open(args.hyperparam_fname_np) as f:
for line in f :
lbd_eta_mu_values_np.append(line)
lbd_eta_values.append(" ".join(line.split()[:-2]) )
with open(args.hyperparam_fname) as f:
for line in f :
lbd_eta_mu_values.append(line)
# indices for this fold :
# TODO : factorisation of fname template...
trIndices_fname = resu_dir+'/'+args.simu_id+'.fold%d.trIndices'
teIndices_fname = resu_dir+'/'+args.simu_id+'.fold%d.teIndices'
ssIndices_fname = resu_dir+'/'+args.simu_id+'.fold%d.ss%d.ssIndices'
indices = {'trIndices': list(), 'teIndices':list(), 'ssIndices':list()}
with open(trIndices_fname %(args.fold_idx), 'r') as trIndices_f :
line = trIndices_f.readline().split()
indices["trIndices"] = [int (i) for i in line ]
with open(teIndices_fname %(args.fold_idx),'r') as teIndices_f :
line = teIndices_f.readline().split()
indices["teIndices"] = [int (i) for i in line ]
for ss_idx in xrange(args.num_subsamples) :
with open(ssIndices_fname %(args.fold_idx,ss_idx), 'r') as ssIndices_f:
line = ssIndices_f.readline().split()
indices["ssIndices"].append( [int (i) for i in line ] )
tmp_weights_fnames = sde.fetch_tmp_weights_fnames(resu_dir, args.simu_id, args.fold_idx)
sde.run_fold(
args.fold_idx,
args,
lbd_eta_values, lbd_eta_mu_values_np, lbd_eta_mu_values,
indices,
genotype_fname, network_fname , tmp_weights_fnames, precision_fname , causal_fname, phenotype_fnames, scores_fnames,
resu_dir)
| StarcoderdataPython |
11205437 | <reponame>jeantardelli/wargameRepo<filename>wargame/designpatterns/pythonic_dwarfironjacket.py<gh_stars>1-10
"""pythonic_dwarfironjacket
This module represents a dwarf iron jacket object.
"""
class DwarfIronJacket:
    """Represents a piece of armor for the attack of the orcs game.

    Intentionally an empty marker class: the docstring alone makes the
    class body valid, so no ``pass`` statement is required.
    """
| StarcoderdataPython |
12866452 | <reponame>Chang-Liu-TAMU/Python-Cookbook-reading
# @Time: 2022/4/12 20:50
# @Author: <NAME>
# @Email: <EMAIL>
# @File:4.4.Implementing_the_iterator_protocol.py
################ clean version #########################
# class Node:
# def __init__(self, val):
# self._value = val
# self._children = []
#
# def __repr__(self):
# return "Node({!r})".format(self._value)
#
# def add_child(self, node):
# self._children.append(node)
#
# def __iter__(self):
# return iter(self._children)
#
# def depth_first(self):
# yield self
# for c in self:
# yield from c.depth_first()
############# some messy version ####################
class Node:
    """Tree node holding a value and an ordered list of child nodes."""

    def __init__(self, value):
        self._value = value
        self._children = []

    def __repr__(self):
        return "Node({!r})".format(self._value)

    def add_child(self, node):
        """Append *node* as the rightmost child of this node."""
        self._children.append(node)

    def __iter__(self):
        # Iterating a node walks its direct children only.
        return iter(self._children)

    def depth_first(self):
        """Return an iterator performing a pre-order depth-first walk."""
        return DepthFirstIterator(self)
class DepthFirstIterator:
    '''
    Pre-order depth-first traversal over a Node tree.

    Yields the start node first, then recursively exhausts each child's
    own depth-first iterator, one child at a time.
    '''
    def __init__(self, start_node):
        # Root of the subtree being traversed.
        self._node = start_node
        # Iterator over start_node's direct children (None until the
        # first __next__ call, which yields start_node itself).
        self._children_iter = None
        # Depth-first iterator of the child currently being walked.
        self._child_iter = None
    def __iter__(self):
        return self
    def __next__(self):
        if self._children_iter == None:
            # First call: prepare the child iterator and yield the root.
            self._children_iter = iter(self._node)
            return self._node
        elif self._child_iter:
            # A child subtree is in progress: forward its next node, or
            # recurse into ourselves once it is exhausted to advance to
            # the next child.
            try:
                following = next(self._child_iter)
                return following
            except StopIteration:
                self._child_iter = None
                return next(self)
        else:
            # Start the depth-first walk of the next child; a raised
            # StopIteration from _children_iter ends this iterator too.
            self._child_iter = next(self._children_iter).depth_first()
            return next(self)
# return next(self._child_iter)
# Build a small three-level tree to exercise the iterator:
#         0
#       /   \
#      1     2
#     / \   / \
#    3   4 5   6
root = Node(0)
left = Node(1)
right = Node(2)
left.add_child(Node(3))
left.add_child(Node(4))
right.add_child(Node(5))
right.add_child(Node(6))
root.add_child(left)
root.add_child(right)
# Pre-order depth-first walk: visits 0, 1, 3, 4, 2, 5, 6.
for i in root.depth_first():
    print(i)
312046 | <gh_stars>0
import tensorflow as tf
import os
import numpy as np
from box_utils import compute_target
from image_utils import random_patching, horizontal_flip
# Hide all GPUs from TensorFlow: force CPU-only execution for this module.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# def _extract_fn(tfrecord):
def extract_fn(augmentation, default_boxes, tfrecord):
    """Parse one serialized TF Example into SSD training tensors.

    Returns ``(filename, img, gt_confs, gt_locs)`` where ``img`` is the
    decoded JPEG scaled to roughly [-1, 1] and the ground-truth tensors
    come from compute_target() against *default_boxes*.

    NOTE(review): ``augmentation`` is currently unused -- the random
    patching/flip code below is commented out; confirm before relying
    on it.  The bbox coordinates are declared as scalar
    FixedLenFeature floats, i.e. exactly one box per example.
    """
    image_feature_description = {
        'image/height': tf.io.FixedLenFeature([], tf.int64),
        'image/width': tf.io.FixedLenFeature([], tf.int64),
        'image/filename': tf.io.FixedLenFeature([], tf.string),
        'image/source_id': tf.io.FixedLenFeature([], tf.string),
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/format': tf.io.FixedLenFeature([], tf.string),
        'image/object/bbox/xmin': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/xmax': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/ymin': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/ymax': tf.io.FixedLenFeature([], tf.float32),
        'image/object/class/text': tf.io.FixedLenFeature([], tf.string),
        'image/object/class/label': tf.io.FixedLenFeature([], tf.int64),
    }
    # Extract the data record
    sample = tf.io.parse_single_example(tfrecord, image_feature_description)
    filename = sample['image/filename']
    img = tf.io.decode_jpeg(sample['image/encoded'])
    height = sample['image/height']
    width = sample['image/width']
    xmin = sample['image/object/bbox/xmin']
    xmax = sample['image/object/bbox/xmax']
    ymin = sample['image/object/bbox/ymin']
    ymax = sample['image/object/bbox/ymax']
    boxes = [[xmin, ymin, xmax, ymax]]
    # Label is hard-coded to 1: a single foreground class.
    labels = [1]
    # boxes = tf.constant(boxes, dtype=tf.float32)
    labels = tf.constant(labels, dtype=tf.int64)
    # augmentation_method = np.random.choice(augmentation)
    # if augmentation_method == 'patch':
    #     img, boxes, labels = random_patching(img, boxes, labels)
    # elif augmentation_method == 'flip':
    #     img, boxes, labels = horizontal_flip(img, boxes, labels)
    # Normalise uint8 pixels to approximately [-1, 1).
    img = tf.cast(img, tf.float32)
    img = (img / 127.0) - 1.0
    gt_confs, gt_locs = compute_target(
        default_boxes, boxes, labels)
    return filename, img, gt_confs, gt_locs
| StarcoderdataPython |
# Highest 32-bit IP address value (Advent of Code 2016, day 20).
MAX = 4294967295
blacklist = []
with open("inputs/day20.txt") as f:
    for line in f:
        # Each input line is "lo-hi": an inclusive blocked address range.
        line = line.strip().split('-')
        blacklist.append([int(x) for x in line])
# Sort by range start so the ranges can be swept once, in order.
blacklist.sort()
def part1():
    # Sweep the sorted ranges; ``ip`` is the lowest candidate address
    # not yet known to be blocked.  (Python 2 print syntax.)
    ip = 0
    for i in range(0, len(blacklist)):
        bl = blacklist[i]
        if ip < bl[0]:
            # Gap before this range starts: ``ip`` is unblocked.
            break
        if bl[1] > ip:
            # Range covers ip: jump past its end.
            ip = bl[1]+1
    print "(part1):", ip
def part2():
    # Count every address not covered by any blacklist range.
    ip = 0
    good_ips = 0
    for i in range(0, len(blacklist)):
        bl = blacklist[i]
        if ip < bl[0]:
            # Unblocked gap between the previous range and this one.
            good_ips += bl[0]-ip
            ip = bl[1]+1
        elif bl[1] > ip:
            ip = bl[1]+1
    # NOTE(review): addresses above the final blacklist range (ip..MAX)
    # are never counted -- correct only if the input's last range ends
    # at MAX; confirm against inputs/day20.txt.
    print "(part2):", good_ips
# Solve both puzzle parts.
part1()
part2()
| StarcoderdataPython |
9704937 | # this file is here to make the external plugins of this repo available from the pcbnew menu.
# to make these plugins available in your kicad, you'll need to have then be available here:
# ~/ubuntu/.kicad_plugins/
#in other worked ~/ubuntu/.kicad_plugins/kicad_mmccooo
# for these particular plugins, you'll need dxfgrabber, numpy, scipy, shapely.
# note that kicad is still on python 2.7.
# sudo python2.7 -m ensurepip --default-pip
# or
# sudo apt install python-pip
# sudo pip2 install --upgrade pip
# sudo pip2 install dxfgrabber
# sudo pip2 install numpy
# sudo pip2 install scipy
# sudo pip2 install shapely
import pcbnew
# Announce registration progress on the pcbnew scripting console.
print("initializing mmccoo_kicad")
# Importing each module below registers its action plugin with pcbnew
# as a side effect of the import.
import gen_border
import dxf_stuff
import place_by_sch
import instantiate_footprint
import toggle_visibility
# I don't think it's possible to control ratsnets for individual nets.
# It used to be possible, but not since the new connectivity algorithm.
# import ratnest
import utils
import svg2border
print("done adding mmccoo_kicad")
| StarcoderdataPython |
6223 | <reponame>marshuang80/napari
import numpy as np
class Mesh:
    """Triangle-mesh buffers backing the rendered shapes.

    Attributes
    ----------
    vertices : np.ndarray
        Qx2 vertex positions for all triangles (faces and edges).
    vertices_centers : np.ndarray
        Qx2 vertex centers; equal to the vertices for faces, and the base
        position to which scaled offsets are added for edges.
    vertices_offsets : np.ndarray
        Qx2 vertex offsets; zero for faces, scaled by the edge width and
        added to the centers for edges.
    vertices_index : np.ndarray
        Qx2 ints: (shape index, mesh type) per vertex, where mesh type is
        0 for face and 1 for edge.
    triangles : np.ndarray
        Px3 vertex indices forming the mesh triangles.
    triangles_index : np.ndarray
        Px2 ints: (shape index, mesh type) per triangle.
    triangles_colors : np.ndarray
        Px4 rgba color per triangle.
    triangles_z_order : np.ndarray
        Length-P permutation of (0, ..., P-1) giving triangle z order.
    """

    # Mesh sub-types: index 0 marks faces, index 1 marks edges.
    _types = ['face', 'edge']

    def __init__(self):
        # A fresh mesh starts empty; clear() owns buffer initialisation.
        self.clear()

    def clear(self):
        """Reset every vertex/triangle buffer to its empty state."""
        # attribute -> (shape, dtype); None means NumPy's default float.
        specs = {
            'vertices': ((0, 2), None),
            'vertices_centers': ((0, 2), None),
            'vertices_offsets': ((0, 2), None),
            'vertices_index': ((0, 2), int),
            'triangles': ((0, 3), np.uint32),
            'triangles_index': ((0, 2), int),
            'triangles_colors': ((0, 4), None),
            'triangles_z_order': ((0), int),
        }
        for attr, (shape, dtype) in specs.items():
            if dtype is None:
                setattr(self, attr, np.empty(shape))
            else:
                setattr(self, attr, np.empty(shape, dtype=dtype))
| StarcoderdataPython |
1846607 | <gh_stars>0
from architecture.trainer import Trainer
# Build the trainer and run it; the meaning of 500 (epochs vs. steps)
# is defined by Trainer.train -- see that module.
trainer = Trainer()
trainer.train(500)
| StarcoderdataPython |
9628748 | <gh_stars>0
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.iam_user
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.exceptions import InvalidAWSJSON
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey.constants import IGNORE_PREFIX
from security_monkey import app
import json
import urllib
class IAMUser(Watcher):
    # Identifiers used by the Security Monkey framework for indexing/UI.
    index = 'iamuser'
    i_am_singular = 'IAM User'
    i_am_plural = 'IAM Users'

    def __init__(self, accounts=None, debug=False):
        super(IAMUser, self).__init__(accounts=accounts, debug=debug)

    def slurp(self):
        """
        :returns: item_list - list of IAM Users.
        :returns: exception_map - A dict where the keys are a tuple containing the
            location of the exception and the value is the actual exception
        """
        item_list = []
        exception_map = {}
        from security_monkey.common.sts_connect import connect
        for account in self.accounts:
            try:
                iam = connect(account, 'iam')
                users_response = iam.get_all_users()
            except Exception as e:
                # NOTE(review): the exception is tagged 'iamgroup' here,
                # which looks copy-pasted from the group watcher -- confirm.
                exc = BotoConnectionIssue(str(e), 'iamgroup', account, None)
                self.slurp_exception((self.index, account, 'universal'), exc, exception_map)
                continue
            for user in users_response.users:
                ### Check if this User is on the Ignore List ###
                ignore_item = False
                for ignore_item_name in IGNORE_PREFIX[self.index]:
                    if user.user_name.lower().startswith(ignore_item_name.lower()):
                        ignore_item = True
                        break
                if ignore_item:
                    continue
                item_config = {
                    'user': {},
                    'userpolicies': {},
                    'accesskeys': {},
                    'mfadevices': {},
                    'signingcerts': {}
                }
                app.logger.debug("Slurping %s (%s) from %s" % (self.i_am_singular, user.user_name, account))
                item_config['user'] = dict(user)
                # Inline user policies arrive as URL-encoded JSON documents.
                for policy_name in iam.get_all_user_policies(user.user_name).policy_names:
                    policy = urllib.unquote(iam.get_user_policy(user.user_name, policy_name).policy_document)
                    try:
                        policydict = json.loads(policy)
                    except:
                        exc = InvalidAWSJSON(policy)
                        self.slurp_exception((self.index, account, 'universal', user.user_name), exc, exception_map)
                    # NOTE(review): if json.loads fails, ``policydict`` is
                    # stale (or unbound on the first policy) -- confirm.
                    item_config['userpolicies'][policy_name] = dict(policydict)
                for key in iam.get_all_access_keys(user_name=user.user_name).access_key_metadata:
                    item_config['accesskeys'][key.access_key_id] = dict(key)
                for mfa in iam.get_all_mfa_devices(user_name=user.user_name).mfa_devices:
                    item_config['mfadevices'][mfa.serial_number] = dict(mfa)
                login_profile = 'undefined'
                try:
                    # Users without a console password have no login profile;
                    # the lookup failure is deliberately ignored.
                    login_profile = iam.get_login_profiles(user.user_name).login_profile
                    item_config['loginprofile'] = dict(login_profile)
                except:
                    pass
                # Signing certificates: keep metadata, drop the cert body.
                for cert in iam.get_all_signing_certs(user_name=user.user_name).certificates:
                    _cert = dict(cert)
                    del _cert['certificate_body']
                    item_config['signingcerts'][cert.certificate_id] = dict(_cert)
                item_list.append(
                    IAMUserItem(account=account, name=user.user_name, config=item_config)
                )
        return item_list, exception_map
class IAMUserItem(ChangeItem):
    """ChangeItem wrapper for a single IAM user's slurped configuration."""

    def __init__(self, account=None, name=None, config=None):
        """
        :param account: name of the AWS account the user belongs to.
        :param name: the IAM user name.
        :param config: dict with the user's slurped configuration (optional).
        """
        # BUGFIX: avoid the shared mutable default argument (config={});
        # default to a fresh dict per call instead.
        super(IAMUserItem, self).__init__(
            index=IAMUser.index,
            region='universal',
            account=account,
            name=name,
            new_config=config if config is not None else {})
| StarcoderdataPython |
279745 | """
Pretty Errors for TiddlyWeb
This module initializes the plugin.
See tiddlywebplugins.prettyerror.exceptor for details
on operation.
"""
__version__ = '1.1.1'
import selector
from httpexceptor import HTTPExceptor, HTTP404
from tiddlywebplugins.prettyerror.exceptor import PrettyHTTPExceptor
def replacement_not_found(klass, environ, start_response):
    """
    Replaces the selector not_found method with a TiddlyWeb
    exception, so PrettyHTTPExceptor will be engaged when
    selector has no route.

    :param klass: the selector.Selector instance (bound as self).
    :param environ: WSGI environment of the unmatched request (unused).
    :param start_response: WSGI start_response callable (unused).
    :raises HTTP404: always.
    """
    raise HTTP404('path not found')
# Monkey-patch selector so that an unmatched route raises HTTP404 (and is
# then rendered by PrettyHTTPExceptor) instead of using selector's builtin
# 404 handler.
selector.Selector.status404 = replacement_not_found
def init(config):
    """
    In server_response_filters replace HTTPExceptor with
    PrettyHTTPExceptor.

    Safe to call more than once: the swap only happens when
    PrettyHTTPExceptor is not already installed.
    """
    if PrettyHTTPExceptor not in config['server_response_filters']:
        config['server_response_filters'].insert(
            config['server_response_filters'].index(HTTPExceptor) + 1,
            PrettyHTTPExceptor)
        # BUGFIX: only remove HTTPExceptor when we actually performed the
        # replacement above; previously this line ran unconditionally, so a
        # second init() call raised ValueError (HTTPExceptor already gone).
        config['server_response_filters'].remove(HTTPExceptor)
| StarcoderdataPython |
1677207 | <reponame>allena29/brewerslabng
import cgi
from cloudApi import *
#from django.utils import simplejson
import json
import urllib
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp.util import run_wsgi_app
from cloudUtils import cloudUtils
from gData import *
class MainPage(webapp.RequestHandler):
    # Google App Engine (Python 2) handler that proxies HTTP requests onto
    # methods of brewerslabCloudApi, selected by the "taskOperation" value.
    # GET renders a human-readable debug page (admin only); POST is the
    # authenticated machine API.

    def __init__(self):
        self.api=brewerslabCloudApi()

    """
    def get(self):
    print "Content-Type:text/plain\n\n"
    print "BA"
    func = getattr(self.api,"listRecipes",None)
    args=()
    args+=("<EMAIL>",)
    print func(*args)
    """

    def get(self):
        # Debug/inspection endpoint: restricted to a single hard-coded user.
        user = users.get_current_user()
        if not user.email() == "<EMAIL>":
            self.response.headers['Content-Type'] = "text/plain"
            self.response.out.write("cannot use this method\n")
            return
        # Look up the API method named by the taskOperation query parameter
        # and build its argument tuple: (email, arg0, arg1, ... argN-1).
        func = getattr(self.api, self.request.get("taskOperation"), None)
        args=()
        args+=(user.email(),)
        for i in range( int(self.request.get("argNum"))):
            args+=( urllib.unquote(self.request.get("arg%s" %(i))) ,)
        if self.request.get("taskOperation") == "publish":
            # "publish" writes straight to the response itself.
            self.api.response=self.response
            results = func(*args)
            return
        # Call function from within cloudApi and write it
        results = func(*args)
        # a == 1 means "no JSON payload rendered yet"; AA counts arguments
        # (the first one, the email, is not echoed back).
        a=1
        AA=0
        for arg in args:
            if AA > 0:
                self.response.out.write("<b>Argument %s:</b> %s<p>" %(AA-1,arg))
            AA=AA+1
        self.response.out.write("<b>Task Operation:</b> %s<p>" %(results['operation']))
        self.response.out.write("<b>Status Code:</b> %s<p>" %(results['status']))
        if results.has_key("json"):
            result = json.loads(results['json'])
            for x in result:
                self.response.out.write("<b>%s</b><p>\n" %(x))
                self.response.out.write("<pre>")
                self.response.out.write( result[x])
                self.response.out.write("</pre>")
            a=0
            # Best-effort rendering of optional nested result sections.
            try:
                if result['result'].has_key("cost_result"):
                    for c in result['result']['cost_result']:
                        self.response.out.write("<b>%s</b><br>" %(c))
                        self.response.out.write(" %s <p>" %(result['result']['cost_result'][c]))
                if result['result'].has_key("stock_result"):
                    for c in result['result']['stock_result']:
                        self.response.out.write("<b>%s</b><br>" %(c))
                        self.response.out.write(" %s <p>" %(result['result']['stock_result'][c]))
            except:
                pass
            if a == 1:
                self.response.out.write("Status %s\n" %( results['status']) )
        else:
            self.response.out.write("No JSON - status %s" %(results['status']))

    def post(self):
        # Machine API: the operation name is the last path component; the
        # arguments arrive as a JSON-encoded "cloudRequest" form field.
        self.response.headers['Content-Type'] = "text/plain"
        self.api.response=self.response
        cloudKey = self.request.get("cloudKey")
        cloudRequest= self.request.get("cloudRequest")
        cloudMail = self.request.get("cloudUser")
        cloudDevice = self.request.get("cloudDevice")
        cu=cloudUtils()
        indata = urllib.unquote(self.request.path ).split("/")
        taskOperation = indata[-1]
        sys.stderr.write(" taskOperation %s\n" %(taskOperation))
        # If not authorised, reply with status -1
        if not cu.checkAuthorised( cloudKey, cloudRequest,cloudMail,cloudDevice):
            self.response.out.write( json.dumps( { 'operation' : taskOperation , 'status' : -1 } ) ) #not authorised
            return
        # Decode Request
        decodedRequest = json.loads(cloudRequest)
        func = getattr(self.api, taskOperation, None)
        if not func:
            self.response.out.write( json.dumps( { 'operation' : taskOperation , 'status' : -2 } ) ) #function not available
            return
        # Rebuild the argument tuple: (email, arg0, arg1, ...).
        args=()
        args+=(cloudMail,)
        sys.stderr.write("request coming in is %s\n" %(cloudRequest))
        c=0
        for i in range( decodedRequest['argNum'] ):
            c=c+1
            args+=( decodedRequest['arg%s' %(i)],)
        for arg in args:
            sys.stderr.write(" args reconsituted %s\n" %(arg))
        # Log an equivalent GET query string for manual replay/debugging.
        sys.stderr.write("\ntaskOperation=%s&argNum=%s" %(taskOperation,c))
        c=0
        for arg in args:
            sys.stderr.write("&arg%s=%s" %(c,arg))
            c=c+1
        sys.stderr.write("\n")
        # Call function from within cloudApi and write it
        self.response.out.write( func(*args) )
# WSGI application: both /brewlab/* and /stores/* route to MainPage, which
# dispatches on the final path component / "taskOperation" parameter.
application = webapp.WSGIApplication( [('/brewlab/.*', MainPage), ('/stores/.*', MainPage) ], debug=True)
run_wsgi_app(application)
| StarcoderdataPython |
168684 | <reponame>Frky/moon
from django.conf.urls import url, include
from django.contrib.auth.views import login as django_login
from django.contrib.auth.views import logout as django_logout
from . import views
# URL routes for the app: landing page, per-label comptoirs, the shared
# "agora" comptoir, and the auth views.
urlpatterns = [
    url(r'^$', views.index, name="index"),
    # One comptoir page per label (slug up to 50 chars); "agora" is a
    # named alias onto the same view.
    url(r'^u/(?P<label>[\w-]{,50})$', views.underground_comptoir),
    url(r'^agora$', views.underground_comptoir, name="agora"),
    # Django authentication views
    url(r'^login$', django_login, name="login"),
    url(r'^logout$', django_logout, name="logout"),
    # Custom register view
    url(r'^register$', views.register, name="register"),
]
| StarcoderdataPython |
1669977 | # Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
from itertools import chain
import re
from sqlalchemy import inspect
from ipaddr import IPv4Address
from sqlalchemy.exc import IntegrityError
from pycroft.lib.net import MacExistsException, get_free_ip
from pycroft.model import session, host, user
from pycroft.model.net import Subnet, VLAN
from pycroft.model.types import InvalidMACAddressException
from tests import FixtureDataTestBase
from tests.fixtures.dummy.traffic import TrafficVolumeData
from tests.fixtures.dummy.facilities import BuildingData, RoomData
from tests.fixtures.dummy.host import IPData, HostData, InterfaceData, \
SwitchPortData
from tests.fixtures.dummy.net import SubnetData, VLANData
from tests.fixtures.dummy.user import UserData
class Test_010_InterfaceValidators(FixtureDataTestBase):
    """Tests for the MAC address validator on host.Interface."""
    datasets = [UserData, InterfaceData, HostData]
    # Lowercase colon-separated MAC, six octets.
    mac_regex = re.compile(r"^[a-f0-9]{2}(:[a-f0-9]{2}){5}$")

    def assertSetMAC(self, interface, mac):
        """Set interface.mac, expecting InvalidMACAddressException for any
        malformed or multicast (low bit of first octet set) address."""
        parts = mac.split(":")
        if len(mac) != 17 or len(parts) != 6:
            with self.assertRaises(InvalidMACAddressException):
                interface.mac = mac
            return
        if self.mac_regex.match(mac) is None:
            with self.assertRaises(InvalidMACAddressException):
                interface.mac = mac
            return
        if int(parts[0], base=16) & 1:
            # Multicast bit set: not a valid unicast interface address.
            with self.assertRaises(InvalidMACAddressException):
                interface.mac = mac
            return
        interface.mac = mac

    def test_0010_mac_validate(self):
        interface = host.Interface(host=host.Host.q.first())
        # Try some bad macs
        self.assertSetMAC(interface, "ff:ff:ff:ff:ff")
        self.assertSetMAC(interface, "ff:ff:ff:ff:ff:ff")
        self.assertSetMAC(interface, "ff:asjfjsdaf:ff:ff:ff:ff")
        self.assertSetMAC(interface, "aj:00:ff:1f:ff:ff")
        self.assertSetMAC(interface, "ff:ff:ff:ff:ff:ff:ff")
        # Assert that we have no mac assigned
        session.session.add(interface)
        self.assertRaises(IntegrityError, session.session.commit)
        session.session.rollback()
        # Assert a correct mac
        self.assertSetMAC(interface, "00:00:00:01:00:00")
        # Assert that we have the mac assigned
        session.session.add(interface)
        session.session.commit()
        # Wipe the instance
        session.session.delete(interface)
        session.session.commit()
class Test_030_IpModel(FixtureDataTestBase):
    """Tests that host.IP rejects NULLing out its address or subnet."""
    datasets = (BuildingData, RoomData, SubnetData, UserData, HostData,
                InterfaceData, VLANData)

    def test_0030_delete_address(self):
        subnet = Subnet.q.first()
        interface = host.Interface.q.first()
        ip, _ = get_free_ip((subnet, ))
        ip_addr = host.IP(interface=interface, address=ip, subnet=subnet)
        session.session.add(ip_addr)
        session.session.commit()
        # Setting the address to None must fail the NOT NULL constraint
        # at commit time.
        with self.assertRaises(IntegrityError):
            ip_addr.address = None
            self.assertIsNone(ip_addr.address)
            session.session.commit()

    def test_0040_delete_subnet(self):
        subnet = Subnet.q.first()
        interface = host.Interface.q.first()
        ip, _ = get_free_ip((subnet, ))
        ip_addr = host.IP(interface=interface, address=ip, subnet=subnet)
        session.session.add(ip_addr)
        session.session.commit()
        # Detaching the subnet must likewise be rejected on commit.
        with self.assertRaises(IntegrityError):
            ip_addr.subnet = None
            self.assertIsNone(ip_addr.subnet)
            session.session.commit()
class Test_040_IpEvents(FixtureDataTestBase):
    """Tests for creating host.IP records: both attributes must be present
    and the address must actually belong to the assigned subnet."""
    datasets = (BuildingData, VLANData, SubnetData, RoomData, UserData,
                HostData, InterfaceData)

    def test_0010_correct_subnet_and_ip(self):
        subnet = Subnet.q.first()
        interface = host.Interface.q.first()
        # Assign attributes one by one...
        ip_address, _ = get_free_ip((subnet, ))
        ip = host.IP(interface=interface)
        ip.address = ip_address
        ip.subnet = subnet
        session.session.add(ip)
        session.session.commit()
        # ...and also via the constructor; both must succeed.
        interface = host.Interface.q.first()
        ip_address, _ = get_free_ip((subnet, ))
        ip = host.IP(address=ip_address, subnet=subnet, interface=interface)
        session.session.add(ip)
        session.session.commit()
        host.IP.q.filter(host.IP.interface == interface).delete()
        session.session.commit()

    def test_0020_missing_subnet(self):
        subnet = Subnet.q.first()
        interface = host.Interface.q.first()
        ip_address, _ = get_free_ip((subnet, ))
        ip = host.IP(interface=interface)
        ip.address = ip_address

        # Committing an IP without a subnet must fail.
        def commit():
            session.session.add(ip)
            session.session.commit()
        self.assertRaises(IntegrityError, commit)

    def test_0030_missing_ip(self):
        subnet = Subnet.q.first()
        interface = host.Interface.q.first()
        ip = host.IP(interface=interface)
        ip.subnet = subnet

        # Committing an IP without an address must fail.
        def commit():
            session.session.add(ip)
            session.session.commit()
        self.assertRaises(IntegrityError, commit)

    def test_0040_wrong_subnet(self):
        subnets = Subnet.q.all()
        interface = host.Interface.q.first()
        ip_address, _ = get_free_ip((subnets[0], ))
        # An address from subnet[0] may never be combined with subnet[1],
        # regardless of assignment order.
        ip = host.IP(interface=interface, address=ip_address)
        with self.assertRaises(ValueError):
            ip.subnet = subnets[1]
        ip = host.IP(interface=interface, subnet=subnets[1])
        with self.assertRaises(ValueError):
            ip.address = ip_address
        with self.assertRaises(ValueError):
            host.IP(interface=interface, subnet=subnets[1], address=ip_address)
class Test_060_Cascades(FixtureDataTestBase):
    """Tests that deletions cascade down the ownership chain
    (user -> host -> interface -> ip -> traffic volume) and that
    switch-port/VLAN association rows are cleaned up."""
    # NOTE(review): the first four methods all share the "test_0010_" prefix;
    # presumably numbering was meant to increase -- harmless, but confusing.
    datasets = (SubnetData, UserData, HostData, InterfaceData, IPData,
                TrafficVolumeData, SwitchPortData)

    def test_0010_cascade_on_delete_ip(self):
        test_ip = host.IP.q.filter_by(
            address=IPData.dummy_user_ipv4.address).one()
        tv_of_test_ip = test_ip.traffic_volumes
        session.session.delete(test_ip)
        session.session.commit()
        self.assertTrue(all(inspect(o).was_deleted
                            for o in tv_of_test_ip))

    def test_0010_cascade_on_delete_interface(self):
        test_interface = host.Interface.q.filter_by(
            mac=InterfaceData.dummy.mac).one()
        ips = test_interface.ips
        traffic_volumes = tuple(chain(*(ip.traffic_volumes for ip in ips)))
        session.session.delete(test_interface)
        session.session.commit()
        self.assertTrue(all(inspect(o).was_deleted
                            for o in chain(ips, traffic_volumes)))

    def test_0010_cascade_on_delete_host(self):
        test_host = host.Host.q.first()
        interfaces = test_host.interfaces
        ips = tuple(chain(*(d.ips for d in interfaces)))
        traffic_volumes = tuple(chain(*(ip.traffic_volumes for ip in ips)))
        session.session.delete(test_host)
        session.session.commit()
        self.assertTrue(all(inspect(o).was_deleted
                            for o in chain(interfaces, ips, traffic_volumes)))

    def test_0010_cascade_on_delete_user(self):
        test_user = user.User.q.filter_by(login=UserData.dummy.login).one()
        hosts = test_user.hosts
        interfaces = tuple(chain(*(h.interfaces for h in hosts)))
        ips = tuple(chain(*(d.ips for d in interfaces)))
        traffic_volumes = tuple(chain(*(ip.traffic_volumes for ip in ips)))
        session.session.delete(test_user)
        session.session.commit()
        self.assertTrue(all(inspect(o).was_deleted
                            for o in chain(hosts, interfaces, ips, traffic_volumes)))

    def test_cascade_on_delete_vlan(self):
        # TODO: delete a vlan
        vlan = VLAN.q.filter_by(vid=VLANData.vlan_dummy1.vid).one()
        associations_query = session.session.query(host.switch_port_default_vlans)\
            .filter_by(vlan_id=vlan.id)
        self.assertEqual(associations_query.count(), 2)
        # Subnets reference the VLAN, so remove them first.
        for subnet in vlan.subnets:
            session.session.delete(subnet)
        session.session.delete(vlan)
        session.session.commit()
        self.assertEqual(associations_query.count(), 0)

    def test_cascade_on_delete_switch_port(self):
        port_name = SwitchPortData.dummy_port4.name
        port = host.SwitchPort.q.filter_by(name=port_name).one()
        associations_query = session.session.query(host.switch_port_default_vlans)\
            .filter_by(switch_port_id=port.id)
        self.assertEqual(associations_query.count(), 2)
        session.session.delete(port)
        session.session.commit()
        self.assertEqual(associations_query.count(), 0)
class TestVLANAssociations(FixtureDataTestBase):
    """Tests the switch-port <-> default-VLAN secondary relationship."""
    datasets = (SwitchPortData,)

    def test_secondary_relationship_works(self):
        # dummy_port1 has one default VLAN, dummy_port4 has two (per fixtures).
        port = host.SwitchPort.q.filter_by(name=SwitchPortData.dummy_port1.name).one()
        self.assertEqual(len(port.default_vlans), 1)
        port4 = host.SwitchPort.q.filter_by(name=SwitchPortData.dummy_port4.name).one()
        self.assertEqual(len(port4.default_vlans), 2)
| StarcoderdataPython |
217526 | """
Parsed, structurized Quake3 events
"""
from collections import (
namedtuple,
)
from datetime import (
datetime,
)
from quakestats.core.q3toql import (
entities,
)
# Raw (not-yet-parsed) Quake3 log event: server time, event name, and the
# unprocessed payload string.
RawEvent = namedtuple(
    'RawEvent', ['time', 'name', 'payload']
)
class Q3GameEvent():
    """Base class for parsed Quake3 game events.

    Carries the (non-negative) server time at which the event occurred.
    """

    def __init__(self, ev_time: int):
        """
        :param ev_time: event time in server units; must be >= 0.
        :raises ValueError: if ev_time is negative.
        """
        # BUGFIX: validate with an explicit exception instead of `assert`,
        # which is silently stripped when Python runs with -O.
        if ev_time < 0:
            raise ValueError(
                "ev_time must be non-negative, got {}".format(ev_time))
        self.time = ev_time
class Q3EVInitGame(Q3GameEvent):
    """Match-initialisation event carrying the server/game configuration."""

    def __init__(
        self, ev_time: int,
        hostname: str, gametype: str, mapname: str,
        fraglimit: int, capturelimit: int, timelimit: int,
        modname: str
    ):
        super().__init__(ev_time)
        supported = ('FFA', 'CA', 'DUEL')
        if gametype not in supported:
            raise ValueError("Invalid gametype, got {}".format(gametype))
        self.hostname = hostname
        self.gametype = gametype
        self.mapname = mapname
        self.modname = modname
        # Match limits.
        self.fraglimit = fraglimit
        self.timelimit = timelimit
        self.capturelimit = capturelimit
class Q3EVUpdateClient(Q3GameEvent):
    """Client-info update: a player's display name and team assignment."""

    def __init__(
        self, ev_time: int, client_id: int, name: str, team: str
    ):
        super().__init__(ev_time)
        known_teams = ("RED", "BLUE", "SPECTATOR", "FREE")
        if team not in known_teams:
            raise ValueError("Invalid team, got {}".format(team))
        self.client_id = client_id
        self.name = name
        self.team = team
class Q3EVPlayerStats(Q3GameEvent):
    """End-of-match per-player statistics: weapon accuracy, damage, pickups."""

    WeaponStat = namedtuple('WeaponStat', ['shots', 'hits'])
    DamageStat = namedtuple('DamageStat', ['given', 'received'])
    PickupStats = namedtuple('PickupStats', ['health', 'armor'])

    def __init__(
        self, ev_time: int, client_id: int
    ):
        super().__init__(ev_time)
        self.client_id = client_id
        # Everything starts empty/zeroed; filled in via the setters below.
        self.weapons = {}
        self.pickups = self.PickupStats(health=0, armor=0)
        self.damage = self.DamageStat(given=0, received=0)

    def add_weapon(self, name: str, shots: int, hits: int):
        """Record shot and hit counts for one weapon."""
        assert name in entities.Q3Data.WEAPONS, f"Got {name}"
        self.weapons[name] = self.WeaponStat(shots=shots, hits=hits)

    def set_damage(self, given: int, received: int):
        """Record total damage dealt and taken."""
        self.damage = self.DamageStat(given=given, received=received)

    def set_pickups(self, health: int, armor: int):
        """Record total health and armor picked up."""
        self.pickups = self.PickupStats(health=health, armor=armor)
class Q3EVPlayerKill(Q3GameEvent):
    """Kill event: killer, victim, and the means-of-death string."""

    def __init__(
        self, ev_time: int, client_id: int, victim_id: int, reason: str
    ):
        super().__init__(ev_time)
        # client_id identifies the killer.
        self.client_id = client_id
        self.victim_id = victim_id
        self.reason = reason
class Q3EVClientDisconnect(Q3GameEvent):
    """Emitted when a client leaves the server."""

    def __init__(self, ev_time: int, client_id: int):
        super().__init__(ev_time)
        self.client_id = client_id
class Q3EventExit(Q3GameEvent):
    """Match-end event with the reason the game exited (e.g. limit hit)."""

    def __init__(self, ev_time: int, reason: str):
        super().__init__(ev_time)
        self.reason = reason
class Q3EVServerTime(Q3GameEvent):
    """Anchors the in-game clock to a wall-clock datetime."""

    def __init__(self, ev_time: int, dt: datetime):
        super().__init__(ev_time)
        self.dt = dt
| StarcoderdataPython |
4894027 | from flask import current_app as app
# Most recent date the app has stock data downloaded for; queries use it to
# pick the "current" closing price. Update if we retrieve more current data.
MOST_RECENT_DATE_FOR_STOCK_PRICES = '2022-03-10'
class Stock:
    """A stock listing, optionally carrying its most recent closing price.

    Rows come from the `Stocks` table, joined against `timedata` for the
    closing price on MOST_RECENT_DATE_FOR_STOCK_PRICES where needed.
    """

    def __init__(self, ticker, name, sector, price=-1):
        self.ticker = ticker
        self.name = name
        self.sector = sector
        # -1 is the sentinel for "price not loaded by this query".
        self.price = price

    # Maps the UI sort selector to a fixed ORDER BY clause. Only these
    # whitelisted literals are ever interpolated into SQL (never user input).
    _ORDER_BY = {
        "ASC Name": "name ASC",
        "DESC Name": "name DESC",
        "ASC Price": "closeprice ASC",
        "DESC Price": "closeprice DESC",
    }

    @staticmethod
    def get(ticker):
        """Return the Stock with the given ticker (price not loaded), or None."""
        rows = app.db.execute('''
SELECT ticker, name, sector
FROM Stocks
WHERE ticker = :ticker
''',
                              ticker=ticker)
        # BUGFIX: a no-match result is an empty list (not None), so the old
        # unconditional rows[0] raised IndexError for unknown tickers.
        return Stock(*(rows[0])) if rows else None

    @staticmethod
    def get_all(sortBy):
        """Return every stock with its latest close price, ordered by sortBy.

        sortBy is one of "ASC Name", "DESC Name", "ASC Price", "DESC Price";
        unrecognized values fall back to ascending name order. (The four
        former copy-pasted branches differed only in ORDER BY.)
        """
        order_by = Stock._ORDER_BY.get(sortBy, "name ASC")
        rows = app.db.execute('''
SELECT stocks.ticker,name,sector,closeprice
FROM stocks
JOIN (SELECT ticker AS ticker,closeprice
      FROM timedata
      WHERE period = :p
      ORDER BY period) AS tickerPrice ON tickerPrice.ticker = stocks.ticker
ORDER BY ''' + order_by, p=MOST_RECENT_DATE_FOR_STOCK_PRICES)
        return [Stock(*row) for row in rows]

    @staticmethod
    def get_by_search(searchInput):
        """Return stocks whose name or ticker matches searchInput
        (case-insensitive substring match), with latest close prices."""
        sqlSearchInput = '%' + searchInput + '%'
        rows = app.db.execute('''
SELECT stocks.ticker,name,sector,closeprice
FROM stocks
JOIN (SELECT ticker AS ticker,closeprice
      FROM timedata
      WHERE period = :p
      ORDER BY period) AS tickerPrice ON tickerPrice.ticker = stocks.ticker
WHERE name ILIKE :s OR stocks.ticker ILIKE :s
ORDER BY name DESC ''', s=sqlSearchInput, p=MOST_RECENT_DATE_FOR_STOCK_PRICES)
        return [Stock(*row) for row in rows]

    @staticmethod
    def get_details_by_ticker(ticker):
        """Return the Stock (with latest close price) for `ticker`, or None.

        BUGFIX: the previous work-in-progress version ignored `ticker`,
        referenced the undefined name `sqlSearchInput` (NameError on every
        call), and lacked @staticmethod.
        """
        rows = app.db.execute('''
SELECT stocks.ticker,name,sector,closeprice
FROM stocks
JOIN (SELECT ticker AS ticker,closeprice
      FROM timedata
      WHERE period = :p
      ORDER BY period) AS tickerPrice ON tickerPrice.ticker = stocks.ticker
WHERE stocks.ticker = :ticker ''', ticker=ticker, p=MOST_RECENT_DATE_FOR_STOCK_PRICES)
        return Stock(*(rows[0])) if rows else None

    @staticmethod
    def get_by_sector(sector):
        """Return all stocks in the given sector."""
        # BUGFIX: the old query also selected `id`, yielding five columns for
        # Stock's four constructor parameters (a TypeError on every row).
        # Assumes Stocks has a `price` column, as the original query implied
        # -- TODO confirm against the schema.
        rows = app.db.execute('''
SELECT ticker, name, sector, price
FROM Stocks
WHERE sector = :sector
''',
                              sector=sector)
        return [Stock(*row) for row in rows]

    @staticmethod
    def get_current_price(ticker):
        """Return the latest close price for `ticker`, or None if unknown."""
        rows = app.db.execute('''
SELECT distinct closeprice
FROM Stocks
JOIN (SELECT ticker AS ticker,closeprice
      FROM timedata
      WHERE period = :p
      ORDER BY period) AS tickerPrice ON tickerPrice.ticker = :ticker
''', p=MOST_RECENT_DATE_FOR_STOCK_PRICES, ticker=ticker)
        # BUGFIX: guard against an empty result list ([] rather than None).
        return rows[0][0] if rows else None
| StarcoderdataPython |
6592522 | <gh_stars>1-10
# pylint: disable=protected-access,redefined-outer-name
"""Unit tests package."""
import os
from .const import MOCK_HOST
def load_fixture(filename):
    """Return the text content of tests/fixtures/<filename> (UTF-8)."""
    fixture_path = os.path.join(
        os.path.dirname(__file__), "fixtures", filename)
    with open(fixture_path, encoding="utf-8") as handle:
        return handle.read()
def load_binary(filename):
    """Return the raw bytes of tests/binary/<filename>."""
    binary_path = os.path.join(
        os.path.dirname(__file__), "binary", filename)
    with open(binary_path, "rb") as handle:
        return handle.read()
def function_url(function, host=MOCK_HOST, user=None, password=None):
    """Build the CGI URL for `function`, optionally embedding credentials.

    With a user (and optional password) the result looks like
    http://user:password@host:80/cgi-bin/<function>_cgi; without a user
    the password is ignored.
    """
    credentials = ""
    if user:
        credentials = user
        if password:
            credentials = credentials + ":" + password
        credentials = credentials + "@"
    return "http://" + credentials + host + ":80/cgi-bin/" + function + "_cgi"
| StarcoderdataPython |
8055509 | from tqdm import tqdm
def run_germ(exposure=None, N=2):
    ''' run count scans
    exposure : exposure time in secs
        if not set, use previously set exposure
    N : number of measurements
    '''
    # Set the detector frame time to the requested exposure; skipped when
    # exposure is None so the previously configured value is kept.
    if exposure is not None:
        yield from bp.mv(germ.frametime, exposure)
    # NOTE(review): `bp` and `germ` are expected in the enclosing (profile)
    # namespace -- confirm; `mv` conventionally lives in bluesky.plan_stubs
    # rather than bluesky.plans, so `bp.mv` may need to be `bps.mv`.
    yield from bp.count([germ], num=N)
8060984 | <filename>DissertationFigures.py
"""
Functions to make some of the figures I used in my dissertation
and in my ISMIR 2017 paper
"""
from BlockWindowFeatures import *
from Covers80Experiments import *
from CSMSSMTools import *
from Covers80 import *
from SongComparator import *
import scipy.io.wavfile
import librosa
def plotCSM(CSM, artist1, artist2, songName):
    """Render a cross-similarity matrix and mark its global minimum.

    :param CSM: 2D cross-similarity matrix (artist1 blocks x artist2 blocks).
    :param artist1: row-axis label.
    :param artist2: column-axis label.
    :param songName: title suffix.
    :returns: (row, col) index of the smallest entry of CSM.
    """
    [I2, J2] = np.meshgrid(np.arange(CSM.shape[1]), np.arange(CSM.shape[0]))
    CSM2 = np.array(CSM)
    # Optional band mask (disabled): restrict the minimum to near-diagonal cells.
    #CSM2[np.abs(I2 - J2) > 300] = np.inf
    idx = np.unravel_index(np.argmin(CSM2), CSM2.shape)
    print(idx)
    plt.imshow(CSM, cmap = 'afmhot', interpolation = 'nearest')
    # NOTE(review): plt.hold was removed in matplotlib >= 3.0; this code
    # targets an older matplotlib.
    plt.hold(True)
    plt.scatter(idx[1], idx[0], 50)
    plt.xlabel(artist2 + " Block Index")
    plt.ylabel(artist1 + " Block Index")
    plt.title("CSM " + songName)
    return idx
def getSampleSSMs():
    """For selected covers80 song pairs: compute beat-synchronous block
    features for both versions, plot the SSM-based CSM with its best match,
    save the matched audio snippets/spectrograms, and plot the two matched
    self-similarity blocks. Writes figures and wavs to DissertationFigures/.
    """
    Kappa = 0.1
    hopSize = 512
    TempoBias1 = 180
    TempoBias2 = 180
    DPixels = 400
    BeatsPerBlock = 8
    # Upper-triangular index grid used to unflatten condensed SSM vectors.
    p = np.arange(DPixels)
    [I, J] = np.meshgrid(p, p)
    FeatureParams = {'MFCCBeatsPerBlock':BeatsPerBlock, 'MFCCSamplesPerBlock':200, 'DPixels':DPixels, 'ChromaBeatsPerBlock':20, 'ChromasPerBlock':40}
    CSMTypes = {'MFCCs':'Euclidean', 'SSMs':'Euclidean', 'CurvsSS':'Euclidean', 'TorsSS':'Euclidean', 'D2s':'EMD1D', 'Chromas':'CosineOTI'}
    fin = open('covers32k/list1.list', 'r')
    files1 = [f.strip() for f in fin.readlines()]
    fin.close()
    fin = open('covers32k/list2.list', 'r')
    files2 = [f.strip() for f in fin.readlines()]
    fin.close()
    cmap = 'Spectral'
    #67 is a good male/female example
    for index in [11]:
        fileprefix = "Covers80%i"%index
        filename1 = "covers32k/" + files1[index] + ".mp3"
        filename2 = "covers32k/" + files2[index] + ".mp3"
        artist1 = getCovers80ArtistName(files1[index])
        artist2 = getCovers80ArtistName(files2[index])
        songName = getCovers80SongName(files1[index])

        print("Getting features for %s..."%filename1)
        (XAudio1, Fs1) = getAudio(filename1)
        (tempo, beats1) = getBeats(XAudio1, Fs1, TempoBias1, hopSize)
        (Features1, O1) = getBlockWindowFeatures((XAudio1, Fs1, tempo, beats1, hopSize, FeatureParams))
        # Beats-to-samples conversion ratio for this song.
        bRatio1 = float(Fs1)/hopSize

        print("Getting features for %s..."%filename2)
        (XAudio2, Fs2) = getAudio(filename2)
        (tempo, beats2) = getBeats(XAudio2, Fs2, TempoBias2, hopSize)
        (Features2, O2) = getBlockWindowFeatures((XAudio2, Fs2, tempo, beats2, hopSize, FeatureParams))
        bRatio2 = float(Fs2)/hopSize

        #Make SSM CSM
        plt.figure()
        CSM = getCSM(Features1['SSMs'], Features2['SSMs'])
        idx = plotCSM(CSM, artist1, artist2, songName)
        plt.savefig("DissertationFigures/CSM%i_SSM.svg"%index, bbox_inches = 'tight')

        # Unflatten the best-matching block of song 1 into a full symmetric
        # SSM, and export the corresponding audio snippet.
        D1 = np.zeros((DPixels, DPixels))
        D1[I < J] = Features1['SSMs'][idx[0]]
        D1 = D1 + D1.T
        t1l = beats1[idx[0]]/bRatio1
        t1r = beats1[idx[0]+BeatsPerBlock]/bRatio1
        s1 = beats1[idx[0]]*hopSize
        s2 = beats1[idx[0]+BeatsPerBlock]*hopSize
        x1 = XAudio1[s1:s2]
        scipy.io.wavfile.write("DissertationFigures/%i_1.wav"%index, Fs1, x1)

        # Same for the matched block of song 2.
        D2 = np.zeros((DPixels, DPixels))
        D2[I < J] = Features2['SSMs'][idx[1]]
        D2 = D2 + D2.T
        t2l = beats2[idx[1]]/bRatio2
        t2r = beats2[idx[1]+BeatsPerBlock]/bRatio2
        s1 = beats2[idx[1]]*hopSize
        s2 = beats2[idx[1]+BeatsPerBlock]*hopSize
        x2 = XAudio2[s1:s2]
        scipy.io.wavfile.write("DissertationFigures/%i_2.wav"%index, Fs2, x2)

        #Plot spectrograms
        plt.clf()
        plt.figure(figsize=(12, 5))
        plt.subplot(211)
        S1 = librosa.logamplitude(np.abs(librosa.stft(x1)))
        #librosa.display.specshow(S1, x_axis='time', y_axis='log')
        plt.subplot(212)
        S2 = librosa.logamplitude(np.abs(librosa.stft(x2)))
        #librosa.display.specshow(S2, x_axis='time', y_axis='log')
        plt.savefig("DissertationFigures/Spectrograms%i.svg"%index, bbox_inches='tight')

        #Plot SSMs
        plt.clf()
        plt.subplot(121)
        plt.title(artist1)
        plt.imshow(D1, interpolation = 'nearest', cmap = cmap, extent = (t1l, t1r, t1r, t1l))
        plt.xlabel("Time (sec)")
        plt.ylabel("Time (sec)")
        plt.subplot(122)
        plt.title(artist2)
        plt.imshow(D2, interpolation = 'nearest', cmap = cmap, extent = (t2l, t2r, t2r, t2l))
        plt.xlabel("Time (sec)")
        plt.ylabel("Time (sec)")
        plt.savefig("DissertationFigures/SSMs%i.svg"%index, bbox_inches = 'tight')

        # #Make HPCP CSM
        # off1 = 400
        # off2 = 700
        # F1 = Features1['Chromas'][off1:off1+200]
        # F2 = Features2['Chromas'][off2:off2+200]
        # CSM = getCSMType(F1, O1, F2, O2, 'CosineOTI')
        # idx = plotCSM(CSM, artist1, artist2, songName)
        # plt.savefig("DissertationFigures/CSM%i_HPCP.svg"%index, bbox_inches = 'tight')
        #
        # #Plot HPCP Blocks
        # plt.clf()
        # HPCP1 = Features1['Chromas'][idx[0] + off1]
        # HPCP2 = Features2['Chromas'][idx[1] + off2]
        # HPCP1 = np.reshape(HPCP1, [len(HPCP1)/12, 12])
        # HPCP2 = np.reshape(HPCP2, [len(HPCP2)/12, 12])
        # plt.subplot(211)
        # librosa.display.specshow(HPCP1.T, y_axis = 'chroma')
        # plt.title("HPCP %s"%artist1)
        # plt.subplot(212)
        # librosa.display.specshow(HPCP2.T, y_axis = 'chroma')
        # plt.title("HPCP %s"%artist2)
        # plt.savefig("DissertationFigures/HPCP_%i.svg"%index, bbox_inches = 'tight')
def makeCSMWinSizeVideo():
    """Render one PNG frame per block-window size (4..29 beats) contrasting a
    true cover pair against a non-cover pair: each frame shows the CSM, the
    KNN binary matrix, and the Smith-Waterman alignment matrix for both
    pairs. Frames are saved as <Win>.png for assembly into a video.
    """
    Kappa = 0.1
    hopSize = 512
    TempoBias = 180
    # index1: a true cover pair (list1 vs list2); index2: an unrelated song.
    index1 = 6
    index2 = 62
    fin = open('covers32k/list1.list', 'r')
    files1 = [f.strip() for f in fin.readlines()]
    fin.close()
    fin = open('covers32k/list2.list', 'r')
    files2 = [f.strip() for f in fin.readlines()]
    fin.close()
    filename1 = "covers32k/" + files1[index1] + ".mp3"
    filename2 = "covers32k/" + files2[index1] + ".mp3"
    filename3 = "covers32k/" + files2[index2] + ".mp3"
    artist1 = getCovers80ArtistName(files1[index1])
    artist2 = getCovers80ArtistName(files2[index1])
    artist3 = getCovers80ArtistName(files2[index2])
    songName1 = getCovers80SongName(files1[index1])
    songName2 = getCovers80SongName(files2[index1])
    songName3 = getCovers80SongName(files2[index2])
    FeatureParams = {'MFCCBeatsPerBlock':4, 'DPixels':50}
    CSMTypes = {'MFCCs':'Euclidean', 'SSMs':'Euclidean', 'SSMsDiffusion':'Euclidean', 'Geodesics':'Euclidean', 'Jumps':'Euclidean', 'Curvs':'Euclidean', 'Tors':'Euclidean', 'CurvsSS':'Euclidean', 'TorsSS':'Euclidean', 'D2s':'EMD1D', 'Chromas':'CosineOTI'}
    (XAudio1, Fs1) = getAudio(filename1)
    (tempo1, beats1) = getBeats(XAudio1, Fs1, TempoBias, hopSize)
    (XAudio2, Fs2) = getAudio(filename2)
    (tempo2, beats2) = getBeats(XAudio2, Fs2, TempoBias, hopSize)
    (XAudio3, Fs3) = getAudio(filename3)
    (tempo3, beats3) = getBeats(XAudio3, Fs3, TempoBias, hopSize)
    FeatureName = 'SSMs'
    plt.figure(figsize=(15, 12))
    N1 = len(beats1)
    N2 = len(beats2)
    N3 = len(beats3)
    for Win in range(4, 30):
        # Recompute block features at this window size and re-run the
        # Smith-Waterman comparison for both pairs.
        FeatureParams['MFCCBeatsPerBlock'] = Win
        (Features1, O1) = getBlockWindowFeatures((XAudio1, Fs1, tempo1, beats1, hopSize, FeatureParams))
        (Features2, O2) = getBlockWindowFeatures((XAudio2, Fs2, tempo2, beats2, hopSize, FeatureParams))
        (Features3, O3) = getBlockWindowFeatures((XAudio3, Fs3, tempo3, beats3, hopSize, FeatureParams))
        res1 = getCSMSmithWatermanScores(Features1[FeatureName], O1, Features2[FeatureName], O2, Kappa, CSMTypes[FeatureName], True)
        res2 = getCSMSmithWatermanScores(Features1[FeatureName], O1, Features3[FeatureName], O3, Kappa, CSMTypes[FeatureName], True)
        #[artist1, artist2, artist3] = ["", "", ""]
        plt.clf()
        # Top row: the true cover pair.
        plt.subplot(231)
        plt.imshow(res1['CSM'], cmap = 'afmhot', interpolation = 'nearest')
        plt.title("True Cover, BeatsPerBlock = %i\n%s"%(Win, songName1))
        plt.xlabel("%s Beat Index"%artist2)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N2])
        plt.ylim([N1, 0])
        plt.subplot(232)
        plt.title("KNN Binary Matrix")
        plt.imshow(1 - res1['DBinary'], cmap = 'gray')
        plt.xlabel("%s Beat Index"%artist2)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N2])
        plt.ylim([N1, 0])
        plt.subplot(233)
        plt.imshow(res1['D'], cmap = 'afmhot', interpolation = 'nearest')
        plt.title("SMWat Score = %i"%res1['score'])
        plt.xlabel("%s Beat Index"%artist2)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N2])
        plt.ylim([N1, 0])
        # Bottom row: the non-cover pair, for contrast.
        plt.subplot(234)
        plt.imshow(res2['CSM'], cmap = 'afmhot', interpolation = 'nearest')
        plt.title("False Cover, BeatsPerBlock = %i\n%s vs\n %s"%(Win, songName1, songName3))
        plt.xlabel("%s Beat Index"%artist3)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N3])
        plt.ylim([N1, 0])
        plt.subplot(235)
        plt.title("KNN Binary Matrix")
        plt.imshow(1 - res2['DBinary'], cmap = 'gray')
        plt.xlabel("%s Beat Index"%artist3)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N3])
        plt.ylim([N1, 0])
        plt.subplot(236)
        plt.imshow(res2['D'], cmap = 'afmhot', interpolation = 'nearest')
        plt.title("SMWat Score = %i"%res2['score'])
        plt.xlabel("%s Beat Index"%artist3)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N3])
        plt.ylim([N1, 0])
        plt.savefig("%i.png"%Win, bbox_inches = 'tight')
def makeCSMSSMSizeVideo():
    """Render one PNG frame per SSM resolution showing how the
    cross-similarity matrix, its KNN binary mask, and the Smith-Waterman
    alignment for a true covers80 pair degrade as DPixels shrinks.

    Frames are written as <count>.png in the working directory, intended to
    be stitched into a video afterwards.

    NOTE(review): relies on helpers defined elsewhere in this file
    (getAudio, getBeats, getBlockWindowFeatures, getCSMSmithWatermanScores,
    getCovers80ArtistName, getCovers80SongName) and on matplotlib.pyplot.
    """
    Kappa = 0.1  # fraction of nearest neighbors kept in the binary CSM
    hopSize = 512  # audio hop size (samples) for beat tracking / features
    TempoBias = 180  # tempo prior (BPM) handed to the beat tracker
    index = 6  # song index into the parallel covers80 lists
    # list1/list2 are parallel: entry i in each is the same song performed
    # by two different artists (a true cover pair).
    fin = open('covers32k/list1.list', 'r')
    files1 = [f.strip() for f in fin.readlines()]
    fin.close()
    fin = open('covers32k/list2.list', 'r')
    files2 = [f.strip() for f in fin.readlines()]
    fin.close()
    filename1 = "covers32k/" + files1[index] + ".mp3"
    filename2 = "covers32k/" + files2[index] + ".mp3"
    artist1 = getCovers80ArtistName(files1[index])
    artist2 = getCovers80ArtistName(files2[index])
    songName = getCovers80SongName(files1[index])
    FeatureParams = {'MFCCBeatsPerBlock':20, 'DPixels':50}
    # Distance measure to use per feature when building cross-similarity.
    CSMTypes = {'MFCCs':'Euclidean', 'SSMs':'Euclidean', 'SSMsDiffusion':'Euclidean', 'Geodesics':'Euclidean', 'Jumps':'Euclidean', 'Curvs':'Euclidean', 'Tors':'Euclidean', 'CurvsSS':'Euclidean', 'TorsSS':'Euclidean', 'D2s':'EMD1D', 'Chromas':'CosineOTI'}
    # Load audio and track beats once; only the SSM resolution varies below.
    (XAudio1, Fs1) = getAudio(filename1)
    (tempo1, beats1) = getBeats(XAudio1, Fs1, TempoBias, hopSize)
    (XAudio2, Fs2) = getAudio(filename2)
    (tempo2, beats2) = getBeats(XAudio2, Fs2, TempoBias, hopSize)
    FeatureName = 'SSMs'
    plt.figure(figsize=(15, 6))
    N1 = len(beats1)
    N2 = len(beats2)
    count = 0  # frame number, used as the output filename
    # Sweep SSM resolutions from fine (400x400) down to degenerate (2x2).
    for DPixels in [400, 350, 300, 250, 200, 150, 100, 90, 80, 70, 60, 50, 45, 40, 35, 30, 25, 20, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2]:
        FeatureParams['DPixels'] = DPixels
        (Features1, O1) = getBlockWindowFeatures((XAudio1, Fs1, tempo1, beats1, hopSize, FeatureParams))
        (Features2, O2) = getBlockWindowFeatures((XAudio2, Fs2, tempo2, beats2, hopSize, FeatureParams))
        # Cross-similarity + Smith-Waterman alignment on the SSM feature only.
        res = getCSMSmithWatermanScores(Features1[FeatureName], O1, Features2[FeatureName], O2, Kappa, CSMTypes[FeatureName], True)
        plt.clf()
        # Left panel: raw cross-similarity matrix.
        plt.subplot(131)
        plt.imshow(res['CSM'], cmap = 'afmhot', interpolation = 'nearest')
        plt.title("%i x %i SSMs"%(DPixels, DPixels))
        plt.xlabel("%s Beat Index"%artist2)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N2])
        plt.ylim([N1, 0])
        # Middle panel: binarized K-nearest-neighbor mask of the CSM.
        plt.subplot(132)
        plt.title("KNN Binary Matrix")
        plt.imshow(1 - res['DBinary'], cmap = 'gray')
        plt.xlabel("%s Beat Index"%artist2)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N2])
        plt.ylim([N1, 0])
        # Right panel: Smith-Waterman accumulated score matrix.
        plt.subplot(133)
        plt.imshow(res['D'], cmap = 'afmhot', interpolation = 'nearest')
        plt.title("SMWat Score = %i"%res['score'])
        plt.xlabel("%s Beat Index"%artist2)
        plt.ylabel("%s Beat Index"%artist1)
        plt.xlim([0, N2])
        plt.ylim([N1, 0])
        plt.savefig("%i.png"%count, bbox_inches = 'tight')
        count += 1
def getFalseCoversPair():
    """Run the full two-song comparison pipeline on a NON-cover pair
    (two different songs from the covers80 lists) to produce baseline
    figures for contrast with the true-cover results.

    NOTE(review): depends on compareTwoSongs and the getCovers80* helpers
    defined elsewhere in this file; artist1/artist2 are computed but unused
    here — presumably kept for symmetry with the true-cover variants.
    """
    Kappa = 0.1  # nearest-neighbor fraction for the binary CSM
    hopSize = 512
    TempoBias1 = 180  # tempo prior (BPM) for song 1
    TempoBias2 = 180  # tempo prior (BPM) for song 2
    # Deliberately mismatched indices: these are two different songs.
    index1 = 6
    index2 = 62
    fin = open('covers32k/list1.list', 'r')
    files1 = [f.strip() for f in fin.readlines()]
    fin.close()
    fin = open('covers32k/list2.list', 'r')
    files2 = [f.strip() for f in fin.readlines()]
    fin.close()
    filename1 = "covers32k/" + files1[index1] + ".mp3"
    filename2 = "covers32k/" + files2[index2] + ".mp3"
    fileprefix = "Covers80_%i_%i"%(index1, index2)
    artist1 = getCovers80ArtistName(files1[index1])
    artist2 = getCovers80ArtistName(files2[index2])
    songName1 = getCovers80SongName(files1[index1])
    songName2 = getCovers80SongName(files2[index2])
    # Alternative hard-coded pair kept for reference:
    #filename1 = 'MIREX_CSIBSF/GotToGiveItUp.mp3'
    #filename2 = 'MIREX_CSIBSF/BlurredLines.mp3'
    #fileprefix = "BlurredLines"
    #FeatureParams = {'DPixels':200, 'NCurv':400, 'NJump':400, 'NTors':400, 'D2Samples':50, 'CurvSigma':20, 'D2Samples':40, 'MFCCSamplesPerBlock':200, 'GeodesicDelta':10, 'NGeodesic':400, 'lifterexp':0.6, 'MFCCBeatsPerBlock':12, 'ChromaBeatsPerBlock':20, 'ChromasPerBlock':40}
    #FeatureParams = {'ChromaBeatsPerBlock':20, 'ChromasPerBlock':40, 'DPixels':200, 'MFCCBeatsPerBlock':20}
    CurvSigmas = [10, 60]
    FeatureParams = {'MFCCBeatsPerBlock':20, 'MFCCSamplesPerBlock':200, 'DPixels':50, 'ChromaBeatsPerBlock':20, 'ChromasPerBlock':40}
    # Distance measure per feature when building cross-similarity matrices.
    CSMTypes = {'MFCCs':'Euclidean', 'SSMs':'Euclidean', 'SSMsDiffusion':'Euclidean', 'Geodesics':'Euclidean', 'Jumps':'Euclidean', 'Curvs':'Euclidean', 'Tors':'Euclidean', 'CurvsSS':'Euclidean', 'TorsSS':'Euclidean', 'D2s':'EMD1D', 'Chromas':'CosineOTI'}
    # Register a CSM type for each sigma-parameterized curvature feature.
    for sigma in CurvSigmas:
        CSMTypes['Jumps%g'%sigma] = 'Euclidean'
        CSMTypes['Curvs%g'%sigma] = 'Euclidean'
        CSMTypes['Tors%g'%sigma] = 'Euclidean'
    compareTwoSongs(filename1, TempoBias1, filename2, TempoBias2, hopSize, FeatureParams, CSMTypes, Kappa, fileprefix, songName1, songName2)
if __name__ == '__main__':
    # Entry point: run one experiment at a time; swap the comment to pick
    # a different figure/video generator.
    getSampleSSMs()
    #getFalseCoversPair()
    #makeCSMWinSizeVideo()
    #makeCSMSSMSizeVideo()
| StarcoderdataPython |
12848369 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: parse_bpmnxml.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from flowable_service_sdk.model.flowable_service import bpmn_sequence_flow_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__sequence__flow__pb2
from flowable_service_sdk.model.flowable_service import bpmn_exclusive_gateway_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__exclusive__gateway__pb2
from flowable_service_sdk.model.flowable_service import bpmn_start_event_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__start__event__pb2
from flowable_service_sdk.model.flowable_service import bpmn_end_event_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__end__event__pb2
from flowable_service_sdk.model.flowable_service import bpmn_user_task_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__user__task__pb2
from flowable_service_sdk.model.flowable_service import bpmn_process_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__process__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='parse_bpmnxml.proto',
package='process_definition',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x13parse_bpmnxml.proto\x12\x12process_definition\x1a\x44\x66lowable_service_sdk/model/flowable_service/bpmn_sequence_flow.proto\x1aHflowable_service_sdk/model/flowable_service/bpmn_exclusive_gateway.proto\x1a\x42\x66lowable_service_sdk/model/flowable_service/bpmn_start_event.proto\x1a@flowable_service_sdk/model/flowable_service/bpmn_end_event.proto\x1a@flowable_service_sdk/model/flowable_service/bpmn_user_task.proto\x1a>flowable_service_sdk/model/flowable_service/bpmn_process.proto\"&\n\x13ParseBPMNXMLRequest\x12\x0f\n\x07\x62pmnXML\x18\x01 \x01(\t\"|\n\x1bParseBPMNXMLResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12+\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1d.flowable_service.BPMNProcessb\x06proto3')
,
dependencies=[flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__sequence__flow__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__exclusive__gateway__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__start__event__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__end__event__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__user__task__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__process__pb2.DESCRIPTOR,])
_PARSEBPMNXMLREQUEST = _descriptor.Descriptor(
name='ParseBPMNXMLRequest',
full_name='process_definition.ParseBPMNXMLRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bpmnXML', full_name='process_definition.ParseBPMNXMLRequest.bpmnXML', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=451,
serialized_end=489,
)
_PARSEBPMNXMLRESPONSEWRAPPER = _descriptor.Descriptor(
name='ParseBPMNXMLResponseWrapper',
full_name='process_definition.ParseBPMNXMLResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='process_definition.ParseBPMNXMLResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='process_definition.ParseBPMNXMLResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='process_definition.ParseBPMNXMLResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='process_definition.ParseBPMNXMLResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=491,
serialized_end=615,
)
_PARSEBPMNXMLRESPONSEWRAPPER.fields_by_name['data'].message_type = flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__process__pb2._BPMNPROCESS
DESCRIPTOR.message_types_by_name['ParseBPMNXMLRequest'] = _PARSEBPMNXMLREQUEST
DESCRIPTOR.message_types_by_name['ParseBPMNXMLResponseWrapper'] = _PARSEBPMNXMLRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ParseBPMNXMLRequest = _reflection.GeneratedProtocolMessageType('ParseBPMNXMLRequest', (_message.Message,), {
'DESCRIPTOR' : _PARSEBPMNXMLREQUEST,
'__module__' : 'parse_bpmnxml_pb2'
# @@protoc_insertion_point(class_scope:process_definition.ParseBPMNXMLRequest)
})
_sym_db.RegisterMessage(ParseBPMNXMLRequest)
ParseBPMNXMLResponseWrapper = _reflection.GeneratedProtocolMessageType('ParseBPMNXMLResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _PARSEBPMNXMLRESPONSEWRAPPER,
'__module__' : 'parse_bpmnxml_pb2'
# @@protoc_insertion_point(class_scope:process_definition.ParseBPMNXMLResponseWrapper)
})
_sym_db.RegisterMessage(ParseBPMNXMLResponseWrapper)
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
12835854 | # -*- coding: utf-8 -*-
"""
@Time : 2020/3/4 13:58
@Author : 半纸梁
@File : urls.py
"""
from django.urls import path
from course import views
# Application namespace for URL reversing, e.g. "course:index".
app_name = "course"

urlpatterns = [
    # Course landing / listing page.
    path("index/", views.CourseIndexView.as_view(), name="index"),
    # Detail page for one course, keyed by its integer primary key.
    path("<int:course_id>/", views.CourseDetailView.as_view(), name="course_detail"),
]
6455399 | from typing import Any
from typing import Mapping
from typing import Optional
from fastapi import FastAPI
from glassio.dispatcher import IDispatcher
from glassio.event_bus import IEventBus
from glassio.initializable_components import InitializableComponent
from glassio.logger import ILogger
from amocrm_asterisk_ng.infrastructure import ISelectableFactory
from .AmocrmComponent import AmocrmComponent
from .AmocrmComponentConfig import AmocrmComponentConfig
from .kernel import AmocrmKernelComponentFactory
from .widgets import WidgetComponentFactory
__all__ = [
"AmocrmComponentFactory"
]
class AmocrmComponentFactory(ISelectableFactory):
    """Selectable factory that assembles the amoCRM integration component.

    Builds the widget sub-component and the amoCRM kernel sub-component
    from the supplied settings and wires them into one ``AmocrmComponent``.
    """

    __slots__ = (
        "__app",
        "__dispatcher",
        "__event_bus",
        "__logger",
    )

    def __init__(
        self,
        app: FastAPI,
        event_bus: IEventBus,
        dispatcher: IDispatcher,
        logger: ILogger,
    ) -> None:
        self.__app = app
        self.__dispatcher = dispatcher
        # NOTE(review): the event bus is stored but not used by this factory;
        # kept to preserve the constructor contract.
        self.__event_bus = event_bus
        self.__logger = logger

    def unique_tag(self) -> str:
        # Tag under which this factory is registered/selected.
        return "amocrm"

    def get_instance(
        self,
        settings: Optional[Mapping[str, Any]] = None
    ) -> InitializableComponent:
        """Build the composite amoCRM component.

        :param settings: raw configuration mapping; ``None`` (the declared
            default) is treated as an empty mapping.  Previously passing
            ``None`` crashed with ``TypeError`` on ``**settings``.
        """
        config = AmocrmComponentConfig(**(settings or {}))

        widget_component_factory = WidgetComponentFactory(
            app=self.__app,
            dispatcher=self.__dispatcher,
            logger=self.__logger,
        )
        widget_component = widget_component_factory.get_instance(
            settings=config.widget,
        )

        amocrm_kernel_factory = AmocrmKernelComponentFactory(
            app=self.__app,
            dispatcher=self.__dispatcher,
            logger=self.__logger,
        )
        amocrm_kernel_component = amocrm_kernel_factory.get_instance(
            settings=config.kernel,
        )

        return AmocrmComponent(
            amocrm_kernel_component=amocrm_kernel_component,
            widget_component=widget_component,
        )
| StarcoderdataPython |
6673474 | import logging
import pprint
from dataclasses import dataclass
from typing import Any, Dict, Mapping, Optional
import satosa.context
import satosa.internal
from satosa.attribute_mapping import AttributeMapper
from satosa.micro_services.base import ResponseMicroService
from eduid_userdb import UserDB
from eduid_scimapi.db.userdb import ScimApiUser, ScimApiUserDB
logger = logging.getLogger(__name__)
@dataclass
class Config(object):
    # MongoDB connection URI used for both the eduID and SCIM user databases.
    mongo_uri: str
    # Maps a SAML IdP entity id (issuer) to the SCIM "data owner" domain.
    idp_to_data_owner: Mapping[str, str]
class ScimAttributes(ResponseMicroService):
    """
    Add attributes from the scim db to the responses.
    """

    def __init__(self, config: Mapping[str, Any], internal_attributes: Dict[str, Any], *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.config = Config(**config)
        # Setup databases
        self.eduid_userdb = UserDB(db_uri=self.config.mongo_uri, db_name='eduid_scimapi')
        logger.info(f'Connected to eduid db: {self.eduid_userdb}')
        # TODO: Implement real 'data owner' to database lookup
        data_owner = 'eduid.se'
        _owner = data_owner.replace('.', '_')  # replace dots with underscores
        coll = f'{_owner}__users'
        # TODO: rename old collection and remove this
        if data_owner == 'eduid.se':
            coll = 'profiles'
        # One ScimApiUserDB per data owner; currently hard-wired to eduid.se.
        self._userdbs = {'eduid.se': ScimApiUserDB(db_uri=self.config.mongo_uri, collection=coll)}
        self.converter = AttributeMapper(internal_attributes)
        # Get the internal attribute name for the eduPersonPrincipalName that will be
        # used to find users in the SCIM database
        _int = self.converter.to_internal('saml', {'eduPersonPrincipalName': 'something'})
        self.ext_id_attr = list(_int.keys())[0]
        logger.debug(f'SCIM externalId internal attribute name: {self.ext_id_attr}')

    def process(
        self, context: satosa.context.Context, data: satosa.internal.InternalData,
    ) -> satosa.internal.InternalData:
        """Overlay attributes from the user's SCIM profile onto the response."""
        logger.debug(f'Data as dict:\n{pprint.pformat(data.to_dict())}')
        user = self._get_user(data)
        if user:
            # TODO: handle multiple profiles beyond just picking the first one
            profiles = user.profiles.keys()
            if profiles:
                # Deterministic choice: alphabetically first profile name.
                _name = sorted(profiles)[0]
                logger.info(f'Applying attributes from SCIM user {user.scim_id}, profile {_name}')
                profile = user.profiles[_name]
                # Map the profile's SAML-style attributes to internal names.
                update = self.converter.to_internal('saml', profile.attributes)
                # NOTE(review): the loop variable reuses/shadows the profile
                # name ``_name`` above — confirm intentional.
                for _name, _new in update.items():
                    _old = data.attributes.get(_name)
                    if _old != _new:
                        # Only overwrite (and log) attributes that actually change.
                        logger.debug(f'Changing attribute {_name} from {repr(_old)} to {repr(_new)}')
                        data.attributes[_name] = _new
        return super().process(context, data)

    def _get_user(self, data: satosa.internal.InternalData) -> Optional[ScimApiUser]:
        """Look up the SCIM user matching this response's externalId value.

        Returns None when the issuing IdP has no configured data owner, no
        userdb exists for that owner, or the response carries no externalId.
        """
        data_owner = self.config.idp_to_data_owner.get(data.auth_info.issuer)
        logger.debug(f'Data owner for IdP {data.auth_info.issuer}: {data_owner}')
        if not data_owner:
            return None
        userdb = self._userdbs.get(data_owner)
        if not userdb:
            logger.error(f'Found no userdb for data owner {data_owner}')
            return None
        # self.ext_id_attr is the internal name mapped from eduPersonPrincipalName.
        _ext_ids = data.attributes.get(self.ext_id_attr, [])
        if _ext_ids:
            # Only the first externalId value is considered.
            ext_id = _ext_ids[0]
            user = userdb.get_user_by_external_id(ext_id)
            if user:
                logger.info(
                    f'Found SCIM user {user.scim_id} using {self.ext_id_attr} {ext_id} (data owner: {data_owner})'
                )
            else:
                logger.info(f'No user found using {self.ext_id_attr} {ext_id}')
            return user
        return None
| StarcoderdataPython |
3412693 | <gh_stars>0
#!/usr/bin/env python3
# Amount of water, milk, and coffee beans required for a cup of coffee
WATER, MILK, COFFEE = (200, 50, 15)
# Enter the available amount of water, milk, and coffee beans
water_check = int(input("Write how many ml of water the coffee machine has: "))
milk_check = int(input("Write how many ml of milk the coffee machine has: "))
coffee_check = int(input("Write how many grams of coffee beans the machine has: "))
cups = int(input("Write how many cups of coffee you will need: "))
# Calculate the amount of water, milk, and coffee beans
water_amount = water_check // WATER
milk_amount = milk_check // MILK
coffee_amount = coffee_check // COFFEE
# Maximum cups that the coffee machine can make
max_cup = min([water_amount, milk_amount, coffee_amount])
if max_cup == cups:
print("Yes, I can make that amount of coffee")
elif max_cup > cups:
print(f"Yes, I can make that amount of coffee {max_cup - cups} and even excess more than that")
elif max_cup < cups:
print(f"No, I can only {max_cup} cups of coffee")
| StarcoderdataPython |
78647 | <gh_stars>0
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .forms import UserCreationForm
from .models import CustomUser, Party
from django.contrib.auth import login
# Create your views here.
def index(request):
    """Send authenticated users to the party list; everyone else gets login."""
    if not request.user.is_authenticated:
        return render(request, 'registration/login.html')
    return redirect('app:party')
def party(request):
    """Render the list of every party."""
    return render(request, 'parties.html', {'parties': Party.objects.all()})
@login_required(login_url='/accounts/login/')
def welcomeParty(request, id):
    """Join the party identified by ``id`` if the user has none yet;
    otherwise just show the party they already belong to.

    NOTE: ``id`` shadows the builtin but matches the URLconf keyword name.
    """
    user = request.user
    # Re-fetch by email to get the CustomUser row (with the party FK).
    db_user = CustomUser.objects.get(email=user.email)
    if db_user.party is None:
        # First join: attach the chosen party to the user and persist it.
        chosen_party = Party.objects.get(id=id)
        db_user.party = chosen_party
        db_user.save()
        context = {
            'Success': 'Successfully Joined New Party',
            'chosen_party': chosen_party,
        }
    else:
        # Already a member: show the existing party instead of switching.
        in_party = db_user.party
        context = {
            'party': in_party,
        }
    return render(request, 'welcome.html', context)
def register(request):
    """Render the signup form and create a new user on POST.

    NOTE(review): the posted data is read straight from ``request.POST``
    and never validated through the form — missing keys raise KeyError and
    no uniqueness/password checks run.  Confirm this is intentional.
    """
    # The form *class* (not an instance) is handed to the template.
    form = UserCreationForm
    context = {
        'form': form,
    }
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        email = request.POST['email']
        # create_user is expected to hash the password (Django manager
        # convention) — verify against the CustomUser manager.
        user = CustomUser.objects.create_user(username, email, password)
        user.save()
        # Log the new user in immediately and send them home.
        login(request, user)
        return redirect('app:home')
    return render(request, 'register.html', context)
| StarcoderdataPython |
8141200 | <filename>gym_lightriders/envs/__init__.py
"""Banana Gym Enviornments."""
from gym_lightriders.envs.light_rider_env import LightRidersEnv | StarcoderdataPython |
5194533 | from sdk.color_print import c_print
from user_profiles import usr_get, usr_add, usr_compare
def migrate(tenant_sessions: list, logger: object):
    """Copy every User Profile from the source tenant (first session in
    ``tenant_sessions``) to each clone tenant (the remaining sessions).

    Returns a list with, per clone tenant, the users that were added.
    """
    # Pull user profiles and role definitions from every tenant.
    profiles = [usr_get.get_users(session, logger) for session in tenant_sessions]
    roles = [usr_get.get_user_roles(session, logger) for session in tenant_sessions]

    # First tenant is the source of truth; the rest are clones.
    source_profiles = profiles[0]
    clone_profiles = profiles[1:]
    clone_roles = roles[1:]

    # Per clone, work out which source users are missing there (role IDs are
    # translated against that clone's own role table).
    pending = [
        usr_compare.compare_users(source_profiles, usr_list, role_list)
        for usr_list, role_list in zip(clone_profiles, clone_roles)
    ]

    # Create the missing users on each clone tenant.
    added_per_tenant = [
        usr_add.add_users(session, users, logger)
        for session, users in zip(tenant_sessions[1:], pending)
    ]

    logger.info('Finished migrating User Profiles')
    return added_per_tenant
if __name__ == '__main__':
    import logging

    from sdk.load_config import load_config_create_sessions

    # ``migrate`` requires a logger as its second argument; previously it was
    # called without one, which raised a TypeError before any work was done.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    tenant_sessions = load_config_create_sessions()
    migrate(tenant_sessions, logger)
8099175 | <filename>linkedlist/code_signal/loop-tunnel/lt-02.py
# Given integers n, l and r, find the number of ways to
# represent n as a sum of two integers A and B such that
# l ≤ A ≤ B ≤ r.
# Example
# For n = 6, l = 2, and r = 4, the output should be
# countSumOfTwoRepresentations2(n, l, r) = 2.
# There are just two ways to write 6 as A + B,
# where 2 ≤ A ≤ B ≤ 4: 6 = 2 + 4 and 6 = 3 + 3.
def countSumOfTwoRepresentations2(n, l, r):
    """Count the ways to write n = A + B with l <= A <= B <= r."""
    # Smallest admissible A: at least l, and since B = n - A may not
    # exceed r, also at least n - r.
    lowest = max(n - r, l)
    # A walks upward while it stays within [l, r]; the pair is valid only
    # while A does not pass its partner B, i.e. 2*A <= n.
    return sum(1 for a in range(lowest, r + 1) if 2 * a <= n)
print(countSumOfTwoRepresentations2(10, 4, 6))
| StarcoderdataPython |
11287710 | <gh_stars>0
import datetime
import os
import random
import base64
from mirai import Plain, At, AtAll, Image
from mirai.models.message import FlashImage
from plugins import BaseFunction
from plugins import Clash
from plugins import Clock
from plugins import RPG
from plugins import autoReply
from plugins import baidu
from plugins import command
from plugins import dataManage
from plugins import getNow
from plugins import keyReply
from plugins import logManage
from plugins import operator
from plugins import talk
from plugins import weather
async def send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq):
    """Send a simple text/image reply, optionally prefixed with an @ mention.

    :param mode: message context — presumably 0/2 = private-like contexts and
        1 = group chat (only group replies get @ handling); confirm upstream.
    :param merge_reply: when True, send text and image as one combined message.
    :param need_at: whether the group reply should carry an @ at all.
    :param at_qq: >0 = @ that member, 0 = @ the sender, -1 = @ everyone.
    """
    if reply_text is None:
        # Fallback text shown when a handler produced no reply at all.
        reply_text = '【突发未知bug,请使用“*send 信息”指令,将如何触发的以及状态尽可能简略地告诉主人】'
    if mode == 0 or mode == 2 or (mode == 1 and not need_at):
        # Plain reply without any @ mention.
        if not merge_reply or reply_image == '' or reply_text == '':
            # Send image and text as separate messages (either may be absent).
            if reply_image != '':
                await bot.send(event, await Image.from_local(filename=reply_image))
            if reply_text != '':
                await bot.send(event, reply_text)
        else:
            # Both present and merging requested: one combined message.
            await bot.send(event, [reply_text, await Image.from_local(filename=reply_image)])
    else:
        # Group reply that should @ someone.
        if at_qq > 0:
            member = await bot.get_group_member(event.sender.group.id, at_qq)
            if member is not None:
                # Target is still in the group: prefix each message with @.
                if reply_image != '':
                    await bot.send(event, [At(at_qq), await Image.from_local(filename=reply_image)])
                if reply_text != '':
                    await bot.send(event, [At(at_qq), reply_text])
            else:
                # Target left the group: fall back to sending without the @.
                if reply_image != '':
                    await bot.send(event, await Image.from_local(filename=reply_image))
                if reply_text != '':
                    await bot.send(event, reply_text)
        elif at_qq == 0:
            # @ the original sender.
            if reply_image != '':
                await bot.send(event, [At(event.sender.id), await Image.from_local(filename=reply_image)])
            if reply_text != '':
                await bot.send(event, [At(event.sender.id), reply_text])
        elif at_qq == -1:
            # @ everyone in the group.
            if reply_image != '':
                await bot.send(event, [AtAll(), await Image.from_local(filename=reply_image)])
            if reply_text != '':
                await bot.send(event, [AtAll(), reply_text])
async def send_complex_message(bot, event, mode, complex_reply, complex_at):
    """Send a pre-built message chain, optionally prefixing @ mentions.

    :param complex_reply: list of mirai message components; mutated in place
        when mentions are prepended.
    :param complex_at: {'at_type': ..., 'at': ...} — at_type -1 = no mention,
        0 = single @ (at >0 member / 0 sender / -1 everyone),
        1 = @ a named member group stored in the group's config.
    """
    if mode == 0 or mode == 2:
        # Non-group contexts: send the chain as-is.
        await bot.send(event, complex_reply)
    elif mode == 1:
        if complex_at['at_type'] == -1:
            await bot.send(event, complex_reply)
        elif complex_at['at_type'] == 0:
            if complex_at['at'] > 0:
                # Only @ the member if they are still in the group.
                member = await bot.get_group_member(event.sender.group.id, complex_at['at'])
                if member is not None:
                    complex_reply.insert(0, At(complex_at['at']))
                await bot.send(event, complex_reply)
            elif complex_at['at'] == 0:
                # @ the original sender.
                complex_reply.insert(0, At(event.sender.id))
                await bot.send(event, complex_reply)
            elif complex_at['at'] == -1:
                # @ everyone.
                complex_reply.insert(0, AtAll())
                await bot.send(event, complex_reply)
        elif complex_at['at_type'] == 1:
            # @ a named member group defined in this chat group's config.
            group = dataManage.read_group(event.sender.group.id)
            # (removed unused local ``init = False`` from the original)
            if complex_at['at'] not in group['group']:
                # Unknown group name: fall back to a literal "@name" text.
                complex_reply.insert(0, Plain('@' + str(complex_at['at']) + ' '))
            else:
                # Separator between the mention list and the message body.
                complex_reply.insert(0, Plain('\n---------------\n'))
                for qq in group['group'][complex_at['at']]:
                    member = await bot.get_group_member(event.sender.group.id, qq)
                    if member is not None:
                        complex_reply.insert(0, At(qq))
            await bot.send(event, complex_reply)
# Label for a boolean switch state.
def bool_string(switch):
    """Render an on/off switch as its Chinese status label."""
    return '已开启' if switch else '已关闭'
# Time-string pretreatment.
def time_pretreatment(time: str) -> str:
    """Normalize an hour/minute token: strip backslashes and whitespace and
    drop a leading zero from two-character values ('05' -> '5', '00' -> '0')."""
    cleaned = time.replace('\\', '').strip()
    # Only exact two-character tokens get the leading zero removed; both the
    # '0X' and '00' cases reduce to the second character.
    if len(cleaned) == 2 and cleaned[0] == '0':
        return cleaned[1:]
    return cleaned
# Wall-clock time validation.
def valid_time(hour: int, minute: int) -> bool:
    """True iff (hour, minute) is a valid 24-hour clock time."""
    return 0 <= hour < 24 and 0 <= minute < 60
class MessageProcessing:
    """Central message dispatcher/state holder for the bot.

    NOTE: all of the attributes below are *class* attributes, shared by
    every instance; the class body continues beyond this chunk.
    """
    config = {}       # global bot configuration (reloaded by loadfile)
    statistics = {}   # usage counters (reloaded by loadfile)
    bot_qq = 0        # the bot's own QQ number
    bot_name = '小柒'  # the bot's display name
    groups = {}       # per-group settings cache, keyed by group id
    users = {}        # per-user data cache, keyed by QQ number
    message_tmp = {}  # scratch space for in-flight messages
    last_reply = ''   # most recent reply text
    # Feature plugins, instantiated once at class-definition time:
    luck = BaseFunction.luck()
    bottle = BaseFunction.DriftingBottle()
    rpg = RPG.RPG()
    clash = Clash.Clash()
    clock = Clock.Clock()
    def __init__(self):
        # All state lives in shared class attributes; nothing per-instance.
        pass
    def get_user(self, qq):
        # Refresh the cached record for user ``qq`` from persistent storage.
        self.users[qq] = dataManage.read_user(qq)
    def get_group(self, group_id):
        # Refresh the cached settings for group ``group_id`` from storage.
        self.groups[group_id] = dataManage.read_group(group_id)
def loadfile(self):
# 基本信息重置
self.config = dataManage.read_config()
self.statistics = dataManage.read_statistics()
luck = dataManage.read_luck()
screen = dataManage.read_screen_word()
if not os.path.exists('data/Function/Talk/lovetalk.txt'):
with open('data/Function/Talk/lovetalk.txt', 'w', encoding='utf-8') as f:
f.write('1\n1.我大约真的没有什么才华,只是因为有幸见着了你,于是这颗庸常的心中才凭空生出好些浪漫。')
if not os.path.exists('data/Function/Talk/poem.txt'):
with open('data/Function/Talk/poem.txt', 'w', encoding='utf-8') as f:
f.write('1\n1.我们趋行在人生这个恒古的旅途,在坎坷中奔跑,在挫折里涅槃,忧愁缠满全身,痛苦飘洒一地。我们累,却无从止歇;我们苦,却无法回避。——《百年孤独》')
if not os.path.exists('data/Function/Talk/swear.txt'):
with open('data/Function/Talk/swear.txt', 'w', encoding='utf-8') as f:
f.write('1\n1.我无外乎也就讨厌两种人,一种是你这样的,另一种是不管你以后变成什么样那样的。')
if not os.path.exists('data/Function/Talk/tarot.txt'):
return False
if not os.path.exists('data/Function/Talk/tarot2.txt'):
return False
# 四六级词汇
if not os.path.exists('data/Function/Vocabulary/vocabulary-4.txt'):
return False
if not os.path.exists('data/Function/Vocabulary/vocabulary-4-index.txt'):
with open('data/vocabulary-4-index.txt', 'w', encoding='utf-8') as f:
f.write('1')
if not os.path.exists('data/Function/Vocabulary/vocabulary-6.txt'):
return False
if not os.path.exists('data/Function/Vocabulary/vocabulary-6-index.txt'):
with open('data/vocabulary-6-index.txt', 'w', encoding='utf-8') as f:
f.write('1')
return True
def get_right(self, qq):
if qq == self.config['master']:
return 0
elif qq in self.config["administrator"]:
return 1
elif qq in self.config["contributor"]:
return 2
else:
return 3
def get_blacklist(self, qq, group_id):
if qq in self.config['blacklist_member']:
return 1
elif group_id > 0 and group_id in self.config['blacklist_group']:
return 2
return 0
    def get_qq(self):
        # Accessor for the bot's own QQ number.
        return self.bot_qq
    def get_name(self):
        # Accessor for the bot's display name.
        return self.bot_name
    async def switch(self, bot, event, mode, message, message_code, group_id, right, group_right, qq):
        """Dispatch group-management commands.

        Handles per-group feature on/off toggles (nudge, curse, game, image
        search, custom replies, "+1" repeating, TRPG dice, Clash of Clans
        lookup, flash-photo unpacking, member watching, welcome messages,
        auto-approval, anti-recall), the scheduled mute-all subscription,
        the (unfinished) scheduled-reminder commands, command tunneling
        (aliases), and cross-group reply-library sharing.

        Sends its own replies via send_message()/bot.send() and returns after
        handling a command. Most branches return None; the final branch
        returns 1. If no branch matches, falls through and returns None.

        Args:
            bot/event: mirai bot handle and the triggering message event.
            mode: message origin (0 friend, 1 group, 2 temp — per caller).
            message: plain message text; message_code: lowercased/keyed form.
            group_id: source group id (0 for non-group messages).
            right: bot-level permission of the sender (see get_right).
            group_right: in-group role (0 owner, 1 admin, 2 member).
            qq: sender's QQ number.
        """
        merge_reply = False
        reply_image = ''
        need_at = False
        at_qq = 0
        muteall_schedule = dataManage.load_obj('data/Function/muteall')  # scheduled mute-all plans, keyed by group id
        remind_schedule = dataManage.load_obj('data/Function/remind')  # scheduled reminders, keyed by group id
        # --- simple per-group feature toggles -------------------------------
        # NOTE(review): in every toggle branch below, when the feature is
        # already in the requested state no branch assigns reply_text, so
        # send_message() would raise NameError — confirm intended behavior.
        if message_code == 'nudge on' or message == '开启戳一戳':
            if not self.groups[group_id]['config']['nudge']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['nudge'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启戳一戳功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'nudge off' or message == '关闭戳一戳':
            if self.groups[group_id]['config']['nudge']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['nudge'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭戳一戳功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'curse on' or message == '开启脏话':
            if not self.groups[group_id]['config']['curse']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['curse'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启脏话功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'curse off' or message == '关闭脏话':
            if self.groups[group_id]['config']['curse']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['curse'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭脏话功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'game on' or message == '开启游戏':
            if not self.groups[group_id]['config']['RPG']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['RPG'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启游戏功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'game off' or message == '关闭游戏':
            if self.groups[group_id]['config']['RPG']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['RPG'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭游戏功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'game limit on' or message == '开启游戏限制模式':
            if not self.groups[group_id]['config']['limit_RPG']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['limit_RPG'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启游戏限制~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'game limit off' or message == '关闭游戏限制模式':
            if self.groups[group_id]['config']['limit_RPG']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['limit_RPG'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭游戏限制~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        # Image search toggles require the master (right == 0), not group staff.
        elif message_code == 'image on' or message == '开启图片搜索':
            if not self.groups[group_id]['config']['image']:
                if right == 0:
                    self.groups[group_id]['config']['image'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启p站图片搜索功能~'
                else:
                    reply_text = '权限不足,需要主人(发送图片及其占用资源所以只对部分开放)'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'image off' or message == '关闭图片搜索':
            if self.groups[group_id]['config']['image']:
                if right == 0:
                    self.groups[group_id]['config']['image'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭p站图片搜索功能~'
                else:
                    reply_text = '权限不足,需要主人(发送图片及其占用资源所以只对部分开放)'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'reply on' or message == '开启自定义回复':
            if not self.groups[group_id]['config']['autonomous_reply']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['autonomous_reply'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启自定义回复功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'reply off' or message == '关闭自定义回复':
            if self.groups[group_id]['config']['autonomous_reply']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['autonomous_reply'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭自定义回复功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'repeat on' or message == '开启自动加一':
            if not self.groups[group_id]['config']['repeat']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['repeat'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启自动加一功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'repeat off' or message == '关闭自动加一':
            if self.groups[group_id]['config']['repeat']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['repeat'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭自动加一功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'trpg on' or message == '开启骰娘':
            if not self.groups[group_id]['config']['TRPG']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['TRPG'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启骰娘功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'trpg off' or message == '关闭骰娘':
            if self.groups[group_id]['config']['TRPG']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['TRPG'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭骰娘功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'clash on' or message == '开启部落冲突查询':
            if not self.groups[group_id]['config']['clash']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['clash'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启部落冲突查询功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'clash off' or message == '关闭部落冲突查询':
            if self.groups[group_id]['config']['clash']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['clash'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭部落冲突查询功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'flash on' or message == '开启解除闪照':
            if not self.groups[group_id]['config']['flash']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['flash'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启自动解除闪照功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'flash off' or message == '关闭解除闪照':
            if self.groups[group_id]['config']['flash']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['flash'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭自动解除闪照功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'member wather on' or message == '开启成员监控':
            if not self.groups[group_id]['config']['member_wather']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['member_wather'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启成员监控功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'member wather off' or message == '关闭成员监控':
            if self.groups[group_id]['config']['member_wather']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['member_wather'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭成员监控功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        # --- welcome message: enabling without a stored text starts a
        # follow-up dialogue via the user's buffer (buffer id 1). ------------
        elif message_code == 'welcome on' or message == '开启新人欢迎':
            if not self.groups[group_id]['config']['welcome']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['welcome'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启入群欢迎功能~'
                    if self.groups[group_id]['welcome'] is None:
                        reply_text += '\n但是您还没有设置入群欢迎哦~请告诉我入群欢迎的内容吧~(下一条发送的消息将会被记录,请不要包含链接,违者黑名单!!!)'
                        self.users[qq]['buffer']['id'] = 1
                        self.users[qq]['buffer']['buffer'] = group_id
                        dataManage.save_user(qq, self.users[qq])
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'welcome off' or message == '关闭新人欢迎':
            if self.groups[group_id]['config']['welcome']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['welcome'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭入群欢迎功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'welcome set' or message == '设置新人欢迎':
            if group_right < 2 or right < 3:
                self.groups[group_id]['config']['welcome'] = True
                dataManage.save_group(group_id, self.groups[group_id])
                self.statistics['operate'] += 1
                dataManage.save_statistics(self.statistics)
                reply_text = '请告诉我入群欢迎的内容吧~(下一条发送的消息将会被记录,请不要包含链接,违者黑名单!!!)'
                self.users[qq]['buffer']['id'] = 1
                self.users[qq]['buffer']['buffer'] = group_id
                dataManage.save_user(qq, self.users[qq])
            else:
                reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        # --- auto-approval of join requests (pass phrase in config['pass'];
        # buffer id 8 collects the phrase in a follow-up message). -----------
        elif message_code == 'automatic on' or message == '开启自动审核':
            if not self.groups[group_id]['config']['automatic']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['automatic'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启入群自动审核功能~'
                    if self.groups[group_id]['config']['pass'] == '':
                        reply_text += '\n但是您还没有设置入群暗号哦~请告诉我入群暗号的内容吧~(下一条发送的消息将会被记录)'
                        self.users[qq]['buffer']['id'] = 8
                        self.users[qq]['buffer']['buffer'] = group_id
                        dataManage.save_user(qq, self.users[qq])
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'automatic off' or message == '关闭自动审核':
            if self.groups[group_id]['config']['automatic']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['automatic'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭入群自动审核功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'automatic set' or message == '设置自动审核':
            if group_right < 2 or right < 3:
                # NOTE(review): this sets 'welcome', not 'automatic' — looks
                # like a copy/paste slip from the welcome-set branch; confirm.
                self.groups[group_id]['config']['welcome'] = True
                dataManage.save_group(group_id, self.groups[group_id])
                self.statistics['operate'] += 1
                dataManage.save_statistics(self.statistics)
                reply_text = '请告诉我自动审批的暗号吧~(下一条发送的消息将会被记录)'
                self.users[qq]['buffer']['id'] = 8
                self.users[qq]['buffer']['buffer'] = group_id
                dataManage.save_user(qq, self.users[qq])
            else:
                reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        # --- scheduled mute-all subscription (buffer id 12 collects the
        # time range in a follow-up message). --------------------------------
        elif message_code == 'muteall schedule on' or message == '开启定时全员禁言' or message == '开启定时全体禁言':
            if not muteall_schedule.__contains__(group_id):
                if group_right < 2 or right < 2:
                    self.users[qq]['buffer']['id'] = 12
                    self.users[qq]['buffer']['buffer'] = group_id
                    dataManage.save_user(qq, self.users[qq])
                    await bot.send(event,
                                   '欢迎订阅“定时全体禁言”服务!请用以下格式告诉我您的开始和结束时间:\nxx:xx xx:xx(采用24小时制,不足两位补0)\n例如您想从凌晨两点半禁言到早上六点,可以输入:“02:30 06:00”')
                else:
                    await bot.send(event, '权限不足,需要群主或群管理')
            else:
                await bot.send(event, '现在已有了定时全员禁言的计划,不可重复添加。当前计划:%2d:%2d—%2d:%2d' % (
                    muteall_schedule[group_id]['hour1'],
                    muteall_schedule[group_id]['minute1'],
                    muteall_schedule[group_id]['hour2'],
                    muteall_schedule[group_id]['minute2']
                ))
            return
        elif message_code == 'muteall schedule off' or message == '关闭定时全员禁言' or message == '关闭定时全体禁言':
            if muteall_schedule.__contains__(group_id):
                if group_right < 2 or right < 2:
                    del muteall_schedule[group_id]
                    dataManage.save_obj(muteall_schedule, 'data/Function/muteall')
                    await bot.send(event, '已成功关闭')
                else:
                    await bot.send(event, '权限不足,需要群主或群管理')
            else:
                await bot.send(event, '现在没有定时全员禁言的计划')
            return
        # --- scheduled reminders (apparently unfinished) --------------------
        elif message == '添加定时提醒':
            # NOTE(review): builds a template dict that is never used and
            # falls through without replying or returning — unfinished feature.
            single = {
                'name': '',
                'is_consecutive': False,
                'from': {  # start time
                    'year': 0,
                    'month': 0,
                    'day': 0,
                    'hour': 0,
                    'minute': 0,
                    'second': 0
                },
                'to': {  # end time
                    'year': 0,
                    'month': 0,
                    'day': 0,
                    'hour': 0,
                    'minute': 0,
                    'second': 0
                },
                'repeat': '每小时、每天、每星期、每月、每年、每季度',
                'repeat-mode': ''
            }
        elif message == '查看定时提醒':
            if not remind_schedule.__contains__(group_id) or len(remind_schedule[group_id]) == 0:
                await bot.send(event, '本群没有任何定时提醒')
                return
            index = 1
            reply = '本群定时提醒如下:'
            # NOTE(review): the loop overwrites `reply` (rather than
            # appending) and never increments `index` — unfinished feature.
            for single in remind_schedule[group_id]:
                reply = '\n%d.' % index
            await bot.send(event, reply)
            return
        elif message == '删除定时提醒':
            pass
        elif message_code == 'revoke on' or message == '开启防撤回':
            if not self.groups[group_id]['config']['revoke']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['revoke'] = True
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已开启防撤回功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        elif message_code == 'revoke off' or message == '关闭防撤回':
            if self.groups[group_id]['config']['revoke']:
                if group_right < 2 or right < 3:
                    self.groups[group_id]['config']['revoke'] = False
                    dataManage.save_group(group_id, self.groups[group_id])
                    self.statistics['operate'] += 1
                    dataManage.save_statistics(self.statistics)
                    reply_text = '本群已关闭防撤回功能~'
                else:
                    reply_text = '权限不足,需要群管理或群主'
            await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
            return
        # --- command tunneling: per-group alias mapping "original -> target",
        # persisted in data/Function/tunneling. ------------------------------
        elif message[:6] == '添加指令隧穿':
            if not (group_right < 2 or right < 3):
                await bot.send(event, '权限不足')
                return
            information: list = message[6:].strip().split(' ')
            if len(information) == 2:
                if '删除指令隧穿' in information[0] or '查看指令隧穿' in information[0] or '添加指令隧穿' in information[0]:
                    await bot.send(event, "非法占用,该指令格式不可以占用删除隧穿,查看隧穿")
                else:
                    tunneling: dict = dataManage.load_obj('data/Function/tunneling')
                    if not tunneling.__contains__(group_id):
                        tunneling[group_id] = {}
                    information[0] = information[0].strip()
                    information[1] = information[1].strip()
                    if tunneling[group_id].__contains__(information[0]):
                        await bot.send(event, "该隧穿已被占用:%s->%s" % (information[0], tunneling[group_id][information[0]]))
                    else:
                        tunneling[group_id][information[0]] = information[1]
                        dataManage.save_obj(tunneling, 'data/Function/tunneling')
                        await bot.send(event, "成功添加隧穿指令:%s->%s" % (information[0], information[1]))
            else:
                await bot.send(event, "格式错误,该指令格式如下“添加指令隧穿 原始指令 隧穿到某个指令”")
            return
        elif message[:6] == '删除指令隧穿':
            if not (group_right < 2 or right < 3):
                await bot.send(event, '权限不足')
                return
            information: str = message[6:].strip()
            tunneling: dict = dataManage.load_obj('data/Function/tunneling')
            if not tunneling.__contains__(group_id) or not tunneling[group_id].__contains__(information):
                await bot.send(event, "原始隧穿指令不存在")
            else:
                await bot.send(event, "成功删除隧穿指令:%s->%s" % (information, tunneling[group_id][information]))
                del tunneling[group_id][information]
                dataManage.save_obj(tunneling, 'data/Function/tunneling')
            return
        elif message == '查看指令隧穿':
            tunneling: dict = dataManage.load_obj('data/Function/tunneling')
            if not tunneling.__contains__(group_id):
                await bot.send(event, "暂无任何隧穿命令")
            elif len(tunneling[group_id]) == 0:
                await bot.send(event, "暂无任何隧穿命令")
            else:
                reply = '隧穿指令如下:'
                for key, value in tunneling[group_id].items():
                    reply += '\n%s->%s' % (key, value)
                await bot.send(event, reply)
            return
        # --- cross-group sharing of the custom reply library ----------------
        elif message == '开启群回复共享':
            if not (group_right < 2 or right < 3):
                await bot.send(event, '权限不足')
                return
            copy_allow = dataManage.load_obj('data/Function/reply_copy_right')
            if copy_allow.__contains__(group_id):
                await bot.send(event, '本群的回复共享本来就是开启的')
                return
            copy_allow[group_id] = True
            dataManage.save_obj(copy_allow, 'data/Function/reply_copy_right')
            await bot.send(event, '已开启群回复共享,其他群可以输入“复制群回复%d”来复制回复' % group_id)
            return
        elif message == '关闭群回复共享':
            if not (group_right < 2 or right < 3):
                await bot.send(event, '权限不足')
                return
            copy_allow = dataManage.load_obj('data/Function/reply_copy_right')
            if not copy_allow.__contains__(group_id):
                await bot.send(event, '本群的回复共享本来就是关闭的')
                return
            del copy_allow[group_id]
            dataManage.save_obj(copy_allow, 'data/Function/reply_copy_right')
            await bot.send(event, '已关闭群回复共享')
            return
        elif message[:5] == '复制群回复':
            if not (group_right < 2 or right < 3):
                await bot.send(event, '权限不足')
                return
            target = message[5:].strip()
            if target.isdigit():
                target = int(target)
                copy_allow = dataManage.load_obj('data/Function/reply_copy_right')
                if copy_allow.__contains__(target):
                    self.get_group(target)
                    target_group = self.groups[target]
                    group = self.groups[group_id]
                    group['key_reply'] = target_group['key_reply']
                    dataManage.save_group(group_id, group)
                    await bot.send(event, '复制成功~')
                else:
                    await bot.send(event, '目标群没有共享回复库~')
            else:
                await bot.send(event, '格式错误~')
            return 1
# 0:朋友消息,1:群消息,2:临时消息
async def run(self, bot, event, mode, message_chain, be_at):
self.config = dataManage.read_config()
self.statistics = dataManage.read_statistics()
self.bot_qq = self.config['qq']
self.bot_name = self.config['name']
# ===================================================================================
# ===================================================================================
# 消息表获取
message = ''
plain_list = message_chain[Plain]
for i in plain_list:
message += str(i)
at_list = message_chain[At]
for i in at_list:
if i != At(bot.qq):
message += str(i)
if len(message_chain[Image]) != 0:
message += '[图片]'
flash_image = message_chain[FlashImage]
if len(flash_image) != 0:
message += '[闪照]'
# ===================================================================================
# ===================================================================================
# 基本信息获取
# interceptable_need_reply = False # 可被打断的回复
need_reply = False # 是否需要回复
merge_reply = False # 是否合并回复
reply_text = '' # 回复的文本内容
reply_image = '' # 回复的图片
need_complex_reply = False # 是否是复杂回复
complex_at = {
'at_type': -1, # -1:不艾特;0:艾特;1:艾特分组
'at': 0
} # 复杂艾特
complex_reply = None # 复杂回复
need_at = False # 是否需要at
at_qq = 0 # at的qq是谁
# 状态信息
group_right = 2 # 在群里的权限(群主、管理员、成员)
if mode == 0:
group_id = 0 # 发消息的人的群号(如果是群聊消息)
group_name = ''
else:
group_id = event.sender.group.id
group_name = event.sender.group.name
tmp = str(event.sender.permission)
if tmp == 'Permission.Owner':
group_right = 0
elif tmp == 'Permission.Administrator':
group_right = 1
qq = event.sender.id # (发消息人的qq)
name = event.sender.get_name()
right = self.get_right(qq) # 对于小柒的权限(主人、管理员、贡献者)
blacklist = self.get_blacklist(qq, group_id)
if mode == 0 or mode == 2:
be_at = True
self.get_user(qq)
if mode == 1:
self.get_group(group_id)
key_allow = []
if mode == 1:
key_allow = self.groups[group_id]['config']['key']
elif mode == 0 or mode == 2:
key_allow = self.users[qq]['config']['key']
# 获取指令信息
message = message.strip()
tunneling: dict = dataManage.load_obj('data/Function/tunneling')
if tunneling.__contains__(group_id):
if tunneling[group_id].__contains__(message):
print('隧穿指令:%s->%s' % (message, tunneling[group_id][message]))
message = tunneling[group_id][message]
else:
for key, value in tunneling[group_id].items():
if message.startswith(key):
message = message.replace(key, value, 1)
break
message_len = len(message)
message_code = message.lower()
if len(key_allow) == 0:
message_code = message_code
elif message_len > 0 and message_code[0] in key_allow:
message_code = message_code[1:]
else:
message_code = ''
message_code_len = len(message_code)
be_mute = (mode == 1 and self.groups[group_id]['config']['mute'])
master = await bot.get_friend(self.config['master'])
# print('\tmessage:' + message)
# print('\tmessage_code:' + message_code)
# print('\tqq:' + str(qq) + '<' + name + '>')
# if mode == 1:
# print('\tgroup:' + str(group_id) + '<' + event.sender.group.get_name() + '>')
# print('\tmute:' + str(be_mute))
# ===================================================================================
# ===================================================================================
# 消息处理开始
# 禁言消息的处理
if mode == 1 and message[:5] != '删除屏蔽词' and message[
:5] != '添加屏蔽词' and message != '清空屏蔽词' and message != '查看屏蔽词':
revoke = False
for key in self.groups[group_id]['prohibited_word']:
if key in message:
reply_text = '发现屏蔽词“' + key + '”'
revoke = True
break
if revoke:
need_reply = True
need_at = True
if group_right == 2:
if str(event.sender.group.permission) != 'Permission.Member':
await bot.recall(message_chain.message_id)
reply_text += ',予以撤回~'
else:
reply_text += ',但是' + self.bot_name + '没有办法撤回诶~'
else:
reply_text += ',但是对方是管理员/群主,' + self.bot_name + '打不过,嘤嘤嘤~'
if need_reply:
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# 基本信息查看
if message == '我的权限':
need_at = True
if blacklist == 1:
reply_text = '你当前在黑名单中~'
elif blacklist == 2:
reply_text = '本群当前在黑名单中'
elif right == 0:
reply_text = '当前权限:主人\n可以输入“主人帮助”来获取指令帮助哦~'
elif right == 1:
reply_text = '当前权限:管理员\n可以输入“管理员帮助”来获取指令帮助哦~'
elif right == 2:
reply_text = '当前权限:贡献者\n可以输入“贡献者帮助”来获取指令帮助哦~'
elif right == 3:
reply_text = '当前权限:普通用户\n可以输入“*help”来获取指令帮助;输入“骰娘”来获取骰娘帮助;输入“游戏帮助”来获取游戏帮助'
if be_mute:
reply_text += '\n在本群中' + self.bot_name + '被禁言了'
self.statistics['help'] += 1
dataManage.save_statistics(self.statistics)
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif message.replace('查看', '').replace('查询', '') == '开关列表' or message.replace('查看', '').replace('查询',
'') == '模块列表':
if mode == 1:
reply_image = BaseFunction.generate_module_list(group_id, self.groups[group_id])
else:
reply_text = '用户<' + name + '>模块开关情况如下:'
reply_text += '\n输入“模块管理帮助”获取所有指令的详细说明'
reply_text += '\n格式:”字段(操作指令):状态“\n'
reply_text += '\n是否开启ai(时不时自主回复)【开启/关闭智能回复】:' + bool_string(self.users[qq]['config']['ai'])
self.statistics['help'] += 1
dataManage.save_statistics(self.statistics)
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# 如果是黑名单那么不会回复任何消息
if blacklist != 0:
return
if message_len == 0:
if be_at:
reply_text = '我在'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# 如果被限制那么只回复at消息
if mode == 1:
if self.groups[group_id]['config']['limit']:
if not be_at:
return
# ===================================================================================
# 处理上一次的消息
if self.users[qq]['buffer']['id'] != 0:
reset_buffer = True
if self.users[qq]['buffer']['id'] == 1: # 群欢迎语
self.get_group(self.users[qq]['buffer']['buffer'])
self.groups[self.users[qq]['buffer']['buffer']]['welcome'] = message_chain
reply_text = self.bot_name + '已经记录下了~!'
need_reply = True
dataManage.save_group(self.users[qq]['buffer']['buffer'],
self.groups[self.users[qq]['buffer']['buffer']])
elif self.users[qq]['buffer']['id'] == 2: # 清空屏蔽词
if message == '是' or message == '确定' or message == '确认' or message == '可':
self.get_group(self.users[qq]['buffer']['buffer'])
self.groups[self.users[qq]['buffer']['buffer']]['prohibited_word'] = []
reply_text = self.bot_name + '已经帮您清空了'
need_reply = True
dataManage.save_group(self.users[qq]['buffer']['buffer'],
self.groups[self.users[qq]['buffer']['buffer']])
else:
reply_text = self.bot_name + '啊嘞?已为您取消清空。'
need_reply = True
elif self.users[qq]['buffer']['id'] == 3: # 覆盖分组
if message == '是' or message == '确定' or message == '确认' or message == '可':
tmp_members = self.users[qq]['buffer']['buffer']['members']
tmp_name = self.users[qq]['buffer']['buffer']['name']
tmp_group_id = self.users[qq]['buffer']['buffer']['group_id']
self.get_group(tmp_group_id)
operator.del_group(tmp_group_id, self.groups[tmp_group_id], tmp_name)
operator.add_group(tmp_group_id, self.groups[tmp_group_id], tmp_name, tmp_members, qq)
reply_text = '已经覆盖~'
need_reply = True
else:
reply_text = self.bot_name + '啊嘞?已为您取消覆盖分组。'
need_reply = True
elif self.users[qq]['buffer']['id'] == 4: # 清空分组
if message == '是' or message == '确定' or message == '确认' or message == '可':
tmp_group_id = self.users[qq]['buffer']['buffer']['group_id']
self.get_group(tmp_group_id)
self.groups[tmp_group_id]['group'] = {}
dataManage.save_group(tmp_group_id, self.groups[tmp_group_id])
reply_text = '清空成功!'
need_reply = True
else:
reply_text = self.bot_name + '啊嘞?已为您取消清空分组。'
need_reply = True
elif self.users[qq]['buffer']['id'] == 5: # 创建复杂回复的触发词
if message != '*取消创建*':
self.users[qq]['buffer']['id'] = 6
self.users[qq]['buffer']['buffer'] = {
'group_id': self.users[qq]['buffer']['buffer'],
'key': message
}
dataManage.save_user(qq, self.users[qq])
reply_text = '触发词:' + message
reply_text += '\n小柒已为您记录下来了,请问你的回复内容是什么?(可以文字+图片,不可以包含艾特)'
reset_buffer = False
else:
reply_text = '已为您取消创建'
need_reply = True
elif self.users[qq]['buffer']['id'] == 6: # 创建复杂回复的回复内容
if message != '*取消创建*':
self.users[qq]['buffer']['id'] = 7
self.users[qq]['buffer']['buffer']['reply'] = message_chain
dataManage.save_user(qq, self.users[qq])
reply_text += '小柒记录下来了,请问这条消息需要艾特谁吗(全体成员/分组/触发人/QQ号,这四种都是可以的哦~如果QQ号为0表示不艾特,如果不明白分组可以看“贡献者帮助”)?'
reset_buffer = False
else:
reply_text = '已为您取消创建'
need_reply = True
elif self.users[qq]['buffer']['id'] == 7: # 创建复杂回复的艾特对象
message = message.replace('@', '').strip()
if message != '*取消创建*':
if message == '全体成员':
self.users[qq]['buffer']['buffer']['at_type'] = 0 # 0表示艾特
self.users[qq]['buffer']['buffer']['at'] = -1
elif message == '触发人':
self.users[qq]['buffer']['buffer']['at_type'] = 0
self.users[qq]['buffer']['buffer']['at'] = 0
elif message.isdigit():
buffer_at = int(message)
if buffer_at > 0:
self.users[qq]['buffer']['buffer']['at_type'] = 0
self.users[qq]['buffer']['buffer']['at'] = buffer_at
else:
self.users[qq]['buffer']['buffer']['at_type'] = -1 # -1表示不艾特
self.users[qq]['buffer']['buffer']['at'] = 0
else:
self.users[qq]['buffer']['buffer']['at_type'] = 1 # 1表示艾特分组
self.users[qq]['buffer']['buffer']['at'] = message
self.get_group(self.users[qq]['buffer']['buffer']['group_id'])
group = self.groups[self.users[qq]['buffer']['buffer']['group_id']]
if not group['key_reply'].__contains__('complex'):
group['key_reply']['complex'] = {}
group['key_reply']['complex'][self.users[qq]['buffer']['buffer']['key']] = {
'reply': self.users[qq]['buffer']['buffer']['reply'],
'at': self.users[qq]['buffer']['buffer']['at'],
'at_type': self.users[qq]['buffer']['buffer']['at_type']
}
dataManage.save_group(self.users[qq]['buffer']['buffer']['group_id'], group)
reply_text = '创建成功~'
else:
reply_text = '已为您取消创建'
need_reply = True
elif self.users[qq]['buffer']['id'] == 8: # 自动审批暗号
self.get_group(self.users[qq]['buffer']['buffer'])
self.groups[self.users[qq]['buffer']['buffer']]['config']['pass'] = message
reply_text = self.bot_name + '已经记录下了~!当前入群暗号:' + message
need_reply = True
dataManage.save_group(self.users[qq]['buffer']['buffer'],
self.groups[self.users[qq]['buffer']['buffer']])
elif self.users[qq]['buffer']['id'] == 9: # XMU服务条款同意
need_reply = True
if message == '同意':
reply_text = '很高兴您订阅“厦大自动健康打卡”服务,请问您的厦大统一身份认证账号是什么?'
reset_buffer = False
self.users[qq]['buffer']['id'] = 10
self.users[qq]['buffer']['buffer'] = {
'account': '',
'password': ''
}
dataManage.save_user(qq, self.users[qq])
else:
reply_text = '已取消为您取消订阅“厦大自动健康打卡”服务'
elif self.users[qq]['buffer']['id'] == 10: # XMU服务账号
need_reply = True
reply_text = '请问您的厦大统一身份认证密码是什么?(请再次确保您在私聊!)'
reset_buffer = False
self.users[qq]['buffer']['id'] = 11
self.users[qq]['buffer']['buffer'] = {
'account': message,
'password': ''
}
dataManage.save_user(qq, self.users[qq])
elif self.users[qq]['buffer']['id'] == 11: # XMU服务密码
need_reply = True
reply_text = '好的~已为您记录下来了,将会在每天12:05自动打卡,并私聊告诉你打卡的结果,请确保有添加' + self.get_name() + '的好友'
reply_text += '\n你可以通过输入“AsYNARTvgt”来退订此服务'
password_byte = bytes(message, encoding="utf8")
ciphertext = base64.b64encode(password_byte)
xmu = dataManage.load_obj('lib/account')
xmu[qq] = {
'account': self.users[qq]['buffer']['buffer']['account'],
'password': ciphertext
}
dataManage.save_obj(xmu, 'lib/account')
elif self.users[qq]['buffer']['id'] == 12: # 订阅定时全局禁言服务
need_reply = True
get_time = True
value = {
'id': qq,
'hour1': 0,
'minute1': 0,
'hour2': 0,
'minute2': 0
}
list1 = message.replace(':', ':').split(' ')
if len(list1) != 2:
get_time = False
else:
list1_1 = list1[0].split(':')
list1_2 = list1[1].split(':')
if len(list1_1) != 2 or len(list1_2) != 2:
get_time = False
else:
list1_1[0] = time_pretreatment(list1_1[0])
list1_1[1] = time_pretreatment(list1_1[1])
list1_2[0] = time_pretreatment(list1_2[0])
list1_2[1] = time_pretreatment(list1_2[1])
if not list1_1[0].isdigit() or not list1_1[1].isdigit or not list1_2[0].isdigit() or not \
list1_2[1].isdigit:
get_time = False
else:
value['hour1'] = int(list1_1[0])
value['minute1'] = int(list1_1[1])
value['hour2'] = int(list1_2[0])
value['minute2'] = int(list1_2[1])
if not valid_time(value['hour1'], value['minute1']) or not valid_time(value['hour2'],
value['minute2']):
get_time = False
if not get_time:
if message != '取消':
reset_buffer = False
await bot.send(event, '这好像不是一个正确的格式,你可以输入“取消”来取消创建。请再次告诉我时间:')
else:
await bot.send(event, '已为您取消创建')
else:
muteall_schedule = dataManage.load_obj('data/Function/muteall') # 禁言计划
if value['hour1'] == value['hour2'] and value['minute1'] == value['minute2']:
reset_buffer = False
await bot.send(event, '这好像只有一分钟呢,你可以输入“取消”来取消创建。请再次告诉我时间:')
else:
muteall_schedule[group_id] = value
dataManage.save_obj(muteall_schedule, 'data/Function/muteall')
await bot.send(event, '创建成功!你可以输入“模块列表”来查看订阅的服务')
if reset_buffer:
self.users[qq]['buffer']['id'] = 0
self.users[qq]['buffer']['buffer'] = None
dataManage.save_user(qq, self.users[qq])
if need_reply:
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# ===================================================================================
# 如果是群聊消息,并且具有小柒的操作权限,那么就可以进行退群和禁言的操作
if mode == 1:
if message_code == 'quit' or message_code == 'dismiss':
if group_right < 2 or right < 3:
await bot.send(event, '再见啦~各位!我会想你们的!')
await bot.quit(group_id)
self.statistics['quit'] += 1
dataManage.save_statistics(self.statistics)
logManage.group_log(getNow.toString(), qq, group_id, event.sender.group.get_name(),
message + '; 小柒退群!')
if master is not None:
await bot.send_friend_message(master.id, [
Plain('已退出群聊:' + str(group_id) + '!')
])
else:
reply_text = '权限不足,需要群管理或群主或者小柒的管理员'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif message_code == 'mute' or message_code == 'bot off':
if not self.groups[group_id]['config']['mute']:
if group_right < 2 or right < 3:
self.groups[group_id]['config']['mute'] = True
dataManage.save_group(group_id, self.groups[group_id])
await bot.send(event, 'QAQ,那我闭嘴了')
self.statistics['mute'] += 1
dataManage.save_statistics(self.statistics)
logManage.group_log(getNow.toString(), qq, group_id, event.sender.group.get_name(),
message + '; 小柒禁言!')
if master is not None:
await bot.send_friend_message(master.id, [
Plain('在群' + str(group_id) + '被禁言!')
])
else:
reply_text = '权限不足,需要群管理或群主'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
else:
reply_text = '小柒本来就被禁言了!'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif message_code == 'unmute' or message_code == 'bot on':
if self.groups[group_id]['config']['mute']:
if group_right < 2 or right < 3:
self.groups[group_id]['config']['mute'] = False
dataManage.save_group(group_id, self.groups[group_id])
await bot.send(event, '呜呜呜,憋死我了,终于可以说话了')
self.statistics['unmute'] += 1
dataManage.save_statistics(self.statistics)
logManage.group_log(getNow.toString(), qq, group_id, event.sender.group.get_name(),
message + '; 小柒解除禁言!')
if master is not None:
await bot.send_friend_message(master.id, [
Plain('在群' + str(group_id) + '解除禁言!')
])
else:
reply_text = '权限不足,需要群管理或群主'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
else:
reply_text = '本来就没有禁言哦~'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif message_code == 'limit on' or message == '开启限制模式':
if group_right < 2 or right < 3:
if not self.groups[group_id]['config']['limit']:
self.groups[group_id]['config']['limit'] = True
dataManage.save_group(group_id, self.groups[group_id])
await bot.send(event, '限制模式已开启,指令需艾特才能回复。解禁指令也别忘记艾特哦~')
self.statistics['operate'] += 1
dataManage.save_statistics(self.statistics)
logManage.group_log(getNow.toString(), qq, group_id, event.sender.group.get_name(),
message + '; 小柒开启限制!')
else:
reply_text = '权限不足,需要群管理或群主或小柒的管理'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif message_code == 'limit off' or message == '关闭限制模式':
if group_right < 2 or right < 3:
if self.groups[group_id]['config']['limit']:
self.groups[group_id]['config']['limit'] = False
dataManage.save_group(group_id, self.groups[group_id])
await bot.send(event, '从现在起,指令无需艾特也能回复~')
self.statistics['operate'] += 1
dataManage.save_statistics(self.statistics)
logManage.group_log(getNow.toString(), qq, group_id, event.sender.group.get_name(),
message + '; 小柒解除限制!')
else:
reply_text = '权限不足,需要群管理或群主或小柒的管理'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# 如果被禁言那么直接返回
if be_mute:
return
# 基本权限管理
# if message_code[:9] == 'broadcast':
# if right == 0:
# temp = message_code[9:].strip() + '【全局广播内容无需回复】'
# group_list = await app.groupList()
# for i in group_list:
# print(i)
# await app.sendGroupMessage(i, MessageChain.create([
# Plain(temp)
# ]))
if message_code[:4] == 'send':
if master is not None and len(message) > 5:
await bot.send_friend_message(master.id, [
Plain(group_name + '<' + group_id + '>' + name + '(' + str(qq) + '):' + message[5:].strip())
])
reply_text = '已经报告给主人了~'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
self.statistics['operate'] += 1
dataManage.save_statistics(self.statistics)
return
elif message_code == 'ai on' or message == '开启智能回复':
if mode == 0 or mode == 2:
if not self.users[qq]['config']['ai']:
self.users[qq]['config']['ai'] = True
dataManage.save_user(qq, self.users[qq])
self.statistics['operate'] += 1
dataManage.save_statistics(self.statistics)
reply_text = '已开启智能回复~'
else:
reply_text = '智能回复本身就是开启的'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
elif mode == 1: # 如果是群聊则需要有权限,才能够操作
if group_right < 2 or right < 3:
if not self.groups[group_id]['config']['ai']:
self.groups[group_id]['config']['ai'] = True
dataManage.save_group(group_id, self.groups[group_id])
self.statistics['operate'] += 1
dataManage.save_statistics(self.statistics)
reply_text = '本群已开启艾特的智能回复~'
else:
reply_text = '本群本身就是开启艾特智能回复的'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
else:
reply_text = '权限不足,需要群管理或群主'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif message_code == 'ai off' or message == '关闭智能回复':
if mode == 0 or mode == 2:
if self.users[qq]['config']['ai']:
self.users[qq]['config']['ai'] = False
dataManage.save_user(qq, self.users[qq])
self.statistics['operate'] += 1
dataManage.save_statistics(self.statistics)
reply_text = '已关闭智能回复~'
else:
reply_text = '智能回复本身就是关闭的'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
elif mode == 1:
if group_right < 2 or right < 3:
if self.groups[group_id]['config']['ai']:
self.groups[group_id]['config']['ai'] = False
dataManage.save_group(group_id, self.groups[group_id])
self.statistics['operate'] += 1
dataManage.save_statistics(self.statistics)
reply_text = '本群已关闭艾特的智能回复~'
else:
reply_text = '本群本身就是关闭艾特智能回复的'
else:
reply_text = '权限不足,需要群管理或群主'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif mode == 1:
ans = await self.switch(bot, event, mode, message, message_code, group_id, right, group_right, qq)
if ans is None:
return
# -----------------------------------------------------------------------------------
# 闪照处理
if mode == 1 and self.groups[group_id]['config']['flash']:
for image in flash_image:
await bot.send(event, image.as_image())
# 禁言操作
if mode == 1 and group_right != 2:
if message[:2] == '禁言' and len(at_list) > 0:
need_reply = True
mute_seconds = 60 * 10
message_plain = ''
for i in plain_list:
message_plain += str(i)
message_plain = message_plain.replace(' ', '').replace('个', '').strip()
if len(message_plain) > 2:
mute_seconds = 0
sum_number = 0
temp_number = 0
valid = True
for index in range(len(message_plain[2:])):
char = message_plain[2:][index]
if char == '一':
temp_number = 1
elif char == '二' or char == '两':
temp_number = 2
elif char == '三':
temp_number = 3
elif char == '四':
temp_number = 4
elif char == '五':
temp_number = 5
elif char == '六':
temp_number = 6
elif char == '七':
temp_number = 7
elif char == '八':
temp_number = 8
elif char == '九':
temp_number = 9
elif char == '十':
if temp_number > 0:
sum_number += temp_number * 10
temp_number = 0
else:
sum_number += 10
elif char == '百':
sum_number += temp_number * 100
temp_number = 0
elif char == '千':
sum_number += temp_number * 1000
temp_number = 0
elif char == '万':
sum_number = sum_number * 10000 + temp_number * 10000
temp_number = 0
elif char == '天':
sum_number += temp_number
mute_seconds += sum_number * 24 * 3600
temp_number = 0
sum_number = 0
elif char == '小':
if message_plain[2:][index + 1] == '时':
sum_number += temp_number
mute_seconds += sum_number * 3600
temp_number = 0
sum_number = 0
else:
valid = False
break
elif char == '分':
if message_plain[2:][index + 1] == '钟':
sum_number += temp_number
mute_seconds += sum_number * 60
temp_number = 0
sum_number = 0
else:
valid = False
break
elif char == '秒':
sum_number += temp_number
mute_seconds += sum_number
temp_number = 0
sum_number = 0
elif char != '钟' and char != '时':
valid = False
break
if not valid and message_plain[2:].isdigit():
mute_seconds = int(message_plain[2:]) * 60
elif not valid:
mute_seconds = 60 * 10
if mute_seconds < 60:
mute_seconds = 60
if mute_seconds > 30 * 24 * 3600:
mute_seconds = 30 * 24 * 3600
number = 0
for qq in at_list:
if str(event.sender.group.permission) != 'Permission.Member':
if qq.target != bot.qq:
member = await bot.get_group_member(group_id, qq.target)
if member is not None:
if str(member.permission) == 'Permission.Member':
await bot.mute(member_id=qq.target, target=group_id, time=mute_seconds)
number += 1
else:
reply_text = '小柒无权禁言'
break
if number > 0:
reply_text = '成功禁言' + str(number) + '人'
elif reply_text == '':
reply_text = '啊嘞?好像全是管理员或群主呢'
elif message[:4] == '解除禁言' and len(at_list) > 0:
if str(event.sender.group.permission) != 'Permission.Member':
need_reply = True
number = 0
for qq in at_list:
member = await bot.get_group_member(group_id, qq.target)
if member is not None:
await bot.unmute(member_id=qq.target, target=group_id)
number += 1
reply_text = '成功解除' + str(number) + '人禁言'
elif message == '开启全体禁言':
if str(event.sender.group.permission) != 'Permission.Member':
await bot.mute_all(target=group_id)
need_reply = True
reply_text = '已开启全体禁言'
elif message == '解除全体禁言' or message == '关闭全体禁言':
if str(event.sender.group.permission) != 'Permission.Member':
await bot.unmute_all(target=group_id)
need_reply = True
reply_text = '已关闭全体禁言'
# -----------------------------------------------------------------------------------
# 定制功能
if message == 'XKoTVtvG2P':
need_reply = True
reply_text = '欢迎订阅' + self.get_name() + '的“厦大自动健康打卡”服务,请确保你了解以下需知:'
reply_text += '\n1.不得利用本软件进行瞒报,以此造成的责任应由使用者自行承担,如有前往其他城市,请及时手动登陆打卡系统更新相关信息'
reply_text += '\n2.使用者的厦大账号密码,将会采用加密算法加密后存储到数据库,开发者可以使用特定的解密工具看到你的密码,但是我们保证不会如此做或者泄露你的密码,也不会向任何人透露加密密钥,信不信任由你自行决定。'
reply_text += '\n3.请确保你目前在私聊告诉小柒密码,而不是在群聊之中,因此造成的损失应该由使用者自行承担'
reply_text += '\n4.自动打卡不保证一直有效,或许接口更改,服务器忙等造成打卡失败,应配合辅导员的提醒自行检查。(由于学业原因不一定及时更新接口)'
reply_text += '\n----------------------------'
reply_text += '\n回复“同意”表示你同意以上服务条款,其余任何回复表示不同意'
self.users[qq]['buffer']['id'] = 9
dataManage.save_user(qq, self.users[qq])
elif message == 'AsYNARTvgt':
need_reply = True
xmu = dataManage.load_obj('lib/account')
if xmu.__contains__(qq):
reply_text = '已为您取消订阅“厦大自动健康打卡”服务'
del xmu[qq]
dataManage.save_obj(xmu, 'lib/account')
else:
reply_text = '您没有订阅“厦大自动健康打卡”服务'
# -----------------------------------------------------------------------------------
# 通过名字唤醒
if message == self.bot_name:
reply_text = '我在!'
need_reply = True
self.statistics['awaken'] += 1
dataManage.save_statistics(self.statistics)
# 帮助内容
if not need_reply:
if message == '帮助' or message == '指令' or message == '菜单':
reply_image = command.help_function()
if mode == 1:
reply_text = '在群内输入“模块列表”查询各个模块开关状态'
need_reply = True
elif message == '打卡帮助':
reply_image = command.help_clock()
if mode == 0 or mode == 2:
reply_text = '这部分命令,只支持群聊哦~'
need_reply = True
elif message == '骰娘' or message == '骰娘帮助' or message == '骰娘指令':
reply_image = command.help_thrower()
if mode == 0 or mode == 2:
reply_text = '这部分命令,只支持群聊哦~'
reply_text += '\n目前因为框架更新骰娘未能及时迁移,如果你确保了解什么是骰娘可以使用小柒的妹妹399608601。' + \
'小捌采用的是塔骰并且加入了全网共同黑名单。违规操作将会被所有塔骰拉黑。'
else:
reply_text = '目前因为框架更新骰娘未能及时迁移,如果你确保了解什么是骰娘可以使用小柒的妹妹399608601。' + \
'小捌采用的是塔骰并且加入了全网共同黑名单。违规操作将会被所有塔骰拉黑。'
need_reply = True
elif message == '塔罗牌帮助':
reply_image = command.help_tarot()
need_reply = True
elif message == '游戏帮助' or message == '游戏指令':
reply_image = command.help_game()
reply_text = '游戏官方社区(906554784)'
reply_text += '\n输入“游戏帮助2”查看下一页'
if mode == 1:
if not self.groups[group_id]['config']['RPG']:
reply_text += '\n本群游戏模块为关闭状态,在群内输入“模块列表”查询各个模块开关状态'
need_reply = True
elif message == '游戏帮助2':
reply_image = command.help_game2()
need_reply = True
elif message == '附魔查询' or message == '查询附魔':
reply_image = command.enchanting()
need_reply = True
elif message == 'buff查询' or message == '查询buff':
reply_image = command.buff()
need_reply = True
elif message == '游戏新手指南' or message == '新手指南':
reply_image = command.help_game_novice()
need_reply = True
elif message == '模块管理帮助':
reply_image = command.help_modular()
reply_text = '您可以使用“模块列表”来查看开关状态'
need_reply = True
elif message == '部落冲突查询帮助' or message.lower() == 'coc帮助':
reply_image = command.help_clash()
need_reply = True
if mode == 1:
if not self.groups[group_id]['config']['clash']:
reply_text = '本群部落冲突查询模块为关闭状态,在群内输入“模块列表”查询各个模块开关状态'
if need_reply:
self.statistics['help'] += 1
dataManage.save_statistics(self.statistics)
# 打卡&分组
if not need_reply and mode == 1:
if message[:4] == '加入分组':
group_name = message[4:].strip()
reply_text = operator.join_group(group_id, self.groups[group_id], group_name, qq)
need_reply = True
elif message[:4] == '退出分组':
group_name = message[4:].strip()
reply_text = operator.quit_group(group_id, self.groups[group_id], group_name, qq)
need_reply = True
elif message == '打卡列表':
clock_data = self.clock.get_clock(group_id)
need_reply = True
if clock_data is None or len(clock_data) == 0:
reply_text = '暂无任何打卡~'
else:
reply_text = '本群现有打卡如下:'
for key, value in clock_data.items():
reply_text += '\n' + key + '(' + str(len(value['member'])) + '人)'
elif message[:4] == '添加打卡' and '@' not in message:
name = message[4:].strip()
need_reply = True
if self.clock.insert_clock(group_id, name):
reply_text = '添加成功!群成员可以输入“加入打卡' + name + '”'
else:
reply_text = '添加失败!已有同名的打卡计划或者打卡已满10个'
elif message[:4] == '删除打卡':
name = message[4:].strip()
need_reply = True
if self.clock.remove_clock(group_id, name):
reply_text = '删除成功!'
else:
reply_text = '删除失败!没有该打卡'
elif message[:4] == '查看打卡':
name = message[4:].strip()
clock_data = self.clock.get_clock_single(group_id, name)
need_reply = True
if clock_data is None:
reply_text = '不存在该打卡'
else:
today = str(datetime.date.today())
reply_text = '打卡<' + name + '>情况如下:'
if clock_data['remind']['switch']:
reply_text += '\n提醒时间-%02d:%02d' % (
clock_data['remind']['hour'], clock_data['remind']['minute'])
if clock_data['summary']['switch']:
reply_text += '\n总结时间-%02d:%02d' % (
clock_data['summary']['hour'], clock_data['summary']['minute'])
reply_text += '\n参与打卡的成员:'
member_list_origin = await bot.member_list(group_id)
member_list = {}
for member in member_list_origin.data:
if not member_list.__contains__(member.id):
member_list[member.id] = member.member_name
for member in clock_data['member']:
if member_list.__contains__(member['qq']):
state = '已签' if today == member['last'] else '未签'
reply_text += '\n' + member_list[member['qq']] + '<' + str(
member['qq']) + '>:' + state + '(连续' + str(member['continuity']) + '天) '
elif message[:4] == '加入打卡':
name = message[4:].strip()
ans = self.clock.join_clock(group_id, qq, name)
need_reply = True
if ans == 0:
reply_text = '加入打卡' + name + '成功\n你可以输入“打卡' + name + '”来进行打卡\n输入“退出打卡' + name + '”来退出'
elif ans == 1:
reply_text = '不存在打卡' + name
elif ans == 2:
reply_text = '你已在打卡' + name + '中'
else:
reply_text = '达到人数上限(单个打卡30人)'
elif message[:4] == '退出打卡':
name = message[4:].strip()
ans = self.clock.quit_clock(group_id, qq, name)
need_reply = True
if ans == 0:
reply_text = '退出打卡' + name + '成功'
elif ans == 1:
reply_text = '不存在打卡' + name
elif ans == 2:
reply_text = '你不在打卡' + name + '中'
elif message[:2] == '打卡' and message_len > 2 and '@' not in message:
name = message[2:].strip()
ans = self.clock.sign(group_id, qq, name)
need_at = True
need_reply = True
if ans >= 0:
reply_text = '打卡' + name + '成功!已经连续打卡' + str(ans) + '天'
elif ans == -1:
need_reply = False
need_at = False
reply_text = '不存在打卡<' + name + '>'
elif ans == -2:
reply_text = '你没有加入打卡<' + name + '>'
elif ans == -3:
reply_text = '你今天已经打过卡了~'
if need_reply:
self.statistics['clock_activity'] += 1
dataManage.save_statistics(self.statistics)
logManage.group_log(getNow.toString(), qq, group_id, event.sender.group.get_name(),
message + "; 执行结果:" + reply_text)
# 基础功能
if not need_reply:
if message[:2] == '天气': # 开始的天气
tmp = message[2:].strip()
if tmp[0] != '#':
reply_text = weather.getWeather(tmp)
need_at = False
need_reply = True
elif message[-2:] == '天气': # 结尾的天气
tmp = message[:-2].strip()
if '这鬼' not in tmp and tmp[0] != '#': # 语言优化处理(避免“这鬼天气”的语气词)
reply_text = weather.getWeather(tmp)
need_at = False
need_reply = True
elif message[-3:] == '的天气': # 结尾的天气
tmp = message[:-3].strip()
if tmp[0] != '#':
reply_text = weather.getWeather(tmp)
need_at = False
need_reply = True
elif message == '色子' or message == '骰子':
reply_text = BaseFunction.dice()
need_at = True
need_reply = True
elif message == '抛硬币' or message == '硬币':
reply_text = BaseFunction.coin()
need_at = True
need_reply = True
elif message == '运势':
reply_text = self.luck.get_luck(qq)
need_at = True
need_reply = True
elif message == '微博热搜':
reply_text = BaseFunction.getHot()
need_reply = True
elif message == '百度热搜':
reply_text = baidu.getHot()
need_reply = True
elif message == '四级词汇' or message == '四级单词' or message == '4级词汇' or message == '4级单词':
vocabularyNumber = 1
reply_text = BaseFunction.get_vocabulary4(vocabularyNumber)
need_reply = True
elif message[:5] == '四级词汇 ' or message[:5] == '四级单词 ' or message[:5] == '4级词汇 ' or message[:5] == '4级单词 ':
vocabularyNumber = int(message[5:].strip())
if vocabularyNumber <= 0:
vocabularyNumber = 1
reply_text = BaseFunction.get_vocabulary4(vocabularyNumber)
need_reply = True
elif message == '六级词汇' or message == '六级单词' or message == '6级词汇' or message == '6级单词':
vocabularyNumber = 1
reply_text = BaseFunction.get_vocabulary6(vocabularyNumber)
need_reply = True
elif message[:5] == '六级词汇 ' or message[:5] == '六级单词 ' or message[:5] == '6级词汇 ' or message[:5] == '6级单词 ':
vocabularyNumber = int(message[5:].strip())
if vocabularyNumber <= 0:
vocabularyNumber = 1
reply_text = BaseFunction.get_vocabulary6(vocabularyNumber)
need_reply = True
elif message == '拾取漂流瓶' or message == '捡漂流瓶' or message == '捞漂流瓶':
reply_text = self.bottle.pick()
need_reply = True
elif message[:4] == '扔漂流瓶' and message_len > 4:
text = message[4:].strip()
if len(text) > 0:
reply_text = self.bottle.throw(qq, text)
need_reply = True
elif message[:5] == '随机字符串' and message_len > 5:
text = message[5:].strip()
if text.isdigit():
need_reply = True
reply_text = BaseFunction.random_char(int(text))
if need_reply:
self.statistics['base_function'] += 1
dataManage.save_statistics(self.statistics)
# 文摘、脏话、情话
if not need_reply:
if message == '文摘':
reply_text = talk.poem()
need_reply = True
elif message == '情话':
reply_text = talk.loveTalk()
need_reply = True
elif message == '骂我一句' or message == '骂我' or message == '再骂' or message == '你再骂' or message == '脏话':
if mode == 0 or mode == 2 or (mode == 1 and self.groups[group_id]['config']['curse']):
reply_text = talk.swear()
need_reply = True
if need_reply:
self.statistics['talk'] += 1
dataManage.save_statistics(self.statistics)
# 涩图
if not need_reply and mode == 1 and self.groups[group_id]['config']['image']:
if message == '涩图':
await bot.send(event, '该功能并未优化暂时被锁定,不开放。具体开放日期待定,是开发情况而定。')
need_reply = True
if need_reply:
self.statistics['image_search'] += 1
dataManage.save_statistics(self.statistics)
# 指令
if not need_reply:
if 0 < message_code_len < 1000 and message_code[0].isalnum():
if mode == 1:
(reply_text, need_at, reply_image) = command.function(message_code,
qq,
name,
group_id,
mode,
self.config,
self.groups[group_id],
self.statistics)
else:
(reply_text, need_at, reply_image) = command.function(message_code,
qq,
name,
group_id,
mode,
self.config,
self.users[qq],
self.statistics)
if len(key_allow) == 0 and reply_text.startswith('未知指令'):
need_reply = False
else:
need_reply = True
if reply_text == '*运势*':
reply_text = self.luck.get_luck(qq)
need_at = True
if need_reply:
self.statistics['command'] += 1
dataManage.save_statistics(self.statistics)
# -----------------------------------------------------------------------------------
# 管理员操作
if not need_reply:
if not need_reply:
if mode == 1:
(need_reply, need_at, reply_text, reply_image) = await operator.administrator_operation(
bot,
event,
message,
qq,
name,
group_id,
mode,
self.config,
self.groups[group_id],
self.statistics,
right,
group_right)
else:
(need_reply, need_at, reply_text, reply_image) = await operator.administrator_operation(
bot,
event,
message,
qq,
name,
group_id,
mode,
self.config,
self.users[qq],
self.statistics,
right,
group_right)
if need_reply:
self.statistics['operate'] += 1
dataManage.save_statistics(self.statistics)
# -----------------------------------------------------------------------------------
# rpg游戏
if not need_reply:
if mode == 1:
limit = self.groups[group_id]['config']['limit_RPG']
RPG = self.groups[group_id]['config']['RPG']
else:
limit = False
RPG = True
if RPG:
(need_reply, reply_text, reply_image) = self.rpg.handle(
message,
qq,
name,
self.get_user(qq),
self.config,
be_at,
limit
)
if need_reply:
self.statistics['game'] += 1
dataManage.save_statistics(self.statistics)
# -----------------------------------------------------------------------------------
# 部落冲突
if not need_reply and mode == 1 and self.groups[group_id]['config']['clash']:
need_reply, reply_text, reply_image = await self.clash.handle(bot, event, message, group_id, qq,
self.groups[group_id],
self.users[qq])
if need_reply:
merge_reply = True
if need_reply:
self.statistics['clash'] += 1
dataManage.save_statistics(self.statistics)
if not need_reply and mode == 0 and message.startswith('coc'):
need_reply = True
reply_text = '暂不支持私聊查询,请在群聊内查询。后续会慢慢支持私聊查询。'
# -----------------------------------------------------------------------------------
# 群自己设定的关键词回复
if not need_reply and mode == 1 and self.groups[group_id]['config']['autonomous_reply']:
(need_reply, reply_text, reply_image, at_qq, need_at, need_complex_reply, complex_reply,
complex_at) = keyReply.reply(
message,
group_id,
self.groups[group_id],
self.statistics)
if need_reply:
self.statistics['key_reply'] += 1
dataManage.save_statistics(self.statistics)
# -----------------------------------------------------------------------------------
# 自动加一
if not need_reply and mode == 1 and self.groups[group_id]['config']['repeat']:
if not self.message_tmp.__contains__(group_id):
self.message_tmp[group_id] = message_chain
else:
reply_chain = self.message_tmp[group_id]
tmp = str(reply_chain)
self.message_tmp[group_id] = message_chain # 将记录的上一次的消息更改为这次收到的消息
if 'xml' not in tmp and tmp[0] != '[' and tmp[-1] != ']':
if tmp == message and tmp != self.last_reply:
await bot.send(event, message_chain)
need_reply = True
self.last_reply = tmp
if need_reply:
self.statistics['auto_repeat'] += 1
dataManage.save_statistics(self.statistics)
# 智能回复
if not need_reply:
if mode == 1:
ai = self.groups[group_id]['config']['ai']
else:
ai = self.users[qq]['config']['ai']
if ai:
(need_reply, reply_text, reply_image, at_qq, need_at) = autoReply.reply(
message,
be_at,
self.config,
self.statistics,
name,
group_id,
qq,
mode)
if need_reply:
self.statistics['auto_reply'] += 1
dataManage.save_statistics(self.statistics)
if mode == 1:
for key in self.groups[group_id]['prohibited_word']:
if key in reply_text:
reply_text = '【神经网络回复内容包含群内设置的屏蔽词,已自动和谐】'
break
if need_reply:
self.statistics['message'] += 1
dataManage.save_statistics(self.statistics)
if not need_complex_reply: # 非复杂回复
if reply_text != '':
self.last_reply = reply_text
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
else:
await send_complex_message(bot, event, mode, complex_reply, complex_at)
async def new_friend(self, bot, event):
self.config = dataManage.read_config()
master = await bot.get_friend(self.config['master'])
blacklist = self.get_blacklist(event.from_id, 0)
if blacklist != 0:
if master is not None:
await bot.send_friend_message(self.config['master'],
'有新的好友申请<' + event.nick + '>(' + str(event.from_id) + ')!已拒绝,原因:黑名单')
await bot.decline(event)
return
if master is not None:
await bot.send_friend_message(self.config['master'],
'有新的好友申请<' + event.nick + '>(' + str(event.from_id) + ')!')
await bot.allow(event)
qq = event.from_id
name = event.nick
member = await bot.get_friend(qq)
if member is not None:
reply = '你好呀!' + name + '\n'
reply += '小柒的快速上手指南:\n'
reply += '可以通过输入“帮助”来获取所有的指令帮助,请仔细阅读其中的内容!!\n'
reply += '可以通过输入“骰娘帮助”来获取所有的骰娘指令帮助\n\n'
reply += '小柒的功能是分模块的,按需开启,可以在群内输入“模块列表”查询\n'
reply += '如果有任何疑问可以加小柒的官方Q群:479504567,在群聊里可以告诉主人解除黑名单,以及获取到管理员权限解锁一些新功能\n\n'
reply += '特别申明:\n'
reply += '1.不要将小柒踢出任何群聊,或者在任何群聊禁言小柒,这些都有专门的指令代替!!!如果直接踢出,踢出人和群将会无理由黑名单,禁言视情况(频繁程度)而定\n'
reply += '2.不要对机器人搞黄色,对机器人搞黄色你是有多饥渴?'
await bot.send_friend_message(qq, reply)
self.statistics['new_friend'] += 1
dataManage.save_statistics(self.statistics)
async def new_group(self, bot, event):
self.config = dataManage.read_config()
master = await bot.get_friend(self.config['master'])
blacklist = self.get_blacklist(event.from_id, event.group_id)
if blacklist != 0:
await bot.send_friend_message(self.config['master'],
'有新的群申请<' + event.group_name + '>(' + str(event.group_id) + ')!已拒绝,原因:黑名单')
await bot.decline(event)
return
qq = event.from_id
name = event.nick
if master is not None:
await bot.send_friend_message(self.config['master'], '有新的群申请<' + event.group_name + '>(' + str(
event.group_id) + ')!\n邀请人:<' + name + '>(' + str(qq) + ')')
# await bot.allow(event)
member = await bot.get_friend(qq)
if member is not None:
await bot.send_friend_message(qq, '暂时不接受群邀请,请前往官方群(479504567)申请')
self.statistics['new_group'] += 1
dataManage.save_statistics(self.statistics)
async def join_group(self, bot, event):
if event.invitor is not None:
name = event.invitor['memberName']
qq = event.invitor['id']
reply = '已加入群,邀请人:<' + name + '>(' + str(qq) + ')' + '\n'
reply += '小柒的快速上手指南:\n'
reply += '可以通过输入“帮助”来获取所有的指令帮助,请仔细阅读其中的内容!!\n'
reply += '可以通过输入“骰娘帮助”来获取所有的骰娘指令帮助\n\n'
reply += '小柒的功能是分模块的,按需开启,可以在群内输入“模块列表”查询\n'
reply += '如果有任何疑问可以加小柒的官方Q群:479504567,在群聊里可以告诉主人解除黑名单,以及获取到管理员权限解锁一些新功能\n\n'
reply += '特别申明:\n'
reply += '1.不要将小柒踢出任何群聊,或者在任何群聊禁言小柒,这些都有专门的指令代替!!!如果直接踢出,踢出人和群将会无理由黑名单,禁言视情况(频繁程度)而定\n'
reply += '2.不要对机器人搞黄色,对机器人搞黄色你是有多饥渴?'
reply += '3.如果群主或管理员对该机器人有疑问请问邀请人,或者在群里发送“*quit”指令让机器人退群,星号不可以省略'
await bot.send_group_message(event.group.id, reply)
else:
reply = '已加入群<' + event.group.name + '>'
reply += '1.如果群主或管理员对该机器人有疑问请问邀请人,或者在群里发送“*quit”指令让机器人退群,星号不可以省略'
await bot.send_group_message(event.group.id, reply)
async def nudge(self, bot, event):
self.get_group(event.subject.id)
if self.groups[event.subject.id]['config']['mute']:
return
if not self.groups[event.subject.id]['config']['nudge']:
return
if self.get_blacklist(0, event.subject.id) != 0:
return
if event.target == bot.qq:
statistics = dataManage.read_statistics()
statistics['nudge'] += 1
dataManage.save_statistics(statistics)
if str(event.subject.kind) == 'Group':
rand = random.randint(0, 26)
if rand == 0:
reply_image = 'data/AutoReply/Nudge/打.gif'
await bot.send_group_message(event.subject.id,
[Plain('你再戳?你再戳?'), await Image.from_local(filename=reply_image)])
elif rand == 1:
reply_image = 'data/AutoReply/Nudge/质疑.jpg'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 2:
reply_image = 'data/AutoReply/Nudge/过分.jpg'
await bot.send_group_message(event.subject.id,
[Plain('别戳了'), await Image.from_local(filename=reply_image)])
elif rand == 3:
reply_image = 'data/AutoReply/Nudge/乖巧.jpg'
await bot.send_group_message(event.subject.id,
[Plain('放过我吧'), await Image.from_local(filename=reply_image)])
elif rand == 4:
reply_image = 'data/AutoReply/Nudge/无语.jpg'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 5:
await bot.send_group_message(event.subject.id, '你再戳我就哭给你看,嘤嘤嘤~')
elif rand == 6:
reply_image = 'data/AutoReply/Nudge/委屈2.jpg'
await bot.send_group_message(event.subject.id,
[Plain('别戳了呜呜'), await Image.from_local(filename=reply_image)])
elif rand == 7:
reply_image = 'data/AutoReply/Nudge/上头.png'
await bot.send_group_message(event.subject.id,
[Plain('你是不是戳上头了'), await Image.from_local(filename=reply_image)])
elif rand == 8:
reply_image = 'data/AutoReply/Nudge/质疑2.gif'
await bot.send_group_message(event.subject.id,
[Plain('为什么戳我'), await Image.from_local(filename=reply_image)])
elif rand == 9:
reply_image = 'data/AutoReply/Nudge/委屈.jpg'
await bot.send_group_message(event.subject.id,
[Plain('别戳了呜呜'), await Image.from_local(filename=reply_image)])
elif rand == 10:
reply_image = 'data/AutoReply/Nudge/不许戳.jpg'
await bot.send_group_message(event.subject.id,
[Plain('不许戳'), await Image.from_local(filename=reply_image)])
elif rand == 11:
reply_image = 'data/AutoReply/Nudge/委屈3.jpg'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 12:
reply_image = 'data/AutoReply/Nudge/不开心.jpg'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 13:
reply_image = 'data/AutoReply/Nudge/不开心2.jpg'
await bot.send_group_message(event.subject.id,
[Plain('不可以再戳了'), await Image.from_local(filename=reply_image)])
elif rand == 14:
reply_image = 'data/AutoReply/Nudge/无语2.jpg'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 15:
reply_image = 'data/AutoReply/Nudge/无语3.bmp'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 16:
reply_image = 'data/AutoReply/Nudge/哭.bmp'
await bot.send_group_message(event.subject.id,
[Plain('不可以做这种事情哦~'), await Image.from_local(filename=reply_image)])
elif rand == 17:
reply_image = 'data/AutoReply/Nudge/别戳了.bmp'
await bot.send_group_message(event.subject.id,
[Plain('不可以再戳了~'), await Image.from_local(filename=reply_image)])
elif rand == 18:
reply_image = 'data/AutoReply/Nudge/质疑3.bmp'
await bot.send_group_message(event.subject.id,
[Plain('你再戳你是笨蛋'), await Image.from_local(filename=reply_image)])
elif rand == 19:
reply_image = 'data/AutoReply/Nudge/骂骂咧咧.png'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 20:
reply_image = 'data/AutoReply/Nudge/质疑4.bmp'
await bot.send_group_message(event.subject.id,
[Plain('真够无聊的呢'), await Image.from_local(filename=reply_image)])
elif rand == 21:
reply_image = 'data/AutoReply/Nudge/打2.jpg'
await bot.send_group_message(event.subject.id,
[Plain('突死你'), await Image.from_local(filename=reply_image)])
elif rand == 22:
reply_image = 'data/AutoReply/Nudge/无语4.gif'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 23:
reply_image = 'data/AutoReply/Nudge/乖巧2.jpg'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
elif rand == 24:
reply_image = 'data/AutoReply/Nudge/哭2.jpg'
await bot.send_group_message(event.subject.id, [await Image.from_local(filename=reply_image)])
else:
await bot.send_group_message(event.subject.id, '别戳啦~')
elif str(event.subject.kind) == 'Friend':
await bot.send_friend_message(event.from_id, '别戳啦~')
async def kick(self, bot, event):
    """Handle the bot being kicked from a group.

    Notifies the configured master (when reachable as a friend) and bumps
    the persistent 'kick' statistics counter.
    """
    self.config = dataManage.read_config()
    master_id = self.config['master']
    master = await bot.get_friend(master_id)
    if master is not None:
        notice = '被踢出群<' + event.group.get_name() + '>(' + str(event.group.id) + ')!'
        await bot.send_friend_message(master_id, notice)
    # Count the kick even when the master could not be notified.
    self.statistics['kick'] += 1
    dataManage.save_statistics(self.statistics)
async def join(self, bot, event):
    """Greet a newly joined member with the group's configured welcome message.

    If welcoming is enabled but no welcome message is stored, the feature is
    switched off for the group and the change is persisted. Otherwise the new
    member is @-mentioned in front of the stored message and the greeting is
    logged.
    """
    self.get_group(event.group.id)
    if self.groups[event.group.id]['config']['welcome']:
        welcome = self.groups[event.group.id]['welcome']
        if welcome is None:
            # No message configured: disable the feature instead of failing
            # on every future join.
            self.groups[event.group.id]['config']['welcome'] = False
            dataManage.save_group(event.group.id, self.groups[event.group.id])
            return
        # Bug fix: copy before prepending the mention. Inserting into the
        # stored list mutated the cached welcome message, so every join
        # permanently accumulated one more At() at the front.
        message = list(welcome)
        message.insert(0, At(event.member.id))
        await bot.send_group_message(event.group.id, message)
        logManage.group_log(getNow.toString(), event.member.id, event.group.id, event.group.get_name(), '入群欢迎')
async def request_group(self, bot, event):
    """Handle an incoming group-join request.

    When member watching is enabled the request details are announced in the
    group; when automatic approval is enabled and the request message matches
    the configured pass-phrase, the request is approved.
    """
    self.get_group(event.group_id)
    # NOTE: 'member_wather' is the key name used throughout the persisted
    # group config (typo preserved for data compatibility).
    group_conf = self.groups[event.group_id]['config']
    if group_conf['member_wather']:
        lines = [
            '有新的群申请~',
            '申请人:' + event.nick,
            '申请人QQ:' + str(event.from_id),
            '申请信息:\n' + event.message,
        ]
        await bot.send_group_message(event.group_id, '\n'.join(lines))
    if group_conf['automatic']:
        if event.message.strip() == group_conf['pass']:
            await bot.allow(event)
async def leave_group(self, bot, event):
    """Announce in the group when a member leaves on their own."""
    self.get_group(event.group.id)
    if not self.groups[event.group.id]['config']['member_wather']:
        return
    departed = event.member.member_name + '(' + str(event.member.id) + ')'
    await bot.send_group_message(event.group.id, '此刻我们失去了一位成员:' + departed)
async def kick_group(self, bot, event):
    """Announce in the group when an operator kicks a member out."""
    self.get_group(event.group.id)
    if not self.groups[event.group.id]['config']['member_wather']:
        return
    notice = ('管理员<' + event.operator.member_name + '>踢出成员<'
              + event.member.member_name + '(' + str(event.member.id) + ')>')
    await bot.send_group_message(event.group.id, notice)
async def member_change(self, bot, event):
    """Report a member's group-card (display name) change.

    An empty card value means the member has no card set; the QQ nickname
    is appended in that case, mirroring what group members actually see.
    """
    self.get_group(event.group.id)
    if not self.groups[event.group.id]['config']['member_wather']:
        return

    def shown_name(card):
        # Empty string: fall back to the member's QQ nickname.
        if card != '':
            return card
        return card + event.member.member_name + '(QQ昵称)'

    report = ('有一成员(' + str(event.member.id) + ')修改了群名片'
              + '\n原始名字:' + shown_name(event.origin)
              + '\n新名字:' + shown_name(event.current))
    await bot.send_group_message(event.group.id, report)
async def group_recall_message(self, bot, event):
    """Repost a message that its own author tried to recall.

    Only active when the group's 'revoke' watch flag is set. Recalls
    performed by someone other than the author (i.e. an administrator)
    are left alone.
    """
    self.get_group(event.group.id)
    if not self.groups[event.group.id]['config']['revoke']:
        return
    if event.author_id != event.operator.id:
        # Recalled by an admin rather than the author; do not repost.
        print('非自己撤回')
        return
    message = await bot.message_from_id(event.message_id)
    message_chain = message.message_chain
    message_chain.insert(0, Plain('成员<%d>试图撤回%s的消息:\n------------\n' % (event.author_id, event.time)))
    await bot.send_group_message(event.group.id, message_chain)
| StarcoderdataPython |
4908931 | #EXPORT_PATH = "/Users/johantenbroeke/Sites/projects/fullscreen_3/xcodeprojects/oneonone/Resources/leveldata/"
#GAMEPROGRESS_PATH = "/Users/johantenbroeke/Sites/projects/fullscreen_3/xcodeprojects/oneonone/Resources/"
# Presumably selects binary (1) vs XML (0) plist output — TODO confirm
# against the exporter that reads this flag.
USE_BINARY_PLIST = 1
# Relative destination inside the Xcode project's Resources/leveldata
# folder (absolute equivalents are shown commented out above).
EXPORT_PATH = "../../../../Resources/leveldata/"
# Game-progress output; currently pointed at a local testing folder.
GAMEPROGRESS_PATH = "./testing/"
# Destination for generated Objective-C class files.
OBJC_CLASS_PATH = "../../../../Classes/"
| StarcoderdataPython |
45408 | <reponame>ads-ad-itcenter/qunomon.forked
import os
import sys
import shutil
import glob
from pathlib import Path
import json
import yaml
# init: resolve the AIT working directories from the environment.
# A missing QAI_USER_HOME is a deliberate hard failure (KeyError) at import.
QAI_USER_HOME = os.environ['QAI_USER_HOME']
inventory_dir = os.path.join(QAI_USER_HOME, 'inventory/')
# check args: load the invocation arguments when the runtime supplied any.
# NOTE(review): `args` stays undefined when args.json is absent, so arg()
# would raise NameError in that case — presumably the AIT runtime always
# writes the file; confirm.
args_file = os.path.join(QAI_USER_HOME, 'args', 'args.json')
if os.path.exists(args_file):
    print('args_file:{}'.format(str(args_file)))
    with open(str(args_file), encoding='utf-8') as f:
        args = json.load(f)
def inventory(inventory_name):
    """Return every file under the named inventory directory, recursively.

    Paths are normalized to forward slashes so results are uniform across
    platforms.
    """
    pattern = inventory_dir + inventory_name + '/**/*'
    paths = []
    for candidate in glob.glob(pattern, recursive=True):
        if os.path.isfile(candidate):
            paths.append(candidate.replace('\\', '/'))
    return paths
def arg(arg_name):
    """Return one value from the invocation args loaded at import time.

    Raises KeyError for an unknown name (and NameError if no args.json
    was supplied to this run).
    """
    value = args[arg_name]
    return value
def output(path):
    """Copy *path* into the run's result directory ($QAI_USER_HOME/result/).

    The file keeps its base name. The destination directory is assumed to
    exist — presumably created by the AIT runtime; confirm.
    """
    # Build the destination with a single os.path.join instead of gluing a
    # trailing-slash directory and a basename together with '+'.
    destination = os.path.join(os.environ['QAI_USER_HOME'], 'result', os.path.basename(path))
    shutil.copyfile(path, destination)
| StarcoderdataPython |
11308388 | from aiohttp import ClientSession
import asyncio
import json
import random
from typing import List
import hqtrivia.config as config
"""
Abstracts the multiple question to be used in the trivia game.
"""
class Question:
    """A single multiple-choice trivia question used in one game round."""

    def __init__(self, question: str, choices: List[str], answer: str):
        # `answer` is the correct option; it also appears inside `choices`.
        self.question = question
        self.choices = choices
        self.answer = answer

    def __repr__(self) -> str:
        return str(self)

    def __str__(self) -> str:
        return str(vars(self))

    @staticmethod
    async def generate() -> 'Question':
        """Fetch a fresh question from the open trivia REST endpoint.

        Raises an exception whenever the server answers with anything
        other than 200 OK.
        """
        async with ClientSession() as session:
            async with session.get(config.CONFIG_QUESTION_GENERATOR_API) as resp:
                if resp.status != 200:
                    raise Exception(
                        f"Received response {resp.status} from {config.CONFIG_QUESTION_GENERATOR_API}")

                # Convert the JSON payload into our Question and return it.
                payload = await resp.text()
                return opentdb_json_to_question(payload)
def opentdb_json_to_question(json_text: str) -> Question:
    """Convert one opentdb.com JSON payload into a Question.

    Only the first entry of the ``results`` array is used. The correct
    answer is spliced into a random position among the incorrect answers
    so its slot gives nothing away.

    Example opentdb.com payload (abridged)::

        {"response_code": 0,
         "results": [{"question": "What is real haggis made of?",
                      "correct_answer": "...",
                      "incorrect_answers": ["...", "...", "..."]}]}
    """
    entry = json.loads(json_text)['results'][0]
    answer = entry['correct_answer']
    wrong = entry['incorrect_answers']
    # randint is inclusive on both ends, so every insertion slot from 0 to
    # len(wrong) is reachable.
    slot = random.randint(0, len(wrong))
    choices = wrong[:slot] + [answer] + wrong[slot:]
    return Question(entry['question'], choices, answer)
| StarcoderdataPython |
301563 | <filename>mldp/tutorials/steps/features_labels_formatter.py<gh_stars>1-10
from mldp.steps.formatters.base_formatter import BaseFormatter
import numpy as np
class FeaturesLabelsFormatter(BaseFormatter):
    """Formats a data chunk into a (features, one-hot labels) tuple."""

    def __init__(self, features_field_name, labels_field_name, classes_number):
        super(FeaturesLabelsFormatter, self).__init__()
        # Attribute names are kept as-is; _format reads them by name.
        self.feature_field_name = features_field_name
        self.labels_field_name = labels_field_name
        self.classes_number = classes_number

    def _format(self, data_chunk):
        feats = data_chunk[self.feature_field_name]
        raw_labels = data_chunk[self.labels_field_name]
        # Row i of the identity matrix is the one-hot vector for class i, so
        # fancy-indexing with the label array one-hot encodes the whole batch.
        one_hot = np.eye(self.classes_number, dtype="float32")[raw_labels]
        return feats, one_hot
| StarcoderdataPython |
8109294 | <gh_stars>0
import aiohttp
from sanic.log import log
HOST = '127.0.0.1'
PORT = 42101
async def local_request(method, uri, cookies=None, *args, **kwargs):
    """Issue one HTTP request against the locally running test server.

    The awaited text/bytes payloads are stashed onto the response object
    (as ``.text`` and ``.body``) so callers can use them synchronously.
    """
    target = 'http://{host}:{port}{uri}'.format(host=HOST, port=PORT, uri=uri)
    log.info(target)
    async with aiohttp.ClientSession(cookies=cookies) as session:
        request_method = getattr(session, method)
        async with request_method(target, *args, **kwargs) as response:
            response.text = await response.text()
            response.body = await response.read()
            return response
def sanic_endpoint_test(app, method='get', uri='/', gather_request=True,
                        loop=None, *request_args, **request_kwargs):
    """Run *app*, issue a single request against it, then stop the server.

    Returns ``(request, response)`` when ``gather_request`` is true (the
    default), otherwise just the response. Raises ValueError when the
    request failed or the expected objects were not collected.
    """
    results = []
    exceptions = []

    if gather_request:
        @app.middleware
        def _collect_request(request):
            results.append(request)

    async def _collect_response(sanic, loop):
        try:
            response = await local_request(method, uri, *request_args,
                                           **request_kwargs)
            results.append(response)
        except Exception as e:
            exceptions.append(e)
        app.stop()

    # Use the shared PORT constant instead of repeating the magic number;
    # local_request() builds its URL from the same HOST/PORT pair.
    app.run(host=HOST, port=PORT, after_start=_collect_response, loop=loop)

    if exceptions:
        raise ValueError("Exception during request: {}".format(exceptions))

    if gather_request:
        # Unpacking fails with ValueError when middleware did not collect
        # exactly a request and a response.
        try:
            request, response = results
            return request, response
        except ValueError:
            raise ValueError(
                "request and response object expected, got ({})".format(
                    results))
    else:
        try:
            return results[0]
        except IndexError:
            raise ValueError(
                "request object expected, got ({})".format(results))
| StarcoderdataPython |
1978674 | '''
Created on Dec 6, 2018
'''
# System imports
import os
# Standard imports
import numpy as np
import tensorflow as tf
import keras.backend as K
from scipy import stats
# Plotting libraries
import matplotlib.pyplot as plt
# Project library imports
from modules.deltavae.deltavae_latent_spaces.deltavae_parent import DiffusionVAE
from modules.deltavae.deltavae_latent_spaces.deltavae_sphere import volume_sphere
class DiffusionO3VAE(DiffusionVAE):
    '''
    Diffusion VAE whose latent manifold is the orthogonal group O(3),
    represented as 3x3 matrices embedded in a 9-dimensional ambient space.
    '''

    def __init__(self, params, encoder_class, decoder_class):
        '''
        Constructor.

        :param params: parameter object; its params_dict is tagged with the
            manifold name, and it must expose `d` (read by the curvature
            lambda) and `controlled_capacity` (read in kl_tensor)
        :param encoder_class: encoder network class handed to the parent VAE
        :param decoder_class: decoder network class handed to the parent VAE
        '''
        params.params_dict["manifold"] = "o3"
        self.latent_dim = 9  # dimension of ambient space (flattened 3x3 matrix)
        self.scale_dim = 1  # dimension of time parameter
        # The volume of O(3) is twice the volume of SO(3)
        self.volume = np.sqrt(2) ** 3 * volume_sphere(3)  # manifold volume
        # NOTE(review): this lambda computes d*(d-1), while kl_tensor below
        # uses d*(d-1)/2 for the scalar curvature — confirm which is intended.
        self.S = lambda x:self.params.d *(self.params.d-1)  # scalar curvature
        # Distributions and densities
        self.decoding_distribution = stats.multivariate_normal
        self.log_prior = np.log(1 / self.volume)  # log density of the uniform prior
        super(DiffusionO3VAE, self).__init__( params, encoder_class, decoder_class)

    def kl_tensor(self, logt, y):
        """Closed-form KL-style regularizer as a function of the log
        diffusion time `logt` (the `y` argument is unused here)."""
        d = 3
        scalar_curv = d * (d - 1) / 2
        volume = self.volume
        loss = -d * logt / 2.0 - d * np.log(2.0 * np.pi) / 2.0 - d / 2.0 + np.log(volume) \
            + scalar_curv * K.exp(logt) / 4
        if self.params.controlled_capacity:
            # Controlled-capacity variant: penalize the distance from a
            # trainable target capacity C instead of the raw value.
            self.C = tf.Variable(1.0)
            loss = tf.abs(loss-self.C)
        return loss

    def sampling(self, args):
        """Reparameterization trick by sampling from an isotropic unit Gaussian.

        Simulates `self.steps` steps of a random walk: each step adds scaled
        Gaussian noise in the ambient R^9 and re-projects onto the manifold.

        # Arguments:
            args (tensor): mean and log of variance of Q(z|X)

        # Returns:
            z (tensor): sampled latent vector
        """
        z_mean_projected, z_log_t = args
        z_sample = z_mean_projected
        for k in range(self.steps):
            epsilon = K.random_normal(shape=K.shape(z_mean_projected))
            # Define the step taken (scaled so the total variance over all
            # steps matches exp(z_log_t))
            step = K.exp(0.5 * z_log_t) * epsilon / np.sqrt(self.steps)
            # Project back to the manifold
            z_sample = self.projection(z_sample + step)
        return z_sample

    def projection(self, z):
        """
        This function takes an input latent variable (tensor) in ambient space R^latent_dim and projects it into the
        chosen manifold.

        The map is U @ V^T from the SVD of the 3x3 matrix form of `z`,
        i.e. the closest orthogonal matrix in the Frobenius sense.

        :param z: Input latent variable in R^latent_dim
        :return: Projected latent variable in manifold
        """
        z_reshaped = tf.reshape(z, [-1, 3, 3])
        s, u, v = tf.linalg.svd(z_reshaped, full_matrices=True)
        z_proj = tf.reshape(tf.matmul(u, v, transpose_b=True), [-1, 9])
        return z_proj

    def encode_matrix(self, data, batch_size):
        """Encode `data` and return the latent means reshaped into
        (-1, 3, 3) matrices."""
        encoded = self.encode_location(data, batch_size)
        encoded = encoded.reshape((-1, 3, 3))
        return encoded

    # # # # # # # # # # PLOTTING FUNCTIONS # # # # # # # # # #
    def save_plot_latent_space(self, x_test, color, batch_size, filename):
        """Scatter-plot the encoded latent matrices as Euler angles.

        Encodings are split by determinant sign (the two connected
        components of O(3)); negative-determinant matrices are negated
        before the Euler conversion. Saves the figure to `filename` when it
        is not None and returns (fig, ax).
        """
        z_mean = self.encode_matrix(x_test, batch_size=batch_size)
        angles_positive = []
        positive_y = []
        angles_negative = []
        negative_y = []
        for num_z, z in enumerate(z_mean):
            if np.linalg.det(z) >= 0:
                angles_positive.append(self.rotationMatrixToEulerAngles(z))
                positive_y.append(color[num_z])
            else:
                # Negate so the matrix lands in SO(3) before conversion.
                angles_negative.append(self.rotationMatrixToEulerAngles(-z))
                negative_y.append(color[num_z])
        angles_positive = np.array(angles_positive)
        angles_negative = np.array(angles_negative)
        positive_y = np.array(positive_y)
        negative_y = np.array(negative_y)
        fig = plt.figure(figsize=(24, 10))
        ax = fig.add_subplot(1, 2, 1, projection='3d')
        ax.set_title("Positive")
        ax.scatter(angles_positive[:, 0], angles_positive[:, 1], angles_positive[:, 2], c=positive_y)
        ax = fig.add_subplot(1, 2, 2, projection='3d')
        ax.scatter(angles_negative[:, 0], angles_negative[:, 1], angles_negative[:, 2], c=negative_y)
        ax.set_title("Negative")
        if filename is not None:
            root_dir = os.path.split(filename)[0]
            os.makedirs(root_dir, exist_ok=True)
            plt.savefig(filename, bbox_inches="tight")
        return fig, ax

    def save_plot_image_reconstruction(self, batch_size, filename, samples):
        """Reconstruction plotting is not implemented for this latent space
        — presumably kept to satisfy the parent plotting interface; confirm."""
        print("Not implemented")
        return None

    # # Checks if a matrix is a valid rotation matrix.
    def isRotationMatrix(self, R):
        """Return True when R^T R equals the identity to within 1e-5
        (Frobenius norm), i.e. R is numerically orthogonal."""
        Rt = np.transpose(R)
        shouldBeIdentity = np.dot(Rt, R)
        I = np.identity(3, dtype=R.dtype)
        n = np.linalg.norm(I - shouldBeIdentity)
        return n < 1e-5

    # Calculates rotation matrix to euler angles
    # The result is the same as MATLAB except the order
    # of the euler angles ( x and z are swapped ).
    def rotationMatrixToEulerAngles(self, R):
        """Convert rotation matrix R into Euler angles [x, y, z] in radians,
        with a gimbal-lock fallback when sy is near zero."""
        assert (self.isRotationMatrix(R)), "Not a rotation matrix"
        sy = np.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
        singular = sy < 1e-6
        if not singular:
            x = np.arctan2(R[2, 1], R[2, 2])
            y = np.arctan2(-R[2, 0], sy)
            z = np.arctan2(R[1, 0], R[0, 0])
        else:
            # Gimbal lock: z cannot be recovered, pin it to 0.
            x = np.arctan2(-R[1, 2], R[1, 1])
            y = np.arctan2(-R[2, 0], sy)
            z = 0
        return np.array([x, y, z])
| StarcoderdataPython |
3279259 | import hmac
import hashlib
import binascii
import json
import base64
import re
import time
def base64url(utfbytes):
    """Encode *utfbytes* as URL-safe base64 without '=' padding.

    This is the JWT "base64url" convention: '+' becomes '-', '/' becomes
    '_', and trailing padding is stripped.
    """
    # urlsafe_b64encode already performs the +/ -> -_ substitution in one
    # pass, replacing the original three chained regex substitutions.
    return base64.urlsafe_b64encode(utfbytes).decode('utf-8').rstrip('=')
def stringify64(data):
    """Serialize *data* as compact JSON and base64url-encode the UTF-8 bytes."""
    # Compact separators keep the JWT segments free of whitespace.
    compact_json = json.dumps(data, separators=(',', ':'))
    return base64url(compact_json.encode('utf-8'))
def create_sha256_signature(message, secret):
    """Return the raw HMAC-SHA256 digest of *message* keyed with *secret*.

    Both arguments are str and are UTF-8 encoded before hashing.
    """
    key_bytes = secret.encode('utf-8')
    msg_bytes = message.encode('utf-8')
    mac = hmac.new(key_bytes, msg_bytes, hashlib.sha256)
    return mac.digest()
def createJWT(tokenId, tokenSecret):
    """Build and sign the realtime-channel JWT for the given token id."""
    payload = {
        'path': '/realtime',
        'token_id': str(tokenId),
        'nonce': round(time.time() * 1000),  # millisecond timestamp as nonce
    }
    return encodeJWT(payload, tokenSecret)
def encodeJWT(data, secret):
    """Encode *data* as an HS256 JWT signed with *secret*.

    Produces header.payload.signature, each segment base64url-encoded
    without padding.
    """
    segments = [
        stringify64({'typ': 'JWT', 'alg': 'HS256'}),
        stringify64(data),
    ]
    signing_input = '.'.join(segments)
    mac = create_sha256_signature(signing_input, secret)
    segments.append(base64url(mac))
    return '.'.join(segments)
6417455 | <filename>2020_2021/DonNU CTF 2021/Coding/coding3/hamming_distance.py
import random
from lib.types import IStdin, IStdout
def hamming_distance(a, b):
    """Return the number of bit positions at which *a* and *b* differ."""
    # bin() already returns a string, and its '0b' prefix contains no '1'
    # characters, so counting '1's in the XOR gives the distance directly —
    # replacing the redundant str() wrapper and the manual counting loop.
    return bin(a ^ b).count('1')
def main(stdin: IStdin, stdout: IStdout):
    """Run the 100-round Hamming-distance CTF challenge over the given streams.

    A wrong answer or a non-numeric reply ends the session immediately;
    surviving all 100 rounds prints the flag.
    """
    for banner_line in (
        "To get the flag you will need to calculate the Hamming distance of two numbers 100 times.\n",
        "Hamming distance is number of bits at which two numbers differ.\n",
        "Example: for 3 (011) and 5 (101) Hamming distance equals 2\n",
    ):
        stdout.write(banner_line)
    for round_no in range(1, 101):
        x, y = random.randint(1, 2 ** 32), random.randint(1, 2 ** 32)
        stdout.write(f"Round {round_no}: {x} {y}\n")
        stdout.write("Answer >> ")
        stdout.flush()
        try:
            reply = int(stdin.readline().strip())
            if reply != hamming_distance(x, y):
                stdout.write("Wrooong\n")
                return None
        except Exception:
            stdout.write("You must answer with a single number\n")
            return None
    stdout.write("Congratulations! Your flag is donnuCTF{x0r_15_th3_answer}\n")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.