id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
54229 | """
LC 480
Given an array of numbers and a number ‘k’, find the median of all the ‘k’ sized sub-arrays (or windows) of the array.
Example 1:
Input: nums=[1, 2, -1, 3, 5], k = 2
Output: [1.5, 0.5, 1.0, 4.0]
Explanation: Lets consider all windows of size ‘2’:
[1, 2, -1, 3, 5] -> median is 1.5
[1, 2, -1, 3, 5] -> median is 0.5
[1, 2, -1, 3, 5] -> median is 1.0
[1, 2, -1, 3, 5] -> median is 4.0
Example 2:
Input: nums=[1, 2, -1, 3, 5], k = 3
Output: [1.0, 2.0, 3.0]
Explanation: Lets consider all windows of size ‘3’:
[1, 2, -1, 3, 5] -> median is 1.0
[1, 2, -1, 3, 5] -> median is 2.0
[1, 2, -1, 3, 5] -> median is 3.0
"""
from heapq import *
import heapq
class SlidingWindowMedian:
    """Two-heap sliding-window median (LC 480).

    ``min_heap`` holds the upper half of the current window and ``max_heap``
    holds the lower half, stored *negated* because heapq is a min-heap.
    ``balance`` keeps len(min_heap) equal to len(max_heap) or greater by one,
    so the median is always at the heap tops.
    """
    def find_sliding_window_median(self, nums, k):
        """Return the median of every k-sized window of ``nums``, left to right."""
        self.min_heap = []
        self.max_heap = []
        res = []
        for i, n in enumerate(nums):
            self.insert(n)
            if i >= k - 1:
                # Window is full: record the median, then evict the element
                # that is about to slide out.
                res.append(self.median())
                self.delete(nums[i - k + 1])
        return res
    def delete(self, n):
        """Remove one occurrence of ``n`` from whichever heap contains it.

        Uses heapq's private ``_siftup``/``_siftdown`` to repair the heap
        in place after swapping the removed slot with the last element:
        O(k) for the index() scan plus O(log k) for the repair.
        """
        if self.min_heap and self.min_heap[0] <= n:
            heap = self.min_heap
        else:
            heap = self.max_heap
            n = -n  # the max-heap stores negated values
        i = heap.index(n)
        if i == len(heap) - 1:  # the last
            heap.pop()
        else:
            heap[i] = heap.pop()
            heapq._siftup(heap, i)  # move children up
            heapq._siftdown(heap, 0, i)  # move parent down
        self.balance()
    def balance(self):
        """Restore the size invariant: min_heap has the same size as max_heap or one more."""
        if len(self.min_heap) - len(self.max_heap) > 1:
            heappush(self.max_heap, -heappop(self.min_heap))
        if len(self.min_heap) < len(self.max_heap):
            heappush(self.min_heap, -heappop(self.max_heap))
    def insert(self, n):
        """Push ``n`` onto the heap for the window half it belongs to, then rebalance."""
        if not self.min_heap or self.min_heap[0] <= n:
            heappush(self.min_heap, n)
        else:
            heappush(self.max_heap, -n)
        self.balance()
    def median(self):
        """Median of the current window.

        Even window: mean of the two tops (max_heap top is negated, so the
        subtraction actually sums the two middle values).  Odd window: the
        extra element always lives on min_heap.
        """
        if len(self.min_heap) == len(self.max_heap):
            return 0.5 * (self.min_heap[0] - self.max_heap[0])
        else:
            return self.min_heap[0]
def main():
    """Demo: print sliding-window medians for two sample window sizes."""
    for window_size in (2, 3):
        solver = SlidingWindowMedian()
        medians = solver.find_sliding_window_median([1, 2, -1, 3, 5], window_size)
        print("Sliding window medians are: " + str(medians))
main()
"""
Time O(NK): find the index takes O(K)
Space O(K)
"""
| StarcoderdataPython |
63062 | <reponame>princessmiku/Asuna-Scientist<filename>scientist/displayRecord.py
"""
A simple Class for display records
"""
# Own
from .collection import Collection
# python stuff
import math
class DRec:
    """Paginated display helper over a record's ``data`` list.

    Serves slices of at most ``maxShows`` items at a time and tracks the
    current page/index into ``_record.data``.
    """
    def __init__(self, _record, maxShows: int = 25):
        """
        Init a display helper.

        :param _record: record object exposing a ``data`` list (Collection items)
        :param maxShows: set the amount of shows which you get with "get"
        """
        self._record = _record
        self.maxShows: int = maxShows
        self.currentIndex: int = 0
        self.lenOfData: int = len(self._record.data)
        self.currentPage: int = 0
        # total number of pages, rounding a trailing partial page up
        self.maxPages: int = math.ceil(self.lenOfData / maxShows)

    # get values in the range of the maxShows
    def get(self) -> list:
        """
        Get the content of the record in the current range.

        :return: the list of items in the range of the current show window
        """
        showUp: int = self.currentIndex + self.maxShows
        if self.lenOfData < showUp:
            showUp = self.lenOfData  # clamp the window to the end of the data
        return self._record.data[self.currentIndex:showUp]

    # next page, or next page with skip xxx pages
    def nextPage(self, amount: int = 1) -> bool:
        """
        Go to the next page, with or without skipping pages.

        :param amount: amount of skips, normally 1, 1 = next page
        :return: is successful, nice to have but it can be useless
        """
        if self.currentPage + amount > self.maxPages:
            return False
        self.currentPage += amount
        self.currentIndex += self.maxShows * amount
        if self.currentIndex > self.lenOfData:
            self.currentIndex = self.lenOfData
        return True

    # previous page, or previous page with skip xxx pages
    def previousPage(self, amount: int = 1) -> bool:
        """
        Go to the previous page, with or without skipping pages.

        :param amount: amount of skips, normally 1, 1 = previous page
        :return: is successful, nice to have but it can be useless
        """
        # BUGFIX: the original guard was ``self.currentPage + amount < 0``,
        # which never triggered and let page/index go negative.
        if self.currentPage - amount < 0:
            return False
        self.currentPage -= amount
        self.currentIndex -= self.maxShows * amount
        if self.currentIndex < 0:
            self.currentIndex = 0
        return True

    # count the index up
    def addIndex(self, amount: int = 1) -> bool:
        """
        Add an amount to the index.

        :param amount:
        :return: is successful, nice to have but it can be useless
        """
        # BUGFIX: the original condition was inverted — it rejected every
        # in-range increment and accepted out-of-range ones.
        if self.currentIndex + amount > self.lenOfData:
            return False
        self.currentIndex += amount
        return True

    # count the index down
    def removeIndex(self, amount: int = 1) -> bool:
        """
        Remove an amount from the index.

        :param amount:
        :return: is successful, nice to have but it can be useless
        """
        if self.currentIndex - amount < 0:
            return False
        self.currentIndex -= amount
        return True
| StarcoderdataPython |
91809 | #!/usr/bin/env python
import numpy as np
def ltr_parts(parts_dict):
    """Return (left_ids, right_ids) for body parts that swap when mirrored.

    When we flip an image left parts become right parts and vice versa;
    the two returned lists are index-aligned exchange partners.
    """
    left_names = ("Lsho", "Lelb", "Lwri", "Lhip", "Lkne", "Lank", "Leye", "Lear")
    right_names = ("Rsho", "Relb", "Rwri", "Rhip", "Rkne", "Rank", "Reye", "Rear")
    leftParts = [parts_dict[name] for name in left_names]
    rightParts = [parts_dict[name] for name in right_names]
    return leftParts, rightParts
class RmpeGlobalConfig:
    """Canonical part/limb layout and tensor geometry for the pose pipeline.

    Output tensor layout: PAF channels first (an x,y pair per limb), then one
    heatmap per part, then a single background channel.
    """
    width = 368
    height = 368
    stride = 8  # the network output is downscaled by this factor

    parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne", "Lank", "Reye", "Leye", "Rear", "Lear"]
    num_parts = len(parts)
    parts_dict = dict(zip(parts, range(num_parts)))
    parts += ["background"]  # background is a layer but not a dispatchable part
    num_parts_with_background = len(parts)

    leftParts, rightParts = ltr_parts(parts_dict)

    # this numbers probably copied from matlab they are 1.. based not 0.. based
    limb_from = [2, 9, 10, 2, 12, 13, 2, 3, 4, 3, 2, 6, 7, 6, 2, 1, 1, 15, 16]
    limb_to = [9, 10, 11, 12, 13, 14, 3, 4, 5, 17, 6, 7, 8, 18, 1, 15, 16, 17, 18]
    limbs_conn = zip(limb_from, limb_to)
    limbs_conn = [(fr - 1, to - 1) for (fr, to) in limbs_conn]  # shift to 0-based

    paf_layers = 2*len(limbs_conn)  # one x and one y channel per limb
    heat_layers = num_parts
    num_layers = paf_layers + heat_layers + 1  # +1 for the background channel

    # start offsets of each section in the stacked output tensor
    paf_start = 0
    heat_start = paf_layers
    bkg_start = paf_layers + heat_layers

    data_shape = (3, height, width)     # 3, 368, 368
    mask_shape = (height//stride, width//stride)         # 46, 46
    parts_shape = (num_layers, height//stride, width//stride)  # 57, 46, 46
class TransformationParams:
    """Data-augmentation hyper-parameters for training-time image transforms."""
    target_dist = 0.6
    # TODO: this is actually scale unprobability, i.e. 1 = off, 0 = always,
    # not sure if it is a bug or not
    scale_prob = 0
    scale_min = 0.5
    scale_max = 0.9
    max_rotate_degree = 40.
    center_perterb_max = 40.
    flip_prob = 0.5
    sigma = 7.
    paf_thre = 8.  # it is original 1.0 * stride in this program
class RmpeCocoConfig:
    """COCO keypoint ordering and its conversion into the global part layout."""
    parts = ['nose', 'Leye', 'Reye', 'Lear', 'Rear', 'Lsho', 'Rsho', 'Lelb',
             'Relb', 'Lwri', 'Rwri', 'Lhip', 'Rhip', 'Lkne', 'Rkne', 'Lank',
             'Rank']

    num_parts = len(parts)

    # for COCO neck is calculated like mean of 2 shoulders.
    parts_dict = dict(zip(parts, range(num_parts)))

    @staticmethod
    def convert(joints):
        """Re-order COCO joints into the RmpeGlobalConfig part layout.

        ``joints`` is indexed as (person, coco_part, 3); the third channel is
        a visibility flag (0 invisible, 1 visible, 2 absent).  The neck —
        absent from COCO — is synthesized as the mean of the two shoulders
        whenever both are known.
        """
        # BUGFIX: ``np.float`` was removed in NumPy 1.24; the builtin
        # ``float`` (float64) is the documented replacement.
        result = np.zeros((joints.shape[0], RmpeGlobalConfig.num_parts, 3), dtype=float)
        result[:, :, 2] = 2.  # 2 - absent, 1 visible, 0 - invisible

        for p in RmpeCocoConfig.parts:
            coco_id = RmpeCocoConfig.parts_dict[p]
            global_id = RmpeGlobalConfig.parts_dict[p]
            assert global_id != 1, "neck shouldn't be known yet"
            result[:, global_id, :] = joints[:, coco_id, :]

        neckG = RmpeGlobalConfig.parts_dict['neck']
        RshoC = RmpeCocoConfig.parts_dict['Rsho']
        LshoC = RmpeCocoConfig.parts_dict['Lsho']

        # no neck in coco database, we calculate it as average of shoulders
        # TODO: we use 0 - hidden, 1 visible, 2 absent - it is not coco values
        # they processed by generate_hdf5
        both_shoulders_known = (joints[:, LshoC, 2] < 2) & (joints[:, RshoC, 2] < 2)
        result[both_shoulders_known, neckG, 0:2] = (joints[both_shoulders_known, RshoC, 0:2] +
                                                    joints[both_shoulders_known, LshoC, 0:2]) / 2
        result[both_shoulders_known, neckG, 2] = np.minimum(joints[both_shoulders_known, RshoC, 2],
                                                            joints[both_shoulders_known, LshoC, 2])
        return result
class RpmeMPIIConfig:
    """MPII keypoint ordering; conversion to the global layout is not implemented."""
    parts = ["HeadTop", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow", "LWrist", "RHip", "RKnee",
             "RAnkle", "LHip", "LKnee", "LAnkle"]

    numparts = len(parts)

    # 14 - Chest is calculated like "human center location provided by the annotated data"

    @staticmethod
    def convert(joints):
        # BUGFIX: ``raise "Not implemented"`` raises a TypeError in Python 3
        # (string exceptions are not allowed); raise the intended exception.
        raise NotImplementedError("RpmeMPIIConfig.convert is not implemented")
# more information on keypoints mapping is here
# https://github.com/ZheC/Realtime_Multi-Person_Pose_Estimation/issues/7
def check_layer_dictionary():
    """Print a human-readable name for every output layer.

    PAF layers occupy the front slots (two per limb: x then y); the part
    heatmaps plus background fill the tail.  Asserts that no PAF slot is
    assigned twice.
    """
    layer_names = [None] * (RmpeGlobalConfig.num_layers - len(RmpeGlobalConfig.parts))
    layer_names += RmpeGlobalConfig.parts[:]
    for index, (src, dst) in enumerate(RmpeGlobalConfig.limbs_conn):
        limb = "%s->%s" % (RmpeGlobalConfig.parts[src], RmpeGlobalConfig.parts[dst])
        print(index, limb)
        x_slot = 2 * index
        y_slot = 2 * index + 1
        assert layer_names[x_slot] is None
        layer_names[x_slot] = limb + ":x"
        assert layer_names[y_slot] is None
        layer_names[y_slot] = limb + ":y"
    print(layer_names)
# Run the layer-layout sanity check when executed directly.
if __name__ == "__main__":
    check_layer_dictionary()
| StarcoderdataPython |
1616651 | import os
import json
import glob
from flask import Flask, jsonify
from flask_cors import cross_origin
from skedulord.common import HEARTBEAT_PATH, SKEDULORD_PATH
def create_app():
    """Build the skedulord dashboard Flask app.

    Routes:
      /                         static single-page UI
      /api/heartbeats           heartbeat log grouped per job name (CORS enabled)
      /api/test_heartbeats      same payload without CORS (used by the tests)
      /api/jobs/<job>/<dt>      raw log file for one run, wrapped in <pre>
      /api/glob_logs            relative paths of all job log files
    """
    app = Flask(__name__, static_folder="templates", static_url_path="")

    def _grouped_heartbeats():
        # Shared by /api/heartbeats and /api/test_heartbeats (previously
        # duplicated): read the heartbeat file (one JSON object per line),
        # order newest first, and group the runs per job name.
        with open(HEARTBEAT_PATH, "r") as f:
            jobs = sorted(
                [json.loads(_) for _ in f.readlines()],
                key=lambda d: d["start"],
                reverse=True,
            )
        names = set([_["name"] for _ in jobs])
        return jsonify(
            [
                {"name": n, "id": i, "jobs": [j for j in jobs if j["name"] == n]}
                for i, n in enumerate(names)
            ]
        )

    @app.route("/")
    def static_file():
        return app.send_static_file("index.html")

    @app.route("/api/heartbeats")
    @cross_origin()
    def grab():
        return _grouped_heartbeats()

    @app.route("/api/test_heartbeats")
    def grab_test():
        # the @cross_origin is messing up the tests =(
        return _grouped_heartbeats()

    @app.route("/api/jobs/<job>/<datetime>")
    @cross_origin()
    def fetch_logs(job, datetime):
        # Serve one run's raw log wrapped in <pre> so browsers keep formatting.
        path = os.path.join(SKEDULORD_PATH, "jobs", job, datetime)
        with open(path) as f:
            return f"<pre>{f.read()}</pre>"

    @app.route("/api/glob_logs")
    @cross_origin()
    def glob_logs():
        # Paths are made relative to SKEDULORD_PATH for display.
        return jsonify(
            [
                _.replace(SKEDULORD_PATH, "")
                for _ in glob.glob(f"{SKEDULORD_PATH}/jobs/*/*.txt")
            ]
        )

    return app
# Local development entry point; a production deployment should serve the app
# through a proper WSGI server instead of the debug server below.
if __name__ == "__main__":
    web = create_app()
    web.run(debug=True, threaded=True, host="0.0.0.0")
| StarcoderdataPython |
3231650 | # @@@ START COPYRIGHT @@@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# @@@ END COPYRIGHT @@@
#
# This script finds ranges of ports suitable for input
# to the Trafodion install_local_hadoop script.
#
import os
import sys
import subprocess
import re
import sets
import argparse # requires Python 2.7
# beginning of main
# pseudocode
#
# do a "netstat | grep localhost" to obtain raw port-in-use info
#
# inUseRanges = empty set
# for each line
# for each occurrance of the string "localhost:nnnnn"
# rangeInUse = integer(nnnnn/200) (assume a range of 200 ports is needed)
# add rangeInUse to inUseRanges
# end for
# end for
#
# for each possible range starting from the smallest in rangeInUse and ending at 48800
# if it isn't in the inUseRanges set
# print it out as a possible port range to use
# end if
# end for
# process command line arguments
parser = argparse.ArgumentParser(
    description='This script finds possible port ranges to use with install_local_hadoop.')
args = parser.parse_args() # exits and prints help if args are incorrect

exitCode = 0

# Probe for sudo rights on lsof; a zero exit status means the accurate
# lsof-based scan can be used, otherwise fall back to netstat parsing.
retcode = subprocess.call("sudo -ll lsof", shell=True)

# Set of 200-port-wide "range buckets" that contain at least one in-use port.
inUseRanges = sets.Set()
if retcode == 0:
    # sudo lsof can be run successfully
    # get the ips in use to process ( sudo lsof -i | awk -f lsof.awk )
    print "Use sudo lsof to get port numbers";
    p1 = subprocess.Popen(["sudo", "lsof", "-i"], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(["awk", "-f", "lsof.awk"], stdin=p1.stdout, stdout=subprocess.PIPE, close_fds=True)
    for ip in p2.stdout:
        # bucket each in-use port into its 200-wide range
        rangeInUse = int(ip)/200
        inUseRanges.add(rangeInUse)
else :
    # no sudo permission on lsof
    # get the set of lines to process ( netstat | grep localhost )
    print "Use netstat to get port numbers";
    p1 = subprocess.Popen(["netstat"], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(["grep","localhost"], stdin=p1.stdout, stdout=subprocess.PIPE, close_fds=True)

    # process the lines, looking for port numbers
    # (any 5-digit port following "localhost:")
    pattern = r'localhost:(?P<portNumber>[0-9]{5})'
    matcher = re.compile(pattern)
    for line in p2.stdout:
        result = matcher.findall(line)
        for occurrance in result:
            rangeInUse = int(occurrance)/200
            inUseRanges.add(rangeInUse)

# avoid recommending low ranges; our lowest recommendation
# will be the first unused range above the lowest in-use range
# avoid ranges in the ephemeral port range 49191-65535 also
print
foundOne = False
# NOTE(review): min() raises ValueError when inUseRanges is empty (no
# localhost ports detected at all) — confirm that case cannot occur.
minInUse = min(inUseRanges)
for r in range(min(inUseRanges)+1,48800/200):
    if r not in inUseRanges:
        if not foundOne:
            print "Port ranges not in use:"
            foundOne = True
        print 200*r
if not foundOne:
    print "All port ranges from " + str(200*minInUse) + " through 48800 are in use."
    exitCode = 1

print
exit(exitCode)
| StarcoderdataPython |
3398111 | from flask_restful import Resource, reqparse, request
from flask_restful import fields, marshal_with, marshal
from .model import UserBook
from app import db
from app import api
from utilities import responseSchema
# Shared response helper used by every resource's error path below.
response = responseSchema.ResponseSchema()

# Parser for incoming book payloads.
# NOTE(review): the parser is named "manga_parser" and validates
# ``chapters_amount``, while the marshalling fields below expose
# ``pages_amount`` — the request and response schemas look out of sync;
# confirm which naming is intended.
manga_parser = reqparse.RequestParser()
manga_parser.add_argument(
    'name', help='Field name cannot be blank', required=True)
manga_parser.add_argument('chapters_amount', required=False)
manga_parser.add_argument('left_at', required=False)
manga_parser.add_argument('finished', required=False)
manga_parser.add_argument('author', required=False)

# Output shape for UserBook rows returned by the resources below.
book_list_fields = {
    'id': fields.Integer,
    'pages_amount': fields.Integer,
    'left_at': fields.Integer,
    'finished': fields.Boolean,
    'author': fields.String
}
class UserBookResource(Resource):
    """Collection endpoint: create a book row or list them all."""
    def post(self):
        """Create a UserBook row from the raw JSON body.

        NOTE(review): the body bypasses ``manga_parser`` entirely and is
        splatted straight into the model constructor — unexpected keys raise
        and land in the broad except below; confirm intended.
        """
        try:
            book = request.get_json()
            db.session.add(UserBook(**book))
            db.session.commit()
            return marshal(book, book_list_fields)
        except Exception as error:
            # NOTE(review): the module-level ``response`` object is mutated
            # per request, which is not safe under concurrent requests.
            response.errorResponse(str(error))
            return response.__dict__
    def get(self):
        """Return every stored book, marshalled with ``book_list_fields``."""
        try:
            book = UserBook.query.all()
            response.successMessage(book)
            return marshal(book, book_list_fields)
        except Exception as error:
            response.errorResponse(str(error))
            return response.__dict__
class UserBookByIdResource(Resource):
    """Single-item endpoint: fetch or delete one book by primary key."""
    def get(self, id=None):
        """Return the first book matching *id*, marshalled with ``book_list_fields``."""
        try:
            book = UserBook.query.filter_by(id=id).first()
            return marshal(book, book_list_fields)
        except Exception as error:
            response.errorResponse(str(error))
            return response.__dict__
    def delete(self, id):
        """Delete the book with primary key *id* and echo it back.

        NOTE(review): when *id* does not exist ``query.get`` returns None, so
        ``session.delete(None)`` raises and the error path is taken — confirm
        whether a 404-style response is wanted instead.
        """
        try:
            book = UserBook.query.get(id)
            db.session.delete(book)
            db.session.commit()
            return marshal(book, book_list_fields)
        except Exception as error:
            response.errorResponse(str(error))
            return response.__dict__
| StarcoderdataPython |
3253978 | <filename>api/database/models.py
from tortoise import fields
from tortoise.models import Model
class Region(Model):
    """A named geographic region linked to the dates it observes."""
    id = fields.IntField(pk=True)
    # Region display name; uniqueness enforced at the database level.
    name = fields.CharField(60, unique=True)
    # Many-to-many: a region shares many Date rows and vice versa; reachable
    # from the other side as ``date.regions``.
    dates: fields.ManyToManyRelation["Date"] = \
        fields.ManyToManyField(
            "models.Date", related_name="regions",
        )

    def __str__(self):
        return self.name
class Date(Model):
    """One calendar day with its fajr and iftar timestamps."""
    id = fields.IntField(pk=True)
    day = fields.DateField()
    # NOTE(review): ``auto_now=True`` makes the ORM overwrite these fields on
    # every save — for fixed daily times that looks unintended; confirm
    # whether plain DatetimeField (set explicitly) was meant.
    fajr = fields.DatetimeField(auto_now=True)
    iftar = fields.DatetimeField(auto_now=True)

    def __str__(self):
        return str(self.day)
| StarcoderdataPython |
3241032 | import sys
class tee(object):
    '''
    A file-like object that duplicates output to two other file-like
    objects (similar to the Unix ``tee`` command).

    Thanks to <NAME> for the implementation:
    http://shallowsky.com/blog/programming/python-tee.html
    '''
    def __init__(self, _fd1, _fd2):
        self.fd1 = _fd1
        self.fd2 = _fd2

    def __del__(self):
        # Close the owned streams, but never the process-wide stdout/stderr.
        # getattr guards against a partially-constructed instance, and close
        # errors are swallowed because __del__ may run during interpreter
        # shutdown when stream internals can already be torn down.
        for fd in (getattr(self, 'fd1', None), getattr(self, 'fd2', None)):
            if fd is not None and fd != sys.stdout and fd != sys.stderr:
                try:
                    fd.close()
                except Exception:
                    pass

    def write(self, text):
        """Write *text* to both underlying streams."""
        self.fd1.write(text)
        self.fd2.write(text)

    def flush(self):
        """Flush both underlying streams."""
        self.fd1.flush()
        self.fd2.flush()
def log_stdout_to(logfilename):
    """Mirror everything written to stdout into *logfilename* as well.

    Replaces ``sys.stdout`` with a tee that forwards to both the original
    stdout and the newly opened log file; the file is closed by
    ``tee.__del__`` when the tee object is collected.
    """
    stdoutsav = sys.stdout
    outputlog = open(logfilename, "w")
    sys.stdout = tee(stdoutsav, outputlog)
def log_stderr_to(logfilename):
    """Mirror everything written to stderr into *logfilename* as well.

    Replaces ``sys.stderr`` with a tee that forwards to both the original
    stderr and the newly opened log file.
    """
    stderrsav = sys.stderr
    outputlog = open(logfilename, "w")
    sys.stderr = tee(stderrsav, outputlog)
| StarcoderdataPython |
4801786 | from jd.api.base import RestApi
class StockForListBatgetRequest(RestApi):
    """Request wrapper for the JD open-platform API ``biz.stock.forList.batget``."""
    def __init__(self, domain='gw.api.360buy.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters, filled in by the caller before sending.
        self.sku = None   # SKU identifier(s) to query — format defined by the JD API
        self.area = None  # delivery area code — format defined by the JD API
    def getapiname(self):
        """Return the JD API method name for this request."""
        return 'biz.stock.forList.batget'
| StarcoderdataPython |
3252730 | import json
import django.test
from .user import UserMixin
class Client(django.test.Client):
    """Django test client with JSON-body helpers for PATCH/POST/PUT."""
    def patch_json(self, path, data=None, **kwargs):
        return self.patch(path, **self._json_kwargs(data, kwargs))
    def post_json(self, path, data=None, **kwargs):
        return self.post(path, **self._json_kwargs(data, kwargs))
    def put_json(self, path, data=None, **kwargs):
        return self.put(path, **self._json_kwargs(data, kwargs))
    def _json_kwargs(self, data, kwargs):
        # Serialize *data* (when given) and force the JSON content type.
        # Mutates and returns the caller-supplied kwargs dict.
        if data is not None:
            data = json.dumps(data)
            kwargs['data'] = data
        kwargs['content_type'] = 'application/json'
        return kwargs
class FunctionalTestCase(django.test.TestCase, UserMixin):
    """Base class for view tests.

    It adds the following to Django's `TestCase`:

    - Convenient user creation & login (via UserMixin)
    - Convenient POSTs, PUTs, and PATCHes with a JSON body
    """
    # Use the JSON-aware client defined above for ``self.client``.
    client_class = Client
| StarcoderdataPython |
32693 | <reponame>savarin/algorithms
# Value -> numeral pairs in strictly descending order, including the
# subtractive forms (900 -> "cm", 4 -> "iv", ...) so the greedy conversion in
# to_roman() always picks the largest representable prefix.  Extended from the
# original 10..1 table to the full 1000..1 roman range; every result for
# inputs below 40 is unchanged.
lookup = [
    (1000, "m"),
    (900, "cm"),
    (500, "d"),
    (400, "cd"),
    (100, "c"),
    (90, "xc"),
    (50, "l"),
    (40, "xl"),
    (10, "x"),
    (9, "ix"),
    (5, "v"),
    (4, "iv"),
    (1, "i"),
]
def to_roman(integer):
    """Convert a non-negative integer to a lowercase roman numeral string.

    Greedily subtracts the largest value in ``lookup`` until the remainder
    reaches zero; returns "" for 0.
    """
    remainder = integer
    pieces = []
    for decimal, roman in lookup:
        while decimal <= remainder:
            pieces.append(roman)
            remainder -= decimal
    return "".join(pieces)
def main():
    """Print roman numerals for a handful of sample values."""
    samples = (1, 2, 4, 5, 6, 9, 10, 11, 36)
    for value in samples:
        print(to_roman(value))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
112275 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
import unittest
import uiautomator2 as u2
import uiautomator2.ext.ocr as ocr
import random
from utx import *
class TestCase(unittest.TestCase):
    """UI tests for the com.xiaoxiao.ludan app, driven through uiautomator2."""
    @classmethod
    def setUpClass(cls):
        # One device connection for the whole class; normalize orientation and
        # reset the app's state before any test runs.
        cls.d = u2.connect()
        cls.d.set_orientation('natural')
        cls.d.healthcheck()
        cls.d.implicitly_wait(10)
        cls.d.app_clear("com.xiaoxiao.ludan")
        cls.d.app_stop_all()
        # Point the OCR plugin at the NetEase OCR endpoint and register it.
        ocr.API = "http://ocr.open.netease.com/api/ocr"
        u2.plugin_register("ocr", ocr.OCR)
    def setUp(self):
        # The fast-input IME avoids flaky soft-keyboard typing; start a fresh
        # app session for every test.
        self.d.set_fastinput_ime(True)
        self.sess = self.d.session("com.xiaoxiao.ludan")
    def tearDown(self):
        self.d.app_clear("com.xiaoxiao.ludan")
        self.d.app_stop_all()
    # def test_000(self):
    #     self.d.ext_ocr.all()
    #     self.d.ext_ocr("登录").click()
    #     print('OCR')
    # output
    # ('状态', 138, 1888),
    # ('运动', 408, 1888),
    # ('发现', 678, 1888),
    # ('我的', 948, 1888)]
    # d.ext_ocr("我的").click()  # click the button labeled "我的" ("Mine")
    # @tag(Tag.temp)
    # def test_idcard_generator(self):
    #     """ Randomly generate a new 18-digit ID-card number """
    #     ARR = (7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2)
    #     LAST = ('1', '0', 'X', '9', '8', '7', '6', '5', '4', '3', '2')
    #     t = time.localtime()[0]
    #     x = '%02d%02d%02d%04d%02d%02d%03d' % (
    #         random.randint(10, 99), random.randint(1, 99), random.randint(1, 99), random.randint(t - 80, t - 18),
    #         random.randint(1, 12), random.randint(1, 28), random.randint(1, 999))
    #     y = 0
    #     for i in range(17):
    #         y += int(x[i]) * ARR[i]
    #     IDCard = '%s%s' % (x, LAST[y % 11])
    #     # birthday = '%s-%s-%s 00:00:00' % (IDCard[6:14][0:4], IDCard[6:14][4: 6], IDCard[6:14][6:8])
    #     print(IDCard)
    #     log.info(IDCard)
    #     return IDCard
# Allow running this module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3218647 | # Instructions
# Use the Airflow context in the pythonoperator to complete the TODOs below. Once you are done, run your DAG and check the logs to see the context in use.
import datetime
import logging
from airflow import DAG
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from airflow.hooks.S3_hook import S3Hook
def log_details(*args, **kwargs):
    """Log scheduling details from the Airflow template context.

    Expects the context to arrive via **kwargs (provide_context=True):
    'ds', 'run_id', 'prev_ds' and 'next_ds' must all be present; the last
    two may be None for the first/last run.
    """
    execution_date = kwargs['ds']
    current_run_id = kwargs['run_id']
    prior_ds = kwargs['prev_ds']
    upcoming_ds = kwargs['next_ds']

    logging.info(f"Execution date is {execution_date}")
    logging.info(f"My run id is {current_run_id}")
    if prior_ds:
        logging.info(f"My previous run was on {prior_ds}")
    if upcoming_ds:
        logging.info(f"My next run will be {upcoming_ds}")
# Daily DAG starting two days in the past so the scheduler backfills a couple
# of runs immediately, giving prev_ds/next_ds values to log.
dag = DAG(
    'lesson1.exercise5',
    schedule_interval="@daily",
    start_date=datetime.datetime.now() - datetime.timedelta(days=2)
)

# provide_context=True passes the Airflow template context (ds, run_id,
# prev_ds, next_ds, ...) into log_details via **kwargs.
list_task = PythonOperator(
    task_id="log_details",
    python_callable=log_details,
    provide_context=True,
    dag=dag
)
| StarcoderdataPython |
1652712 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from fetcher.dependency import Dependency, target_name_from_path
class MojomFile(object):
    """A .mojom interface file at a given location in the repository."""

    def __init__(self, repository, name):
        self.name = name
        self._repository = repository
        self.deps = []  # Dependency objects declared by add_dependency()

    def add_dependency(self, dependency):
        """Declare a new dependency of this mojom."""
        self.deps.append(Dependency(self._repository, self.name, dependency))

    def get_jinja_parameters(self, include_dirs):
        """Build the Jinja parameters used to emit this mojom's BUILD.gn target.

        Returns a dict with "filename", "target_name", "deps",
        "mojo_sdk_deps" and — only when at least one was collected —
        "import_dirs".
        """
        params = {}
        params["filename"] = os.path.basename(self.name)
        params["target_name"] = target_name_from_path(self.name)
        params["deps"] = []
        params["mojo_sdk_deps"] = []
        import_dirs = set()

        for dep in self.deps:
            # Mojo SDK dependencies have special treatment: they are listed
            # separately and never contribute an import directory.
            if dep.is_sdk_dep():
                target, _ = dep.get_target_and_import(include_dirs)
                params["mojo_sdk_deps"].append(target)
            else:
                target, import_dir = dep.get_target_and_import(include_dirs)
                if import_dir is not None:
                    import_dirs.add(import_dir)
                params["deps"].append(target)

        # Only expose the key when non-empty (the template checks presence);
        # this replaces the original set-then-delete dance.
        if import_dirs:
            params["import_dirs"] = list(import_dirs)
        return params

    def _os_path_exists(self, path):
        # Thin wrapper kept as a test seam for mocking filesystem access.
        return os.path.exists(path)
| StarcoderdataPython |
1742969 | <reponame>asellappen/python-libarchive-c
"""
This is a modified version of <NAME>'s pure-Python implementation of
PEP 383: the "surrogateescape" error handler of Python 3.
This code is released under the Python license and the BSD 2-clause license
Source: misc/python/surrogateescape.py in https://bitbucket.org/haypo/misc
"""
from __future__ import division, print_function, unicode_literals
import codecs
chr = __builtins__.get('unichr', chr)
def surrogateescape(exc):
    """Pure-Python 'surrogateescape' codec error handler (PEP 383).

    Decode errors: each undecodable byte 0x80-0xFF becomes the lone
    surrogate U+DC00+byte; ASCII bytes pass through unchanged.
    Encode errors: surrogates U+DC80-U+DCFF map back to the raw byte.
    Any other offending code point re-raises the original exception.
    """
    if isinstance(exc, UnicodeDecodeError):
        out = []
        for raw in exc.object[exc.start:exc.end]:
            value = raw if isinstance(raw, int) else ord(raw)
            if 0x80 <= value <= 0xFF:
                out.append(chr(0xDC00 + value))
            elif value <= 0x7F:
                out.append(chr(value))
            else:
                raise exc
        return ''.join(out), exc.end
    if isinstance(exc, UnicodeEncodeError):
        out = []
        for ch in exc.object[exc.start:exc.end]:
            value = ord(ch)
            if not 0xDC80 <= value <= 0xDCFF:
                raise exc
            out.append(chr(value - 0xDC00))
        return ''.join(out), exc.end
    raise exc
def register():
    """Install the 'surrogateescape' error handler if it is missing.

    On Python 3 the handler is built in, so this call is a no-op there.
    """
    already_installed = True
    try:
        codecs.lookup_error('surrogateescape')
    except LookupError:
        already_installed = False
    if not already_installed:
        codecs.register_error('surrogateescape', surrogateescape)
| StarcoderdataPython |
161895 | <gh_stars>1-10
from sqlalchemy import create_engine
# SQLite database in the working directory; check_same_thread is disabled so
# the single global session can be used regardless of thread.
engine = create_engine('sqlite:///todo.db?check_same_thread=False')

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from datetime import datetime

Base = declarative_base()
class task(Base):
    """A single to-do item stored in the ``task`` table."""
    __tablename__ = 'task'
    id = Column(Integer, primary_key=True)
    task = Column(String, default='')  # the activity description
    # BUGFIX: pass the *callable* so the default is evaluated per insert; the
    # original ``default=datetime.today()`` froze the date at import time.
    deadline = Column(Date, default=datetime.today)

    def __repr__(self):
        return self.task
# Create the table(s) if the database file is fresh.
Base.metadata.create_all(engine)

from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()

from datetime import datetime, timedelta
today = datetime.today().date()
# NOTE(review): ``rows`` is never used below — every menu option re-queries.
# Presumably a leftover from development.
rows = session.query(task).filter(task.deadline == today).all()
# Interactive menu loop: read a numeric choice per iteration until the user
# exits with 0.
while (True):
    print("1) Today's tasks")
    print("2) Week's tasks")
    print("3) All tasks")
    print("4) Missed tasks")
    print("5) Add task")
    print("6) Delete task")
    print("0) Exit")
    n = int(input())
    if n == 0:
        print("Bye!")
        break;
    if n == 1:
        # Tasks whose deadline is exactly today.
        count = 1
        tasks = session.query(task).filter(task.deadline == datetime.today().date()).all()
        print("Today {0} {1}:".format(datetime.today().day, datetime.today().strftime('%b')))
        for task_today in tasks:
            print("{0}. {1}".format(count, task_today))
            count += 1
        if count == 1:
            print("Nothing to do!")
    if n == 2:
        # One section per day for the next 7 days (today inclusive).
        for i in range(7):
            count = 1
            tasks = session.query(task).filter(task.deadline == datetime.today().date() + timedelta(days=i)).all()
            print("{2} {0} {1}:".format((datetime.today() + timedelta(days=i)).day,
                                        (datetime.today() + timedelta(days=i)).strftime('%b'),
                                        (datetime.today() + timedelta(days=i)).strftime('%A')))
            for task_week in tasks:
                print("{0}. {1}".format(count, task_week))
                count += 1
            if count == 1:
                print("Nothing to do!")
            print()
    if n == 3:
        count = 1
        tasks = session.query(task).all()
        print("All tasks:")
        for task_x in tasks:
            print("{0}. {1}".format(count, task_x))
            count += 1
        if count == 1:
            print("Nothing to do!")
    if n == 4:
        # Tasks whose deadline is already in the past.
        missed_tasks = session.query(task).filter(task.deadline < datetime.today().date()).all()
        print("Missed activities:")
        count = 1
        for missed_task in missed_tasks:
            print("{0}. {1}".format(count, missed_task))
            count += 1
        if count == 1:
            print("Nothing is missed!")
        print()
    if n == 5:
        print("Enter activity")
        activity = input()
        print("Enter deadline")
        activity_deadline_str = input()
        # Deadline must be entered as YYYY-MM-DD; anything else raises
        # ValueError and aborts the program.
        activity_deadline = datetime.strptime(activity_deadline_str, '%Y-%m-%d').date()
        new_task = task(task=activity, deadline=activity_deadline)
        session.add(new_task)
        session.commit()
        print("The task has been added!")
    if n == 6:
        print("Chose the number of the task you want to delete:")
        tasks = session.query(task).all()
        count = 1
        for task_x in tasks:
            print("{0}. {1}".format(count, task_x))
            count += 1
        # NOTE(review): when the list is empty, ``tasks[n - 1]`` raises
        # IndexError before the "Nothing to delete!" message can be reached,
        # and the chosen number is not range-checked — confirm and guard.
        n = int(input())
        session.delete(tasks[n - 1])
        session.commit()
        print("The task has been deleted!")
        if count == 1:
            print("Nothing to delete!")
| StarcoderdataPython |
42345 | <gh_stars>1-10
from contextlib import contextmanager
from warnings import warn
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
import inspect
import sys
class Dispatcher(object):
    """ Dispatch methods based on type signature

    Use ``multipledispatch.dispatch`` to add implementations

    Examples
    --------

    >>> @dispatch(int)
    ... def f(x):
    ...     return x + 1

    >>> @dispatch(float)
    ... def f(x):
    ...     return x - 1

    >>> f(3)
    4
    >>> f(3.0)
    2.0
    """
    __slots__ = 'name', 'funcs', 'ordering', '_cache'

    def __init__(self, name):
        self.name = name
        # signature tuple of types -> implementation
        self.funcs = dict()
        # concrete argument-type tuple -> resolved implementation (memoized)
        self._cache = dict()

    def add(self, signature, func):
        """ Add new types/method pair to dispatcher

        >>> D = Dispatcher('add')
        >>> D.add((int, int), lambda x, y: x + y)
        >>> D.add((float, float), lambda x, y: x + y)

        >>> D(1, 2)
        3
        >>> D(1, 2.0)
        Traceback (most recent call last):
        ...
        NotImplementedError
        """
        self.funcs[signature] = func
        # Recompute the topological resolution order and warn about any
        # newly-introduced ambiguous signature pairs.
        self.ordering = ordering(self.funcs)
        amb = ambiguities(self.funcs)
        if amb:
            warn(warning_text(self.name, amb), AmbiguityWarning)
        # Previously memoized resolutions may now be stale.
        self._cache.clear()

    def __call__(self, *args, **kwargs):
        types = tuple([type(arg) for arg in args])
        func = self.resolve(types)
        return func(*args, **kwargs)

    def __str__(self):
        return "<dispatched %s>" % self.name
    __repr__ = __str__

    @property
    def supported_types(self):
        """ A topologically sorted list of type signatures """
        return self.ordering

    def resolve(self, types):
        """ Determine appropriate implementation for this type signature

        This method is internal.  Users should call this object as a function.
        Implementation resolution occurs within the ``__call__`` method.

        >>> @dispatch(int)
        ... def inc(x):
        ...     return x + 1

        >>> implementation = inc.resolve((int,))
        >>> implementation(3)
        4

        >>> inc.resolve((float,))
        Traceback (most recent call last):
        ...
        NotImplementedError

        See Also:
            ``multipledispatch.conflict`` - module to determine resolution order
        """
        if types in self._cache:
            return self._cache[types]
        elif types in self.funcs:
            self._cache[types] = self.funcs[types]
            return self.funcs[types]

        n = len(types)
        # First signature in topological order whose arity matches and whose
        # declared types are superclasses of the actual argument types wins.
        for signature in self.ordering:
            if all(len(signature) == n and issubclass(typ, sig)
                   for typ, sig in zip(types, signature)):
                result = self.funcs[signature]
                self._cache[types] = result
                return result
        raise NotImplementedError()
class MethodDispatcher(Dispatcher):
    """ Dispatch methods based on type signature

    See Also:
        Dispatcher
    """
    def __get__(self, instance, owner):
        # Descriptor protocol: remember the instance/class the attribute was
        # looked up on so __call__ can prepend it.
        # NOTE(review): this stores per-lookup state on the shared descriptor
        # object, so interleaved lookups on different instances can race —
        # confirm single-threaded, immediate-call usage.
        self.obj = instance
        self.cls = owner
        return self

    def __call__(self, *args, **kwargs):
        # Dispatch on the explicit argument types only; the bound instance is
        # prepended when invoking the chosen implementation.
        types = tuple([type(arg) for arg in args])
        func = self.resolve(types)
        return func(self.obj, *args, **kwargs)
# Global registry: function name -> Dispatcher.  Namespaces are ignored, so
# dispatched functions with the same name in different modules share one
# dispatcher.
dispatchers = dict()


def dispatch(*types):
    """
    Dispatch decorator with two modes of use:

    @dispatch(int):
    def f(x):
        return 'int!'

    @dispatch
    def f(x: float):
        return 'float!'
    """
    # If a single bare function was passed (callable but not a type), dispatch
    # on its annotations instead of on explicit types.
    # The caller's frame is captured so method dispatchers defined inside a
    # class body can be found again by name in that class namespace.
    frame = inspect.currentframe().f_back
    if (len(types) == 1
        and callable(types[0])
        and not isinstance(types[0], type)):
        fn = types[0]
        return dispatch_on_annotations(fn, frame=frame)
    # otherwise dispatch on the explicitly listed types
    else:
        return dispatch_on_types(*types, frame=frame)
def dispatch_on_types(*types, **kwargs):
    """ Dispatch function on the types of the inputs

    Supports dispatch on all non-keyword arguments.

    Collects implementations based on the function name.  Ignores namespaces.

    If ambiguous type signatures occur a warning is raised when the function is
    defined suggesting the additional method to break the ambiguity.

    Examples
    --------

    >>> @dispatch(int)
    ... def f(x):
    ...     return x + 1

    >>> @dispatch(float)
    ... def f(x):
    ...     return x - 1

    >>> f(3)
    4
    >>> f(3.0)
    2.0
    """
    types = tuple(types)
    frame = kwargs.get('frame', None)

    def _(func):
        name = func.__name__

        if ismethod(func):
            # Methods cannot use the global registry — the enclosing class
            # does not exist yet — so reuse (or create) a dispatcher stashed
            # in the class body's local namespace via the captured frame.
            dispatcher = frame.f_locals.get(name,
                                            MethodDispatcher(name))
        else:
            if name not in dispatchers:
                dispatchers[name] = Dispatcher(name)
            dispatcher = dispatchers[name]

        # A tuple inside ``types`` means "any of these"; register every
        # expanded combination.
        for typs in expand_tuples(types):
            dispatcher.add(typs, func)
        return dispatcher
    return _
def dispatch_on_annotations(fn, **kwargs):
    """
    Extract types from fn's annotations (only works in Python 3+).
    """
    if sys.version_info.major >= 3:
        argspec = inspect.getfullargspec(fn)
        # Skip ``self`` for methods so only real arguments are dispatched on.
        args = argspec.args[1:] if ismethod(fn) else argspec.args
        types = [argspec.annotations[a] for a in args]
        return dispatch_on_types(*types, **kwargs)(fn)
    else:
        # NOTE(review): raising SyntaxError at runtime is unconventional
        # (TypeError/RuntimeError would be typical), but callers may depend
        # on the current exception type.
        raise SyntaxError('Annotations require Python 3+.')
def ismethod(func):
    """ Is func a method?

    Note that this has to work as the method is defined but before the class
    is defined.  At this stage methods look like functions: we simply check
    whether the first positional parameter is named ``self``.
    """
    try:
        spec = inspect.getargspec(func)
    except (AttributeError, TypeError, ValueError):
        # BUGFIX: was a bare ``except:``.  getargspec was removed in
        # Python 3.11 (AttributeError), rejects some callables (TypeError),
        # and raises ValueError for functions with annotations or
        # keyword-only arguments; fall back to the modern API for all three.
        spec = inspect.getfullargspec(func)
    return bool(spec and spec.args and spec.args[0] == 'self')
def expand_tuples(L):
    """
    Expand tuple entries into every combination of their members.

    >>> expand_tuples([1, (2, 3)])
    [(1, 2), (1, 3)]

    >>> expand_tuples([1, 2])
    [(1, 2)]
    """
    # Build combinations right-to-left so the leftmost entry varies fastest,
    # matching the ordering of the original recursive implementation.
    combos = [()]
    for entry in reversed(L):
        options = entry if isinstance(entry, tuple) else (entry,)
        combos = [(option,) + tail for tail in combos for option in options]
    return combos
def str_signature(sig):
    """ String representation of type signature

    >>> str_signature((int, float))
    'int, float'
    """
    type_names = [cls.__name__ for cls in sig]
    return ', '.join(type_names)
def warning_text(name, amb):
    """ The text for ambiguity warnings """
    parts = ["\nAmbiguities exist in dispatched function %s\n\n" % (name)]
    parts.append("The following signatures may result in ambiguous behavior:\n")
    for pair in amb:
        formatted = ', '.join('[' + str_signature(s) + ']' for s in pair)
        parts.append("\t" + formatted + "\n")
    parts.append("\n\nConsider making the following additions:\n\n")
    suggestions = ['@dispatch(' + str_signature(super_signature(s))
                   + ')\ndef %s(...)' % name for s in amb]
    parts.append('\n\n'.join(suggestions))
    return ''.join(parts)
| StarcoderdataPython |
3217739 | <reponame>ver228/cell_localization<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 16:46:42 2018
@author: avelinojaver
"""
#import multiprocessing as mp
#mp.set_start_method('spawn', force=True)
from ..flow import collate_simple
from ..utils import save_checkpoint
from ..evaluation import get_masks_metrics, get_IoU_best_match
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from torch.utils.tensorboard import SummaryWriter
from collections import defaultdict
import torch
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm_
import time
import tqdm
import numpy as np
__all__ = ['train_segmentation']
def mask_metrics2scores(metrics, logger, prefix, epoch):
    """Convert per-class (TP, FP, FN, agg_inter, agg_union) counts into
    precision / recall / F1 / AJI scores, logging each value.

    Returns:
        dict: iclass -> (P, R, F1, AJI)
    """
    scores = {}
    for iclass, (tp, fp, fn, agg_inter, agg_union) in metrics.items():
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = 2 * precision * recall / (precision + recall)
        aji = agg_inter / agg_union  # aggregated Jaccard index
        scores[iclass] = (precision, recall, f1, aji)
        for tag, value in (('P', precision), ('R', recall),
                           ('F1', f1), ('AJI', aji)):
            logger.add_scalar(f'{prefix}_{tag}_{iclass}', value, epoch)
    return scores
def train_one_epoch(basename, model, optimizer, lr_scheduler, data_loader, device, epoch, logger):
    """Run one training epoch and return the averaged total loss.

    Parameters
    ----------
    basename : str
        Prefix shown in the progress-bar description.
    model : torch.nn.Module
        Called as ``model(images, targets)``; must return a dict of named
        loss tensors.
    optimizer : torch.optim.Optimizer
    lr_scheduler : scheduler or None
        Stepped once per *batch* here when given.
        NOTE(review): train_segmentation also steps the scheduler once per
        epoch -- confirm the double stepping is intentional.
    data_loader : iterable yielding (images, targets) batches of numpy arrays.
    device : torch.device
    epoch : int
    logger : object with ``add_scalar(tag, value, step)`` (e.g. SummaryWriter).

    Returns
    -------
    float
        Sum over loss terms of the per-batch-averaged training loss.
    """
    # Modified from https://github.com/pytorch/vision/blob/master/references/detection/engine.py
    model.train()
    header = f'{basename} Train Epoch: [{epoch}]'
    train_avg_losses = defaultdict(int)
    pbar = tqdm.tqdm(data_loader, desc = header)
    for images, targets in pbar:
        # Batches arrive as numpy arrays; stack and move to the device.
        images = torch.from_numpy(np.stack(images)).to(device)
        targets = [{k: torch.from_numpy(v).to(device) for k, v in target.items()} for target in targets]
        losses = model(images, targets)
        # Total loss is the plain sum of every named loss term.
        loss = sum([x for x in losses.values()])
        optimizer.zero_grad()
        loss.backward()
        clip_grad_norm_(model.parameters(), 0.5) # I was having problems here before. I am not completely sure this makes a difference now
        optimizer.step()
        if lr_scheduler is not None:
            lr_scheduler.step()
        # Accumulate each named loss term for epoch-level averaging.
        for k,l in losses.items():
            train_avg_losses[k] += l.item()
    train_avg_losses = {k: loss / len(data_loader) for k, loss in train_avg_losses.items()} #average loss
    train_avg_loss = sum([x for x in train_avg_losses.values()]) # total loss
    #save data into the logger
    for k, loss in train_avg_losses.items():
        logger.add_scalar('train_' + k, loss, epoch)
    logger.add_scalar('train_epoch_loss', train_avg_loss, epoch)
    return train_avg_loss
@torch.no_grad()
def evaluate_one_epoch(basename, model, data_loader, device, epoch, logger, eval_dist = 5):
    """Evaluate the model image-by-image and return the 'all'-class AJI.

    Logs per-loss averages, total validation loss, average model forward
    time, and the P/R/F1/AJI scores derived from mask matching.

    NOTE(review): ``eval_dist`` is accepted but never used in this body --
    confirm whether it should be forwarded to the matching functions.
    """
    model.eval()
    header = f'{basename} Test Epoch: [{epoch}]'
    # Accumulator layout: (TP, FP, FN, agg_inter, agg_union) per class.
    metrics = {'all' : np.zeros(5)}
    model_time_avg = 0
    test_avg_losses = defaultdict(int)
    N = len(data_loader.data_indexes)
    for ind in tqdm.trange(N, desc = header):
        image, target = data_loader.read_full(ind) #I am evaluating one image at the time because some images can have seperated size
        image = image.to(device)
        target = {k: v.to(device) for k, v in target.items()}
        if torch.cuda.is_available():
            # Synchronize so the forward-pass timing below is accurate.
            torch.cuda.synchronize()
        model_time = time.time()
        losses, pred_segmentation = model(image[None], [target])
        model_time_avg += time.time() - model_time
        for k,l in losses.items():
            test_avg_losses[k] += l.item()
        # Label 1 marks the cell class in both target and prediction.
        true_cells_mask = (target['segmentation_mask']==1).cpu().numpy().astype(np.uint8)
        pred_cells_mask = (pred_segmentation[0] == 1).cpu().numpy().astype(np.uint8)
        pred_coords, target_coords, IoU, agg_inter, agg_union = get_masks_metrics(true_cells_mask, pred_cells_mask)
        TP, FP, FN, pred_ind, true_ind = get_IoU_best_match(IoU)
        metrics['all'] += TP, FP, FN, agg_inter, agg_union
    model_time_avg /= N
    test_avg_losses = {k:loss/N for k, loss in test_avg_losses.items()} #get the average...
    test_avg_loss = sum([x for x in test_avg_losses.values()]) #... and the total loss
    #save data into the logger
    for k, loss in test_avg_losses.items():
        logger.add_scalar('val_' + k, loss, epoch)
    logger.add_scalar('val_avg_loss', test_avg_loss, epoch)
    logger.add_scalar('model_time', model_time_avg, epoch)
    scores = mask_metrics2scores(metrics, logger, 'val', epoch)
    # AJI is the last element of the (P, R, F1, AJI) tuple.
    AJI = scores['all'][-1]
    return AJI
def train_segmentation(save_prefix,
                       model,
                       device,
                       train_flow,
                       val_flow,
                       optimizer,
                       log_dir,
                       lr_scheduler = None,
                       batch_size = 16,
                       n_epochs = 2000,
                       num_workers = 1,
                       init_model_path = None,
                       save_frequency = 200,
                       hard_mining_freq = None,
                       val_dist = 5
                       ):
    """Train a segmentation model, validating each epoch and checkpointing.

    The best model (by validation AJI) is saved via ``save_checkpoint``;
    a full checkpoint is additionally written every ``save_frequency`` epochs.

    NOTE(review): ``init_model_path`` and ``hard_mining_freq`` are accepted
    but unused here -- confirm whether they should be wired in. The
    per-epoch ``lr_scheduler.step()`` below runs in addition to the
    per-batch stepping inside ``train_one_epoch``.
    """
    train_loader = DataLoader(train_flow,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              collate_fn = collate_simple,
                              )
    model = model.to(device)
    log_dir = log_dir / save_prefix
    logger = SummaryWriter(log_dir = str(log_dir))
    best_score = 0  # AJI lies in [0, 1]; higher is better
    pbar_epoch = tqdm.trange(n_epochs)
    for epoch in pbar_epoch:
        train_one_epoch(save_prefix,
                        model,
                        optimizer,
                        lr_scheduler,
                        train_loader,
                        device,
                        epoch,
                        logger
                        )
        if lr_scheduler is not None:
            lr_scheduler.step()
        val_AJI = evaluate_one_epoch(save_prefix,
                                     model,
                                     val_flow,
                                     device,
                                     epoch,
                                     logger,
                                     val_dist
                                     )
        desc = f'epoch {epoch} , AJI={val_AJI}'
        pbar_epoch.set_description(desc = desc, refresh=False)
        state = {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer' : optimizer.state_dict(),
            'model_input_parameters' : model.input_parameters,
            'train_flow_input_parameters': train_loader.dataset.input_parameters,
            # Bug fix: previously recorded the *train* flow parameters twice.
            'val_flow_input_parameters': val_flow.input_parameters
        }
        is_best = val_AJI > best_score
        if is_best:
            best_score = val_AJI
        save_checkpoint(state, is_best, save_dir = str(log_dir))
        if (epoch+1) % save_frequency == 0:
            checkpoint_path = log_dir / f'checkpoint-{epoch}.pth.tar'
            torch.save(state, checkpoint_path)
| StarcoderdataPython |
199613 | <reponame>Eve-ning/reamber_base_py<gh_stars>1-10
from __future__ import annotations
from typing import List, Dict, Any
import pandas as pd
from reamber.base.Property import list_props
from reamber.base.lists.notes.HitList import HitList
from reamber.quaver.QuaHit import QuaHit
from reamber.quaver.lists.notes.QuaNoteList import QuaNoteList
@list_props(QuaHit)
class QuaHitList(HitList[QuaHit], QuaNoteList[QuaHit]):
    """List of Quaver hit objects with (de)serialization for .qua YAML dicts."""

    @staticmethod
    def from_yaml(dicts: List[Dict[str, Any]]) -> QuaHitList:
        """Build a QuaHitList from the HitObjects dicts parsed out of a .qua file.

        Renames the .qua keys (StartTime/Lane/KeySounds) to the internal
        column names and fills missing offsets/columns with 0.
        """
        df = pd.DataFrame(dicts)
        df = df.rename(dict(StartTime='offset', Lane='column', KeySounds='keysounds'),
                       axis=1)
        # .qua lanes are 1-based; the internal representation is 0-based.
        df.column -= 1
        # Ensure the expected columns exist even if absent from the input.
        df = df.reindex(df.columns.union(['offset', 'column', 'keysounds'], sort=False), axis=1)
        df.offset = df.offset.fillna(0)
        df.column = df.column.fillna(0)
        return QuaHitList(df)

    def to_yaml(self):
        """Serialize back to a list of .qua-style dicts (1-based lanes)."""
        df = self.df.copy()
        df.column += 1
        return df.astype(dict(offset=int, column=int))\
            .rename(dict(offset='StartTime', column='Lane', keysounds='KeySounds'), axis=1).to_dict('records')
| StarcoderdataPython |
71618 | # ============================================================================
# Copyright 2021 The AIMM team at Shenzhen Bay Laboratory & Peking University
#
# People: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This code is a part of Cybertron-Code package.
#
# The Cybertron-Code is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""cybertron cutoff"""
import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from .units import units
__all__ = [
"CosineCutoff",
"MollifierCutoff",
"HardCutoff",
"SmoothCutoff",
"GaussianCutoff",
"get_cutoff",
]
# Registry mapping lowercase alias strings to Cutoff subclasses.
_CUTOFF_ALIAS = {}


def _cutoff_register(*aliases):
    """Return a class decorator registering a cutoff under its aliases."""
    def alias_reg(cls):
        # The lowercased class name is registered first, followed by any
        # explicit aliases; existing entries are never overwritten.
        for key in (cls.__name__.lower(),) + aliases:
            _CUTOFF_ALIAS.setdefault(key, cls)
        return cls
    return alias_reg
class Cutoff(nn.Cell):
    """Base class holding the shared configuration of all cutoff functions.

    Args:
        r_max (float): cutoff radius, stored as ``self.cutoff``.
        r_min (float): inner radius; only meaningful for some subclasses.
        hyperparam: subclass-specific extra parameter; the string
            'default' is a sentinel each subclass interprets itself.
        return_mask (bool): when True, ``construct`` also returns the
            validity mask.
        reverse (bool): when True, subclasses compute 1 - f(r).
    """
    def __init__(self,
                 r_max=units.length(1, 'nm'),
                 r_min=0,
                 hyperparam='default',
                 return_mask=False,
                 reverse=False
                 ):
        super().__init__()
        self.name = 'cutoff'
        self.hyperparam = hyperparam
        self.r_min = r_min
        self.cutoff = r_max
        self.return_mask = return_mask
        self.reverse = reverse
@_cutoff_register('cosine')
class CosineCutoff(Cutoff):
    r"""Class of Behler cosine cutoff.

    .. math::
       f(r) = \begin{cases}
        0.5 \times \left[1 + \cos\left(\frac{\pi r}{r_\text{cutoff}}\right)\right]
          & r < r_\text{cutoff} \\
        0 & r \geqslant r_\text{cutoff} \\
        \end{cases}

    Args:
        r_max (float): cutoff radius.
        return_mask (bool): also return the validity mask from ``construct``.
        reverse (bool): return 1 - f(r) instead of f(r).
    """
    def __init__(self,
                 r_max=units.length(1, 'nm'),
                 r_min='default',
                 hyperparam='default',
                 return_mask=False,
                 reverse=False
                 ):
        super().__init__(
            r_max=r_max,
            r_min=r_min,
            hyperparam=None,
            return_mask=return_mask,
            reverse=reverse,
        )
        self.name = 'cosine cutoff'
        self.pi = Tensor(np.pi, ms.float32)
        self.cos = P.Cos()
        self.logical_and = P.LogicalAnd()

    def construct(self, distances, neighbor_mask=None):
        """Compute cutoff.

        Args:
            distances (mindspore.Tensor): values of interatomic distances.
            neighbor_mask (mindspore.Tensor, optional): validity mask.

        Returns:
            mindspore.Tensor: values of cutoff function (and the mask when
            ``return_mask`` is True).
        """
        # Compute values of cutoff function
        cuts = 0.5 * (self.cos(distances * self.pi / self.cutoff) + 1.0)
        if self.reverse:
            cuts = 1.0 - cuts
            ones = F.ones_like(cuts)
            # Bug fix: clamp against the cutoff *radius* (distances <
            # self.cutoff) as the other cutoff classes do; previously this
            # compared the distances against the cutoff values themselves.
            cuts = F.select(distances < self.cutoff, cuts, ones)
            if neighbor_mask is None:
                mask = distances >= 0
            else:
                mask = neighbor_mask
        else:
            mask = distances < self.cutoff
            if neighbor_mask is not None:
                mask = self.logical_and(mask, neighbor_mask)
        # Remove contributions beyond the cutoff radius
        cutoffs = cuts * mask
        if self.return_mask:
            return cutoffs, mask
        return cutoffs
@_cutoff_register('mollifier')
class MollifierCutoff(Cutoff):
    r"""Class for mollifier cutoff scaled to have a value of 1 at :math:`r=0`.

    .. math::
       f(r) = \begin{cases}
        \exp\left(1 - \frac{1}{1 - \left(\frac{r}{r_\text{cutoff}}\right)^2}\right)
          & r < r_\text{cutoff} \\
        0 & r \geqslant r_\text{cutoff} \\
        \end{cases}

    Args:
        r_max (float): cutoff radius.
        hyperparam: offset added to distances for numerical stability;
            'default' selects 1e-8 nm.
    """
    def __init__(self,
                 r_max=units.length(1, 'nm'),
                 r_min='default',
                 hyperparam='default',
                 return_mask=False,
                 reverse=False
                 ):
        super().__init__(
            r_min=r_min,
            r_max=r_max,
            hyperparam=hyperparam,
            return_mask=return_mask,
            reverse=reverse,
        )
        self.name = "Mollifier cutoff"
        if hyperparam == 'default':
            # Small length offset for the numerical stability of the mask.
            self.eps = units.length(1.0e-8, 'nm')
        else:
            self.eps = hyperparam
        self.exp = P.Exp()
        self.logical_and = P.LogicalAnd()

    def construct(self, distances, neighbor_mask=None):
        """Compute cutoff.

        Args:
            distances (mindspore.Tensor): values of interatomic distances.
            neighbor_mask (mindspore.Tensor, optional): validity mask.

        Returns:
            mindspore.Tensor: values of cutoff function (and the mask when
            ``return_mask`` is True).
        """
        # NOTE: at distances == cutoff the denominator is zero; the
        # offending entries are discarded by the select/mask below.
        exponent = 1.0 - 1.0 / (1.0 - F.square(distances / self.cutoff))
        cutoffs = self.exp(exponent)
        if self.reverse:
            cutoffs = 1. - cutoffs
            ones = F.ones_like(cutoffs)
            cutoffs = F.select(distances < self.cutoff, cutoffs, ones)
            if neighbor_mask is None:
                mask = (distances + self.eps) >= 0
            else:
                mask = neighbor_mask
        else:
            mask = (distances + self.eps) < self.cutoff
            if neighbor_mask is not None:
                mask = self.logical_and(mask, neighbor_mask)
        cutoffs = cutoffs * mask
        # Bug fix: honour return_mask like every other cutoff class instead
        # of unconditionally returning the (cutoffs, mask) tuple.
        if self.return_mask:
            return cutoffs, mask
        return cutoffs
@_cutoff_register('hard')
class HardCutoff(Cutoff):
    r"""Class of hard cutoff.

    .. math::
       f(r) = \begin{cases}
        1 & r \leqslant r_\text{cutoff} \\
        0 & r > r_\text{cutoff} \\
        \end{cases}

    Args:
        r_max (float): cutoff radius.
        reverse (bool): invert the step (1 beyond the cutoff, 0 inside).
    """
    def __init__(self,
                 r_max=units.length(1, 'nm'),
                 r_min=0,
                 hyperparam='default',
                 return_mask=False,
                 reverse=False
                 ):
        super().__init__(
            r_min=r_min,
            r_max=r_max,
            hyperparam=None,
            return_mask=return_mask,
            reverse=reverse,
        )
        self.name = "Hard cutoff"
        self.logical_and = P.LogicalAnd()

    def construct(self, distances, neighbor_mask=None):
        """Compute cutoff.

        Args:
            distances (mindspore.Tensor): values of interatomic distances.
            neighbor_mask (mindspore.Tensor, optional): validity mask.

        Returns:
            mindspore.Tensor: values of cutoff function (and the mask when
            ``return_mask`` is True).
        """
        if self.reverse:
            mask = distances >= self.cutoff
        else:
            mask = distances < self.cutoff
        if neighbor_mask is not None:
            # Bug fix: the combined mask was previously computed and
            # discarded, so neighbor_mask had no effect.
            mask = self.logical_and(mask, neighbor_mask)
        if self.return_mask:
            return F.cast(mask, distances.dtype), mask
        return F.cast(mask, distances.dtype)
@_cutoff_register('smooth')
class SmoothCutoff(Cutoff):
    r"""Class of smooth cutoff by <NAME>. et al:
    [ref] <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.
    Texturing & Modeling: A Procedural Approach; <NAME>: 2003

    .. math::
        r_min < r < r_max:
        f(r) = 1.0 - 6 * ( r / r_cutoff ) ^ 5
               + 15 * ( r / r_cutoff ) ^ 4
               - 10 * ( r / r_cutoff ) ^ 3
        r >= r_max: f(r) = 0
        r <= r_min: f(r) = 1

        reverse:
        r_min < r < r_max:
        f(r) = 6 * ( r / r_cutoff ) ^ 5
               - 15 * ( r / r_cutoff ) ^ 4
               + 10 * ( r / r_cutoff ) ^ 3
        r >= r_max: f(r) = 1
        r <= r_min: f(r) = 0

    Args:
        r_max (float): the maximum distance (cutoff radius).
        r_min (float): the minimum distance (inner radius).
    """
    def __init__(self,
                 r_max=units.length(1, 'nm'),
                 r_min=0,
                 hyperparam='default',
                 return_mask=False,
                 reverse=False
                 ):
        super().__init__(
            r_min=r_min,
            r_max=r_max,
            hyperparam=None,
            return_mask=return_mask,
            reverse=reverse,
        )
        if self.r_min >= self.cutoff:
            # Bug fix: corrected the 'SmmothCutoff' typo in the message.
            raise ValueError(
                'dis_min must be smaller than cutoff at SmoothCutoff')
        self.dis_range = self.cutoff - self.r_min
        self.pow = P.Pow()
        self.logical_and = P.LogicalAnd()

    def construct(self, distance, neighbor_mask=None):
        """Compute cutoff.

        Args:
            distance (mindspore.Tensor): values of interatomic distances.
            neighbor_mask (mindspore.Tensor, optional): validity mask.

        Returns:
            mindspore.Tensor: values of cutoff function (and the lower mask
            when ``return_mask`` is True).
        """
        # Normalise the distance to [0, 1] across the smoothing window.
        dd = distance - self.r_min
        dd = dd / self.dis_range
        cuts = - 6. * self.pow(dd, 5) \
               + 15. * self.pow(dd, 4) \
               - 10. * self.pow(dd, 3)
        if self.reverse:
            cutoffs = -cuts
            mask_upper = distance < self.cutoff
            mask_lower = distance > self.r_min
        else:
            cutoffs = 1 + cuts
            mask_upper = distance > self.r_min
            mask_lower = distance < self.cutoff
        if neighbor_mask is not None:
            mask_lower = self.logical_and(mask_lower, neighbor_mask)
        # Clamp outside the smoothing window: 1 on one side, 0 on the other.
        zeros = F.zeros_like(distance)
        ones = F.ones_like(distance)
        cutoffs = F.select(mask_upper, cutoffs, ones)
        cutoffs = F.select(mask_lower, cutoffs, zeros)
        if self.return_mask:
            return cutoffs, mask_lower
        return cutoffs
@_cutoff_register('gaussian')
class GaussianCutoff(Cutoff):
    r"""Gaussian-shaped switching cutoff.

    .. math::
       f(r) = \begin{cases}
        1 - \exp\left(-\frac{(r - r_\text{cutoff})^2}{2\sigma^2}\right)
          & r < r_\text{cutoff} \\
        0 & r \geqslant r_\text{cutoff} \\
        \end{cases}

    When ``reverse`` is set, the Gaussian itself is returned (clamped to 1
    beyond the cutoff radius).

    Args:
        r_max (float): cutoff radius.
        hyperparam: Gaussian width :math:`\sigma`; 'default' selects 1 nm.
    """
    def __init__(self,
                 r_max=units.length(1, 'nm'),
                 r_min=0,
                 hyperparam='default',
                 return_mask=False,
                 reverse=False
                 ):
        super().__init__(
            r_min=r_min,
            r_max=r_max,
            hyperparam=hyperparam,
            return_mask=return_mask,
            reverse=reverse,
        )
        if hyperparam == 'default':
            self.sigma = units.length(1, 'nm')
        else:
            self.sigma = hyperparam
        self.sigma2 = self.sigma * self.sigma
        self.exp = P.Exp()
        self.logical_and = P.LogicalAnd()

    def construct(self, distance, neighbor_mask=None):
        """Compute the Gaussian cutoff values (and mask when requested)."""
        # Gaussian centred on the cutoff radius with width sigma.
        dd = distance - self.cutoff
        dd2 = dd * dd
        gauss = self.exp(-0.5 * dd2 / self.sigma2)
        if self.reverse:
            cuts = gauss
            ones = F.ones_like(cuts)
            cuts = F.select(distance < self.cutoff, cuts, ones)
            if neighbor_mask is None:
                mask = distance >= 0
            else:
                mask = neighbor_mask
        else:
            cuts = 1. - gauss
            mask = distance < self.cutoff
            if neighbor_mask is not None:
                mask = self.logical_and(mask, neighbor_mask)
        cuts = cuts * mask
        if self.return_mask:
            return cuts, mask
        return cuts
def get_cutoff(obj, r_max=units.length(1, 'nm'), r_min=0, hyperparam='default', return_mask=False, reverse=False):
    """Resolve *obj* into a Cutoff instance.

    Args:
        obj: None or a Cutoff instance (both returned unchanged), or an
            alias string registered via ``_cutoff_register``
            (case-insensitive).
        r_max, r_min, hyperparam, return_mask, reverse: forwarded to the
            resolved cutoff class constructor.

    Raises:
        ValueError: if the alias string is unknown.
        TypeError: if *obj* has an unsupported type.
    """
    if obj is None or isinstance(obj, Cutoff):
        return obj
    if isinstance(obj, str):
        # Bug fix: normalise the key once. The membership test previously
        # used the raw string while the lookup used its lowercase form, so
        # valid mixed-case aliases were rejected.
        key = obj.lower()
        if key not in _CUTOFF_ALIAS:
            raise ValueError(
                "The class corresponding to '{}' was not found.".format(obj))
        return _CUTOFF_ALIAS[key](
            r_min=r_min,
            r_max=r_max,
            hyperparam=hyperparam,
            return_mask=return_mask,
            reverse=reverse,
        )
    raise TypeError("Unsupported Cutoff type '{}'.".format(type(obj)))
| StarcoderdataPython |
194890 | <reponame>guardian-network/hydra
"""
Classes in this file are standalone because we don't want to impose a false hierarchy
between two classes. That is, inheritance may imply a hierarchy that isn't real.
"""
class Settings(object):
    """Numeric tolerances and environment paths shared across the pipeline."""
    # NOTE(review): the k* constants appear plink-related given the plink
    # path below -- confirm their exact meaning against plink sources.
    kExactTestBias = 1.0339757656912846e-25
    kSmallEpsilon = 5.684341886080802e-14
    kLargeEpsilon = 1e-07
    SMALL_EPSILON = 5.684341886080802e-14  # duplicate of kSmallEpsilon
    local_scratch = '/app/scratch'  # scratch directory inside the container
    python = 'python'  # python executable used for subprocesses
    plink = "/srv/gsfs0/software/plink/1.90/plink"  # path to the plink binary
    redis_uri = 'redis://hydra_redis:6379'  # redis connection string
class Commands(object):
    """String constants naming the protocol commands."""
    HELP = "HELP"
    INIT = "INIT"
    INIT_STATS = 'INIT_STATS'
    QC = "QC"
    PCA = "PCA"
    ECHO = "ECHO"
    ASSO = "ASSO"
    EXIT = "EXIT"
    # NOTE(review): INIT_STATS and ECHO are absent from all_commands --
    # confirm this is intentional for the v.1 interface.
    all_commands = [HELP, INIT, QC, PCA, ASSO, EXIT] # used by v.1 interface
    commands_with_parms = [QC, PCA, ASSO]  # commands that take parameters
class Thresholds(object):
    """Default numeric thresholds for each pipeline stage."""
    # ECHO options
    ECHO_COUNTS = 20
    # QC Options
    QC_hwe = 1e-10
    QC_maf = 0.01
    # PCA Options
    PCA_maf = 0.1
    PCA_ld_window = 50
    PCA_ld_threshold = 0.2
    PCA_pcs = 10  # number of principal components to compute
    # Association Options
    ASSO_pcs = 10  # number of PCs used as association covariates
class Options(object):
    """Option-name constants; the first group mirrors Commands."""
    INIT = Commands.INIT
    QC = Commands.QC
    PCA = Commands.PCA
    ASSO = Commands.ASSO
    EXIT = Commands.EXIT
    HWE = "HWE"
    MAF = "MAF"
    MPS = "MPS"
    MPI = "MPI"
    SNP = "snp"  # NOTE: lowercase, unlike the other option names
    LD = "LD"
    NONE = "NONE"
class QCOptions(object):
    """Filter options accepted by the QC command."""
    HWE = Options.HWE
    MAF = Options.MAF
    MPS = Options.MPS
    MPI = Options.MPI
    SNP = Options.SNP
    all_options = [HWE, MAF, MPS, MPI, SNP]  # every valid QC filter
class PCAOptions(object):
    """Filter options accepted by the PCA command (QC set plus LD/NONE)."""
    HWE = Options.HWE
    MAF = Options.MAF
    MPS = Options.MPS
    MPI = Options.MPI
    SNP = Options.SNP
    LD = Options.LD
    NONE = Options.NONE
    all_options = [HWE, MAF, MPS, MPI, SNP, LD, NONE]  # every valid PCA filter
class QCFilterNames(object):
    """Canonical QC filter names (aliases into Options)."""
    QC_HWE = Options.HWE
    QC_MAF = Options.MAF
    QC_MPS = Options.MPS
    QC_MPI = Options.MPI
    QC_snp = Options.SNP
class PCAFilterNames(object):
    """Canonical PCA filter names (aliases into Options)."""
    PCA_HWE = Options.HWE
    PCA_MAF = Options.MAF
    PCA_MPS = Options.MPS
    PCA_MPI = Options.MPI
    PCA_snp = Options.SNP
    PCA_LD = Options.LD
    PCA_NONE = Options.NONE
external_host = "hydratest23.azurewebsites.net"
class ServerHTTP(object):
    """Bind/public address settings for the server process."""
    listen_host = '0.0.0.0'  # bind on all interfaces
    external_host = external_host  # module-level external hostname
    port = '9001'  # NOTE(review): kept as a string -- confirm callers expect str
    max_content_length = 1024 * 1024 * 1024  # 1 GB
    wait_time = 0.5  # for the time.sleep() hacks
class ClientHTTP(object):
    """Per-client network settings for the three compute centers."""
    default_max_content_length = 1024 * 1024 * 1024  # 1 GB
    default_listen_host = '0.0.0.0'  # bind on all interfaces
    default_external_host = external_host  # module-level external hostname
    # One entry per center; all share the defaults except for the port.
    clients = [{
        'name': 'Center1',
        'listen_host': default_listen_host,
        'external_host': default_external_host,
        'port': 9002,
        'max_content_length': default_max_content_length
    },
        {
            'name': 'Center2',
            'listen_host': default_listen_host,
            'external_host': default_external_host,
            'port': 9003,
            'max_content_length': default_max_content_length
    },
        {
            'name': 'Center3',
            'listen_host': default_listen_host,
            'external_host': default_external_host,
            'port': 9004,
            'max_content_length': default_max_content_length
    }
    ]
| StarcoderdataPython |
class ListView:
    """Read-only wrapper around a list.

    Forwards every non-mutating list operation to the wrapped list while
    rejecting the in-place operators (``+=``, ``*=``) so the view cannot be
    rebound or used to mutate through augmented assignment.

    NOTE(review): ``__hash__`` delegates to ``self._list.__hash__()``; when
    the wrapped object is a plain list that attribute is ``None``, so
    ``hash(view)`` raises ``TypeError: 'NoneType' object is not callable``.
    Confirm whether the view should instead be explicitly unhashable.
    """
    __slots__ = ['_list']

    def __init__(self, list_object):
        # Keep a reference (not a copy): the view tracks later mutations of
        # the underlying list.
        self._list = list_object

    def __add__(self, other):
        return self._list.__add__(other)

    def __getitem__(self, other):
        return self._list.__getitem__(other)

    def __contains__(self, item):
        return self._list.__contains__(item)

    def __eq__(self, other):
        return self._list.__eq__(other)

    def __hash__(self):
        return self._list.__hash__()

    def __ge__(self, other):
        # Compare underlying lists when both sides are views.
        if isinstance(other, ListView):
            return self._list.__ge__(other._list)
        return self._list.__ge__(other)

    def __gt__(self, other):
        if isinstance(other, ListView):
            return self._list.__gt__(other._list)
        return self._list.__gt__(other)

    def __iter__(self):
        return self._list.__iter__()

    def __le__(self, other):
        if isinstance(other, ListView):
            return self._list.__le__(other._list)
        return self._list.__le__(other)

    def __len__(self):
        return self._list.__len__()

    def __lt__(self, other):
        if isinstance(other, ListView):
            return self._list.__lt__(other._list)
        return self._list.__lt__(other)

    def __ne__(self, other):
        return self._list.__ne__(other)

    def __mul__(self, other):
        return self._list.__mul__(other)

    def __rmul__(self, n):
        return self._list.__rmul__(n)

    def __reversed__(self):
        return self._list.__reversed__()

    def __repr__(self):
        return self._list.__repr__()

    def __str__(self):
        return self._list.__str__()

    def __radd__(self, other):
        return other + self._list

    def __iadd__(self, other):
        # Bug fix: the error message previously named the wrong class
        # ("SetView") -- clearly a copy-paste from a sibling view class.
        raise TypeError("unsupported operator for type ListView")

    def __imul__(self, other):
        # Bug fix: same wrong class name as __iadd__.
        raise TypeError("unsupported operator for type ListView")

    def copy(self):
        """Return a shallow copy of the underlying list."""
        return self._list.copy()

    def count(self, object):
        """Return the number of occurrences of *object*."""
        return self._list.count(object)

    def index(self, *args, **kwargs):
        """Return the first index of a value; see ``list.index``."""
        return self._list.index(*args, **kwargs)
1733710 | from django import forms
import json
from ..models.reviewer import Reviewer
from ..models.applicant import Applicant
from ..models.review import Review
from ..models.metric import Metric
class ReviewForm(forms.Form):
    """Dynamic review form with one 1-5 rating field per configured Metric."""

    def __init__(self, *args, **kwargs):
        """Build the rating fields from the Metric table plus a comment box."""
        self.instance = kwargs.pop('instance')
        super().__init__(*args, **kwargs)
        # Dynamically create the choice fields with rating options from 1 to 5
        metrics = self.get_metrics()
        for metric in metrics:
            self.fields[metric] = forms.ChoiceField(choices=[(x, x) for x in range(1, 6)])
        self.fields['additional-comments'] = forms.CharField(label='Additional Comments', max_length=500, required=False, widget=forms.Textarea())

    def save(self, reviewer_name, document):
        """Persist a Review linking *reviewer_name* to the applicant of *document*.

        NOTE(review): the ``del self.cleaned_data['applicant']`` below assumes
        an 'applicant' key is present in cleaned_data even though no field of
        that name is defined here -- confirm a caller or subclass injects it,
        otherwise the ``del`` raises KeyError. Also note the created Review
        is neither used nor returned.
        """
        reviewer, _ = Reviewer.objects.get_or_create(
            name=reviewer_name
        )
        try:
            applicant = Applicant.objects.get(document=document)
        except (Applicant.DoesNotExist, Applicant.MultipleObjectsReturned):
            # No unique applicant for this document: derive a name from the
            # document label (up to the first dot) and create one.
            max_name = Applicant._meta.get_field('name').max_length
            try:
                name = document.label[:document.label.index('.')]
            except ValueError:
                name = document.label
            applicant, _ = Applicant.objects.get_or_create(
                name=name[:max_name],
                document=document
            )
        del self.cleaned_data['applicant']
        review = Review.objects.create(
            reviewer=reviewer,
            applicant=applicant,
            evaluation=json.dumps(self.cleaned_data) # Serialized evaluations
        )

    def get_metrics(self):
        """Return the names of all Metric rows."""
        return [x.metric_name for x in Metric.objects.all()]
| StarcoderdataPython |
1613934 | <gh_stars>0
import shutil
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Mapping
import pytest
from _pytest.capture import CaptureFixture
from _pytest.tmpdir import TempdirFactory
from freezegun import freeze_time
from prance import ValidationError
from datamodel_code_generator.__main__ import Exit, main
# Fixture locations, grouped by input format.
DATA_PATH: Path = Path(__file__).parent / 'data'
OPEN_API_DATA_PATH: Path = DATA_PATH / 'openapi'
JSON_SCHEMA_DATA_PATH: Path = DATA_PATH / 'jsonschema'
JSON_DATA_PATH: Path = DATA_PATH / 'json'
YAML_DATA_PATH: Path = DATA_PATH / 'yaml'
# Fixed timestamp used by the frozen-time tests below.
TIMESTAMP = '1985-10-26T01:21:00-07:00'
@freeze_time('2019-07-26')
def test_main():
    """CLI generates the expected pydantic models from an OpenAPI file."""
    with TemporaryDirectory() as output_dir:
        output_file: Path = Path(output_dir) / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(OPEN_API_DATA_PATH / 'api.yaml'),
                '--output',
                str(output_file),
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: api.yaml
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import AnyUrl, BaseModel, Field
class Pet(BaseModel):
    id: int
    name: str
    tag: Optional[str] = None
class Pets(BaseModel):
    __root__: List[Pet]
class User(BaseModel):
    id: int
    name: str
    tag: Optional[str] = None
class Users(BaseModel):
    __root__: List[User]
class Id(BaseModel):
    __root__: str
class Rules(BaseModel):
    __root__: List[str]
class Error(BaseModel):
    code: int
    message: str
class api(BaseModel):
    apiKey: Optional[str] = Field(
        None, description='To be used as a dataset parameter value'
    )
    apiVersionNumber: Optional[str] = Field(
        None, description='To be used as a version parameter value'
    )
    apiUrl: Optional[AnyUrl] = Field(
        None, description="The URL describing the dataset\'s fields"
    )
    apiDocumentationUrl: Optional[AnyUrl] = Field(
        None, description='A URL to the API console for each API'
    )
class apis(BaseModel):
    __root__: List[api]
class Event(BaseModel):
    name: Optional[str] = None
class Result(BaseModel):
    event: Optional[Event] = None
'''
        )
    # Invoking main() with no arguments must exit via argparse.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_main_base_class():
    """--base-class swaps BaseModel for a custom import in every model."""
    with TemporaryDirectory() as output_dir:
        output_file: Path = Path(output_dir) / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(OPEN_API_DATA_PATH / 'api.yaml'),
                '--output',
                str(output_file),
                '--base-class',
                'custom_module.Base',
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: api.yaml
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import AnyUrl, Field
from custom_module import Base
class Pet(Base):
    id: int
    name: str
    tag: Optional[str] = None
class Pets(Base):
    __root__: List[Pet]
class User(Base):
    id: int
    name: str
    tag: Optional[str] = None
class Users(Base):
    __root__: List[User]
class Id(Base):
    __root__: str
class Rules(Base):
    __root__: List[str]
class Error(Base):
    code: int
    message: str
class api(Base):
    apiKey: Optional[str] = Field(
        None, description='To be used as a dataset parameter value'
    )
    apiVersionNumber: Optional[str] = Field(
        None, description='To be used as a version parameter value'
    )
    apiUrl: Optional[AnyUrl] = Field(
        None, description="The URL describing the dataset\'s fields"
    )
    apiDocumentationUrl: Optional[AnyUrl] = Field(
        None, description='A URL to the API console for each API'
    )
class apis(Base):
    __root__: List[api]
class Event(Base):
    name: Optional[str] = None
class Result(Base):
    event: Optional[Event] = None
'''
        )
    # Invoking main() with no arguments must exit via argparse.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_target_python_version():
    """--target-python-version 3.6 emits quoted forward refs, no __future__."""
    with TemporaryDirectory() as output_dir:
        output_file: Path = Path(output_dir) / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(OPEN_API_DATA_PATH / 'api.yaml'),
                '--output',
                str(output_file),
                '--target-python-version',
                '3.6',
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: api.yaml
# timestamp: 2019-07-26T00:00:00+00:00
from typing import List, Optional
from pydantic import AnyUrl, BaseModel, Field
class Pet(BaseModel):
    id: int
    name: str
    tag: Optional[str] = None
class Pets(BaseModel):
    __root__: List['Pet']
class User(BaseModel):
    id: int
    name: str
    tag: Optional[str] = None
class Users(BaseModel):
    __root__: List['User']
class Id(BaseModel):
    __root__: str
class Rules(BaseModel):
    __root__: List[str]
class Error(BaseModel):
    code: int
    message: str
class api(BaseModel):
    apiKey: Optional[str] = Field(
        None, description='To be used as a dataset parameter value'
    )
    apiVersionNumber: Optional[str] = Field(
        None, description='To be used as a version parameter value'
    )
    apiUrl: Optional[AnyUrl] = Field(
        None, description="The URL describing the dataset\'s fields"
    )
    apiDocumentationUrl: Optional[AnyUrl] = Field(
        None, description='A URL to the API console for each API'
    )
class apis(BaseModel):
    __root__: List['api']
class Event(BaseModel):
    name: Optional[str] = None
class Result(BaseModel):
    event: Optional['Event'] = None
'''
        )
    # Invoking main() with no arguments must exit via argparse.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_main_autodetect():
    """--input-file-type auto detects a JSON Schema input correctly."""
    with TemporaryDirectory() as output_dir:
        output_file: Path = Path(output_dir) / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(JSON_SCHEMA_DATA_PATH / 'person.json'),
                '--output',
                str(output_file),
                '--input-file-type',
                'auto',
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: person.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import Any, List, Optional
from pydantic import BaseModel, Field, conint
class Person(BaseModel):
    firstName: Optional[str] = Field(None, description="The person\'s first name.")
    lastName: Optional[str] = Field(None, description="The person\'s last name.")
    age: Optional[conint(ge=0.0)] = Field(
        None, description='Age in years which must be equal to or greater than zero.'
    )
    friends: Optional[List] = None
    comment: Optional[Any] = None
'''
        )
    # Invoking main() with no arguments must exit via argparse.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_main_autodetect_failed():
    """Auto-detection on an unparseable input returns Exit.ERROR."""
    with TemporaryDirectory() as input_dir, TemporaryDirectory() as output_dir:
        input_file: Path = Path(input_dir) / 'input.yaml'
        output_file: Path = Path(output_dir) / 'output.py'
        # A bare ':' is valid neither as OpenAPI, JSON Schema, JSON nor YAML.
        input_file.write_text(':')
        return_code: Exit = main(
            [
                '--input',
                str(input_file),
                '--output',
                str(output_file),
                '--input-file-type',
                'auto',
            ]
        )
        assert return_code == Exit.ERROR
    # Invoking main() with no arguments must exit via argparse.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_main_jsonschema():
    """Explicit --input-file-type jsonschema generates the Person model."""
    with TemporaryDirectory() as output_dir:
        output_file: Path = Path(output_dir) / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(JSON_SCHEMA_DATA_PATH / 'person.json'),
                '--output',
                str(output_file),
                '--input-file-type',
                'jsonschema',
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: person.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import Any, List, Optional
from pydantic import BaseModel, Field, conint
class Person(BaseModel):
    firstName: Optional[str] = Field(None, description="The person\'s first name.")
    lastName: Optional[str] = Field(None, description="The person\'s last name.")
    age: Optional[conint(ge=0.0)] = Field(
        None, description='Age in years which must be equal to or greater than zero.'
    )
    friends: Optional[List] = None
    comment: Optional[Any] = None
'''
        )
    # Invoking main() with no arguments must exit via argparse.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_main_jsonschema_nested_deep():
    """Nested $refs generate a package with per-depth modules.

    Output to a directory produces an ``__init__.py`` plus one module per
    nested reference path.
    """
    import os

    # chdir so relative $ref resolution matches the fixture layout; restore
    # afterwards so later tests are not affected (previously it leaked).
    previous_cwd = os.getcwd()
    os.chdir(DATA_PATH / 'jsonschema')
    try:
        with TemporaryDirectory() as output_dir:
            output_init_file: Path = Path(output_dir) / '__init__.py'
            output_nested_file: Path = Path(output_dir) / 'nested/deep.py'
            output_empty_parent_nested_file: Path = Path(
                output_dir
            ) / 'empty_parent/nested/deep.py'
            return_code: Exit = main(
                [
                    '--input',
                    str(JSON_SCHEMA_DATA_PATH / 'nested_person.json'),
                    '--output',
                    str(output_dir),
                    '--input-file-type',
                    'jsonschema',
                ]
            )
            assert return_code == Exit.OK
            assert (
                output_init_file.read_text()
                == '''# generated by datamodel-codegen:
# filename: nested_person.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel
from .empty_parent.nested import deep as deep_1
from .nested import deep
class NestedPerson(BaseModel):
    nested_deep_childJson: Optional[deep.Json] = None
    nested_deep_childAnother: Optional[deep.Another] = None
    empty_parent_nested_deep_childJson: Optional[deep_1.Json] = None
'''
            )
            assert (
                output_nested_file.read_text()
                == '''# generated by datamodel-codegen:
# filename: nested_person.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel
class Json(BaseModel):
    firstName: Optional[str] = None
class Another(BaseModel):
    firstName: Optional[str] = None
'''
            )
            assert (
                output_empty_parent_nested_file.read_text()
                == '''# generated by datamodel-codegen:
# filename: nested_person.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel
class Json(BaseModel):
    firstName: Optional[str] = None
'''
            )
        # Invoking main() with no arguments must exit via argparse.
        with pytest.raises(SystemExit):
            main()
    finally:
        os.chdir(previous_cwd)
@freeze_time('2019-07-26')
def test_main_json():
    """Generate pydantic models from a plain JSON data file (type inference)."""
    with TemporaryDirectory() as output_dir:
        output_file: Path = Path(output_dir) / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(JSON_DATA_PATH / 'pet.json'),
                '--output',
                str(output_file),
                '--input-file-type',
                'json',
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: pet.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from pydantic import BaseModel
class Pet(BaseModel):
name: str
age: int
class Model(BaseModel):
Pet: Pet
'''
        )
    # Calling main() with no arguments prints usage and raises SystemExit.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_main_json_failed():
    """A syntactically broken JSON input must make the CLI return ERROR."""
    with TemporaryDirectory() as workdir:
        destination = Path(workdir) / 'output.py'
        cli_args = [
            '--input',
            str(JSON_DATA_PATH / 'broken.json'),
            '--output',
            str(destination),
            '--input-file-type',
            'json',
        ]
        assert main(cli_args) == Exit.ERROR
    # No arguments at all: argparse prints usage and raises SystemExit.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_main_yaml():
    """Generate pydantic models from a plain YAML data file (type inference)."""
    with TemporaryDirectory() as output_dir:
        output_file: Path = Path(output_dir) / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(YAML_DATA_PATH / 'pet.yaml'),
                '--output',
                str(output_file),
                '--input-file-type',
                'yaml',
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: pet.yaml
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from pydantic import BaseModel
class Pet(BaseModel):
name: str
age: int
class Model(BaseModel):
Pet: Pet
'''
        )
    # Calling main() with no arguments prints usage and raises SystemExit.
    with pytest.raises(SystemExit):
        main()
# Expected module contents, keyed by path parts relative to the output package.
@pytest.mark.parametrize(
    'expected',
    [
        {
            (
                '__init__.py',
            ): '''\
# generated by datamodel-codegen:
# filename: modular.yaml
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel
from . import models
class Id(BaseModel):
__root__: str
class Error(BaseModel):
code: int
message: str
class Result(BaseModel):
event: Optional[models.Event] = None
class Source(BaseModel):
country: Optional[str] = None
''',
            (
                'models.py',
            ): '''\
# generated by datamodel-codegen:
# filename: modular.yaml
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
class Species(Enum):
dog = 'dog'
cat = 'cat'
snake = 'snake'
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
species: Optional[Species] = None
class User(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Event(BaseModel):
name: Optional[Union[str, float, int, bool, Dict[str, Any], List[str]]] = None
''',
            (
                'collections.py',
            ): '''\
# generated by datamodel-codegen:
# filename: modular.yaml
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import AnyUrl, BaseModel, Field
from . import models
class Pets(BaseModel):
__root__: List[models.Pet]
class Users(BaseModel):
__root__: List[models.User]
class Rules(BaseModel):
__root__: List[str]
class api(BaseModel):
apiKey: Optional[str] = Field(
None, description='To be used as a dataset parameter value'
)
apiVersionNumber: Optional[str] = Field(
None, description='To be used as a version parameter value'
)
apiUrl: Optional[AnyUrl] = Field(
None, description="The URL describing the dataset\'s fields"
)
apiDocumentationUrl: Optional[AnyUrl] = Field(
None, description='A URL to the API console for each API'
)
class apis(BaseModel):
__root__: List[api]
''',
            (
                'foo',
                '__init__.py',
            ): '''\
# generated by datamodel-codegen:
# filename: modular.yaml
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel
from .. import Id
class Tea(BaseModel):
flavour: Optional[str] = None
id: Optional[Id] = None
class Cocoa(BaseModel):
quality: Optional[int] = None
''',
            (
                'foo',
                'bar.py',
            ): '''\
# generated by datamodel-codegen:
# filename: modular.yaml
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
class Thing(BaseModel):
attributes: Optional[Dict[str, Any]] = None
class Thang(BaseModel):
attributes: Optional[List[Dict[str, Any]]] = None
class Clone(Thing):
pass
''',
            (
                'woo',
                '__init__.py',
            ): '''\
# generated by datamodel-codegen:
# filename: modular.yaml
# timestamp: 1985-10-26T08:21:00+00:00
''',
            (
                'woo',
                'boo.py',
            ): '''\
# generated by datamodel-codegen:
# filename: modular.yaml
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel
from .. import Source, foo
class Chocolate(BaseModel):
flavour: Optional[str] = None
source: Optional[Source] = None
cocoa: Optional[foo.Cocoa] = None
''',
        }
    ],
)
def test_main_modular(
    tmpdir_factory: TempdirFactory, expected: Mapping[str, str]
) -> None:
    """Test main function on modular file."""
    output_directory = Path(tmpdir_factory.mktemp('output'))
    input_filename = OPEN_API_DATA_PATH / 'modular.yaml'
    output_path = output_directory / 'model'
    with freeze_time(TIMESTAMP):
        main(['--input', str(input_filename), '--output', str(output_path)])
    # Every generated module must match its expected content exactly.
    for key, value in expected.items():
        result = output_path.joinpath(*key).read_text()
        assert result == value
def test_main_modular_no_file() -> None:
    """Test main function on modular file with no output name."""
    # A modular schema cannot be emitted to stdout, so omitting --output fails.
    modular_schema = OPEN_API_DATA_PATH / 'modular.yaml'
    return_code = main(['--input', str(modular_schema)])
    assert return_code == Exit.ERROR
def test_main_modular_filename(tmpdir_factory: TempdirFactory) -> None:
    """Test main function on modular file with filename."""
    # A modular schema needs a directory output; a single .py target is an error.
    out_dir = Path(tmpdir_factory.mktemp('output'))
    modular_schema = OPEN_API_DATA_PATH / 'modular.yaml'
    single_file_target = out_dir / 'model.py'
    return_code = main(
        ['--input', str(modular_schema), '--output', str(single_file_target)]
    )
    assert return_code == Exit.ERROR
@pytest.mark.parametrize(
    'expected',
    [
        '''\
# generated by datamodel-codegen:
# filename: api.yaml
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import AnyUrl, BaseModel, Field
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Pets(BaseModel):
__root__: List[Pet]
class User(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Users(BaseModel):
__root__: List[User]
class Id(BaseModel):
__root__: str
class Rules(BaseModel):
__root__: List[str]
class Error(BaseModel):
code: int
message: str
class api(BaseModel):
apiKey: Optional[str] = Field(
None, description='To be used as a dataset parameter value'
)
apiVersionNumber: Optional[str] = Field(
None, description='To be used as a version parameter value'
)
apiUrl: Optional[AnyUrl] = Field(
None, description="The URL describing the dataset\'s fields"
)
apiDocumentationUrl: Optional[AnyUrl] = Field(
None, description='A URL to the API console for each API'
)
class apis(BaseModel):
__root__: List[api]
class Event(BaseModel):
name: Optional[str] = None
class Result(BaseModel):
event: Optional[Event] = None
'''
    ],
)
def test_main_no_file(capsys: CaptureFixture, expected: str) -> None:
    """Test main function on non-modular file with no output name."""
    input_filename = OPEN_API_DATA_PATH / 'api.yaml'
    with freeze_time(TIMESTAMP):
        main(['--input', str(input_filename)])
    # With no --output, the generated models go to stdout; stderr stays empty.
    captured = capsys.readouterr()
    assert captured.out == expected
    assert not captured.err
# Expected output when rendering with the custom templates in DATA_PATH/templates
# (note the extra-template-data comment on Pet and the stripped Field() calls).
@pytest.mark.parametrize(
    'expected',
    [
        '''\
# generated by datamodel-codegen:
# filename: api.yaml
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import AnyUrl, BaseModel, Field
class Pet(BaseModel): # 1 2, 1 2, this is just a pet
id: int
name: str
tag: Optional[str] = None
class Pets(BaseModel):
__root__: List[Pet]
class User(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Users(BaseModel):
__root__: List[User]
class Id(BaseModel):
__root__: str
class Rules(BaseModel):
__root__: List[str]
class Error(BaseModel):
code: int
message: str
class api(BaseModel):
apiKey: Optional[str] = None
apiVersionNumber: Optional[str] = None
apiUrl: Optional[AnyUrl] = None
apiDocumentationUrl: Optional[AnyUrl] = None
class apis(BaseModel):
__root__: List[api]
class Event(BaseModel):
name: Optional[str] = None
class Result(BaseModel):
event: Optional[Event] = None
'''
    ],
)
def test_main_custom_template_dir(capsys: CaptureFixture, expected: str) -> None:
    """Test main function with custom template directory."""
    input_filename = OPEN_API_DATA_PATH / 'api.yaml'
    custom_template_dir = DATA_PATH / 'templates'
    extra_template_data = OPEN_API_DATA_PATH / 'extra_data.json'
    with freeze_time(TIMESTAMP):
        main(
            [
                '--input',
                str(input_filename),
                '--custom-template-dir',
                str(custom_template_dir),
                '--extra-template-data',
                str(extra_template_data),
            ]
        )
    captured = capsys.readouterr()
    assert captured.out == expected
    assert not captured.err
@freeze_time('2019-07-26')
def test_pyproject():
    """Formatting settings from a pyproject.toml in the output directory are applied.

    The copied pyproject.toml forces a very short line length, so every import
    and Field() call in the generated file is wrapped onto multiple lines.
    """
    with TemporaryDirectory() as output_dir:
        output_dir = Path(output_dir)
        # Place the project config next to the output so the CLI picks it up.
        pyproject_toml = Path(DATA_PATH) / "project" / "pyproject.toml"
        shutil.copy(pyproject_toml, output_dir)
        output_file: Path = output_dir / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(OPEN_API_DATA_PATH / 'api.yaml'),
                '--output',
                str(output_file),
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: api.yaml
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import (
annotations,
)
from typing import (
List,
Optional,
)
from pydantic import (
AnyUrl,
BaseModel,
Field,
)
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Pets(BaseModel):
__root__: List[Pet]
class User(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Users(BaseModel):
__root__: List[User]
class Id(BaseModel):
__root__: str
class Rules(BaseModel):
__root__: List[str]
class Error(BaseModel):
code: int
message: str
class api(BaseModel):
apiKey: Optional[
str
] = Field(
None,
description="To be used as a dataset parameter value",
)
apiVersionNumber: Optional[
str
] = Field(
None,
description="To be used as a version parameter value",
)
apiUrl: Optional[
AnyUrl
] = Field(
None,
description="The URL describing the dataset\'s fields",
)
apiDocumentationUrl: Optional[
AnyUrl
] = Field(
None,
description="A URL to the API console for each API",
)
class apis(BaseModel):
__root__: List[api]
class Event(BaseModel):
name: Optional[str] = None
class Result(BaseModel):
event: Optional[
Event
] = None
'''
        )
    # Calling main() with no arguments prints usage and raises SystemExit.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_validation():
    """--validation on a valid OpenAPI document still generates normally."""
    with TemporaryDirectory() as output_dir:
        output_file: Path = Path(output_dir) / 'output.py'
        return_code: Exit = main(
            [
                '--input',
                str(OPEN_API_DATA_PATH / 'api.yaml'),
                '--output',
                str(output_file),
                '--validation',
            ]
        )
        assert return_code == Exit.OK
        assert (
            output_file.read_text()
            == '''# generated by datamodel-codegen:
# filename: api.yaml
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import AnyUrl, BaseModel, Field
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Pets(BaseModel):
__root__: List[Pet]
class User(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Users(BaseModel):
__root__: List[User]
class Id(BaseModel):
__root__: str
class Rules(BaseModel):
__root__: List[str]
class Error(BaseModel):
code: int
message: str
class api(BaseModel):
apiKey: Optional[str] = Field(
None, description='To be used as a dataset parameter value'
)
apiVersionNumber: Optional[str] = Field(
None, description='To be used as a version parameter value'
)
apiUrl: Optional[AnyUrl] = Field(
None, description="The URL describing the dataset\'s fields"
)
apiDocumentationUrl: Optional[AnyUrl] = Field(
None, description='A URL to the API console for each API'
)
class apis(BaseModel):
__root__: List[api]
class Event(BaseModel):
name: Optional[str] = None
class Result(BaseModel):
event: Optional[Event] = None
'''
        )
    # Calling main() with no arguments prints usage and raises SystemExit.
    with pytest.raises(SystemExit):
        main()
@freeze_time('2019-07-26')
def test_validation_failed():
    """--validation on an invalid OpenAPI document must return ERROR."""
    with TemporaryDirectory() as workdir:
        target = Path(workdir) / 'output.py'
        cli_args = [
            '--input',
            str(OPEN_API_DATA_PATH / 'invalid.yaml'),
            '--output',
            str(target),
            '--input-file-type',
            'openapi',
            '--validation',
        ]
        return_code = main(cli_args)
        assert return_code == Exit.ERROR
| StarcoderdataPython |
import os
import sys
import time

# Create new process: fork() returns 0 in the child and the child's PID
# in the parent.
pid = os.fork()
# Print text
# Marker letter for this process.
# NOTE(review): 'p' is assigned when pid == 0, i.e. in the *child*, which
# looks inverted relative to conventional parent/child labels -- confirm
# intent. As written, only the parent survives and it writes 'c'.
c = 'p' if pid == 0 else 'c'
if pid == 0:
    # Child exits immediately; only the parent reaches the loop below.
    sys.exit(0)
while True:
    # Parent: emit its marker to stderr once per second, forever.
    time.sleep(1)
    sys.stderr.write(c)
| StarcoderdataPython |
#!/bin/env python3
# coding=utf-8
from dns_restful.main import main

# Entry point: launch the dns_restful service with debug mode enabled.
if __name__ == '__main__':
    main(debug=True)
| StarcoderdataPython |
1664461 | # buildifier: disable=module-docstring
# buildifier: disable=function-docstring
def exercise_the_api():
    # Smoke-test call into the API under test; the result is intentionally
    # discarded (hence the @unused marker for the linter).
    _var6 = configuration_field("foo", "bar")  # @unused

exercise_the_api()
def transition_func(settings):
    """A no-op transition function."""
    # A transition returns the (possibly modified) build settings; this one
    # passes them through unchanged.
    return settings

# Register the identity transition with no declared inputs or outputs.
my_transition = transition(implementation = transition_func, inputs = [], outputs = [])
def _build_setting_impl(ctx):
    # Shared no-op implementation for the build-setting rules declared below;
    # it provides no extra providers.
    _ignore = [ctx]  # @unused
    return []
# A string build setting that IS settable on the command line (flag = True).
string_flag = rule(
    doc = "A string flag.",
    implementation = _build_setting_impl,
    build_setting = config.string(flag = True),
)

# An integer build setting that is NOT settable on the command line
# (flag = False). NOTE(review): the doc string says "flag", which does not
# match flag = False -- confirm the wording.
int_setting = rule(
    doc = "An integer flag.",
    implementation = _build_setting_impl,
    build_setting = config.int(flag = False),
)
| StarcoderdataPython |
from transformers import pipeline

# Build a masked-language-model pipeline backed by the roberta-base checkpoint.
fill_mask = pipeline('fill-mask', model='roberta-base')
# Ask the model for its top completions of the masked token.
predictions = fill_mask("Hello I'm from africa and <mask>.")
print(predictions)
1696412 | <filename>tests/test_client.py
"""Tests for clovek_ne_jezi_se.Client"""
import builtins
from copy import deepcopy
from clovek_ne_jezi_se.client import Client
from clovek_ne_jezi_se.agents import HumanPlayer
from clovek_ne_jezi_se.game_state import (
MoveContainer, BoardSpace, EMPTY_SYMBOL
)
class TestClient:
    # Shared fixture configuration: four human players on a board with
    # 4-space sections, 4 pieces each, and a 6-sided die.
    experiment_config = dict(
        players=[
            dict(name='red', agent='HumanPlayer', kwargs=dict(print_game_state=False)),
            dict(name='blue', agent='HumanPlayer', kwargs=dict(print_game_state=False)),
            dict(name='green', agent='HumanPlayer', kwargs=dict(print_game_state=False)),
            dict(name='yellow', agent='HumanPlayer', kwargs=dict(print_game_state=False))
        ],
        board=dict(
            main_board_section_length=4,
            pieces_per_player=4,
            number_of_dice_faces=6
        )
    )
    player_names = [player['name'] for player in experiment_config['players']]
    # eval() maps the agent name string to its class; acceptable here only
    # because the config is hard-coded in this test module.
    players = [
        eval(player['agent'])(name=player['name'], **player['kwargs'])
        for player in experiment_config['players']
    ]
    # One initialized client shared by all tests; tests deepcopy it before
    # mutating any state.
    client = Client(players=players, **experiment_config['board'])
    client.initialize()

    def test_next_player(self):
        """One full rotation of next_player() yields the players in order."""
        player_round = [
            self.client.next_player() for _ in range(len(self.player_names))
        ]
        assert player_round == self.players

    def test_one_round_of_play(self, mocker, monkeypatch):
        """After one round (roll 6 then 1 each), every player's 0th piece has
        entered the main board one step past its entry index."""
        played_client = deepcopy(self.client)
        expected_client = deepcopy(self.client)
        # Set roll values to 6 then 1 for each player turn
        mocker.patch.object(
            played_client, 'roll', side_effect=4 * [6, 1]
        )
        # For HumanAgent choose_move input, always select 0th
        idx_move_input = 0
        monkeypatch.setattr(builtins, 'input', lambda x: idx_move_input)
        # Play one round with fixed (monkeypatched) dice and move choice
        for _ in range(len(played_client.players)):
            played_client.take_turn()
        played_game_state = played_client.get_game_state()
        # Move 0th piece to main board from waiting for each player
        expected_game_state = expected_client.get_game_state()
        for player_name in expected_client._player_names:
            expected_game_state.do(MoveContainer(
                from_space=BoardSpace(
                    kind='waiting', idx=0, occupied_by=player_name,
                    allowed_occupants=[player_name, EMPTY_SYMBOL]
                ),
                to_space=BoardSpace(
                    kind='main',
                    idx=expected_game_state.get_main_entry_index(
                        player_name
                    ) + 1,
                    occupied_by=EMPTY_SYMBOL,
                    allowed_occupants=self.player_names + [EMPTY_SYMBOL]
                )
            ))
        played_waiting = played_game_state.waiting_areas_to_dict()
        expected_waiting = expected_game_state.waiting_areas_to_dict()
        assert played_waiting == expected_waiting
        played_main_spaces = played_game_state.main_spaces_to_list()
        expected_main_spaces = expected_game_state.main_spaces_to_list()
        assert played_main_spaces == expected_main_spaces
        played_home = played_game_state.home_areas_to_dict()
        expected_home = expected_game_state.home_areas_to_dict()
        assert played_home == expected_home

    def test_send_player_home(self, monkeypatch):
        """Red landing on yellow's occupied space sends yellow back to waiting."""
        played_client = deepcopy(self.client)
        expected_client = deepcopy(self.client)
        # Move red player to main 0, yellow to main 1
        played_game_state = played_client.get_game_state()
        played_game_state.do(MoveContainer(
            from_space=BoardSpace(
                kind='waiting', idx=0,
                occupied_by='red',
                allowed_occupants=['red', EMPTY_SYMBOL]
            ),
            to_space=BoardSpace(
                kind='main', idx=0,
                occupied_by=EMPTY_SYMBOL,
                allowed_occupants=self.player_names + [EMPTY_SYMBOL]
            )
        ))
        played_game_state.do(MoveContainer(
            from_space=BoardSpace(
                kind='waiting', idx=0,
                occupied_by='yellow',
                allowed_occupants=['yellow', EMPTY_SYMBOL]
            ),
            to_space=BoardSpace(
                kind='main', idx=1,
                occupied_by=EMPTY_SYMBOL,
                allowed_occupants=self.player_names + [EMPTY_SYMBOL]
            )
        ))
        # Set roll value to 1
        monkeypatch.setattr(played_client, 'roll', lambda: 1)
        # For HumanAgent choose_move input, always select 0th
        idx_move_input = 0
        monkeypatch.setattr(builtins, 'input', lambda x: idx_move_input)
        # Play once (red) with fixed (monkeypatched) dice and move choice
        played_client.take_turn()
        expected_game_state = expected_client.get_game_state()
        # Expect red to be at main index 1, all yellow back in waiting
        expected_game_state.do(MoveContainer(
            from_space=BoardSpace(
                kind='waiting', idx=0, occupied_by='red',
                allowed_occupants=['red', EMPTY_SYMBOL]
            ),
            to_space=BoardSpace(
                kind='main', idx=1,
                occupied_by='red',
                allowed_occupants=self.player_names + [EMPTY_SYMBOL]
            )
        ))
        played_waiting = played_game_state.waiting_areas_to_dict()
        expected_waiting = expected_game_state.waiting_areas_to_dict()
        assert played_waiting == expected_waiting
        played_main_spaces = played_game_state.main_spaces_to_list()
        expected_main_spaces = expected_game_state.main_spaces_to_list()
        assert played_main_spaces == expected_main_spaces
        played_home = played_game_state.home_areas_to_dict()
        expected_home = expected_game_state.home_areas_to_dict()
        assert played_home == expected_home

    def test_play_finished(self, monkeypatch):
        """play() terminates and returns the winner once their last piece
        can reach the final open home spot."""
        played_client = deepcopy(self.client)
        played_game_state = played_client.get_game_state()
        idx_winner = 0
        winner_name = played_game_state.player_names[idx_winner]
        # move all pieces but first to home
        for idx in range(1, played_game_state.pieces_per_player):
            played_game_state.do(MoveContainer(
                from_space=BoardSpace(
                    kind='waiting', idx=idx,
                    occupied_by=winner_name,
                    allowed_occupants=[winner_name, EMPTY_SYMBOL]
                ),
                to_space=BoardSpace(
                    kind='home', idx=idx,
                    occupied_by=EMPTY_SYMBOL,
                    allowed_occupants=[winner_name, EMPTY_SYMBOL]
                )
            ))
        # Move first piece to one before home
        winner_enter_main_idx = played_game_state\
            .get_main_entry_index(winner_name)
        n_players = len(played_client.players)
        # NOTE(review): operator precedence makes this
        # ((entry - 1) % section_length) * n_players, NOT modulo the full
        # board length (section_length * n_players) -- confirm intent.
        winner_prehome_idx = (winner_enter_main_idx - 1) % \
            played_client.main_board_section_length * n_players
        print(winner_prehome_idx)  # NOTE(review): debug leftover
        played_game_state.do(MoveContainer(
            from_space=BoardSpace(
                kind='waiting', idx=0,
                occupied_by=winner_name,
                allowed_occupants=[winner_name, EMPTY_SYMBOL]
            ),
            to_space=BoardSpace(
                kind='main', idx=winner_prehome_idx,
                occupied_by=EMPTY_SYMBOL,
                allowed_occupants=self.player_names + [EMPTY_SYMBOL]
            )
        ))
        monkeypatch.setattr(played_client, 'roll', lambda: 1)
        # Only one move possible, advance 1 to last open home spot
        monkeypatch.setattr(builtins, 'input', lambda x: 0)
        winner, _ = played_client.play()
        assert winner == played_client.players[idx_winner]
| StarcoderdataPython |
1662068 | <reponame>bbfrederick/capcalc<filename>capcalc/utils.py
#!/usr/bin/env python
#
# Copyright 2016-2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Author: frederic $
# $Date: 2016/06/14 12:04:51 $
# $Id: showstxcorr,v 1.11 2016/06/14 12:04:51 frederic Exp $
#
from __future__ import division, print_function
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
def statefilter(thestates, minlength, minhold, debug=False):
    """Despike a state timecourse by removing runs that are too short.

    A run shorter than ``minlength`` that returns to the previous state is
    erased ("patch"); a run shorter than ``minhold`` that switches to a
    different state is absorbed into the previous state ("fill").
    Returns the filtered states as an int array.
    """
    print("state filtering with length", minlength)
    thefiltstates = np.zeros((len(thestates)), dtype=int)
    thefiltstates[0] = thestates[0]
    currentstate = thestates[0]
    # '+ 0' forces a copy in case elements are numpy scalars.
    laststate = currentstate + 0
    currentlen = 1
    lastlen = 1
    for state in range(1, len(thestates)):
        if thestates[state] == currentstate:
            # Same state as the current run: extend it.
            currentlen += 1
            thefiltstates[state] = thestates[state]
            if debug:
                print("state", state, "(", thestates[state], "):continue")
        else:
            if (currentlen < minlength) and (thestates[state] == laststate):
                # Short excursion that returns to the previous state: erase it.
                thefiltstates[state - currentlen : state + 1] = laststate
                currentstate = laststate + 0
                currentlen += lastlen
                if debug:
                    print("state", state, "(", thestates[state], "):patch")
            elif (currentlen < minhold) and (thestates[state] != laststate):
                # Run too short to "hold" before switching elsewhere: absorb it.
                thefiltstates[state - currentlen : state + 1] = laststate
                currentstate = laststate + 0
                currentlen += lastlen
                if debug:
                    print("state", state, "(", thestates[state], "):fill")
            else:
                # Legitimate state switch: start a new run.
                # NOTE(review): the '+ 1' on lastlen is undocumented -- verify.
                lastlen = currentlen + 1
                currentlen = 1
                laststate = currentstate + 0
                currentstate = thestates[state]
                thefiltstates[state] = thestates[state]
                if debug:
                    print("state", state, "(", thestates[state], "):switch")
    if debug:
        for state in range(len(thestates)):
            print(state, thestates[state], thefiltstates[state])
    return thefiltstates
def statestats(thestates, numlabels, minlabel, minout=1, minhold=1, debug=False):
    """Tabulate per-state run-length statistics and the state transition matrix."""
    # returns statestats and transmat
    #
    # statestats file columns:
    # percentage of TRs in state
    # number of continuous runs in state
    # total number of TRs in state
    # minimum number of TRs in state
    # maximum number of TRs in state
    # average number of TRs in state
    # median number of TRs in state
    # standard deviation of the number of TRs in state
    #
    # transmat contains an n_states by n_states matrix:
    # the number of transitions from state a to state b is in location [a, b]
    #
    minlabel = minlabel  # NOTE(review): no-op assignment -- candidate for removal
    maxlabel = minlabel + numlabels - 1
    numlabels = maxlabel - minlabel + 1  # NOTE(review): recomputes its own input
    transmat = np.zeros((numlabels, numlabels), dtype="float")
    # prefilter
    thestates = statefilter(thestates, minout, minhold, debug=debug)
    # now tabulate states
    currentstate = thestates[0]
    currentlen = 1
    # One run-length list per label, indexed by (state - minlabel).
    lenlist = [[]]
    for i in range(numlabels - 1):
        lenlist.append([])
    for state in range(1, len(thestates)):
        if thestates[state] == currentstate:
            currentlen += 1
        else:
            # Run ended: record its length and count the transition.
            lenlist[currentstate - minlabel].append(currentlen)
            currentstate = thestates[state]
            currentlen = 1
            sourcestate = thestates[state - 1] - minlabel
            deststate = thestates[state] - minlabel
            transmat[sourcestate, deststate] += 1.0
    # for debugging - remove!
    # for i in range(numlabels):
    # transmat[0, i] = i
    # Close out the final run.
    lenlist[currentstate - minlabel].append(currentlen)
    thestats = []
    for i in range(numlabels):
        lenarray = np.asarray(lenlist[i], dtype="float")
        if len(lenarray) > 2:
            thestats.append(
                [
                    100.0 * np.sum(lenarray) / len(thestates),
                    len(lenarray),
                    np.sum(lenarray),
                    np.min(lenarray),
                    np.max(lenarray),
                    np.mean(lenarray),
                    np.median(lenarray),
                    np.std(lenarray),
                ]
            )
        elif len(lenarray) > 1:
            # Exactly two runs: second length stands in for the median, std = 0.
            thestats.append(
                [
                    100.0 * np.sum(lenarray) / len(thestates),
                    len(lenarray),
                    np.sum(lenarray),
                    np.min(lenarray),
                    np.max(lenarray),
                    np.mean(lenarray),
                    lenarray[1],
                    0.0,
                ]
            )
        elif len(lenarray) > 0:
            # Single run: all order statistics collapse to its length.
            thestats.append(
                [
                    100.0 * np.sum(lenarray) / len(thestates),
                    len(lenarray),
                    np.sum(lenarray),
                    lenarray[0],
                    lenarray[0],
                    lenarray[0],
                    lenarray[0],
                    0.0,
                ]
            )
        else:
            # State never occurred.
            thestats.append([0.0, len(lenarray), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    return transmat, np.asarray(thestats, dtype="float"), lenlist
def silhouette_test(
    X, kmeans, n_clusters, numsegs, segsize, summaryonly, display=False
):
    """Compute silhouette metrics per temporal segment of X; optionally plot.

    Returns (thesilavgs, thesilclusterstats): the per-segment average
    silhouette score, and per segment/cluster the mean, median, min and max
    sample silhouette values. Segments in which some cluster label never
    occurs are skipped (left as zeros).
    """
    print("generating cluster labels")
    cluster_labels = kmeans.predict(X)
    thesilavgs = np.zeros(numsegs, dtype="float")
    thesilclusterstats = np.zeros((numsegs, 4, n_clusters), dtype="float")
    print("calculating silhouette stats")
    for segment in range(numsegs):
        seg_X = X[segment * segsize : (segment + 1) * segsize]
        seg_cluster_labels = cluster_labels[segment * segsize : (segment + 1) * segsize]
        # do a quick sanity check to see if all the labels are present
        clusternums = np.zeros(n_clusters, dtype="int")
        for i in range(len(seg_cluster_labels)):
            clusternums[seg_cluster_labels[i]] += 1
        if np.min(clusternums) > 0:
            thesilavgs[segment] = metrics.silhouette_score(seg_X, seg_cluster_labels)
            print(
                "average silhouette score for segment",
                segment,
                "=",
                thesilavgs[segment],
            )
            if not summaryonly:
                print("doing silhouette samples")
                sample_silhouette_values = metrics.silhouette_samples(
                    seg_X, seg_cluster_labels
                )
                if display:
                    # Create a subplot with 1 row and 2 columns
                    fig, (ax1) = plt.subplots(1, 1)
                    fig.set_size_inches(8, 4.5)
                    # The 1st subplot is the silhouette plot
                    # The silhouette coefficient can range from -1, 1 but in this example all
                    # lie within [-0.3, 1]
                    ax1.set_xlim([-0.3, 1])
                    # The (n_clusters+1)*10 is for inserting blank space between silhouette
                    # plots of individual clusters, to demarcate them clearly.
                    ax1.set_ylim([0, len(seg_X) + (n_clusters + 1) * 10])
                y_lower = 10
                for i in range(n_clusters):
                    # Aggregate the silhouette scores for samples belonging to
                    # cluster i, and sort them
                    ith_cluster_silhouette_values = sample_silhouette_values[
                        seg_cluster_labels == i
                    ]
                    ith_cluster_silhouette_values.sort()
                    thesilclusterstats[segment, 0, i] = np.mean(
                        ith_cluster_silhouette_values
                    )
                    thesilclusterstats[segment, 1, i] = np.median(
                        ith_cluster_silhouette_values
                    )
                    thesilclusterstats[segment, 2, i] = ith_cluster_silhouette_values[0]
                    thesilclusterstats[segment, 3, i] = ith_cluster_silhouette_values[
                        -1
                    ]
                    size_cluster_i = ith_cluster_silhouette_values.shape[0]
                    if display:
                        y_upper = y_lower + size_cluster_i
                        # NOTE(review): cm.spectral was removed in matplotlib
                        # 2.2+; modern versions need plt.get_cmap("Spectral")
                        # -- confirm the targeted matplotlib version.
                        color = cm.spectral(float(i) / n_clusters)
                        ax1.fill_betweenx(
                            np.arange(y_lower, y_upper),
                            0,
                            ith_cluster_silhouette_values,
                            facecolor=color,
                            edgecolor=color,
                            alpha=0.7,
                        )
                        # Label the silhouette plots with their cluster numbers at the middle
                        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
                        # Compute the new y_lower for next plot
                        y_lower = y_upper + 10  # 10 for the 0 samples
                if display:
                    ax1.set_title("The silhouette plot for the various clusters.")
                    ax1.set_xlabel("The silhouette coefficient values")
                    ax1.set_ylabel("Cluster label")
                    # The vertical line for average silhouette score of all the values
                    ax1.axvline(x=thesilavgs[segment], color="red", linestyle="--")
                    ax1.set_yticks([])  # Clear the yaxis labels / ticks
                    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
                    plt.suptitle(
                        (
                            "Silhouette analysis for KMeans clustering on sample data "
                            "with n_clusters = %d" % n_clusters
                        ),
                        fontsize=14,
                        fontweight="bold",
                    )
                    plt.show()
        else:
            print("states are not fully populated - skipping stats")
    return thesilavgs, thesilclusterstats
| StarcoderdataPython |
from beetl.task_datasets import BeetlSleepTutorial, BeetlSleepSource

# Download the tutorial sleep dataset and load it in several subject splits.
ds = BeetlSleepTutorial()
path = ds.download()
# Load all subject data
X, y, info = ds.get_data()
# Assume source group is subject 0-4, target group is subject 5-7,
# and subject 8,9 are from target group for testing.
X_source_train, y_source_train, info = ds.get_data(subjects=range(5))
X_target_train, y_target_train, info = ds.get_data(subjects=range(5, 8))
X_target_test, y_target_test, _ = ds.get_data(subjects=range(8, 10))

################################
# For Sleep Source
# Same download/load flow for the full source dataset.
ds = BeetlSleepSource()
path = ds.download()
# Load all subject data
X, y, info = ds.get_data()
3380511 | <filename>02.py
# Day 2: http://adventofcode.com/2016/day/2
# Puzzle input (Advent of Code 2016, day 2): one string of U/D/L/R moves
# per keypad button to press.
inp = [
    'RLRLLLULULULUUDUULULRDDLURURDDLDUUDDLRDDUUUDDRUDLRRDDUDUUDULUDRDULRUDRULRDRUDLDDULRRDLDRLUDDLLDRDDDUDDLUDUDULDRLLDRLULRLURDLULRUUUDRULLUUDLRDLDDUDRRRLDLRUUURRLDDRRRURLLULDUULLDRLRDLLDURDLDDULLDDLDLUURRRURLRURLLRRDURLDUDDLULUUULULLLDRRRRRLULRDUDURURLULRURRRLLUURDURULRRUULDRDLULDLLUDLUDRLUDLRRLDLLDLDUDDLULLDRULRLRULDURRDLDLLUDRLLDRRDLDUDUURUURDUUDDDLDLDDRDLUDLDUUUUDLDRLRURDLURURDLLLUURURDRDLUDLLRUDULLLDLULLULLDLDDRDRRRUDDDUDDDDRULLLLRLDDLLRDRLLLRRLDRRUDRUUURLLLRULRRDURDLDRLDDUUDUUURRLRRUDLDLDDRUDLULLUUDUUUDLUDDRUULLLURUDDDDLRUDDLLLRUR',
    'LDLRLDDDLUDRDRRUDUURLRULLUDDRLURLUULDLLRLLUDLRLRUDLULRLRRLRURLDDDURUDUUURDRLDDLUUUDRUDUDDDLLURLLULRUULLUDRULUDDULDUDUDULLDRUUUULRDUUDLUDURDLLRLLRLUUDUUDRLLLRULUURUDLDRLLDUDLDDRULDULDURRLDDDUDUDDRUDUDRDURLLLLLULDRDDLLUDULLLUDRURLDLDLDULLDDRURRLUDDRLURLULRLDDDUUUURLRDLRURDDURLDLRRLLRLRLUURRLLDDLDRLRDUDDLLDDDURUUDURLRRDUULRRDDRRUULDRLRUDRRLDDRLDRULLDLDURRULDURRRDLRRLRLLLRLDRLLULRRLLLLLDLDDULDLLDLLDUUDDRLURUUUUULRDDLRDLRDRDRDLUDDLDDRULLUDDRLDLLUDRLUURRLUDURURLLRURRURRLRLLRLURURDDDDRRLURDUULLUU',
    'LLRRDURRDLDULRDUDLRDRDRURULDURUDRRURDDDRLDLDRDRDRDRULDUURLULDDUURUULUDULLDUDLLLLDLLLDRLUUULLULDDRRUDDULLLULRDRULDDULDUDRDDLUUURULDLLUDUUUUURUDLLDRDULLRULLDURDRLLDLDRDDURUULUDURRRUULLDUUDDURDURLDLRRLLDURDDLRRRUDLRRRDLDRLUDLUDRDRLDDLLLRLLRURDLRDUUUURRLULDDLDLLLUDRDRLRRDURDDLURDLDDDULLLRRLDDDRULDDDLRRDULUUUDRRULDDLLLURDRRLLLUULDRRRUURRDDLULDRLULDDDLDULDRRRULRULLURLURULLLLRUDRRRDRDRDLDULURLRRRRLRUDDRRRUURUURLLRURURUURRURRDLDLLUDRRRDUDDRDURLLRLRRULD',
    'DULRRDRLRLUDLLURURLLRLRDLLDLLDRDUURLRUUUDLLDUUDDUULDUULLRUDRURLUDRDLRUDDDLULUDLLDRULULLLDRRULDLLUURLRRRLDRDLDRURRRRDLRUUDULLRLLLDLRUDLDUUDRLDLRDRLRDLDDDUDLRUDLDDLLLDRLLRRUUDRDDUUURURRRUUDLRRDDRUDLDDULULDLRRLRDDUDRUURRUULURLURUDRRURRRULDDDDURDLUUULUULULRDLRRRRRURURRLRUULDUUURRDRRDLDUUUULLULLLLUDLUUDUURRDLDLRRRLUUURULDULDLDRLLURDRUULLLLLULLLDRURURRUDRRRRUDUDUDRUDUDRDRULUUDRURDDUUDLDLDUURUDURLRLRRDRDRDLLDUDDULLRDLDDRLLDLRDURDDULLLDLLLULDLUUUDLDRDLURUURDDLRDLLLLLRLURDLLLULLRRLU',
    'DUULULUUDUDLLRLRURULLDLRRLURDLLDUDUDDRURRLUDULULDRRDRLUULUDDLUURURDLDDDRDRUDURLDDLUDUURULRRUUDRLURRLRLDURRRULRLDDDRUDDDDDUDDULLLRRLLDULDRULUDLRRDLLUDRDLDULRLLLUULLRULRLLLLUDDRRDRLULDLDLURDDRUDDLDLDLDRULDLLDDUUDULUULULLURDURRLLUDRULLRDUDRDRURDRDRDURUUDULDDRURUDLLUUDUUDURDLRDRURUDRUURLUUURLRLUDRUDRUURLLUDRLURDDURRUDRDRLRRLDDDRDDLUUUDDLULDUURUDUDLLDRURDURRDULRLURRDLDDRLUDRLDLRLDDUURRULDDLDUDDLRDULLDDDLDUUUUDLRUDUDLDRDLRDDLDLRLLUDDRRLUDLDUUULLDDRLRRDLRRRRUDDLRLLULRLRDURDUDDRRULLDDLDLRRDLLULDURURDDURLRLULULURRUDUDRDLURULDUDLUULDUUURLLRUDLLRDLRUDRLULDUDRRDUUDUUULUUUDDRUD'
]
def first(arg):
    """Decode the bathroom code on the standard 3x3 keypad.

    *arg* is an iterable of move strings (characters U/D/L/R). Each string
    ends on one keypad button; the list of those buttons is returned.
    """
    # Addressed as keypad[x][y] with x growing rightwards and y growing
    # upwards; (1, 1) is the central '5' key.
    keypad = ((7, 4, 1), (8, 5, 2), (9, 6, 3))
    offsets = {'L': (-1, 0), 'R': (1, 0), 'U': (0, 1), 'D': (0, -1)}
    positions = []
    x, y = 1, 1
    for line in arg:
        for move in line:
            # Unknown characters are treated as "stay put".
            dx, dy = offsets.get(move, (0, 0))
            nx, ny = x + dx, y + dy
            # Ignore any step that would leave the 3x3 grid.
            if 0 <= nx < 3 and 0 <= ny < 3:
                x, y = nx, ny
        positions.append((x, y))
    return [keypad[px][py] for px, py in positions]
def second(arg):
    """Decode the bathroom code on the diamond-shaped keypad.

    *arg* is an iterable of move strings (characters U/D/L/R). Each string
    ends on one keypad button; the list of those buttons is returned.
    """
    # Addressed as keypad[x][y] with x growing rightwards and y growing
    # upwards; Ellipsis marks the gaps outside the diamond. (0, 2) is '5'.
    keypad = (
        (..., ..., 5, ..., ...),
        (..., 'A', 6, 2, ...),
        ('D', 'B', 7, 3, 1),
        (..., 'C', 8, 4, ...),
        (..., ..., 9, ..., ...),
    )
    offsets = {'L': (-1, 0), 'R': (1, 0), 'U': (0, 1), 'D': (0, -1)}
    positions = []
    x, y = 0, 2
    for line in arg:
        for move in line:
            # Unknown characters are treated as "stay put".
            dx, dy = offsets.get(move, (0, 0))
            nx, ny = x + dx, y + dy
            # Stay put when the step leaves the grid or lands on a gap.
            if 0 <= nx < 5 and 0 <= ny < 5 and keypad[nx][ny] is not ...:
                x, y = nx, ny
        positions.append((x, y))
    return [keypad[px][py] for px, py in positions]
if __name__ == '__main__':
    # Solve both parts on the puzzle input and print the keypad codes.
    print('Bathroom code 1:', *first(inp))
    print('Bathroom code 2:', *second(inp))
| StarcoderdataPython |
167321 | from flask_testing import TestCase
from unit_tests.utilities import Utilities
from unittest.mock import MagicMock, patch
from maintain_frontend import main
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.constants.permissions import Permissions
from maintain_frontend.models import LLC1Search
from maintain_frontend.main import app
from flask import url_for, g
import json
class TestLLC1Description(TestCase):
    """Route tests for the LLC1 search-description screens.

    Covers permission redirects, missing wizard state, validation errors,
    and both submission paths (selected address vs. free-text description).
    """

    def create_app(self):
        main.app.testing = True
        Utilities.mock_session_cookie_flask_test(self)
        return main.app

    def setUp(self):
        # Flask reads the upper-case 'TESTING' key; the original lower-case
        # 'Testing' key was silently ignored (create_app sets app.testing too,
        # so behaviour was masked).
        main.app.config['TESTING'] = True
        main.app.config['WTF_CSRF_ENABLED'] = False

    def test_get_redirects_when_no_state(self):
        """GET without wizard state redirects to the start of the flow."""
        self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
        self.mock_session.return_value.llc1_state = None
        self.mock_session.return_value.user.permissions = [Permissions.request_llc1]

        response = self.client.get(url_for("create_llc1.llc1_get_description"))

        self.assert_status(response, 302)
        self.assertRedirects(response, url_for("create_llc1.create_llc1"))

    def test_get_renders_when_state_ok(self):
        """GET with valid wizard state renders the description template."""
        self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
        self.mock_session.return_value.llc1_state = LLC1Search()
        self.mock_session.return_value.user.permissions = [Permissions.request_llc1]

        response = self.client.get(url_for("create_llc1.llc1_get_description"))

        self.assert_status(response, 200)
        self.assert_template_used("search_description.html")

    def test_post_redirects_when_no_state(self):
        """POST without wizard state redirects to the start of the flow."""
        self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
        self.mock_session.return_value.llc1_state = None
        self.mock_session.return_value.user.permissions = [Permissions.request_llc1]

        response = self.client.post(url_for("create_llc1.llc1_set_description"),
                                    data={'charge-geographic-description': 'foo', 'hasAddress': None})

        self.assert_status(response, 302)
        self.assertRedirects(response, url_for("create_llc1.create_llc1"))

    def test_post_renders_error_when_no_description(self):
        """POST with an empty description re-renders the form as a 400."""
        self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
        self.mock_session.return_value.llc1_state = LLC1Search()
        self.mock_session.return_value.user.permissions = [Permissions.request_llc1]

        response = self.client.post(url_for("create_llc1.llc1_set_description"),
                                    data={'has-address': 'No', 'charge-geographic-description': ''})

        self.assertStatus(response, 400)
        self.assert_template_used('search_description.html')

    # NOTE: stacked @patch decorators inject mocks bottom-up, so the innermost
    # patch (AddressConverter) supplies the FIRST mock argument.  The original
    # parameter order was swapped, which wired each mock to the wrong name.
    @patch('maintain_frontend.app.requests.Session')
    @patch('maintain_frontend.add_land_charge.address_confirmation.AddressConverter')
    def test_post_redirects_when_single_address_chosen(self, mock_address_converter, session):
        """POST with a selected address builds the description and redirects."""
        with app.test_request_context():
            self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
            self.mock_session.return_value.llc1_state = LLC1Search()
            self.mock_session.return_value.user.permissions = [Permissions.request_llc1]
            g.session = MagicMock()

            response = MagicMock()
            response.status_code = 201
            session.return_value.post.return_value = response

            selected_address = {
                'address': 'display address',
                'line_1': 'Flat 1',
                'line_2': 'Place',
                'line_3': 'Holder',
                'line_4': 'Flat 1',
                'line_5': 'Flat 1',
                'line_6': 'Flat 1',
                'postcode': 'postcode',
                'uprn': 123456789
            }
            mock_address_converter.to_charge_address.return_value = selected_address

            response = self.client.post(url_for("create_llc1.llc1_set_description"), data={
                'has-address': 'ProvideAddress',
                'selected-address': json.dumps(selected_address),
            })

            # The description is the comma-joined address lines plus postcode.
            self.assertEqual(self.mock_session.return_value.llc1_state.description, 'Flat 1, Place, Holder, Flat 1, '
                                                                                   'Flat 1, Flat 1 postcode')
            self.assert_status(response, 302)
            self.assertRedirects(response, url_for("create_llc1.llc1_get_result"))

    @patch('maintain_frontend.app.requests.Session')
    def test_post_redirects_when_no_single_address_chosen_with_description(self, session):
        """POST with a free-text description stores it and redirects."""
        with app.test_request_context():
            self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
            self.mock_session.return_value.llc1_state = LLC1Search()
            self.mock_session.return_value.user.permissions = [Permissions.request_llc1]
            g.session = MagicMock()

            response = MagicMock()
            response.status_code = 201
            session.return_value.post.return_value = response

            response = self.client.post(url_for("create_llc1.llc1_set_description"), data={
                'has-address': 'No',
                'charge-geographic-description': 'This is a valid description',
            })

            self.assertEqual(self.mock_session.return_value.llc1_state.description, 'This is a valid description')
            self.assert_status(response, 302)
            self.assertRedirects(response, url_for("create_llc1.llc1_get_result"))

    def test_get_without_permission(self):
        """GET without the request_llc1 permission is bounced."""
        self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
        self.mock_session.return_value.user.permissions = []

        response = self.client.get(url_for("create_llc1.llc1_get_description"))

        self.assertStatus(response, 302)
        self.assertRedirects(response, '/not-authorised')

    def test_post_without_permission(self):
        """POST without the request_llc1 permission is bounced."""
        self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
        self.mock_session.return_value.user.permissions = []

        response = self.client.post(url_for("create_llc1.llc1_set_description"),
                                    data={'charge-geographic-description': '', 'hasAddress': 'No'})

        self.assertStatus(response, 302)
        self.assertRedirects(response, '/not-authorised')
| StarcoderdataPython |
149951 | <reponame>HeliumEdu/platform
from django import forms
from django.contrib.admin import ModelAdmin
from django.contrib.auth import admin, password_validation
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.core import exceptions
from rest_framework.authtoken.models import Token
from helium.auth.models import UserProfile
from helium.auth.models import UserSettings
from helium.common.admin import admin_site, BaseModelAdmin
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Helium Edu"
__version__ = "1.4.38"
class AdminUserCreationForm(UserCreationForm):
    """Admin user-creation form that validates the two password fields.

    NOTE(review): the original source contained dataset-redacted
    ``<PASSWORD>`` placeholders (invalid syntax); the field names below are
    reconstructed from Django's UserCreationForm convention
    (``password1``/``password2``) — confirm against the original project.
    """

    def clean_password2(self):
        """Check that both passwords match and satisfy the configured validators.

        Raises:
            forms.ValidationError: when the passwords differ or fail validation.
        """
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 != password2:
            raise forms.ValidationError("You must enter matching passwords.")

        try:
            # NOTE(review): passes the user *class*, not an instance, to the
            # validators — preserved from the original code.
            password_validation.validate_password(password=password2, user=get_user_model())
        except exceptions.ValidationError as e:
            raise forms.ValidationError(list(e.messages))

        return password1

    def save(self, commit=True):
        """Save the new user and force the account active."""
        super().save(commit)

        self.instance.is_active = True
        self.instance.save()

        return self.instance
class UserAdmin(admin.UserAdmin, BaseModelAdmin):
    """Admin configuration for the user model.

    NOTE(review): the original ``add_fieldsets`` contained dataset-redacted
    ``<PASSWORD>`` placeholders; ``password1``/``password2`` are reconstructed
    from Django's UserAdmin convention — confirm against the original project.
    """

    form = UserChangeForm
    add_form = AdminUserCreationForm

    list_display = ('email', 'username', 'created_at', 'last_login', 'is_active')
    list_filter = (
        'is_active', 'profile__phone_verified', 'settings__default_view', 'settings__receive_emails_from_admin',)
    search_fields = ('email', 'username')
    ordering = ('-last_login',)

    # Fields shown on the "add user" page only.
    add_fieldsets = (
        (None, {
            'fields': ('username', 'email', 'password1', 'password2',),
        }),
    )
    fieldsets = None
    filter_horizontal = ()

    def get_readonly_fields(self, request, obj=None):
        """Make timestamps read-only when editing an existing user."""
        if obj:
            return self.readonly_fields + ('created_at', 'last_login',)
        return self.readonly_fields
class UserSettingsAdmin(BaseModelAdmin):
    """Admin list/search configuration for per-user settings rows."""

    list_display = ['time_zone', 'default_view', 'receive_emails_from_admin', 'get_user']
    list_filter = ['default_view', 'week_starts_on', 'receive_emails_from_admin']
    search_fields = ('user__email', 'user__username')
    ordering = ('user__username',)
    readonly_fields = ('user',)

    def has_add_permission(self, request):
        # Settings rows are created alongside the user, never via the admin.
        return False

    def get_user(self, obj):
        """Return the owning user's username, or '' when unset."""
        return obj.user.get_username() if obj.user else ''

    get_user.short_description = 'User'
    get_user.admin_order_field = 'user__username'
class UserProfileAdmin(BaseModelAdmin):
    """Admin list/search configuration for user profile rows."""

    list_display = ['phone', 'phone_verified', 'get_user']
    search_fields = ('user__email', 'user__username')
    ordering = ('user__username',)
    readonly_fields = ('user',)

    def has_add_permission(self, request):
        # Profiles are created alongside the user, never via the admin.
        return False

    def get_user(self, obj):
        """Return the owning user's username, or '' when unset."""
        return obj.user.get_username() if obj.user else ''

    get_user.short_description = 'User'
    get_user.admin_order_field = 'user__username'
class TokenAdmin(ModelAdmin):
    """Read-only admin view of DRF auth tokens."""

    list_display = ['key', 'created', 'get_user']
    search_fields = ('key', 'user__email', 'user__username')
    ordering = ('user__username',)
    readonly_fields = ('user', 'created')

    def get_user(self, obj):
        """Return the token owner's username, or '' when unset."""
        return obj.user.get_username() if obj.user else ''

    get_user.short_description = 'User'
    get_user.admin_order_field = 'user__username'
# Register the models in the Admin, pairing each model with its
# ModelAdmin configuration defined above.
admin_site.register(get_user_model(), UserAdmin)
admin_site.register(UserSettings, UserSettingsAdmin)
admin_site.register(UserProfile, UserProfileAdmin)
admin_site.register(Token, TokenAdmin)
| StarcoderdataPython |
176311 | from sklearn import tree;
# Placeholder feature matrix (one empty sample) — presumably to be filled
# before fitting a sklearn.tree estimator; nothing uses it yet.
Test_data = [[]]
| StarcoderdataPython |
1715396 | <reponame>synapticarbors/pysumma
import os
import re
import json
import pkg_resources
from .option import BaseOption
from .option import OptionContainer
# Absolute path to the decision metadata JSON bundled with the package.
METADATA_PATH = pkg_resources.resource_filename(
    __name__, 'meta/decisions.json')

# Mapping of decision name -> {'description': ..., 'options': [...]},
# consumed by DecisionOption below.
with open(METADATA_PATH, 'r') as f:
    DECISION_META = json.load(f)
class DecisionOption(BaseOption):
    """Container for lines in a decisions file"""

    def __init__(self, name, value):
        super().__init__(name)
        meta = DECISION_META[name]
        self.description = meta['description']
        self.available_options = meta['options']
        self.set_value(value)

    def set_value(self, new_value):
        """Assign the decision's value, rejecting anything not listed in the metadata."""
        if new_value not in self.available_options:
            message = os.linesep.join([
                'Invalid option given for decision: {}'.format(self.name),
                'You gave a value of: {}'.format(new_value),
                'Valid options include: {}'.format(self.available_options)])
            raise ValueError(message)
        self.value = new_value

    def __str__(self):
        # Simulation start/end timestamps must be quoted in the decisions file.
        if self.name in ['simulStart', 'simulFinsh']:
            rendered = "'{}'".format(self.value)
        else:
            rendered = self.value
        return "{0} {1: <20} ! {2}".format(
            self.name, rendered, self.description)
class Decisions(OptionContainer):
    """
    The Decisions object provides an interface to
    a SUMMA decisions file.
    """

    def __init__(self, dirpath, filepath=None):
        super().__init__(DecisionOption, dirpath, filepath)

    def set_option(self, key, value):
        """Set an existing decision's value, or append a new DecisionOption
        when the key is a known SUMMA decision not yet in the container.

        Raises:
            ValueError: when the key is neither present nor a known decision.
        """
        try:
            o = self.get_option(key, strict=True)
            o.set_value(value)
        except ValueError:
            if key in DECISION_META.keys():
                self.options.append(DecisionOption(key, value))
            else:
                raise

    def get_constructor_args(self, line):
        """Parse one decisions-file line into a (name, value) pair.

        Trailing '!' comments are stripped; multi-token values (e.g. quoted
        datetimes) are re-joined with single spaces and unquoted.
        """
        decision, *value = line.split('!')[0].split()
        # Starred assignment always binds a list (possibly empty), so the
        # original `isinstance(value, list)` guard was dead code.
        value = " ".join(value).replace("'", "")
        return decision, value
| StarcoderdataPython |
1660582 | import time
from pyrazine.typing.lambda_client_context import LambdaClientContext
from pyrazine.typing.lambda_cognito_identity import LambdaCognitoIdentity
class LambdaContext(object):
    """
    Models the context object passed to the function handler by AWS Lambda. To
    be used mainly for typing and testing purposes.
    """

    # The maximum execution time allowed for AWS Lambda functions (15 minutes)
    # in milliseconds.
    MAX_EXEC_TIME_IN_MILLIS = 15 * 60 * 1000

    def __init__(self,
                 function_name: str,
                 function_version: str,
                 invoked_function_arn: str,
                 memory_limit_in_mb: int,
                 aws_request_id: str,
                 log_group_name: str,
                 log_stream_name: str,
                 identity: LambdaCognitoIdentity,
                 client_context: LambdaClientContext):
        """
        Initializes an instance of the LambdaContext class.

        :param function_name: The name of the Lambda function.
        :param function_version: The version of the Lambda function.
        :param invoked_function_arn: The ARN that is used to invoke the function.
        :param memory_limit_in_mb: The amount of memory allocated for the Lambda
        function, in megabytes.
        :param aws_request_id: The identifier of the invocation request.
        :param log_group_name: The log group of the Lambda function.
        :param log_stream_name: The log stream for the function instance.
        :param identity: Information about the Amazon Cognito identity that
        authorized the request. (Only for mobile apps)
        :param client_context: Client context provided to Lambda by the client
        application. (Only for mobile apps)
        """
        self._function_name = function_name
        self._function_version = function_version
        self._invoked_function_arn = invoked_function_arn
        self._memory_limit_in_mb = memory_limit_in_mb
        self._aws_request_id = aws_request_id
        self._log_group_name = log_group_name
        self._log_stream_name = log_stream_name
        self._identity = identity
        self._client_context = client_context

        # Wall-clock start of the (mocked) invocation, in seconds since epoch;
        # used by get_remaining_time_in_millis().
        self._start_time = time.time()

    @property
    def function_name(self) -> str:
        """
        The name of the Lambda function.
        """
        return self._function_name

    @property
    def function_version(self) -> str:
        """
        The version of the Lambda function.
        """
        return self._function_version

    @property
    def invoked_function_arn(self) -> str:
        """
        The ARN that is used to invoke the function.
        """
        return self._invoked_function_arn

    @property
    def memory_limit_in_mb(self) -> int:
        """
        The amount of memory allocated for the Lambda function, in megabytes.
        """
        return self._memory_limit_in_mb

    @property
    def aws_request_id(self) -> str:
        """
        The identifier of the invocation request.
        """
        return self._aws_request_id

    @property
    def log_group_name(self) -> str:
        """
        The log group of the Lambda function.
        """
        return self._log_group_name

    @property
    def log_stream_name(self) -> str:
        """
        The log stream for the function instance.
        """
        return self._log_stream_name

    @property
    def identity(self) -> LambdaCognitoIdentity:
        """
        Information about the Amazon Cognito identity that authorized the request.
        (Only for mobile apps)
        """
        return self._identity

    @property
    def client_context(self) -> LambdaClientContext:
        """
        Client context that has been provided to Lambda by the client application.
        (Only for mobile apps)
        """
        return self._client_context

    def get_remaining_time_in_millis(self) -> int:
        """
        Returns the number of milliseconds left before the execution times out.
        (Mocked)
        """
        # Bug fix: the elapsed time is measured in seconds, so it must be
        # converted to milliseconds BEFORE subtracting from the millisecond
        # budget.  The original multiplied the already-mixed difference by
        # 1000, returning ~900,000,000 instead of <= 900,000.
        elapsed_millis = (time.time() - self._start_time) * 1000
        return int(self.MAX_EXEC_TIME_IN_MILLIS - elapsed_millis)
| StarcoderdataPython |
3320688 | """Tests for the static document."""
import unittest
from grow.documents import static_document
from grow.pods import pods
from grow import storage
from grow.testing import testing
class StaticDocumentTestCase(unittest.TestCase):
    """Test the static document.

    Exercises StaticDocument paths (source, serving, parameterized), the
    exists/filter properties, fingerprint stripping, and URL generation
    against the shared test pod fixture.
    """

    def setUp(self):
        # Fresh test pod per test; paths below refer to files in this fixture.
        self.dir_path = testing.create_test_pod_dir()
        self.pod = pods.Pod(self.dir_path, storage=storage.FileStorage)

    def test_exists(self):
        """Static document exists?"""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertTrue(static_doc.exists)

        static_doc = static_document.StaticDocument(
            self.pod, '/static/something.txt')
        self.assertFalse(static_doc.exists)

        static_doc = static_document.StaticDocument(
            self.pod, '/static-a/test-a.txt')
        self.assertTrue(static_doc.exists)

        # Same filename under a different source dir does not exist.
        static_doc = static_document.StaticDocument(
            self.pod, '/static-b/test-a.txt')
        self.assertFalse(static_doc.exists)

    def test_filter(self):
        """Static filter config."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertEqual(static_doc.filter, {})

    def test_path_format(self):
        """Static document path format."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertEqual('/app/static/test.txt', static_doc.path_format)

        static_doc = static_document.StaticDocument(
            self.pod, '/static/something.txt')
        self.assertEqual('/app/static/something.txt', static_doc.path_format)

        # Localized documents use the localized path format with placeholders.
        static_doc = static_document.StaticDocument(
            self.pod, '/static/something.txt', locale='de')
        self.assertEqual(
            '/app/{root}/static/somepath/{locale}/something.txt', static_doc.path_format)

    def test_path_filter(self):
        """Static path filter."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertTrue(static_doc.path_filter.is_valid(
            static_doc.serving_path))

    def test_serving_path(self):
        """Static document serving path."""
        # Serving paths embed the content fingerprint before the extension.
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertEqual(
            ('/app/static/test-180e03ea719ad14b0e02701048db567e231eb6fd'
             '0a23b6359f068b1e8bef135b.txt'),
            static_doc.serving_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt', locale='de')
        self.assertEqual(
            ('/app/root/static/somepath/de/test-180e03ea719ad14b0e02701'
             '048db567e231eb6fd0a23b6359f068b1e8bef135b.txt'),
            static_doc.serving_path)

    def test_serving_path_parameterized(self):
        """Static document parameterized serving path."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertEqual(
            '/app/static/test.txt', static_doc.serving_path_parameterized)

        # Locale stays a :locale parameter instead of being substituted.
        static_doc = static_document.StaticDocument(
            self.pod, '/static/something.txt', locale='de')
        self.assertEqual(
            '/app/root/static/somepath/:locale/something.txt',
            static_doc.serving_path_parameterized)

    def test_source_path(self):
        """Static document source path."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertEqual('/static/', static_doc.source_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static/something.txt', locale='de')
        self.assertEqual('/static/intl/{locale}/', static_doc.source_path)

    def test_source_path_multi_paths(self):
        """Static document source path with multiple source dirs."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static-a/test-a.txt')
        self.assertEqual('/static-a/', static_doc.source_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static-b/test-b.txt')
        self.assertEqual('/static-b/', static_doc.source_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static-a/test-a.txt', locale='de')
        self.assertEqual('/static-a/intl/{locale}/', static_doc.source_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static-b/test-b.txt', locale='de')
        self.assertEqual('/static-b/intl/{locale}/', static_doc.source_path)

    def test_source_paths(self):
        """Static document source paths."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static-a/test-a.txt')
        self.assertEqual('/static-a/', static_doc.source_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static-b/test-b.txt')
        self.assertEqual('/static-b/', static_doc.source_path)

    def test_source_pod_path(self):
        """Static document source path."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertEqual('/static/test.txt', static_doc.source_pod_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt', locale='de')
        self.assertEqual(
            '/static/intl/de/test.txt', static_doc.source_pod_path)

    def test_sub_pod_path(self):
        """Static document source path."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertEqual('test.txt', static_doc.sub_pod_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static/something/test.txt')
        self.assertEqual('something/test.txt', static_doc.sub_pod_path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static/intl/{locale}/something.txt', locale='de')
        self.assertEqual('something.txt', static_doc.sub_pod_path)

    def test_strip_fingerprint(self):
        """Strip off fingerprint from the serving path."""
        serving_path = '/'
        expected = '/'
        actual = static_document.StaticDocument.strip_fingerprint(serving_path)
        self.assertEqual(expected, actual)

        # Paths without a fingerprint pass through unchanged.
        serving_path = '/test.txt'
        expected = '/test.txt'
        actual = static_document.StaticDocument.strip_fingerprint(serving_path)
        self.assertEqual(expected, actual)

        serving_path = '/something/test.txt'
        expected = '/something/test.txt'
        actual = static_document.StaticDocument.strip_fingerprint(serving_path)
        self.assertEqual(expected, actual)

        serving_path = ('/global-fd3b7d753284484c35e7fa347c78529fce0aa43'
                        '962403acd7f1a8f1ce83a3d71.min.css')
        expected = '/global.min.css'
        actual = static_document.StaticDocument.strip_fingerprint(serving_path)
        self.assertEqual(expected, actual)

    def test_url(self):
        """Static document url."""
        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt')
        self.assertEqual(
            ('/app/static/test-180e03ea719ad14b0e02701048db567e231eb6f'
             'd0a23b6359f068b1e8bef135b.txt'),
            static_doc.url.path)

        static_doc = static_document.StaticDocument(
            self.pod, '/static/test.txt', locale='de')
        self.assertEqual(
            ('/app/root/static/somepath/de/test-180e03ea719ad14b0e0270'
             '1048db567e231eb6fd0a23b6359f068b1e8bef135b.txt'),
            static_doc.url.path)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
1798896 | <gh_stars>0
#!/usr/bin/python
import crypt
import random
import sys
import getpass

# Characters valid in a traditional crypt(3) salt.
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"


def _make_salt(length=16):
    """Return *length* random salt characters drawn from ALPHABET.

    Uses the OS CSPRNG (random.SystemRandom) rather than the default Mersenne
    Twister: salts need not be secret, but predictable salts weaken the hash
    against precomputation.
    """
    rng = random.SystemRandom()
    return "".join(rng.choice(ALPHABET) for _ in range(length))


def main():
    """Prompt for a password without echo and print its crypt(3) hash.

    Bug fix: the original used the Python-2 ``print`` statement, a syntax
    error under Python 3.
    NOTE(review): the ``crypt`` module is deprecated (removed in Python
    3.13); consider hashlib.scrypt or a passlib-style library for new code.
    """
    pwd = getpass.getpass()
    print(crypt.crypt(pwd, _make_salt()))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3334998 | import os
from invoke import task, Collection, run
@task
def info(c):
"""Prints info about the templating engine"""
print("Welcome to qtemplate - https://github.com/QsonLabs/qtemplate")
| StarcoderdataPython |
1609009 | <filename>pygem/massbalance.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 14:00:14 2020
@author: davidrounce
"""
# External libraries
import numpy as np
#import pandas as pd
# Local libraries
#from oggm import cfg
#from oggm import utils
from oggm.core.massbalance import MassBalanceModel
import pygem.pygem_input as pygem_prms
from pygem.utils._funcs import annualweightedmean_array
#cfg.initialize()
#cfg.PARAMS['has_internet'] = False
#%%
class PyGEMMassBalance(MassBalanceModel):
"""Mass-balance computed from the Python Glacier Evolution Model.
This mass balance accounts for ablation, accumulation, and refreezing.
This class implements the MassBalanceModel interface so that the dynamical model can use it.
"""
def __init__(self, gdir, modelprms, glacier_rgi_table,
option_areaconstant=False, hindcast=pygem_prms.hindcast, frontalablation_k=None,
debug=False, debug_refreeze=False,
fls=None, fl_id=0,
heights=None, repeat_period=False,
# use_refreeze=True
hyps_data=pygem_prms.hyps_data,
inversion_filter=False,
ignore_debris=False
):
""" Initialize.
Parameters
----------
modelprms : dict
Model parameters dictionary (lrgcm, lrglac, precfactor, precgrad, ddfsnow, ddfice, tempsnow, tempchange)
glacier_rgi_table : pd.Series
Table of glacier's RGI information
option_areaconstant : Boolean
option to keep glacier area constant (default False allows glacier area to change annually)
frontalablation_k : float
frontal ablation parameter
debug : Boolean
option to turn on print statements for development or debugging of code
debug_refreeze : Boolean
option to turn on print statements for development/debugging of refreezing code
hindcast : int
switch to run the model in reverse or not (may be irrelevant after converting to OGGM's setup)
"""
if debug:
print('\n\nDEBUGGING MASS BALANCE FUNCTION\n\n')
self.debug_refreeze = debug_refreeze
self.inversion_filter = inversion_filter
super(PyGEMMassBalance, self).__init__()
self.valid_bounds = [-1e4, 2e4] # in m
self.hemisphere = 'nh'
# self.use_refreeze = use_refreeze
# Glacier data
self.modelprms = modelprms
self.glacier_rgi_table = glacier_rgi_table
if pygem_prms.hyps_data in ['Farinotti', 'Huss']:
self.icethickness_initial = gdir.self.icethickness_initial
self.width_initial = gdir.width_initial
self.glacier_area_initial = gdir.glacier_area_initial
self.heights = gdir.heights
self.debris_ed = gdir.debris_ed
assert True==False, 'Check units of the initial input data from Farinotti and/or Huss'
else:
self.icethickness_initial = getattr(fls[fl_id], 'thick', None)
self.width_initial = fls[fl_id].widths_m
self.glacier_area_initial = fls[fl_id].widths_m * fls[fl_id].dx_meter
self.heights = fls[fl_id].surface_h
if pygem_prms.include_debris and not ignore_debris and not gdir.is_tidewater:
self.debris_ed = fls[fl_id].debris_ed
else:
self.debris_ed = np.ones(self.glacier_area_initial.shape[0])
self.glac_idx_initial = self.glacier_area_initial.nonzero()
# Climate data
self.dates_table = gdir.dates_table
self.glacier_gcm_temp = gdir.historical_climate['temp']
self.glacier_gcm_tempstd = gdir.historical_climate['tempstd']
self.glacier_gcm_prec = gdir.historical_climate['prec']
self.glacier_gcm_elev = gdir.historical_climate['elev']
self.glacier_gcm_lrgcm = gdir.historical_climate['lr']
self.glacier_gcm_lrglac = gdir.historical_climate['lr']
if pygem_prms.hindcast == 1:
self.glacier_gcm_prec = self.glacier_gcm_prec[::-1]
self.glacier_gcm_temp = self.glacier_gcm_temp[::-1]
self.glacier_gcm_lrgcm = self.glacier_gcm_lrgcm[::-1]
self.glacier_gcm_lrglac = self.glacier_gcm_lrglac[::-1]
self.repeat_period = repeat_period
# Variables to store (consider storing in xarray)
nbins = self.glacier_area_initial.shape[0]
self.nmonths = self.glacier_gcm_temp.shape[0]
self.nyears = int(self.dates_table.shape[0] / 12)
self.bin_temp = np.zeros((nbins,self.nmonths))
self.bin_prec = np.zeros((nbins,self.nmonths))
self.bin_acc = np.zeros((nbins,self.nmonths))
self.bin_refreezepotential = np.zeros((nbins,self.nmonths))
self.bin_refreeze = np.zeros((nbins,self.nmonths))
self.bin_meltglac = np.zeros((nbins,self.nmonths))
self.bin_meltsnow = np.zeros((nbins,self.nmonths))
self.bin_melt = np.zeros((nbins,self.nmonths))
self.bin_snowpack = np.zeros((nbins,self.nmonths))
self.snowpack_remaining = np.zeros((nbins,self.nmonths))
self.glac_bin_refreeze = np.zeros((nbins,self.nmonths))
self.glac_bin_melt = np.zeros((nbins,self.nmonths))
self.glac_bin_frontalablation = np.zeros((nbins,self.nmonths))
self.glac_bin_snowpack = np.zeros((nbins,self.nmonths))
self.glac_bin_massbalclim = np.zeros((nbins,self.nmonths))
self.glac_bin_massbalclim_annual = np.zeros((nbins,self.nyears))
self.glac_bin_surfacetype_annual = np.zeros((nbins,self.nyears+1))
self.glac_bin_area_annual = np.zeros((nbins,self.nyears+1))
self.glac_bin_icethickness_annual = np.zeros((nbins,self.nyears+1)) # Needed for MassRedistributionCurves
self.glac_bin_width_annual = np.zeros((nbins,self.nyears+1)) # Needed for MassRedistributionCurves
self.offglac_bin_prec = np.zeros((nbins,self.nmonths))
self.offglac_bin_melt = np.zeros((nbins,self.nmonths))
self.offglac_bin_refreeze = np.zeros((nbins,self.nmonths))
self.offglac_bin_snowpack = np.zeros((nbins,self.nmonths))
self.offglac_bin_area_annual = np.zeros((nbins,self.nyears+1))
self.glac_wide_temp = np.zeros(self.nmonths)
self.glac_wide_prec = np.zeros(self.nmonths)
self.glac_wide_acc = np.zeros(self.nmonths)
self.glac_wide_refreeze = np.zeros(self.nmonths)
self.glac_wide_melt = np.zeros(self.nmonths)
self.glac_wide_frontalablation = np.zeros(self.nmonths)
self.glac_wide_massbaltotal = np.zeros(self.nmonths)
self.glac_wide_runoff = np.zeros(self.nmonths)
self.glac_wide_snowline = np.zeros(self.nmonths)
self.glac_wide_area_annual = np.zeros(self.nyears+1)
self.glac_wide_volume_annual = np.zeros(self.nyears+1)
self.glac_wide_volume_change_ignored_annual = np.zeros(self.nyears)
self.glac_wide_ELA_annual = np.zeros(self.nyears+1)
self.offglac_wide_prec = np.zeros(self.nmonths)
self.offglac_wide_refreeze = np.zeros(self.nmonths)
self.offglac_wide_melt = np.zeros(self.nmonths)
self.offglac_wide_snowpack = np.zeros(self.nmonths)
self.offglac_wide_runoff = np.zeros(self.nmonths)
self.dayspermonth = self.dates_table['daysinmonth'].values
self.surfacetype_ddf = np.zeros((nbins))
# Surface type DDF dictionary (manipulate this function for calibration or for each glacier)
self.surfacetype_ddf_dict = self._surfacetypeDDFdict(self.modelprms)
# Refreezing specific layers
if pygem_prms.option_refreezing == 'HH2015':
# Refreezing layers density, volumetric heat capacity, and thermal conductivity
self.rf_dens_expb = (pygem_prms.rf_dens_bot / pygem_prms.rf_dens_top)**(1/(pygem_prms.rf_layers-1))
self.rf_layers_dens = np.array([pygem_prms.rf_dens_top * self.rf_dens_expb**x
for x in np.arange(0,pygem_prms.rf_layers)])
self.rf_layers_ch = ((1 - self.rf_layers_dens/1000) * pygem_prms.ch_air + self.rf_layers_dens/1000 *
pygem_prms.ch_ice)
self.rf_layers_k = ((1 - self.rf_layers_dens/1000) * pygem_prms.k_air + self.rf_layers_dens/1000 *
pygem_prms.k_ice)
# refreeze in each bin
self.refr = np.zeros(nbins)
# refrezee cold content or "potential" refreeze
self.rf_cold = np.zeros(nbins)
# layer temp of each elev bin for present time step
self.te_rf = np.zeros((pygem_prms.rf_layers,nbins,self.nmonths))
# layer temp of each elev bin for previous time step
self.tl_rf = np.zeros((pygem_prms.rf_layers,nbins,self.nmonths))
# Sea level for marine-terminating glaciers
self.sea_level = 0
rgi_region = int(glacier_rgi_table.RGIId.split('-')[1].split('.')[0])
if frontalablation_k == None:
self.frontalablation_k0 = pygem_prms.frontalablation_k0dict[rgi_region]
def get_annual_mb(self, heights, year=None, fls=None, fl_id=None,
                  debug=False, option_areaconstant=False):
    """FIXED FORMAT FOR THE FLOWLINE MODEL
    Returns annual climatic mass balance [m ice per second]

    Downscales monthly GCM temperature/precipitation to each elevation bin,
    computes accumulation, melt, and refreeze for all 12 months of the requested
    year, records binned and glacier-wide diagnostics in place, and returns the
    annual climatic mass balance as an ice-equivalent flux for OGGM's dynamics.

    Parameters
    ----------
    heights : np.array
        elevation bins (m a.s.l.); must equal fls[fl_id].surface_h
    year : int
        year starting with 0 to the number of years in the study
    fls : list
        flowline objects; only fls[fl_id] is used
    fl_id : int
        index of the flowline being processed
    debug : bool
        unused within this method (kept for interface consistency)
    option_areaconstant : bool
        if False, off-glacier area/fluxes exposed by retreat are also tracked

    Returns
    -------
    mb_filled : np.array
        mass balance for each bin [m ice per second]; non-glaciated bins below
        the glacier top are filled via an elevation gradient so the dynamical
        model does not advect spurious ice flux into them
    """
    year = int(year)
    if self.repeat_period:
        # Wrap the year index so a short climate record can be cycled repeatedly
        year = year % (pygem_prms.gcm_endyear - pygem_prms.gcm_startyear)
    fl = fls[fl_id]
    # Sanity check: the caller's heights must match the flowline surface
    np.testing.assert_allclose(heights, fl.surface_h)
    glacier_area_t0 = fl.widths_m * fl.dx_meter
    glacier_area_initial = self.glacier_area_initial
    fl_widths_m = getattr(fl, 'widths_m', None)
    fl_section = getattr(fl,'section',None)
    # Ice thickness (average): section area divided by width, where width > 0
    if fl_section is not None and fl_widths_m is not None:
        icethickness_t0 = np.zeros(fl_section.shape)
        icethickness_t0[fl_widths_m > 0] = fl_section[fl_widths_m > 0] / fl_widths_m[fl_widths_m > 0]
    else:
        icethickness_t0 = None
    # Quality control: ensure you only have glacier area where there is ice
    if icethickness_t0 is not None:
        glacier_area_t0[icethickness_t0 == 0] = 0
        # Record ice thickness (skipped when thickness is unavailable)
        self.glac_bin_icethickness_annual[:,year] = icethickness_t0
    # Glacier indices (bins with nonzero area)
    glac_idx_t0 = glacier_area_t0.nonzero()[0]
    nbins = heights.shape[0]
    nmonths = self.glacier_gcm_temp.shape[0]
    # Local variables
    bin_precsnow = np.zeros((nbins,nmonths))
    # Refreezing specific layers
    if pygem_prms.option_refreezing == 'HH2015' and year == 0:
        self.te_rf[:,:,0] = 0  # layer temp of each elev bin for present time step
        self.tl_rf[:,:,0] = 0  # layer temp of each elev bin for previous time step
    elif pygem_prms.option_refreezing == 'Woodward':
        refreeze_potential = np.zeros(nbins)

    if len(glac_idx_t0) > 0:
        # Surface type [0=off-glacier, 1=ice, 2=snow, 3=firn, 4=debris]
        if year == 0:
            self.surfacetype, self.firnline_idx = self._surfacetypebinsinitial(self.heights)
        self.glac_bin_surfacetype_annual[:,year] = self.surfacetype
        # Off-glacier area and indices
        if option_areaconstant == False:
            self.offglac_bin_area_annual[:,year] = glacier_area_initial - glacier_area_t0
            offglac_idx = np.where(self.offglac_bin_area_annual[:,year] > 0)[0]
        # Functions currently set up for monthly timestep
        #  only compute mass balance while glacier exists
        if (pygem_prms.timestep == 'monthly') and (glac_idx_t0.shape[0] != 0):
            # AIR TEMPERATURE: Downscale the gcm temperature [deg C] to each bin
            if pygem_prms.option_temp2bins == 1:
                # Downscale using gcm and glacier lapse rates
                #  T_bin = T_gcm + lr_gcm * (z_ref - z_gcm) + lr_glac * (z_bin - z_ref) + tempchange
                self.bin_temp[:,12*year:12*(year+1)] = (self.glacier_gcm_temp[12*year:12*(year+1)] +
                        self.glacier_gcm_lrgcm[12*year:12*(year+1)] *
                        (self.glacier_rgi_table.loc[pygem_prms.option_elev_ref_downscale] - self.glacier_gcm_elev) +
                        self.glacier_gcm_lrglac[12*year:12*(year+1)] * (heights -
                        self.glacier_rgi_table.loc[pygem_prms.option_elev_ref_downscale])[:, np.newaxis] +
                        self.modelprms['tbias'])
            # PRECIPITATION/ACCUMULATION: Downscale the precipitation (liquid and solid) to each bin
            if pygem_prms.option_prec2bins == 1:
                # Precipitation using precipitation factor and precipitation gradient
                #  P_bin = P_gcm * prec_factor * (1 + prec_grad * (z_bin - z_ref))
                bin_precsnow[:,12*year:12*(year+1)] = (self.glacier_gcm_prec[12*year:12*(year+1)] *
                        self.modelprms['kp'] * (1 + self.modelprms['precgrad'] * (heights -
                        self.glacier_rgi_table.loc[pygem_prms.option_elev_ref_downscale]))[:,np.newaxis])
            # Option to adjust prec of uppermost 25% of glacier for wind erosion and reduced moisture content
            if pygem_prms.option_preclimit == 1:
                # Elevation range based on all flowlines
                raw_min_elev = []
                raw_max_elev = []
                if len(fl.surface_h[fl.widths_m > 0]):
                    raw_min_elev.append(fl.surface_h[fl.widths_m > 0].min())
                    raw_max_elev.append(fl.surface_h[fl.widths_m > 0].max())
                elev_range = np.max(raw_max_elev) - np.min(raw_min_elev)
                elev_75 = np.min(raw_min_elev) + 0.75 * (elev_range)
                # If elevation range > 1000 m, apply corrections to uppermost 25% of glacier (Huss and Hock, 2015)
                if elev_range > 1000:
                    # Indices of upper 25%
                    glac_idx_upper25 = glac_idx_t0[heights[glac_idx_t0] >= elev_75]
                    # Exponential decay according to elevation difference from the 75% elevation
                    #  prec_upper25 = prec * exp(-(elev_i - elev_75%)/(elev_max- - elev_75%))
                    # height at 75% of the elevation
                    height_75 = heights[glac_idx_upper25].min()
                    glac_idx_75 = np.where(heights == height_75)[0][0]
                    # exponential decay
                    bin_precsnow[glac_idx_upper25,12*year:12*(year+1)] = (
                            bin_precsnow[glac_idx_75,12*year:12*(year+1)] *
                            np.exp(-1*(heights[glac_idx_upper25] - height_75) /
                                   (heights[glac_idx_upper25].max() - heights[glac_idx_upper25].min()))
                            [:,np.newaxis])
                    # Precipitation cannot be less than 87.5% of the maximum accumulation elsewhere on the glacier
                    for month in range(0,12):
                        bin_precsnow[glac_idx_upper25[(bin_precsnow[glac_idx_upper25,month] < 0.875 *
                                                       bin_precsnow[glac_idx_t0,month].max()) &
                                                      (bin_precsnow[glac_idx_upper25,month] != 0)], month] = (
                                0.875 * bin_precsnow[glac_idx_t0,month].max())
            # Separate total precipitation into liquid (bin_prec) and solid (bin_acc)
            if pygem_prms.option_accumulation == 1:
                # if temperature above threshold, then rain
                (self.bin_prec[:,12*year:12*(year+1)]
                 [self.bin_temp[:,12*year:12*(year+1)] > self.modelprms['tsnow_threshold']]) = (
                        bin_precsnow[:,12*year:12*(year+1)]
                        [self.bin_temp[:,12*year:12*(year+1)] > self.modelprms['tsnow_threshold']])
                # if temperature below threshold, then snow
                (self.bin_acc[:,12*year:12*(year+1)]
                 [self.bin_temp[:,12*year:12*(year+1)] <= self.modelprms['tsnow_threshold']]) = (
                        bin_precsnow[:,12*year:12*(year+1)]
                        [self.bin_temp[:,12*year:12*(year+1)] <= self.modelprms['tsnow_threshold']])
            elif pygem_prms.option_accumulation == 2:
                # if temperature between min/max, then mix of snow/rain using linear relationship between min/max
                self.bin_prec[:,12*year:12*(year+1)] = (
                        (0.5 + (self.bin_temp[:,12*year:12*(year+1)] -
                                self.modelprms['tsnow_threshold']) / 2) * bin_precsnow[:,12*year:12*(year+1)])
                self.bin_acc[:,12*year:12*(year+1)] = (
                        bin_precsnow[:,12*year:12*(year+1)] - self.bin_prec[:,12*year:12*(year+1)])
                # if temperature above maximum threshold, then all rain
                (self.bin_prec[:,12*year:12*(year+1)]
                 [self.bin_temp[:,12*year:12*(year+1)] > self.modelprms['tsnow_threshold'] + 1]) = (
                        bin_precsnow[:,12*year:12*(year+1)]
                        [self.bin_temp[:,12*year:12*(year+1)] > self.modelprms['tsnow_threshold'] + 1])
                (self.bin_acc[:,12*year:12*(year+1)]
                 [self.bin_temp[:,12*year:12*(year+1)] > self.modelprms['tsnow_threshold'] + 1]) = 0
                # if temperature below minimum threshold, then all snow
                (self.bin_acc[:,12*year:12*(year+1)]
                 [self.bin_temp[:,12*year:12*(year+1)] <= self.modelprms['tsnow_threshold'] - 1]) = (
                        bin_precsnow[:,12*year:12*(year+1)]
                        [self.bin_temp[:,12*year:12*(year+1)] <= self.modelprms['tsnow_threshold'] - 1])
                (self.bin_prec[:,12*year:12*(year+1)]
                 [self.bin_temp[:,12*year:12*(year+1)] <= self.modelprms['tsnow_threshold'] - 1]) = 0
            # ENTER MONTHLY LOOP (monthly loop required since surface type changes)
            for month in range(0,12):
                # Step is the position as a function of year and month, which improves readability
                step = 12*year + month
                # ACCUMULATION, MELT, REFREEZE, AND CLIMATIC MASS BALANCE
                # Snowpack [m w.e.] = snow remaining + new snow
                if step == 0:
                    self.bin_snowpack[:,step] = self.bin_acc[:,step]
                else:
                    self.bin_snowpack[:,step] = self.snowpack_remaining[:,step-1] + self.bin_acc[:,step]
                # MELT [m w.e.]
                # energy available for melt [degC day]
                if pygem_prms.option_ablation == 1:
                    # option 1: energy based on monthly temperature
                    melt_energy_available = self.bin_temp[:,step]*self.dayspermonth[step]
                    melt_energy_available[melt_energy_available < 0] = 0
                elif pygem_prms.option_ablation == 2:
                    # Seed randomness for repeatability, but base it on step to ensure the daily variability is not
                    #  the same for every single time step
                    np.random.seed(step)
                    # option 2: monthly temperature superimposed with daily temperature variability
                    # daily temperature variation in each bin for the monthly timestep
                    bin_tempstd_daily = np.repeat(
                            np.random.normal(loc=0, scale=self.glacier_gcm_tempstd[step],
                                             size=self.dayspermonth[step])
                            .reshape(1,self.dayspermonth[step]), heights.shape[0], axis=0)
                    # daily temperature in each bin for the monthly timestep
                    bin_temp_daily = self.bin_temp[:,step][:,np.newaxis] + bin_tempstd_daily
                    # remove negative values
                    bin_temp_daily[bin_temp_daily < 0] = 0
                    # Energy available for melt [degC day] = sum of daily energy available
                    melt_energy_available = bin_temp_daily.sum(axis=1)
                # SNOW MELT [m w.e.]
                self.bin_meltsnow[:,step] = self.surfacetype_ddf_dict[2] * melt_energy_available
                # snow melt cannot exceed the snow depth
                self.bin_meltsnow[self.bin_meltsnow[:,step] > self.bin_snowpack[:,step], step] = (
                        self.bin_snowpack[self.bin_meltsnow[:,step] > self.bin_snowpack[:,step], step])
                # GLACIER MELT (ice and firn) [m w.e.]
                # energy remaining after snow melt [degC day]
                melt_energy_available = (
                        melt_energy_available - self.bin_meltsnow[:,step] / self.surfacetype_ddf_dict[2])
                # remove low values of energy available caused by rounding errors in the step above
                melt_energy_available[abs(melt_energy_available) < pygem_prms.tolerance] = 0
                # DDF based on surface type [m w.e. degC-1 day-1]
                for surfacetype_idx in self.surfacetype_ddf_dict:
                    self.surfacetype_ddf[self.surfacetype == surfacetype_idx] = (
                            self.surfacetype_ddf_dict[surfacetype_idx])
                    # Debris enhancement factors in ablation area (debris in accumulation area would submerge)
                    if surfacetype_idx == 1 and pygem_prms.include_debris:
                        self.surfacetype_ddf[self.surfacetype == 1] = (
                                self.surfacetype_ddf[self.surfacetype == 1] * self.debris_ed[self.surfacetype == 1])
                self.bin_meltglac[glac_idx_t0,step] = (
                        self.surfacetype_ddf[glac_idx_t0] * melt_energy_available[glac_idx_t0])
                # TOTAL MELT (snow + glacier)
                #  off-glacier need to include melt of refreeze because there are no glacier dynamics,
                #  but on-glacier do not need to account for this (simply assume refreeze has same surface type)
                self.bin_melt[:,step] = self.bin_meltglac[:,step] + self.bin_meltsnow[:,step]
                # REFREEZING
                if pygem_prms.option_refreezing == 'HH2015':
                    if step > 0:
                        self.tl_rf[:,:,step] = self.tl_rf[:,:,step-1]
                        self.te_rf[:,:,step] = self.te_rf[:,:,step-1]
                    # Refreeze based on heat conduction approach (Huss and Hock 2015)
                    # refreeze time step (s)
                    rf_dt = 3600 * 24 * self.dayspermonth[step] / pygem_prms.rf_dsc
                    if pygem_prms.option_rf_limit_meltsnow == 1:
                        bin_meltlimit = self.bin_meltsnow.copy()
                    else:
                        bin_meltlimit = self.bin_melt.copy()
                    # Debug lowest bin
                    if self.debug_refreeze:
                        gidx_debug = np.where(heights == heights[glac_idx_t0].min())[0]
                    # Loop through each elevation bin of glacier
                    for nbin, gidx in enumerate(glac_idx_t0):
                        # COMPUTE HEAT CONDUCTION - BUILD COLD RESERVOIR
                        # If no melt, then build up cold reservoir (compute heat conduction)
                        if self.bin_melt[gidx,step] < pygem_prms.rf_meltcrit:
                            if self.debug_refreeze and gidx == gidx_debug and step < 12:
                                print('\nMonth ' + str(self.dates_table.loc[step,'month']),
                                      'Computing heat conduction')
                            # Set refreeze equal to 0
                            self.refr[gidx] = 0
                            # Loop through multiple iterations to converge on a solution
                            #  -> this will loop through 0, 1, 2
                            for h in np.arange(0, pygem_prms.rf_dsc):
                                # Compute heat conduction in layers (loop through rows)
                                #  go from 1 to rf_layers-1 to avoid indexing errors with "j-1" and "j+1"
                                #  "j+1" is set to zero, which is fine for temperate glaciers but inaccurate for
                                #  cold/polythermal glaciers
                                for j in np.arange(1, pygem_prms.rf_layers-1):
                                    # Assume temperature of first layer equals air temperature
                                    #  assumption probably wrong, but might still work at annual average
                                    # Since next line uses tl_rf for all calculations, set tl_rf[0] to present mean
                                    #  monthly air temperature to ensure the present calculations are done with the
                                    #  present time step's air temperature
                                    self.tl_rf[0, gidx,step] = self.bin_temp[gidx,step]
                                    # Temperature for each layer (explicit finite-difference heat conduction)
                                    self.te_rf[j,gidx,step] = (self.tl_rf[j,gidx,step] +
                                            rf_dt * self.rf_layers_k[j] / self.rf_layers_ch[j] / pygem_prms.rf_dz**2 *
                                            0.5 * ((self.tl_rf[j-1,gidx,step] - self.tl_rf[j,gidx,step]) -
                                                   (self.tl_rf[j,gidx,step] - self.tl_rf[j+1,gidx,step])))
                                # Update previous time step
                                self.tl_rf[:,gidx,step] = self.te_rf[:,gidx,step]
                                if self.debug_refreeze and gidx == gidx_debug and step < 12:
                                    print('tl_rf:', ["{:.2f}".format(x) for x in self.tl_rf[:,gidx,step]])
                        # COMPUTE REFREEZING - TAP INTO "COLD RESERVOIR" or potential refreezing
                        else:
                            if self.debug_refreeze and gidx == gidx_debug and step < 12:
                                print('\nMonth ' + str(self.dates_table.loc[step,'month']), 'Computing refreeze')
                            # Refreezing over firn surface
                            if (self.surfacetype[gidx] == 2) or (self.surfacetype[gidx] == 3):
                                nlayers = pygem_prms.rf_layers-1
                            # Refreezing over ice surface
                            else:
                                # Approximate number of layers of snow on top of ice
                                smax = np.round((self.bin_snowpack[gidx,step] / (self.rf_layers_dens[0] / 1000) +
                                                 pygem_prms.pp) / pygem_prms.rf_dz, 0)
                                # if there is very little snow on the ground (SWE > 0.06 m for pp=0.3),
                                #  then still set smax (layers) to 1
                                if self.bin_snowpack[gidx,step] > 0 and smax == 0:
                                    smax=1
                                # if no snow on the ground, then set to rf_cold to NoData value
                                if smax == 0:
                                    self.rf_cold[gidx] = 0
                                # if smax greater than the number of layers, set to max number of layers minus 1
                                if smax > pygem_prms.rf_layers - 1:
                                    smax = pygem_prms.rf_layers - 1
                                nlayers = int(smax)
                            # Compute potential refreeze, "cold reservoir", from temperature in each layer
                            #  only calculate potential refreezing first time it starts melting each year
                            if self.rf_cold[gidx] == 0 and self.tl_rf[:,gidx,step].min() < 0:
                                if self.debug_refreeze and gidx == gidx_debug and step < 12:
                                    print('calculating potential refreeze from ' + str(nlayers) + ' layers')
                                for j in np.arange(0,nlayers):
                                    j += 1
                                    # units: (degC) * (J K-1 m-3) * (m) * (kg J-1) * (m3 kg-1)
                                    rf_cold_layer = (self.tl_rf[j,gidx,step] * self.rf_layers_ch[j] *
                                                     pygem_prms.rf_dz / pygem_prms.Lh_rf / pygem_prms.density_water)
                                    self.rf_cold[gidx] -= rf_cold_layer
                                    if self.debug_refreeze and gidx == gidx_debug and step < 12:
                                        print('j:', j, 'tl_rf @ j:', np.round(self.tl_rf[j,gidx,step],2),
                                              'ch @ j:', np.round(self.rf_layers_ch[j],2),
                                              'rf_cold_layer @ j:', np.round(rf_cold_layer,2),
                                              'rf_cold @ j:', np.round(self.rf_cold[gidx],2))
                                if self.debug_refreeze and gidx == gidx_debug and step < 12:
                                    print('rf_cold:', np.round(self.rf_cold[gidx],2))
                            # Compute refreezing
                            # If melt and liquid prec < potential refreeze, then refreeze all melt and liquid prec
                            if (bin_meltlimit[gidx,step] + self.bin_prec[gidx,step]) < self.rf_cold[gidx]:
                                self.refr[gidx] = bin_meltlimit[gidx,step] + self.bin_prec[gidx,step]
                            # otherwise, refreeze equals the potential refreeze
                            elif self.rf_cold[gidx] > 0:
                                self.refr[gidx] = self.rf_cold[gidx]
                            else:
                                self.refr[gidx] = 0
                            # Track the remaining potential refreeze
                            self.rf_cold[gidx] -= (bin_meltlimit[gidx,step] + self.bin_prec[gidx,step])
                            # if potential refreeze consumed, set to 0 and set temperature to 0 (temperate firn)
                            if self.rf_cold[gidx] < 0:
                                self.rf_cold[gidx] = 0
                                self.tl_rf[:,gidx,step] = 0
                        # Record refreeze
                        self.bin_refreeze[gidx,step] = self.refr[gidx]
                        if self.debug_refreeze and step < 12 and gidx == gidx_debug:
                            print('Month ' + str(self.dates_table.loc[step,'month']),
                                  'Rf_cold remaining:', np.round(self.rf_cold[gidx],2),
                                  'Snow depth:', np.round(self.bin_snowpack[glac_idx_t0[nbin],step],2),
                                  'Snow melt:', np.round(self.bin_meltsnow[glac_idx_t0[nbin],step],2),
                                  'Rain:', np.round(self.bin_prec[glac_idx_t0[nbin],step],2),
                                  'Rfrz:', np.round(self.bin_refreeze[gidx,step],2))
                elif pygem_prms.option_refreezing == 'Woodward':
                    # Refreeze based on annual air temperature (Woodward etal. 1997)
                    #  R(m) = (-0.69 * Tair + 0.0096) * 1 m / 100 cm
                    # calculate annually and place potential refreeze in user defined month
                    if step%12 == 0:
                        bin_temp_annual = annualweightedmean_array(self.bin_temp[:,12*year:12*(year+1)],
                                                                   self.dates_table.iloc[12*year:12*(year+1),:])
                        bin_refreezepotential_annual = (-0.69 * bin_temp_annual + 0.0096) / 100
                        # Remove negative refreezing values
                        bin_refreezepotential_annual[bin_refreezepotential_annual < 0] = 0
                        self.bin_refreezepotential[:,step] = bin_refreezepotential_annual
                        # Reset refreeze potential every year
                        if self.bin_refreezepotential[:,step].max() > 0:
                            refreeze_potential = self.bin_refreezepotential[:,step]
                    if self.debug_refreeze:
                        print('Year ' + str(year) + ' Month ' + str(self.dates_table.loc[step,'month']),
                              'Refreeze potential:', np.round(refreeze_potential[glac_idx_t0[0]],3),
                              'Snow depth:', np.round(self.bin_snowpack[glac_idx_t0[0],step],2),
                              'Snow melt:', np.round(self.bin_meltsnow[glac_idx_t0[0],step],2),
                              'Rain:', np.round(self.bin_prec[glac_idx_t0[0],step],2))
                    # Refreeze [m w.e.]
                    #  refreeze cannot exceed rain and melt (snow & glacier melt)
                    self.bin_refreeze[:,step] = self.bin_meltsnow[:,step] + self.bin_prec[:,step]
                    # refreeze cannot exceed snow depth
                    self.bin_refreeze[self.bin_refreeze[:,step] > self.bin_snowpack[:,step], step] = (
                            self.bin_snowpack[self.bin_refreeze[:,step] > self.bin_snowpack[:,step], step])
                    # refreeze cannot exceed refreeze potential
                    self.bin_refreeze[self.bin_refreeze[:,step] > refreeze_potential, step] = (
                            refreeze_potential[self.bin_refreeze[:,step] > refreeze_potential])
                    self.bin_refreeze[abs(self.bin_refreeze[:,step]) < pygem_prms.tolerance, step] = 0
                    # update refreeze potential
                    refreeze_potential -= self.bin_refreeze[:,step]
                    refreeze_potential[abs(refreeze_potential) < pygem_prms.tolerance] = 0
                # if step < 12 and self.debug_refreeze:
                #     print('refreeze bin ' + str(int(glac_idx_t0[0]*10)) + ':',
                #           np.round(self.bin_refreeze[glac_idx_t0[0],step],3))
                # SNOWPACK REMAINING [m w.e.]
                self.snowpack_remaining[:,step] = self.bin_snowpack[:,step] - self.bin_meltsnow[:,step]
                self.snowpack_remaining[abs(self.snowpack_remaining[:,step]) < pygem_prms.tolerance, step] = 0
                # Record values
                self.glac_bin_melt[glac_idx_t0,step] = self.bin_melt[glac_idx_t0,step]
                self.glac_bin_refreeze[glac_idx_t0,step] = self.bin_refreeze[glac_idx_t0,step]
                self.glac_bin_snowpack[glac_idx_t0,step] = self.bin_snowpack[glac_idx_t0,step]
                # CLIMATIC MASS BALANCE [m w.e.]
                self.glac_bin_massbalclim[glac_idx_t0,step] = (
                        self.bin_acc[glac_idx_t0,step] + self.glac_bin_refreeze[glac_idx_t0,step] -
                        self.glac_bin_melt[glac_idx_t0,step])
                # OFF-GLACIER ACCUMULATION, MELT, REFREEZE, AND SNOWPACK
                if option_areaconstant == False:
                    # precipitation, refreeze, and snowpack are the same both on- and off-glacier
                    self.offglac_bin_prec[offglac_idx,step] = self.bin_prec[offglac_idx,step]
                    self.offglac_bin_refreeze[offglac_idx,step] = self.bin_refreeze[offglac_idx,step]
                    self.offglac_bin_snowpack[offglac_idx,step] = self.bin_snowpack[offglac_idx,step]
                    # Off-glacier melt includes both snow melt and melting of refreezing
                    #  (this is not an issue on-glacier because energy remaining melts underlying snow/ice)
                    # melt of refreezing (assumed to be snow)
                    self.offglac_meltrefreeze = self.surfacetype_ddf_dict[2] * melt_energy_available
                    # melt of refreezing cannot exceed refreezing
                    self.offglac_meltrefreeze[self.offglac_meltrefreeze > self.bin_refreeze[:,step]] = (
                            self.bin_refreeze[:,step][self.offglac_meltrefreeze > self.bin_refreeze[:,step]])
                    # off-glacier melt = snow melt + refreezing melt
                    self.offglac_bin_melt[offglac_idx,step] = (self.bin_meltsnow[offglac_idx,step] +
                                                               self.offglac_meltrefreeze[offglac_idx])
        # ===== RETURN TO ANNUAL LOOP =====
        # SURFACE TYPE (-)
        # Annual climatic mass balance [m w.e.] used to determine the surface type
        self.glac_bin_massbalclim_annual[:,year] = self.glac_bin_massbalclim[:,12*year:12*(year+1)].sum(1)
        # Update surface type for each bin
        self.surfacetype, firnline_idx = self._surfacetypebinsannual(self.surfacetype,
                                                                     self.glac_bin_massbalclim_annual, year)
        # Record binned glacier area
        self.glac_bin_area_annual[:,year] = glacier_area_t0
        # Store glacier-wide results
        self._convert_glacwide_results(year, glacier_area_t0, heights, fls=fls, fl_id=fl_id,
                                       option_areaconstant=option_areaconstant)
    ## if debug:
    # debug_startyr = 57
    # debug_endyr = 61
    # if year > debug_startyr and year < debug_endyr:
    #     print('\n', year, 'glac_bin_massbalclim:', self.glac_bin_massbalclim[:,12*year:12*(year+1)].sum(1))
    #     print('ice thickness:', icethickness_t0)
    #     print('heights:', heights[glac_idx_t0])
    ##    print('surface type present:', self.glac_bin_surfacetype_annual[12:20,year])
    ##    print('surface type updated:', self.surfacetype[12:20])
    # Example of modularity
    # if self.use_refreeze:
    #     mb += self._refreeze_term(heights, year)
    # Mass balance for each bin [m ice per second]
    #  convert [m w.e. yr-1] -> [m ice s-1] using the water/ice density ratio
    seconds_in_year = self.dayspermonth[12*year:12*(year+1)].sum() * 24 * 3600
    mb = (self.glac_bin_massbalclim[:,12*year:12*(year+1)].sum(1)
          * pygem_prms.density_water / pygem_prms.density_ice / seconds_in_year)
    if self.inversion_filter:
        # Enforce monotonically non-increasing mb with decreasing elevation
        #  (NOTE(review): assumes bins are ordered top-to-bottom — confirm against fl ordering)
        mb = np.minimum.accumulate(mb)
    # debug_startyr = 57
    # debug_endyr = 61
    # Fill in non-glaciated areas - needed for OGGM dynamics to remove small ice flux into next bin
    mb_filled = mb.copy()
    if len(glac_idx_t0) > 3:
        # Extrapolate below the glacier with the glacier's own mb-elevation gradient
        mb_max = np.max(mb[glac_idx_t0])
        mb_min = np.min(mb[glac_idx_t0])
        height_max = np.max(heights[glac_idx_t0])
        height_min = np.min(heights[glac_idx_t0])
        mb_grad = (mb_min - mb_max) / (height_max - height_min)
        mb_filled[(mb_filled==0) & (heights < height_max)] = (
                mb_min + mb_grad * (height_min - heights[(mb_filled==0) & (heights < height_max)]))
    elif len(glac_idx_t0) >= 1 and len(glac_idx_t0) <= 3 and mb.max() <= 0:
        # Too few bins for a gradient: fill with the minimum (most negative) mb
        mb_min = np.min(mb[glac_idx_t0])
        height_max = np.max(heights[glac_idx_t0])
        mb_filled[(mb_filled==0) & (heights < height_max)] = mb_min
    # if year > debug_startyr and year < debug_endyr:
    #     print('mb_min:', mb_min)
    #
    # if year > debug_startyr and year < debug_endyr:
    #     import matplotlib.pyplot as plt
    #     plt.plot(mb_filled, heights, '.')
    #     plt.ylabel('Elevation')
    #     plt.xlabel('Mass balance (mwea)')
    #     plt.show()
    #
    # print('mb_filled:', mb_filled)
    return mb_filled
#%%
def _convert_glacwide_results(self, year, glacier_area, heights,
                              fls=None, fl_id=None, option_areaconstant=False, debug=False):
    """
    Convert raw runmassbalance function output to glacier-wide results for output package 2

    Aggregates the binned monthly fluxes (temperature, precipitation, accumulation,
    refreeze, melt, frontal ablation) into glacier-wide monthly series, caps melt so
    annual mass loss cannot exceed the remaining glacier volume, and records annual
    area/volume, snowline, ELA, and off-glacier fluxes. All results are written in
    place to the glac_wide_* / offglac_wide_* arrays; nothing is returned.

    Parameters
    ----------
    year : int
        the year of the model run starting from zero
    glacier_area : np.array
        glacier area for each elevation bin (m2)
    heights : np.array
        surface elevation of each elevation bin (m a.s.l.)
    fls : object
        flowline object (list of flowlines; fls[fl_id] is used)
    fl_id : int
        flowline id
    option_areaconstant : bool
        if False, off-glacier fluxes are also aggregated for exposed bins
    debug : bool
        unused here; kept for interface consistency
    """
    # Glacier area
    glac_idx = glacier_area.nonzero()[0]
    glacier_area_monthly = glacier_area[:,np.newaxis].repeat(12,axis=1)
    # Check if need to adjust for complete removal of the glacier
    #  - needed for accurate runoff calcs and accurate mass balance components
    icethickness_t0 = getattr(fls[fl_id], 'thick', None)
    if icethickness_t0 is not None:
        # Mass loss cannot exceed glacier volume (area-weighted mean thickness, in m w.e.)
        mb_max_loss = (-1 * (glacier_area * icethickness_t0).sum() / glacier_area.sum() *
                       pygem_prms.density_ice / pygem_prms.density_water)
        # Check annual climatic mass balance (mwea)
        mb_mwea = ((glacier_area * self.glac_bin_massbalclim[:,12*year:12*(year+1)].sum(1)).sum() /
                   glacier_area.sum())
    if len(glac_idx) > 0:
        # Quality control for thickness
        if hasattr(fls[fl_id], 'thick'):
            thickness = fls[fl_id].thick
            glacier_area[thickness == 0] = 0
            section = fls[fl_id].section
            section[thickness == 0] = 0
            # Glacier-wide area (m2)
            self.glac_wide_area_annual[year] = glacier_area.sum()
            # Glacier-wide volume (m3)
            self.glac_wide_volume_annual[year] = (section * fls[fl_id].dx_meter).sum()
        else:
            # Glacier-wide area (m2)
            self.glac_wide_area_annual[year] = glacier_area.sum()
        # Glacier-wide temperature (degC) — area-weighted mean over glacier bins
        self.glac_wide_temp[12*year:12*(year+1)] = (
                (self.bin_temp[:,12*year:12*(year+1)][glac_idx] * glacier_area_monthly[glac_idx]).sum(0) /
                glacier_area.sum())
        # Glacier-wide precipitation (m3)
        self.glac_wide_prec[12*year:12*(year+1)] = (
                (self.bin_prec[:,12*year:12*(year+1)][glac_idx] * glacier_area_monthly[glac_idx]).sum(0))
        # Glacier-wide accumulation (m3 w.e.)
        self.glac_wide_acc[12*year:12*(year+1)] = (
                (self.bin_acc[:,12*year:12*(year+1)][glac_idx] * glacier_area_monthly[glac_idx]).sum(0))
        # Glacier-wide refreeze (m3 w.e.)
        self.glac_wide_refreeze[12*year:12*(year+1)] = (
                (self.glac_bin_refreeze[:,12*year:12*(year+1)][glac_idx] * glacier_area_monthly[glac_idx]).sum(0))
        # Glacier-wide melt (m3 w.e.)
        self.glac_wide_melt[12*year:12*(year+1)] = (
                (self.glac_bin_melt[:,12*year:12*(year+1)][glac_idx] * glacier_area_monthly[glac_idx]).sum(0))
        # If mass loss more negative than glacier mass, reduce melt so glacier completely melts (no excess)
        if icethickness_t0 is not None and mb_mwea < mb_max_loss:
            melt_yr_raw = self.glac_wide_melt[12*year:12*(year+1)].sum()
            melt_yr_max = (self.glac_wide_volume_annual[year]
                           * pygem_prms.density_ice / pygem_prms.density_water +
                           self.glac_wide_acc[12*year:12*(year+1)].sum() +
                           self.glac_wide_refreeze[12*year:12*(year+1)].sum())
            melt_frac = melt_yr_max / melt_yr_raw
            # Update glacier-wide melt (m3 w.e.)
            self.glac_wide_melt[12*year:12*(year+1)] = self.glac_wide_melt[12*year:12*(year+1)] * melt_frac
        # Glacier-wide frontal ablation (m3 w.e.)
        self.glac_wide_frontalablation[12*year:12*(year+1)] = (
                (self.glac_bin_frontalablation[:,12*year:12*(year+1)][glac_idx] * glacier_area_monthly[glac_idx]
                 ).sum(0))
        # Glacier-wide total mass balance (m3 w.e.)
        if np.abs(self.glac_wide_frontalablation.sum()) > 0:
            print('\n\nCHECK IF FRONTAL ABLATION IS POSITIVE OR NEGATIVE - WHETHER ADD OR SUBTRACT BELOW HERE')
        self.glac_wide_massbaltotal[12*year:12*(year+1)] = (
                self.glac_wide_acc[12*year:12*(year+1)] + self.glac_wide_refreeze[12*year:12*(year+1)]
                - self.glac_wide_melt[12*year:12*(year+1)] - self.glac_wide_frontalablation[12*year:12*(year+1)])
        # Glacier-wide runoff (m3)
        self.glac_wide_runoff[12*year:12*(year+1)] = (
                self.glac_wide_prec[12*year:12*(year+1)] + self.glac_wide_melt[12*year:12*(year+1)] -
                self.glac_wide_refreeze[12*year:12*(year+1)])
        # Snow line altitude (m a.s.l.): lowest bin with snow in each month
        heights_monthly = heights[:,np.newaxis].repeat(12, axis=1)
        snow_mask = np.zeros(heights_monthly.shape)
        snow_mask[self.glac_bin_snowpack[:,12*year:12*(year+1)] > 0] = 1
        heights_monthly_wsnow = heights_monthly * snow_mask
        heights_monthly_wsnow[heights_monthly_wsnow == 0] = np.nan
        heights_change = np.zeros(heights.shape)
        heights_change[0:-1] = heights[0:-1] - heights[1:]
        try:
            snowline_idx = np.nanargmin(heights_monthly_wsnow, axis=0)
            self.glac_wide_snowline[12*year:12*(year+1)] = heights[snowline_idx] - heights_change[snowline_idx] / 2
        except ValueError:
            # np.nanargmin raises ValueError ("All-NaN slice encountered") when a month
            # has no snow in any bin; fall back to a per-month loop and mark those
            # snow-free months as NaN. Catching only ValueError (instead of a bare
            # except) lets unrelated errors propagate rather than be hidden.
            snowline_idx = np.zeros((heights_monthly_wsnow.shape[1])).astype(int)
            snowline_idx_nan = []
            for ncol in range(heights_monthly_wsnow.shape[1]):
                if ~np.isnan(heights_monthly_wsnow[:,ncol]).all():
                    snowline_idx[ncol] = np.nanargmin(heights_monthly_wsnow[:,ncol])
                else:
                    snowline_idx_nan.append(ncol)
            heights_manual = heights[snowline_idx] - heights_change[snowline_idx] / 2
            heights_manual[snowline_idx_nan] = np.nan
            self.glac_wide_snowline[12*year:12*(year+1)] = heights_manual
        # Equilibrium line altitude (m a.s.l.): lowest bin with positive annual climatic mb
        ela_mask = np.zeros(heights.shape)
        ela_mask[self.glac_bin_massbalclim_annual[:,year] > 0] = 1
        ela_onlypos = heights * ela_mask
        ela_onlypos[ela_onlypos == 0] = np.nan
        if np.isnan(ela_onlypos).all():
            self.glac_wide_ELA_annual[year] = np.nan
        else:
            ela_idx = np.nanargmin(ela_onlypos)
            self.glac_wide_ELA_annual[year] = heights[ela_idx] - heights_change[ela_idx] / 2
    # ===== Off-glacier ====
    offglac_idx = np.where(self.offglac_bin_area_annual[:,year] > 0)[0]
    if option_areaconstant == False and len(offglac_idx) > 0:
        offglacier_area_monthly = self.offglac_bin_area_annual[:,year][:,np.newaxis].repeat(12,axis=1)
        # Off-glacier precipitation (m3)
        self.offglac_wide_prec[12*year:12*(year+1)] = (
                (self.bin_prec[:,12*year:12*(year+1)][offglac_idx] * offglacier_area_monthly[offglac_idx]).sum(0))
        # Off-glacier melt (m3 w.e.)
        self.offglac_wide_melt[12*year:12*(year+1)] = (
                (self.offglac_bin_melt[:,12*year:12*(year+1)][offglac_idx] * offglacier_area_monthly[offglac_idx]
                 ).sum(0))
        # Off-glacier refreeze (m3 w.e.)
        self.offglac_wide_refreeze[12*year:12*(year+1)] = (
                (self.offglac_bin_refreeze[:,12*year:12*(year+1)][offglac_idx] * offglacier_area_monthly[offglac_idx]
                 ).sum(0))
        # Off-glacier runoff (m3)
        self.offglac_wide_runoff[12*year:12*(year+1)] = (
                self.offglac_wide_prec[12*year:12*(year+1)] + self.offglac_wide_melt[12*year:12*(year+1)] -
                self.offglac_wide_refreeze[12*year:12*(year+1)])
        # Off-glacier snowpack (m3 w.e.)
        self.offglac_wide_snowpack[12*year:12*(year+1)] = (
                (self.offglac_bin_snowpack[:,12*year:12*(year+1)][offglac_idx] * offglacier_area_monthly[offglac_idx]
                 ).sum(0))
def ensure_mass_conservation(self, diag):
    """
    Ensure mass conservation that may result from using OGGM's glacier dynamics model.

    Resolved on an annual basis: because OGGM's dynamics limit mass loss based on ice
    thickness and flux divergence, the realized volume change can be smaller than what
    the mass balance model recorded. The annual shortfall is removed from melt, and the
    monthly melt, total mass balance, and runoff series are rescaled accordingly so all
    glacier-wide outputs remain mass conserving.

    Note: other dynamical options (mass redistribution curves, volume-length-area
    scaling) work from total volume change and do not impose this limit, so they may
    systematically overestimate mass loss relative to OGGM's dynamical model.

    Parameters
    ----------
    diag : object
        OGGM diagnostics with annual `volume_m3` values (length nyears+1)
    """
    # Annual ice-volume change implied by the mass balance model (m3 ice)
    dv_mbmod = (self.glac_wide_massbaltotal.reshape(-1,12).sum(1) *
                pygem_prms.density_water / pygem_prms.density_ice)
    # Annual ice-volume change actually realized by the dynamical model (m3 ice)
    vols = diag.volume_m3.values
    dv_diag = vols[1:] - vols[:-1]
    # Shortfall the dynamics could not accommodate
    dv_gap = dv_diag - dv_mbmod
    # Annual melt volume from the mass balance model (m3 ice)
    dv_melt = (self.glac_wide_melt.reshape(-1,12).sum(1) *
               pygem_prms.density_water / pygem_prms.density_ice)
    # Per-year melt scale factor: only years with both nonzero total change and
    # nonzero melt are adjusted; all other years are zeroed out.
    melt_scale = np.zeros(dv_mbmod.shape)
    adjust_yrs = np.intersect1d(dv_mbmod.nonzero()[0], dv_melt.nonzero()[0])
    # print('change_idx:', adjust_yrs)
    melt_scale[adjust_yrs] = 1 - dv_gap[adjust_yrs] / dv_melt[adjust_yrs]
    # Broadcast the annual factor to months and rescale glacier-wide melt (m3 w.e.)
    self.glac_wide_melt = self.glac_wide_melt * np.repeat(melt_scale, 12)
    # NOTE: a matching accumulation correction was considered and dropped — any
    # residual difference in no-melt years should be rounding error only.
    # Glacier-wide total mass balance (m3 w.e.)
    if np.abs(self.glac_wide_frontalablation.sum()) > 0:
        print('\n\nCHECK IF FRONTAL ABLATION IS POSITIVE OR NEGATIVE - WHETHER ADD OR SUBTRACT BELOW HERE')
        assert False, 'Need to account for frontal ablation properly here'
    self.glac_wide_massbaltotal = (self.glac_wide_acc + self.glac_wide_refreeze -
                                   self.glac_wide_melt - self.glac_wide_frontalablation)
    # Glacier-wide runoff (m3)
    self.glac_wide_runoff = self.glac_wide_prec + self.glac_wide_melt - self.glac_wide_refreeze
    # Record the volume change the dynamics ignored (m3 ice per year)
    self.glac_wide_volume_change_ignored_annual = dv_gap
#%%
def get_annual_frontalablation(self, heights, year=None, flowline=None, fl_id=None,
sea_level=0, debug=False):
"""NEED TO DETERMINE HOW TO INTEGRATE FRONTAL ABLATION WITH THE FLOWLINE MODEL
Returns annual climatic mass balance
Parameters
----------
heights : np.array
elevation bins
year : int
year starting with 0 to the number of years in the study
"""
print('hack until Fabien provides data')
class Dummy():
pass
flowline = Dummy()
flowline.area = self.glacier_area_t0
# Glacier indices
glac_idx_t0 = flowline.area_km2.nonzero()[0]
# FRONTAL ABLATION
# Glacier bed altitude [masl]
glac_idx_minelev = np.where(self.heights == self.heights[glac_idx_t0].min())[0][0]
glacier_bedelev = (self.heights[glac_idx_minelev] - self.icethickness_initial[glac_idx_minelev])
print('\n-----')
print(self.heights[glac_idx_minelev], self.icethickness_initial[glac_idx_t0], self.glacier_area_t0)
print('-----\n')
print('\nDELETE ME! Switch sea level back to zero\n')
sea_level = 200
if debug and self.glacier_rgi_table['TermType'] != 0:
print('\nyear:', year, '\n sea level:', sea_level, 'bed elev:', np.round(glacier_bedelev, 2))
# If glacier bed below sea level, compute frontal ablation
if glacier_bedelev < sea_level:
# Volume [m3] and bed elevation [masl] of each bin
print('estimate ablation')
# glac_bin_volume = glacier_area_t0 * icethickness_t0
# glac_bin_bedelev = np.zeros((glacier_area_t0.shape))
# glac_bin_bedelev[glac_idx_t0] = heights[glac_idx_t0] - icethickness_initial[glac_idx_t0]
#
# # Option 1: Use Huss and Hock (2015) frontal ablation parameterizations
# # Frontal ablation using width of lowest bin can severely overestimate the actual width of the
# # calving front. Therefore, use estimated calving width from satellite imagery as appropriate.
# if pygem_prms.option_frontalablation_k == 1 and frontalablation_k == None:
# # Calculate frontal ablation parameter based on slope of lowest 100 m of glacier
# glac_idx_slope = np.where((heights <= sea_level + 100) &
# (heights >= heights[glac_idx_t0].min()))[0]
# elev_change = np.abs(heights[glac_idx_slope[0]] - heights[glac_idx_slope[-1]])
# # length of lowest 100 m of glacier
# length_lowest100m = (glacier_area_t0[glac_idx_slope] / width_t0[glac_idx_slope]).sum()
# # slope of lowest 100 m of glacier
# slope_lowest100m = np.rad2deg(np.arctan(elev_change/length_lowest100m))
# # Frontal ablation parameter
# frontalablation_k = frontalablation_k0 * slope_lowest100m
#
# # Calculate frontal ablation
# # Bed elevation with respect to sea level
# # negative when bed is below sea level (Oerlemans and Nick, 2005)
# waterdepth = sea_level - glacier_bedelev
# # Glacier length [m]
# length = (glacier_area_t0[width_t0 > 0] / width_t0[width_t0 > 0]).sum()
# # Height of calving front [m]
# height_calving = np.max([pygem_prms.af*length**0.5,
# pygem_prms.density_water / pygem_prms.density_ice * waterdepth])
# # Width of calving front [m]
# if pygem_prms.hyps_data in ['oggm']:
# width_calving = width_t0[np.where(heights == heights[glac_idx_t0].min())[0][0]] * 1000
# elif pygem_prms.hyps_data in ['Huss', 'Farinotti']:
# if glacier_rgi_table.RGIId in pygem_prms.width_calving_dict:
# width_calving = np.float64(pygem_prms.width_calving_dict[glacier_rgi_table.RGIId])
# else:
# width_calving = width_t0[glac_idx_t0[0]] * 1000
# # Volume loss [m3] due to frontal ablation
# frontalablation_volumeloss = (
# np.max([0, (frontalablation_k * waterdepth * height_calving)]) * width_calving)
# # Maximum volume loss is volume of bins with their bed elevation below sea level
# glac_idx_fa = np.where((glac_bin_bedelev < sea_level) & (glacier_area_t0 > 0))[0]
# frontalablation_volumeloss_max = glac_bin_volume[glac_idx_fa].sum()
# if frontalablation_volumeloss > frontalablation_volumeloss_max:
# frontalablation_volumeloss = frontalablation_volumeloss_max
#
#
#
# if debug:
# print('frontalablation_k:', frontalablation_k)
# print('width calving:', width_calving)
# print('frontalablation_volumeloss [m3]:', frontalablation_volumeloss)
# print('frontalablation_massloss [Gt]:', frontalablation_volumeloss * pygem_prms.density_water /
# pygem_prms.density_ice / 10**9)
# print('frontalalabion_volumeloss_max [Gt]:', frontalablation_volumeloss_max *
# pygem_prms.density_water / pygem_prms.density_ice / 10**9)
## print('glac_idx_fa:', glac_idx_fa)
## print('glac_bin_volume:', glac_bin_volume[0])
## print('glac_idx_fa[bin_count]:', glac_idx_fa[0])
## print('glac_bin_volume[glac_idx_fa[bin_count]]:', glac_bin_volume[glac_idx_fa[0]])
## print('glacier_area_t0[glac_idx_fa[bin_count]]:', glacier_area_t0[glac_idx_fa[0]])
## print('glac_bin_frontalablation:', glac_bin_frontalablation[glac_idx_fa[0], step])
#
# # Frontal ablation [mwe] in each bin
# bin_count = 0
# while (frontalablation_volumeloss > pygem_prms.tolerance) and (bin_count < len(glac_idx_fa)):
# # Sort heights to ensure it's universal (works with OGGM and Huss)
# heights_calving_sorted = np.argsort(heights[glac_idx_fa])
# calving_bin_idx = heights_calving_sorted[bin_count]
# # Check if entire bin removed or not
# if frontalablation_volumeloss >= glac_bin_volume[glac_idx_fa[calving_bin_idx]]:
# glac_bin_frontalablation[glac_idx_fa[calving_bin_idx], step] = (
# glac_bin_volume[glac_idx_fa[calving_bin_idx]] /
# glacier_area_t0[glac_idx_fa[calving_bin_idx]]
# * pygem_prms.density_ice / pygem_prms.density_water)
# else:
# glac_bin_frontalablation[glac_idx_fa[calving_bin_idx], step] = (
# frontalablation_volumeloss / glacier_area_t0[glac_idx_fa[calving_bin_idx]]
# * pygem_prms.density_ice / pygem_prms.density_water)
# frontalablation_volumeloss += (
# -1 * glac_bin_frontalablation[glac_idx_fa[calving_bin_idx],step] * pygem_prms.density_water
# / pygem_prms.density_ice * glacier_area_t0[glac_idx_fa[calving_bin_idx]])
#
# if debug:
# print('glacier idx:', glac_idx_fa[calving_bin_idx],
# 'volume loss:', (glac_bin_frontalablation[glac_idx_fa[calving_bin_idx], step] *
# glacier_area_t0[glac_idx_fa[calving_bin_idx]] * pygem_prms.density_water /
# pygem_prms.density_ice).round(0))
# print('remaining volume loss:', frontalablation_volumeloss, 'tolerance:', pygem_prms.tolerance)
#
# bin_count += 1
#
# if debug:
# print('frontalablation_volumeloss remaining [m3]:', frontalablation_volumeloss)
# print('ice thickness:', icethickness_t0[glac_idx_fa[0]].round(0),
# 'waterdepth:', waterdepth.round(0),
# 'height calving front:', height_calving.round(0),
# 'width [m]:', (width_calving).round(0))
return 0
#%%
# # Example of how to store variables from within the other functions (ex. mass balance components)
# self.diag_df = pd.DataFrame()
# # Example of what could be done!
# def _refreeze_term(self, heights, year):
#
# return 0
# ===== SURFACE TYPE FUNCTIONS =====
def _surfacetypebinsinitial(self, elev_bins):
"""
Define initial surface type according to median elevation such that the melt can be calculated over snow or ice.
Convention: (0 = off-glacier, 1 = ice, 2 = snow, 3 = firn, 4 = debris).
Function options: 1 =
Function options specified in pygem_pygem_prms.py:
- option_surfacetype_initial
> 1 (default) - use median elevation to classify snow/firn above the median and ice below
> 2 - use mean elevation instead
- include_firn : Boolean
To-do list
----------
Add option_surfacetype_initial to specify an AAR ratio and apply this to estimate initial conditions
Parameters
----------
elev_bins : np.ndarray
Elevation bins [masl]
Returns
-------
surfacetype : np.ndarray
Updated surface type for each elevation bin
firnline_idx : int
Firn line index
"""
surfacetype = np.zeros(self.glacier_area_initial.shape)
# Option 1 - initial surface type based on the median elevation
if pygem_prms.option_surfacetype_initial == 1:
surfacetype[(elev_bins < self.glacier_rgi_table.loc['Zmed']) & (self.glacier_area_initial > 0)] = 1
surfacetype[(elev_bins >= self.glacier_rgi_table.loc['Zmed']) & (self.glacier_area_initial > 0)] = 2
# Option 2 - initial surface type based on the mean elevation
elif pygem_prms.option_surfacetype_initial ==2:
surfacetype[(elev_bins < self.glacier_rgi_table['Zmean']) & (self.glacier_area_initial > 0)] = 1
surfacetype[(elev_bins >= self.glacier_rgi_table['Zmean']) & (self.glacier_area_initial > 0)] = 2
else:
print("This option for 'option_surfacetype' does not exist. Please choose an option that exists. "
+ "Exiting model run.\n")
exit()
# Compute firnline index
try:
# firn in bins >= firnline_idx
firnline_idx = np.where(surfacetype==2)[0][0]
except:
# avoid errors if there is no firn, i.e., the entire glacier is melting
firnline_idx = np.where(surfacetype!=0)[0][-1]
# If firn is included, then specify initial firn conditions
if pygem_prms.include_firn == 1:
surfacetype[surfacetype == 2] = 3
# everything initially considered snow is considered firn, i.e., the model initially assumes there is no
# snow on the surface anywhere.
return surfacetype, firnline_idx
def _surfacetypebinsannual(self, surfacetype, glac_bin_massbalclim_annual, year_index):
"""
Update surface type according to climatic mass balance over the last five years.
If 5-year climatic balance is positive, then snow/firn. If negative, then ice/debris.
Convention: 0 = off-glacier, 1 = ice, 2 = snow, 3 = firn, 4 = debris
Function Options:
> 1 (default) - update surface type according to Huss and Hock (2015)
> 2 - Radic and Hock (2011)
Huss and Hock (2015): Initially, above median glacier elevation is firn, below is ice. Surface type updated for
each elevation band and month depending on specific mass balance. If the cumulative balance since the start
of mass balance year is positive, then snow is assigned. If the cumulative mass balance is negative (i.e.,
all snow of current mass balance year has melted), then bare ice or firn exposed. Surface type is assumed to
be firn if the elevation band's average annual balance over the preceding 5 years (B_t-5_avg) is positive. If
B_t-5_avg is negative, surface type is ice.
> climatic mass balance calculated at each bin and used with the mass balance over the last 5 years to
determine whether the surface is firn or ice. Snow is separate based on each month.
Radic and Hock (2011): "DDF_snow is used above the ELA regardless of snow cover. Below the ELA, use DDF_ice is
used only when snow cover is 0. ELA is calculated from observed annual mass balance profiles averaged over
the observational period and is kept constant in time for the calibration period. For the future projections,
ELA is set to the mean glacier height and is time dependent since glacier volume, area, and length are time
dependent (volume-area-length scaling).
Bliss et al. (2014) uses the same as Valentina's model
Parameters
----------
surfacetype : np.ndarray
Surface type for each elevation bin
glac_bin_massbalclim_annual : np.ndarray
Annual climatic mass balance for each year and each elevation bin
year_index : int
Count of the year of model run (first year is 0)
Returns
-------
surfacetype : np.ndarray
Updated surface type for each elevation bin
firnline_idx : int
Firn line index
"""
# Next year's surface type is based on the bin's average annual climatic mass balance over the last 5 years. If
# less than 5 years, then use the average of the existing years.
if year_index < 5:
# Calculate average annual climatic mass balance since run began
massbal_clim_mwe_runningavg = glac_bin_massbalclim_annual[:,0:year_index+1].mean(1)
else:
massbal_clim_mwe_runningavg = glac_bin_massbalclim_annual[:,year_index-4:year_index+1].mean(1)
# If the average annual specific climatic mass balance is negative, then the surface type is ice (or debris)
surfacetype[(surfacetype !=0 ) & (massbal_clim_mwe_runningavg <= 0)] = 1
# If the average annual specific climatic mass balance is positive, then the surface type is snow (or firn)
surfacetype[(surfacetype != 0) & (massbal_clim_mwe_runningavg > 0)] = 2
# Compute the firnline index
try:
# firn in bins >= firnline_idx
firnline_idx = np.where(surfacetype==2)[0][0]
except:
# avoid errors if there is no firn, i.e., the entire glacier is melting
firnline_idx = np.where(surfacetype!=0)[0][-1]
# Apply surface type model options
# If firn surface type option is included, then snow is changed to firn
if pygem_prms.include_firn == 1:
surfacetype[surfacetype == 2] = 3
return surfacetype, firnline_idx
def _surfacetypeDDFdict(self, modelprms, include_firn=pygem_prms.include_firn,
option_ddf_firn=pygem_prms.option_ddf_firn):
"""
Create a dictionary of surface type and its respective DDF.
Convention: [0=off-glacier, 1=ice, 2=snow, 3=firn, 4=debris]
To-do list
----------
- Add option_surfacetype_initial to specify an AAR ratio and apply this to estimate initial conditions
Parameters
----------
modelprms : dictionary
Model parameters may include kp (precipitation factor), precgrad (precipitation gradient), ddfsnow, ddfice,
tsnow_threshold (temperature threshold for snow/rain), tbias (temperature bias)
include_firn : Boolean
Option to include or exclude firn (specified in pygem_pygem_prms.py)
option_ddf_firn : int
Option for the degree day factor of firn to be the average of snow and ice or a different value
Returns
-------
surfacetype_ddf_dict : dictionary
Dictionary relating the surface types with their respective degree day factors
"""
surfacetype_ddf_dict = {
0: modelprms['ddfsnow'],
1: modelprms['ddfice'],
2: modelprms['ddfsnow']}
if include_firn:
if option_ddf_firn == 0:
surfacetype_ddf_dict[3] = modelprms['ddfsnow']
elif option_ddf_firn == 1:
surfacetype_ddf_dict[3] = np.mean([modelprms['ddfsnow'],modelprms['ddfice']])
return surfacetype_ddf_dict
| StarcoderdataPython |
4821278 | <gh_stars>0
import flask_login as login
from flask import Blueprint, Flask, Markup, redirect, request, url_for
from redata.ui_admin.utils import (
BaseRedataView,
JSONField,
grafana_url_formatter_fun,
table_details_link_formatter,
)
class ChecksTableView(BaseRedataView):
    """Flask-Admin style model view for the checks table.

    Mostly declarative configuration; only authenticated users may access it.
    """
    # Checks cannot be deleted from the UI, but a details page is available.
    can_delete = False
    can_view_details = True
    # Free-text search over these columns; list view shows the columns below.
    column_searchable_list = ("name", "metrics")
    column_list = ["table", "name", "metrics", "created_at"]
    # created_at is set automatically, so keep it out of the edit/create form.
    form_excluded_columns = ["created_at"]
    def table_details_formatter(self, context, model, name):
        # Render the related table as a link to its details page.
        return table_details_link_formatter(model.table)
    def is_accessible(self):
        # Restrict the view to logged-in users.
        return login.current_user.is_authenticated
    column_formatters = {
        "created_at": BaseRedataView._user_formatter_time,
        "table": table_details_formatter,
    }
    # Edit JSON columns with a dedicated JSON form field.
    form_overrides = {
        "metrics": JSONField,
        "query": JSONField,
    }
| StarcoderdataPython |
3371315 | """
You are given an integer, N. Write a program to determine if N is an element of the Fibonacci Sequence.
The first few elements of fibonacci sequence are 0,1,1,2,3,5,8,13.... A fibonacci sequence is one where every element is
a sum of the previous two elements in the sequence. The first two elements are 0 and 1.
Formally:
fib0 = 0
fib1 = 1
fibn = fibn-1 + fibn-2 \forall n > 1
Input Format
The first line contains T, number of test cases.
T lines follows. Each line contains an integer N.
"""
__author__ = 'Danyang'
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative replacement for the original reduce/xrange one-liner: it keeps
    the same interface, works on both Python 2 and Python 3 (no bare
    ``reduce``/``xrange``), and runs in O(n) time and O(1) space.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
class Solution(object):
    def solve(self, cipher):
        """Return 'IsFibo' if the number is a Fibonacci number, else 'IsNotFibo'.

        :param cipher: the cipher -- a non-negative integer as a decimal string

        The original implementation re-derived fib(n) from scratch on every
        loop iteration (O(n) work per step, O(n^2) overall); here the sequence
        is walked once with a running pair, and the module-level ``fib`` helper
        is no longer needed.
        """
        num = int(cipher)
        a, b = 0, 1
        # Advance until the sequence reaches or passes num.
        while a < num:
            a, b = b, a + b
        if a == num:
            return "IsFibo"
        return "IsNotFibo"
if __name__ == "__main__":
    import sys
    # Python 2 test driver: reads the test cases from a local "1.in" file
    # (first line = number of cases, one integer per following line) and
    # prints one verdict per line.  NOTE: the file handle is never closed.
    f = open("1.in", "r")
    # f = sys.stdin
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # construct cipher
        cipher = f.readline().strip()
        # solve
        s = "%s\n" % (Solution().solve(cipher))
        print s,
| StarcoderdataPython |
147741 | """Tests the `session` module in stand-alone mode."""
########################################
# Dependencies #
########################################
import parent # noqa F401
import mph
from fixtures import logging_disabled
from pytest import raises
from platform import system
from sys import argv
import logging
########################################
# Tests #
########################################
def test_start():
    """Exercise `mph.start` in stand-alone mode.

    Creates a client, adds and removes an empty model, and verifies the
    error behavior after removal.  NOTE(review): the test is skipped (early
    return) on anything but Windows -- presumably stand-alone mode is only
    supported there; confirm against the mph docs.
    """
    if system() != 'Windows':
        return
    # Start a stand-alone client limited to a single core.
    client = mph.start(cores=1)
    assert client.java
    assert client.cores == 1
    assert repr(client) == 'Client(stand-alone)'
    # Create a model and check it is registered with the client.
    model = client.create('empty')
    assert 'empty' in client.names()
    assert model in client.models()
    (model/'components').create(True)
    # Remove the model again and check it is gone.
    client.remove(model)
    assert 'empty' not in client.names()
    assert model not in client.models()
    with logging_disabled():
        # Accessing a removed model must raise.
        with raises(Exception, match='Model node X is removed'):
            model.java.component()
        # Removing it twice is an error.
        with raises(ValueError):
            client.remove(model)
        # A stand-alone client can neither connect to a server nor disconnect.
        with raises(RuntimeError):
            client.connect(2036)
        with raises(RuntimeError):
            client.disconnect()
########################################
# Main #
########################################
if __name__ == '__main__':
    # Command-line switches: "log" turns logging on, "debug" raises verbosity.
    options = set(argv[1:])
    if 'log' in options:
        log_level = logging.DEBUG if 'debug' in options else logging.INFO
        logging.basicConfig(
            level=log_level,
            format='[%(asctime)s.%(msecs)03d] %(message)s',
            datefmt='%H:%M:%S')
    test_start()
| StarcoderdataPython |
1772494 | import pendulum
class Clock(object):
    """Class-level helpers returning the current date/time via pendulum."""
    @classmethod
    def today(cls, tz="UTC"):
        # Current date (a date object, not a datetime) in the given timezone.
        return pendulum.today(tz=tz).date()
    @classmethod
    def now(cls, tz="UTC"):
        # Timezone-aware current datetime; defaults to UTC.
        return pendulum.now(tz=tz)
| StarcoderdataPython |
31946 | <reponame>Terence-Guan/Python.HackerRank<gh_stars>10-100
# Echo one line read from standard input back to standard output.
line = input()
print(line) | StarcoderdataPython
3291058 | <reponame>xNovax/RoomScout<gh_stars>10-100
from django.contrib.auth import get_user_model
from django.test import TestCase
from houses.models import House
from utils.models import HouseImage
from utils.streetview import load_house_image
class StreetViewTestCase(TestCase):
    """Tests for utils.streetview.load_house_image."""
    def setUp(self):
        # Fixture: one user owning one fully geocoded house in Oshawa, ON.
        User = get_user_model()
        user = User.objects.create_user(username='Fred_Flintstone', email='<EMAIL>', password='<PASSWORD>')
        self.user = user
        house = House.objects.create(user=self.user)
        # Google Maps place id plus coordinates and full address fields.
        house.place_id = 'EiwyNTI5IFN0YWxsaW9uIERyLCBPc2hhd2EsIE9OIEwxSCA3SzQsIENhbmFkYSIxEi8KFAoSCY_JD3vDG9WJEe3JFhlBvwOKEOETKhQKEgnrS9FlwxvViRHYx20MM9m-8g'
        house.lat = '43.95858010000001'
        house.lon = '-78.91587470000002'
        house.street_number = 2529
        house.street_name = 'Stallion Drive'
        house.city = 'Oshawa'
        house.prov_state = 'ON'
        house.postal_code = 'L1H 0M4'
        house.country = 'Canada'
        house.save()
        self.house = house
    def test_load_house_image(self):
        """Loading a street-view image must add exactly one HouseImage row.

        NOTE(review): this appears to hit the live Street View service --
        confirm whether load_house_image performs a network request.
        """
        print('Testing utils.streetview.load_house_image(house)')
        pre_count = HouseImage.objects.count()
        load_house_image(self.house)
        post_count = HouseImage.objects.count()
        self.assertEqual(pre_count + 1, post_count)
| StarcoderdataPython |
146057 | <gh_stars>0
#!/usr/bin/env python3
'''djs setup file'''
from setuptools import setup, find_packages
from os.path import abspath, dirname, join
from io import open
# Directory containing this setup.py; used to locate README.md.
root_dir = abspath(dirname(__file__))
# NOTE(review): short_description is never used below -- the setup() call
# repeats the text as a literal instead.  Consider description=short_description.
short_description = '''A framework for varying model parameters and automating concurrent usage
of the Wrf-hydro/National Water Model using Docker'''
# Use README.md as long description
with open(join(root_dir, 'README.md'), mode='r') as f:
    long_description = f.read()
setup(
    # Package version and information
    name='djs',
    version='0.0.2',
    packages=find_packages(exclude=['*test*']),
    url='https://github.com/aaraney/NWM-Dockerized-Job-Scheduler',
    # Set entry point for CLI
    entry_points= {
        'console_scripts' : ['djs=djs.cli.djs:main'],
    },
    # Package description information
    description='A framework for varying model parameters and automating concurrent usage of the Wrf-hydro/National Water Model using Docker',
    long_description=long_description,
    long_description_content_type='text/markdown',
    # Author information
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT License',
    # Search keywords
    keywords='docker nwm hydrology cuahsi noaa owp nwc',
    python_requires='>=3.5',
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'click',
        'docker',
        'netcdf4',
        'numpy',
        'pandas',
        'pyyaml',
        'scipy',
        'xarray',
    ],
)
| StarcoderdataPython |
1620676 | <filename>tracker/cli/setup.py
from os import rename
from os.path import exists
from os.path import join
from pathlib import Path
from re import IGNORECASE
from re import match
from sys import exit
from click import BadParameter
from click import Choice
from click import echo
from click import option
from click import pass_context
from click import password_option
from config import TRACKER_PASSWORD_LENGTH_MAX
from config import TRACKER_PASSWORD_LENGTH_MIN
from config import basedir
from tracker.model.enum import UserRole
from tracker.model.user import User
from tracker.model.user import username_regex
from .db import initdb
from .util import cli
@cli.group()
def setup():
    """Setup and bootstrap the application."""
    # Click group: the database/bootstrap/user subcommands below attach via
    # @setup.command(); the group body itself intentionally does nothing.
    pass
@setup.command()
@option('--purge', is_flag=True, help='Purge all data and tables.')
@pass_context
def database(ctx, purge=False):
    """Initialize the database tables."""
    # Transparently migrate a database created under the legacy file name
    # before delegating table creation to the initdb command.
    legacy_db = join(basedir, 'app.db')
    current_db = join(basedir, 'tracker.db')
    migration_needed = exists(legacy_db) and not exists(current_db)
    if migration_needed:
        echo('Renaming old database file...', nl=False)
        rename(legacy_db, current_db)
        echo('done')
    ctx.invoke(initdb, purge=purge)
@setup.command()
@option('--purge', is_flag=True, help='Purge all data and tables.')
@pass_context
def bootstrap(ctx, purge=False):
    """Bootstrap the environment.
    Create all folders, database tables and other things that are required to
    run the application.
    An initial administrator user must be created separately."""
    echo('Creating folders...', nl=False)
    # Create every required pacman folder (idempotent: parents created,
    # existing directories are fine).
    for subdir in ('pacman/cache', 'pacman/log', 'pacman/arch/x86_64/db'):
        Path(join(basedir, subdir)).mkdir(parents=True, exist_ok=True)
    echo('done')
    # Database initialization is shared with the `database` subcommand.
    ctx.invoke(database, purge=purge)
def validate_username(ctx, param, username):
    """Click callback: enforce length and pattern limits on the username."""
    if len(username) > User.NAME_LENGTH:
        raise BadParameter('must not exceed {} characters'.format(User.NAME_LENGTH))
    # Reject empty values and anything outside the allowed pattern.
    if not (username and match(username_regex, username)):
        raise BadParameter('must match {}'.format(username_regex))
    return username
def validate_email(ctx, param, email):
    """Click callback: light-weight syntactic check of an e-mail address."""
    email_regex = r'^.+@([^.@][^@]+)$'
    # Reject empty values and anything not matching the pattern (case-insensitive).
    if not (email and match(email_regex, email, IGNORECASE)):
        raise BadParameter('must match {}'.format(email_regex))
    return email
def validate_password(ctx, param, password):
    """Click callback: generate a password on demand and validate its length.

    If no password was supplied, or the sentinel default 'generated' was kept,
    a random password is created and printed so the operator can record it.
    """
    from tracker.user import random_string
    if not password or 'generated' == password:
        # BUGFIX: this line was a corrupted placeholder ("<PASSWORD>_string()",
        # a syntax error); restore the call to random_string imported above.
        password = random_string()
        print('Generated password: {}'.format(password))
    if len(password) > TRACKER_PASSWORD_LENGTH_MAX or len(password) < TRACKER_PASSWORD_LENGTH_MIN:
        raise BadParameter('Error: password must be between {} and {} characters.'
                           .format(TRACKER_PASSWORD_LENGTH_MIN, TRACKER_PASSWORD_LENGTH_MAX))
    return password
@setup.command()
@option('--username', prompt=True, callback=validate_username, help='Username used to log in.')
@option('--email', prompt='E-mail', callback=validate_email, help='E-mail address of the user.')
@password_option(default='generated', callback=validate_password, help='Password for the user.')
@option('--role', type=Choice([role.name for role in UserRole]), default=UserRole.reporter.name,
        prompt=True, callback=lambda ctx, param, role: UserRole.fromstring(role),
        help='Permission group of the user.')
@option('--active/--inactive', default=True, prompt=True, help='Enable or disable the user.')
def user(username, email, password, role, active):
    """Create a new application user."""
    from tracker import db
    from tracker.user import hash_password
    from tracker.user import random_string

    # Uniqueness checks: both the username and the e-mail must be unused.
    if db.get(User, name=username):
        echo('Error: username already exists', err=True)
        exit(1)
    if db.get(User, email=email):
        echo('Error: e-mail already exists', err=True)
        exit(1)

    # Build and persist the account; the password is stored salted and hashed.
    account = User()
    account.name = username
    account.email = email
    account.salt = random_string()
    account.password = hash_password(password, account.salt)
    account.role = role
    account.active = active
    db.session.add(account)
    db.session.commit()
| StarcoderdataPython |
40888 | <filename>Kivy/gui/crud.py
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
class Principal(BoxLayout):
    # Root widget of the app; no behaviour is defined in Python.  Presumably
    # its layout comes from a matching "crud.kv" rules file (Kivy naming
    # convention) -- TODO confirm.
    pass
class Crud(App):
    # Kivy application object.
    def build(self):
        # build() must return the root widget displayed in the window.
        return Principal()
# Instantiate and start the app event loop at import/run time.
Crud().run()
| StarcoderdataPython |
3209992 | <filename>zookeeper/scripts/createServerList.py
#!/usr/bin/python
import argparse
# Command-line interface, parsed once at import time into the module-global
# ``args`` that main() reads below.
parser = argparse.ArgumentParser(prog='serverList')
# Positional arguments: the common server name prefix and the ensemble size.
parser.add_argument('serverName', help='input the zookeeper server general name')
parser.add_argument('serverNum', type=int, help='input the zookeeper servers number')
# Optional flags; nargs='?' also makes the flag's value optional.
parser.add_argument('--zkMode', nargs='?', help='input the zookeeper configuration mode, standalone or replicated')
parser.add_argument('--serviceName', nargs='?', help='input the zookeeper service name customized by k8s')
args = parser.parse_args()
def main():
    """Print one "server.N=host:2888:3888" line per zookeeper ensemble member.

    Nothing is printed in standalone mode (or when --zkMode is omitted).
    Reads the module-global ``args`` produced by argparse above.
    """
    if args.zkMode is not None and args.zkMode != "standalone":
        # serverName is a required positional, so the "zoo" fallback is only
        # a defensive default.
        server_name = args.serverName if args.serverName is not None else "zoo"
        num = args.serverNum if args.serverNum is not None else 0
        # Optional k8s service suffix appended to each host name.
        if args.serviceName is not None and args.serviceName.strip() != "":
            service_name = ".{0}".format(args.serviceName.strip())
        else:
            service_name = ""
        server_list = ["server.{0}={1}-{0}{2}:2888:3888".format(i, server_name, service_name)
                       for i in range(0, num)]
        for item in server_list:
            # print(...) with a single argument behaves identically on
            # Python 2 and 3; the original "print item" was Python-2 only.
            print(item)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1617195 | <reponame>cedadev/vulture
# -*- coding: utf-8 -*-
"""Top-level package for vulture."""
from .__version__ import __author__, __email__, __version__ # noqa: F401
from .wsgi import application # noqa: F401
# Test that the cfchecker and vulture versions are the same; failing fast at
# import keeps the two packages from drifting apart silently.
import cfchecker
cf_checker_version = cfchecker.__version__
if __version__ != cf_checker_version:
    raise Exception(f"Version mismatch between 'vulture' ({__version__}) "
                    f"and 'cfchecker' ({cf_checker_version})")
| StarcoderdataPython |
4839309 | <gh_stars>0
import numpy as np
import torch
import torch.nn as nn
from skimage.transform import resize
from tqdm import tqdm
class RISE(nn.Module):
    """A RISE class that computes saliency maps with RISE
    (Randomized Input Sampling for Explanation) over 3-D volumes.

    NOTE(review): masks are moved to CUDA unconditionally in generate_masks,
    so a GPU appears to be required -- confirm.
    """
    def __init__(self, model, input_size, N, p1, gpu_batch=100):
        # model: the network to explain; input_size: 3-D volume shape (L, H, W);
        # N: number of random masks; p1: probability a grid cell is kept;
        # gpu_batch: how many masked inputs are scored per forward pass.
        super(RISE, self).__init__()
        self.model = model
        self.input_size = input_size
        self.gpu_batch = gpu_batch
        self.N = N
        self.p1 = p1
    def generate_masks(self, N, s, p1, savepath='masks.npy'):
        """Create N random low-res binary grids, upsample them to input_size
        with random shifts, save them to *savepath*, and keep them on the GPU."""
        cell_size = np.ceil(np.array(self.input_size) / s)
        up_size = (s + 1) * cell_size
        # Coarse s x s x s Bernoulli(p1) grids, one per mask.
        grid = np.random.rand(N, s, s, s) < p1
        grid = grid.astype('float32')
        self.masks = np.empty((N, *self.input_size))
        for i in tqdm(range(N), desc='Generating filters'):
            # Random shifts
            x = np.random.randint(0, cell_size[0])
            y = np.random.randint(0, cell_size[1])
            z = np.random.randint(0, cell_size[2])
            # Linear upsampling and cropping
            self.masks[i, :, :, :] = resize(grid[i], up_size, order=1, mode='reflect',anti_aliasing=False)[x:x + self.input_size[0], y:y + self.input_size[1], z:z + self.input_size[2]]
        np.save(savepath, self.masks)
        self.masks = torch.from_numpy(self.masks).float()
        self.masks = self.masks.cuda()
    def load_masks(self, filepath):
        """Load previously saved masks and update N accordingly.

        NOTE(review): unlike generate_masks, the loaded masks are NOT moved to
        CUDA here, and self.p1 is not refreshed -- confirm this asymmetry.
        """
        self.masks = np.load(filepath)
        self.masks = torch.from_numpy(self.masks).float()
        self.N = self.masks.shape[0]
    def forward(self, x):
        """Return a per-class saliency volume of shape (classes, L, H, W).

        NOTE(review): self.model is expected to return a 4-tuple whose first
        element holds the class logits -- confirm against the model used.
        """
        N = self.N
        _, L, H, W = x.size()
        # Apply array of filters to the image
        stack = torch.mul(self.masks, x.data)
        stack = torch.unsqueeze(stack, 1)
        stack = stack.to(torch.float32)
        # p = nn.Softmax(dim=1)(model(stack)) processed in batches of gpu_batch
        p = []
        for i in range(0, N, self.gpu_batch):
            pred, _, _, _ = self.model(stack[i:min(i + self.gpu_batch, N)])
            p.append(nn.Softmax(dim=1)(pred))
        p = torch.cat(p)
        # Number of classes
        CL = p.size(1)
        # Weight each mask by the class probabilities and sum: (CL, N) @ (N, voxels).
        sal = torch.matmul(p.data.transpose(0, 1), self.masks.view(N, H * W * L))
        sal = sal.view((CL, L, H, W))
        # Normalize by mask count and keep-probability.
        sal = sal / N / self.p1
        return sal
| StarcoderdataPython |
1639227 | <reponame>Kyushi/pemoi
"""Index module. Render the index page."""
from flask import session as login_session, \
render_template
from sqlalchemy import desc
from pemoi import app
from .database_setup import Item
from .pmoi_db_session import db_session
@app.route('/')
@app.route('/index/')
def index():
    """Render index page with all public items.

    If a user session exists, all of that user's items (including private
    ones) are shown as well.
    """
    user_id = login_session.get("user_id")
    try:
        # Public items, plus every item owned by the current user.  The
        # original filter duplicated the "public" test on both sides of the
        # OR (and ``a & b == True`` parses as ``(a & b) == True``), so
        # private items were never shown, contradicting the docstring.
        query = db_session.query(Item)
        if user_id is None:
            query = query.filter(Item.public == True)
        else:
            query = query.filter((Item.public == True) | (Item.user_id == user_id))
        items = query.order_by(desc(Item.add_date)).all()
    except Exception:
        # Make sure that there is something to be passed to the template.
        items = None
    return render_template('index.html', items=items)
| StarcoderdataPython |
106181 | import numpy as np
import SimpleITK as sitk
# https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1CurvatureFlowImageFilter.html#details
def curvatureFlowImageFilter(img, verbose=False):
imgOriginal = img
convertOutput = False
if type(img) != sitk.SimpleITK.Image:
imgOriginal = sitk.GetImageFromArray(img)
convertOutput = True
imgSmooth = sitk.CurvatureFlow(image1=imgOriginal,
timeStep=0.125,
numberOfIterations=5)
if convertOutput:
imgSmooth = sitk.GetArrayFromImage(imgSmooth).astype(float)
return imgSmooth
| StarcoderdataPython |
1730480 | <reponame>oilshell/blog-code
#!/usr/bin/env python3
"""
powerset.py
"""
from __future__ import print_function
import sys
# Transcribing Rust code from
# https://lobste.rs/s/khbbac/generate_all_things#c_xflsh6
def push_powerset(acc, n):
    """Print every boolean n-tuple extending *acc*, one list per line.

    "Push" style: the consumer (print) is hard-wired; acc is mutated in
    place and restored before returning (True branch explored first).
    """
    if n == 0:
        print(acc)
        return
    for bit in (True, False):
        acc.append(bit)
        push_powerset(acc, n - 1)
        acc.pop()
def pull_powerset(n):
    """Yield every boolean n-list, True-first (pull/generator style)."""
    if n == 0:
        yield []
        return
    for bit in (True, False):
        for tail in pull_powerset(n - 1):
            yield [bit] + tail
def main(argv):
    """Demo driver: print the 3-element powerset in both styles."""
    print()
    print('PUSH STYLE')
    print()
    push_powerset([], 3)

    print()
    print('PULL STYLE')
    # Pull style exposes a flat iterator instead of printing internally.
    for subset in pull_powerset(3):
        print(subset)
if __name__ == '__main__':
    try:
        main(sys.argv)
    # Only RuntimeError is treated as a fatal-but-expected failure here;
    # anything else propagates with a full traceback.
    except RuntimeError as e:
        print('FATAL: %s' % e, file=sys.stderr)
        sys.exit(1)
| StarcoderdataPython |
1761399 | import pymysql.cursors
import itertools
# Local MySQL connection for the visualisation database.
# NOTE(review): root with an empty password -- fine for a local dev box only.
connection = pymysql.connect(host='localhost',
                             user='root',
                             password='',
                             db='r2d2visualisation',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
# Maps GitHub user names to the Trello names stored in the members table.
name_conversion_table = {
    "test_on_github": "test_on_trello"
}
# Per-member running totals keyed by (converted) name.
lines_added_total = {}
lines_removed_total = {}
# Input format assumed: one "name added,removed" record per line -- confirm
# against whatever produces github/sum.txt.
with open("github/sum.txt") as file:
    lines = file.read().split("\n")
    for line in lines:
        if line == "":
            continue
        information = line.split(" ")
        name = information[0]
        # Unknown GitHub names raise KeyError here (no fallback).
        name = name_conversion_table[name]
        lines_added = information[1].split(",")[0]
        lines_removed = information[1].split(",")[1]
        lines_added_total[name] = int(lines_added_total.get(name, 0)) + int(lines_added)
        lines_removed_total[name] = int(lines_removed_total.get(name, 0)) + int(lines_removed)
        print(lines_added, lines_removed)
# Write the accumulated totals back; parameterized queries avoid SQL injection.
with connection.cursor() as cursor:
    for name, lines_added in lines_added_total.items():
        sql = "UPDATE `members` SET `lines_added`=%s WHERE `name`=%s"
        cursor.execute(sql, (lines_added, name))
    for name, lines_removed in lines_removed_total.items():
        sql = "UPDATE `members` SET `lines_removed`=%s WHERE `name`=%s"
        cursor.execute(sql, (lines_removed, name))
    connection.commit()
| StarcoderdataPython |
1788037 | # -*- encoding=utf-8 -*-
from elasticsearch import Elasticsearch
# Index name used by the examples below
index = 'test-index'
# Document type
type = 'test-type'
# Fields used for the field-mapping lookup at the bottom
fields = ["name"]
# Alias name
name = "index-alias"
# Mapping left empty here; customize as needed
mapping = {}
es = client = Elasticsearch("localhost:9200")
# Create the index, ignoring 400 (already exists)
es.indices.create(index, ignore=400)
# Delete the index, ignoring 400 and 404 errors
es.indices.delete(index, ignore=[400, 404])
# Check whether the index exists
es.indices.exists(index)
# Check whether the type exists under the index
es.indices.exists_type(index, type)
# Open the index
es.indices.open(index)
# Close the index
es.indices.close(index)
# Flush the index
es.indices.flush(index)
# Set the mapping
es.indices.put_mapping(doc_type=type, body=mapping, index=index)
# Get the mapping information of the given index
es.indices.get_mapping(index=index)
# Check whether an index alias exists
es.indices.exists_alias(index, name=None)
# Assign a specific alias to the index
es.indices.put_alias(index, name)
# List all index aliases
es.indices.get_alias()
# Delete an alias
es.indices.delete_alias(index, name, params=None)
# List all index names
es.indices.get_alias().keys()
# Get index information, including mapping and settings
es.indices.get(index)
# Retrieve the mapping definition for specific fields.
es.indices.get_field_mapping(fields, index=None, doc_type=None, params=None)
1784415 | <filename>answers/hackerrank/Interchange two numbers.py<gh_stars>1-10
#@result Submitted a few seconds ago • Score: 10.00 Status: Accepted Test Case #0: 0s Test Case #1: 0s Test Case #2: 0s Test Case #3: 0s Test Case #4: 0.01s Test Case #5: 0s Test Case #6: 0s Test Case #7: 0s Test Case #8: 0s Test Case #9: 0s
# Enter your code here. Read input from STDIN. Print output to STDOUT
a = (raw_input(), raw_input())
print a[1]
print a[0]
| StarcoderdataPython |
4806354 | <gh_stars>10-100
from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask.array as da
import numpy as np
import xgboost as xgb
# Define the function to be executed on each worker
def train(X, y):
    """Train an XGBoost model on this worker's shard of the dask data.

    Executed on every dask worker (via ``xgb.dask.run``); workers
    synchronise during training so each ends up with the same model.

    Returns:
        The text dump of the trained booster.
    """
    print("Start training with worker #{}".format(xgb.rabit.get_rank()))
    # X,y are dask objects distributed across the cluster.
    # We must obtain the data local to this worker and convert it to DMatrix for training.
    # xgb.dask.create_worker_dmatrix follows the API exactly of the standard DMatrix constructor
    # (xgb.DMatrix()), except that it 'unpacks' dask distributed objects to obtain data local to
    # this worker
    dtrain = xgb.dask.create_worker_dmatrix(X, y)
    # Train on the data. Each worker will communicate and synchronise during training. The output
    # model is expected to be identical on each worker.
    bst = xgb.train({}, dtrain)
    # Make predictions on local data
    pred = bst.predict(dtrain)
    print("Finished training with worker #{}".format(xgb.rabit.get_rank()))
    # Get text representation of the model
    return bst.get_dump()
def train_with_sklearn(X, y):
    """Fit an XGBRegressor on this worker's local shard of (X, y).

    Runs on each dask worker; pulls out the locally held partitions of
    the distributed inputs, trains a 10-estimator regressor through the
    sklearn-style API and returns in-sample predictions for that shard.
    """
    print("Training with worker #{} using the sklearn API".format(xgb.rabit.get_rank()))
    # Extract only the partitions of the dask collections held by this worker.
    local_features = xgb.dask.get_local_data(X)
    local_labels = xgb.dask.get_local_data(y)
    regressor = xgb.XGBRegressor(n_estimators=10)
    regressor.fit(local_features, local_labels)
    print("Finished training with worker #{} using the sklearn API".format(xgb.rabit.get_rank()))
    return regressor.predict(local_features)
def main():
    """Spin up a local two-worker dask cluster and run distributed XGBoost training."""
    # Launch a very simple local cluster using two distributed workers with two CPU threads each
    cluster = LocalCluster(n_workers=2, threads_per_worker=2)
    client = Client(cluster)
    # Generate some small test data as a dask array
    # These data frames are internally split into partitions of 20 rows each and then distributed
    # among workers, so we will have 5 partitions distributed among 2 workers
    # Note that the partition size MUST be consistent across different dask dataframes/arrays
    n = 10
    m = 100
    partition_size = 20
    X = da.random.random((m, n), partition_size)
    y = da.random.random(m, partition_size)
    # xgb.dask.run launches an arbitrary function and its arguments on the cluster
    # Here train(X, y) will be called on each worker
    # This function blocks until all work is complete
    models = xgb.dask.run(client, train, X, y)
    # models contains a dictionary mapping workers to results
    # We expect that the models are the same over all workers
    first_model = next(iter(models.values()))
    assert all(model == first_model for worker, model in models.items())
    # We can also train using the sklearn API
    # NOTE(review): ``results`` is unused; kept only to demonstrate the sklearn path.
    results = xgb.dask.run(client, train_with_sklearn, X, y)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3331466 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the AttachmentBlacklist model.
    # The escaped verbose_name/help_text values are Chinese UI strings; the
    # keyword field holds an attachment-name pattern used to flag mail for review.
    dependencies = [
        ('mail', '0013_auto_20151104_1154'),
    ]
    operations = [
        migrations.CreateModel(
            name='AttachmentBlacklist',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('keyword', models.CharField(help_text='\u5bf9\u9644\u4ef6\u8fdb\u884c\u68c0\u6d4b\uff0c\u5982\u679c\u9644\u4ef6\u540d\u79f0\u5305\u542b\u9ed1\u540d\u5355\u5173\u952e\u8bcd,\u3000\u5219\u5c06\u90ae\u4ef6\u6807\u5fd7\u4e3a\u9ad8\u5371\u90ae\u4ef6\u5ba1\u6838\u3002\u652f\u6301\u6b63\u5219', max_length=50, verbose_name='\u9644\u4ef6\u5173\u952e\u5b57')),
                ('relay', models.BooleanField(default=True, verbose_name='\u662f\u5426\u7528\u4e8e\u4e2d\u7ee7')),
                ('collect', models.BooleanField(default=True, verbose_name='\u662f\u5426\u7528\u4e8e\u4ee3\u6536')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65e5\u671f')),
            ],
        ),
    ]
| StarcoderdataPython |
3229542 | import os
import base64
from datetime import (
datetime,
timedelta,
)
from werkzeug.security import (
generate_password_hash,
check_password_hash,
)
from flask import url_for
from app import db
class PaginatedAPIMixin(object):
    """Mixin that serializes a SQLAlchemy query as one page of a collection."""

    @staticmethod
    def to_collection_dict(query, page, per_page, endpoint, **kwargs):
        """Return a JSON-ready dict for page *page* of *query*.

        The result contains the serialized items, pagination metadata and
        HATEOAS-style navigation links built with ``url_for(endpoint, ...)``.
        """
        resources = query.paginate(page, per_page, False)
        serialized_items = [item.to_dict() for item in resources.items]
        meta = {
            'page': page,
            'per_page': per_page,
            'total_pages': resources.pages,
            'total_items': resources.total
        }
        links = {
            'self': url_for(endpoint, page=page, per_page=per_page, **kwargs),
            'next': None,
            'prev': None,
        }
        # Only build neighbor links when the corresponding page exists.
        if resources.has_next:
            links['next'] = url_for(endpoint, page=page + 1, per_page=per_page,
                                    **kwargs)
        if resources.has_prev:
            links['prev'] = url_for(endpoint, page=page - 1, per_page=per_page,
                                    **kwargs)
        return {'items': serialized_items, '_meta': meta, '_links': links}
class User(PaginatedAPIMixin, db.Model):
    """User account with hashed password and expiring API bearer token."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))  # never store the raw password, only its hash
    token = db.Column(db.String(32), index=True, unique=True)
    token_expiration = db.Column(db.DateTime)
    def __repr__(self):
        return '<User ({})>'.format(self.username)
    def set_password(self, password):
        """Hash *password* and store the hash."""
        self.password_hash = generate_password_hash(password)
    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def get_token(self, expires_in=3600):
        """Return a bearer token, reusing the current one while it still has
        more than 60 seconds of validity; otherwise mint a fresh random one."""
        now = datetime.utcnow()
        if self.token and self.token_expiration > now + timedelta(seconds=60):
            return self.token
        self.token = base64.b64encode(os.urandom(24)).decode('utf-8')
        self.token_expiration = now + timedelta(seconds=expires_in)
        db.session.add(self)
        return self.token
    def revoke_token(self):
        """Expire the current token immediately (one second in the past)."""
        self.token_expiration = datetime.utcnow() - timedelta(seconds=1)
    @staticmethod
    def check_token(token):
        """Return the User owning *token*, or None if unknown or expired."""
        user = User.query.filter_by(token=token).first()
        if user is None or user.token_expiration < datetime.utcnow():
            return None
        return user
    def to_dict(self, include_email=False):
        """
        Serialize this User into a JSON-ready dict. The Flask backend works
        with User instances, but responses to the frontend must be JSON objects.
        """
        data = {
            'id': self.id,
            'username': self.username,
            '_links': {
                'self': url_for('api.get_user', id=self.id)
            }
        }
        if include_email:
            data['email'] = self.email
        return data
    def from_dict(self, data, new_user=False):
        """Populate username/email from a client-supplied dict; the password
        is only set when creating a new user."""
        for field in ['username', 'email']:
            if field in data:
                setattr(self, field, data[field])
        if new_user and 'password' in data:
            self.set_password(data['password'])
| StarcoderdataPython |
3333835 | from __future__ import annotations
from importlib.abc import Traversable
from importlib.resources import files
from logging import Logger, getLogger
from typing import Iterator
from ..instrset import InstructionSet
from ..instrset_parser import parseInstrSet
from . import defs
def builtinInstructionSetPath(name: str) -> Traversable:
    """Return the packaged ``<name>.instr`` definition file inside ``defs``."""
    return files(defs).joinpath(f"{name}.instr")
def loadInstructionSet(
    path: Traversable, logger: Logger, wantSemantics: bool = True
) -> InstructionSet | None:
    """Parse the instruction set definition at *path*.

    Returns the parsed set, or None (after logging the error) when the
    file cannot be read.
    """
    try:
        instrSet = parseInstrSet(path, wantSemantics=wantSemantics)
    except OSError as ex:
        logger.error("%s: Failed to read instruction set: %s", path, ex.strerror)
        return None
    return instrSet
def loadInstructionSetByName(
    name: str, logger: Logger, wantSemantics: bool = True
) -> InstructionSet | None:
    """Load the builtin instruction set called *name* (None on failure)."""
    logger.info("Loading instruction set: %s", name)
    return loadInstructionSet(builtinInstructionSetPath(name), logger, wantSemantics)
class InstructionSetProvider:
    """
    Abstract base class for providers that can look up instruction sets by name.
    """
    def __getitem__(self, name: str) -> InstructionSet | None:
        """
        Return the instruction set with the given name, or `None` if it is not
        available for any reason.
        """
        # Interface only; concrete providers must override.
        raise NotImplementedError
    def __iter__(self) -> Iterator[str]:
        """Iterate through the names of the instruction sets that can be provided."""
        raise NotImplementedError
class InstructionSetDirectory(InstructionSetProvider):
    """
    Instruction set provider that loads and caches definitions from a directory.
    Instruction sets are loaded including semantics; a failed load is cached
    as None so the same file is not re-parsed on every lookup.
    """
    def __init__(self, path: Traversable, logger: Logger):
        self._path = path
        self._logger = logger
        # name -> parsed instruction set, or None when loading failed.
        self._cache: dict[str, InstructionSet | None] = {}
    def __getitem__(self, name: str) -> InstructionSet | None:
        if name not in self._cache:
            self._logger.info("Loading instruction set: %s", name)
            definition = self._path / f"{name}.instr"
            self._cache[name] = loadInstructionSet(definition, self._logger)
        return self._cache[name]
    def __iter__(self) -> Iterator[str]:
        suffix = ".instr"
        for entry in self._path.iterdir():
            if entry.is_file() and entry.name.endswith(suffix):
                yield entry.name[: -len(suffix)]
# Module-level singleton backed by the package's bundled ``defs`` resources.
builtinInstructionSets = InstructionSetDirectory(files(defs), getLogger(__name__))
"""
Provider for instruction sets from the RetroAsm installation.
"""
| StarcoderdataPython |
1700398 | # Author: <NAME>
# Time: 2020-6-6
import torch.utils.data as tud
import torch
from ..utils import load_from_pickle, processing
import cv2 as cv
import numpy as np
def get_dataset_from_pickle(pkl_path, transforms=None):
    """Build a MyDataset from a pickle holding (image paths, targets)."""
    paths, targets = load_from_pickle(pkl_path)
    return MyDataset(paths, targets, transforms)
class MyDataset(tud.Dataset):
    """Image dataset that lazily loads files with OpenCV and applies a transform."""
    def __init__(self, img_path_list, target_list, transform=None):
        """
        :param img_path_list: List[str]
        :param target_list: List[int]
        :param transform: [](image: ndarray[H, W, C]BRG, target) -> image: ndarray[H, W, C]BRG, target
            default behavior when None is handled by ``processing``
            NOTE(review): the original text referenced ``self._default_trans_func``,
            which does not exist on this class — confirm intended default.
        """
        assert len(img_path_list) == len(target_list)
        self.img_path_list = img_path_list
        self.target_list = target_list
        self.transform = transform
    def __getitem__(self, idx):
        """
        :param idx: int index or slice; a slice returns a new dataset view.
        :return: Tensor[C, H, W] RGB
        """
        img_path = self.img_path_list[idx]
        target = self.target_list[idx]
        if idx is a slice, build a new dataset over the sliced lists
        if isinstance(idx, slice):
            return self.__class__(img_path, target, self.transform)
        else:
            # cv.imread yields BGR HxWxC; ``processing`` applies the transform.
            x = cv.imread(img_path)
            x, target = processing(x, target, self.transform)
            return x, target
    def __len__(self):
        return len(self.img_path_list)
| StarcoderdataPython |
18889 | import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
from .integral import *
def get_velocity_field(panels, freestream, X, Y):
    """
    Computes the velocity field on a given 2D mesh.

    Parameters
    ----------
    panels: 1D array of Panel objects
        The source panels.
    freestream: Freestream object
        The freestream conditions.
    X: 2D Numpy array of floats
        x-coordinates of the mesh points.
    Y: 2D Numpy array of floats
        y-coordinates of the mesh points.

    Returns
    -------
    u: 2D Numpy array of floats
        x-component of the velocity vector field.
    v: 2D Numpy array of floats
        y-component of the velocity vector field.
    """
    # Uniform freestream contribution at every mesh point.
    u = freestream.u_inf * math.cos(freestream.alpha) * numpy.ones_like(X, dtype=float)
    v = freestream.u_inf * math.sin(freestream.alpha) * numpy.ones_like(X, dtype=float)
    # Superpose the induced velocity of each source panel (vectorized over the mesh).
    vectorized_integral = numpy.vectorize(integral)
    for panel in panels:
        strength = panel.sigma / (2.0 * math.pi)
        u = u + strength * vectorized_integral(X, Y, panel, 1, 0)
        v = v + strength * vectorized_integral(X, Y, panel, 0, 1)
    return u, v
| StarcoderdataPython |
1744396 | <reponame>omkumar01/seo-audit-tool
def metaTags(page):
    """Extract common SEO-related meta tag values from a parsed HTML page.

    Args:
        page: a BeautifulSoup-like document exposing ``find_all``.

    Returns:
        dict: recognised meta tag identifiers mapped to their ``content``
        values, or the integer 0 when the page has no <meta> tags (the 0
        sentinel is preserved for backward compatibility with callers).
    """
    tags = page.find_all("meta")
    if not tags:
        return 0
    # Map <meta name="..."> attributes to result-dict keys.
    # NOTE(review): "keywards" preserves a historical typo in the returned
    # key; existing callers may rely on it, so it is not corrected here.
    name_keys = {
        "viewport": "viewport",
        "keywords": "keywards",
        "description": "description",
        "robots": "robots",
        "twitter:card": "twitter:card",
        "twitter:title": "twitter:title",
        "twitter:description": "twitter:description",
        "twitter:image": "twitter:image",
    }
    # Map <meta property="..."> (Open Graph) attributes to result-dict keys.
    prop_keys = {
        "og:type": "og:type",
        "og:image": "og:image",
        "og:url": "og:url",
        "og:title": "og:title",
        "og:description": "og:description",
    }
    meta_data = {}
    for tag in tags:
        name = tag.get("name")
        prop = tag.get("property")
        if name in name_keys:
            meta_data[name_keys[name]] = tag.get("content")
        if prop in prop_keys:
            meta_data[prop_keys[prop]] = tag.get("content")
        if tag.get("charset"):
            meta_data["charset"] = tag.get("charset")
    return meta_data
| StarcoderdataPython |
3306969 | #!/usr/bin/env
# -*- coding: utf-8 -*-
"""
Copyright 2017-2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
from src.infrastructure.dependency_injector import Dependency
from src.domain.model import Keyword
from xml.dom import minidom
class KeywordLoader(Dependency):
    """
    Loads the list of keywords that is stored in the 'restaurant_keywords.xml'
    file into the database.
    """
    def __init__(self):
        # These are None here and used in execute(); they must be set
        # externally (dependency injection) before execute() runs.
        self.keyword_repository = None
        self.keyword_xml_file = None
        self.synonyms_xml_file = None
        # identifier -> {language code -> word}
        self._keywords = {}
        # synonym-group identifier -> [keyword identifiers]
        self._synonyms = {}
    def execute(self):
        """Read both XML files, persist the words, then link synonyms and translations."""
        self.__read_keyword_xml_file()
        self.__read_synonyms_xml_file()
        self.__record_words()
        self.__link_synonyms('es')
        self.__link_synonyms('en')
        self.__link_language()
    def __read_keyword_xml_file(self):
        """Parse the keyword XML file into self._keywords."""
        if not os.path.isfile(self.keyword_xml_file):
            logging.error('file \'%s\' does not exist' % self.keyword_xml_file)
            return
        xml_file = open(self.keyword_xml_file, mode='r+', encoding='utf-8')
        document = minidom.parse(xml_file)
        for keyword in document.documentElement.getElementsByTagName('keyword'):
            identifier = keyword.getAttribute('identifier')
            self.__parse_xml_keyword(identifier, keyword)
        xml_file.close()
    def __parse_xml_keyword(self, identifier, keyword_node):
        """Collect the <word lang="..."> translations of one keyword node."""
        translations = {}
        for translation in keyword_node.getElementsByTagName('word'):
            lang = translation.getAttribute('lang')
            word = translation.firstChild.nodeValue
            translations[lang] = word
        self._keywords[identifier] = translations
    def __read_synonyms_xml_file(self):
        """Parse the synonyms XML file into self._synonyms."""
        if not os.path.isfile(self.synonyms_xml_file):
            logging.error('file \'%s\'does not exist' % self.synonyms_xml_file)
            return
        xml_file = open(self.synonyms_xml_file, mode='r+', encoding='utf-8')
        document = minidom.parse(xml_file)
        for keyword in document.documentElement.getElementsByTagName('synonym'):
            identifier = keyword.getAttribute('identifier')
            values = []
            for value in keyword.getElementsByTagName('keyword'):
                values.append(value.firstChild.nodeValue)
            self._synonyms[identifier] = values
        xml_file.close()
    def __record_words(self):
        """Persist every parsed word that is not already in the repository."""
        for key, item in self._keywords.items():
            for lang, word in item.items():
                keyword = self.keyword_repository.get_of_name(word, lang)
                if keyword:
                    continue
                keyword = Keyword(
                    file_reference=key,
                    word=word,
                    language=lang
                )
                self.keyword_repository.persist(keyword)
    def __link_synonyms(self, language):
        for key, items in self._synonyms.items():
            self.__link_all_words(items, language)
    def __link_all_words(self, items, language):
        # NOTE(review): ``j`` is never reset when ``i`` advances, so the inner
        # loop only runs for i == 0; items[0] is linked to every other item,
        # not all pairs. Confirm whether hub-style linking is intended.
        i = 0
        j = 0
        while i < len(items):
            while j + 1 < len(items):
                if language not in self._keywords[items[i]] \
                        or language not in self._keywords[items[j + 1]]:
                    j += 1
                    continue
                word_a = self._keywords[items[i]][language]
                word_b = self._keywords[items[j + 1]][language]
                word_a = self.keyword_repository.get_of_name(word_a, language)
                word_b = self.keyword_repository.get_of_name(word_b, language)
                word_a.add_synonym(word_b)
                self.keyword_repository.persist(word_a)
                self.keyword_repository.persist(word_b)
                logging.debug('Links %s with %s' % (word_a, word_b))
                j += 1
            i += 1
    def __link_language(self):
        # NOTE(review): word_es is fetched with the English word/'en' and
        # word_en with the Spanish word/'es' — the variable names look
        # swapped. Behavior may still be correct if add_translation is
        # symmetric; confirm.
        for key, word in self._keywords.items():
            if 'en' not in word or 'es' not in word:
                continue
            word_es = self.keyword_repository.get_of_name(word['en'], 'en')
            word_en = self.keyword_repository.get_of_name(word['es'], 'es')
            word_es.add_translation(word_en)
            self.keyword_repository.persist(word_es)
            self.keyword_repository.persist(word_en)
            logging.debug(
                'Link translation %s with %s' % (word_es.word, word_en.word)
            )
| StarcoderdataPython |
1776806 | <reponame>trunko/pirates-hijack
# This is where you build your AI for the Pirates game.
from joueur.base_ai import BaseAI
from colorama import init, Fore, Back, Style
from sys import platform
import os
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class AI(BaseAI):
    """ The basic AI functions that are the same between games. """
    def get_name(self):
        """ This is the name you send to the server so your AI will control the player named this string.
        Returns
            str: The name of your Player.
        """
        # <<-- Creer-Merge: get-name -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
        return "The Senate" # REPLACE THIS WITH YOUR TEAM NAME
        # <<-- /Creer-Merge: get-name -->>
    def start(self):
        """ This is called once the game starts and your AI knows its playerID and game. You can initialize your AI here.
        """
        # <<-- Creer-Merge: start -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
        # colorama init + clear screen so display_map can draw in place.
        init()
        os.system('clear')
        self.display_map()
        # <<-- /Creer-Merge: start -->>
    def game_updated(self):
        """ This is called every time the game's state updates, so if you are tracking anything you can update it here.
        """
        # <<-- Creer-Merge: game-updated -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
        self.display_map()
        # <<-- /Creer-Merge: game-updated -->>
    def end(self, won, reason):
        """ This is called when the game ends, you can clean up your data and dump files here if need be.
        Args:
            won (bool): True means you won, False means you lost.
            reason (str): The human readable string explaining why you won or lost.
        """
        # <<-- Creer-Merge: end -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
        os.system('clear')
        self.display_map()
        if won:
            print(Fore.GREEN + '\nI Won!')
            print('Reason: ' + reason + '\n' + Fore.RESET)
        else:
            print(Fore.RED + '\nI Lost!')
            print('Reason: ' + reason + '\n' + Fore.RESET)
        # <<-- /Creer-Merge: end -->>
    def run_turn(self):
        """ This is called every time it is this AI.player's turn.
        Returns:
            bool: Represents if you want to end your turn. True means end your turn, False means to keep your turn going and re-call this function.
        """
        # <<-- Creer-Merge: runTurn -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
        # Put your game logic here for runTurn
        targets = {}
        if self.player.port.tile.unit is None and len(self.player.units) == 0 and self.player.gold >= 800:
            # Spawn a ship for our crew later if we have no other ships
            self.player.port.spawn("ship")
        elif self.player.gold >= 200 and (self.player.port.tile.unit is not None and self.player.port.tile.unit.ship_health > 0 and self.player.port.tile.unit.crew < 10):
            # Gather up 10 crew on the port if we have enough money.
            self.player.port.spawn("crew")
        for unit in self.player.units:
            if unit.tile is not None:
                if unit.tile == self.player.port.tile and (unit.ship_health < self.game.ship_health or unit.crew_health < self.game.crew_health * unit.crew):
                    unit.rest()
                elif unit._ship_health < self.game._ship_health / 2.0 or unit.crew_health <= (self.game.crew_health * unit.crew) / 2.0 or (unit.crew < 10 and (unit.gold >= 200 or self.player.gold >= 200)):
                    # If the crew or ship is almost dead, try to get to your port to heal up.
                    # Also heads back if we don't have an adequate crew number, but have gold for more.
                    # Find a path to our port so we can heal
                    path = self.a_star(unit.tile, self.player.port.tile, unit)
                    while unit.moves > 0:
                        if len(path) > 0:
                            # Make sure the port is empty before moving
                            if self.player.port.tile.unit is not None:
                                break
                            # Move along the path if there is one
                            unit.move(path.pop(0))
                        else:
                            # Try to deposit any gold we have while we're here
                            if unit.gold > 0:
                                unit.deposit()
                            # Try to rest
                            unit.rest()
                            break
                else:
                    # Look for the closest ship to attack
                    targets[unit] = None
                    for u in self.game.units:
                        if targets[unit] is None and u.crew_health == 0 and u.ship_health > 0 and u.tile != self.player.opponent.port.tile and unit.crew > 1:
                            # Found an abandoned ship
                            distance = self.distance(unit.tile, u.tile)
                            if distance is not None and targets[unit] is None:
                                targets[unit] = u
                            elif distance is not None and distance < self.distance(unit.tile, targets[unit].tile) and u not in targets.values():
                                targets[unit] = u
                        elif targets[unit] is None and u._target_port is not None:
                            # Found a merchant ship
                            distance = self.distance(unit.tile, u.tile)
                            if distance is not None and targets[unit] is None:
                                targets[unit] = u
                            elif distance is not None and distance < self.distance(unit.tile, targets[unit].tile) and u not in targets.values():
                                targets[unit] = u
                        elif targets[unit] is None and u.owner == self.player.opponent and u.ship_health <= unit.ship_health and u.crew < unit.crew and u.crew_health < unit.crew_health and u.tile != self.player.opponent.port.tile:
                            # Found an enemy ship
                            distance = self.distance(unit.tile, u.tile)
                            if distance is not None and targets[unit] is None:
                                targets[unit] = u
                            elif distance is not None and distance < self.distance(unit.tile, targets[unit].tile) and u not in targets.values():
                                targets[unit] = u
                    # If we found a target, move to it, then attack it or board it
                    if targets[unit] is not None:
                        path = self.a_star(unit.tile, targets[unit].tile, unit)
                        # Find a path to this unit's target
                        while unit.moves > 0 and not unit.acted:
                            if len(path) > 0 and not unit.tile.has_neighbor(targets[unit].tile):
                                # Move until we're within melee range of the target
                                unit.move(path.pop(0))
                            elif unit.tile.has_neighbor(targets[unit].tile):
                                # Try to attack the ship and break
                                if targets[unit].crew_health > 0:
                                    unit.attack(targets[unit].tile, "crew")
                                elif targets[unit].owner is None:
                                    # NOTE(review): unit.crew / 2 is float division in
                                    # Python 3 — confirm split() accepts a non-integer count.
                                    unit.split(targets[unit].tile, unit.crew / 2, 0)
                                break
                            else:
                                # If path is not available, just break
                                break
        return True
        # <<-- /Creer-Merge: runTurn -->>
    def find_path(self, start, goal, unit):
        """A very basic path finding algorithm (Breadth First Search) that when given a starting Tile, will return a valid path to the goal Tile.
        Args:
            start (Tile): the starting Tile
            goal (Tile): the goal Tile
            unit (Unit): the Unit that will move
        Returns:
            list[Tile]: A list of Tiles representing the path, the first element being a valid adjacent Tile to the start, and the last element being the goal.
        """
        if start == goal:
            # no need to make a path to here...
            return []
        # queue of the tiles that will have their neighbors searched for 'goal'
        fringe = []
        # How we got to each tile that went into the fringe.
        came_from = {}
        # Enqueue start as the first tile to have its neighbors searched.
        fringe.append(start)
        # keep exploring neighbors of neighbors... until there are no more.
        while len(fringe) > 0:
            # the tile we are currently exploring.
            inspect = fringe.pop(0)
            # cycle through the tile's neighbors.
            for neighbor in inspect.get_neighbors():
                # if we found the goal, we have the path!
                if neighbor == goal:
                    # Follow the path backward to the start from the goal and return it.
                    path = [goal]
                    # Starting at the tile we are currently at, insert them retracing our steps till we get to the starting tile
                    while inspect != start:
                        path.insert(0, inspect)
                        inspect = came_from[inspect.id]
                    return path
                # else we did not find the goal, so enqueue this tile's neighbors to be inspected
                # if the tile exists, has not been explored or added to the fringe yet, and it is pathable
                if neighbor and neighbor.id not in came_from and neighbor.is_pathable(unit):
                    # add it to the tiles to be explored and add where it came from for path reconstruction.
                    fringe.append(neighbor)
                    came_from[neighbor.id] = inspect
        # if you're here, that means that there was not a path to get to where you want to go.
        # in that case, we'll just return an empty path.
        return []
    # <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
    # if you need additional functions for your AI you can add them here
    def a_star(self, start, goal, unit):
        """A* search from start to goal for `unit`: uniform step cost of 1 with
        the Manhattan distance (see self.distance) as heuristic. Returns the
        path excluding start and ending at goal, or [] if unreachable."""
        if start == goal:
            return []
        frontier = []
        explored = []
        came_from = {}
        path_cost = {}
        frontier.append(start)
        path_cost[start] = 0
        while len(frontier) > 0:
            # Pick the frontier tile with the lowest f = g + h by linear scan.
            inspect = None
            for tile in frontier:
                if inspect is None:
                    inspect = tile
                elif (self.distance(tile, goal) + path_cost[tile]) < (self.distance(inspect, goal) + path_cost[inspect]):
                    inspect = tile
            frontier.remove(inspect)
            explored.append(inspect)
            for neighbor in inspect.get_neighbors():
                if neighbor == goal:
                    # Reconstruct the path by walking came_from back to start.
                    path = [goal]
                    step = inspect
                    while step != start:
                        path.insert(0, step)
                        step = came_from[step]
                    return path
                if neighbor is not None:
                    if neighbor not in explored and neighbor not in frontier and neighbor.is_pathable(unit):
                        frontier.append(neighbor)
                        came_from[neighbor] = inspect
                        path_cost[neighbor] = path_cost[inspect] + 1
        return []
    def distance(self, t1, t2):
        """Manhattan distance between two tiles, or None if either is None."""
        if t1 is not None and t2 is not None:
            return abs(t1.x - t2.x) + abs(t1.y - t2.y)
        else:
            return None
    def display_map(self):
        """Redraw the whole map plus score lines using colorama colors."""
        # ANSI escape: move cursor to home (row 0, col 0) so the map redraws in place.
        print('\033[0;0H', end='')
        for y in range(0, self.game.map_height):
            print(' ', end='')
            for x in range(0, self.game.map_width):
                t = self.game.tiles[y * self.game.map_width + x]
                # Background color encodes terrain/port ownership.
                if t.port != None:
                    if t.port.owner == self.player:
                        print(Back.GREEN, end='')
                    elif t.port.owner == self.player.opponent:
                        print(Back.RED, end='')
                    else:
                        print(Back.MAGENTA, end='')
                elif t.type == 'land':
                    print(Back.YELLOW, end='')
                else:
                    print(Back.CYAN, end='')
                foreground = ' '
                print(Fore.WHITE, end='')
                # Foreground glyph encodes units ('S' ship / 'C' crew) and gold ('$').
                if t.unit != None:
                    if t.unit.owner == self.player:
                        print(Fore.GREEN, end='')
                    elif t.unit.owner == self.player.opponent:
                        print(Fore.RED, end='')
                    else:
                        print(Fore.MAGENTA, end='')
                    if t.unit.ship_health > 0:
                        foreground = 'S'
                    else:
                        foreground = 'C'
                elif t.gold > 0:
                    print(Fore.BLACK, end='')
                    foreground = '$'
                print(foreground + Fore.RESET + Back.RESET, end='')
            if y < 10:
                print(' 0' + str(y))
            else:
                print(' ' + str(y))
        print('\nTurn: ' + str(self.game.current_turn) + ' / ' \
            + str(self.game.max_turns))
        print(Fore.GREEN + 'Infamy: ' + str(self.player.infamy) \
            + '\tGold: ' + str(self.player.gold) + Fore.RESET)
        print(Fore.RED + 'Infamy: ' + str(self.player.opponent.infamy) \
            + '\tGold: ' + str(self.player.opponent.gold) + Fore.RESET)
        return
    # <<-- /Creer-Merge: functions -->>
| StarcoderdataPython |
3313319 | <gh_stars>1-10
from enum import Enum
class CPUState(Enum):
    """SARK CPU states.

    Each member mirrors one of the numeric CPU state codes used by SARK.
    """

    DEAD = 0
    POWERED_DOWN = 1
    RUN_TIME_EXCEPTION = 2
    WATCHDOG = 3
    INITIALISING = 4
    READY = 5
    C_MAIN = 6
    RUNNING = 7
    SYNC0 = 8
    SYNC1 = 9
    PAUSED = 10
    FINISHED = 11
    CPU_STATE_12 = 12
    CPU_STATE_13 = 13
    CPU_STATE_14 = 14
    IDLE = 15

    def __new__(cls, value, doc=""):
        # Accept an optional per-member documentation string next to the code.
        member = object.__new__(cls)
        member._value_ = value
        return member

    def __init__(self, value, doc=""):
        self._value_ = value
        self.__doc__ = doc
| StarcoderdataPython |
3224969 | import logging
import textwrap
from datetime import datetime, timedelta
from airflow import DAG # noqa
from airflow import macros # noqa
from airflow.operators.python_operator import PythonOperator # noqa
from pyhocon import ConfigFactory
from databuilder.extractor.hive_table_metadata_extractor import HiveTableMetadataExtractor
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
from databuilder.job.job import DefaultJob
from databuilder.models.table_metadata import DESCRIPTION_NODE_LABEL
from databuilder.loader.file_system_neo4j_csv_loader import FsNeo4jCSVLoader
from databuilder.publisher import neo4j_csv_publisher
from databuilder.publisher.neo4j_csv_publisher import Neo4jCsvPublisher
from databuilder.task.task import DefaultTask
from databuilder.transformer.base_transformer import NoopTransformer
# DAG-level settings for the Amundsen databuilder ingestion DAG.
dag_args = {
    'concurrency': 10,
    # One dagrun at a time
    'max_active_runs': 1,
    # 4AM, 4PM PST
    # NOTE(review): '0 11 * * *' fires once per day at 11:00 UTC; the
    # "4AM, 4PM" note above does not match — confirm the intended schedule.
    'schedule_interval': '0 11 * * *',
    'catchup': False
}
# Per-task defaults applied to every operator in this DAG.
default_args = {
    'owner': 'amundsen',
    'start_date': datetime(2018, 6, 18),
    'depends_on_past': False,
    'email': [''],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 3,
    'priority_weight': 10,
    'retry_delay': timedelta(minutes=5),
    'execution_timeout': timedelta(minutes=120)
}
# NEO4J cluster endpoints
NEO4J_ENDPOINT = 'bolt://localhost:7687'
neo4j_endpoint = NEO4J_ENDPOINT
neo4j_user = 'neo4j'
neo4j_password = '<PASSWORD>'
# Todo: user provides a list of schema for indexing
SUPPORTED_HIVE_SCHEMAS = ['hive']
# Global used in all Hive metastore queries.
# String format - ('schema1', schema2', .... 'schemaN')
SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE = "('{schemas}')".format(schemas="', '".join(SUPPORTED_HIVE_SCHEMAS))
# Todo: user needs to modify and provide a hivemetastore connection string
def connection_string():
    """Return the Hive metastore SQLAlchemy connection string.

    Placeholder value; deployments must replace this with a real
    connection URI for their metastore database.
    """
    metastore_uri = 'hivemetastore.connection'
    return metastore_uri
def create_table_wm_job(**kwargs):
    """Extract Hive partition watermarks and publish them to Neo4j.

    Expects Airflow-provided ``kwargs['templates_dict']`` with:
      - ``agg_func``: SQL aggregate applied to part_name (e.g. min/max)
      - ``watermark_type``: quoted literal used as the part_type column
    """
    sql = textwrap.dedent("""
    SELECT From_unixtime(A0.create_time) as create_time,
    C0.NAME as schema_name,
    B0.tbl_name as table_name,
    {func}(A0.part_name) as part_name,
    {watermark} as part_type
    FROM PARTITIONS A0
    LEFT OUTER JOIN TBLS B0
    ON A0.tbl_id = B0.tbl_id
    LEFT OUTER JOIN DBS C0
    ON B0.db_id = C0.db_id
    WHERE C0.NAME IN {schemas}
    AND B0.tbl_type IN ( 'EXTERNAL_TABLE', 'MANAGED_TABLE' )
    AND A0.PART_NAME NOT LIKE '%%__HIVE_DEFAULT_PARTITION__%%'
    GROUP BY C0.NAME, B0.tbl_name
    ORDER by create_time desc
    """).format(func=kwargs['templates_dict'].get('agg_func'),
                watermark=kwargs['templates_dict'].get('watermark_type'),
                schemas=SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE)
    logging.info('SQL query: {}'.format(sql))
    # Separate temp folder per watermark type so concurrent jobs don't collide.
    tmp_folder = '/var/tmp/amundsen/table_{hwm}'.format(hwm=kwargs['templates_dict']
                                                        .get('watermark_type').strip("\""))
    node_files_folder = '{tmp_folder}/nodes'.format(tmp_folder=tmp_folder)
    relationship_files_folder = '{tmp_folder}/relationships'.format(tmp_folder=tmp_folder)
    hwm_extractor = SQLAlchemyExtractor()
    csv_loader = FsNeo4jCSVLoader()
    task = DefaultTask(extractor=hwm_extractor,
                       loader=csv_loader,
                       transformer=NoopTransformer())
    job_config = ConfigFactory.from_dict({
        'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING): connection_string(),
        'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.EXTRACT_SQL): sql,
        'extractor.sqlalchemy.model_class': 'databuilder.models.hive_watermark.HiveWatermark',
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
            node_files_folder,
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
            relationship_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
            node_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
            relationship_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
            neo4j_endpoint,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
            neo4j_user,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
            neo4j_password,
    })
    job = DefaultJob(conf=job_config,
                     task=task,
                     publisher=Neo4jCsvPublisher())
    job.launch()
def create_table_metadata_databuilder_job():
    """
    Launches a databuilder job that extracts table and column metadata from the
    MySQL Hive metastore database and publishes it to Neo4j.
    """
    # Adding to where clause to scope schema, filter out temp tables which start with numbers and views
    where_clause_suffix = textwrap.dedent("""
    WHERE d.NAME IN {schemas}
    AND t.TBL_NAME NOT REGEXP '^[0-9]+'
    AND t.TBL_TYPE IN ( 'EXTERNAL_TABLE', 'MANAGED_TABLE' )
    """).format(schemas=SUPPORTED_HIVE_SCHEMA_SQL_IN_CLAUSE)
    tmp_folder = '/var/tmp/amundsen/table_metadata'
    node_files_folder = '{tmp_folder}/nodes/'.format(tmp_folder=tmp_folder)
    relationship_files_folder = '{tmp_folder}/relationships/'.format(tmp_folder=tmp_folder)
    job_config = ConfigFactory.from_dict({
        'extractor.hive_table_metadata.{}'.format(HiveTableMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY):
            where_clause_suffix,
        'extractor.hive_table_metadata.extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
            connection_string(),
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
            node_files_folder,
        'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
            relationship_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NODE_FILES_DIR):
            node_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.RELATION_FILES_DIR):
            relationship_files_folder,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_END_POINT_KEY):
            neo4j_endpoint,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_USER):
            neo4j_user,
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_PASSWORD):
            neo4j_password,
        # Descriptions are create-only so manually edited ones are not clobbered.
        'publisher.neo4j.{}'.format(neo4j_csv_publisher.NEO4J_CREATE_ONLY_NODES):
            [DESCRIPTION_NODE_LABEL],
    })
    job = DefaultJob(conf=job_config,
                     task=DefaultTask(extractor=HiveTableMetadataExtractor(), loader=FsNeo4jCSVLoader()),
                     publisher=Neo4jCsvPublisher())
    job.launch()
with DAG('amundsen_databuilder', default_args=default_args, **dag_args) as dag:
    # Extract table/column metadata from the Hive metastore and publish it to Neo4j.
    amundsen_databuilder_table_metadata_job = PythonOperator(
        task_id='amundsen_databuilder_table_metadata_job',
        python_callable=create_table_metadata_databuilder_job
    )

    # calculate hive high watermark
    # NOTE: watermark_type carries embedded double quotes on purpose -- it is
    # interpolated verbatim into the SQL built by create_table_wm_job.
    amundsen_hwm_job = PythonOperator(
        task_id='amundsen_hwm_job',
        python_callable=create_table_wm_job,
        provide_context=True,
        templates_dict={'agg_func': 'max',
                        'watermark_type': '"high_watermark"',
                        'part_regex': '{}'.format('{{ ds }}')}
    )

    # calculate hive low watermark
    amundsen_lwm_job = PythonOperator(
        task_id='amundsen_lwm_job',
        python_callable=create_table_wm_job,
        provide_context=True,
        templates_dict={'agg_func': 'min',
                        'watermark_type': '"low_watermark"',
                        'part_regex': '{}'.format('{{ ds }}')}
    )
| StarcoderdataPython |
1689985 | <reponame>Tim232/Python-Things
import tensorflow as tf
layers = tf.contrib.layers
arg_scope = tf.contrib.framework.arg_scope
def lrelu(x, leak=0.2, name='lrelu'):
    """Leaky ReLU activation.

    max(x, leak*x) equals x for x >= 0 and leak*x for x < 0 (for 0 < leak < 1).
    ``name`` is accepted for API symmetry but is not used.
    """
    scaled = leak * x
    return tf.maximum(x, scaled)
def BlockCLayers(inputs, L, in_filters, Growth):
    """Dense-style block of ``L`` BN -> lrelu -> conv -> dropout layers.

    Each layer's output is concatenated (channel axis) onto the running
    feature map ``c``. Returns ``(c, filters)`` where ``filters`` is the
    input filter count advanced by ``Growth`` per layer.

    (Fix: removed stray non-Python text that had been fused onto the
    ``return`` statement.)
    """
    c = inputs
    filters = in_filters
    for idx in range(L):
        # NOTE(review): this reads ``inputs`` (the original block input) every
        # iteration rather than the running concatenation ``c`` -- for a
        # DenseNet-style block one would expect ``layers.batch_norm(c)`` here;
        # confirm intent before changing.
        net = lrelu(layers.batch_norm(inputs))
        # NOTE(review): the conv emits ``in_filters`` channels while the
        # bookkeeping below advances ``filters`` by ``Growth``; these disagree
        # unless in_filters == Growth -- confirm.
        net = layers.conv2d(net, in_filters)
        net = layers.dropout(net)
        c = tf.concat([net, c], axis=3)
        filters += Growth
    return c, filters
4825318 | import os
import types
import operator
from functools import reduce
from collections import OrderedDict
from collections.abc import Sequence
from ..compiler import Compiler
from ..ref import DefnRef, TupleRef
from ..compatibility import IntegerTypes
from ..bit import Digital
from ..clock import Clock, Enable, Reset
from ..array import Array
from ..bits import SInt
from ..tuple import Tuple
from ..is_definition import isdefinition
from magma.passes.clock import drive_undriven_clock_types_in_inst
from ..logging import root_logger
from .util import get_codegen_debug_info, make_relative
from ..config import config, EnvConfig
config._register(
verilog_backend_log_level=EnvConfig(
"MAGMA_VERILOG_BACKEND_LOG_LEVEL", "WARN"),
)
_logger = root_logger().getChild("verilog_backend")
_logger.setLevel(config.verilog_backend_log_level)
#__all__ = ['hstr', 'bstr']
__all__ = ['hstr']
def _verilog_name_of_ref(ref):
    """Return the flattened Verilog identifier for a magma ref.

    Tuple members are flattened recursively as ``<parent>_<index>``.
    """
    if not isinstance(ref, TupleRef):
        return ref.qualifiedname("_")
    parent = _verilog_name_of_ref(ref.tuple.name)
    return "{}_{}".format(parent, ref.index)
# return the hex character for int n
# return the hex character for int n
def hex(n):
    """Return the single uppercase hex digit for ``n`` in 0..15.

    NOTE: intentionally shadows the builtin ``hex`` within this module.
    """
    base, offset = ('0', 0) if n < 10 else ('A', 10)
    return chr(ord(base) + n - offset)
# return a hex string reprenting n
# return a hex string reprenting n
def hstr(n, bits):
    """Render ``n`` as a sized Verilog hex literal, e.g. hstr(255, 8) -> "8'hFF"."""
    prefix = "%d'h" % bits
    n &= (1 << bits) - 1  # truncate to the declared width
    digits = []
    # one hex digit per 4 bits, rounded up; collected least-significant first
    for _ in range((bits + 3) // 4):
        digits.append(hex(n % 16))
        n //= 16
    return prefix + ''.join(reversed(digits))
def bstr(n, bits):
    """Render ``n`` as a sized Verilog binary literal, e.g. bstr(5, 4) -> "4'b0101".

    Fixes a NameError in the single-bit branch of the original, which tested
    the undefined name ``init`` instead of the argument ``n``.
    """
    if bits == 1:
        return "1'b1" if (n & 1) else "1'b0"
    n &= (1 << bits) - 1  # truncate to the declared width (handles negatives too)
    # builtin format() zero-pads the binary expansion to exactly ``bits`` digits
    return "%d'b%s" % (bits, format(n, '0%db' % bits))
# return the verilog name of a data value
# return the verilog name of a data value
def vname(t):
    """Return the Verilog expression naming the value ``t``.

    Constant bits become literals; a non-whole array becomes a concatenation
    of its elements; everything else resolves through its ref.
    """
    if isinstance(t, Digital):
        if t is type(t).VCC:
            return "1'b1"
        if t is type(t).GND:
            return "1'b0"
    if isinstance(t, Array) and not t.iswhole():
        # the sequence of values is concatenated, MSB first
        elems = [vname(elem) for elem in t.ts]
        elems.reverse()
        return '{' + ','.join(elems) + '}'
    assert not t.anon(), (t.name)
    return _verilog_name_of_ref(t.name)
# return the verilog declaration for the data type
# return the verilog declaration for the data type
def vdecl(t):
    """Return the Verilog sign/range declaration for ``t`` ('' for single bits)."""
    if not isinstance(t, Array):
        assert isinstance(t, Digital)
        return ""
    sign_prefix = "signed " if isinstance(t, SInt) else ""
    return '{}[{}:{}]'.format(sign_prefix, t.N - 1, 0)
# return the verilog module args
# return the verilog module args
def vmoduleargs(self):
    """Return the list of Verilog port declarations for an interface.

    Note the direction flip: an interface *input* is driven from outside the
    module, so it is declared as a Verilog ``output`` here, and vice versa.
    """
    def declare(args, port, name):
        if port.is_input():
            direction = "output"
        elif port.is_output():
            direction = "input"
        else:
            direction = "inout"
        args.append("%s %s %s" % (direction, vdecl(port), name))

    args = []
    for name, port in self.ports.items():
        if isinstance(port, Tuple):
            # flatten tuple ports into one declaration per member
            for i in range(len(port)):
                declare(args, port[i], vname(port[i]))
        else:
            declare(args, port, name)
    return args
def compileinstance(self):
    """Emit the structural Verilog instantiation statement for one instance.

    Builds the named port connections (tracing each input back to its
    driver), the sorted parameter overrides, and optional source-location
    debug comments. Returns the text without the trailing semicolon.
    """
    # print('compileinstance', str(self), str(type(self)))
    def arg(k,v):
        # render one named port connection, e.g. ".clk(CLK)"
        if not isinstance(v, str): v = str(v)
        return '.%s(%s)' % (k, v)

    args = []
    debug_str = ""
    for k, v in self.interface.ports.items():
        # filename/lineno are only bound under this guard; the same guard is
        # re-checked before they are used in the f-string below.
        if getattr(v, "debug_info", False) and get_codegen_debug_info():
            filename, lineno, module = v.debug_info
        #print('arg', k, v,)
        if v.is_input():
            # find the output connected to v
            w = v.trace()
            if w is None:
                _logger.warning(f'{v.debug_name} not connected')
                continue
            v = w
        if isinstance(v, Tuple):
            # flatten tuple ports: one connection per member
            for i in range(len(v)):
                args.append(arg('%s_%s' %
                                (v[i].name.tuple.name, v[i].name.index), vname(v[i])))
        elif isinstance(k, IntegerTypes):
            # positional (unnamed) port
            args.append( vname(v) )
        else:
            args.append( arg(k,vname(v)) )
        if getattr(v, "debug_info", False) and get_codegen_debug_info():
            debug_str += f"// Argument {k}({vname(v)}) wired at {make_relative(filename)}:{lineno}\n"
    params = []
    for k, v in self.kwargs.items():
        # loc/name/T are magma bookkeeping kwargs, not Verilog parameters
        if k not in {'loc', 'name', 'T'}:
            if isinstance(v, tuple):
                # (value, width) pairs render as sized hex literals
                v = hstr(v[0], v[1])
            params.append(arg(k, v))
    params = sorted(params)
    #s = '(* loc="%d,%d/%d" *)\n' % self.loc if self.loc else ""
    s = str(self.__class__.verilog_name)
    if len(params):
        # multi-line parameter list only when there are more than two
        if len(params) > 2:
            s += ' #(' + ",\n".join(params) + ')'
        else:
            s += ' #(' + ", ".join(params) + ')'
    s += ' ' + str(self.name)
    return debug_str + '%s (%s)' % (s, ', '.join(args))
def compiledefinition(cls):
    """Return the complete Verilog module definition text for a circuit class.

    Precedence: an attached ``verilogFile`` wins outright; otherwise an inline
    ``verilog`` body (with optional ``verilogLib`` prepends) is used; otherwise
    structural Verilog is generated from the circuit's instances and wiring.
    """
    if cls.verilogFile:
        return cls.verilogFile

    # for now only allow Bit or Array(n, Bit)
    for name, port in cls.interface.ports.items():
        if isinstance(port, Array):
            if not issubclass(port.T, Digital):
                raise Exception(f'Argument {cls.__name__}.{name} of type {type(port)} is not supported, the verilog backend only supports simple 1-d array of bits of the form Array(N, Bit)')

    args = ', '.join(vmoduleargs(cls.interface))
    s = ''
    if get_codegen_debug_info() and cls.debug_info.filename and cls.debug_info.lineno:
        s += f'// Defined at {make_relative(cls.debug_info.filename)}:{cls.debug_info.lineno}\n'
    s += 'module %s (%s);\n' % (cls.verilog_name, args)

    if cls.verilog:
        # hand-written body: emit verbatim, prepending any library files/snippets
        s += cls.verilog + '\n'
        if cls.verilogLib:
            import re
            for libName in cls.verilogLib:
                # entries ending in .v are file paths; others are literal text
                if re.search("\\.v$",libName):
                    with open(libName,'r') as libFile:
                        s = libFile.read() + s
                else:
                    s = libName + s
    else:
        def wire(port):
            return 'wire %s %s;\n' % (vdecl(port), vname(port))

        # declare a wire for each instance output
        for instance in cls.instances:
            for port in instance.interface.ports.values():
                if isinstance(port, Tuple):
                    for i in range(len(port)):
                        s += wire(port[i])
                else:
                    if not port.is_input():
                        s += wire(port)

        #print('compile instances')
        # emit the structured verilog for each instance
        for instance in cls.instances:
            with cls.open():
                drive_undriven_clock_types_in_inst(cls, instance)
            if getattr(instance, "debug_info", False) and \
                    instance.debug_info.filename and instance.debug_info.lineno and \
                    get_codegen_debug_info():
                s += f"// Instanced at {make_relative(instance.debug_info.filename)}:{instance.debug_info.lineno}\n"
            s += compileinstance(instance) + ";\n"

        # assign to module output arguments
        for port in cls.interface.ports.values():
            if port.is_input():
                output = port.trace()
                if output is not None:
                    if isinstance(output, Tuple):
                        # NOTE: ``input`` here shadows the builtin; kept as-is.
                        for name, input in cls.interface.ports.items():
                            if input.is_input():
                                output = input.trace()
                                assert isinstance(output, Tuple)
                                for i in range(len(input)):
                                    iname = vname(input[i])
                                    oname = vname(output[i])
                                    s += 'assign %s = %s;\n' % (iname, oname)
                    else:
                        iname = vname(port)
                        oname = vname(output)
                        if getattr(port, "debug_info", False) and get_codegen_debug_info():
                            s += f"// Wired at {make_relative(port.debug_info[0])}:{port.debug_info[1]}\n"
                        s += 'assign %s = %s;\n' % (iname, oname)
                else:
                    _logger.warning(f"{cls.__name__}.{port.name} is unwired")

    s += "endmodule\n"
    return s
def find(circuit, defn):
    """Collect the definitions reachable from ``circuit`` into ``defn``.

    Post-order (dependencies first) so that every module's text is emitted
    before any module that instantiates it. ``defn`` is keyed by Verilog name.
    """
    if not isdefinition(circuit):
        return defn
    for inst in circuit.instances:
        find(type(inst), defn)
    key = circuit.verilog_name
    if key not in defn:
        defn[key] = circuit
    return defn
def compile(main):
    """Compile ``main`` and all of its dependencies into one Verilog string.

    NOTE: shadows the builtin ``compile`` within this module.
    """
    definitions = find(main, OrderedDict())
    parts = []
    for key, circuit in definitions.items():
        _logger.debug(f'compiling circuit {key}')
        parts.append(compiledefinition(circuit) + '\n')
    return ''.join(parts)
class VerilogCompiler(Compiler):
    """Compiler backend that emits Verilog (``.v``) or SystemVerilog (``.sv``)."""

    def suffix(self):
        """Return 'sv' when the main circuit declares an ``.sv`` file name, else 'v'."""
        if hasattr(self.main, "verilog_file_name"):
            extension = os.path.splitext(self.main.verilog_file_name)[-1]
            if extension == ".sv":
                return "sv"
        return "v"

    def generate_code(self):
        """Compile the main circuit and all of its dependencies to Verilog text."""
        return compile(self.main)
| StarcoderdataPython |
3370960 | <reponame>yugangzhang/GitTest
# BlueskyMagics were imported and registered in 00-startup.py
# Detectors used by the Bluesky magics (e.g. %ct).
BlueskyMagics.detectors = [pilatus2M]
# Positioners listed by the %wa magic; names refer to motor/slit/mono/mirror
# devices defined elsewhere in the profile startup files.
BlueskyMagics.positioners = [smx,smy,sth,schi,sphi,srot,strans,strans2,stilt,stilt2, DETx,DETy,WAXSx,SAXSx,SAXSy, bsx,bsy,bsphi, camx,camy, armz,armx,armphi,army,armr, bim3y,fs3y,bim4y,bim5y, s0.tp, s0.bt, s0.ob, s0.ib, s0.xc, s0.yc, s0.xg, s0.yg, s1.xc, s1.yc, s1.xg, s1.yg, s2.xc, s2.yc, s2.xg, s2.yg, s3.xc, s3.yc, s3.xg, s3.yg, s4.xc, s4.yc, s4.xg, s4.yg, s5.xc, s5.yc, s5.xg, s5.yg, mono_bragg,mono_pitch2,mono_roll2,mono_perp2, mir_usx,mir_dsx,mir_usy,mir_dsyi,mir_dsyo,mir_bend]
### Override the %wa magic with one that includes offsets.
### Later this will be added to bluesky itself and will not
### need to be customized here.
from IPython.core.magic import Magics, magics_class, line_magic
from operator import attrgetter
@magics_class
class CMSCustomMagics(BlueskyMagics):
    """BlueskyMagics subclass whose %wa magic also reports each positioner's offset."""

    @line_magic
    def wa(self, line):
        "List positioner info. 'wa' stands for 'where all'."
        if line.strip():
            # an explicit positioner list was given on the magic's line;
            # evaluate it in the user namespace
            positioners = eval(line, self.shell.user_ns)
        else:
            positioners = self.positioners
        positioners = sorted(set(positioners), key=attrgetter('name'))
        values = []
        for p in positioners:
            try:
                values.append(p.position)
            except Exception as exc:
                # record the failure; rendered in place of the value below
                values.append(exc)

        headers = ['Positioner', 'Value', 'Low Limit', 'High Limit', 'Offset']
        LINE_FMT = '{: <30} {: <10} {: <10} {: <10} {: <10}'
        lines = []
        lines.append(LINE_FMT.format(*headers))
        for p, v in zip(positioners, values):
            if not isinstance(v, Exception):
                try:
                    prec = p.precision
                except Exception:
                    prec = self.FMT_PREC
                value = np.round(v, decimals=prec)
                try:
                    low_limit, high_limit = p.limits
                except Exception as exc:
                    low_limit = high_limit = exc.__class__.__name__
                else:
                    low_limit = np.round(low_limit, decimals=prec)
                    high_limit = np.round(high_limit, decimals=prec)
                try:
                    offset = p.user_offset.get()
                except Exception as exc:
                    offset = exc.__class__.__name__
                else:
                    offset = np.round(offset, decimals=prec)
            else:
                value = v.__class__.__name__  # e.g. 'DisconnectedError'
                low_limit = high_limit = ''
                # NOTE(review): ``offset`` is not assigned on this branch, so the
                # format call below reuses a stale value (or raises NameError if
                # the first positioner is disconnected) -- confirm and fix upstream.
            lines.append(LINE_FMT.format(p.name, value, low_limit, high_limit,
                                         offset))
        print('\n'.join(lines))
# This will override the %wa registered from BlueskyMagics,
# replacing it with the offset-aware version above.
get_ipython().register_magics(CMSCustomMagics)
| StarcoderdataPython |
1709537 | # -*- coding: utf-8 -*-
"""
* Copyright 2015 Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import websocket
try:
import thread
except ImportError:
import _thread as thread
from ali_speech._logging import _log
from ali_speech._create_token import AccessToken
from ali_speech._speech_recognizer import SpeechRecognizer
from ali_speech._speech_transcriber import SpeechTranscriber
from ali_speech._speech_synthesizer import SpeechSynthesizer
__all__ = ["NlsClient"]
class NlsClient:
    """Entry point of the Alibaba NLS SDK: token helper plus factories for
    speech recognizer / transcriber / synthesizer sessions."""

    # Default public NLS websocket gateway endpoint.
    URL_GATEWAY = 'wss://nls-gateway.cn-shanghai.aliyuncs.com/ws/v1'

    def __init__(self):
        # Disable websocket-client's verbose frame tracing.
        websocket.enableTrace(False)

    @staticmethod
    def set_log_level(level):
        """Set the verbosity of the SDK-wide logger."""
        _log.setLevel(level)

    @staticmethod
    def create_token(access_key_id, access_key_secret):
        """Exchange an AccessKey id/secret pair for a service token."""
        return AccessToken.create_token(access_key_id, access_key_secret)

    def create_recognizer(self, callback, gateway_url=URL_GATEWAY):
        """Create a :class:`SpeechRecognizer` reporting events to ``callback``."""
        return SpeechRecognizer(callback, gateway_url)

    def create_transcriber(self, callback, gateway_url=URL_GATEWAY):
        """Create a :class:`SpeechTranscriber` reporting events to ``callback``."""
        return SpeechTranscriber(callback, gateway_url)

    def create_synthesizer(self, callback, gateway_url=URL_GATEWAY):
        """Create a :class:`SpeechSynthesizer` reporting events to ``callback``."""
        return SpeechSynthesizer(callback, gateway_url)
| StarcoderdataPython |
1665708 | # Generated by Django 2.1.4 on 2018-12-13 18:20
from django.db import migrations
class Migration(migrations.Migration):
    # Renames DataSet.customer_id to DataSet.customer; depends on the 0003
    # migration that introduced the column under its old name.

    dependencies = [
        ('data_sets', '0003_dataset_customer'),
    ]

    operations = [
        migrations.RenameField(
            model_name='dataset',
            old_name='customer_id',
            new_name='customer',
        ),
    ]
| StarcoderdataPython |
188297 |
"""
Lyapunov module
=================
Module with the classes of multi-thread the computation of the various
`Lyapunov vectors`_ and `exponents`_. Integrate using the `Runge-Kutta method`_
defined in the :mod:`~.integrators.integrate` module.
See :cite:`lyap-KP2012` for more details on the Lyapunov vectors theoretical framework.
Module classes
--------------
* :class:`LyapunovsEstimator` to estimate the Backward and Forward Lyapunov Vectors (BLVs and FLVs) along a trajectory
* :class:`CovariantLyapunovsEstimator` to estimate the Covariant Lyapunov Vectors (CLVs) along a trajectory
.. _Lyapunov vectors: https://en.wikipedia.org/wiki/Lyapunov_vector
.. _exponents: https://en.wikipedia.org/wiki/Lyapunov_exponent
.. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
.. _Numba: https://numba.pydata.org/
References
----------
.. bibliography:: ../model/ref.bib
:labelprefix: LYAP-
:keyprefix: lyap-
"""
from numba import njit
import numpy as np
import qgs.integrators.integrate as integrate
from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse
import multiprocessing
class LyapunovsEstimator(object):
    """Class to compute the Forward and Backward `Lyapunov vectors`_ and `exponents`_ along a trajectory of a dynamical system

    .. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})

    with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
    The tangent linear model must also be provided. I.e. one must provide the linearized ODEs

    .. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}

    where :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
    Jacobian matrix of :math:`\\boldsymbol{f}`.

    The method used to compute the Lyapunov vectors is the one introduced by
    Benettin et al. :cite:`lyap-BGGS1980`.

    Parameters
    ----------
    num_threads: None or int, optional
        Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of machine's
        cores available. Default to `None`.
    b: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    c: None or ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    a: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    number_of_dimensions: None or int, optional
        Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
        callable :attr:`func`. Default to `None`.

    Attributes
    ----------
    num_threads: int
        Number of :class:`LyapProcess` workers (threads) to use.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    n_dim: int
        Dynamical system dimension.
    n_vec: int
        The number of Lyapunov vectors to compute.
    n_traj: int
        The number of trajectories (initial conditions) computed at the last estimation
        performed by the estimator.
    n_records: int
        The number of saved states of the last estimation performed by the estimator.
    ic: ~numpy.ndarray
        Store the estimator initial conditions.
    func: callable
        Last function :math:`\\boldsymbol{f}` used by the estimator.
    func_jac: callable
        Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
    """

    def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None):

        if num_threads is None:
            self.num_threads = multiprocessing.cpu_count()
        else:
            self.num_threads = num_threads

        # Default is RK4
        if a is None and b is None and c is None:
            self.c = np.array([0., 0.5, 0.5, 1.])
            self.b = np.array([1./6, 1./3, 1./3, 1./6])
            self.a = np.zeros((len(self.c), len(self.b)))
            self.a[1, 0] = 0.5
            self.a[2, 1] = 0.5
            self.a[3, 2] = 1.
        else:
            self.a = a
            self.b = b
            self.c = c

        self.ic = None
        self._time = None      # recording window time grid
        self._pretime = None   # transient (convergence) window time grid
        self._recorded_traj = None
        self._recorded_exp = None
        self._recorded_vec = None
        self.n_traj = 0
        self.n_dim = number_of_dimensions
        self.n_records = 0
        self.n_vec = 0
        self.write_steps = 0

        self._adjoint = False
        self._forward = -1     # -1: backward (BLV) mode, +1: forward (FLV) mode
        self._inverse = 1.

        self.func = None
        self.func_jac = None

        self._ics_queue = None
        self._lyap_queue = None

        self._processes_list = list()

    def terminate(self):
        """Stop the workers (threads) and release the resources of the estimator."""

        for process in self._processes_list:

            process.terminate()
            process.join()

    def start(self):
        """Start or restart the workers (threads) of the estimator.

        Warnings
        --------
        If the estimator was not previously terminated, it will be terminated first in the case
        of a restart.
        """

        self.terminate()

        self._processes_list = list()
        self._ics_queue = multiprocessing.JoinableQueue()
        self._lyap_queue = multiprocessing.Queue()

        for i in range(self.num_threads):
            self._processes_list.append(LyapProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
                                                    self._ics_queue, self._lyap_queue))

        for process in self._processes_list:
            process.daemon = True
            process.start()

    def set_bca(self, b=None, c=None, a=None, ic_init=True):
        """Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.

        .. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods

        Parameters
        ----------
        b: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        c: None or ~numpy.ndarray, optional
            Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        a: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        ic_init: bool, optional
            Re-initialize or not the initial conditions of the estimator. Default to `True`.
        """

        if a is not None:
            self.a = a
        if b is not None:
            self.b = b
        if c is not None:
            self.c = c
        if ic_init:
            self.ic = None
        self.start()

    def set_func(self, f, fjac):
        """Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
        :math:`\\boldsymbol{\\mathrm{J}}` to integrate.

        .. _Numba: https://numba.pydata.org/

        Parameters
        ----------
        f: callable
            The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
            Should have the signature ``f(t, x)`` where ``x`` is the state value and ``t`` is the time.
        fjac: callable
            The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
            Should have the signature ``J(t, x)`` where ``x`` is the state value and ``t`` is the time.

        Warnings
        --------
        This function restarts the estimator!
        """

        self.func = f
        self.func_jac = fjac
        self.start()

    def compute_lyapunovs(self, t0, tw, t, dt, mdt, ic=None, write_steps=1, n_vec=None, forward=False, adjoint=False,
                          inverse=False):
        """Estimate the Lyapunov vectors using the Benettin algorithm along a given trajectory, always integrating the said trajectory
        forward in time from `ic` at `t0` to time `t`.
        The result of the estimation can be obtained afterward by calling :meth:`get_lyapunovs`.
        If `forward` is `True`, it yields the Forward Lyapunov Vectors (FLVs) between `t0` and `tw`, otherwise, returns the Backward
        Lyapunov Vectors (BLVs) between `tw` and `t`.

        Parameters
        ----------
        t0: float
            Initial time of the time integration. Corresponds to the initial condition's `ic` time.
        tw: float
            Time at which the algorithm start to store the Lyapunov vectors. Define thus also the transient before the which the Lyapunov
            vectors are considered as having not yet converged. Must be between `t0` and `t`.
        t: float
            Final time of the time integration. Corresponds to the final condition.
        dt: float
            Timestep of the integration.
        mdt: float
            Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smaller or equal to `dt`.
        ic: None or ~numpy.ndarray(float), optional
            Initial conditions of the system. Can be a 1D or a 2D array:

            * 1D: Provide a single initial condition.
              Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
            * 2D: Provide an ensemble of initial condition.
              Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
              and where `n_traj` is the number of initial conditions.

            If `None`, use the initial conditions stored in :attr:`ic`.
            If then :attr:`ic` is `None`, use a zero initial condition.
            Default to `None`.
        forward: bool, optional
            If `True`, yield the `Forward Lyapunov Vectors` (FLVs) between `t0` and `tw`.
            If `False`, yield the `Backward Lyapunov Vectors` (BLVs) between `tw` and `t`.
            Default to `False`, i.e. Backward Lyapunov Vectors estimation.
        adjoint: bool, optional
            If true, integrate the tangent :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}` ,
            else, integrate the adjoint linear model :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}^T(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}`.
            Integrate the tangent model by default.
        inverse: bool, optional
            Whether or not to invert the Jacobian matrix
            :math:`\\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\rightarrow \\boldsymbol{\\mathrm{J}}^{-1}(t, \\boldsymbol{x})`.
            `False` by default.
        write_steps: int, optional
            Save the state of the integration in memory every `write_steps` steps. The other intermediary
            steps are lost. It determines the size of the returned objects. Default is 1.
            Set to 0 to return only the final state.
        n_vec: int, optional
            The number of Lyapunov vectors to compute. Should be smaller or equal to :attr:`n_dim`.
        """

        if self.func is None or self.func_jac is None:
            print('No function to integrate defined!')
            return 0

        if ic is None:
            # Probe func with growing zero vectors until it accepts one, to
            # discover the system dimension, then build a zero initial condition.
            i = 1
            while True:
                self.ic = np.zeros(i)
                try:
                    x = self.func(0., self.ic)
                except:
                    i += 1
                else:
                    break

            i = len(self.func(0., self.ic))
            self.ic = np.zeros(i)
        else:
            self.ic = ic

        # normalize a single initial condition to an ensemble of one
        if len(self.ic.shape) == 1:
            self.ic = self.ic.reshape((1, -1))

        self.n_traj = self.ic.shape[0]
        self.n_dim = self.ic.shape[1]
        if n_vec is not None:
            self.n_vec = n_vec
        else:
            self.n_vec = self.n_dim

        # [t0, tw] is the transient/recording window, [tw, t] the other one,
        # depending on the direction (forward/backward) chosen below.
        self._pretime = np.concatenate((np.arange(t0, tw, dt), np.full((1,), tw)))
        self._time = np.concatenate((np.arange(tw, t, dt), np.full((1,), t)))
        self.write_steps = write_steps

        if forward:
            self._forward = 1
        else:
            self._forward = -1

        self._adjoint = adjoint

        self._inverse = 1.
        if inverse:
            self._inverse *= -1.

        if write_steps == 0:
            self.n_records = 1
        else:
            # FLVs are recorded on the pre-transient window, BLVs after it
            if not forward:
                tot = self._time[::self.write_steps]
                self.n_records = len(tot)
                if tot[-1] != self._time[-1]:
                    self.n_records += 1
            else:
                tot = self._pretime[::self.write_steps]
                self.n_records = len(tot)
                if tot[-1] != self._pretime[-1]:
                    self.n_records += 1

        self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records))
        self._recorded_vec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
        self._recorded_exp = np.zeros((self.n_traj, self.n_vec, self.n_records))

        # dispatch one work item per trajectory to the worker processes
        for i in range(self.n_traj):
            self._ics_queue.put((i, self._pretime, self._time, mdt, self.ic[i], self.n_vec, self.write_steps,
                                 self._forward, self._adjoint, self._inverse))

        self._ics_queue.join()

        # collect the results; items may arrive in any order, args[0] is the index
        for i in range(self.n_traj):
            args = self._lyap_queue.get()
            self._recorded_traj[args[0]] = args[1]
            self._recorded_exp[args[0]] = args[2]
            self._recorded_vec[args[0]] = args[3]

    def get_lyapunovs(self):
        """Returns the result of the previous Lyapunov vectors estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.
        """

        # FLVs were recorded on the pre-transient grid, BLVs on the main grid
        if self._forward == -1:
            tt = self._time
        else:
            tt = self._pretime

        if self.write_steps > 0:
            if tt[::self.write_steps][-1] == tt[-1]:
                return tt[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_vec)
            else:
                # the final time was recorded separately; append it to the grid
                return np.concatenate((tt[::self.write_steps], np.full((1,), tt[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_vec)
        else:
            return tt[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_vec)
class LyapProcess(multiprocessing.Process):
    """:class:`LyapunovsEstimator`'s workers class. Allows to multi-thread Lyapunov vectors estimation.

    Parameters
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    ics_queue: multiprocessing.JoinableQueue
        Queue to which the worker ask for initial conditions and parameters input.
    lyap_queue: multiprocessing.Queue
        Queue to which the worker returns the estimation results.

    Attributes
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    """

    def __init__(self, processID, func, func_jac, b, c, a, ics_queue, lyap_queue):

        super().__init__()
        self.processID = processID
        self._ics_queue = ics_queue
        self._lyap_queue = lyap_queue
        self.func = func
        self.func_jac = func_jac
        self.a = a
        self.b = b
        self.c = c

    def run(self):
        """Main worker computing routine. Perform the estimation with the fetched initial conditions and parameters."""

        while True:

            # work item layout (see LyapunovsEstimator.compute_lyapunovs):
            # (index, pretime, time, mdt, ic, n_vec, write_steps, forward, adjoint, inverse)
            args = self._ics_queue.get()

            if args[7] == -1:
                # backward mode: Backward Lyapunov Vectors
                recorded_traj, recorded_exp, recorded_vec = _compute_backward_lyap_jit(self.func, self.func_jac,
                                                                                       args[1], args[2], args[3],
                                                                                       args[4][np.newaxis, :], args[5],
                                                                                       args[6], args[8], args[9],
                                                                                       self.b, self.c, self.a)
            else:
                # forward mode: Forward Lyapunov Vectors
                recorded_traj, recorded_exp, recorded_vec = _compute_forward_lyap_jit(self.func, self.func_jac,
                                                                                      args[1], args[2], args[3],
                                                                                      args[4][np.newaxis, :], args[5],
                                                                                      args[6], args[8], args[9],
                                                                                      self.b, self.c, self.a)

            self._lyap_queue.put((args[0], np.squeeze(recorded_traj), np.squeeze(recorded_exp),
                                  np.squeeze(recorded_vec)))

            self._ics_queue.task_done()
@njit
def _compute_forward_lyap_jit(f, fjac, time, posttime, mdt, ic, n_vec, write_steps, adjoint, inverse, b, c, a):
    # Integrate the full nonlinear trajectory over [time, posttime] first,
    # then run the Benettin iteration backward along it to obtain the FLVs.
    ttraj = integrate._integrate_runge_kutta_jit(f, np.concatenate((time[:-1], posttime)), ic, 1, 1, b, c, a)

    recorded_traj, recorded_exp, recorded_vec = _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt,
                                                                               n_vec, write_steps, adjoint, inverse, b, c, a)

    return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt, n_vec, write_steps, adjoint, inverse, b, c, a):
    # Split the full trajectory into the recording window (over `time`) and the
    # convergence/transient window (over `posttime`); they share the sample at tw.
    traj = ttraj[:, :, :len(time)]
    posttraj = ttraj[:, :, len(time)-1:]

    n_traj = ttraj.shape[0]
    n_dim = ttraj.shape[1]

    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)

    if write_steps == 0:
        n_records = 1
    else:
        n_records = len(time[::write_steps])
        if time[::write_steps][-1] != time[-1]:
            n_records += 1

    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))

    # the Benettin iteration for FLVs runs backward in time
    rposttime = reverse(posttime)
    rtime = reverse(time)

    for i_traj in range(n_traj):

        y = np.zeros((1, n_dim))
        # random orthonormal initial basis for the tangent subspace
        qr = np.linalg.qr(np.random.random((n_dim, n_vec)))
        q = qr[0]
        m_exp = np.zeros((n_dim))

        # transient sweep over [t, tw]: propagate + re-orthonormalize so the
        # basis converges to the Forward Lyapunov Vectors
        for ti, (tt, dt) in enumerate(zip(rposttime[:-1], np.diff(rposttime))):
            y[0] = posttraj[i_traj, :, -1-ti]
            subtime = np.concatenate((np.arange(tt + dt, tt, mdt), np.full((1,), tt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]

        # recording sweep over [tw, t0]; iw walks backward through the records
        iw = -1
        for ti, (tt, dt) in enumerate(zip(rtime[:-1], np.diff(rtime))):
            y[0] = traj[i_traj, :, -1-ti]
            # local exponents from the growth rates on R's diagonal
            m_exp = np.log(np.abs(np.diag(r)))/dt
            if write_steps > 0 and np.mod(ti, write_steps) == 0:
                recorded_exp[i_traj, :, iw] = m_exp
                recorded_traj[i_traj, :, iw] = y[0]
                recorded_vec[i_traj, :, :, iw] = q
                iw -= 1
            subtime = np.concatenate((np.arange(tt + dt, tt, mdt), np.full((1,), tt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]

        # final state (initial time of the recording window) goes in slot 0
        recorded_exp[i_traj, :, 0] = m_exp
        recorded_traj[i_traj, :, 0] = y[0]
        recorded_vec[i_traj, :, :, 0] = q

    return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_backward_lyap_jit(f, fjac, pretime, time, mdt, ic, n_vec, write_steps, adjoint, inverse, b, c, a):
    """Integrate the system from `ic` over `pretime` + `time`, then compute the
    Backward Lyapunov Vectors along the resulting trajectory.

    Thin wrapper: builds the full trajectory and delegates to
    :func:`_compute_backward_lyap_traj_jit`.
    """
    full_time = np.concatenate((pretime[:-1], time))
    full_traj = integrate._integrate_runge_kutta_jit(f, full_time, ic, 1, 1, b, c, a)
    return _compute_backward_lyap_traj_jit(f, fjac, pretime, time, full_traj, mdt,
                                           n_vec, write_steps, adjoint, inverse, b, c, a)
@njit
def _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj, mdt, n_vec, write_steps, adjoint, inverse, b, c, a):
    """Compute the Backward Lyapunov Vectors (BLVs) and local exponents along a
    precomputed trajectory `ttraj`.

    The tangent linear propagators are integrated *forward* in time with
    repeated QR re-orthonormalization (Benettin algorithm): the `pretime` span
    is used to converge the vectors, and results are recorded over `time`.

    Returns (recorded_traj, recorded_exp, recorded_vec).
    """
    # Split the full trajectory: convergence part (`pretime`) and recorded part (`time`).
    pretraj = ttraj[:, :, :len(pretime)]
    traj = ttraj[:, :, (len(pretime)-1):]
    n_traj = ttraj.shape[0]
    n_dim = ttraj.shape[1]
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    # Number of saved states: one per `write_steps` steps, plus the final
    # state if it does not fall exactly on a write step.
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    for i_traj in range(n_traj):
        y = np.zeros((1, n_dim))
        y[0] = pretraj[i_traj, :, 0]
        # Random initial subspace, orthonormalized through QR.
        qr = np.linalg.qr(np.random.random((n_dim, n_vec)))
        q = qr[0]
        # NOTE(review): allocated with size n_dim but overwritten below with a
        # diagonal of size n_vec — confirm the n_vec < n_dim corner case.
        m_exp = np.zeros((n_dim))
        # First pass over `pretime`: converge toward the BLVs; nothing is recorded.
        for ti, (tt, dt) in enumerate(zip(pretime[:-1], np.diff(pretime))):
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            # Resynchronize on the precomputed trajectory instead of the integrated state.
            y[0] = pretraj[i_traj, :, ti+1]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Second pass over `time`: keep iterating, recording every `write_steps` steps.
        iw = 0
        for ti, (tt, dt) in enumerate(zip(time[:-1], np.diff(time))):
            # Local exponents from the diagonal of the previous R factor.
            m_exp = np.log(np.abs(np.diag(r)))/dt
            if write_steps > 0 and np.mod(ti, write_steps) == 0:
                recorded_exp[i_traj, :, iw] = m_exp
                recorded_traj[i_traj, :, iw] = y[0]
                recorded_vec[i_traj, :, :, iw] = q
                iw += 1
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            y[0] = traj[i_traj, :, ti+1]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Always record the final state.
        recorded_exp[i_traj, :, -1] = m_exp
        recorded_traj[i_traj, :, -1] = y[0]
        recorded_vec[i_traj, :, :, -1] = q
    return recorded_traj, recorded_exp, recorded_vec
class CovariantLyapunovsEstimator(object):
    """Class to compute the Covariant `Lyapunov vectors`_ (CLVs) and `exponents`_ along a trajectory of a dynamical system

    .. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})

    with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
    The tangent linear model must also be provided. I.e. one must provide the linearized ODEs

    .. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}

    where :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
    Jacobian matrix of :math:`\\boldsymbol{f}`.

    Parameters
    ----------
    num_threads: None or int, optional
        Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of machine's
        cores available. Default to `None`.
    b: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    c: None or ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    a: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    number_of_dimensions: None or int, optional
        Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
        callable :attr:`func`. Default to `None`.
    method: int, optional
        Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:

        * `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
        * `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
          (see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).

        Default to `0`, i.e. Ginelli et al. algorithm.
    noise_pert: float, optional
        Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
        near tangencies (see :cite:`lyap-KP2012`). Default to 0 (no perturbation).
        Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.

    Attributes
    ----------
    num_threads: int
        Number of :class:`LyapProcess` workers (threads) to use.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    n_dim: int
        Dynamical system dimension.
    n_vec: int
        The number of Lyapunov vectors to compute.
    n_traj: int
        The number of trajectories (initial conditions) computed at the last estimation
        performed by the estimator.
    n_records: int
        The number of saved states of the last estimation performed by the estimator.
    ic: ~numpy.ndarray
        Store the estimator initial conditions.
    func: callable
        Last function :math:`\\boldsymbol{f}` used by the estimator.
    func_jac: callable
        Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
    method: int
        Select the method used to compute the CLVs:

        * `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
        * `1`: Uses the method of the intersection of the subspaces spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
          (see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
    noise_pert: float
        Noise perturbation parameter of the diagonal of the matrix resulting from the backpropagation during the Ginelli step.
        Mainly done to avoid ill-conditioned matrices near tangencies (see :cite:`lyap-KP2012`).
        Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.
    """

    def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None, noise_pert=0., method=0):

        if num_threads is None:
            self.num_threads = multiprocessing.cpu_count()
        else:
            self.num_threads = num_threads

        # Default is RK4
        if a is None and b is None and c is None:
            self.c = np.array([0., 0.5, 0.5, 1.])
            self.b = np.array([1./6, 1./3, 1./3, 1./6])
            self.a = np.zeros((len(self.c), len(self.b)))
            self.a[1, 0] = 0.5
            self.a[2, 1] = 0.5
            self.a[3, 2] = 1.
        else:
            self.a = a
            self.b = b
            self.c = c

        self.noise_pert = noise_pert

        # Estimation state; populated by compute_clvs().
        self.ic = None
        self._time = None
        self._pretime = None
        self._aftertime = None
        self._recorded_traj = None
        self._recorded_exp = None
        self._recorded_vec = None
        self._recorded_bvec = None
        self._recorded_fvec = None
        self.n_traj = 0
        self.n_dim = number_of_dimensions
        self.n_records = 0
        self.n_vec = 0
        self.write_steps = 0

        self.method = method

        self.func = None
        self.func_jac = None

        # Worker communication channels; created by start().
        self._ics_queue = None
        self._clv_queue = None

        self._processes_list = list()

    def terminate(self):
        """Stop the workers (threads) and release the resources of the estimator."""
        for process in self._processes_list:
            process.terminate()
            process.join()

    def set_noise_pert(self, noise_pert):
        """Set the noise perturbation :attr:`noise_pert` parameter.

        Parameters
        ----------
        noise_pert: float, optional
            Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
            near tangencies (see :cite:`lyap-KP2012`).
            Only apply if using the Ginelli et al. algorithm, i.e. if :attr:`method` is 0.
        """
        self.noise_pert = noise_pert
        # Restart the workers so they pick up the new parameter.
        self.start()

    def set_bca(self, b=None, c=None, a=None, ic_init=True):
        """Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.

        .. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods

        Parameters
        ----------
        b: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        c: None or ~numpy.ndarray, optional
            Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        a: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        ic_init: bool, optional
            Re-initialize or not the initial conditions of the estimator. Default to `True`.
        """
        if a is not None:
            self.a = a
        if b is not None:
            self.b = b
        if c is not None:
            self.c = c
        if ic_init:
            self.ic = None
        self.start()

    def start(self):
        """Start or restart the workers (threads) of the estimator.

        Warnings
        --------
        If the estimator was not previously terminated, it will be terminated first in the case
        of a restart.
        """
        self.terminate()

        self._processes_list = list()
        self._ics_queue = multiprocessing.JoinableQueue()
        self._clv_queue = multiprocessing.Queue()

        for i in range(self.num_threads):
            self._processes_list.append(ClvProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
                                                   self._ics_queue, self._clv_queue, self.noise_pert))

        for process in self._processes_list:
            process.daemon = True
            process.start()

    def set_func(self, f, fjac):
        """Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
        :math:`\\boldsymbol{\\mathrm{J}}` to integrate.

        .. _Numba: https://numba.pydata.org/

        Parameters
        ----------
        f: callable
            The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
            Should have the signature ``f(t, x)`` where ``x`` is the state value and ``t`` is the time.
        fjac: callable
            The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
            Should have the signature ``J(t, x)`` where ``x`` is the state value and ``t`` is the time.

        Warnings
        --------
        This function restarts the estimator!
        """
        self.func = f
        self.func_jac = fjac
        self.start()

    def compute_clvs(self, t0, ta, tb, tc, dt, mdt, ic=None, write_steps=1, n_vec=None, method=None, backward_vectors=False, forward_vectors=False):
        """Estimate the Covariant Lyapunov Vectors (CLVs) along a given trajectory, always integrating the said trajectory
        forward in time from `ic` at `t0` to time `tc`. Return the CLVs between `ta` and `tb`.
        The result of the estimation can be obtained afterward by calling :meth:`get_clvs`.

        Parameters
        ----------
        t0: float
            Initial time of the time integration. Corresponds to the initial condition's `ic` time.
        ta: float
            Define the time span between `t0` and `ta` of the first part of the algorithm, which obtain the convergence to the Backward Lyapunov vectors
            (initialization of the Benettin algorithm).
        tb: float
            Define the time span between `ta` and `tb` where the Covariant Lyapunov Vectors are computed.
        tc: float
            Final time of the time integration algorithm. Define the time span between `tb` and `tc` where, depending on the value of :attr:`method`,
            the convergence to the Forward Lyapunov Vectors or to the Covariant Lyapunov Vectors (thanks to the Ginelli steps) is obtained.
        dt: float
            Timestep of the integration.
        mdt: float
            Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smaller or equal to `dt`.
        ic: None or ~numpy.ndarray(float), optional
            Initial conditions of the system. Can be a 1D or a 2D array:

            * 1D: Provide a single initial condition.
              Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
            * 2D: Provide an ensemble of initial condition.
              Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
              and where `n_traj` is the number of initial conditions.

            If `None`, use the initial conditions stored in :attr:`ic`.
            If then :attr:`ic` is `None`, use a zero initial condition.
            Default to `None`.
        write_steps: int, optional
            Save the state of the integration in memory every `write_steps` steps. The other intermediary
            steps are lost. It determines the size of the returned objects. Default is 1.
            Set to 0 to return only the final state.
        n_vec: int, optional
            The number of Lyapunov vectors to compute. Should be smaller or equal to :attr:`n_dim`.
        method: int, optional
            Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:

            * `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
            * `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
              (see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).

            Use the Ginelli et al. algorithm if not provided.
        backward_vectors: bool, optional
            Store also the computed Backward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
            Does not store the BLVs if not provided.
        forward_vectors: bool, optional
            Store also the computed Forward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
            Does not store the FLVs if not provided.
        """
        if self.func is None or self.func_jac is None:
            print('No function to integrate defined!')
            return 0

        if ic is None:
            # Probe the dimension of func by growing a zero vector until the
            # call succeeds.
            # NOTE(review): bare except + unbounded loop — if func raises for
            # every size this never terminates; consider bounding the search.
            i = 1
            while True:
                self.ic = np.zeros(i)
                try:
                    x = self.func(0., self.ic)
                except:
                    i += 1
                else:
                    break

            i = len(self.func(0., self.ic))
            self.ic = np.zeros(i)
        else:
            self.ic = ic

        # Normalize a single initial condition to an ensemble of one.
        if len(self.ic.shape) == 1:
            self.ic = self.ic.reshape((1, -1))

        self.n_traj = self.ic.shape[0]
        self.n_dim = self.ic.shape[1]
        if n_vec is not None:
            self.n_vec = n_vec
        else:
            self.n_vec = self.n_dim

        if method is not None:
            self.method = method

        # Time grids for the three phases of the algorithm (see docstring).
        self._pretime = np.concatenate((np.arange(t0, ta, dt), np.full((1,), ta)))
        self._time = np.concatenate((np.arange(ta, tb, dt), np.full((1,), tb)))
        self._aftertime = np.concatenate((np.arange(tb, tc, dt), np.full((1,), tc)))

        self.write_steps = write_steps

        # Number of saved states: one per write_steps steps, plus the final
        # state if it does not fall on a write step.
        if write_steps == 0:
            self.n_records = 1
        else:
            tot = self._time[::self.write_steps]
            self.n_records = len(tot)
            if tot[-1] != self._time[-1]:
                self.n_records += 1

        self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records))
        self._recorded_vec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
        self._recorded_exp = np.zeros((self.n_traj, self.n_vec, self.n_records))
        if self.method == 1:
            if forward_vectors:
                self._recorded_fvec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
            if backward_vectors:
                self._recorded_bvec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))

        # Dispatch one job per trajectory to the workers and wait for completion.
        for i in range(self.n_traj):
            self._ics_queue.put((i, self._pretime, self._time, self._aftertime, mdt, self.ic[i], self.n_vec,
                                 self.write_steps, self.method))

        self._ics_queue.join()

        # Collect the results; args[0] is the trajectory index.
        for i in range(self.n_traj):
            args = self._clv_queue.get()
            self._recorded_traj[args[0]] = args[1]
            self._recorded_exp[args[0]] = args[2]
            self._recorded_vec[args[0]] = args[3]
            if self.method == 1:
                if forward_vectors:
                    self._recorded_fvec[args[0]] = args[5]
                if backward_vectors:
                    self._recorded_bvec[args[0]] = args[4]

    def get_clvs(self):
        """Returns the result of the previous CLVs estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.
        """
        if self.write_steps > 0:
            # Append the final time if it does not fall on a write step.
            if self._time[::self.write_steps][-1] == self._time[-1]:
                return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_vec)
            else:
                return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_vec)
        else:
            return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_vec)

    def get_blvs(self):
        """Returns the BLVs obtained during the previous CLVs estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.

        Warnings
        --------
        The BLVs are only available if :attr:`method` is set to 1.
        """
        if self._recorded_bvec is None:
            return None
        if self.write_steps > 0:
            if self._time[::self.write_steps][-1] == self._time[-1]:
                return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_bvec)
            else:
                return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_bvec)
        else:
            return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_bvec)

    def get_flvs(self):
        """Returns the FLVs obtained during the previous CLVs estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.

        Warnings
        --------
        The FLVs are only available if :attr:`method` is set to 1.
        """
        if self._recorded_fvec is None:
            return None
        if self.write_steps > 0:
            if self._time[::self.write_steps][-1] == self._time[-1]:
                return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_fvec)
            else:
                return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_fvec)
        else:
            return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_fvec)
class ClvProcess(multiprocessing.Process):
    """:class:`CovariantLyapunovsEstimator`'s workers class. Allows to multi-thread Lyapunov vectors estimation.

    Parameters
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    ics_queue: multiprocessing.JoinableQueue
        Queue to which the worker ask for initial conditions and parameters input.
    clv_queue: multiprocessing.Queue
        Queue to which the worker returns the estimation results.

    Attributes
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    """

    def __init__(self, processID, func, func_jac, b, c, a, ics_queue, clv_queue, noise_pert):
        super().__init__()
        self.processID = processID
        self._ics_queue = ics_queue
        self._clv_queue = clv_queue
        self.func = func
        self.func_jac = func_jac
        self.a = a
        self.b = b
        self.c = c
        # Noise amplitude forwarded to the Ginelli steps (method 0 only).
        self.noise_pert = noise_pert

    def run(self):
        """Main worker computing routine. Perform the estimation with the fetched initial conditions and parameters."""
        while True:
            # Job tuple layout (see CovariantLyapunovsEstimator.compute_clvs):
            # (traj index, pretime, time, aftertime, mdt, ic, n_vec, write_steps, method)
            args = self._ics_queue.get()
            method = args[8]
            if method == 0:
                # Ginelli et al. algorithm.
                recorded_traj, recorded_exp, recorded_vec = _compute_clv_gin_jit(self.func, self.func_jac, args[1], args[2],
                                                                                 args[3], args[4], args[5][np.newaxis, :],
                                                                                 args[6], args[7],
                                                                                 self.b, self.c, self.a, self.noise_pert)
                self._clv_queue.put((args[0], np.squeeze(recorded_traj), np.squeeze(recorded_exp),
                                     np.squeeze(recorded_vec)))
            else:
                # Subspace intersection algorithm; also returns the BLVs and FLVs.
                recorded_traj, recorded_exp, recorded_vec, backward_vec, forward_vec = _compute_clv_sub_jit(self.func, self.func_jac, args[1], args[2],
                                                                                                            args[3], args[4], args[5][np.newaxis, :],
                                                                                                            args[7], self.b, self.c, self.a)
                self._clv_queue.put((args[0], np.squeeze(recorded_traj), np.squeeze(recorded_exp),
                                     np.squeeze(recorded_vec), np.squeeze(backward_vec), np.squeeze(forward_vec)))
            self._ics_queue.task_done()
# Ginelli et al. method
@njit
def _compute_clv_gin_jit(f, fjac, pretime, time, aftertime, mdt, ic, n_vec, write_steps, b, c, a, noise_pert):
    """Compute the CLVs with the Ginelli et al. algorithm.

    Phases: (1) converge the backward vectors over `pretime`; (2) Benettin
    steps over `time`, storing trajectory, Q and R factors; (3) more Benettin
    steps over `aftertime`, storing only R; (4) backward (Ginelli) iterations
    from the end of `aftertime` down to `tb`, converging the expansion
    coefficients; (5) backward iterations over `time`, recording the CLVs and
    local exponents.  `noise_pert` scales a random perturbation of the
    diagonal during the backward solves to avoid ill-conditioned matrices.

    Returns (recorded_traj, recorded_exp, recorded_vec).

    Fix w.r.t. the previous version: the noise applied during the backward
    (Ginelli) steps perturbs the diagonal of an ``n_vec x n_vec`` matrix, so
    it must be drawn with size ``n_vec`` and applied over ``range(n_vec)``.
    The old code mixed ``n_dim`` and ``n_vec`` (``randn(n_vec)`` indexed over
    ``range(n_dim)``), raising an IndexError whenever ``n_vec < n_dim``.
    """
    n_traj = ic.shape[0]
    n_dim = ic.shape[1]
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    # Number of saved states: one per `write_steps` steps, plus the final
    # state if it does not fall exactly on a write step.
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    for i_traj in range(n_traj):
        # first part, making the backward vectors converge (initialization of the Benettin algorithm)
        y = np.zeros((1, n_dim))
        y[0] = ic[i_traj]
        qr = np.linalg.qr(np.random.randn(n_dim, n_vec))
        q = qr[0]
        for tt, dt in zip(pretime[:-1], np.diff(pretime)):
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
        # second part, stores the backward vectors and the r matrix (Benettin steps)
        # save the trajectories
        tw = len(time)-1
        tew = len(time)+len(aftertime)-2
        tmp_traj = np.zeros((tw+1, n_dim))
        tmp_vec = np.zeros((tw+1, n_dim, n_vec))
        tmp_R = np.zeros((tew, n_vec, n_vec))
        for ti, (tt, dt) in enumerate(zip(time[:-1], np.diff(time))):
            tmp_vec[ti] = q.copy()
            tmp_traj[ti] = y[0].copy()
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            tmp_R[ti] = qr[1].copy()
        tmp_vec[-1] = q.copy()
        tmp_traj[-1] = y[0].copy()
        # third part, stores the r matrix (Benettin steps)
        for ti, (tt, dt) in enumerate(zip(aftertime[:-1], np.diff(aftertime))):
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            tmp_R[ti+tw] = qr[1].copy()
        # fourth part, going backward until tb (Ginelli steps)
        qr = np.linalg.qr(np.random.randn(n_dim, n_vec))
        am, norm = normalize_matrix_columns(qr[1])
        for ti in range(tew-1, tw, -1):
            am_new = solve_triangular_matrix(tmp_R[ti], am)
            # am_new is n_vec x n_vec: perturb its diagonal with n_vec noise values.
            noise = np.random.randn(n_vec)
            for i in range(n_vec):
                am_new[i, i] += noise[i] * noise_pert
            am, norm = normalize_matrix_columns(am_new)
        # fifth and last part, going backward from tb to ta (Ginelli steps)
        # save the data
        dte = np.concatenate((np.diff(time), np.full((1,), aftertime[1] - aftertime[0])))
        iw = 1
        for ti in range(tw, -1, -1):
            am_new = solve_triangular_matrix(tmp_R[ti], am)
            # Same n_vec-sized diagonal perturbation as in the fourth part.
            noise = np.random.randn(n_vec)
            for i in range(n_vec):
                am_new[i, i] += noise[i] * noise_pert
            am, mloc_exp = normalize_matrix_columns(am_new)
            if write_steps > 0 and np.mod(tw-ti, write_steps) == 0:
                recorded_traj[i_traj, :, -iw] = tmp_traj[ti]
                recorded_exp[i_traj, :, -iw] = -np.log(np.abs(mloc_exp))/dte[ti]
                recorded_vec[i_traj, :, :, -iw] = tmp_vec[ti] @ am
                iw += 1
        # Always record the state at the start of `time`.
        recorded_traj[i_traj, :, 0] = tmp_traj[0]
        recorded_exp[i_traj, :, 0] = -np.log(np.abs(mloc_exp))/dte[0]
        recorded_vec[i_traj, :, :, 0] = tmp_vec[0] @ am
    return recorded_traj, recorded_exp, recorded_vec
# Subspace intersection method
@njit
def _compute_clv_sub_jit(f, fjac, pretime, time, aftertime, mdt, ic, write_steps, b, c, a):
    """Compute the CLVs as the intersections of the subspaces spanned by the
    Backward and Forward Lyapunov vectors (BLVs/FLVs).

    The full set of n_dim vectors is always computed here.  For each record,
    the j-th CLV is obtained via an SVD of the overlap between the first j+1
    BLVs and the first n_dim-j FLVs; the local exponents follow from a
    one-micro-step tangent integration of the resulting vectors.

    Returns (recorded_traj, recorded_exp, recorded_vec, bvec, fvec).
    """
    n_traj = ic.shape[0]
    n_dim = ic.shape[1]
    lp = len(pretime)
    la = len(aftertime)
    # One single nonlinear integration over pretime + time + aftertime.
    ttraj = integrate._integrate_runge_kutta_jit(f, np.concatenate((pretime[:-1], time[:-1], aftertime)), ic, 1, 1, b, c, a)
    # FLVs converge backward over `aftertime`; BLVs converge forward over `pretime`.
    traj, exp, fvec = _compute_forward_lyap_traj_jit(f, fjac, time, aftertime, ttraj[:, :, lp-1:], mdt,
                                                     n_dim, write_steps, False, 1, b, c, a)
    traj, exp, bvec = _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj[:, :, :-la+1], mdt,
                                                      n_dim, write_steps, False, 1, b, c, a)
    recorded_traj = traj
    recorded_exp = np.zeros_like(traj)
    n_records = traj.shape[-1]
    recorded_vec = np.zeros((n_traj, n_dim, n_dim, n_records))
    # Single micro-step used to estimate the local stretching of each CLV.
    subtime = np.array([0., mdt])
    y = np.zeros((1, n_dim))
    vec = np.zeros((1, n_dim, n_dim))
    for i_traj in range(n_traj):
        for ti in range(n_records):
            for j in range(n_dim):
                # Intersection of span(BLV_1..j+1) and span(FLV_1..n_dim-j):
                # the leading left singular vector gives the j-th CLV.
                u, z, w = np.linalg.svd(bvec[i_traj, :, :j+1, ti].T @ fvec[i_traj, :, :n_dim-j, ti])
                basis = bvec[i_traj, :, :j+1, ti] @ u
                recorded_vec[i_traj, :, j, ti] = basis[:, 0]
            y[0] = recorded_traj[i_traj, :, ti]
            vec[0] = recorded_vec[i_traj, :, :, ti]
            y_new, sol = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, vec, 1, 0, b, c, a,
                                                                   False, 1, integrate._zeros_func)
            # Local exponents from the column growth over one micro-step.
            soln, mloc_exp = normalize_matrix_columns(sol[0, :, :, 0])
            recorded_exp[i_traj, :, ti] = np.log(np.abs(mloc_exp))/mdt
    return recorded_traj, recorded_exp, recorded_vec, bvec, fvec
if __name__ == "__main__":
    # Demonstration: compute Backward, Forward and Covariant Lyapunov vectors
    # for the Lorenz 63 model (Lorenz 84 definitions kept for reference).

    # Lorenz 84 parameters.
    a = 0.25
    F = 8.
    G = 1.
    b = 4.

    @njit
    def fL84(t, x):
        # Lorenz 84 tendencies.
        xx = -x[1] ** 2 - x[2] ** 2 - a * x[0] + a * F
        yy = x[0] * x[1] - b * x[0] * x[2] - x[1] + G
        zz = b * x[0] * x[1] + x[0] * x[2] - x[2]
        return np.array([xx, yy, zz])

    @njit
    def DfL84(t, x):
        # Lorenz 84 Jacobian matrix.
        return np.array([[ -a , -2. * x[1], -2. * x[2]],
                         [x[1] - b * x[2], -1. + x[0], -b * x[0]],
                         [b * x[1] + x[2], b * x[0], -1. + x[0]]])

    # Lorenz 63 parameters.
    sigma = 10.
    r = 28.
    bb = 8. / 3.

    @njit
    def fL63(t, x):
        # Lorenz 63 tendencies.
        xx = sigma * (x[1] - x[0])
        yy = r * x[0] - x[1] - x[0] * x[2]
        zz = x[0] * x[1] - bb * x[2]
        return np.array([xx, yy, zz])

    @njit
    def DfL63(t, x):
        # Lorenz 63 Jacobian matrix.
        return np.array([[-sigma, sigma, 0.],
                         [r - x[2], -1., - x[0]],
                         [x[1], x[0], -bb]])

    # Spin-up integration to land on the attractor before estimating anything.
    ic = np.random.random(3)
    # tt, ic_L84 = integrate.integrate_runge_kutta(fL84, 0., 10000., 0.01, ic=ic, write_steps=0)
    tt, ic = integrate.integrate_runge_kutta(fL63, 0., 10000., 0.01, ic=ic, write_steps=0)

    print('Computing Backward Lyapunovs')
    lyapint = LyapunovsEstimator()
    # lyapint.set_func(fL84, DfL84)
    lyapint.set_func(fL63, DfL63)
    lyapint.compute_lyapunovs(0., 10000., 30000., 0.01, 0.01, ic, write_steps=1) #, n_vec=2)
    btl, btraj, bexp, bvec = lyapint.get_lyapunovs()

    print('Computing Forward Lyapunovs')
    # lyapint.set_func(fL84, DfL84)
    lyapint.set_func(fL63, DfL63)
    lyapint.compute_lyapunovs(0., 20000., 30000., 0.01, 0.01, ic, write_steps=1, forward=True, adjoint=False, inverse=False) #, n_vec=2)
    ftl, ftraj, fexp, fvec = lyapint.get_lyapunovs()

    print('Computing Covariant Lyapunovs')
    clvint = CovariantLyapunovsEstimator()
    # clvint.set_func(fL84, DfL84)
    clvint.set_func(fL63, DfL63)
    # Ginelli algorithm (method 0, default).
    clvint.compute_clvs(0., 10000., 20000., 30000., 0.01, 0.01, ic, write_steps=1) #, n_vec=2)
    ctl, ctraj, cexp, cvec = clvint.get_clvs()
    # Subspace-intersection algorithm (method 1), also storing the BLVs.
    clvint.compute_clvs(0., 10000., 20000., 30000., 0.01, 0.01, ic, write_steps=10, method=1, backward_vectors=True) #, n_vec=2)
    ctl2, ctraj2, cexp2, cvec2 = clvint.get_clvs()

    lyapint.terminate()
    clvint.terminate()
# file: run.py
import requests
from PIL import Image, ImageDraw, ImageFont
from clients.youtube_client import YouTubeClient
# Template thumbnail image and the path where the rendered copy is saved.
IMAGE_INPUT_FILE = './trump_biden.png'
IMAGE_OUTPUT_FILE = './trump_biden_generated.png'
# Font used to draw the vote counts onto the thumbnail.
OPENSANS_FONT_FILE = './fonts/OpenSans-ExtraBold.ttf'
# OAuth client secret for the YouTube Data API.
YOUTUBE_DATA_API_CREDENTIALS_LOCATION = './creds/client_secret.json'
# Guardian 2020 US election data feed: timestamp of the latest data drop,
# and the vote-detail endpoint keyed by that timestamp.
LAST_UPDATED_URL = "https://interactive.guim.co.uk/2020/11/us-general-election-data/prod/last_updated.json"
VOTE_COUNT_URL = "https://interactive.guim.co.uk/2020/11/us-general-election-data/prod/data-out/{}/president_details.json"
# ID of the YouTube video whose thumbnail is updated.
YOUTUBE_VIDEO_ID = "9c8P6VuymdE"
def get_election_vote_counts():
    """Fetch the current national vote counts for Biden and Trump.

    Queries the Guardian election feed: first the timestamp of the latest
    data drop, then the vote details published under that timestamp.

    Returns
    -------
    tuple: (biden_vote_count, trump_vote_count)

    Raises
    ------
    requests.HTTPError
        If either endpoint returns a non-2xx status.
    requests.Timeout
        If either request exceeds the 30 s timeout.
    """
    print("Getting vote counts for the campaign")
    # Without a timeout a stalled connection would hang this script forever;
    # raise_for_status surfaces HTTP errors instead of a confusing JSON error.
    last_updated_response = requests.get(LAST_UPDATED_URL, timeout=30)
    last_updated_response.raise_for_status()
    last_updated_time = last_updated_response.json()['time']
    latest_counts_response = requests.get(VOTE_COUNT_URL.format(last_updated_time), timeout=30)
    latest_counts_response.raise_for_status()
    candidates = latest_counts_response.json()['US']['candidates']
    # NOTE(review): assumes candidate 0 is Biden and 1 is Trump in the feed — TODO confirm.
    biden_vote_count = candidates[0]['votes']
    trump_vote_count = candidates[1]['votes']
    print("Successfully retrieved the vote counts")
    return biden_vote_count, trump_vote_count
def create_thumbnail(biden_vote_count, trump_vote_count):
    """Render both vote counts onto the thumbnail template and save the result.

    Opens IMAGE_INPUT_FILE, draws the counts in white over the two counter
    boxes, and writes the composed image to IMAGE_OUTPUT_FILE.
    """
    print("Creating the thumbnail")
    base = Image.open(IMAGE_INPUT_FILE)
    counter_font = ImageFont.truetype(OPENSANS_FONT_FILE, 80)
    canvas = ImageDraw.Draw(base)
    # Coordinates match the counter boxes baked into the template image.
    canvas.text((130, 480), str(biden_vote_count), font=counter_font, fill='#FFFFFF')
    canvas.text((770, 480), str(trump_vote_count), font=counter_font, fill='#FFFFFF')
    base.save(IMAGE_OUTPUT_FILE)
    print(f"Successfully generated the image and saved to {IMAGE_OUTPUT_FILE}")
def set_thumbnail_for_youtube_video(video_id, thumbnail):
    """Upload `thumbnail` as the thumbnail of the YouTube video `video_id`
    and print the API response."""
    client = YouTubeClient(YOUTUBE_DATA_API_CREDENTIALS_LOCATION)
    print(client.set_thumbnail(video_id, thumbnail))
def run():
    """Refresh the video thumbnail once: fetch the current vote counts,
    render them onto the template image, and upload the result."""
    biden_votes, trump_votes = get_election_vote_counts()
    create_thumbnail(biden_votes, trump_votes)
    set_thumbnail_for_youtube_video(YOUTUBE_VIDEO_ID, IMAGE_OUTPUT_FILE)
if __name__ == '__main__':
    # Entry point: perform one thumbnail update per invocation.
    run()
from modules.attention import MultiHeadedAttention
from modules.encoder import Encoder
from modules.decoder import Decoder
from modules.encoder_layer import EncoderLayer
from modules.decoder_layer import DecoderLayer
from modules.embedding import Embeddings
from modules.positional_encoding import PositionalEncoding
from modules.mask import subsequent_mask
from modules.feed_forward import PositionwiseFeedForward
from modules.generator import Generator
from modules.label_smoothing import LabelSmoothing
from modules.optimizer import NoamOpt, get_std_opt
| StarcoderdataPython |
def tester():
    """Smoke-test helper: print a fixed message to stdout."""
    message = "Testing... 1 2 3...."
    print(message)
| StarcoderdataPython |
1753788 | <reponame>snake-biscuits/bsp_tool_examples
import mapcycle
import os
import struct
# Local directory holding the TF2 .bsp map files.
TF2 = 'E:/Steam/SteamApps/common/Team Fortress 2/tf/maps/'
# Map names to scan; mapcycle is a project-local module -- presumably returns
# the official map rotation list (TODO confirm).
official_maps = mapcycle.load_maplist()
def filename_of(filepath):
    """Return *filepath* with its final extension stripped (directories kept).

    BUGFIX: the old implementation split the whole path on '.', so a path
    whose directory contained a dot but whose basename had no extension
    (e.g. 'some.dir/file') was truncated to 'some'.  os.path.splitext only
    considers the basename, and still leaves double extensions such as
    '.bsp.old' with the inner '.bsp' intact, matching the old behaviour for
    normal inputs.
    """
    return os.path.splitext(filepath)[0]
def path_above(filepath):
    """Return the parent directory of *filepath*, always ending in '/'.

    Note: only the FIRST empty segment (e.g. the one produced by a leading
    '/') is discarded before the last segment is dropped -- this mirrors the
    original list.remove() behaviour.
    """
    parts = filepath.split('/')
    if '' in parts:
        parts.remove('')  # drops just the first empty segment
    return '%s/' % '/'.join(parts[:-1])
def ensure_dir(path):
    """Create *path* (including missing parents); an existing dir is fine."""
    os.makedirs(path, exist_ok=True)
props = {}
# For every official map, parse the .bsp's game lump (static props) and the
# entity lump (prop_* entities), then tally model usage per map.
for MAP in official_maps:
    try:
        bsp = open(f'{TF2}{MAP}.bsp', 'rb')
        bsp.seek(568) #LUMP_GAME_LUMP
        offset = int.from_bytes(bsp.read(4), 'little')
        length = int.from_bytes(bsp.read(4), 'little')
        lump_version = int.from_bytes(bsp.read(4), 'little')
        fourCC = int.from_bytes(bsp.read(4), 'little')
        if fourCC != 0:
            # BUGFIX: was `raise NotImplemented(...)` -- NotImplemented is a
            # sentinel constant, not an exception class, so calling it raised
            # a confusing TypeError instead of the intended error.
            raise NotImplementedError("Can't decompress gamelumps just yet, use:\nbspzip -repack <bspfile> and try again")
        bsp.seek(offset)
        # Read the game-lump directory: count, then fixed-size headers.
        glump_headers = []
        glump_count = int.from_bytes(bsp.read(4), 'little')
        for i in range(glump_count):
            glump = bsp.read(16)
            glump = struct.unpack('iHHii', glump)
            glump_headers.append({
                'id': abs(glump[0]).to_bytes(4, 'big'),
                'flags': glump[1],
                'version': glump[2],
                'fileofs': glump[3],
                'filelen': glump[4]})
        for header in glump_headers:
            if header['id'] == b'sprp': #static prop lump
                sprp_version = header['version']
                sprp_offset = header['fileofs']
                sprp_length = header['filelen']
                sprp_flags = header['flags']
                break
        try:
            bsp.seek(sprp_offset)
        except NameError:
            # sprp_offset was never bound: no 'sprp' header was found above.
            raise RuntimeError('.bsp file has no static prop game lump')
        if sprp_flags == 1:
            print(bsp.name)
            raise NotImplementedError("Can't decompress the sprp lump just yet, use:\nbspzip -repack <bspfile> and try again")
        # Model-name dictionary: count, then 128-byte NUL-padded strings.
        sprp_dict_len = int.from_bytes(bsp.read(4), 'little') * 128
        try:
            sprp_dict = bsp.read(sprp_dict_len)
        except MemoryError:
            raise RuntimeError("You can't just load " + str(round(sprp_dict_len / 1024**2, 2)) + 'MB!')
        sprp_dict = struct.iter_unpack('128s', sprp_dict)
        sprp_names = [name[0].decode().strip('\x00') for name in sprp_dict] #sprp_names
        # Leaf list (unused below, but must be consumed to reach the props).
        sprp_leaves_len = int.from_bytes(bsp.read(4), 'little') * 2
        sprp_leaves = bsp.read(sprp_leaves_len)
        sprp_leaves = struct.iter_unpack('H', sprp_leaves)
        sprp_lump_len = int.from_bytes(bsp.read(4), 'little')
        static_props = []
        for i in range(sprp_lump_len):
            splump = bsp.read(72) #assuming sprp_version 10 (new maps should be OK)
            splump = struct.unpack('6f3H2Bi6f8Bf', splump) #no X360 bool or DXlevel
            static_props.append({
                'pos': splump[:3],
                #'angles': splump[3:6], #Y (yaw), Z (pitch), X (roll)
                'angles': [splump[3:6][0], -splump[3:6][2], splump[3:6][1] + 90], #XYZ >>> Y -X Z+90
                'model': splump[6],
                'first leaf': splump[7],
                'leaf count': splump[8],
                'solid': splump[9],
                'flags': splump[10],
                'skin': splump[11],
                'fademindist': splump[12], #to match prop_dynamic
                'fademaxdist': splump[13],
                'lighting origin': splump[14:17],
                'force fade scale': splump[17],
                'min CPU level': splump[18],
                'max CPU level': splump[19],
                'min GPU level': splump[20],
                'max GPU level': splump[21],
                'diffuse': splump[22:26],
                'unknown': splump[26],
                'type': 'prop_static'
                })
        # Now read the entity lump header (offset 8 in the .bsp header).
        bsp.seek(8)
        offset = int.from_bytes(bsp.read(4), 'little')
        length = int.from_bytes(bsp.read(4), 'little')
        lump_version = int.from_bytes(bsp.read(4), 'little')
        fourCC = int.from_bytes(bsp.read(4), 'little')
        if fourCC != 0:
            # BUGFIX: was `raise NotImplemented(...)` -- see note above.
            raise NotImplementedError("can't decompress entitiy lumps just yet")
        bsp.seek(offset)
##        entities = bsp.read(length)
##        entities.remove('\n\x00') #could still be cleaner
##        prop_entities = []
        # Entity lump is text: '{ "key" "value" ... }' records.
        entities = bsp.read(length).decode('ascii').replace('{', '').split('}')[:-1]
        new_entities = []
        for entity in entities:
            entity = entity.lstrip('\n').rstrip('\n')
            entity = entity.split('\n')
            new_entities.append(dict())
            for line in entity:
                key, value = line.split('" "')
                key, value = key[1:], value[:-1]
                new_entities[-1][key] = value
        entities = new_entities
        prop_entities = filter(lambda x: x['classname'].startswith('prop_'), entities)
        ent_prop_names = []
        for x in prop_entities:
            try:
                ent_prop_names.append(x['model'])
            except:
                pass
                #cp_junction has a prop_dynamic with no model
##                print(bsp.name)
##                print('\n'.join([f'{key}: {value}' for key, value in x.items()]))
##                print(x.keys())
##                raise RuntimeError()
        all_sprp_names = []
        for x in static_props:
            try:
                all_sprp_names.append(sprp_names[x['model']])
            except:
                pass
                #occasionally the index massively exceeds expected values
        all_prop_names = all_sprp_names + ent_prop_names
##        all_prop_names = sprp_names + ent_prop_names
        # Trim the 'models/' prefix and '.mdl' suffix from each name.
        all_prop_names = [*map(lambda x: x[7:-4], all_prop_names)]
        props[MAP] = {x: all_prop_names.count(x) for x in list(set(all_prop_names))}
    except Exception as exc:
        print(exc, MAP)
        raise exc
# Write the per-map CSV report: each row is the map name followed by
# count,prop pairs sorted by frequency (most used first).
outfile = open('props.csv', 'w')
outfile.write('MAP, PROPS\n')
for MAP in props:
    ranked_props = sorted(props[MAP], key=lambda x: props[MAP][x], reverse=True)
    outfile.write(f'{MAP}, ')
    outfile.write(','.join([f'{props[MAP][x]},{x}' for x in ranked_props]))
    outfile.write('\n')
##outfile.close()  (left open on purpose: a TOTAL row is appended further down)
import itertools  # NOTE(review): imported but never used in this script
# Aggregate the per-map counts into one global tally, then append a TOTAL
# row to the still-open CSV file and close it.
all_props = {}
for MAP in props:
    for p in props[MAP]:
        if p in all_props:
            all_props[p] += props[MAP][p]
        else:
            all_props[p] = props[MAP][p]
ranked_props = sorted(all_props, key=lambda p: all_props[p], reverse=True)
##print('\n'.join([f'{all_props[prop]} {prop}' for prop in ranked_props[:50]]))
outfile.write('TOTAL\n,')
outfile.write(','.join([f'{all_props[prop]},{prop}' for prop in ranked_props]))
outfile.close()
| StarcoderdataPython |
1643540 | <filename>src/core/set.py
#!/usr/bin/env python
#
#
# The Social-Engineer Toolkit
# Written by: <NAME> (ReL1K)
#
#
import shutil
import os
import time
import re
import sys
import socket
from src.core.setcore import *
from src.core.menu import text
# Python 2/3 compatibility shim: Python 3 has no raw_input, so alias it to
# input(); on Python 2 the bare name lookup succeeds and nothing changes.
try:
    raw_input
except:
    raw_input = input
ipaddr = ""  # target/listener IP address, filled in later by the menu flows
me = mod_name()  # current module name -- presumably from setcore's star import
#
# Define path and set it to the SET root dir
#
definepath = os.getcwd()
sys.path.append(definepath)
#
# ROOT CHECK
#
# grab the operating system
operating_system = check_os()
# grab metasploit path
msf_path = meta_path()
# SET requires root privileges on POSIX systems; bail out early otherwise.
if operating_system == "posix":
    if os.geteuid() != 0:
        print(
            "\n The Social-Engineer Toolkit (SET) - by <NAME> (ReL1K)")
        print(
            "\n Not running as root. \n\nExiting the Social-Engineer Toolkit (SET).\n")
        sys.exit(1)
define_version = get_version()  # SET version string shown in the banner below
try:
while 1:
show_banner(define_version, '1')
#
# USER INPUT: SHOW MAIN MENU #
#
debug_msg(me, "printing 'text.main'", 5)
show_main_menu = create_menu(text.main_text, text.main)
# special case of list item 99
print('\n 99) Return back to the main menu.\n')
main_menu_choice = (raw_input(setprompt("0", "")))
if main_menu_choice == 'exit':
break
if operating_system == "windows" or msf_path == False:
if main_menu_choice == "1" or main_menu_choice == "4" or main_menu_choice == "8" or main_menu_choice == "3":
print_warning(
"Sorry. This feature is not yet supported in Windows or Metasploit was not found.")
return_continue()
break
if main_menu_choice == '1': # 'Spearphishing Attack Vectors
while 1:
#
# USER INPUT: SHOW SPEARPHISH MENU #
#
if operating_system != "windows":
debug_msg(me, "printing 'text.spearphish_menu'", 5)
show_spearphish_menu = create_menu(
text.spearphish_text, text.spearphish_menu)
spearphish_menu_choice = raw_input(setprompt(["1"], ""))
if spearphish_menu_choice == 'exit':
exit_set()
if spearphish_menu_choice == 'help':
print(text.spearphish_text)
# Spearphish menu choice 1: Perform a Mass Email Attack
if spearphish_menu_choice == '1':
sys.path.append(definepath + "/src/core/msf_attacks/")
debug_msg(
me, "importing 'src.core.msf_attacks.create_payload'", 1)
try:
module_reload(create_payload)
except:
pass
import create_payload
# Spearphish menu choice 2: Create a FileFormat Payload
if spearphish_menu_choice == '2':
sys.path.append(definepath + "/src/core/msf_attacks/")
debug_msg(
me, "importing 'src.core.msf_attacks.create_payload'", 1)
try:
reload(create_payload)
except:
import create_payload
# Spearphish menu choice 3: Create a Social-Engineering
# Template
if spearphish_menu_choice == '3':
debug_msg(
me, "calling function 'custom_template' from 'src.core.setcore'", 3)
custom_template()
# Spearphish menu choice 99
if spearphish_menu_choice == '99':
break
#
# Web Attack Menu
#
# Main Menu choice 2: Website Attack Vectors
if main_menu_choice == '2':
while 1:
#
# USER INPUT: SHOW WEB ATTACK MENU #
#
debug_msg(me, "printing 'text.webattack_menu'", 5)
show_webattack_menu = create_menu(
text.webattack_text, text.webattack_menu)
attack_vector = raw_input(setprompt(["2"], ""))
choice3 = ""
if attack_vector == 'exit':
exit_set()
if attack_vector == "":
debug_msg(
me, "no attack vector entered, defaulting to '1) Java Applet Attack Method'", 3)
attack_vector = "1"
# check unsupported features
if operating_system == "windows" or msf_path == False:
if attack_vector == "2" or attack_vector == "9":
print_warning(
"Sorry. This option is not yet available in Windows or Metasploit was not found.")
return_continue()
break
# Web Attack menu choice 9: Return to the Previous Menu
if attack_vector == '99':
break
try:
attack_check = int(attack_vector)
except:
print_error("ERROR:Invalid selection, going back to menu.")
break
if attack_check > 9:
print_warning("Invalid option")
return_continue()
break
#
# HTA ATTACK VECTOR METHOD HERE
#
# if attack_vector == '8':
# assign HTA attack vector - do more later
# attack_vector = "hta"
# Removed to delete MLITM
#if attack_vector != "99999":
#
# USER INPUT: SHOW WEB ATTACK VECTORS MENU #
#
#if attack_vector != "7":
debug_msg(me, "printing 'text.webattack_vectors_menu'", 5)
show_webvectors_menu = create_menu(text.webattack_vectors_text, text.webattack_vectors_menu)
print(' 99) Return to Webattack Menu\n')
choice3 = raw_input(setprompt(["2"], ""))
if choice3 == 'exit':
exit_set()
if choice3 == "99":
break
if choice3 == "quit" or choice3 == '4':
break
try:
# write our attack vector to file to be called later
filewrite = open(userconfigpath + "attack_vector", "w")
# webjacking and web templates are not allowed
if attack_vector == "5" and choice3 == "1":
print(bcolors.RED + "\n Sorry, you can't use the Web Jacking vector with Web Templates." + bcolors.ENDC)
return_continue()
break
# if we select multiattack, web templates are not allowed
if attack_vector == "6" and choice3 == "1":
print(bcolors.RED + "\n Sorry, you can't use the Multi-Attack vector with Web Templates." + bcolors.ENDC)
return_continue()
break
# if we select web template and tabnabbing, throw this
# error and bomb out to menu
if attack_vector == "4" and choice3 == "1":
print(bcolors.RED + "\n Sorry, you can only use the cloner option with the tabnabbing method." + bcolors.ENDC)
return_continue()
break
# if attack vector is default or 1 for java applet
if attack_vector == '':
attack_vector = '1'
# specify java applet attack
if attack_vector == '1':
attack_vector = "java"
filewrite.write(attack_vector)
filewrite.close()
# specify browser exploits
if attack_vector == '2':
attack_vector = "browser"
filewrite.write(attack_vector)
filewrite.close()
if attack_vector == '':
attack_vector = '3'
# specify web harvester method
if attack_vector == '3':
attack_vector = "harvester"
filewrite.write(attack_vector)
filewrite.close()
print_info("Credential harvester will allow you to utilize the clone capabilities within SET")
print_info("to harvest credentials or parameters from a website as well as place them into a report")
# specify tab nabbing attack vector
if attack_vector == '4':
attack_vector = "tabnabbing"
filewrite.write(attack_vector)
filewrite.close()
# specify webjacking attack vector
if attack_vector == "5":
attack_vector = "webjacking"
filewrite.write(attack_vector)
filewrite.close()
# specify Multi-Attack Vector
attack_vector_multi = ""
if attack_vector == '6':
# trigger the multiattack flag in SET
attack_vector = "multiattack"
# write the attack vector to file
filewrite.write(attack_vector)
filewrite.close()
# hta attack vector
if attack_vector == '7':
# call hta attack vector
attack_vector = "hta"
filewrite.write(attack_vector)
filewrite.close()
# pull ip address
if choice3 != "-1":
fileopen = open(
"/etc/setoolkit/set.config", "r").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("AUTO_DETECT=ON", line)
if match:
try:
ipaddr = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
ipaddr.connect(('google.com', 0))
ipaddr.settimeout(2)
ipaddr = ipaddr.getsockname()[0]
update_options("IPADDR=" + ipaddr)
except Exception as error:
log(error)
ipaddr = raw_input(
setprompt(["2"], "Your interface IP Address"))
update_options("IPADDR=" + ipaddr)
# if AUTO_DETECT=OFF prompt for IP Address
for line in fileopen:
line = line.rstrip()
match = re.search("AUTO_DETECT=OFF", line)
if match:
if attack_vector != "harvester":
if attack_vector != "tabnabbing":
if attack_vector != "webjacking":
if attack_vector != "hta":
# this part is to determine if NAT/port forwarding is used
# if it is it'll prompt for
# additional questions
print_info("NAT/Port Forwarding can be used in the cases where your SET machine is")
print_info("not externally exposed and may be a different IP address than your reverse listener.")
nat_or_fwd = yesno_prompt('0', 'Are you using NAT/Port Forwarding [yes|no]')
if nat_or_fwd == "YES":
ipquestion = raw_input(setprompt(["2"], "IP address to SET web server (this could be your external IP or hostname)"))
filewrite2 = open(userconfigpath + "interface", "w")
filewrite2.write(ipquestion)
filewrite2.close()
# is your payload/listener
# on a different IP?
natquestion = yesno_prompt(["2"], "Is your payload handler (metasploit) on a different IP from your external NAT/Port FWD address [yes|no]")
if natquestion == 'YES':
ipaddr = raw_input(setprompt(["2"], "IP address for the reverse handler (reverse payload)"))
if natquestion == "NO":
ipaddr = ipquestion
# if you arent using NAT/Port
# FWD
if nat_or_fwd == "NO":
ipaddr = grab_ipaddress()
if attack_vector == "harvester" or attack_vector == "tabnabbing" or attack_vector == "webjacking":
print("""
-------------------------------------------------------------------------------
--- * IMPORTANT * READ THIS BEFORE ENTERING IN THE IP ADDRESS * IMPORTANT * ---
The way that this works is by cloning a site and looking for form fields to
rewrite. If the POST fields are not usual methods for posting forms this
could fail. If it does, you can always save the HTML, rewrite the forms to
be standard forms and use the "IMPORT" feature. Additionally, really
important:
If you are using an EXTERNAL IP ADDRESS, you need to place the EXTERNAL
IP address below, not your NAT address. Additionally, if you don't know
basic networking concepts, and you have a private IP address, you will
need to do port forwarding to your NAT IP address from your external IP
address. A browser doesn’t know how to communicate with a private IP
address, so if you don't specify an external IP address if you are using
this from an external perspective, it will not work. This isn't a SET issue
this is how networking works.
""")
try:
revipaddr = detect_public_ip()
ipaddr = raw_input(setprompt(["2"], "IP address for the POST back in Harvester/Tabnabbing [" + revipaddr + "]"))
if ipaddr == "": ipaddr=revipaddr
except Exception:
rhost = raw_input("Enter the IP address for POST back in Harvester/Tabnabbing: ")
ipaddr = rhost
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
update_options("IPADDR=" + ipaddr)
else:
if ipaddr != "":
update_options("IPADDR=" + ipaddr)
# if java applet attack
if attack_vector == "java":
applet_choice()
# Select SET quick setup
if choice3 == '1':
# get the template ready
sys.path.append(definepath + "/src/html/templates")
debug_msg(me, "importing src.html.templates.template'", 1)
try:
module_reload(template)
except:
import template
# grab browser exploit selection
if attack_vector == "browser":
# grab clientattack
sys.path.append(
definepath + "/src/webattack/browser_exploits")
debug_msg(me, "line 357: importing 'src.webattack.browser_exploits.gen_payload'", 1)
try:
module_reload(gen_payload)
except:
import gen_payload
# arp cache attack, will exit quickly
# if not in config file
sys.path.append(definepath + "/src/core/arp_cache")
debug_msg(me, "line 364: importing 'src.core.arp_cache.arp'", 1)
try:
module_reload(arp)
except:
import arp
# actual website attack here
# web_server.py is main core
sys.path.append(definepath + "/src/html/")
# clean up stale file
if os.path.isfile(userconfigpath + "cloner.failed"):
os.remove(userconfigpath + "cloner.failed")
site_cloned = True
debug_msg(me, "line 375: importing 'src.webattack.web_clone.cloner'", 1)
try:
module_reload(src.webattack.web_clone.cloner)
except:
import src.webattack.web_clone.cloner
# grab java applet attack
if attack_vector == "java":
debug_msg(me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(src.core.payloadgen.create_payloads)
except:
import src.core.payloadgen.create_payloads
if os.path.isfile(userconfigpath + "cloner.failed"):
site_cloned = False
if site_cloned == True:
# cred harvester for auto site here
if attack_vector == "harvester" or attack_vector == "tabnabbing" or attack_vector == "webjacking":
if attack_vector == "tabnabbing" or attack_vector == "webjacking":
debug_msg(
me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(src.webattack.tabnabbing)
except:
import src.webattack.tabnabbing
# start web cred harvester here
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
sys.path.append(
definepath + "/src/webattack/harvester/")
try:
module_reload(harvester)
except:
import harvester
# if we are using profiler lets prep everything to
# get ready
if attack_vector == "profiler":
from src.webattack.profiler.webprofiler import *
prep_website()
# launch HTA attack vector after the website has
# been cloned
if attack_vector == "hta":
# launch HTA attack vector after the website
# has been cloned
from src.webattack.hta.main import *
# update config
update_options("ATTACK_VECTOR=HTA")
gen_hta_cool_stuff()
attack_vector = "hta"
print_status("Automatically starting Apache for you...")
subprocess.Popen("service apache2 start", shell=True).wait()
if attack_vector != "harvester":
if attack_vector != "tabnabbing":
if attack_vector != "multiattack":
if attack_vector != "webjacking":
if attack_vector != "multiattack":
if attack_vector != "profiler":
if attack_vector != "hta":
# spawn web server here
debug_msg(
me, "importing 'src.html.spawn'", 1)
import src.html.spawn
# multi attack vector here
if attack_vector == "multiattack":
if choice3 == "1":
try:
filewrite = open(
"src/progam_junk/multiattack.template", "w")
filewrite.write("TEMPLATE=TRUE")
filewrite.close()
except:
pass
debug_msg(
me, "importing 'src.webattack.multi_attack.multiattack'", 1)
import src.webattack.multi_attack.multiattack
# Create a website clone
if choice3 == '2':
# flag that we want a custom website
definepath = os.getcwd()
sys.path.append(
definepath + "/src/webattack/web_clone/")
if os.path.isfile(userconfigpath + "site.template"):
os.remove(userconfigpath + "site.template")
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("TEMPLATE=CUSTOM")
print_info("SET supports both HTTP and HTTPS")
# specify the site to clone
print_info("Example: http://www.thisisafakesite.com")
URL = raw_input(
setprompt(["2"], "Enter the url to clone"))
match = re.search("http://", URL)
match1 = re.search("https://", URL)
if not match:
if not match1:
URL = ("http://" + URL)
match2 = re.search("facebook.com", URL)
if match2:
URL = ("https://login.facebook.com/login.php")
# changed based on new landing page for gmail.com
match3 = re.search("gmail.com", URL)
if match3:
URL = ("https://accounts.google.com")
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# launch HTA attack vector after the website has been
# cloned
if attack_vector == "hta":
# launch HTA attack vector after the website has
# been cloned
from src.webattack.hta.main import *
# update config
update_options("ATTACK_VECTOR=HTA")
gen_hta_cool_stuff()
attack_vector = "hta"
print_status(
"Automatically starting Apache for you...")
subprocess.Popen(
"service apache2 start", shell=True).wait()
# grab browser exploit selection
if attack_vector == "browser":
# grab clientattack
sys.path.append(
definepath + "/src/webattack/browser_exploits")
debug_msg(
me, "importing 'src.webattack.browser_exploits.gen_payload'", 1)
try:
module_reload(gen_payload)
except:
import gen_payload
# set site cloner to true
site_cloned = True
if attack_vector != "multiattack":
# import our website cloner
site_cloned = True
debug_msg(
me, "importing 'src.webattack.web_clone.cloner'", 1)
try:
module_reload(src.webattack.web_clone.cloner)
except:
import src.webattack.web_clone.cloner
if os.path.isfile(userconfigpath + "cloner.failed"):
site_cloned = False
if site_cloned == True:
if attack_vector == "java":
# import our payload generator
debug_msg(
me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(
src.core.payloadgen.create_payloads)
except:
import src.core.payloadgen.create_payloads
# arp cache if applicable
definepath = os.getcwd()
sys.path.append(definepath + "/src/core/arp_cache")
debug_msg(
me, "line 500: importing 'src.core.arp_cache.arp'", 1)
try:
module_reload(arp)
except:
import arp
# tabnabbing and harvester selection here
if attack_vector == "harvester" or attack_vector == "tabnabbing" or attack_vector == "webjacking":
if attack_vector == "tabnabbing" or attack_vector == "webjacking":
sys.path.append(
definepath + "/src/webattack/tabnabbing")
debug_msg(
me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(tabnabbing)
except:
import tabnabbing
sys.path.append(
definepath + "/src/webattack/harvester")
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
try:
module_reload(harvester)
except:
import harvester
# multi_attack vector here
if attack_vector == "multiattack":
sys.path.append(
definepath + "/src/webattack/multi_attack/")
debug_msg(
me, "importing 'src.webattack.multi_attack.multiattack'", 1)
try:
module_reload(multiattack)
except:
import multiattack
# if we arent using credential harvester or
# tabnabbing
if attack_vector != "harvester":
if attack_vector != "tabnabbing":
if attack_vector != "multiattack":
if attack_vector != "webjacking":
if attack_vector != "hta":
sys.path.append(
definepath + "/src/html")
debug_msg(
me, "importing 'src.html.spawn'", 1)
try:
module_reload(spawn)
except:
import spawn
# Import your own site
if choice3 == '3':
sys.path.append(
definepath + "/src/webattack/web_clone/")
if os.path.isfile(userconfigpath + "site.template"):
os.remove(userconfigpath + "site.template")
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("TEMPLATE=SELF")
# specify the site to clone
if not os.path.isdir(userconfigpath + "web_clone"):
os.makedirs(userconfigpath + "web_clone")
print_warning(
"Example: /home/website/ (make sure you end with /)")
print_warning(
"Also note that there MUST be an index.html in the folder you point to.")
URL = raw_input(
setprompt(["2"], "Path to the website to be cloned"))
if not URL.endswith("/"):
if not URL.endswith("index.html"):
URL = URL + "/"
if not os.path.isfile(URL + "index.html"):
if os.path.isfile(URL):
shutil.copyfile(
"%s" % (URL), userconfigpath + "web_clone/index.html")
if not os.path.isfile(URL):
if URL.endswith("index.html"):
shutil.copyfile(
URL, "%s/web_clone/index.html" % (userconfigpath))
else:
print_error("ERROR:index.html not found!!")
print_error(
"ERROR:Did you just put the path in, not file?")
print_error(
"Exiting the Social-Engineer Toolkit...Hack the Gibson.\n")
exit_set()
if os.path.isfile(URL + "index.html"):
print_status(
"Index.html found. Do you want to copy the entire folder or just index.html?")
choice = raw_input(
"\n1. Copy just the index.html\n2. Copy the entire folder\n\nEnter choice [1/2]: ")
if choice == "1" or choice == "":
if os.path.isfile("%s/web_clone/index.html" % (userconfigpath)):
os.remove("%s/web_clone/index.html" % (userconfigpath))
shutil.copyfile(URL + "index.html", "%s/web_clone/index.html" % (userconfigpath))
if choice == "2":
if os.path.isdir(URL + "src/webattack"):
print_error("You cannot specify a folder in the default SET path. This goes into a loop Try something different.")
URL = raw_input("Enter the folder to import into SET, this CANNOT be the SET directory: ")
if os.path.isdir(URL + "src/webattack" % (URL)):
print_error("You tried the same thing. Exiting now.")
sys.exit()
copyfolder(URL, "%s/web_clone/" % userconfigpath)
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# if not harvester then load up cloner
if attack_vector == "java" or attack_vector == "browser":
# import our website cloner
debug_msg(
me, "importing 'src.webattack.web_clone.cloner'", 1)
import src.webattack.web_clone.cloner
# launch HTA attack vector after the website has been
# cloned
if attack_vector == "hta":
# launch HTA attack vector after the website has
# been cloned
from src.webattack.hta.main import *
# update config
update_options("ATTACK_VECTOR=HTA")
gen_hta_cool_stuff()
attack_vector = "hta"
print_status(
"Automatically starting Apache for you...")
subprocess.Popen(
"service apache2 start", shell=True).wait()
# if java applet attack
if attack_vector == "java":
# import our payload generator
debug_msg(
me, "importing 'src.core.payloadgen.create_payloads'", 1)
import src.core.payloadgen.create_payloads
# grab browser exploit selection
if attack_vector == "browser":
# grab clientattack
sys.path.append(
definepath + "/src/webattack/browser_exploits")
debug_msg(
me, "importing 'src.webattack.browser_exploits.gen_payload'", 1)
try:
module_reload(gen_payload)
except:
import gen_payload
# arp cache if applicable
sys.path.append(definepath + "/src/core/arp_cache")
debug_msg(
me, "line 592: importing 'src.core.arp_cache.arp'", 1)
try:
module_reload(arp)
except:
import arp
# if not harvester spawn server
if attack_vector == "java" or attack_vector == "browser":
# import web_server and do magic
sys.path.append(definepath + "/src/html")
debug_msg(me, "importing 'src.html.spawn'", 1)
try:
module_reload(spawn)
except:
import spawn
# cred harvester for auto site here
if attack_vector == "harvester":
# get the url
print_info("Example: http://www.blah.com")
URL = raw_input(
setprompt(["2"], "URL of the website you imported"))
match = re.search("http://", URL)
match1 = re.search("https://", URL)
if not match:
if not match1:
URL = ("http://" + URL)
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# start web cred harvester here
sys.path.append(
definepath + "/src/webattack/harvester")
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
try:
module_reload(harvester)
except:
import harvester
# tabnabbing for auto site here
if attack_vector == "tabnabbing" or attack_vector == "webjacking":
# get the url
print_info("Example: http://www.blah.com")
URL = raw_input(
setprompt(["2"], "URL of the website you imported"))
match = re.search("http://", URL)
match1 = re.search("https://", URL)
if not match:
if not match1:
URL = ("http://" + URL)
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# start tabnabbing here
sys.path.append(
definepath + "/src/webattack/tabnabbing")
debug_msg(
me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(tabnabbing)
except:
import tabnabbing
# start web cred harvester here
sys.path.append(
definepath + "/src/webattack/harvester")
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
try:
module_reload(harvester)
except:
import harvester
# multi attack vector here
if attack_vector == "multiattack":
try:
filewrite = open(
"src/progam_junk/multiattack.template", "w")
filewrite.write("TEMPLATE=TRUE")
filewrite.close()
except:
pass
debug_msg(
me, "importing 'src.webattack.multi_attack.multiattack'", 1)
import src.webattack.multi_attack.multiattack
# Return to main menu
if choice3 == '4':
print (" Returning to main menu.\n")
break
except KeyboardInterrupt:
print(
" Control-C detected, bombing out to previous menu..")
break
# Define Auto-Infection USB/CD Method here
if main_menu_choice == '3':
#
# USER INPUT: SHOW INFECTIOUS MEDIA MENU #
#
# Main Menu choice 3: Infectious Media Generator
debug_msg(me, "printing 'text.infectious_menu'", 5)
show_infectious_menu = create_menu(
text.infectious_text, text.infectious_menu)
infectious_menu_choice = raw_input(setprompt(["3"], ""))
if infectious_menu_choice == 'exit':
exit_set()
if infectious_menu_choice == "99":
menu_back()
if infectious_menu_choice == "":
infectious_menu_choice = "1"
# if fileformat
if infectious_menu_choice == "1":
ipaddr = raw_input(
setprompt(["3"], "IP address for the reverse connection (payload)"))
update_options("IPADDR=" + ipaddr)
filewrite1 = open(userconfigpath + "payloadgen", "w")
filewrite1.write("payloadgen=solo")
filewrite1.close()
# if choice is file-format
if infectious_menu_choice == "1":
filewrite = open(userconfigpath + "fileformat.file", "w")
filewrite.write("fileformat=on")
filewrite.close()
sys.path.append(definepath + "/src/core/msf_attacks/")
debug_msg(
me, "importing 'src.core.msf_attacks.create_payload'", 1)
try:
module_reload(create_payload)
except:
import create_payload
# if choice is standard payload
if infectious_menu_choice == "2":
# trigger set options for infectious media
update_options("INFECTION_MEDIA=ON")
try:
import src.core.payloadgen.solo
except:
module_reload(src.core.payloadgen.solo)
# if we aren't exiting, then launch autorun
if infectious_menu_choice != "99":
try:
import src.autorun.autolaunch
except:
module_reload(src.autorun.autolaunch)
#
#
# Main Menu choice 4: Create a Payload and Listener
#
#
if main_menu_choice == '4':
update_options("PAYLOADGEN=SOLO")
import src.core.payloadgen.solo
# try: import src.core.payloadgen.solo
# except: module_reload(src.core.payloadgen.solo)
# if the set payload is there
if os.path.isfile(userconfigpath + "msf.exe"):
shutil.copyfile(userconfigpath + "msf.exe", "payload.exe")
return_continue()
# Main Menu choice 5: Mass Mailer Attack
if main_menu_choice == '5':
debug_msg(me, "importing 'src.phishing.smtp.client.smtp_web'", 1)
try:
module_reload(src.phishing.smtp.client.smtp_web)
except:
import src.phishing.smtp.client.smtp_web
# Main Menu choice 6: Teensy USB HID Attack Vector
if main_menu_choice == '6':
#
# USER INPUT: SHOW TEENSY MENU #
#
debug_msg(me, "printing 'text.teensy_menu'", 5)
show_teensy_menu = create_menu(text.teensy_text, text.teensy_menu)
teensy_menu_choice = raw_input(setprompt(["6"], ""))
if teensy_menu_choice == 'exit':
exit_set()
# if not return to main menu
yes_or_no = ''
if teensy_menu_choice != "99":
# set our teensy info file in program junk
filewrite = open(userconfigpath + "teensy", "w")
filewrite.write(teensy_menu_choice + "\n")
if teensy_menu_choice != "3" and teensy_menu_choice != "7" and teensy_menu_choice != "8" and teensy_menu_choice != "9" and teensy_menu_choice != "10" and teensy_menu_choice != "11" and teensy_menu_choice != "12" and teensy_menu_choice != "13" and teensy_menu_choice != "14":
yes_or_no = yesno_prompt(
"0", "Do you want to create a payload and listener [yes|no]: ")
if yes_or_no == "YES":
filewrite.write("payload")
filewrite.close()
# load a payload
sys.path.append(definepath + "/src/core/payloadgen")
debug_msg(
me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(create_payloads)
except:
import create_payloads
if yes_or_no == "NO":
filewrite.close()
# need these default files for web server load
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("TEMPLATE=CUSTOM")
filewrite.close()
filewrite = open(userconfigpath + "attack_vector", "w")
filewrite.write("hid")
filewrite.close()
# if we are doing binary2teensy
if teensy_menu_choice != "7" and teensy_menu_choice != "8" and teensy_menu_choice != "9" and teensy_menu_choice != "10" and teensy_menu_choice != "11" and teensy_menu_choice != "12" and teensy_menu_choice != "14":
sys.path.append(definepath + "/src/teensy")
debug_msg(me, "importing 'src.teensy.teensy'", 1)
try:
module_reload(teensy)
except:
import teensy
if teensy_menu_choice == "7":
debug_msg(me, "importing 'src.teensy.binary2teensy'", 1)
import src.teensy.binary2teensy
# if we are doing sd2teensy attack
if teensy_menu_choice == "8":
debug_msg(me, "importing 'src.teensy.sd2teensy'", 1)
import src.teensy.sd2teensy
# if we are doing the sd2teensy osx attack
if teensy_menu_choice == "9":
print_status(
"Generating the SD2Teensy OSX ino file for you...")
if not os.path.isdir(userconfigpath + "reports/osx_sd2teensy"):
os.makedirs(userconfigpath + "reports/osx_sd2teensy")
shutil.copyfile("src/teensy/osx_sd2teensy.ino",
"%s/reports/osx_sd2teensy/osx_sd2teensy.ino" % (userconfigpath))
print_status(
"File has been exported to ~/.set/reports/osx_sd2teensy/osx_sd2teensy.ino")
return_continue()
# if we are doing the X10 Arduino Sniffer
if teensy_menu_choice == "10":
print_status(
"Generating the Arduino sniffer and libraries ino..")
if not os.path.isdir(userconfigpath + "reports/arduino_sniffer"):
os.makedirs(userconfigpath + "reports/arduino_sniffer")
shutil.copyfile("src/teensy/x10/x10_sniffer.ino",
userconfigpath + "reports/arduino_sniffer/x10_sniffer.ino")
shutil.copyfile("src/teensy/x10/libraries.zip",
userconfigpath + "reports/arduino_sniffer/libraries.zip")
print_status(
"Arduino sniffer files and libraries exported to ~/.set/reports/arduino_sniffer")
return_continue()
# if we are doing the X10 Jammer
if teensy_menu_choice == "11":
print_status(
"Generating the Arduino jammer ino and libraries...")
if not os.path.isdir(userconfigpath + "reports/arduino_jammer"):
os.makedirs(userconfigpath + "reports/arduino_jammer")
shutil.copyfile("src/teensy/x10/x10_blackout.ino",
userconfigpath + "reports/arduino_jammer/x10_blackout.ino")
shutil.copyfile("src/teensy/x10/libraries.zip",
userconfigpath + "reports/arduino_jammer/libraries.zip")
print_status(
"Arduino jammer files and libraries exported to ~/.set/reports/arduino_jammer")
return_continue()
# powershell shellcode injection
if teensy_menu_choice == "12":
print_status(
"Generating the Powershell - Shellcode injection ino..")
debug_msg(
me, "importing 'src.teensy.powershell_shellcode'", 1)
import src.teensy.powershell_shellcode
# HID Msbuild compile to memory Shellcode Attack
if teensy_menu_choice == "14":
print_status(
"HID Msbuild compile to memory Shellcode Attack selected")
debug_msg(
me, "importing '-----file-----'", 1)
import src.teensy.ino_gen
if teensy_menu_choice == "99":
teensy_menu_choice = None
#
# Main Menu choice 8: Wireless Attack Point Attack Vector
#
if main_menu_choice == '7':
if operating_system == "windows":
print_warning(
"Sorry. The wireless attack vector is not yet supported in Windows.")
return_continue()
if operating_system != "windows":
# set path to nothing
airbase_path = ""
dnsspoof_path = ""
# need to pull the SET config file
fileopen = open("/etc/setoolkit/set.config", "r")
for line in fileopen:
line = line.rstrip()
match = re.search("AIRBASE_NG_PATH=", line)
if match:
airbase_path = line.replace("AIRBASE_NG_PATH=", "")
match1 = re.search("DNSSPOOF_PATH=", line)
if match1:
dnsspoof_path = line.replace("DNSSPOOF_PATH=", "")
if not os.path.isfile(airbase_path):
if not os.path.isfile("/usr/local/sbin/airbase-ng"):
print_warning(
"Warning airbase-ng was not detected on your system. Using one in SET.")
print_warning(
"If you experience issues, you should install airbase-ng on your system.")
print_warning(
"You can configure it through the set_config and point to airbase-ng.")
airbase_path = ("src/wireless/airbase-ng")
if os.path.isfile("/usr/local/sbin/airbase-ng"):
airbase_path = "/usr/local/sbin/airbase-ng"
if not os.path.isfile(dnsspoof_path):
if os.path.isfile("/usr/local/sbin/dnsspoof"):
dnsspoof_path = "/usr/local/sbin/dnsspoof"
if os.path.isfile("/usr/sbin/dnsspoof"):
dnsspoof_path = "/usr/sbin/dnsspoof"
# if we can find airbase-ng
if os.path.isfile(airbase_path):
if os.path.isfile(dnsspoof_path):
# start the menu here
while 1:
#
# USER INPUT: SHOW WIRELESS MENU #
#
debug_msg(
me, "printing 'text.wireless_attack_menu'", 5)
show_wireless_menu = create_menu(
text.wireless_attack_text, text.wireless_attack_menu)
wireless_menu_choice = raw_input(
setprompt(["8"], ""))
# if we want to start access point
if wireless_menu_choice == "1":
sys.path.append(definepath + "/src/wireless/")
debug_msg(
me, "importing 'src.wireless.wifiattack'", 1)
try:
module_reload(wifiattack)
except:
import wifiattack
# if we want to stop the wifi attack
if wireless_menu_choice == "2":
sys.path.append(definepath + "/src/wireless/")
debug_msg(
me, "importing 'src.wireless.stop_wifiattack'", 1)
try:
module_reload(stop_wifiattack)
except:
import stop_wifiattack
# if we want to return to the main menu
if wireless_menu_choice == "99":
print (" [*] Returning to the main menu ...")
break
if not os.path.isfile(dnsspoof_path):
if not os.path.isfile("/usr/local/sbin/dnsspoof"):
print_error(
"ERROR:DNS Spoof was not detected. Check the set_config file.")
return_continue()
#
# END WIFI ATTACK MODULE
#
# Main Menu choice 9: QRCode Generator
if main_menu_choice == '8':
try:
from PIL import Image, ImageDraw
from src.qrcode.qrgenerator import *
print("""
The QRCode Attack Vector will create a QRCode for you with whatever URL you want.
When you have the QRCode Generated, select an additional attack vector within SET and
deploy the QRCode to your victim. For example, generate a QRCode of the SET Java Applet
and send the QRCode via a mailer.
""")
url = raw_input(
"Enter the URL you want the QRCode to go to (99 to exit): ")
if url != "99":
# if the reports directory does not exist then create it
if not os.path.isdir("%s/reports" % (userconfigpath)):
os.makedirs("%s/reports" % (userconfigpath))
gen_qrcode(url)
return_continue()
except ImportError:
print_error(
"This module requires PIL (Or Pillow) and qrcode to work properly.")
print_error(
"Just do pip install Pillow; pip install qrcode")
print_error(
"Else refer to here for installation: http://pillow.readthedocs.io/en/3.3.x/installation.html")
return_continue()
# Main Menu choice 9: PowerShell Attacks
if main_menu_choice == '9':
try:
module_reload(src.powershell.powershell)
except:
import src.powershell.powershell
# Main Menu choice 11: Third Party Modules
if main_menu_choice == '10':
sys.path.append(definepath + "/src/core")
debug_msg(me, "importing 'src.core.module_handler'", 1)
try:
module_reload(module_handler)
except:
import module_handler
# Main Menu choice 99: Exit the Social-Engineer Toolkit
if main_menu_choice == '99':
break
# handle keyboard interrupts
except KeyboardInterrupt:
print("\n\n Thank you for " + bcolors.RED + "shopping" + bcolors.ENDC +
" with the Social-Engineer Toolkit.\n\n Hack the Gibson...and remember...hugs are worth more than handshakes.\n")
| StarcoderdataPython |
1690578 | '''
Population functions.
Code from https://github.com/cortex-lab/phylib/blob/master/phylib/stats/ccg.py by <NAME>.
Code for decoding by <NAME>
'''
import numpy as np
import scipy as sp
import types
from itertools import groupby
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, roc_auc_score
from sklearn.utils import shuffle as sklearn_shuffle
def _get_spike_counts_in_bins(spike_times, spike_clusters, intervals):
    """
    Return the number of spikes in a sequence of time intervals, for each neuron.

    Parameters
    ----------
    spike_times : 1D array
        spike times (in seconds); must be sorted ascending
    spike_clusters : 1D array
        cluster ids corresponding to each event in `spike_times`
    intervals : 2D array of shape (n_events, 2)
        the start and end times of the events

    Returns
    -------
    counts : 2D array of shape (n_neurons, n_events)
        the spike counts of all neurons for all events;
        value (i, j) is the number of spikes of neuron `cluster_ids[i]` in interval #j
    cluster_ids : 1D array
        list of cluster ids (sorted, unique)
    """
    # Check input
    assert intervals.ndim == 2
    assert intervals.shape[1] == 2
    assert np.all(np.diff(spike_times) >= 0), "Spike times need to be sorted"
    # Because spike_times is sorted, searchsorted gives, for each interval,
    # the index range [start, stop) of the spikes falling inside it.
    intervals_idx = np.searchsorted(spike_times, intervals)
    # For each neuron and each interval, the number of spikes in the interval.
    cluster_ids = np.unique(spike_clusters)
    n_neurons = len(cluster_ids)
    n_intervals = intervals.shape[0]
    counts = np.zeros((n_neurons, n_intervals), dtype=np.uint32)
    for j in range(n_intervals):
        t0, t1 = intervals[j, :]
        # Count the number of spikes in the window, for each neuron.
        # minlength guarantees the bincount output can be indexed by any
        # cluster id, even for clusters with no spikes in this window.
        x = np.bincount(
            spike_clusters[intervals_idx[j, 0]:intervals_idx[j, 1]],
            minlength=cluster_ids.max() + 1)
        counts[:, j] = x[cluster_ids]
    return counts, cluster_ids
def _index_of(arr, lookup):
"""Replace scalars in an array by their indices in a lookup table.
Implicitely assume that:
* All elements of arr and lookup are non-negative integers.
* All elements or arr belong to lookup.
This is not checked for performance reasons.
"""
# Equivalent of np.digitize(arr, lookup) - 1, but much faster.
# TODO: assertions to disable in production for performance reasons.
# TODO: np.searchsorted(lookup, arr) is faster on small arrays with large
# values
lookup = np.asarray(lookup, dtype=np.int32)
m = (lookup.max() if len(lookup) else 0) + 1
tmp = np.zeros(m + 1, dtype=np.int)
# Ensure that -1 values are kept.
tmp[-1] = -1
if len(lookup):
tmp[lookup] = np.arange(len(lookup))
return tmp[arr]
def _increment(arr, indices):
"""Increment some indices in a 1D vector of non-negative integers.
Repeated indices are taken into account."""
bbins = np.bincount(indices)
arr[:len(bbins)] += bbins
return arr
def _diff_shifted(arr, steps=1):
return arr[steps:] - arr[:len(arr) - steps]
def _create_correlograms_array(n_clusters, winsize_bins):
return np.zeros((n_clusters, n_clusters, winsize_bins // 2 + 1), dtype=np.int32)
def _symmetrize_correlograms(correlograms):
"""Return the symmetrized version of the CCG arrays."""
n_clusters, _, n_bins = correlograms.shape
assert n_clusters == _
# We symmetrize c[i, j, 0].
# This is necessary because the algorithm in correlograms()
# is sensitive to the order of identical spikes.
correlograms[..., 0] = np.maximum(
correlograms[..., 0], correlograms[..., 0].T)
sym = correlograms[..., 1:][..., ::-1]
sym = np.transpose(sym, (1, 0, 2))
return np.dstack((sym, correlograms))
def xcorr(spike_times, spike_clusters, bin_size=None, window_size=None):
    """Compute all pairwise cross-correlograms among the clusters appearing in `spike_clusters`.

    Parameters
    ----------
    :param spike_times: Spike times in seconds (sorted ascending).
    :type spike_times: array-like
    :param spike_clusters: Spike-cluster mapping.
    :type spike_clusters: array-like
    :param bin_size: Size of the bin, in seconds.
    :type bin_size: float
    :param window_size: Size of the window, in seconds.
    :type window_size: float

    Returns an `(n_clusters, n_clusters, winsize_samples)` array with all pairwise
    cross-correlograms.
    """
    assert np.all(np.diff(spike_times) >= 0), ("The spike times must be increasing.")
    assert spike_times.ndim == 1
    assert spike_times.shape == spike_clusters.shape
    # Find `binsize`.
    bin_size = np.clip(bin_size, 1e-5, 1e5)  # in seconds
    # Find `winsize_bins` (always an odd number of bins).
    window_size = np.clip(window_size, 1e-5, 1e5)  # in seconds
    winsize_bins = 2 * int(.5 * window_size / bin_size) + 1
    # Take the cluster order into account.
    clusters = np.unique(spike_clusters)
    n_clusters = len(clusters)
    # Like spike_clusters, but with 0..n_clusters-1 indices.
    spike_clusters_i = _index_of(spike_clusters, clusters)
    # Shift between the two copies of the spike trains.
    shift = 1
    # At a given shift, the mask precises which spikes have matching spikes
    # within the correlogram time window.
    # BUGFIX: use the builtin `bool`; the `np.bool` alias was deprecated in
    # NumPy 1.20 and removed in 1.24.
    mask = np.ones_like(spike_times, dtype=bool)
    correlograms = _create_correlograms_array(n_clusters, winsize_bins)
    # The loop continues as long as there is at least one spike with
    # a matching spike.
    while mask[:-shift].any():
        # Interval between spike i and spike i+shift.
        spike_diff = _diff_shifted(spike_times, shift)
        # Binarize the delays between spike i and spike i+shift.
        spike_diff_b = np.round(spike_diff / bin_size).astype(np.int64)
        # Spikes with no matching spikes are masked.
        mask[:-shift][spike_diff_b > (winsize_bins / 2)] = False
        # Cache the masked spike delays.
        m = mask[:-shift].copy()
        d = spike_diff_b[m]
        # Find the indices in the raveled correlograms array that need
        # to be incremented, taking into account the spike clusters.
        indices = np.ravel_multi_index(
            (spike_clusters_i[:-shift][m], spike_clusters_i[+shift:][m], d), correlograms.shape)
        # Increment the matching spikes in the correlograms array.
        _increment(correlograms.ravel(), indices)
        shift += 1
    # Mirror the positive-lag half to obtain the full symmetric CCGs.
    return _symmetrize_correlograms(correlograms)
def decode(spike_times, spike_clusters, event_times, event_groups, pre_time=0, post_time=0.5,
           classifier='bayes', cross_validation='kfold', num_splits=5, prob_left=None,
           custom_validation=None, n_neurons='all', iterations=1, shuffle=False, phase_rand=False):
    """
    Use decoding to classify groups of trials (e.g. stim left/right). Classification is done using
    the population vector of summed spike counts from the specified time window. Cross-validation
    is achieved using n-fold cross validation or leave-one-out cross validation. Decoders can
    decode any number of groups. When providing the classfier with an imbalanced dataset (not
    the same number of trials in each group) the chance level will not be 1/groups. In that case,
    to compare the classification performance against change one has to either determine chance
    level by decoding a shuffled dataset or use the 'auroc' metric as readout (this metric is
    robust against imbalanced datasets)

    Parameters
    ----------
    spike_times : 1D array
        spike times (in seconds)
    spike_clusters : 1D array
        cluster ids corresponding to each event in `spikes`
    event_times : 1D array
        times (in seconds) of the events from the two groups
    event_groups : 1D array
        group identities of the events, can be any number of groups, accepts integers and strings
    pre_time : float
        time (in seconds) preceding the event times
    post_time : float
        time (in seconds) following the event times
    classifier : string or sklearn object
        which decoder to use, either input a scikit learn clf object directly or a string.
        When it's a string options are (all classifiers are used with default options):
            'bayes'      Naive Bayes
            'forest'     Random forest
            'regression' Logistic regression
            'lda'        Linear Discriminant Analysis
    cross_validation : string
        which cross-validation method to use, options are:
            'none'              No cross-validation
            'kfold'             K-fold cross-validation
            'leave-one-out'     Leave out the trial that is being decoded
            'block'             Leave out the block the to-be-decoded trial is in
            'custom'            Any custom cross-validation provided by the user
    num_splits : integer
        ** only for 'kfold' cross-validation **
        Number of splits to use for k-fold cross validation, a value of 5 means that the decoder
        will be trained on 4/5th of the data and used to predict the remaining 1/5th. This process
        is repeated five times so that all data has been used as both training and test set.
    prob_left : 1D array
        ** only for 'block' cross-validation **
        the probability of the stimulus appearing on the left for each trial in event_times
    custom_validation : generator
        ** only for 'custom' cross-validation **
        a generator object with the splits to be used for cross validation using this format:
            (
                (split1_train_idxs, split1_test_idxs),
                (split2_train_idxs, split2_test_idxs),
                (split3_train_idxs, split3_test_idxs),
             ...)
    n_neurons : string or integer
        number of neurons to randomly subselect from the population (default is 'all')
    iterations : int
        number of times to repeat the decoding (especially usefull when subselecting neurons)
    shuffle : boolean
        whether to shuffle the trial labels each decoding iteration
    phase_rand : boolean
        whether to use phase randomization of the activity over trials to use as a "chance"
        predictor

    Returns
    -------
    results : dict
        dictionary with decoding results

        accuracy : float
            accuracy of the classifier in percentage correct
        f1 : float
            F1 score of the classifier
        auroc : float
            the area under the ROC curve of the classification performance
        confusion_matrix : 2D array
            normalized confusion matrix
        predictions : 2D array with dimensions iterations x trials
            predicted group label for all trials in every iteration
        probabilities : 2D array with dimensions iterations x trials
            classification probability for all trials in every iteration
    """
    # Check input
    assert classifier in ['bayes', 'forest', 'regression', 'lda']
    assert cross_validation in ['none', 'kfold', 'leave-one-out', 'block', 'custom']
    assert event_times.shape[0] == event_groups.shape[0]
    if cross_validation == 'block':
        assert event_times.shape[0] == prob_left.shape[0]
    if cross_validation == 'custom':
        assert isinstance(custom_validation, types.GeneratorType)

    # Get matrix of all neuronal responses (trials x neurons after transpose)
    times = np.column_stack(((event_times - pre_time), (event_times + post_time)))
    pop_vector, cluster_ids = _get_spike_counts_in_bins(spike_times, spike_clusters, times)
    pop_vector = pop_vector.T
    # Exclude last trial if the number of trials is even and phase shuffling
    # (the FFT-based phase randomization below assumes an odd trial count)
    if (phase_rand is True) & (event_groups.shape[0] % 2 == 0):
        event_groups = event_groups[:-1]
        pop_vector = pop_vector[:-1]
    # Initialize classifier (defaults for each named option)
    if type(classifier) == str:
        if classifier == 'forest':
            clf = RandomForestClassifier()
        elif classifier == 'bayes':
            clf = GaussianNB()
        elif classifier == 'regression':
            clf = LogisticRegression()
        elif classifier == 'lda':
            clf = LinearDiscriminantAnalysis()
    else:
        clf = classifier
    # Pre-allocate variables
    acc = np.zeros(iterations)
    f1 = np.zeros(iterations)
    auroc = np.zeros(iterations)
    conf_matrix_norm = np.zeros((np.shape(np.unique(event_groups))[0],
                                 np.shape(np.unique(event_groups))[0],
                                 iterations))
    pred = np.zeros([iterations, pop_vector.shape[0]])
    prob = np.zeros([iterations, pop_vector.shape[0]])
    for i in range(iterations):

        # Pre-allocate variables for this iteration
        y_pred = np.zeros(event_groups.shape)
        y_probs = np.zeros(event_groups.shape)

        # Get neurons to use for this iteration
        if n_neurons == 'all':
            sub_pop_vector = pop_vector
        else:
            use_neurons = np.random.choice(pop_vector.shape[1], n_neurons, replace=False)
            sub_pop_vector = pop_vector[:, use_neurons]

        # Shuffle trial labels if necessary
        # NOTE(review): this rebinds event_groups, so the shuffled labels are
        # also used when computing the metrics below and carried into the next
        # iteration — confirm this is the intended chance-level procedure.
        if shuffle is True:
            event_groups = sklearn_shuffle(event_groups)

        # Perform phase randomization of activity over trials if necessary
        # (randomize the FFT phases per neuron while preserving power spectra)
        if phase_rand is True:
            if i == 0:
                original_pop_vector = sub_pop_vector
            rand_pop_vector = np.empty(original_pop_vector.shape)
            frequencies = int((original_pop_vector.shape[0] - 1) / 2)
            fsignal = sp.fft.fft(original_pop_vector, axis=0)
            power = np.abs(fsignal[1:1 + frequencies])
            phases = 2 * np.pi * np.random.rand(frequencies)
            for k in range(original_pop_vector.shape[1]):
                newfsignal = fsignal[0, k]
                newfsignal = np.append(newfsignal, np.exp(1j * phases) * power[:, k])
                newfsignal = np.append(newfsignal, np.flip(np.exp(-1j * phases) * power[:, k]))
                newsignal = sp.fft.ifft(newfsignal)
                rand_pop_vector[:, k] = np.abs(newsignal.real)
            sub_pop_vector = rand_pop_vector

        if cross_validation == 'none':
            # Fit the model on all the data and predict
            clf.fit(sub_pop_vector, event_groups)
            y_pred = clf.predict(sub_pop_vector)

            #  Get the probability of the prediction for ROC analysis
            probs = clf.predict_proba(sub_pop_vector)
            y_probs = probs[:, 1]  # keep positive only
        else:
            # Perform cross-validation
            if cross_validation == 'leave-one-out':
                cv = LeaveOneOut().split(sub_pop_vector)
            elif cross_validation == 'kfold':
                cv = KFold(n_splits=num_splits).split(sub_pop_vector)
            elif cross_validation == 'block':
                # Derive contiguous-block ids from runs of equal prob_left values
                block_lengths = [sum(1 for i in g) for k, g in groupby(prob_left)]
                blocks = np.repeat(np.arange(len(block_lengths)), block_lengths)
                cv = LeaveOneGroupOut().split(sub_pop_vector, groups=blocks)
            elif cross_validation == 'custom':
                cv = custom_validation

            # Loop over the splits into train and test
            for train_index, test_index in cv:

                # Fit the model to the training data
                clf.fit(sub_pop_vector[train_index], event_groups[train_index])

                # Predict the test data
                y_pred[test_index] = clf.predict(sub_pop_vector[test_index])

                # Get the probability of the prediction for ROC analysis
                probs = clf.predict_proba(sub_pop_vector[test_index])
                y_probs[test_index] = probs[:, 1]  # keep positive only

        # Calculate performance metrics and confusion matrix
        # NOTE(review): f1_score defaults to average='binary' and probs[:, 1]
        # assumes exactly two classes; with more than two groups these lines
        # will raise despite the docstring claim — confirm multi-class support.
        acc[i] = accuracy_score(event_groups, y_pred)
        f1[i] = f1_score(event_groups, y_pred)
        auroc[i] = roc_auc_score(event_groups, y_probs)
        conf_matrix = confusion_matrix(event_groups, y_pred)
        # Row-normalize so each true-class row sums to 1
        conf_matrix_norm[:, :, i] = conf_matrix / conf_matrix.sum(axis=1)[:, np.newaxis]

        # Add prediction and probability to matrix
        pred[i, :] = y_pred
        prob[i, :] = y_probs

    # Make integers from arrays when there's only one iteration
    if iterations == 1:
        acc = acc[0]
        f1 = f1[0]
        auroc = auroc[0]

    # Add to results dictionary
    if cross_validation == 'kfold':
        results = dict({'accuracy': acc, 'f1': f1, 'auroc': auroc,
                        'predictions': pred, 'probabilities': prob,
                        'confusion_matrix': conf_matrix_norm,
                        'n_groups': np.shape(np.unique(event_groups))[0],
                        'classifier': classifier, 'cross_validation': '%d-fold' % num_splits,
                        'iterations': iterations, 'shuffle': shuffle})
    else:
        results = dict({'accuracy': acc, 'f1': f1, 'auroc': auroc,
                        'predictions': pred, 'probabilities': prob,
                        'confusion_matrix': conf_matrix_norm,
                        'n_groups': np.shape(np.unique(event_groups))[0],
                        'classifier': classifier, 'cross_validation': cross_validation,
                        'iterations': iterations, 'shuffle': shuffle})
    return results
def lda_project(spike_times, spike_clusters, event_times, event_groups, pre_time=0, post_time=0.5,
                cross_validation='kfold', num_splits=5, prob_left=None, custom_validation=None):
    """
    Use linear discriminant analysis to project population vectors to the line that best separates
    the two groups. When cross-validation is used, the LDA projection is fitted on the training
    data after which the test data is projected to this projection.

    spike_times : 1D array
        spike times (in seconds)
    spike_clusters : 1D array
        cluster ids corresponding to each event in `spikes`
    event_times : 1D array
        times (in seconds) of the events from the two groups
    event_groups : 1D array
        group identities of the events, can be any number of groups, accepts integers and strings
    pre_time : float
        time (in seconds) preceding the event times
    post_time : float
        time (in seconds) following the event times
    cross_validation : string
        which cross-validation method to use, options are:
            'none'              No cross-validation
            'kfold'             K-fold cross-validation
            'leave-one-out'     Leave out the trial that is being decoded
            'block'             Leave out the block the to-be-decoded trial is in
            'custom'            Any custom cross-validation provided by the user
    num_splits : integer
        ** only for 'kfold' cross-validation **
        Number of splits to use for k-fold cross validation, a value of 5 means that the decoder
        will be trained on 4/5th of the data and used to predict the remaining 1/5th. This process
        is repeated five times so that all data has been used as both training and test set.
    prob_left : 1D array
        ** only for 'block' cross-validation **
        the probability of the stimulus appearing on the left for each trial in event_times
    custom_validation : generator
        ** only for 'custom' cross-validation **
        a generator object with the splits to be used for cross validation using this format:
            (
                (split1_train_idxs, split1_test_idxs),
                (split2_train_idxs, split2_test_idxs),
                (split3_train_idxs, split3_test_idxs),
             ...)

    Returns
    -------
    lda_projection : 1D array
        the position along the LDA projection axis for the population vector of each trial
    """
    # Check input
    assert cross_validation in ['none', 'kfold', 'leave-one-out', 'block', 'custom']
    assert event_times.shape[0] == event_groups.shape[0]
    if cross_validation == 'block':
        assert event_times.shape[0] == prob_left.shape[0]
    if cross_validation == 'custom':
        assert isinstance(custom_validation, types.GeneratorType)

    # Get matrix of all neuronal responses; rot90 turns the
    # (neurons x trials) counts into a (trials x neurons) design matrix
    times = np.column_stack(((event_times - pre_time), (event_times + post_time)))
    pop_vector, cluster_ids = _get_spike_counts_in_bins(spike_times, spike_clusters, times)
    pop_vector = np.rot90(pop_vector)

    # Initialize
    lda = LinearDiscriminantAnalysis()
    lda_projection = np.zeros(event_groups.shape)

    if cross_validation == 'none':
        # Find the best LDA projection on all data and transform those data
        lda_projection = lda.fit_transform(pop_vector, event_groups)
    else:
        # Perform cross-validation
        if cross_validation == 'leave-one-out':
            cv = LeaveOneOut().split(pop_vector)
        elif cross_validation == 'kfold':
            cv = KFold(n_splits=num_splits).split(pop_vector)
        elif cross_validation == 'block':
            # Derive contiguous-block ids from runs of equal prob_left values
            block_lengths = [sum(1 for i in g) for k, g in groupby(prob_left)]
            blocks = np.repeat(np.arange(len(block_lengths)), block_lengths)
            cv = LeaveOneGroupOut().split(pop_vector, groups=blocks)
        elif cross_validation == 'custom':
            cv = custom_validation

        # Loop over the splits into train and test
        for train_index, test_index in cv:

            # Find LDA projection on the training data
            lda.fit(pop_vector[train_index], [event_groups[j] for j in train_index])

            # Project the held-out test data to projection
            # (rot90 flattens the (n_test, 1) transform output into a row)
            lda_projection[test_index] = np.rot90(lda.transform(pop_vector[test_index]))[0]

    return lda_projection
| StarcoderdataPython |
class Solution:
    def busyStudent(self, startTime, endTime, queryTime):
        """Count how many students are busy at queryTime, i.e. how many
        intervals [startTime[i], endTime[i]] contain queryTime (inclusive)."""
        return sum(
            1
            for start, end in zip(startTime, endTime)
            if start <= queryTime <= end
        )
| StarcoderdataPython |
169943 | <reponame>sxfang32/meiduo_29<filename>meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spu_view.py<gh_stars>1-10
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import ListAPIView
from goods.models import SPU, GoodsCategory
from meiduo_admin.serializers.spu_serializer import *
from meiduo_admin.pages import MyPage
class SPUViewSet(ModelViewSet):
    """CRUD viewset for SPU records, with an extra action that lists brands."""
    queryset = SPU.objects.all().order_by('id')
    serializer_class = SPUModelSerializer
    pagination_class = MyPage

    def get_queryset(self):
        # If the view method handling the current request is spu_brands
        # (self.action holds the name of the method serving this request),
        if self.action == "spu_brands":
            # then the data to serve is the Brand queryset.
            return Brand.objects.all().order_by('id')
        return self.queryset.all()

    def get_serializer_class(self):
        # If the current request is handled by spu_brands,
        # use the brand serializer for the queryset;
        if self.action == "spu_brands":
            return SPUBrandSimpleSerializer
        return self.serializer_class

    def spu_brands(self, request):
        """Serialize and return all brand records."""
        # 1. Get the queryset of brand objects.
        brand_query = self.get_queryset()
        # 2. Build the serializer for the whole queryset.
        s = self.get_serializer(brand_query, many=True)
        # 3. Return the serialized data.
        return Response(s.data)
class SPUCategoryView(ListAPIView):
    """List goods categories: top-level ones by default, or the children
    of a given category when a pk is supplied in the URL."""
    queryset = GoodsCategory.objects.all().order_by('id')
    serializer_class = SPUCategorySimpleSerializer

    def get_queryset(self):
        # If the request path contains a pk, filter the second- or
        # third-level categories under that parent.
        pk = self.kwargs.get('pk')
        if pk:
            return self.queryset.filter(parent_id=pk)
        return self.queryset.filter(parent=None)
| StarcoderdataPython |
136393 | <reponame>huxian123/mindspore<filename>tests/st/probability/test_uncertainty.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test uncertainty toolbox """
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as CV
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore.common import dtype as mstype
from mindspore.common.initializer import TruncatedNormal
from mindspore.dataset.vision import Inter
from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation
from mindspore.train.serialization import load_checkpoint, load_param_into_net
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """Build a bias-free Conv2d layer with truncated-normal weight init."""
    init = weight_variable()
    layer = nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        weight_init=init,
        has_bias=False,
        pad_mode="valid",
    )
    return layer
def fc_with_initialize(input_channels, out_channels):
    """Build a Dense layer whose weight and bias use truncated-normal init."""
    w_init = weight_variable()
    b_init = weight_variable()
    return nn.Dense(input_channels, out_channels, w_init, b_init)
def weight_variable():
    """Return the default weight initializer: truncated normal, sigma=0.02."""
    sigma = 0.02
    return TruncatedNormal(sigma)
class LeNet5(nn.Cell):
    """Classic LeNet-5 convolutional network for 32x32 single-channel
    images (e.g. MNIST), producing `num_class` logits."""

    def __init__(self, num_class=10, channel=1):
        super(LeNet5, self).__init__()
        self.num_class = num_class
        # Two conv stages: channel -> 6 and 6 -> 16 feature maps, 5x5 kernels.
        self.conv1 = conv(channel, 6, 5)
        self.conv2 = conv(6, 16, 5)
        # Fully connected head: 16*5*5 flattened features -> 120 -> 84 -> classes.
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, self.num_class)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    def construct(self, x):
        # Conv block 1: conv -> relu -> 2x2 max pool.
        x = self.conv1(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        # Conv block 2: conv -> relu -> 2x2 max pool.
        x = self.conv2(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        # Flatten feature maps and run the fully connected classifier head.
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        # Final layer returns raw logits (no softmax).
        x = self.fc3(x)
        return x
def create_dataset(data_path, batch_size=32, repeat_size=1,
                   num_parallel_workers=1):
    """
    Create the MNIST dataset pipeline for train or test.

    Args:
        data_path: directory containing the MNIST data files.
        batch_size: samples per batch (incomplete final batch is dropped).
        repeat_size: number of times the dataset is repeated.
        num_parallel_workers: parallelism for each map operation.

    Returns:
        A MnistDataset with resize/rescale/normalize/HWC2CHW transforms
        applied, shuffled, batched and repeated.
    """
    # define dataset
    mnist_ds = ds.MnistDataset(data_path)

    resize_height, resize_width = 32, 32  # LeNet expects 32x32 inputs
    rescale = 1.0 / 255.0                 # scale raw pixels into [0, 1]
    shift = 0.0
    # Normalize with the standard MNIST statistics (mean 0.1307, std 0.3081).
    rescale_nml = 1 / 0.3081
    shift_nml = -1 * 0.1307 / 0.3081

    # define map operations
    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Bilinear mode
    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
    rescale_op = CV.Rescale(rescale, shift)
    hwc2chw_op = CV.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)

    # apply map operations on images (order matters: rescale to [0, 1]
    # first, then normalize, then transpose HWC -> CHW)
    mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)

    # apply DatasetOps
    buffer_size = 10000
    mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size)  # 10000 as in LeNet train script
    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
    mnist_ds = mnist_ds.repeat(repeat_size)

    return mnist_ds
if __name__ == '__main__':
    # Load the trained LeNet-5 model from a local checkpoint file.
    network = LeNet5()
    param_dict = load_checkpoint('checkpoint_lenet.ckpt')
    load_param_into_net(network, param_dict)
    # Build the train and eval dataset pipelines.
    ds_train = create_dataset('workspace/mnist/train')
    ds_eval = create_dataset('workspace/mnist/test')
    # Set up the uncertainty toolbox; epochs=1 and save_model=False keep
    # this quick for smoke-testing rather than producing reusable models.
    evaluation = UncertaintyEvaluation(model=network,
                                       train_dataset=ds_train,
                                       task_type='classification',
                                       num_classes=10,
                                       epochs=1,
                                       epi_uncer_model_path=None,
                                       ale_uncer_model_path=None,
                                       save_model=False)
    # Evaluate both uncertainty types batch by batch over the eval set.
    for eval_data in ds_eval.create_dict_iterator(output_numpy=True):
        eval_data = Tensor(eval_data['image'], mstype.float32)
        epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data)
        aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data)
| StarcoderdataPython |
3352173 | <reponame>liruifeng-01/nlp_xiaojiang
"""
对SequenceToSequence模型进行基本的参数组合测试
Code from: QHDuan(2018-02-05) url: https://github.com/qhduan/just_another_seq2seq
"""
from utils.mode_util.seq2seq.data_utils import batch_flow_bucket as batch_flow
from utils.mode_util.seq2seq.thread_generator import ThreadedGenerator
from utils.mode_util.seq2seq.model_seq2seq import SequenceToSequence
from utils.mode_util.seq2seq.word_sequence import WordSequence
from conf.path_config import chicken_and_gossip_path
from conf.path_config import chatbot_data_cg_char_dir
from conf.path_config import chatbot_data_cg_ws_anti
from conf.path_config import chatbot_data_cg_xy_anti
from conf.path_config import model_ckpt_cg_anti
from conf.path_config import path_params
import tensorflow as tf
import numpy as np
import pickle
import json
import sys
sys.path.append('..')
def predict_anti(params):
    """Interactive prediction loop for the anti-overfit seq2seq chatbot.

    :param params: dict of SequenceToSequence hyper-parameters (loaded from
        the JSON params file); forwarded verbatim to the model constructor.
    """
    # x_data: tokenised training questions; ws: fitted WordSequence vocabulary.
    x_data, _ = pickle.load(open(chatbot_data_cg_xy_anti, 'rb'))
    ws = pickle.load(open(chatbot_data_cg_ws_anti, 'rb'))

    # Print a few training samples as a sanity check.
    for x in x_data[:5]:
        print(' '.join(x))

    config = tf.ConfigProto(
        # device_count={'CPU': 1, 'GPU': 0},
        allow_soft_placement=True,
        log_device_placement=False
    )

    save_path = model_ckpt_cg_anti

    # Build the inference graph: batch size 1, greedy decoding (beam_width=0).
    tf.reset_default_graph()
    model_pred = SequenceToSequence(
        input_vocab_size=len(ws),
        target_vocab_size=len(ws),
        batch_size=1,
        mode='decode',
        beam_width=0,
        **params
    )
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init)
        model_pred.load(sess, save_path)

        # Read-eval-print loop: type 'exit' or 'quit' to stop.
        while True:
            user_text = input('Input Chat Sentence:')
            if user_text in ('exit', 'quit'):
                exit(0)
            # Character-level tokenisation of the lower-cased input.
            x_test = [list(user_text.lower())]
            # x_test = [word_tokenize(user_text)]
            bar = batch_flow([x_test], ws, 1)
            x, xl = next(bar)
            # Reverse the input sequence, matching the training-time encoding.
            x = np.flip(x, axis=1)
            # x = np.array([
            #     list(reversed(xx))
            #     for xx in x
            # ])
            print(x, xl)
            pred = model_pred.predict(
                sess,
                np.array(x),
                np.array(xl)
            )
            print(pred)
            # prob = np.exp(prob.transpose())
            print(ws.inverse_transform(x[0]))
            # print(ws.inverse_transform(pred[0]))
            # print(pred.shape, prob.shape)
            # Decode every predicted sequence back to characters.
            for p in pred:
                ans = ws.inverse_transform(p)
                print(ans)
def main():
    """Entry point: load hyper-parameters from the params file and start
    the interactive prediction loop.

    NOTE: the redundant function-local ``import json`` was removed; ``json``
    is already imported at module level.
    """
    predict_anti(json.load(open(path_params)))


if __name__ == '__main__':
    main()
165246 | <reponame>anivalogy/project_Anivalogy<gh_stars>0
from django.urls import path, include
# Import the referenced views explicitly instead of `from .views import *`,
# so the module's dependencies are visible and linters can check them.
from .views import blog, callus, port, projects, resume

# URL routes for the app; each route name mirrors its URL prefix.
urlpatterns = [
    path('port', port, name='port'),
    path('projects', projects, name='projects'),
    path('blog', blog, name='blog'),
    path('resume', resume, name='resume'),
    path('callus', callus, name='callus'),
]
| StarcoderdataPython |
4836632 | from pathlib import Path
from azureml.core import Run
import argparse
import os
def main(args):
    """Copy every file from the registered input dataset into the output folder.

    :param args: parsed namespace with ``input`` (unused here — the dataset
        path is resolved from the AzureML run context) and ``output``
        (destination directory path).
    """
    output = Path(args.output)
    output.mkdir(parents=True, exist_ok=True)

    # Resolve the mounted path of the "train_10_models" input dataset from
    # the AzureML run context.
    run_context = Run.get_context()
    input_path = run_context.input_datasets["train_10_models"]

    for file_name in os.listdir(input_path):
        input_file = os.path.join(input_path, file_name)
        with open(input_file, "r") as f:
            content = f.read()

        # Apply any data pre-processing techniques here

        output_file = os.path.join(output, file_name)
        with open(output_file, "w") as f:
            f.write(content)
def my_parse_args(argv=None):
    """Parse command-line arguments.

    :param argv: optional list of argument strings; ``None`` (the default)
        makes argparse fall back to ``sys.argv[1:]``, so existing call
        sites are unchanged while the function becomes unit-testable.
    :return: argparse.Namespace with ``input`` and ``output`` attributes.
    """
    parser = argparse.ArgumentParser("Test")
    parser.add_argument("--input", type=str)
    parser.add_argument("--output", type=str)
    args = parser.parse_args(argv)
    return args
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run the copy step.
    args = my_parse_args()
    main(args)
| StarcoderdataPython |
1733192 | from Puzzle.PuzzlePiece import *
from Img.filters import angle_between
from Img.Pixel import *
import math
import numpy as np
def rotate(origin, point, angle):
    """Rotate ``point`` counter-clockwise around ``origin`` by ``angle``.

    :param origin: (x, y) centre of rotation
    :param point: (x, y) point to rotate
    :param angle: rotation angle in **radians** (``math.cos``/``math.sin``
        operate on radians; the original docstring wrongly said degrees)
    :return: (qx, qy) rotated coordinates
    """
    ox, oy = origin
    px, py = point

    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    qx = ox + cos_a * (px - ox) - sin_a * (py - oy)
    qy = oy + sin_a * (px - ox) + cos_a * (py - oy)

    # Diagnostic guard against NaN propagation from degenerate inputs.
    # BUG FIX: the original format string had 5 placeholders for 7 arguments,
    # silently dropping qy and angle from the message.
    if math.isnan(qx) or math.isnan(qy):
        print("NAN DETECTED: {} {} {} {} {} {} {}".format(ox, oy, px, py, qx, qy, angle))
    return qx, qy
def stick_pieces(bloc_p, bloc_e, p, e, final_stick=False):
    """
    Stick an edge of a piece to the bloc of already resolved pieces.

    :param bloc_p: bloc of pieces already solved
    :param bloc_e: edge of the bloc to stick onto
    :param p: piece to add to the bloc (modified in place)
    :param e: edge of ``p`` to stick
    :param final_stick: when True, also transform the piece's pixels so the
        assembled image can be rendered (not only the edge shapes)
    :return: Nothing
    """
    # Direction vectors of both edges; the piece edge is traversed in the
    # opposite direction, hence the negation when computing the angle.
    vec_bloc = np.subtract(bloc_e.shape[0], bloc_e.shape[-1])
    vec_piece = np.subtract(e.shape[0], e.shape[-1])
    translation = np.subtract(bloc_e.shape[0], e.shape[-1])
    angle = angle_between((vec_bloc[0], vec_bloc[1], 0), (-vec_piece[0], -vec_piece[1], 0))

    # First move the first corner of piece to the corner of bloc edge
    for edge in p.edges_:
        edge.shape += translation

    # Then rotate piece of `angle` radians centered on the corner
    for edge in p.edges_:
        for i, point in enumerate(edge.shape):
            edge.shape[i] = rotate(bloc_e.shape[0], point, -angle)

    if final_stick:
        # Bounding box of the piece's pixels after translation, before rotation.
        minX, minY, maxX, maxY = float('inf'), float('inf'), -float('inf'), -float('inf')
        for i, pixel in enumerate(p.img_piece_):
            # NOTE(review): translate() receives (translation[1], translation[0]);
            # pixel coordinates appear to be (row, col) swapped — confirm in Pixel.
            x, y = p.img_piece_[i].translate(translation[1], translation[0])
            minX, minY, maxX, maxY = min(minX, x), min(minY, y), max(maxX, x), max(maxY, y)
            # pixel.rotate(bloc_e.shape[0], -angle)

        # Dense RGB buffer of the translated piece; -1 marks uncovered cells.
        img_p = np.full((maxX - minX + 1, maxY - minY + 1, 3), -1)
        for pix in p.img_piece_:
            x, y = pix.pos
            x, y = x - minX, y - minY
            img_p[x, y] = pix.color

        # New bounding box: rotate the old box corners around the bloc corner.
        minX2, minY2, maxX2, maxY2 = float('inf'), float('inf'), -float('inf'), -float('inf')
        for x in [minX, maxX]:
            for y in [minY, maxY]:
                x2, y2 = rotate((bloc_e.shape[0][1], bloc_e.shape[0][0]), (x, y), angle)
                x2, y2 = int(x2), int(y2)
                minX2, minY2, maxX2, maxY2 = min(minX2, x2), min(minY2, y2), max(maxX2, x2), max(maxY2, y2)

        # Inverse-map every destination pixel back into the source buffer,
        # which avoids the holes a forward mapping would leave.
        pixels = []
        for px in range(minX2, maxX2 + 1):
            for py in range(minY2, maxY2 + 1):
                qx, qy = rotate((bloc_e.shape[0][1], bloc_e.shape[0][0]), (px, py), -angle)
                qx, qy = int(qx), int(qy)
                if minX <= qx <= maxX and minY <= qy <= maxY and img_p[qx - minX, qy - minY][0] != -1:
                    pixels.append(Pixel((px, py), img_p[qx - minX, qy - minY]))

        p.img_piece_ = pixels
| StarcoderdataPython |
68271 | from django.db import models
from django.contrib.auth import get_user_model
from django.db.models.expressions import RawSQL
from django.utils import timezone
class NearbyShelterManager(models.Manager):
    """Manager adding distance-based queries to the Shelter model."""

    def with_distance(self, lat: float, lon: float):
        """
        Annotate the Shelter queryset with a ``distance`` column.

        Distance (in kilometres) from (lat, lon) to each shelter is computed
        in SQL with the spherical law of cosines; 6371 is the mean Earth
        radius in km.

        :param lat: latitude of the reference point, degrees
        :param lon: longitude of the reference point, degrees
        :return: queryset annotated with ``distance``
        """
        raw_queryset = self.get_queryset()
        # Great-circle distance in SQL (spherical law of cosines).
        query = """
        6371 * acos(
                cos(radians(%s)) * cos(radians(lat)) * cos(radians(lon) - radians(%s))
                 + sin(radians(%s)) * sin(radians(lat))
                )
        """
        # Attach the computed distance as an annotated column.
        queryset = raw_queryset.annotate(distance=RawSQL(query, (lat, lon, lat)))
        return queryset

    def get_nearby_shelters_list(self, lat: float, lon: float, distance: int):
        """
        Return shelters within ``distance`` metres of the given coordinates.

        :param lat: own latitude, degrees
        :param lon: own longitude, degrees
        :param distance: search radius in metres
        :return: queryset of shelters inside the radius
        """
        queryset = self.with_distance(lat, lon)
        # Convert metres to kilometres to match the annotation's unit.
        distance = distance / 1000
        # Keep only rows whose annotated distance is within the radius.
        return queryset.filter(distance__lte=distance)
class Shelter(models.Model):
    """
    Model of an evacuation shelter (location, address, capacity).
    """
    # Display name of the shelter.
    name = models.CharField(verbose_name='名前', max_length=255)
    # Postal address.
    address = models.CharField(verbose_name='住所', max_length=255)
    # Latitude in degrees.
    lat = models.FloatField(verbose_name='緯度')
    # Longitude in degrees.
    lon = models.FloatField(verbose_name='経度')
    # Maximum number of evacuees; None when unknown.
    capacity = models.IntegerField('収容可能人数', null=True)

    # Custom manager providing distance-aware queries.
    objects = NearbyShelterManager()

    class Meta:
        # A shelter is uniquely identified by its coordinates.
        unique_together = ('lat', 'lon')
        ordering = ['name']

    def __str__(self):
        return self.name
class PersonalEvacuationHistory(models.Model):
    """
    Per-user evacuation event log (arrival at / departure from a shelter).
    """
    # User the event belongs to.
    user = models.ForeignKey(get_user_model(), verbose_name='ユーザ', on_delete=models.CASCADE,
                             related_name='evacuation_histories')
    # Shelter involved in the event.
    shelter = models.ForeignKey(Shelter, verbose_name='避難所', on_delete=models.CASCADE,
                                related_name='personal_histories')
    # Timestamp of the event.
    created_at = models.DateTimeField('日付')
    # True when the user arrived at the shelter, False when they went home.
    is_evacuated = models.BooleanField(verbose_name='避難しているか')

    class Meta:
        # Newest events first.
        ordering = ['-created_at']
class EvacuationHistoryManager(models.Manager):
    """Manager that aggregates personal events into head-count snapshots."""

    def create(self, shelter: Shelter, now=None):
        """
        Record the number of evacuees currently at ``shelter``.

        The count is derived from personal evacuation events since the last
        aggregated snapshot: previous count + arrivals - departures.

        :param shelter: shelter to aggregate for
        :param now: snapshot timestamp; defaults to the current time
        :return: the saved EvacuationHistory instance
        """
        if now is None:
            now = timezone.now()
        latest_date = now
        latest_count = 0

        # Start from the most recent aggregated snapshot, if one exists.
        personal_histories = PersonalEvacuationHistory.objects.filter(shelter=shelter)
        latest_history = EvacuationHistory.objects.filter(shelter=shelter).order_by('-created_at').first()
        if latest_history is not None:
            latest_count = latest_history.count
            latest_date = latest_history.created_at
        else:
            # No snapshot yet: fall back to the newest personal event's time.
            last_history = personal_histories.order_by('-created_at').first()
            if last_history is not None:
                latest_date = last_history.created_at

        # Personal events between the last snapshot and now.
        personal_histories = personal_histories.filter(created_at__range=[latest_date, now])
        # Number of people who arrived at the shelter.
        at_shelter_count = personal_histories.filter(is_evacuated=True).count()
        # Number of people who went home.
        at_home_count = personal_histories.filter(is_evacuated=False).count()
        # People currently at the shelter.
        current_count = latest_count + at_shelter_count - at_home_count

        hist = self.model(shelter=shelter, count=current_count, created_at=now)
        hist.save()
        return hist
class EvacuationHistory(models.Model):
    """
    Aggregated head-count snapshots per shelter over time.
    """
    shelter = models.ForeignKey(Shelter, verbose_name='避難所', related_name='histories', on_delete=models.CASCADE)
    # Number of people at the shelter at ``created_at``.
    count = models.IntegerField('避難している人数')
    # Marks records generated for demo purposes.
    is_demo = models.BooleanField('デモ用', default=True)
    # When this snapshot was taken.
    created_at = models.DateTimeField('取得日')

    # Custom manager whose create() derives the count from personal events.
    objects = EvacuationHistoryManager()

    class Meta:
        ordering = ['-created_at']
| StarcoderdataPython |
1732415 | <filename>Back-End/Python/External Libraries/Flask/Flask_website_examples/05 Question - Answer App/app_query.py
# Q_001
# Function ---> Index
get_all_qst = '''select
questions.id as question_id,
questions.question_text,
questions.answer_text,
askers.name as asker_name,
experts.name as expert_name
from questions
join users as askers on askers.id = questions.asked_by_id
join users as experts on experts.id = questions.expert_id
where questions.answer_text is not null'''
# Q_002
# Function ---> signup
check_if_user_exist = 'select id from users where name = ?'
# Q_003
# Function ---> signup/ insert into db
inject_new_user = 'insert into users (name, password, expert, admin) values (?,?,?,?)'
# Q_004
# Function ---> login
login_query = 'select id, name, password from users where name = ?'
# Q_005
# Function ---> ask
experts = 'select id, name from users where expert = 1'
# Q_006
# Function ---> ask /add question
add_question = 'insert into questions (question_text, asked_by_id, expert_id) values (?,?,?)'
# Q_007
# Function ---> question
get_question = '''select
questions.question_text,
questions.answer_text,
askers.name as asker_name,
experts.name as expert_name
from questions
join users as askers on askers.id = questions.asked_by_id
join users as experts on experts.id = questions.expert_id
where questions.id = ?'''
# Q_008
# Function ---> Expert/Answer - get all questions
question_view = 'select id, question_text from questions where id = ?'
# Q_009
# Function ---> Expert/answer - Answer the quetion
inject_answer = 'update questions set answer_text = ? where id = ?'
# Q_010
# Function ---> unanswered
unanswered_qst = '''select
questions.id,
questions.question_text,
users.name
from questions
join users on users.id = questions.asked_by_id
where questions.answer_text is null and questions.expert_id = ?'''
# Q_11
# Function ---> unanswered
unanswered_admin = '''select
questions.id,
questions.question_text,
questions.expert_id,
users.name,
expert.name as expert_name
from questions
join users on users.id = questions.asked_by_id
join users as expert on expert.id = questions.expert_id
where questions.answer_text is null
'''
# Q_12
# Function ---> Admin / users
query_users = 'select id, name, expert, admin from users'
# Q_13
# Function ---> Admin / promote
check_access_level = 'SELECT id, expert, admin FROM users WHERE id = ?'
# Q_14
# Function ---> Admin / promote
remove_promotion = 'update users set expert = 0 where id = ?'
# Q_15
# Function ---> Admin / promote
add_promotion = 'update users set expert = 1 where id = ?'
| StarcoderdataPython |
import unittest

from fearquantlib.wavelib import *
import fearquantlib.wavelib as _wavelib

# BUG FIX: referencing ``__do_compute_df_bar`` inside the class body triggers
# private name mangling (it would be looked up as
# ``_TestBarGreenWaveCnt__do_compute_df_bar``), and underscore-prefixed names
# are not exported by ``import *`` either — both lead to a NameError at test
# time.  Bind a module-level alias via getattr, which bypasses mangling.
_do_compute_df_bar = getattr(_wavelib, '__do_compute_df_bar')


class TestBarGreenWaveCnt(unittest.TestCase):
    def test_fun(self):
        """Green-wave count is 4 when the last 4 bars are dropped, 0 otherwise."""
        code = "SH.600703"
        df = get_df_of_code(code, "2019-09-20", "2019-10-21", KLType.K_30M)
        df15 = _do_compute_df_bar(df)

        ct_4 = bar_green_wave_cnt(df15[:-4])
        self.assertEqual(4, ct_4)

        ct_0 = bar_green_wave_cnt(df15)
        self.assertEqual(0, ct_0)


if __name__ == '__main__':
    unittest.main()
3224893 | <reponame>mjwestcott/pypokertools
"""
Translating PokerStove-style holecard notation to the 'individual cards'
notation used for holecards in pokertools.py.
In PokerStove notation (also used by PokerCruncher) Ace-King suited is
represented as AKs. Queen-Jack offsuit is QJo.
Simple examples:
"66" -> ["6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6c 6d"]
"AKs" -> ["Ac Kc", "Ad Kd", "Ah Kh", "As Ks"]
"QJo" -> ["Qc Jd", "Qd Jc", "Qh Jc", "Qs Jc",
"Qc Jh", "Qd Jh", "Qh Jd", "Qs Jd",
"Qc Js", "Qd Js", "Qh Js", "Qs Jh"]
The PokerStove format also includes range operators. For instance:
"QQ+" -> ["Qc Qd", "Qc Qh", "Qc Qs", "Qd Qh", "Qd Qs", "Qc Qd",
"Kc Kd", "Kc Kh", "Kc Ks", "Kd Kh", "Kd Ks", "Kc Kd",
"Ac Ad", "Ac Ah", "Ac As", "Ad Ah", "Ad As", "Ac Ad"]
"A5s-A3s" -> ["Ac 5c", "Ad 5d", "Ah 5h", "As 5s",
"Ac 4c", "Ad 4d", "Ah 4h", "As 4s",
"Ac 3c", "Ad 3d", "Ah 3h", "As 3s"]
Note: we will take position-isomorphs into account. "Ac Kc" is identical to
"Kc Ac" and we only want to produce one of them. This will simplify and
reduce the space requirements of storing ranges of holecards.
"""
import re
from collections import namedtuple
from itertools import chain
from pokertools import (
CANONICAL_HOLECARDS_NAMES,
SUIT_COMBINATIONS,
SUIT_PERMUATIONS,
SUITS,
get_numerical_rank,
get_string_rank,
holecards,
)
#------------------------------------------------------------------------------
# Tokeniser
# Lexical specification for PokerStove-style range strings.  Alternatives are
# tried left-to-right, and the backreferences (\2, \4, \5, \7) are numbered
# across the combined pattern, so entries must not be reordered or regrouped.
token_specification = [                                    # Examples:
    ("RANGE", r"[2-9AKQJT]{2}(s|o)-[2-9AKQJT]{2}\2"),      # AKs-A2s
    ("RANGE_PAIR", r"([2-9AKQJT])\4-([2-9AKQJT])\5"),      # 99-55
    ("PAIR", r"([2-9AKQJT])\7\+?"),                        # 33
    ("SINGLE_COMBO", r"([2-9AKQJT][cdhs]){2}"),            # AhKh
    ("MULTI_COMBO", r"[2-9AKQJT]{2}(s|o)\+?"),             # QJo
    ("SEPERATOR", r"\s*,\s*"),
    ("CATCHALL", r".+")
]
master_pat = re.compile("|".join("(?P<{}>{})".format(*pair) for pair in token_specification))

# A lexed token: ``type`` is one of the names above, ``value`` the matched text.
Token = namedtuple("Token", ["type", "value"])


class TokeniserError(Exception):
    """Raised when the input contains text the tokeniser cannot understand."""
    pass


def generate_tokens(pattern, text):
    """Yield a Token for every successive match of ``pattern`` in ``text``."""
    scan = pattern.scanner(text)
    for match in iter(scan.match, None):
        yield Token(match.lastgroup, match.group())
def canonise(holecards):
    """
    Return the canonical spelling of a two-card holecards string.

    If the pair is already listed in CANONICAL_HOLECARDS_NAMES it is returned
    unchanged; otherwise the two cards are swapped into canonical order.
    """
    if holecards in CANONICAL_HOLECARDS_NAMES:
        return holecards
    first_card, second_card = holecards[0:2], holecards[3:5]
    return "{} {}".format(second_card, first_card)
def process_one_name(stove_name):
    """
    Expand one PokerStove-style holecards name into pokertools-style names.

    For example:
        "AKs" -> ["Ac Kc", "Ad Kd", "Ah Kh", "As Ks"]
        "66"  -> ["6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6c 6d"]

    Raises TokeniserError for a malformed name.
    """
    if len(stove_name) == 3:
        rank1, rank2, suit_mark = stove_name
        # Suited hands share one suit; offsuit hands use every ordered pair.
        if suit_mark == "s":
            suit_pairs = [(suit, suit) for suit in SUITS]
        elif suit_mark == "o":
            suit_pairs = SUIT_PERMUATIONS
        else:
            raise TokeniserError("incorrect suit_mark in stove_name: {}".format(stove_name))
    else:
        rank1, rank2 = stove_name
        # Pocket pairs use every unordered suit combination.
        if rank1 == rank2:
            suit_pairs = SUIT_COMBINATIONS
        else:
            raise TokeniserError("rank1 != rank2 in stove_name: {}".format(stove_name))
    return [
        "{}{} {}{}".format(rank1, suit1, rank2, suit2)
        for suit1, suit2 in suit_pairs
    ]
def process_one_token(token):
    """
    Translates any given single token into a list of pokertools-style names.

    For example:
        "77-55" -> ["7c 7d", "7c 7h", "7c 7s", "7d 7h", "7d 7s", "7c 7d",
                    "6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6c 6d",
                    "5c 5d", "5c 5h", "5c 5s", "5d 5h", "5d 5s", "5c 5d"]

    Range-with-``+`` tokens are rewritten into explicit ranges and handled
    recursively.  Raises TokeniserError for an unexpected token type.
    """
    # Let's say token.value is "A5s-A2s". Our naming convention is this:
    #     'A' is the 'const_rank'
    #     '5' is the 'high_rank'
    #     '2' is the 'low_rank'
    #     's' is the 'suit_mark'
    if token.type == "RANGE":
        const_rank, high_rank, low_rank, suit_mark = token.value[0], token.value[1], token.value[5], token.value[2]
        high = get_numerical_rank(high_rank)
        low = get_numerical_rank(low_rank)
        # Produce a list such as ["A5s", "A4s", "A3s", "A2s"] for processing
        names = [
            "{}{}{}".format(const_rank, get_string_rank(i), suit_mark)
            for i in range(high, (low - 1), -1)
        ]
        return list(chain.from_iterable(process_one_name(name) for name in names))

    elif token.type == "RANGE_PAIR":
        high_rank, low_rank = token.value[1], token.value[3]
        high = get_numerical_rank(high_rank)
        low = get_numerical_rank(low_rank)
        # Produce a list such as ["77", "66", "55"] for processing
        names = [
            get_string_rank(i) * 2
            for i in range(high, (low - 1), -1)
        ]
        return list(chain.from_iterable(process_one_name(name) for name in names))

    elif token.type == "PAIR":
        if token.value.endswith("+"):
            # '55+' is equivalent to 'AA-55'
            return process_one_token(Token("RANGE_PAIR", "AA" + "-" + token.value[0:2]))
        else:
            return process_one_name(token.value)

    elif token.type == "SINGLE_COMBO":
        # An exact two-card combo such as "AhKh" — no expansion needed.
        card1, card2 = token.value[0:2], token.value[2:4]
        return ["{} {}".format(card1, card2)]

    elif token.type == "MULTI_COMBO":
        if token.value.endswith("+"):
            # 'Q2s+' is equivalent to 'QJs-Q2s'
            const_rank, low_rank, suit_mark = token.value[0], token.value[1], token.value[2]
            const = get_numerical_rank(const_rank)
            # The highest second card is one rank below the constant rank.
            high_rank = get_string_rank(const - 1)
            new_token = Token("RANGE", "{}{}{}-{}{}{}".format(
                const_rank, high_rank, suit_mark,
                const_rank, low_rank, suit_mark
            ))
            return process_one_token(new_token)
        else:
            return process_one_name(token.value)

    else:
        raise TokeniserError("unexpected token: {}".format(token))
def translate(text):
    """
    Translates a string of PokerStove-style names of holecards into the
    corresponding string of names from CANONICAL_HOLECARDS_NAMES.

    >>> stove_string = "JJ+, 66-22, A5s-A2s, Q9s+, J9s+, 8d7d, ATo+, KTo+"
    >>> len(list(translate(stove_string)))
    175
    """
    tokens = list(generate_tokens(master_pat, text))
    unrecognised = [tok for tok in tokens if tok.type == "CATCHALL"]
    if unrecognised:
        raise TokeniserError("unexpected tokens: {}".format(unrecognised))
    for tok in tokens:
        if tok.type == "SEPERATOR":
            continue
        for name in process_one_token(tok):
            yield canonise(name)
def to_cards(text):
    """Return the translated holecards as card objects rather than names."""
    return list(map(holecards, translate(text)))
| StarcoderdataPython |
1789483 | # coding: utf-8
import os
import pytest
from pathlib import Path
@pytest.fixture(scope='session')
def root_path(app_root) -> Path:
    # Session-scoped fixture: returns the application root and also publishes
    # it to child code through the ``project_root`` environment variable.
    os.environ['project_root'] = str(app_root)
    return app_root
| StarcoderdataPython |
3329409 | <filename>poopbox/shell/targets.py
#!/usr/bin/env python
from poopbox.shell.ssh import SSHShellTarget
| StarcoderdataPython |
1742214 |
class Biblioteca:
    """A library entry linking a user to a game they own."""

    def __init__(self, id_usuario, id_juego, nombre_juego, foto_juego):
        self.id_usuario = id_usuario
        self.id_juego = id_juego
        self.nombre_juego = nombre_juego
        self.foto_juego = foto_juego

    # --- accessors -------------------------------------------------------
    def getId_usuario(self):
        return self.id_usuario

    def getId_juego(self):
        return self.id_juego

    def getNombre_juego(self):
        return self.nombre_juego

    def getFoto_juego(self):
        return self.foto_juego

    def dump(self):
        """Serialise this entry as a plain dict (e.g. for a JSON response)."""
        return {
            'id_usuario': self.id_usuario,
            'id_juego': self.id_juego,
            'nombre_juego': self.nombre_juego,
            'foto_juego': self.foto_juego,
        }
| StarcoderdataPython |
3341777 | from vk_bot.core.modules.basicplug import BasicPlug
import random
class Video(BasicPlug):
    # Plugin metadata: help text and the command word that triggers it.
    doc = "Поиск видео"
    command = ("видео",)

    def main(self):
        """Search VK for a video matching the command text and send a random hit."""
        text = " ".join(self.text[1:])
        try:
            video = self.vk2.video.search(q=text, count=50)
            video = random.choice(video["items"])
            videoid = video["id"]
            videoow = video["owner_id"]
        except IndexError:
            # BUG FIX: return after reporting "not found" — the original fell
            # through and crashed with a NameError on ``videoid`` below.
            self.sendmsg("ничо не найдено")
            return
        video = f"video{videoow}_{videoid}"
        self.sendmsg(f"Ведосик по заказу - {text}:", video)
4821396 | from . import PeriorTree
import numpy as np
def test():
    """Smoke test: fill a PeriorTree with random boxes and query the centre."""
    count = int(1e4)
    centres = np.random.rand(count, 3)
    half_sizes = np.random.rand(count, 3) * .05
    bbox = np.stack([np.zeros(3), np.ones(3)])
    tree = PeriorTree(bbox)
    for idx in range(count):
        tree.add(centres[idx], half_sizes[idx], idx)
    query_centre = (bbox[0] + bbox[1]) / 2
    query_half = np.ones(3) * .25
    hits = tree.query(query_centre, query_half)
    # TODO: check correctness of the query result


if __name__ == '__main__':
    test()
3382504 | import base64
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from pkcs7 import PKCS7Encoder
class Encrypter():
    """Encrypting and decrypting strings using AES (CBC mode, PKCS#7 padding)."""

    def __init__(self, key):
        # key: 32-byte AES-256 key, e.g. from Encrypter.generate_key().
        self.key = key
        self.encoder = PKCS7Encoder()

    def get_verifier(self, iv=None):
        """Return (base64 key, base64 iv, verifier).

        The verifier is the base64 IV, PKCS7-padded and encrypted under the
        key with that same IV, so the other side can prove key possession.
        """
        if iv is None:
            iv = get_random_bytes(16)
        aes = AES.new(self.key, AES.MODE_CBC, iv)
        base64_private_key = base64.b64encode(self.key).decode()
        base64_iv = base64.b64encode(iv).decode()
        padded_iv = self.encoder.encode(base64_iv)
        verifier = base64.b64encode(aes.encrypt(padded_iv.encode())).decode()
        return (base64_private_key, base64_iv, verifier)

    def encrypt(self, plain, iv=None):
        """Encrypt ``plain`` and return the ciphertext base64-encoded.

        A random IV is generated when none is supplied.
        """
        if iv is None:
            iv = get_random_bytes(16)
        aes = AES.new(self.key, AES.MODE_CBC, iv)
        padded_plain = self.encoder.encode(plain)
        return base64.b64encode(aes.encrypt(padded_plain.encode())).decode()

    def decrypt(self, encrypted, iv=None):
        """Decrypt base64-encoded ``encrypted`` and return the plaintext.

        NOTE(review): falling back to a *random* IV cannot reproduce the
        first plaintext block — callers must pass the IV used at encryption
        time; confirm the intended behaviour of this default.
        """
        if iv is None:
            iv = get_random_bytes(16)
        aes = AES.new(self.key, AES.MODE_CBC, iv)
        decrypted = aes.decrypt(base64.b64decode(encrypted))
        return self.encoder.decode(decrypted.decode())

    @staticmethod
    def generate_key():
        """Return a fresh random 32-byte key.

        BUG FIX: declared as a @staticmethod — without the decorator,
        ``instance.generate_key()`` raised TypeError because the instance
        was passed to a zero-argument function.
        """
        return get_random_bytes(32)
| StarcoderdataPython |
1790082 | <filename>docs/sphinxext/doxybridge/autosummary/c.py<gh_stars>1-10
"""
doxybridge.autosummary.c
~~~~~~~~~~~~~~~~~~~~~~~~
Autosummary handler for c types
:copyright: Copyright (c) 2011 The Department of Arts and Culture, The Government
of the Republic of South Africa.
:license: MIT, see LICENSE for details.
"""
import re
from doxybridge.finder import NoMatchesError, MultipleMatchesError
from doxybridge.directive.summary import DoxygenSummary
from doxybridge.domains.c import xml_extractor_re
from sphinx.domains.c import c_sig_re, c_funcptr_sig_re
# Strips the outer two XML element layers and keeps the inner text.
xml_summary_extractor_re = re.compile("<.+?><.+?>(.+)</.+?></.+?>")
# Splits a C argument word into leading '%' markers and the remainder
# ('*' is temporarily encoded as '%' during signature formatting).
arg_parser_re = re.compile("(%*)(.+)")
# Matches an opening <pending_xref ... reftype="ref"> tag.
pending_xref_parser_re = re.compile("(<pending_xref.+?reftype=\"ref\">)")
class CSummary(DoxygenSummary):
    """Autosummary handler base for entities in the Sphinx ``c`` domain.

    NOTE: this module uses Python 2 syntax (``except E, e:``); it is
    preserved as-is.
    """

    def __init__(self, sphinx_directive, project_info, app, data_objects, names):
        super(CSummary, self).__init__(sphinx_directive,
                                       project_info,
                                       app,
                                       data_objects,
                                       names)
        # All summaries produced here belong to the C domain.
        self.domain = "c"

    @classmethod
    def get_handler(cls, sphinx_directive, project_info, names):
        # Resolve ``names`` against the doxygen XML and wrap them in a
        # CSummaryHandler; raise ValueError when any name cannot be found.
        app = sphinx_directive.state.document.settings.env.app
        finder = app.emit("doxybridge-emitter", "create_finder", project_info)[0]
        try:
            data_objects = cls._get_data_objects(app, finder, sphinx_directive, names)
        except NoMatchesError, e:
            raise ValueError
        objectHandler = CSummaryHandler(sphinx_directive, project_info, app, data_objects, names)
        return objectHandler

    @classmethod
    def _get_data_objects(cls, app, finder, sphinx_directive, names):
        """
        Find all the data objects to document in the xml file and return them.

        Each entry in ``names`` may be "name" or "kind name" (e.g.
        "struct foo"); the optional kind is forwarded to the finder and
        stamped onto the returned object.
        """
        namespace = ""
        object_list = []
        for entity_name in names:
            name_options = entity_name.split(' ')
            if len(name_options) > 1:
                entity_name = name_options[1]
                kind = name_options[0]
            else:
                kind = None
            try:
                data_object = app.emit("doxybridge-emitter", "standard_finder",
                                       entity_name,
                                       namespace,
                                       finder,
                                       sphinx_directive.options,
                                       kind)[0]
            except NoMatchesError:
                # Warn in the build log, then propagate so get_handler fails.
                this_env = sphinx_directive.state.document.settings.env
                warn_string = "Unable to find definition '%s' in doxygen generated xml"
                this_env.warn(this_env.docname, warn_string % sphinx_directive.arguments[0],
                              sphinx_directive.lineno)
                raise NoMatchesError
            if kind:
                data_object.kind = kind
            ## if hasattr(data_object, "get_location"):
            ##     # add dependency
            ##     filename = data_object.get_location().get_file()
            ##     sphinx_directive.state.document.settings.env.note_dependency(filename)
            object_list.append(data_object)
        return object_list
class CSummaryHandler(CSummary):
    """Renders summary lines (name, signature, brief description) for C entities."""

    def __init__(self, sphinx_directive, project_info, app, data_objects, names):
        super(CSummaryHandler, self).__init__(sphinx_directive,
                                              project_info,
                                              app,
                                              data_objects,
                                              names)

    def get_display_name(self, name, renderer):
        # Names may be "kind name" (e.g. "struct foo"); show only the name.
        type_name = name.split(' ')
        if len(type_name) > 1:
            return type_name[1]
        else:
            return name

    def get_real_name(self, name, renderer):
        # Same stripping as get_display_name: drop an optional leading kind.
        type_name = name.split(' ')
        if len(type_name) > 1:
            return type_name[1]
        else:
            return name

    def get_signature(self, renderer):
        # Return the entity's argument list as "(args)"; empty parentheses
        # when the title cannot be parsed as a C declaration.
        title = renderer.title()
        try:
            signature = self._get_signature_from_title(title)
        except ValueError:
            signature = ''
        sig = "(%s)" % self._format_signature(signature)
        return sig

    def _format_signature(self, signature):
        # Emphasise each argument's trailing name with reST *italics*.
        # '*' is temporarily encoded as '%' so pointer markers survive the
        # per-word processing, then decoded back to an escaped '\*'.
        args = signature.strip().split(',')
        new_args = []
        for argument in args:
            new_arg = []
            this_arg = argument.split(' ')
            for arg in this_arg:
                new_arg.append(arg.replace("*", "%"))
            last_one = new_arg.pop()
            if arg_parser_re.match(last_one):
                match = arg_parser_re.findall(last_one)[0]
                new_last_one = "%s\\ *%s*\\" % (match[0], match[1])
            else:
                new_last_one = last_one
            new_arg.append(new_last_one)
            very_new_arg = []
            for arg in new_arg:
                very_new_arg.append(arg.replace("%", "\*"))
            combined = ' '.join(very_new_arg)
            new_args.append(combined)
        formatted_args = ','.join(new_args)
        return formatted_args

    def _get_signature_from_title(self, title):
        # Reassemble the doxygen title nodes into plain text, then extract
        # the argument list with the sphinx C-domain signature regexes.
        # Raises ValueError when the title does not parse as a declaration.
        title_elements = []
        for i in title:
            if xml_extractor_re.match(str(i)):
                elements = xml_extractor_re.findall(str(i))
                title_elements.extend(elements)
            else:
                elements = str(i)
                title_elements.append(elements)
        signature = "".join(title_elements)
        # replace double spaces " " with single space " "
        # NOTE(review): the pattern below shows a single space (a no-op as
        # written); the comment suggests it should be "  " — confirm upstream.
        signature = re.sub(" ", " ", signature)
        # replace pointer space "* " with pointer "*"
        signature = re.sub("\* ", "*", signature)
        m = c_funcptr_sig_re.match(signature)
        if m is None:
            m = c_sig_re.match(signature)
            if m is None:
                raise ValueError('no match')
        rettype, name, arglist, const = m.groups()
        return arglist

    def get_summary(self, renderer):
        # Convert the doxygen brief description to a reST one-liner, mapping
        # doxygen/docutils markup to reST inline markup.
        summary = renderer.briefdescription()
        summary_elements = []
        for i in summary:
            i_string = str(i)
            #print i_string
            i_string = i_string.replace("<emphasis>", "*")
            i_string = i_string.replace("</emphasis>", "*")
            i_string = i_string.replace("<strong>", "**")
            i_string = i_string.replace("</strong>", "**")
            i_string = i_string.replace("<literal>", "``")
            i_string = i_string.replace("</literal>", "``")
            # Cross references become :c:type:`...` roles.
            i_string = i_string.replace("</pending_xref>", "`")
            i_string = pending_xref_parser_re.sub(":c:type:`", i_string)
            if xml_summary_extractor_re.match(i_string):
                elements = xml_summary_extractor_re.findall(i_string)
                summary_elements.extend(elements)
            else:
                elements = i_string
                summary_elements.append(elements)
        clean_summary = "".join(summary_elements)
        #print clean_summary
        return clean_summary

    def get_qualifier(self, name):
        # qualifier for all c is c:type
        return "c:type"
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.