id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3366573 | from bokeh.models import NumeralTickFormatter
from pyesg.validation.report.analysis_builders.base.martingale_analysis_builder import MartingaleAnalysisBuilder
from pyesg.validation.report.charts.line_chart import LineChart
class AverageDiscountFactorBuilder(MartingaleAnalysisBuilder):
    """
    Class for building the charts for a discounted TRI analysis.

    Renders the "Average discount factor" martingale-test chart; the base
    class drives the chart construction and calls
    ``perform_additional_formatting`` with the charter it built.
    """

    # Chart title and y-axis caption consumed by the builder base class.
    title = "Average discount factor"
    y_axis_label = "Spot rate"

    def perform_additional_formatting(self, charter: LineChart):
        """Format the y-axis tick labels as percentages with two decimals.

        NOTE(review): reaches into the charter's private ``_figure`` —
        presumably a Bokeh figure; confirm LineChart has no public hook.
        """
        # Format y-axis labels as %s
        charter._figure.yaxis[0].formatter = NumeralTickFormatter(format="0.00%")
| StarcoderdataPython |
1781984 | #!/usr/bin/env python3
# author @danbros
# Pset4 do curso MITx: 6.00.1x (edX)
import random
from typing import Union, Dict, List, Optional
# Shorthand type alias for a hand: letter -> occurrence count.
d_si = Dict[str, int]
def loadWords() -> List[str]:
    """Load the word list from WORDLIST_FILENAME.

    Returns:
        list: list of valid lowercase words, one per input line.
    """
    print("Loading word list from file...")
    # 'with' guarantees the file handle is closed even if reading fails
    # (the original opened the file and never closed it).
    with open(WORDLIST_FILENAME, 'r') as inFile:
        # wordList: list of strings, one cleaned word per line
        wordList = [line.strip().lower() for line in inFile]
    print(" ", len(wordList), "words loaded.")
    return wordList
def getFrequencyDict(sequence: Union[str, list]) -> d_si:
    """Count how often each element occurs in *sequence*.

    Args:
        sequence: string or list whose elements are tallied
    Returns:
        Dict[str, int]: a dictionary mapping each element of the sequence
        to the number of times it appears in the sequence.
    """
    counts: d_si = {}
    for element in sequence:
        if element in counts:
            counts[element] += 1
        else:
            counts[element] = 1
    return counts
# Problem #1: Scoring a word
def getWordScore(word: str, n: int) -> int:
    """Returns the score for a word. Assumes the word is a valid word.

    The score for a word is the sum of the points for letters in the word,
    multiplied by the length of the word, PLUS 50 points if all n letters are
    used on the first turn.

    Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is worth
    3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)

    Args:
        word: lowercase letters
        n: hand size
    Returns:
        int: score of word
    """
    # .get(char, 0) means unknown characters score 0 instead of raising
    # KeyError (as SCRABBLE_LETTER_VALUES[char] would).
    score = sum(SCRABBLE_LETTER_VALUES.get(char, 0) for char in word) * len(word)
    # Replaced the obscure `[ans, ans + 50][condition]` list-indexing trick
    # with an explicit conditional expression.
    return score + 50 if len(word) == n else score
# Problem #2:Make sure you understand how this function works and what it does!
def displayHand(hand: d_si) -> None:
    """Print the letters currently in the hand on a single line.

    Args:
        hand: letters in a hand ({'a':1, 'x':2, 'l':3, 'e':1})
    Returns:
        None: (but prints e.g. 'a x x l l l e ' followed by a newline).
    """
    for letter, count in hand.items():
        # Emit "letter " once per occurrence, all on the same line.
        print((letter + " ") * count, end="")
    print()
# Problem #2:Make sure you understand how this function works and what it does!
def dealHand(n: int) -> d_si:
    """Returns a random hand containing n lowercase letters.

    At least n/3 of the letters in the hand are vowels. Hands are
    represented as dictionaries: letter -> number of times the letter
    appears in the hand.

    Args:
        n: int >= 0, number of cards to keep in the hands
    Returns:
        Dict[str, int]: random dict of letter of "deck"
    """
    hand: Dict[str, int] = {}
    vowel_count = n // 3
    for position in range(n):
        # The first n//3 draws come from the vowel pool, the rest from the
        # consonant pool (same draw sequence as two separate loops).
        pool = VOWELS if position < vowel_count else CONSONANTS
        letter = pool[random.randrange(0, len(pool))]
        hand[letter] = hand.get(letter, 0) + 1
    return hand
# Problem #2: Update a hand by removing letters
def updateHand(hand: d_si, word: str) -> d_si:
    """Return a new hand with the letters of *word* removed.

    Assumes that 'hand' has all the letters in word: however many times a
    letter appears in 'word', 'hand' has at least as many of that letter.
    Has no side effects: does not modify hand.

    Args:
        hand: All letter in your hand
        word: word formed by player
    Returns:
        Dict[str, int]: new hand, without those int values from letters in it.
    """
    updated = dict(hand)
    for letter in word:
        # A letter absent from the hand ends up with a negative count,
        # exactly as the original unconditional decrement did.
        updated[letter] = updated.get(letter, 0) - 1
    return updated
# One line:
# return {char: hand[char] - word.count(char) for char in hand} # Kiwitrade
# Problem #3: Test word validity
def isValidWord(word: str, hand: Dict[str, int], wordList: List[str]) -> bool:
    """Return True when *word* is in wordList and can be spelled from *hand*.

    Does not mutate hand or wordList.

    Args:
        word: guess from the user
        hand: current letters in the hand
        wordList: valid list of all words
    Returns:
        bool: True if word is in the wordList and is entirely composed of
        letters in the hand. Otherwise, returns False.
    """
    if word not in wordList:
        return False
    # Work on a copy so the caller's hand is untouched.
    available = hand.copy()
    for letter in word:
        remaining = available.get(letter, 0)
        if remaining < 1:
            return False
        available[letter] = remaining - 1
    return True
# one line:
# return word in wordList and all(word.count(c) <= hand.get(c, 0)
# for c in word) # Kiwitrader
# Problem #4: Playing a hand
def calculateHandlen(hand: Dict[str, int]) -> int:
    """Return the total number of letters in the current hand.

    Args:
        hand: letter -> occurrence count
    Returns:
        int: the length (number of letters) in the current hand.
    """
    total = 0
    for count in hand.values():
        total += count
    return total
# Problem #4: Playing a hand
def playHand(hand: Dict[str, int], wordList: List[str], n: int) -> None:
    """Allows the user to play the given hand, as follows:

    * The hand is displayed.
    * The user may input a word or a single period (the string ".")
      to indicate they're done playing
    * Invalid words are rejected, and a message is displayed asking
      the user to choose another word until they enter a valid word or "."
    * When a valid word is entered, it uses up letters from the hand.
    * After every valid word: the score for that word is displayed,
      the remaining letters in the hand are displayed, and the user
      is asked to input another word.
    * The sum of the word scores is displayed when the hand finishes.
    * The hand finishes when there are no more unused letters or the user
      inputs a "."

    Args:
        hand: dict of letters generated by dealHand
        wordList: all accepted words
        n: hand size
    Returns:
        None
    """
    # Keep track of the total score
    score = 0
    cp_hand = hand.copy()
    # Pre-set to '.' so the end-of-game check below cannot raise a
    # NameError when the starting hand is empty and the loop never runs
    # (bug in the original).
    user_ans = '.'
    # As long as there are still letters left in the hand:
    while calculateHandlen(cp_hand) > 0:
        print('Current Hand: ', end=" ")
        displayHand(cp_hand)
        user_ans = input(
            'Enter word, or a "." to indicate that you are finished: ')
        if user_ans == '.':
            # End the game (break out of the loop)
            break
        if not isValidWord(user_ans, cp_hand, wordList):
            # Reject invalid word (print a message followed by a blank line)
            print('Invalid word, please try again.\n')
        else:
            # Compute the word score once (the original computed it twice).
            word_score = getWordScore(user_ans, n)
            score += word_score
            print(f'"{user_ans}" earned {word_score} '
                  + f'points. Total: {score} points\n')
            # Use up the letters of the accepted word.
            cp_hand = updateHand(cp_hand, user_ans)
    # Game is over (user entered a '.' or ran out of letters), so tell user
    # the total score
    if user_ans == ".":
        print(f'Goodbye! Total score: {score}')
    else:
        print(f'Run out of letters. Total score: {score}\n')
# Problem #5: Playing a game
def playGame(wordList: List[str]) -> None:
    """Allow the user to play an arbitrary number of hands.

    1) Asks the user to input 'n' or 'r' or 'e'.
       * If the user inputs 'n', let the user play a new (random) hand.
       * If the user inputs 'r', let the user play the last hand again.
       * If the user inputs 'e', exit the game.
       * If the user inputs anything else, tell them their input was invalid.
    2) When done playing the hand, repeat from step 1

    Args:
        wordList: list with all playable words
    Returns:
        None
    """
    hand: Dict[str, int] = {}
    while True:
        # Fixed the missing spaces the original string concatenation
        # produced ("...the lasthand..." and "...new handfirst!").
        user_ans = input('\nEnter n to deal a new hand, r to replay the last '
                         + 'hand, or e to end game: ')
        if user_ans == 'n':
            hand = dealHand(HAND_SIZE)
            playHand(hand, wordList, HAND_SIZE)
        elif user_ans == 'e':
            break
        elif user_ans == 'r':
            # An empty dict means no hand has been dealt yet.
            if len(hand) == 0:
                print('You have not played a hand yet. Please play a new hand '
                      + 'first!')
            else:
                playHand(hand, wordList, HAND_SIZE)
        else:
            print('Invalid command.')
# Letter pools used when dealing a random hand.
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
# Number of letters dealt per hand.
HAND_SIZE = 7
# Scrabble point value of each lowercase letter.
SCRABBLE_LETTER_VALUES = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1,
    'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1,
    's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# Word list file loaded by loadWords().
WORDLIST_FILENAME = 'words.txt'
if __name__ == '__main__':
wordList = loadWords()
playGame(wordList) | StarcoderdataPython |
4807157 | from .compute_log_manager import GCSComputeLogManager
from .resources import gcs_resource
from .system_storage import gcs_intermediate_storage, gcs_plus_default_intermediate_storage_defs
| StarcoderdataPython |
3249569 | __author__ = "<NAME>"
__copyright__ = "Carnegie Mellon University"
__license__ = "MIT"
__maintainer__ = ["<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__email__ = ["<EMAIL>", "<EMAIL>"]
__status__ = "Production"
import json
import imp
import os
from os.path import basename
from stat import *
from common import db
from common import cache
from pymongo.errors import DuplicateKeyError
from datetime import datetime
# Filesystem root of the publicly served web directory (configured elsewhere).
root_web_path = ""
# Auto-generated-problem directory, relative to root_web_path.
relative_auto_prob_path = ""
# Cache of loaded generator modules, keyed by problem id (pid).
auto_generators = dict()
def load_autogenerators():
    """Pre-fetch all autogenerators

    Pulls all problems from mongo where 'autogen' == True and then passes them one by one to load_autogenerator(p).
    """
    print "Loading autogenerators"
    for prob in list(db.problems.find({'autogen': True})):
        # load_autogenerator returns None when the module lacks a callable
        # generate() or its dependency validation fails.
        if load_autogenerator(prob) is None:
            print "ERROR - load_autogenerator(prob) returned None for pid: " + prob['pid']
def load_autogenerator(prob):
    """Loads an auto-generator from disk.

    Determines if the passed 'prob' variable is a problem dict from the db or simply a 'pid'. If it is a 'pid' the
    corresponding prob dict is queried. The generator code specified in the db is then loaded from disk and added
    to the table of generators if it has a callable 'generate' function. If the generator has a callable
    'validate_dependencies' function it is called prior to insertion.

    Returns the loaded generator module, or None when validation fails or
    no callable 'generate' exists.
    """
    if 'pid' not in prob:  # Prob is a 'pid', not a 'prob'
        prob = db.problems.find_one({'pid': prob})
    # [:-3] strips the '.py' suffix to form the module name; the source is
    # loaded from the local 'autogenerators/' directory.
    generator = imp.load_source(prob['generator'][:-3], 'autogenerators/'+prob['generator'])
    if hasattr(generator, 'validate_dependencies') and callable(generator.validate_dependencies):
        if not generator.validate_dependencies():
            return None
    if hasattr(generator, 'generate') and callable(generator.generate):
        # Cache for later lookups by build_problem_instance.
        auto_generators[prob['pid']] = generator
        return generator
    return None
def move_temporary_files(file_list, desc):
"""Move files in the tmp directory.
Takes a list of temporary files and a problem description. The files are enumerated and moved to the web
auto-problems directory (publicly accessible) and performs a string substitution on the passed problem desc
replacing the enumerated strings in the form ###file_X_url### with publicly accessible file path.
"""
for idx, file_path in enumerate(file_list):
file_name = basename(file_path)
write_path = _full_auto_prob_path() + file_name
print "Moving file %s to %s." % (file_path, write_path)
os.rename(file_path, _full_auto_prob_path() + file_name)
desc = desc.replace("###file_%s_url###" % str(idx + 1), "autoproblems/" + file_name)
os.chmod(write_path, S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH)
return desc
def build_problem_instance(prob, tid):
"""Builds unique problem dependencies for an auto-generated problem.
Gets the auto-generator instance for the passed problem and generates a problem instance. If no generator is found
in the preloaded generator dict the generator script is loaded from the database. We then build the problem
dependencies, grader, and description using the generator module. We move temporary files generated by the
generator to the web path and perform description substitutions to enable external access to these resources.
We then update the team document to specify that an auto-generated problem has been created for this team.
"""
generator = auto_generators.get(prob['pid'], None)
if generator is None:
print "Autogenerator for %s was not found in the precached list, rebuilding..." % prob['pid']
generator = load_autogenerator(prob['pid'])
if generator is None:
print "ERROR - load_autogenerator(pid) returned None for pid: " + prob['pid']
(file_list, grader, desc) = generator.generate()
if file_list is not None:
desc = move_temporary_files(file_list, desc)
if prob['grader'] == "key":
db.teams.update({'tid': tid}, {'$set': {'probinstance.'+prob['pid']: {'pid': prob['pid'],
'desc': desc,
'key': grader}}})
elif prob['grader'] == 'file':
db.teams.update({'tid': tid}, {'$set': {'probinstance.'+prob['pid']: {'pid': prob['pid'],
'desc': desc,
'grader': grader}}})
return desc
def load_unlocked_problems(tid):
    """Gets the list of all unlocked problems for a team.

    First check for 'unlocked_<tid>' in the cache, if it exists return it otherwise rebuild the unlocked list.
    Query all problems from the database as well as all submissions from the current team.
    Cycle over all problems while looking at their weightmap, check to see if problems in the weightmap are solved.
    Increment the threshold counter for solved weightmap problems.
    If the threshold counter is higher than the problem threshold then add the problem to the return list (ret).
    """
    unlocked = cache.get('unlocked_' + tid)  # Get the teams list of unlocked problems from the cache
    if unlocked is not None:  # Return this if it is not empty in the cache
        return json.loads(unlocked)
    unlocked = []
    team = db.teams.find_one({'tid': tid})
    # Lazily initialise the per-team problem-instance map on first use.
    if 'probinstance' not in team.keys():
        db.teams.update({'tid': tid}, {'$set': {'probinstance': {}}})
        team['probinstance'] = dict()
    # pids of every problem this team has already solved.
    correctPIDs = {p['pid'] for p in list(db.submissions.find({"tid": tid, "correct": True}))}
    for p in list(db.problems.find()):
        # A problem is unlocked when it has no weightmap/threshold at all, or
        # when the summed weights of its solved prerequisites reach threshold.
        if 'weightmap' not in p or 'threshold' not in p or sum([p['weightmap'][pid] for pid in correctPIDs if pid in p['weightmap']]) >= p['threshold']:
            # Auto-generated problems use the team's cached instance desc,
            # building a fresh instance on first unlock.
            unlocked.append({'pid': p['pid'],
                             'displayname': p.get('displayname', None),
                             'hint': p.get('hint', None),
                             'basescore': p.get('basescore', None),
                             'correct': True if p['pid'] in correctPIDs else False,
                             'desc': p.get('desc') if not p.get('autogen', False)
                             else team['probinstance'][p['pid']].get('desc', None) if p['pid'] in team.get('probinstance', dict())
                             else build_problem_instance(p, tid)})
    # Cheapest problems first; problems without a basescore sort last.
    unlocked.sort(key=lambda k: k['basescore'] if 'basescore' in k else 99999)
    # Cache the rebuilt list for one hour.
    cache.set('unlocked_' + tid, json.dumps(unlocked), 60 * 60)
    return unlocked
def get_solved_problems(tid):
    """Returns a list of all problems the team has solved.

    Checks for 'solved_<tid>' in the cache, if the list does not exists it rebuilds/inserts it.
    Queries the database for all submissions by the logged in team where correct == True.
    Finds all problems with a PID in the list of correct submissions.
    All solved problems are returned as a pid and display name.
    """
    solved = cache.get('solved_' + tid)
    if solved is not None:
        return json.loads(solved)
    # pids of all correct submissions for this team.
    sPIDs = {d['pid'] for d in list(db.submissions.find({"tid": tid, "correct": True}))}
    probs = list(db.problems.find({"pid": {"$in": list(sPIDs)}}, {'pid': 1, 'displayname': 1, 'basescore': 1}))
    # Highest-scored problems first; a missing basescore sorts as 99999.
    solved = sorted([{'pid': p['pid'],
                      'displayname': p.get('displayname', None),
                      'basescore': p.get('basescore', None)} for p in probs],
                    key=lambda k: k['basescore'] if 'basescore' in k else 99999,
                    reverse=True)
    # Cache the rebuilt list for one hour.
    cache.set('solved_' + tid, json.dumps(solved), 60 * 60)
    return solved
def get_single_problem(pid, tid):
    """Retrieve a single problem.

    Grab all problems from load_unlocked_problems (most likely cached) and
    return the one whose pid matches. If not found return status:0 with an
    error message.
    """
    match = next((p for p in load_unlocked_problems(tid) if p['pid'] == pid), None)
    if match is not None:
        return match
    return {'status': 0, 'message': 'Internal error, problem not found.'}
def submit_problem(tid, request):
    """Handle problem submission.

    Gets the key and pid from the submitted problem, calls the respective grading function if the values aren't empty.
    If correct all relevant cache values are cleared. The submission is the inserted into the database
    (an attempt is made). A relevant message is returned if the problem has already been solved or the answer
    has been tried.
    """
    pid = request.form.get('pid', '').strip()
    key = request.form.get('key', '').strip()
    correct = False
    # --- Input validation -------------------------------------------------
    if pid == '':
        return {"status": 0, "points": 0, "message": "Problem ID cannot be empty."}
    if key == '':
        return {"status": 0, "points": 0, "message": "Answer cannot be empty."}
    if pid not in [p['pid'] for p in load_unlocked_problems(tid)]:
        return {"status": 0, "points": 0, "message": "You cannot submit problems you have not unlocked."}
    prob = db.problems.find_one({"pid": pid})
    if prob is None:
        return {"status": 0, "points": 0, "message": "Problem ID not found in the database."}
    # --- Grading ----------------------------------------------------------
    # NOTE(review): imp.load_source executes the grader script on every
    # submission; graders are assumed to be trusted, repo-local code.
    if not prob.get('autogen', False):  # This is a standard problem, not auto-generated
        (correct, message) = imp.load_source(prob['grader'][:-3], "./graders/" + prob['grader']).grade(tid, key)
    else:  # This is an auto-generated problem, grading is different.
        team = db.teams.find_one({'tid': tid})
        grader_type = prob.get('grader', 'file')
        # NOTE(review): 'message' stays unbound if grader_type is neither
        # 'file' nor 'key' -- the final return would raise; confirm the
        # schema only allows these two values.
        if grader_type == 'file':
            # Per-team grader script stored with the problem instance.
            (correct, message) = imp.load_source(team['probinstance'][pid]['grader'][:-3],
                                                 team['probinstance'][pid]['grader']).grade(tid, key)
        elif grader_type == 'key':
            correct = team['probinstance'][pid]['key'] == key
            message = prob.get('correct_msg', 'Correct!') if correct else prob.get('wrong_msg', 'Nope!')
    submission = {'tid': tid,
                  'timestamp': datetime.now(),
                  'pid': pid,
                  'ip': request.headers.get('X-Real-IP', None),
                  'key': key,
                  'correct': correct}
    if correct:
        # Invalidate every cached view this solve can change.
        cache.delete('unlocked_' + tid)  # Clear the unlocked problem cache as it needs updating
        cache.delete('solved_' + tid)  # Clear the list of solved problems
        cache.delete('teamscore_' + tid)  # Clear the team's cached score
        cache.delete('lastsubdate_' + tid)
        try:
            db.submissions.insert(submission)
        except DuplicateKeyError:
            # NOTE(review): DuplicateKeyError implies a unique index on the
            # submissions collection -- confirm against the schema.
            return {"status": 0, "points": 0, "message": "You have already solved this problem!"}
    else:
        try:
            db.submissions.insert(submission)
        except DuplicateKeyError:
            return {"status": 0, "points": 0, "message": "You already tried that!"}
    return {"status": 1 if correct else 0, "points": prob.get('basescore', 0), "message": message}
def _full_auto_prob_path():
    """Absolute path of the public auto-problem directory."""
    return "%s%s" % (root_web_path, relative_auto_prob_path)
| StarcoderdataPython |
1727204 | <reponame>YouFacai/iWiki<filename>backend/modules/doc/migrations/0012_doc_pv_docversion_pv.py
# Generated by Django 4.0.1 on 2022-01-23 12:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer page-view counter ('pv', 访问量 = "page views")
    to both the Doc and DocVersion models."""

    dependencies = [
        ("doc", "0011_alter_doc_index_together"),
    ]

    operations = [
        # db_index=True because the counter is used for ordering/filtering
        # (e.g. most-viewed listings); default 0 backfills existing rows.
        migrations.AddField(
            model_name="doc",
            name="pv",
            field=models.IntegerField(db_index=True, default=0, verbose_name="访问量"),
        ),
        migrations.AddField(
            model_name="docversion",
            name="pv",
            field=models.IntegerField(db_index=True, default=0, verbose_name="访问量"),
        ),
    ]
| StarcoderdataPython |
2567 | <reponame>purkhusid/rules_dotnet
"Actions for compiling resx files"
load(
"@io_bazel_rules_dotnet//dotnet/private:providers.bzl",
"DotnetResourceInfo",
)
def _make_runner_arglist(dotnet, source, output, resgen):
    """Builds the positional argument list for the resgen tool: sources, then output.

    `resgen` (the tool path) is accepted but not used by this helper.
    """
    args = dotnet.actions.args()

    # A Target contributes all of its files; a plain File is added as-is.
    if type(source) == "Target":
        args.add_all(source.files)
    else:
        args.add(source)

    args.add(output)
    return args
def emit_resx_core(
        dotnet,
        name = "",
        src = None,
        identifier = None,
        out = None,
        customresgen = None):
    """The function adds an action that compiles a single .resx file into .resources file.

    Returns [DotnetResourceInfo](api.md#dotnetresourceinfo).

    Args:
      dotnet: [DotnetContextInfo](api.md#dotnetcontextinfo).
      name: name of the file to generate.
      src: The .resx source file that is transformed into .resources file. Only `.resx` files are permitted.
      identifier: The logical name for the resource; the name that is used to load the resource. The default is the basename of the file name (no subfolder).
      out: An alternative name of the output file (if name should not be used).
      customresgen: custom resgen program to use.

    Returns:
      DotnetResourceInfo: [DotnetResourceInfo](api.md#dotnetresourceinfo).
    """
    if name == "" and out == None:
        fail("either name or out must be set")

    if not out:
        result = dotnet.actions.declare_file(name + ".resources")
    else:
        result = dotnet.actions.declare_file(out)

    args = _make_runner_arglist(dotnet, src, result, customresgen.files_to_run.executable.path)

    # We use the command to extract the shell path and force runfiles creation
    resolve = dotnet._ctx.resolve_tools(tools = [customresgen])
    inputs = src.files.to_list() if type(src) == "Target" else [src]

    dotnet.actions.run(
        inputs = inputs + resolve[0].to_list(),
        tools = customresgen.default_runfiles.files,
        outputs = [result],
        executable = customresgen.files_to_run,
        arguments = [args],
        env = {"RUNFILES_MANIFEST_FILE": customresgen.files_to_run.runfiles_manifest.path},
        mnemonic = "CoreResxCompile",
        input_manifests = resolve[1],
        # Typo fix: was "Compiling resoources" with a missing trailing space.
        progress_message = (
            "Compiling resources " + dotnet.label.package + ":" + dotnet.label.name
        ),
    )

    return DotnetResourceInfo(
        name = name,
        result = result,
        identifier = identifier,
    )
| StarcoderdataPython |
3349799 | from time import sleep
from legislative_act import model as dm
from lexparency import get_document_history
# Collect covers that are marked in force but also carry a 'repealed_by'
# reference -- an inconsistent state that should be corrected.
inconsistents = set()
for h in dm.Search().filter('term', doc_type='cover').filter('term', abstract__in_force=True).scan():
    try:
        if h.repealed_by:
            inconsistents.add((h.abstract.domain, h.abstract.id_local))
    except AttributeError:
        # Hit has no 'repealed_by' attribute: consistent, skip it.
        continue
# Report the inconsistent documents as "domain-id" lines.
print('\n'.join(sorted(map('-'.join, inconsistents))))
for k, (domain, celex) in enumerate(inconsistents):
    dh = get_document_history(domain, celex)
    # NOTE(review): sets the flag in memory; no explicit save/persist call
    # is visible here -- confirm get_document_history persists on assignment.
    dh.in_force = False
    # Throttle requests against the backing service.
    sleep(30)
if __name__ == '__main__':
    print('Done')
| StarcoderdataPython |
4826072 | <reponame>Captricity/cappa
from __future__ import print_function, absolute_import
from .pip import Pip
class Pip3(Pip):
    """Pip wrapper targeting the Python 3 'pip3' executable."""

    def __init__(self, *flags):
        # Reuse Pip's setup, then override the executable/display names.
        super(Pip3, self).__init__(*flags)
        self.name = 'pip3'
        self.friendly_name = 'pip3'
| StarcoderdataPython |
3252349 | <reponame>michelbeyrouty/aws-python-lambda-with-sns-input-handler
from app.controllers import test
# Maps an SNS topic name to the controller module that handles it.
routes = {
    "TEST": test
}
def fetch_controller(snsTopic):
    """Return the controller registered for *snsTopic*.

    Raises:
        TypeError: if no controller is registered for the topic.
    """
    # None as the missing sentinel instead of the original magic string
    # "notFound", which could collide with a legitimate mapping value.
    controller = routes.get(snsTopic)
    if controller is None:
        raise TypeError("Controller not found")
    return controller
| StarcoderdataPython |
62275 | # -*- coding: UTF-8 -*-
from itertools import cycle
import sklearn
from sklearn import linear_model
from scipy import interp
from sklearn.metrics import accuracy_score
import scipy
import os
import sys
import traceback
import glob
import numpy as np
##from sklearn.externals import joblib
import pickle
import joblib
from time import gmtime, strftime
import pymysql
import os
import datetime
from python_speech_features import mfcc
import scipy.io.wavfile
from utils1 import GENRE_DIR, GENRE_LIST
#import cPickle as pickle
# All genre labels known to the classifier (imported from utils1).
genre_list = GENRE_LIST
"""reads MFCC-files and prepares X_train and y_train.
genre_list must consist of names of folders/genres consisting of the required MFCC-files
base_dir must contain genre_list of directories
"""
# Given a wavfile, computes mfcc and saves mfcc data
def create_ceps(wavfile):
    """Compute MFCC features for *wavfile* and save them next to it.

    Returns the saved file's base path, without the '.npy' suffix that
    numpy.save appends; see write_ceps.
    """
    sampling_rate, song_array = scipy.io.wavfile.read(wavfile)
    #print(sampling_rate)
    """Get MFCC
    ceps : ndarray of MFCC
    mspec : ndarray of log-spectrum in the mel-domain
    spec : spectrum magnitude
    """
    ceps=mfcc(song_array)
    #ceps, mspec, spec= mfcc(song_array)
    #print(ceps.shape)
    # Replace NaN and infinite values in the MFCC array with 0 so later
    # averaging/classification does not propagate them.
    bad_indices = np.where(np.isnan(ceps))
    b=np.where(np.isinf(ceps))
    ceps[bad_indices]=0
    ceps[b]=0
    return write_ceps(ceps, wavfile)
# Saves mfcc data
def write_ceps(ceps, wavfile):
    """Save MFCC array *ceps* beside *wavfile* as '<base>.ceps.npy'.

    Returns '<base>.ceps' (numpy.save appends the '.npy' suffix itself,
    so callers must add '.npy' when loading).
    """
    base_wav, ext = os.path.splitext(wavfile)
    data_wav = base_wav + ".ceps"
    # np.save returns None; the original pointlessly kept its result.
    np.save(data_wav, ceps)
    print (data_wav)
    return data_wav
def main():
    """Classify the most recent bee-sound recording and push the result.

    Picks a wav file from a fixed recordings directory, extracts MFCC
    features, runs the pre-trained logistic-regression model on them and
    hands [hive_id, prediction, timestamp] to DatabaseSender.
    """
    # The hive id is the (assumed single) user directory under /home.
    HOME = os.listdir("/home")[0]
    hive_id = HOME
    data=[]
    #genre_list = ["classical", "jazz"] IF YOU WANT TO CLASSIFY ONLY CLASSICAL AND JAZZ
    #use FFT
    # X, y = read_fft(genre_list, base_dir_fft)
    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .20)
    # print('\n******USING FFT******')
    # learn_and_classify(X_train, y_train, X_test, y_test, genre_list)
    # print('*********************\n')
    #use MFCC
    # Load the pre-trained classifier stored under this hive's home.
    clf = open('/home/{HOME}/bee_sound/saved_models/model_mfcc_LR_all_v1.pkl'.format(HOME=HOME),'rb+')
    #clf = open('/home/normal/bee_sound/saved_models/model_mfcc_LR_all_v1.pkl','rb+')
    clf_a = pickle.load(clf)
    ## with open('saved_models/model_mfcc_log_1_without_knn.pkl','rb') as f:
    ##     print(f)
    ##     clf =pickle.load(f)
    #f=open('saved_models/model_mfcc_log_1_without_knn.pkl')
    #clf = pickle.load(f)
    X=[]
    path="/media/normal/TOSHIBA EXT/sound/"  # directory of recordings to read
    path_list=os.listdir(path)
    path_list.sort(reverse=True)
    n = 0
    # NOTE(review): this selects the entry at index 1 of the reverse-sorted
    # listing (the second file), not the first -- presumably to skip the
    # file currently being written; confirm intent.
    for filename in path_list:
        print(filename)
        if n == 1:
            target = filename
            #print target
            n += 1
            break
        else:
            n += 1
    result = create_ceps(path+target)
    #print result
    ceps = np.load(result+".npy")
    num_ceps = len(ceps)
    # Average the MFCC frames, dropping the first and last 10% of frames.
    X.append(np.mean(ceps[int(num_ceps*1/10):int(num_ceps*9/10)], axis=0))
    #print('******USING MFCC******')
    knn_predictions = clf_a.predict(X)
    data.append(hive_id)
    data.append(knn_predictions)
    ISOTIMEFORMAT = '%Y-%m-%d %H:%M:%S'
    data.append(datetime.datetime.now().strftime(ISOTIMEFORMAT))
    print (data[0])
    print (data[1][0])
    print (data[2])
    DatabaseSender(data)
def DatabaseSender(data):
    """Insert [hive_id, prediction, timestamp] into the remote MySQL table.

    data[0] = hive id, data[1] = model prediction (array-like),
    data[2] = formatted timestamp string.
    """
    db = pymysql.connect(host='192.168.127.12', user='root' , passwd='<PASSWORD>', db='110_bee_sound_test', port=33306)
    #db = pymysql.connect(host='localhost', user='root' , passwd='', db='bee')
    cursor = db.cursor()
    # SECURITY(review): SQL is built by string concatenation; if any value
    # can contain a quote this is injectable -- prefer parameterized
    # cursor.execute(sql, params).
    sql = "INSERT INTO `test`(`id`, `hive_id`, `status`, `time`) VALUES (NULL, '"+str(data[0])+"', '"+str(data[1][0])+"', '"+str(data[2])+"')"
    #print sql
    #Execute the SQL command
    try:
        cursor.execute(sql)
    except Exception:
        # Log the failure but still fall through to commit/close below.
        print(traceback.format_exc())
    #print sql
    #Commit your changes in the database
    db.commit()
    print 'Insert data successful...'
    #LogData('Insert Weather data successful...','weatherStatus.txt')
    db.close()
if __name__ == "__main__":
    # Entry point: classify the latest recording and record the result.
    main()
| StarcoderdataPython |
174912 | <filename>yelp_data_parsing.py
import ujson, json
import sets
import io
# Python 2 script: uses print statements, dict.has_key and unicode().
# Load one business JSON object per line.
busdata = []
with open('business.json', 'rb') as bus:
    for line in bus:
        business = ujson.loads(line)
        busdata.append(business)
print "Opened Yelp JSON file"
# Initialise one empty bucket per (lowercased) city name.
citydict = {}
citylist = []
for business in busdata:
    citylist.append(business['city'].lower())
cities = set(citylist)
for city in cities:
    citydict[city] = []
# Keep only restaurants; record [wheelchair, reviews, stars, name, address].
for business in busdata:
    for i in business['categories']:
        if i == "Restaurants":
            yelpdata = []
            # Missing "Wheelchair Accessible" attribute defaults to False.
            if (business['attributes'].has_key("Wheelchair Accessible")):
                yelpdata.append(business['attributes']['Wheelchair Accessible'])
            else:
                yelpdata.append(False)
            yelpdata.append(business['review_count'])
            yelpdata.append(business['stars'])
            yelpdata.append(business['name'])
            yelpdata.append(business['full_address'].replace("\n", ' '))
            citydict[business['city'].lower()].append(yelpdata)
print "Finished Preprocessing"
print len(citydict)
# Drop cities with no restaurants. NOTE(review): deleting while iterating
# items() is safe in Python 2 (items() is a list) but would raise in Python 3.
for k, v in citydict.items():
    if len(v) == 0:
        del citydict[k]
print len(citydict)
# Write the result as UTF-8 JSON (non-ASCII city/business names preserved).
with io.open('newdata.json', 'w', encoding='utf-8') as f:
    f.write(unicode(json.dumps(citydict, ensure_ascii=False)))
print "Wrote dict to file"
| StarcoderdataPython |
89458 | <gh_stars>10-100
#!/usr/bin/env python
##
## Project: Simple4All - November 2013 - www.simple4all.org
## Contact: <NAME> - <EMAIL>
import sys
import re
from configobj import ConfigObj
## History: public HTS -> Junichi's script -> Reima made stream independent -> Oliver
## put in separate script and moved from perl to python
# sub routine for generating proto-type model (Copy from HTS-2.1)
# Made stream-independent 23/4/2012 rk
# argv[1]: path of the proto-model file to write; argv[2]: ConfigObj config.
proto_out = sys.argv[1]
config_in = sys.argv[2]
config = ConfigObj(config_in)
# Per-stream settings as whitespace-separated strings (parsed below).
static_stream_sizes = config.get('STATIC_STREAM_SIZES', default='25 1 1 1') ### defaults for SPTK
MSD_stream_info = config.get('MSD_STREAM_INFO', default='0 1 1 1')
stream_weights = config.get('STREAM_WEIGHTS', default='1.0 1.0 1.0 0.9')
#static_stream_sizes = config.get('static_stream_sizes', default='25 1 1 1') ### defaults for SPTK
#MSD_stream_info = config.get('MSD_stream_info', default='0 1 1 1')
#stream_weights = config.get('stream_weights', default='1.0 1.0 1.0 0.9')
NSTATE = 1 ## fixed for skip model
## string -> numeric list conversion:
def int_list(string):
    """Split a whitespace-separated string into a list of ints."""
    tokens = re.split(r'\s+', string.strip())
    return list(map(int, tokens))
# Convert the whitespace-separated config strings into numeric lists.
static_stream_sizes = int_list(static_stream_sizes)
MSD_stream_info = int_list(MSD_stream_info)
n_weights = len(re.split('\s+', stream_weights.strip()))
num_stream = len(static_stream_sizes)
# All three per-stream settings must describe the same number of streams.
if (len(MSD_stream_info) != num_stream) or (n_weights!= num_stream):
    sys.exit('stream info not same: %s %s %s'%(static_stream_sizes, MSD_stream_info, stream_weights))
stream_indexes = range(1, num_stream+1)
# Non-MSD streams carry static + delta + delta-delta coefficients (x3);
# MSD streams keep just their static size.
total_stream_sizes = []
for (MSD,size) in zip(MSD_stream_info, static_stream_sizes):
    if MSD:
        total_stream_sizes.append(size)
    else:
        total_stream_sizes.append(size * 3)
vsize = sum(total_stream_sizes)
# Build the HTK prototype definition as one big string in `d`.
d = ''
## ----- HEADER -----
d += '~o <VecSize> %s <USER> <DIAGC> '%(vsize)
d += '<MSDInfo> %s '%(num_stream)
d += ' '.join([str(val) for val in MSD_stream_info])
d += '\n'
d += '<StreamInfo> %s '%(num_stream)
d += ' '.join([str(val) for val in total_stream_sizes])
d += '\n'
## ----- output HMMs ------
d += "<BeginHMM>\n"
d += " <NumStates> %d\n"%(NSTATE+2)
# output HMM states (states 1 and NSTATE+2 are HTK entry/exit states)
for i in range(2, NSTATE+2):
    # output state information
    d += " <State> %s\n"%(i)
    # output stream weight
    d += ' <SWeights> %d '%(num_stream)
    d += stream_weights
    d += '\n'
    # NOTE(review): this inner loop reuses the variable name `i`, shadowing
    # the state index; harmless only while NSTATE == 1.
    for (i, MSD, size) in zip(stream_indexes, MSD_stream_info, total_stream_sizes):
        d += " <Stream> %d\n"%(i)
        if not MSD:
            # Non-MSD stream: zero means, unit variances.
            d += " <Mean> %d\n "%(size)
            for j in range(size):
                d += "0.0 "
            d += '\n'
            d += " <Variance> %d\n "%(size)
            for j in range(size):
                d += "1.0 "
            d += '\n'
        else:
            # MSD stream: two mixtures for the two spaces.
            d += " <NumMixes> 2\n"
            # output 1st space (non 0-dimensional space)
            d += " <Mixture> 1 0.5000\n"
            d += " <Mean> 1 0.0 \n"
            d += " <Variance> 1 1.0 \n"
            # output 2nd space (0-dimensional space)
            d += " <Mixture> 2 0.5000\n"
            d += " <Mean> 0 \n"
            d += " <Variance> 0 \n"
# output state transition matrix
d += '<TransP> %d\n'%(NSTATE+2)
d += " 0.0 0.0 1.0 \n"
d += " 0.0 0.5 0.5 \n"
d += " 0.0 0.0 0.0 \n"
d += "\n<EndHMM>\n"
# Write the prototype in one call: the original's `for line in d` iterated
# the string character-by-character, and the handle was not protected by a
# context manager.
with open(proto_out, 'w') as proto_file:
    proto_file.write(d)
| StarcoderdataPython |
4841018 | <gh_stars>0
import paho.mqtt.client as mqtt # import the client1
def on_message(client, userdata, message):
    """MQTT callback: print a small record of the incoming message."""
    received = {
        'topic': message.topic,
        'sms': message.payload.decode("utf-8"),
    }
    print(received)
# Broker credentials; the port is kept as a string and converted below.
IP_BROKER = "192.168.1.108"
PORT_BROKER = "1883"
# IP_BROKER = "172.30.19.92"
# PORT_BROKER = "1883"
PORT_BROKER = int(PORT_BROKER)
print(f"Credenciales usar: ip={IP_BROKER} y port={PORT_BROKER}")
# Subscribe to every topic ("/#") and handle messages with on_message.
client_receive = mqtt.Client("P2")
client_receive.on_message=on_message
client_receive.connect(IP_BROKER, port=PORT_BROKER)
client_receive.subscribe("/#")
client_receive.loop_start()
client_receive.publish("/house/bulbs/bulb1","OFF")
import time
# Keep the process alive; the network loop runs on a background thread.
while True:
    time.sleep(5)
    print(".", end="")
# time.sleep(30) # wait
# client_receive.loop_stop()
| StarcoderdataPython |
3200453 | """
Django settings for intranet project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

# PROJECT_DIR: the Django project package; BASE_DIR: the repository root.
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# Application definition

INSTALLED_APPS = [
    # Project apps
    'box',
    'custom_user',
    'home',
    'blog',
    'search',
    # Wagtail CMS core and contrib modules
    'wagtail.contrib.forms',
    'wagtail.contrib.redirects',
    'wagtail.contrib.routable_page',
    'wagtail.embeds',
    'wagtail.sites',
    'wagtail.users',
    'wagtail.snippets',
    'wagtail.documents',
    'wagtail.images',
    'wagtail.search',
    'wagtail.admin',
    'wagtail.core',
    'wagtail.contrib.table_block',
    'wagtail.contrib.postgres_search',
    # Metrics, tables and Wagtail dependencies
    'django_prometheus',
    'django_tables2',
    'modelcluster',
    'taggit',
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    # Prometheus middlewares must wrap everything: Before first, After last.
    'django_prometheus.middleware.PrometheusBeforeMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'wagtail.core.middleware.SiteMiddleware',
    'wagtail.contrib.redirects.middleware.RedirectMiddleware',
    'django_prometheus.middleware.PrometheusAfterMiddleware',
]

ROOT_URLCONF = 'intranet.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates directory, searched before app templates.
        'DIRS': [
            os.path.join(PROJECT_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'intranet.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django_prometheus.db.backends.postgresql',
'NAME': os.environ.get('PG_DB', 'postgres'),
'USER': os.environ.get('PG_USER', 'postgres'),
'PASSWORD': os.environ.get('PG_PASSWORD'),
'HOST': os.environ.get('DB_HOST', 'db'),
'PORT': 5432,
}
}
CACHES = {
'default': {
'BACKEND': 'django_prometheus.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp/django_cache',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Denver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# ManifestStaticFilesStorage is recommended in production, to prevent outdated
# Javascript / CSS assets being served from cache (e.g. after a Wagtail upgrade).
# See https://docs.djangoproject.com/en/3.0/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
AUTH_USER_MODEL = 'custom_user.User'
# Wagtail settings
WAGTAIL_SITE_NAME = "intranet"
WAGTAILEMBEDS_RESPONSIVE_HTML = True
WAGTAIL_USER_EDIT_FORM = 'custom_user.forms.CustomUserEditForm'
WAGTAIL_USER_CREATION_FORM = 'custom_user.forms.CustomUserCreationForm'
WAGTAIL_USER_CUSTOM_FIELDS = ['title']
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.contrib.postgres_search.backend',
'SEARCH_CONFIG': 'english',
}
}
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'https://intranet.redbutte.utah.edu'
# Email Settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.utah.edu'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD')
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = '<EMAIL>'
SERVER_EMAIL = '<EMAIL>'
# Django_tables2 Settings
DJANGO_TABLES2_TEMPLATE = "django_tables2/bootstrap4.html"
| StarcoderdataPython |
57700 | <filename>mongocat/mongocat.py
"""Main module."""
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
import yaml
import json
from bson import json_util as bson_ser
import sys
def get_parser(parser_name):
    """Return a deserializer callable for one input line.

    Supported names: 'yaml' (safe load), 'json', and 'bson' (MongoDB
    extended JSON). Raises ValueError for any other name — the original
    silently returned None, which only failed later inside MongoCat.writeln.
    """
    if parser_name == 'yaml':
        return yaml.safe_load
    if parser_name == 'json':
        return json.loads
    if parser_name == 'bson':
        return bson_ser.loads
    raise ValueError(f"unknown parser: {parser_name!r}")
class MongoCat:
    """Pipe parsed text lines into a MongoDB collection.

    Each input line is deserialized with the configured parser and inserted
    into `<database>.<collection>`; on a duplicate `_id` the document is
    either replaced (`update_on_exists=True`) or the error is re-raised.
    """
    def __init__(self, database, collection, url, parser='yaml',
                 update_on_exists=True, **kw):
        self.database_name = database
        self.collection_name = collection
        self.update_on_exists = update_on_exists
        self.url = url
        self.parser = get_parser(parser)
        self.client = MongoClient(url)
        self.database = self.client[self.database_name]
        self.collection = self.database[self.collection_name]
    def on_error(self, error):
        """Report a per-line failure on stderr without aborting the stream."""
        print(f'E: {error}', file=sys.stderr)
    def writeln(self, line):
        """Parse one input line and store it; return the document id.

        Lines of length <= 1 (e.g. bare newlines) are skipped and yield None;
        insertion errors are reported via on_error and also yield None.
        """
        if len(line) > 1:
            doc = self.parser(line)  # renamed from `object` (shadowed builtin)
            print(doc)
            try:
                return self.put(doc, self.update_on_exists)
            except Exception as e:
                self.on_error(e)
    def put(self, object, update_on_exists=True):
        """Insert `object`; on a duplicate `_id`, replace it when allowed.

        NOTE(review): the parameter keeps its historical name `object`
        (shadowing the builtin) so keyword callers are not broken.
        """
        try:
            inserted_id = self.collection.insert_one(object).inserted_id
        except DuplicateKeyError:
            if update_on_exists:
                self.collection.replace_one(
                    {'_id': object['_id']}, object, upsert=True)
                inserted_id = object['_id']
            else:
                raise
        return inserted_id
    def iter_query(self, query):
        """Yield documents matching `query`."""
        for obj in self.collection.find(query):
            yield obj
    def iter_all(self):
        """Yield every document in the collection."""
        for obj in self.collection.find():
            yield obj
| StarcoderdataPython |
1660417 | # Stdlib imports
import logging
# Django imports
from django.db.models import Sum
# Pip imports
from rest_framework.decorators import api_view
from rest_framework.response import Response
# App imports
from ..models import EthVoter
from ..models import VoteLog
logger = logging.getLogger(__name__)


def _used_gas(addresses):
    """Total `used_gas` over the EthVoters at `addresses`; 0 when empty/None."""
    total = EthVoter.objects.filter(address__in=addresses) \
                            .aggregate(Sum('used_gas'))["used_gas__sum"]
    # aggregate() yields None when no rows match; normalize to 0.
    return total or 0


@api_view(['GET'])
def gas_voting_view(request, proposal_id):
    """Return gas-weighted vote totals (yay/nay/abstain) for one proposal.

    Each voter's weight is their on-chain `used_gas`; the identical
    aggregate-and-default pattern was previously repeated inline three times.
    """
    votes = VoteLog.objects.filter(proposal_id=proposal_id) \
                           .values_list('voter', flat=True)
    return Response({
        'yay': _used_gas(votes.filter(selected_option=VoteLog.YAY)),
        'nay': _used_gas(votes.filter(selected_option=VoteLog.NAY)),
        'abstain': _used_gas(votes.filter(selected_option=VoteLog.ABSTAIN)),
    })
1775170 | <reponame>isl-org/adaptive-surface-reconstruction
#
# Copyright 2022 Intel (Autonomous Agents Lab)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
def compute_scale_compatibility(query_scale,
                                point_scale,
                                neighbors_index,
                                neighbors_row_splits,
                                gamma=2):
    """Per-pair scale compatibility (min(s_i, s_j) / max(s_i, s_j)) ** gamma.

    Args:
        query_scale: 1D array with the scale s_i of each query point.
        point_scale: 1D array with the scale s_j of each point.
        neighbors_index: 1D array of indices into point_scale.
        neighbors_row_splits: 1D prefix array delimiting each query's
            neighbor range inside neighbors_index.
        gamma: exponent applied to the min/max ratio.

    Returns:
        1D array with the same length and layout as neighbors_index.
    """
    counts = np.diff(neighbors_row_splits)
    query_ids = np.repeat(np.arange(counts.shape[0]), counts)
    s_query = query_scale[query_ids]
    s_point = point_scale[neighbors_index]
    ratio = np.minimum(s_query, s_point) / np.maximum(s_query, s_point)
    return ratio ** gamma
| StarcoderdataPython |
1628485 | # Copyright 2022 Accenture Global Solutions Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import types
import typing as tp
import tempfile
import pathlib
import tracdap.rt.api as _api
import tracdap.rt.metadata as _meta
import tracdap.rt.config as _cfg
import tracdap.rt.exceptions as _ex
import tracdap.rt.impl.util as _util
import tracdap.rt.impl.type_system as _types
import tracdap.rt.impl.repos as _repos
import tracdap.rt.impl.shim as _shim
class ModelLoader:
    """Loads TRAC model classes from model repositories, one cache per scope.

    Each named scope (e.g. one per job) owns a scratch checkout directory
    and a cache of model classes already loaded for that scope.
    """
    class _ScopeState:
        # Per-scope checkout directory plus cache of loaded model classes,
        # keyed by "repo#path#version#entryPoint".
        def __init__(self, scratch_dir: tp.Union[pathlib.Path, str]):
            self.scratch_dir = scratch_dir
            self.cache: tp.Dict[str, _api.TracModel.__class__] = dict()
    def __init__(self, sys_config: _cfg.RuntimeConfig):
        self.__repos = _repos.RepositoryManager(sys_config)
        self.__scopes: tp.Dict[str, ModelLoader._ScopeState] = dict()
        self.__log = _util.logger_for_object(self)
    def create_scope(self, scope: str, model_scratch_dir: tp.Optional[tp.Union[str, pathlib.Path]] = None):
        """Register a scope; a temp dir is created when no scratch dir is given."""
        # TODO: Use a per-job location for model checkouts, that can be cleaned up?
        if model_scratch_dir is None:
            model_scratch_dir = tempfile.mkdtemp()
        self.__scopes[scope] = ModelLoader._ScopeState(model_scratch_dir)
    def destroy_scope(self, scope: str):
        """Drop a scope and its cached classes (checkout dir is NOT removed yet)."""
        # TODO: Delete model checkout location
        del self.__scopes[scope]
    def load_model_class(self, scope: str, model_def: _meta.ModelDefinition) -> _api.TracModel.__class__:
        """Check out (if needed) and load the model class named by model_def.

        Results are cached per scope, keyed on repository, path, version and
        entry point.
        """
        state = self.__scopes[scope]
        model_key = f"{model_def.repository}#{model_def.path}#{model_def.version}#{model_def.entryPoint}"
        model_class = state.cache.get(model_key)
        if model_class is not None:
            return model_class
        self.__log.info(f"Loading model [{model_def.entryPoint}] (version=[{model_def.version}], scope=[{scope}])...")
        # TODO: Prevent duplicate checkout per scope
        repo = self.__repos.get_repository(model_def.repository)
        checkout_dir = pathlib.Path(state.scratch_dir)
        checkout = repo.checkout_model(model_def, checkout_dir)
        with _shim.ShimLoader.use_checkout(checkout):
            # entryPoint is "package.module.ClassName"
            module_name = model_def.entryPoint.rsplit(".", maxsplit=1)[0]
            class_name = model_def.entryPoint.rsplit(".", maxsplit=1)[1]
            model_class = _shim.ShimLoader.load_class(module_name, class_name, _api.TracModel)
        state.cache[model_key] = model_class
        return model_class
    def scan_model(self, model_class: _api.TracModel.__class__) -> _meta.ModelDefinition:
        """Instantiate model_class and build its ModelDefinition.

        Collects the declared parameters/inputs/outputs, encodes parameter
        defaults, logs the resulting schema, and wraps any failure in
        EModelValidation (chained to the original error).
        """
        model: _api.TracModel = object.__new__(model_class)
        model_class.__init__(model)
        try:
            parameters = model.define_parameters()
            inputs = model.define_inputs()
            outputs = model.define_outputs()
            for parameter in parameters.values():
                if parameter.defaultValue is not None:
                    parameter.defaultValue = _types.MetadataCodec.encode_value(parameter.defaultValue)
            # TODO: Model validation
            model_def = _meta.ModelDefinition()
            model_def.parameters.update(parameters)
            model_def.inputs.update(inputs)
            model_def.outputs.update(outputs)
            for name, param in model_def.parameters.items():
                self.__log.info(f"Parameter [{name}] - {param.paramType.basicType.name}")
            for name, schema in model_def.inputs.items():
                self.__log.info(f"Input [{name}] - {schema.schema.schemaType.name}")
            for name, schema in model_def.outputs.items():
                self.__log.info(f"Output [{name}] - {schema.schema.schemaType.name}")
            return model_def
        except Exception as e:
            model_class_name = f"{model_class.__module__}.{model_class.__name__}"
            msg = f"An error occurred while scanning model class [{model_class_name}]: {str(e)}"
            self.__log.error(msg, exc_info=True)
            raise _ex.EModelValidation(msg) from e
| StarcoderdataPython |
4810364 | <gh_stars>10-100
"""
Testing clustering algorithms in Clusterpy -Helper functions-
Tests for one of the core classes in clusterpy. Region Maker.
"""
from unittest import TestCase, skip
from math import pi
from clusterpy import importArcData
from clusterpy.core.toolboxes.cluster.componentsAlg import AreaManager
from clusterpy.core.toolboxes.cluster.componentsAlg import RegionMaker
map_type = 'n100'
max_num_regions = 10
sample_input_path = "clusterpy/data_examples/" + map_type
class TestRegionMaker(TestCase):
    # Exercises RegionMaker's exogenous region growing on the bundled n100 map.
    # NOTE(review): this module targets Python 2 — it uses `xrange`.
    def setUp(self):
        # Load the sample map for every test: attribute matrix Y plus both
        # contiguity matrices (rook and queen adjacency).
        map_instance = importArcData(sample_input_path)
        self.Y = map_instance.Y
        self.Wrook = map_instance.Wrook
        self.Wqueen = map_instance.Wqueen
    def tearDown(self):
        pass
    @skip
    def test_construct_regions_method(self):
        # Placeholder: intentionally skipped and unimplemented.
        self.assertTrue(False)
    def test_grow_exogenous_regions_rook(self):
        """Number of regions is exogenous, aka given (Wrook)"""
        am = AreaManager(self.Wrook, self.Y)
        for regions in xrange(1, max_num_regions):
            # The maker must produce exactly the requested number of regions
            # and the partition must be feasible (contiguous).
            rm = RegionMaker(am, pRegions=regions)
            self.assertEqual(regions, len(rm.region2Area))
            self.assertTrue(am.checkFeasibility(rm.returnRegions()))
    def test_grow_exogenous_regions_queen(self):
        """Number of regions is exogenous, aka given (Wqueen)"""
        am = AreaManager(self.Wqueen, self.Y)
        for regions in xrange(1, max_num_regions):
            rm = RegionMaker(am, pRegions=regions)
            self.assertEqual(regions, len(rm.region2Area))
            self.assertTrue(am.checkFeasibility(rm.returnRegions()))
    @skip
    def test_grow_exogenous_regions_with_initial_solution(self):
        """Number of regions is exogenous, aka given, and an initial solution"""
        am = AreaManager(self.Wqueen, self.Y)
        rm = RegionMaker(am)
        self.assertIsNotNone(rm)
    @skip
    def test_grow_endogenous_threshold_regions(self):
        """Number of regions is endogenous with a threshold value"""
        am = AreaManager(self.Wqueen, self.Y)
        rm = RegionMaker(am)
        self.assertIsNotNone(rm)
    @skip
    def test_grow_endogenous_range_regions(self):
        """Number of regions is endogenous with a range value"""
        am = AreaManager(self.Wqueen, self.Y)
        rm = RegionMaker(am)
        self.assertIsNotNone(rm)
| StarcoderdataPython |
1614017 | <reponame>aNOOBisTheGod/yandex-lyceum-qt
import string
def totenth(num, base):
    """Convert `num`, written in `base` (digits 0-9 then A-Z), to base 10.

    The value is returned as the string form of a float, e.g.
    totenth('FF', 16) -> '255.0', totenth('A.8', 16) -> '10.5'.
    Raises ValueError (a subclass of the Exception raised before) when a
    digit is not valid in `base`. The stray debug `print` per digit has
    been removed.
    """
    num = num.upper()
    # Validate every digit against the base before converting.
    for ch in num:
        if ch == '.':
            continue
        if ch.isalpha():
            if string.ascii_uppercase.find(ch) + 10 >= base:
                raise ValueError('Invalid number')
        else:
            if int(ch) >= base:
                raise ValueError('Invalid number')
    parts = num.split('.')
    # Integer part: positional weights base**0, base**1, ... right to left.
    int_value = 0.0
    for power, digit in enumerate(reversed(list(parts[0]))):
        try:
            x = int(digit)
        except ValueError:
            x = 10 + string.ascii_uppercase.find(digit)
        int_value += x * base ** power
    # Fractional part: weights base**-1, base**-2, ... left to right.
    frac_value = 0.0
    if len(parts) > 1:
        for power, digit in enumerate(list(parts[1])):
            try:
                x = int(digit)
            except ValueError:
                x = 10 + string.ascii_uppercase.find(digit)
            frac_value += x * base ** -(power + 1)
    return str(int_value + frac_value)
def fromtenth(num, base):
    """Convert `num` (a base-10 number given as a string) to `base`.

    Digits above 9 are written A-Z. Fixes several defects in the original:
    the last integer digit was never letter-converted (255 in base 16 came
    out as '51F' instead of 'FF'), digit value 10 fell through to the
    decimal branch (`> 10` instead of `>= 10`), and fractional digits >= 10
    produced wrong characters (float-indexing raised TypeError anyway).
    Fractional parts are emitted to at most 5 places, stopping early when
    the remainder reaches zero, as before.
    """
    digits = string.digits + string.ascii_uppercase
    parts = num.split('.')
    x = int(parts[0])
    res = ''
    if x == 0:
        res = '0'
    while x > 0:
        res += digits[x % base]
        x //= base
    res = res[::-1]
    if len(parts) > 1:
        res += '.'
        frac = float('0.' + parts[1])
        for _ in range(5):
            frac *= base
            d = int(frac)
            res += digits[d]
            frac -= d
            if frac == 0:
                return res
    return res
| StarcoderdataPython |
3220628 | <gh_stars>0
from yeelight import discover_bulbs, Bulb
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
@app.route('/')
def index():
    # Discover Yeelight bulbs on the local network and render them sorted
    # by IP so the listing order is stable between reloads.
    bulbs = discover_bulbs()
    bulbs = sorted(bulbs, key=lambda k: k["ip"])
    return render_template('index.html', bulbs=bulbs)
@app.route('/toggle')
def toggle():
    # Toggle the bulb whose IP arrives as ?bulb_ip=... then go back home.
    bulb_ip = request.args.get('bulb_ip')
    bulb = Bulb(bulb_ip)
    # NOTE(review): Bulb(...) instances are always truthy, so this guard
    # never skips the toggle — confirm whether a None/invalid-IP check
    # was intended here.
    if bulb:
        bulb.toggle()
    return redirect(url_for('index'))
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| StarcoderdataPython |
142433 | # -*- coding: utf-8 -*-
"""API models package."""
from . import asset_mixin, devices, fields, labels, saved_query, users
from .asset_mixin import AssetMixin
from .devices import Devices
from .fields import Fields
from .labels import Labels
from .saved_query import SavedQuery
from .users import Users
# Explicit public API re-exported by this models package.
__all__ = (
    "Users",
    "Devices",
    "AssetMixin",
    "SavedQuery",
    "Fields",
    "Labels",
    "users",
    "devices",
    "fields",
    "asset_mixin",
    "labels",
    "saved_query",
)
| StarcoderdataPython |
3310546 | <reponame>jlohmoeller/ngsi-timeseries-api
from conftest import QL_URL, crate_translator as translator
from datetime import datetime
from reporter.tests.utils import insert_test_data
import pytest
import requests
entity_type = 'Room'        # NGSI entity type used throughout these tests
attr_name = 'temperature'   # attribute queried in every request
n_days = 6                  # daily records inserted per entity by the fixture
def query_url(values=False):
    """Build the per-attribute query endpoint.

    With values=True the ".../value" variant is returned instead.
    """
    url = "{qlUrl}/types/{entityType}/attrs/{attrName}".format(
        qlUrl=QL_URL,
        entityType=entity_type,
        attrName=attr_name,
    )
    if values:
        url += '/value'
    return url
@pytest.fixture()
def reporter_dataset(translator):
    """Insert n_days of daily records for three entities of entity_type."""
    insert_test_data(translator, [entity_type], n_entities=3, n_days=n_days)
    yield
def test_1TNE1A_defaults(reporter_dataset):
    """Without ids, the endpoint returns every entity of the type."""
    r = requests.get(query_url(), params={'type': entity_type})
    assert r.status_code == 200, r.text

    values = list(range(n_days))
    index = ['1970-01-{:02}T00:00:00'.format(d + 1) for d in values]
    expected = [
        {'entityId': 'Room{}'.format(i), 'index': index, 'values': values}
        for i in range(3)
    ]

    response = r.json()
    assert isinstance(response, dict)
    assert response['data']['entityType'] == entity_type
    assert response['data']['attrName'] == attr_name
    assert response['data']['entities'] == expected
def test_1TNE1A_one_entity(reporter_dataset):
    """Filtering by a single id returns just that entity's series."""
    r = requests.get(query_url(), params={'type': entity_type, 'id': 'Room1'})
    assert r.status_code == 200, r.text

    values = list(range(n_days))
    index = ['1970-01-{:02}T00:00:00'.format(d + 1) for d in values]
    expected = [{'entityId': 'Room1', 'index': index, 'values': values}]

    response = r.json()
    assert isinstance(response, dict)
    assert response['data']['entityType'] == entity_type
    assert response['data']['attrName'] == attr_name
    assert response['data']['entities'] == expected
def test_1TNE1A_some_entities(reporter_dataset):
    """A comma-separated id list restricts the response to those entities."""
    r = requests.get(query_url(), params={'type': entity_type,
                                          'id': 'Room0,Room2'})
    assert r.status_code == 200, r.text

    values = list(range(n_days))
    index = ['1970-01-{:02}T00:00:00'.format(d + 1) for d in values]
    expected = [
        {'entityId': 'Room{}'.format(i), 'index': index, 'values': values}
        for i in (0, 2)
    ]

    response = r.json()
    assert isinstance(response, dict)
    assert response['data']['entityType'] == entity_type
    assert response['data']['attrName'] == attr_name
    assert response['data']['entities'] == expected
def test_1TNE1A_values_defaults(reporter_dataset):
    """The /value endpoint ignores empty/invalid ids and returns bare values."""
    params = {
        'type': entity_type,
        'id': 'Room0,,Room1,RoomNotValid',  # -> validates to Room0,Room1.
    }
    r = requests.get(query_url(values=True), params=params)
    assert r.status_code == 200, r.text

    values = list(range(n_days))
    index = ['1970-01-{:02}T00:00:00'.format(d + 1) for d in values]
    expected = [
        {'entityId': 'Room{}'.format(i), 'index': index, 'values': values}
        for i in (0, 1)
    ]

    response = r.json()
    assert isinstance(response, dict)
    assert response == {'data': {'values': expected}}
def test_not_found():
    """An id matching no records yields a 404 with a JSON error body."""
    r = requests.get(query_url(), params={'type': entity_type,
                                          'id': 'RoomNotValid'})
    assert r.status_code == 404, r.text
    assert r.json() == {
        "error": "Not Found",
        "description": "No records were found for such query."
    }
def test_weird_ids(reporter_dataset):
    """
    Invalid ids are ignored (provided at least one is valid to avoid 404).
    Empty values are ignored.
    Order of ids is preserved in response (e.g., Room1 first, Room0 later)
    """
    params = {
        'type': entity_type,
        'id': 'Room1,RoomNotValid,,Room0,',  # -> validates to Room1,Room0.
    }
    r = requests.get(query_url(), params=params)
    assert r.status_code == 200, r.text

    values = list(range(n_days))
    index = ['1970-01-{:02}T00:00:00'.format(d + 1) for d in values]
    expected = [
        {'entityId': 'Room{}'.format(i), 'index': index, 'values': values}
        for i in (1, 0)
    ]

    response = r.json()
    assert isinstance(response, dict)
    assert response['data']['entityType'] == entity_type
    assert response['data']['attrName'] == attr_name
    assert response['data']['entities'] == expected
def test_different_time_indexes(translator):
    """
    Each entity should have its time_index array.
    """
    t = 'Room'
    insert_test_data(translator, [t], n_entities=1, entity_id='Room1', n_days=2)
    insert_test_data(translator, [t], n_entities=1, entity_id='Room3', n_days=4)
    insert_test_data(translator, [t], n_entities=1, entity_id='Room2', n_days=3)

    r = requests.get(query_url(), params={'type': 'Room',
                                          'id': 'Room3,Room1,Room2'})
    assert r.status_code == 200, r.text

    # Requested order is preserved; each entity has its own index length.
    expected = [
        {'entityId': 'Room{}'.format(i),
         'index': ['1970-01-{:02}T00:00:00'.format(d + 1) for d in range(n)],
         'values': list(range(n))}
        for i, n in ((3, 4), (1, 2), (2, 3))
    ]

    response = r.json()
    assert isinstance(response, dict)
    assert response['data']['entityType'] == 'Room'
    assert response['data']['attrName'] == attr_name
    assert response['data']['entities'] == expected
def test_aggregation_is_per_instance(translator):
    """
    Attribute Aggregation works by default on a per-instance basis.
    Cross-instance aggregation not yet supported.
    It would change the shape of the response.
    """
    t = 'Room'
    insert_test_data(translator, [t], n_entities=1, entity_id='Room0', n_days=3)
    insert_test_data(translator, [t], n_entities=1, entity_id='Room1', n_days=9)

    # Plain aggregation: empty index, one aggregate value per entity.
    r = requests.get(query_url(), params={'type': t,
                                          'id': 'Room0,Room1',
                                          'aggrMethod': 'sum'})
    assert r.status_code == 200, r.text
    expected = [
        {'entityId': eid, 'index': ['', ''], 'values': [val]}
        for eid, val in (('Room0', sum(range(3))), ('Room1', sum(range(9))))
    ]
    response = r.json()
    assert isinstance(response, dict)
    assert response['data']['entityType'] == t
    assert response['data']['attrName'] == attr_name
    assert response['data']['entities'] == expected

    # With a date range, the index echoes the requested fromDate/toDate.
    r = requests.get(query_url(), params={
        'type': t,
        'id': 'Room0,Room1',
        'aggrMethod': 'max',
        'fromDate': datetime(1970, 1, 1).isoformat(),
        'toDate': datetime(1970, 1, 6).isoformat(),
    })
    assert r.status_code == 200, r.text
    span = ['1970-01-01T00:00:00', '1970-01-06T00:00:00']
    expected = [
        {'entityId': eid, 'index': span, 'values': [val]}
        for eid, val in (('Room0', 2), ('Room1', 5))
    ]
    response = r.json()
    assert isinstance(response, dict)
    assert response['data']['entityType'] == t
    assert response['data']['attrName'] == attr_name
    assert response['data']['entities'] == expected
def test_1T1ENA_aggrPeriod(reporter_dataset):
    """aggrPeriod without aggrMethod is a 400; with it, it is 501 (unimplemented)."""
    # GH issue https://github.com/smartsdk/ngsi-timeseries-api/issues/89
    r = requests.get(query_url(), params={'type': entity_type,
                                          'aggrPeriod': 'minute'})
    assert r.status_code == 400, r.text

    r = requests.get(query_url(), params={'type': entity_type,
                                          'aggrMethod': 'avg',
                                          'aggrPeriod': 'minute'})
    assert r.status_code == 501, r.text
| StarcoderdataPython |
3320493 | <reponame>tahmidbintaslim/screenlamp
# <NAME> 2017
#
# screenlamp is a Python toolkit
# for hypothesis-driven virtual screening.
#
# Copyright (C) 2017 Michigan State University
# License: Apache v2
#
# Software author: <NAME> <http://sebastianraschka.com>
# Software author email: <EMAIL>
#
# Software source repository: https://github.com/rasbt/screenlamp
# Documentation: https://psa-lab.github.io/screenlamp
#
# screenlamp was developed in the
# Protein Structural Analysis & Design Laboratory
# (http://www.kuhnlab.bmb.msu.edu)
#
# If you are using screenlamp in your research, please cite
# the following journal article:
#
# Raschka, Sebastian, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>,
# and <NAME>. 2017
#
# Enabling the hypothesis-driven prioritization of
# ligand candidates in big databases:
# Screenlamp and its application to GPCR inhibitor
# discovery for invasive species control.
#
import os
import subprocess
import sys
import argparse
from multiprocessing import cpu_count
def get_num_cpus(n_cpus):
    """Resolve the `--processes` CLI value to an actual process count.

    n_cpus > 0          -> use exactly n_cpus processes
    n_cpus == 0 / falsy -> use all available CPUs
    n_cpus < 0          -> use all available CPUs minus |n_cpus|

    Bug fix: the original computed `cpu_count() - n_cpus` for negative
    values, which *added* CPUs (e.g. -1 yielded cpu_count() + 1);
    `cpu_count() + n_cpus` matches the documented "leave cores free"
    behavior described in the argparse help text.
    """
    if not n_cpus:
        return cpu_count()
    if n_cpus < 0:
        return cpu_count() + n_cpus
    return n_cpus
def get_mol2_files(dir_path):
    """Collect mol2 file paths from `dir_path`.

    A directory yields all contained `.mol2`/`.mol2.gz` files (in listdir
    order); a single matching file yields itself; anything else yields [].
    """
    extensions = ('.mol2', 'mol2.gz')
    if os.path.isdir(dir_path):
        return [os.path.join(dir_path, entry)
                for entry in os.listdir(dir_path)
                if entry.endswith(extensions)]
    if os.path.isfile(dir_path) and dir_path.endswith(extensions):
        return [dir_path]
    return []
def run_omega(source_file, target_file, n_processes, settings):
    # Launch one OMEGA conformer-generation job for a single mol2 input.
    # NOTE(review): relies on the module-global EXECUTABLE, which is only
    # assigned inside the __main__ block below — this function is not
    # usable when the module is imported without setting it first.
    prefix = ''.join(target_file.split('.mol2')[:-1])
    sys.stdout.write('Processing %s\n' % source_file)
    sys.stdout.flush()
    cmd = [EXECUTABLE,
           '-in', source_file,
           '-out', target_file,
           '-prefix', prefix,
           '-mpi_np', str(n_processes)]
    # Append extra whitespace-separated OMEGA flags verbatim, if any.
    if settings:
        for s in settings.split():
            s = s.strip()
            if s:
                cmd.append(s)
    subprocess.call(cmd, stdout=subprocess.PIPE, bufsize=1)
def main(input_dir, output_dir, n_processes, settings):
    # Mirror every mol2 file from input_dir into output_dir, running OMEGA
    # sequentially on each one with the resolved CPU count and settings.
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    mol2_in_files = get_mol2_files(input_dir)
    mol2_out_files = [os.path.join(output_dir, os.path.basename(mol2))
                      for mol2 in mol2_in_files]
    n_processes = get_num_cpus(n_processes)
    for i, j in zip(mol2_in_files, mol2_out_files):
        run_omega(source_file=i,
                  target_file=j,
                  n_processes=n_processes,
                  settings=settings)
if __name__ == '__main__':
    # CLI entry point: parse arguments, record the OMEGA executable, and
    # process every database partition found under --input.
    parser = argparse.ArgumentParser(
            description='Wrapper running OpenEye OMEGA on one'
                        '\nor more database partitions.',
            epilog="""Example:
python generate_conformers_omega.py\\
  --input dbase_mol2\\
  --output dbase_conformers/\\
  --executable /.../omega2-2.5.1.4\\
  --processes 0""",
            formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-i', '--input',
                        type=str,
                        required=True,
                        help='Input directory with `.mol2`'
                             ' and `.mol2.gz` files.')
    parser.add_argument('-o', '--output',
                        type=str,
                        required=True,
                        help='Directory for writing the output files.')
    parser.add_argument('--executable',
                        type=str,
                        required=True,
                        help="""(Required.) The path or command for running
                        OpenEye OMEGA2 on your system.""")
    parser.add_argument('--settings',
                        type=str,
                        default='-maxconfs 200 -warts false -progress percent',
                        help='(Optional.) OMEGA settings to use.')
    parser.add_argument('--processes',
                        type=int,
                        default=1,
                        help='(Optional, default: `1`.) Number of processes to'
                             ' run in parallel.'
                             '\nIf processes > 0, the specified number of CPUs'
                             '\nwill be used.'
                             '\nIf processes = 0, all available CPUs will'
                             '\nbe used.'
                             '\nIf processes = -1, all available CPUs'
                             '\nminus `processes` will be used.')
    parser.add_argument('-v', '--version', action='version', version='v. 1.0')
    args = parser.parse_args()
    EXECUTABLE = args.executable
    main(input_dir=args.input,
         output_dir=args.output,
         n_processes=args.processes,
         settings=args.settings)
| StarcoderdataPython |
1797299 | <filename>example/dt/7.preprocessor/docker/build/KETI_Preprocessor.py<gh_stars>0
from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from msgParser import *
import msgParser
import json
import happybase
import sys
import os
import pika
import time
from influxdb import InfluxDBClient
# Runtime configuration, injected via environment variables (k8s-style).
SPARK_PERIOD_SEC = int(os.environ["SPARK_PERIOD_SEC"]) # 5
ZKQUORUM = os.environ['ZKQUORUM'] #"zk-cs.datacenter.svc.cluster.local:2181"
QUEUE_NAME = os.environ["QUEUE_NAME"]
QUEUE_TOPIC = os.environ["QUEUE_TOPIC"]
RABITMQ = os.environ["RABITMQ"]
# Table-name suffixes appended to each crane's full name when composing
# measurement names (e.g. "<CraneFullName>_Data").
sensorList_table = "_SensorList"
data_table = "_Data"
version_table = "_Version"
INFLUXDB = os.environ["INFLUXDB_SERVICE"]
def saveInfluxDBData(data_dict) :
    # Persist one parsed crane record (a JSON string) into InfluxDB: each
    # key/value pair becomes a point in '<CraneFullName>_Data', tagged with
    # the sensor name.
    client = InfluxDBClient(INFLUXDB, 8086)
    try:
        client.create_database('mydb')
    except Exception as e:
        # Best effort: creation fails harmlessly when 'mydb' already exists.
        print(e)
        pass
    try:
        client.switch_database('mydb')
    except Exception as e:
        print(e)
        pass
    data_dict = json.loads(data_dict)  # the argument arrives serialized
    CraneFullName = data_dict['CraneFullName']
    input_value = []
    for key , value in data_dict.items() :
        dict_put = { 'measurement' : CraneFullName + data_table,
                'tags' : { 'SensorName' : key },
                'fields' : { 'Value' : value }
                }
        input_value.append(dict_put)
    # Write all points for this record in a single batch.
    client.write_points(input_value)
def streaming_set() :
    # Build the Spark streaming context and subscribe to the three
    # raw-protocol Kafka topics via the ZooKeeper quorum.
    sc = SparkContext(appName="PythonStreamingPreprocessing")
    ssc = StreamingContext(sc, SPARK_PERIOD_SEC) # 1 second window
    zkQuorum = ZKQUORUM
    #Dict of (topic_name -> numPartitions) to consume. Each partition is consumed in its own thread.
    topics = {'http': 1, 'mqtt' : 1, 'coap' : 1}
    stream = KafkaUtils.createStream(ssc, zkQuorum, "raw-event-streaming-consumer", topics, {"auto.offset.reset": "largest"})
    return ssc, stream
def msg_parse(data) :
    # Split one raw "|"-delimited record into crane name, timestamp, OBD
    # fields and the final crane sensor payload, then emit a JSON string
    # (helpers come from `msgParser` via the star import).
    data_list = data.split("|")
    CraneFullName = data_list[0]
    Time = data_list[1]
    OBD_DataList = data_list[2:-1]  # NOTE(review): assigned but never used below
    Crane_Data = data_list[-1]
    getDataFromInfluxDB(INFLUXDB, CraneFullName)
    OBD_II_data_dict = {}
    sensor_data_dict = {}
    if Crane_Data == None:
        pass
    else:
        sensor_data_encoded_hex = Crane_Data
        sensor_data_dict = sensor_data_parser(sensor_data_encoded_hex)
        # sensor_data_parser signals a malformed payload with the string
        # 'ignore'; in that case return a format-error record instead.
        if sensor_data_dict == 'ignore' :
            error_dict = {}
            error_dict["FormatError"] = "Crane"
            error_dict["CraneFullName"] = CraneFullName
            jsonString = makeJson(error_dict)
            return jsonString
    data_dict = make_dict(Time, CraneFullName, OBD_II_data_dict , sensor_data_dict)
    jsonString = makeJson(data_dict)
    return jsonString
# RabbitMQ credentials/parameters. NOTE(review): the connection and channel
# creation is commented out; transfer_list() below compensates because its
# broad `except Exception` also catches the initial NameError on `channel`
# and then (re)creates the connection — confirm this lazy init is intended.
credentials = pika.PlainCredentials('dtuser01', '<PASSWORD>')
params = pika.ConnectionParameters(RABITMQ, 5672, '/', credentials)
#queue_connection = pika.BlockingConnection(params)
#channel = queue_connection.channel()
#channel.queue_declare(queue=QUEUE_NAME)
def transfer_list(json_list) :
    """Publish each well-formed JSON record to RabbitMQ and mirror it to InfluxDB.

    Records containing a "FormatError" key are skipped (logged only).
    NOTE(review): ``channel``/``queue_connection`` are never created at module
    import (their creation lines are commented out above), so the very first
    publish raises NameError and the except branch below creates the
    connection.  This "lazy init via exception" works but is fragile --
    confirm it is intentional.
    """
    global channel
    global queue_connection
    for json_data in json_list:
        check_dict = json.loads(json_data)
        if not "FormatError" in check_dict:
            try:
                channel.basic_publish(exchange='',routing_key=QUEUE_TOPIC, body=json_data)
            except Exception as e:
                # Any failure (including a missing channel) rebuilds the
                # connection and retries the publish once.
                print("AMQP publish Exception.." +str(e))
                print("recreate connection...")
                print("JsonData : "+str(json_data))
                queue_connection = pika.BlockingConnection(params)
                channel = queue_connection.channel()
                channel.queue_declare(queue=QUEUE_NAME)
                channel.basic_publish(exchange='',routing_key=QUEUE_TOPIC, body=json_data)
            saveInfluxDBData(json_data)
        else:
            print("ERROR")
def parse_function(rdd) :
    """Parse every raw message in ``rdd`` and forward the results downstream.

    Runs on the driver for each micro-batch; non-empty batches are handed to
    transfer_list() for RabbitMQ/InfluxDB delivery.
    """
    # Fix: collect() materialises the mapped RDD in a single Spark job; the
    # original take(int(count())) launched two jobs (count + take) for the
    # same result.
    json_format_list = rdd.map(msg_parse).collect()
    if json_format_list:
        transfer_list(json_format_list)
def main() :
    """Wire the Kafka stream through msg_parse and run the streaming loop forever."""
    ssc, stream = streaming_set()
    # Kafka records arrive as (key, value) pairs; only the value payload is used.
    raw_data = stream.map(lambda value: value[1])
    raw_data.foreachRDD(lambda rdd : parse_function(rdd) )
    ssc.start()
    ssc.awaitTermination()

if __name__ == "__main__":
    main()
# test_msg = "SHINHAN_Crane_1|2020-11-06 11:09:20|H 135.2000.0-07.6000.0364.9000.001030101999.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9 99.9000.000000000.00000000000000000010000000000"
# test_msg = "SHINHAN_Crane_1|3|2019-12-19 10:38:14|+287.724852+20.527328+22.54130544.3022.4277.4003266909840113017505822271-07.89-2.28+1.61+0.00-07.27+0.4001112209N"
# jsonString = msg_parse(test_msg)
# saveInfluxDBData(jsonString)
| StarcoderdataPython |
38636 | """
A Python module to facilitate JIT-compiled CPU-GPU agnostic compute kernels.
Kernel libraries are collections of functions written in C code that can be
compiled for CPU execution using a normal C compiler via the CFFI module, or
for GPU execution using a CUDA or ROCm compiler via cupy.
"""
from . import library
from . import parse_api
from . import system
| StarcoderdataPython |
19332 | <gh_stars>0
"""Build rules to create C++ code from an Antlr4 grammar."""
def antlr4_cc_lexer(name, src, namespaces = None, imports = None, deps = None, lib_import = None):
    """Generates the C++ source corresponding to an antlr4 lexer definition.

    Args:
      name: The name of the package to use for the cc_library.
      src: The antlr4 g4 file containing the lexer rules.
      namespaces: The namespace used by the generated files. Uses an array to
          support nested namespaces. Defaults to [name].
      imports: A list of antlr4 source imports to use when building the lexer.
      deps: Dependencies for the generated code.
      lib_import: Optional target for importing grammar and token files.
    """
    namespaces = namespaces or [name]
    imports = imports or []
    deps = deps or []
    if not src.endswith(".g4"):
        fail("Grammar must end with .g4", "src")
    if (any([not imp.endswith(".g4") for imp in imports])):
        fail("Imported files must be Antlr4 grammar ending with .g4", "imports")
    file_prefix = src[:-3]
    base_file_prefix = _strip_end(file_prefix, "Lexer")
    out_files = [
        "%sLexer.h" % base_file_prefix,
        "%sLexer.cpp" % base_file_prefix,
    ]
    # NOTE(review): this java_binary has a fixed name; invoking the macro more
    # than once in the same package would declare duplicate targets.
    native.java_binary(
        name = "antlr_tool",
        jvm_flags = ["-Xmx256m"],
        main_class = "org.antlr.v4.Tool",
        runtime_deps = ["@maven//:org_antlr_antlr4_4_7_1"],
    )
    command = ";\n".join([
        # Use the first namespace, we'll add the others afterwards.
        _make_tool_invocation_command(namespaces[0], lib_import),
        _make_namespace_adjustment_command(namespaces, out_files),
    ])
    native.genrule(
        name = name + "_source",
        srcs = [src] + imports,
        outs = out_files,
        cmd = command,
        heuristic_label_expansion = 0,
        tools = ["antlr_tool"],
    )
    native.cc_library(
        name = name,
        srcs = [f for f in out_files if f.endswith(".cpp")],
        hdrs = [f for f in out_files if f.endswith(".h")],
        deps = ["@antlr_cc_runtime//:antlr4_runtime"] + deps,
        copts = [
            "-fexceptions",
        ],
        features = ["-use_header_modules"],  # Incompatible with -fexceptions.
    )
def antlr4_cc_parser(
        name,
        src,
        namespaces = None,
        token_vocab = None,
        imports = None,
        listener = True,
        visitor = False,
        deps = None,
        lib_import = None):
    """Generates the C++ source corresponding to an antlr4 parser definition.

    Args:
      name: The name of the package to use for the cc_library.
      src: The antlr4 g4 file containing the parser rules.
      namespaces: The namespace used by the generated files. Uses an array to
          support nested namespaces. Defaults to [name].
      token_vocab: The antlr g4 file containing the lexer tokens.
      imports: A list of antlr4 source imports to use when building the parser.
      listener: Whether or not to include listener generated files.
      visitor: Whether or not to include visitor generated files.
      deps: Dependencies for the generated code.
      lib_import: Optional target for importing grammar and token files.
    """
    # Output file-name templates ("%s" is replaced by the grammar prefix).
    suffixes = ()
    if listener:
        suffixes += (
            "%sBaseListener.cpp",
            "%sListener.cpp",
            "%sBaseListener.h",
            "%sListener.h",
        )
    if visitor:
        suffixes += (
            "%sBaseVisitor.cpp",
            "%sVisitor.cpp",
            "%sBaseVisitor.h",
            "%sVisitor.h",
        )
    namespaces = namespaces or [name]
    imports = imports or []
    deps = deps or []
    if not src.endswith(".g4"):
        fail("Grammar must end with .g4", "src")
    if token_vocab != None and not token_vocab.endswith(".g4"):
        fail("Token Vocabulary must end with .g4", "token_vocab")
    if (any([not imp.endswith(".g4") for imp in imports])):
        fail("Imported files must be Antlr4 grammar ending with .g4", "imports")
    file_prefix = src[:-3]
    base_file_prefix = _strip_end(file_prefix, "Parser")
    out_files = [
        "%sParser.h" % base_file_prefix,
        "%sParser.cpp" % base_file_prefix,
    ] + _make_outs(file_prefix, suffixes)
    if token_vocab:
        imports.append(token_vocab)
    command = ";\n".join([
        # Use the first namespace, we'll add the others afterwards.
        _make_tool_invocation_command(namespaces[0], lib_import, listener, visitor),
        _make_namespace_adjustment_command(namespaces, out_files),
    ])
    native.genrule(
        name = name + "_source",
        srcs = [src] + imports,
        outs = out_files,
        cmd = command,
        heuristic_label_expansion = 0,
        tools = [
            ":antlr_tool",
        ],
    )
    native.cc_library(
        name = name,
        srcs = [f for f in out_files if f.endswith(".cpp")],
        hdrs = [f for f in out_files if f.endswith(".h")],
        deps = ["@antlr_cc_runtime//:antlr4_runtime"] + deps,
        copts = [
            "-fexceptions",
            # FIXME: antlr generates broken C++ code that attempts to construct
            # a std::string from nullptr. It's not clear whether the relevant
            # constructs are reachable.
            "-Wno-nonnull",
        ],
        features = ["-use_header_modules"],  # Incompatible with -fexceptions.
    )
def _make_outs(file_prefix, suffixes):
    """Expand each '%s'-style suffix template with the grammar file prefix."""
    expanded = []
    for template in suffixes:
        expanded.append(template % file_prefix)
    return expanded
def _strip_end(text, suffix):
    """Return `text` without a trailing `suffix`, or unchanged if absent."""
    if text.endswith(suffix):
        return text[:len(text) - len(suffix)]
    return text
def _to_c_macro_name(filename):
    """Convert a filename into a C-preprocessor-safe macro name.

    Every character is uppercased; anything outside A-Z (digits, dots,
    slashes, ...) is replaced by an underscore.
    """
    # Fix: iterate the string directly instead of indexing via range(len(...)),
    # and uppercase once up front.
    sanitized = "".join([
        c if ("A" <= c) and (c <= "Z") else "_"
        for c in filename.upper()
    ])
    return "ANTLR4_GEN_" + sanitized
def _make_tool_invocation_command(package, lib_import, listener = False, visitor = False):
    """Build the shell command line that runs the Antlr4 code generator."""
    parts = ["$(location :antlr_tool) $(SRCS)"]
    parts.append(" -visitor" if visitor else " -no-visitor")
    parts.append(" -listener" if listener else " -no-listener")
    if lib_import:
        parts.append(" -lib $$(dirname $(location " + lib_import + "))")
    parts.append(" -Dlanguage=Cpp")
    parts.append(" -package " + package)
    parts.append(" -o $(@D)")
    parts.append(" -Xexact-output-dir")
    return "".join(parts)
def _make_namespace_adjustment_command(namespaces, out_files):
    """Build sed commands that wrap the generated code in nested namespaces.

    Antlr is invoked with only namespaces[0]; this post-processing step opens
    and closes the remaining namespaces in headers and rewrites the
    'using namespace' line in sources. Returns the literal string "true"
    (a shell no-op) when there is nothing to adjust.
    """
    if len(namespaces) == 1:
        return "true"
    commands = []
    # One 'namespace X {' opener per extra namespace, joined with escaped
    # newlines so sed's 'a' (append) command inserts them as separate lines.
    extra_header_namespaces = "\\\n".join(["namespace %s {" % namespace for namespace in namespaces[1:]])
    for filepath in out_files:
        if filepath.endswith(".h"):
            # Open the extra namespaces right after the outermost one...
            commands.append("sed -i '/namespace %s {/ a%s' $(@D)/%s" % (namespaces[0], extra_header_namespaces, filepath))
            for namespace in namespaces[1:]:
                # ...and close them just before the outermost closing brace.
                commands.append("sed -i '/} \/\/ namespace %s/i} \/\/ namespace %s' $(@D)/%s" % (namespaces[0], namespace, filepath))
        else:
            # .cpp files only need the fully-qualified 'using namespace'.
            commands.append("sed -i 's/using namespace %s;/using namespace %s;/' $(@D)/%s" % (namespaces[0], "::".join(namespaces), filepath))
    return ";\n".join(commands)
| StarcoderdataPython |
1643065 | <gh_stars>1-10
from tkinter import *
from tkinter.colorchooser import askcolor as askcolour
def askBoxColour(focus):
    """Open a colour-chooser dialog and apply the picked colour to `focus`.

    `askcolour()` returns an ``((r, g, b), "#rrggbb")`` tuple, or
    ``(None, None)`` when the dialog is cancelled.
    """
    focus.chosenBoxColour = askcolour()
    hex_colour = focus.chosenBoxColour[1]
    # Fix: the original wrapped the hex string in StringVar(...) -- whose first
    # positional argument is the Tk *master*, not the value -- and then passed
    # the StringVar object itself to .set().  Set the colour string directly,
    # and skip the update when the user cancelled the dialog.
    if hex_colour is not None:
        focus.selectThemeBoxColourBox.set(hex_colour)
| StarcoderdataPython |
3378115 | <filename>access/tests/test_euclidean.py
import sys
sys.path.append('../..')
import math
import unittest
import numpy as np
import pandas as pd
import geopandas as gpd
from access import access, weights
import util as tu
class TestEuclidean(unittest.TestCase):
    """Tests for euclidean distance generation between demand and supply frames."""

    def setUp(self):
        """Build a one-demand / one-supply model whose centroids are 1 apart."""
        demand_data = pd.DataFrame({'id': [0], 'x': [0], 'y': [0], 'value': [1]})
        demand_grid = gpd.GeoDataFrame(demand_data,
                                       geometry = gpd.points_from_xy(demand_data.x, demand_data.y))
        demand_grid['geometry'] = demand_grid.buffer(.5)

        supply_data = pd.DataFrame({'id': [1], 'x': [0], 'y': [1], 'value': [1]})
        supply_grid = gpd.GeoDataFrame(supply_data,
                                       geometry = gpd.points_from_xy(supply_data.x, supply_data.y))
        supply_grid['geometry'] = supply_grid.buffer(.5)

        cost_matrix = pd.DataFrame({'origin': [0],
                                    'dest'  : [1],
                                    'cost'  : [1]})

        self.model = access(demand_df = demand_grid, demand_index = 'id',
                            demand_value = 'value',
                            supply_df = supply_grid, supply_index = 'id',
                            supply_value = 'value',
                            cost_df = cost_matrix, cost_origin = 'origin',
                            cost_dest = 'dest',
                            cost_name = 'cost',
                            neighbor_cost_df = cost_matrix, neighbor_cost_origin = 'origin',
                            neighbor_cost_dest = 'dest', neighbor_cost_name = 'cost')

    def test_euclidean_point_to_point(self):
        # Centroid-to-centroid distance is exactly 1.
        self.model.create_euclidean_distance(name = 'euclidian', threshold = 2,
                                             centroid_o = True, centroid_d = True)
        actual = self.model.cost_df['euclidian'][0]
        self.assertAlmostEqual(actual, 1)

    def test_euclidean_point_to_poly(self):
        # Centroid to a buffered polygon of radius .5 -> distance .5.
        self.model.create_euclidean_distance(name = 'euclidian', threshold = 2,
                                             centroid_o = True, centroid_d = False)
        actual = self.model.cost_df['euclidian'][0]
        self.assertAlmostEqual(actual, .5)

    def test_euclidean_poly_to_poly(self):
        # The two radius-.5 buffers touch, so polygon-to-polygon distance is 0.
        self.model.create_euclidean_distance(name = 'euclidian', threshold = 2,
                                             centroid_o = False, centroid_d = False)
        actual = self.model.cost_df['euclidian'][0]
        self.assertAlmostEqual(actual, 0)

    def test_euclidean_without_geopandas_demand_dataframe_raises_TypeError(self):
        with self.assertRaises(TypeError):
            self.model.demand_df = self.model.demand_df[['x', 'y', 'value']]
            self.model.create_euclidean_distance()

    def test_euclidean_without_geopandas_supply_dataframe_raises_TypeError(self):
        with self.assertRaises(TypeError):
            self.model.supply_df = self.model.supply_df[['x', 'y', 'value']]
            self.model.create_euclidean_distance()

    def test_euclidean_sets_euclidean_as_default_if_no_default_exists(self):
        delattr(self.model, '_default_cost')
        self.model.create_euclidean_distance()
        actual = hasattr(self.model, '_default_cost')
        # Fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(actual, True)
class TestEuclideanNeighbors(unittest.TestCase):
    """Tests for euclidean neighbor-cost generation between demand locations."""

    def setUp(self):
        """Build a model with two demand locations 1 apart and one supply point."""
        demand_data = pd.DataFrame({'id'   : [0, 1],
                                    'x'    : [0, 0],
                                    'y'    : [0, 1],
                                    'value': [1, 1]})
        demand_grid = gpd.GeoDataFrame(demand_data,
                                       geometry = gpd.points_from_xy(demand_data.x, demand_data.y))
        demand_grid['geometry'] = demand_grid.buffer(.5)

        # Supply is deliberately left as raw points (no buffer).
        supply_data = pd.DataFrame({'id': [1], 'x': [0], 'y': [1], 'value': [1]})
        supply_grid = gpd.GeoDataFrame(supply_data,
                                       geometry = gpd.points_from_xy(supply_data.x, supply_data.y))

        cost_matrix = pd.DataFrame({'origin': [0, 0, 1, 1],
                                    'dest'  : [1, 0, 0, 1],
                                    'cost'  : [1, 0, 1, 0]})

        self.model = access(demand_df = demand_grid, demand_index = 'id',
                            demand_value = 'value',
                            supply_df = supply_grid, supply_index = 'id',
                            supply_value = 'value',
                            cost_df = cost_matrix, cost_origin = 'origin',
                            cost_dest = 'dest',
                            cost_name = 'cost',
                            neighbor_cost_df = cost_matrix, neighbor_cost_origin = 'origin',
                            neighbor_cost_dest = 'dest', neighbor_cost_name = 'cost')

    def test_euclidean_neighbors_centroids(self):
        # Centroid-to-centroid neighbor distance is 1 in both directions.
        self.model.create_euclidean_distance_neighbors(name = 'euclidian', threshold = 2,
                                                       centroid = True)
        actual1 = self.model.neighbor_cost_df['euclidian'][0]
        actual2 = self.model.neighbor_cost_df['euclidian'][2]
        self.assertAlmostEqual(actual1, 1)
        self.assertAlmostEqual(actual2, 1)

    def test_euclidean_neighbors_poly(self):
        # The two radius-.5 buffers touch, so polygon distance is 0.
        self.model.create_euclidean_distance_neighbors(name = 'euclidian', threshold = 2,
                                                       centroid = False)
        actual1 = self.model.neighbor_cost_df['euclidian'][0]
        actual2 = self.model.neighbor_cost_df['euclidian'][2]
        self.assertAlmostEqual(actual1, 0)
        self.assertAlmostEqual(actual2, 0)

    def test_euclidean_neighbors_without_geopandas_demand_dataframe_raises_TypeError(self):
        with self.assertRaises(TypeError):
            self.model.demand_df = self.model.demand_df[['x', 'y', 'value']]
            self.model.create_euclidean_distance_neighbors()

    def test_euclidean_neighbors_sets_euclidean_as_default_if_no_default_exists(self):
        delattr(self.model, '_neighbor_default_cost')
        self.model.create_euclidean_distance_neighbors()
        actual = hasattr(self.model, '_neighbor_default_cost')
        # Fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(actual, True)
1626384 | # -*- coding: utf-8 -*-
"""Top-level package for vmlib."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.3.2'
__license__ = 'MIT'
__docformat__ = 'reStructuredText'
__all__ = ['decorators', 'dirs', 'em', 'ert', 'gis', 'hydrology', 'io',
'math', 'pdf', 'plot', 'project', 'seis', 'stats', 'survey',
'units', 'utils']
# Standard libtrary imports
import logging
import sys
import warnings
# Third party imports
# Local imports
from .decorators import *
from .custom_exceptions import *
from . import dirs
from . import em
from . import ert
from . import gis
from . import hydrology
from . import io
from . import math
from . import pdf
from . import plot
from . import project
from . import seis
from . import stats
from . import survey
from . import units
from . import utils
# Set library-wide shared parameters
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s - %(asctime)s - %(message)s',
datefmt='%H:%M:%S'
)
sys.tracebacklimit = 3
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
| StarcoderdataPython |
146960 | <filename>zipline/data/fx/__init__.py<gh_stars>1-10
from .base import FXRateReader, DEFAULT_FX_RATE
from .in_memory import InMemoryFXRateReader
from .exploding import ExplodingFXRateReader
from .hdf5 import HDF5FXRateReader, HDF5FXRateWriter
__all__ = [
'DEFAULT_FX_RATE',
'ExplodingFXRateReader',
'FXRateReader',
'HDF5FXRateReader',
'HDF5FXRateWriter',
'InMemoryFXRateReader',
] | StarcoderdataPython |
4834651 | from django.urls import path
from . import views
from .views import MyTokenObtainPairView, RegisterView, VerifyEmail
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
# URL routes for the JWT auth API.
# NOTE(review): the stock TokenObtainPairView imported above is unused --
# the custom MyTokenObtainPairView is routed at 'token/' instead.
urlpatterns = [
    path('', views.getRoutes),
    path('token/', MyTokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('register/', RegisterView.as_view(), name='user_register'),
    path('email-verify/', VerifyEmail.as_view(), name='email-verify'),
]
| StarcoderdataPython |
155787 | <reponame>jadkik/emailipy
import emailipy
# Load the paired HTML/CSS fixtures into a dict keyed by file extension.
src = {}
for extension in ["html", "css"]:
    with open("tests/test.{}".format(extension), "r") as f:
        src[extension] = f.read()
def header(text):
    """Print `text` as a section heading with an '=' underline."""
    underline = "=" * len(text)
    print("\n", text, "\n", underline)
# Demo: show the raw inputs, lint the CSS, then show the inlined result.
header("Original CSS")
print(src["css"])

header("Original HTML")
print(src["html"])

header("Results of CSS Linting")
for violation in emailipy.lint_css(src["css"]):
    print(violation)

header("HTML w/ inlined CSS")
# NOTE(review): relies on dict insertion order to pass (html, css) in that
# argument order to inline_css().
print(emailipy.inline_css(*list(src.values())))
116570 | <reponame>vonshednob/metaindex
import argparse
import sys
from metaindex import configuration
from metaindex import stores
from metaindex import indexer
from metaindex import indexers
from metaindex import logger
from metaindex.cache import Cache
from metaindex.find import find
try:
from metaindex.fuse import metaindex_fs
except ImportError:
metaindex_fs = None
def parse_args():
    """Build and evaluate the metaindex command line interface.

    Sub-commands: 'index' (refresh the metadata cache), 'find' (query it)
    and, when FUSE support is available, 'fs' (mount/unmount a filesystem
    view).  Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config',
                        default=None,
                        type=str,
                        help="The configuration file to use. Defaults "
                             f"to {configuration.CONFIGFILE}.")
    parser.add_argument('-l', '--log-level',
                        default='warning',
                        choices=['debug', 'info', 'warning', 'error', 'fatal'],
                        help="The level of logging. Defaults to %(default)s.")
    parser.add_argument('--log-file',
                        default=None,
                        type=str,
                        help="Write the log to this file instead of stderr.")
    parser.add_argument('--list',
                        action="store_true",
                        default=False,
                        help="List all available file indexers")

    subparsers = parser.add_subparsers(dest='command')

    # --- 'index' sub-command ------------------------------------------------
    indexparser = subparsers.add_parser('index')
    indexparser.add_argument('-r', '--recursive',
                             default=False,
                             action='store_true',
                             help='Go through all subdirectories of any paths')
    indexparser.add_argument('-f', '--force',
                             default=False,
                             action="store_true",
                             help="Enforce indexing, even if the files on disk "
                                  "have not changed.")
    indexparser.add_argument('-m', '--flush-missing',
                             default=False,
                             action="store_true",
                             help="Remove files from cache that can no longer be "
                                  "found on disk.")
    indexparser.add_argument('-i', '--index',
                             nargs='*',
                             type=str,
                             help="Path(s) to index. If you provide none, all "
                                  "cached items will be refreshed. If you pass "
                                  "- the files will be read from stdin, one "
                                  "file per line.")
    indexparser.add_argument('-p', '--processes',
                             type=int,
                             default=None,
                             help="Number of indexers to run at the same time. "
                                  "Defaults to the number of CPUs that are available.")
    indexparser.add_argument('-C', '--clear',
                             default=False,
                             action='store_true',
                             help="Remove all entries from the cache")

    # --- 'find' sub-command -------------------------------------------------
    findparser = subparsers.add_parser('find')
    findparser.add_argument('-t', '--tags',
                            nargs='*',
                            help="Print these metadata tags per file, if they "
                                 "are set. If you provide -t, but no tags, all "
                                 "will be shown.")
    findparser.add_argument('-f', '--force',
                            default=False,
                            action='store_true',
                            help="When creating symlinks, accept a non-empty "
                                 "directory if it only contains symbolic links.")
    findparser.add_argument('-l', '--link',
                            type=str,
                            default=None,
                            help="Create symbolic links to all files inside "
                                 "the given directory.")
    findparser.add_argument('-k', '--keep',
                            default=False,
                            action='store_true',
                            help="Together with --force: do not delete existing "
                                 "links but extend with the new search result.")
    findparser.add_argument('query',
                            nargs='*',
                            help="The search query. If the query is - it will "
                                 "be read from stdin.")

    # --- 'fs' sub-command (only when the optional FUSE backend imported) ----
    if metaindex_fs is not None:
        fsparser = subparsers.add_parser('fs')
        fsparser.add_argument('action',
                              choices=('mount', 'unmount', 'umount'),
                              help="The command to control the filesystem")
        fsparser.add_argument('mountpoint',
                              type=str,
                              help="Where to mount the metaindex filesystem.")

    result = parser.parse_args()

    # --list works without a sub-command; otherwise show the help text when
    # no sub-command was given.
    if result.list:
        pass
    elif result.command is None:
        parser.print_help()

    return result
def run():
    """Entry point: dispatch on the parsed sub-command.

    Returns a process exit code (0 on success, -1 when no sub-command ran).
    """
    args = parse_args()
    logger.setup(level=args.log_level.upper(), filename=args.log_file)
    config = configuration.load(args.config)

    if args.list:
        # NOTE(review): reaches into the indexer module's private registry.
        for name in sorted(indexer._registered_indexers.keys()):
            print(name)
        return 0

    if args.command == "index":
        cache = Cache(config)

        if args.clear:
            cache.clear()

        if args.flush_missing:
            cache.cleanup()

        index = args.index
        if index == ['-']:
            # Read file names from stdin, one per line, skipping blanks.
            index = [file_ for file_ in sys.stdin.read().split("\n") if len(file_) > 0]
        elif index == []:
            # No explicit paths -> refresh everything already in the cache.
            index = None

        if args.force:
            cache.expire_metadata(index)

        cache.refresh(index, args.recursive, args.processes)
        return 0

    if args.command == "find":
        return find(config, args)

    if args.command == 'fs' and metaindex_fs is not None:
        return metaindex_fs(config, args)

    return -1
| StarcoderdataPython |
4839333 | <reponame>mayank0926/pidnn-double-pendulum
import torch
import yaml
import numpy as np
import pandas as pd
import os
import sys
from dp_datagen import double_pendulum_data
from dp_pidnn import pidnn_driver
from dp_dataloader import testloader
from dp_ff import ff_driver
# Configuration bootstrap: argv[1] is the command (see __main__ below), an
# optional argv[2] overrides the YAML config file name.
if len(sys.argv) > 2:
    config_filename = sys.argv[2]
else:
    config_filename = "dp_config.yaml"
with open(config_filename, "r") as f:
    all_configs = yaml.safe_load(f)
common_config = all_configs['COMMON'].copy()

# Filling in models from model templates
# Each entry in MODEL_CONFIGS is "<TEMPLATE>_<N>"; a concrete config is
# derived from the template with N data-driven training points.
for instance in common_config['MODEL_CONFIGS']:
    template_name = instance[:instance.rfind('_')]
    training_points = int(instance[(instance.rfind('_')+1):])
    template_config = all_configs[template_name].copy()
    template_config['num_datadriven'] = training_points
    # if template_config['num_collocation'] == -1: template_config['num_collocation'] = 10 * training_points
    template_config['model_name'] = template_name.lower() + '_' + str(training_points)
    all_configs[template_name + '_' + str(training_points)] = template_config
def generate_folders():
    """Create the ./Data and ./Models/SEED_*/Noise_*/<dataset>/<model> tree.

    Creation is best-effort: failures are printed and skipped
    (os.makedirs with exist_ok=True tolerates existing directories).
    """
    datadir = './Data'
    for filename in common_config['DATA_CONFIGS']:
        path = os.path.join(datadir, filename.lower())
        try:
            os.makedirs(path, exist_ok = True)
            print("Successfully created '%s'" % (datadir+'/'+filename.lower()))
        except OSError as error:
            print("'%s' can not be created" % (datadir+'/'+filename.lower()))
    modeldir = './Models'
    for seed in common_config['SEEDS']:
        seeddir = f'SEED_{seed}'
        for noise in common_config['NOISE_CONFIGS']:
            noisedir = f'Noise_{int(100*noise)}'
            for filename in common_config['DATA_CONFIGS']:
                path = os.path.join(modeldir, seeddir + '/' + noisedir + '/' + filename.lower())
                try:
                    os.makedirs(path, exist_ok = True)
                    print("Successfully created '%s'" % (modeldir + '/' + seeddir + '/' + noisedir + '/' + filename.lower()))
                    for modelname in common_config['ALL_MODEL_CONFIGS']:
                        modelpath = os.path.join(path, modelname.lower())
                        os.makedirs(modelpath, exist_ok = True)
                        print("Successfully created '%s'" % (path + '/' + modelname.lower()))
                # NOTE(review): this error message omits the seed directory.
                except OSError as error:
                    print("'%s' can not be created" % (modeldir + '/' + noisedir + '/' + filename.lower()))
    print('Successfully created all directories!')
def generate_all_datasets():
    """Generate train and test datasets for every configured data config.

    Training angles form an evenly spaced grid; test angles are sampled
    uniformly inside each training interval (TESTSET_MULTIPLIER per gap).
    Existing files are skipped when DATASET_CACHING is on.
    """
    for active_data_config_name in common_config['DATA_CONFIGS']:
        active_data_config = all_configs[active_data_config_name].copy()
        active_data_config.update(common_config)
        config = active_data_config

        # --- training set ---
        config['datafile'] = config['TRAINFILE']
        config['theta_range'] = np.linspace(config['TRAIN_THETA_START']*np.pi/180.0, config['TRAIN_THETA_END']*np.pi/180.0, num = config['TRAIN_THETA_VALUES'], dtype=np.float32)
        config['t_range'] = np.arange(start=0.0, stop = config['TIMESTEP']*config['TRAIN_ITERATIONS'], step = config['TIMESTEP'])
        if config['DATASET_CACHING'] and os.path.isfile(config['datadir']+config['datafile']):
            print('Skipping ' + config['datadir'] + config['datafile'])
        else:
            double_pendulum_data(config)

        # --- test set: random angles strictly between the training angles ---
        config['datafile'] = config['TESTFILE']
        new_theta_range = []
        for i in range(len(config['theta_range'])-1):
            new_theta_range.append(np.random.uniform(low=config['theta_range'][i], high=config['theta_range'][i+1], size=(config['TESTSET_MULTIPLIER'],)))
        config['theta_range'] = np.array(new_theta_range).reshape((-1,))
        if config['DATASET_CACHING'] and os.path.isfile(config['datadir']+config['datafile']):
            print('Skipping ' + config['datadir'] + config['datafile'])
        else:
            double_pendulum_data(config)
# generate_all_datasets()
def train_all_models():
    """Train every (seed, noise, dataset, model) combination.

    Skips combinations whose .pt file already exists when MODEL_CACHING is
    on.  Models flagged with 'take_differential_points' are trained with the
    physics-informed driver, the rest with the plain feed-forward driver.
    """
    for seed in common_config['SEEDS']:
        for noise in common_config['NOISE_CONFIGS']:
            for active_data_config_name in common_config['DATA_CONFIGS']:
                active_data_config = all_configs[active_data_config_name].copy()
                active_data_config.update(common_config)
                for active_model_config_name in common_config['MODEL_CONFIGS']:
                    if common_config['MODEL_CACHING'] and os.path.isfile(f'./Models/SEED_{seed}/Noise_{int(100*noise)}/{active_data_config_name.lower()}/{active_model_config_name.lower()}.pt'):
                        print(f'======================= Skipping ./Models/SEED_{seed}/Noise_{int(100*noise)}/{active_data_config_name.lower()}/{active_model_config_name.lower()}.pt =======================')
                        continue
                    # Layered config: model template values override data/common.
                    active_model_config = all_configs[active_model_config_name].copy()
                    active_model_config.update(active_data_config)
                    config = active_model_config
                    config['datafile'] = config['TRAINFILE']
                    config['noise'] = noise
                    config['seed'] = seed
                    config['modeldir'] = 'Models/' + f'SEED_{seed}/' + f'Noise_{int(100*noise)}/' + active_data_config_name.lower() + '/'
                    print(f'======================={active_data_config_name}, {active_model_config_name}, Noise {int(100*noise)}%=======================')
                    if config['take_differential_points']:
                        pidnn_driver(config)
                    else:
                        ff_driver(config)
# train_all_models()
def test_all_models():
    """Evaluate every trained model on its test set and write a CSV summary.

    For each (noise, dataset, model) the per-seed test errors are aggregated
    into mean/std/max/min and a log-domain std, then saved to
    Inferences/inferences.csv.
    """
    dicts_testdata = []
    for noise in common_config['NOISE_CONFIGS']:
        for active_data_config_name in common_config['DATA_CONFIGS']:
            active_data_config = all_configs[active_data_config_name].copy()
            active_data_config.update(common_config)
            for active_model_config_name in common_config['MODEL_CONFIGS']:
                active_model_config = all_configs[active_model_config_name].copy()
                active_model_config.update(active_data_config)
                config = active_model_config
                seed_results = []
                for seed in common_config['SEEDS']:
                    model = torch.load(f'Models/SEED_{seed}/Noise_' + f'{int(100*noise)}/{active_data_config_name.lower()}/' + active_model_config_name.lower() + '.pt')
                    model.eval()
                    seed_results.append(testloader(config, config['datadir'] + config['TESTFILE'], model).item())
                seed_results = np.array(seed_results)
                result = (noise,
                    active_data_config_name,
                    active_model_config_name,
                    np.mean(seed_results),
                    np.std(seed_results),
                    np.max(seed_results),
                    np.min(seed_results),
                    np.std(-np.log(seed_results))
                )
                print(result)
                dicts_testdata.append(result)
    df_testdata = pd.DataFrame(dicts_testdata,\
        columns=['NOISE', 'DATASET', 'MODEL', 'ERR_AVG', 'ERR_STD', 'ERR_MAX', 'ERR_MIN', 'LOG_STD'])
    df_testdata.to_csv(f'Inferences/inferences.csv')
# test_all_models()
# CLI dispatcher: argv[1] selects the pipeline stage to run.
# NOTE(review): a missing argv[1] raises IndexError rather than printing usage.
if __name__=="__main__":
    command = sys.argv[1]
    if command == 'folders':
        generate_folders()
    elif command == 'datasets':
        generate_all_datasets()
    elif command == 'train':
        train_all_models()
    elif command == 'test':
        test_all_models()
    else:
        print('Please input valid keyword')
        sys.exit(1)
| StarcoderdataPython |
3254522 | <gh_stars>1-10
# adapted from http://www.pygame.org/wiki/OBJFileLoader
import os
import cv2
import numpy as np
from visnav.algo import tools
def MTL(filename):
    """Parse a Wavefront .mtl material file.

    Returns a dict mapping material name -> {property: value}, where each
    value is a list of floats except 'map_Kd', which keeps the texture file
    name (first whitespace-delimited token) as a string.

    Raises:
        ValueError: if a property appears before any 'newmtl' statement.
    """
    contents = {}
    mtl = None
    # Fix: open the file in a context manager -- the original iterated an
    # unclosed file object, leaking the handle.  The dead commented-out
    # pygame/OpenGL texture-loading block has been removed.
    with open(filename, "r") as f:
        for line in f:
            if line.startswith('#'):
                continue
            values = line.split()
            if not values:
                continue
            if values[0] == 'newmtl':
                mtl = contents[values[1]] = {}
            elif mtl is None:
                raise ValueError("mtl file doesn't start with newmtl stmt")
            elif values[0] == 'map_Kd':
                # Texture map reference: keep the file name as-is.
                mtl[values[0]] = values[1]
            else:
                mtl[values[0]] = list(map(float, values[1:]))
    return contents
class ShapeModel:
def __init__(self, fname=None, data=None):
self.vertices = None
self.normals = None
self.texcoords = None
self.faces = None
self.texfile = None
self._tex = None
if fname is not None:
self.from_file(fname)
elif data is not None:
self.from_dict(data)
def from_file(self, fname, swapyz=False):
"""Loads a Wavefront OBJ file. """
vertices = []
normals = []
texcoords = []
faces = []
self.texfile = None
dir = os.path.abspath(os.path.dirname(fname))
#material = None
for line in open(fname, "r"):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
v = list(map(float, values[1:4]))
if swapyz:
v = v[0], v[2], v[1]
vertices.append(v)
elif values[0] == 'vn':
v = list(map(float, values[1:4]))
if swapyz:
v = v[0], v[2], v[1]
normals.append(v)
elif values[0] == 'vt':
txc = list(map(float, values[1:3]))
assert len(txc) == 2, 'wrong length texture coordinates'
texcoords.append(txc)
elif values[0] in ('usemtl', 'usemat'):
pass
# material = values[1]
elif values[0] == 'mtllib':
mtl = MTL(os.path.join(dir, values[1]))
material = tuple(mtl.values())[0]
if 'map_Kd' in material:
self.texfile = os.path.join(dir, material['map_Kd'])
elif values[0] == 'f':
fvert = []
ftext = []
# norm = []
for v in values[1:]:
w = v.split('/')
fvert.append(int(w[0])-1)
if len(w) >= 2 and len(w[1]) > 0:
ftext.append(int(w[1])-1)
# if len(w) >= 3 and len(w[2]) > 0:
# norm.append(int(w[2]))
# else:
# norm.append(0)
#self.faces.append((face, norms, texcoords, material))
if len(fvert) == 3:
assert len(ftext) == 0 or len(fvert) == len(ftext), 'Some tex coords missing!'
# normals are calculated for each face => same indices as faces
faces.append((fvert, len(faces), ftext)) # v idx, n idx, t idx
# self.triangles.append(tuple(face))
else:
assert False, 'Not a triangle!'
nf = len(faces)
faces = ShapeModel._face_massage(faces)
self.faces = np.array(faces, dtype=np.uint32)
assert self.faces.shape == (nf*3, 3),\
'wrong shape "faces" array %s should be (nf*3, 3)' % (self.faces.shape,)
self.vertices = np.array(vertices, dtype=np.float32)
assert self.vertices.shape[1:] == (3,),\
'wrong shape "vertices" array %s should be (-1, 3)' % (self.vertices.shape,)
self.texcoords = np.array(texcoords, dtype=np.float32)
assert len(self.texcoords) == 0 or self.texcoords.shape[1:] == (2,),\
'wrong shape "texcoords" array %s should be (-1, 2)' % (self.texcoords.shape,)
self.recalc_norms()
assert self.normals.shape[1:] == (3,), \
'wrong shape "normals" array %s should be (-1, 3)' % (self.normals.shape,)
@staticmethod
def _face_massage(faces):
# (n faces, v&n&t, 3 x vertices) => (nf*3, v&n&t)
faces = [(vx, i, (txs or 0) and txs[j])
for i, (vxs, nrm, txs) in enumerate(faces)
for j, vx in enumerate(vxs)]
return faces
def from_dict(self, data):
self.faces = data['faces']
self.vertices = data['vertices']
self.normals = data.get('normals', [])
self.texcoords = data.get('texcoords', [])
self.texfile = data.get('texfile', None)
self.tex = data.get('tex', None)
# backwards compatibility
if not isinstance(self.faces, np.ndarray):
nf = len(self.faces)
self.faces = np.array(ShapeModel._face_massage(self.faces), dtype=np.uint32)
self.faces[:, 2] -= 1 # tx idxs started from 1
assert self.faces.shape == (nf * 3, 3),\
'wrong shape "faces" array %s should be (nf*3, 3)' % (self.faces.shape,)
self.vertices = np.array(self.vertices, dtype=np.float32)
assert self.vertices.shape[1:] == (3,),\
'wrong shape "vertices" array %s should be (-1, 3)' % (self.vertices.shape,)
self.texcoords = np.array(self.texcoords, dtype=np.float32)
assert self.texcoords.shape[1:] == (2,),\
'wrong shape "texcoords" array %s should be (-1, 2)' % (self.texcoords.shape,)
self.normals = np.array(self.normals, dtype=np.float32)
assert len(self.normals) == 0 or self.normals.shape[1:] == (3,), \
'wrong shape "normals" array %s should be (-1, 3)' % (self.normals.shape,)
if len(self.normals) == 0:
self.recalc_norms()
self.faces = self.faces.astype(np.uint32)
self.vertices = self.vertices.astype(np.float32)
self.texcoords = self.texcoords.astype(np.float32)
self.normals = self.normals.astype(np.float32)
def as_dict(self):
return {'faces': self.faces, 'vertices': self.vertices, 'normals': self.normals,
'texcoords': self.texcoords, 'texfile': self.texfile, 'tex': self.tex}
def recalc_norms(self):
"""
Recalculate normals so that each vertex of a face has the normal of the face. For optional smooth normals,
would need to average normals across the faces each unique vertex belongs to and set faces[:, 1] = faces[:, 0]
"""
# reshape faces to be (nf, 3v, v&n&t)
f, v = self.faces.reshape((-1, 3, 3)), self.vertices
v1, v2, v3 = v[f[:, 0, 0]], v[f[:, 1, 0]], v[f[:, 2, 0]]
n = np.cross(v2 - v1, v3 - v1)
self.normals = n / np.linalg.norm(n, axis=1).reshape((-1, 1))
def pack_all(self):
f, v, n, t = self.faces, self.vertices, self.normals, self.texcoords
t = t if len(t) else np.zeros((len(f), 2), dtype=np.float32)
return np.hstack((v[f[:, 0], :], n[f[:, 1], :], t[f[:, 2], :])).astype(np.float32).tobytes()
def pack_simple(self):
    """Pack only the per-corner vertex positions into raw float32 bytes."""
    vertex_idxs = self.faces[:, 0]
    return self.vertices[vertex_idxs, :].astype(np.float32).tobytes()
def texture_to_vertex_map(self):
    """Map each texcoord index to the vertex index that uses it (-1 if unused)."""
    tx2vx = np.full((len(self.texcoords),), -1, dtype=np.int64)
    for vertex_idx, _normal_idx, tex_idx in self.faces:
        tx2vx[tex_idx] = vertex_idx
    return tx2vx
def export_smooth_faces(self):
    """
    compatible output for a moderngl ext obj

    Smooth-normal export was removed; use export_angular_faces instead.

    Raises:
        NotImplementedError: always.
    """
    # NOTE: the previous `assert False` is stripped when Python runs with -O,
    # which would make this method silently return None; raise explicitly.
    raise NotImplementedError('not supported anymore')
def export_angular_faces(self):
    """
    compatible output for a moderngl ext obj
    """
    # Pad texcoords to 3 components and emit 1-based (vertex, tex, normal) indices.
    padding = np.zeros((len(self.texcoords), 1), dtype=np.float32)
    texcoords3 = np.hstack((self.texcoords, padding))
    faces_1based = self.faces[:, (0, 2, 1)] + 1
    return self.vertices, texcoords3, self.normals, faces_1based
def load_texture(self, normalize=True):
    """Lazily load the texture file as float32 grayscale; return the cached value if present."""
    if self.tex is not None:
        return self.tex
    if self.texfile is None:
        return None
    loaded = cv2.imread(self.texfile, cv2.IMREAD_GRAYSCALE).astype('f4')
    if normalize:
        loaded /= np.max(loaded)  # normalize so that max relative albedo is 1
    self._tex = loaded
    return self._tex
@property
def tex(self):
    # Cached texture array (float32), or None when not loaded/set.
    return self._tex

@tex.setter
def tex(self, new_tex):
    # Store as float32; assigning None drops the cached texture.
    self._tex = None if new_tex is None else new_tex.astype('f4')
| StarcoderdataPython |
52059 | import os
from SSHLibrary import SSHLibrary
from constants import (
HPC_IP,
HPC_USERNAME,
HPC_KEY_PATH,
HPC_HOME_PATH,
)
# The Service broker will use the SSHCommunication class
# to communicate with the HPC environment
class SSHCommunication:
    """Wrapper around SSHLibrary for talking to the HPC environment.

    This is just a dummy implementation.
    TODO: Improve the code and implement appropriate error handling.
    """

    # Shared scp settings used by all file/directory transfer wrappers below.
    _MODE = "0744"
    _SCP = "ON"
    _SCP_PRESERVE_TIMES = True

    def __init__(self):
        """Open an SSH connection to the HPC host using the configured key file."""
        self.__ssh = SSHLibrary()
        if not (os.path.exists(HPC_KEY_PATH) and os.path.isfile(HPC_KEY_PATH)):
            print(f"{HPC_KEY_PATH} file does not exist or is not a readable file!")
            exit(1)
        self.__connect_with_public_key(host=HPC_IP,
                                       username=HPC_USERNAME,
                                       keyfile=HPC_KEY_PATH)
        self.home_path = HPC_HOME_PATH

    # Example code to create a connection with a username and password
    def __connect_login(self, host, username, password):
        self.__connection_index = self.__ssh.open_connection(host=host)
        self.__login = self.__ssh.login(username=username, password=password)

    def __connect_with_public_key(self, host, username, keyfile):
        self.__connection_index = self.__ssh.open_connection(host=host)
        self.__login = self.__ssh.login_with_public_key(username=username,
                                                        keyfile=keyfile,
                                                        allow_agent=True)

    # TODO: Handle the output and return_code instead of just returning them
    def execute_blocking(self, command="ls -la", return_stdout=True, return_stderr=True, return_rc=True):
        """Run *command* and wait for it; returns (stdout, stderr, return_code)."""
        stdout, stderr, return_code = self.__ssh.execute_command(
            command=command,
            return_stdout=return_stdout,
            return_stderr=return_stderr,
            return_rc=return_rc)
        return stdout, stderr, return_code

    def execute_non_blocking(self, command="ls -la"):
        """Start *command* without waiting; does not return anything as expected."""
        self.__ssh.start_command(command)

    # TODO: Improve the wrappers and set the defaults appropriately
    # The next few methods are wrapper functions with simplified parameters;
    # shared scp settings live in the class-level constants above.
    def put_file(self, source, destination):
        self.__ssh.put_file(source=source,
                            destination=destination,
                            mode=self._MODE,
                            scp=self._SCP,
                            scp_preserve_times=self._SCP_PRESERVE_TIMES)

    def put_directory(self, source, destination, recursive=True):
        self.__ssh.put_directory(source=source,
                                 destination=destination,
                                 mode=self._MODE,
                                 recursive=recursive,
                                 scp=self._SCP,
                                 scp_preserve_times=self._SCP_PRESERVE_TIMES)

    def get_file(self, source, destination):
        self.__ssh.get_file(source=source,
                            destination=destination,
                            scp=self._SCP,
                            scp_preserve_times=self._SCP_PRESERVE_TIMES)

    def get_directory(self, source, destination, recursive=True):
        self.__ssh.get_directory(source=source,
                                 destination=destination,
                                 recursive=recursive,
                                 scp=self._SCP,
                                 scp_preserve_times=self._SCP_PRESERVE_TIMES)
| StarcoderdataPython |
123188 | from distutils.core import setup
# Package metadata for the AWS Greengrass group manager CLI.
PACKAGES = [
    'gg_manager',
    'gg_manager.definitions',
    'gg_manager.playbooks',
    'gg_manager.utilities',
]

# Pinned runtime requirements.
REQUIRES = [
    'fire==0.1.3',
    'boto3==1.9.98',
    'botocore==1.12.98',
    'schema==0.7.0',
    'ansible==2.7.10',
]

setup(
    name='gg_group_manager',
    version='1.0.0',
    description='AWS Greengrass Group Manager',
    packages=PACKAGES,
    install_requires=REQUIRES,
    entry_points={
        'console_scripts': [
            'gg-manager=gg_manager:main',
        ],
    },
    include_package_data=True,
)
| StarcoderdataPython |
1752056 | <gh_stars>0
from spade.message import Message
from spade.behaviour import State
from agents import FactoryAgent
from .metadata import *
from copy import deepcopy
import random
from messages import *
from behaviours import WorkingState
MAX_TIMES = 4
def parseSets(string):
    """Parse a stringified proposal of the form "[[[..],[..], ..], 'break', [[..],...]]".

    Returns one list of integer lists ("sigmas") per 'break'-separated part.
    """
    compact = string.replace(" ", "")
    parsed = []
    for part in compact.split("break"):
        part = part.replace("'", "")
        sigmas = []
        for chunk in part.split("]"):
            chunk = chunk.replace("[", "")
            # Drop the comma left over from the list separator, if any.
            if chunk.startswith(","):
                chunk = chunk[1:]
            if chunk:
                sigmas.append([int(num) for num in chunk.split(",")])
        parsed.append(sigmas)
    return parsed
def removeDuplicats(elements):
    """Return the elements in original order with repeated values dropped.

    Works for unhashable items (e.g. lists), so a set cannot be used here.
    """
    return [item for idx, item in enumerate(elements) if item not in elements[:idx]]
class StateComputeConcession(State):
    """Negotiation state in which the agent computes a concession proposal.

    The agent exchanges candidate sigma sets with its active coworkers,
    filters them by risk and total cost, and picks the cheapest acceptable
    concession; otherwise it falls back to its B0 set or becomes inactive.

    Fixes vs. previous revision:
      * ``set_next_sate`` typo (would raise AttributeError on the timeout path).
      * The candidate-combination loop used ``if i == j or (...)`` which set
        ``toAdd = False`` for *every* sigma (the inner loop always reaches
        ``j == i``), so no concession could ever be found; ``i == j`` is now
        skipped instead.
    """

    def __init__(self, agent):
        super().__init__()
        self.fAgent = agent

    def computeRisk(self, my, others):
        """Return the largest risk of conceding from ``my`` relative to ``others``."""
        myUtility = float(self.fAgent.worst - self.fAgent.getMyCost(str(my), my))
        if myUtility == 0.0:
            return 1.0
        # Return the largest risk across the other proposals.
        risk = 0.0
        for o in others:
            utility = float(self.fAgent.worst - self.fAgent.getMyCost(str(o), o))
            temp = (myUtility - utility) / myUtility
            if temp > risk:
                risk = temp
        return risk

    def linkMessages(self, allMessages, allMessagesLen):
        """Reassemble multi-part message bodies per sender, ordered by 'which' metadata."""
        toRet = []  # one concatenated body string per sender
        for k in allMessages.keys():
            length = len(allMessages[k])
            if length != allMessagesLen[k]:
                self.fAgent.logger.log_error("There is a problem with messages!!")
            oneAgents = [None] * length
            for m in allMessages[k]:
                which = int(m.metadata["which"]) - 1
                oneAgents[which] = m.body
            msgAllBody = ""
            for string in oneAgents:
                msgAllBody = msgAllBody + string
            toRet.append(msgAllBody)
        return toRet

    async def run(self):
        self.fAgent.logger.log_info("Computing concession")
        # 1. Ask every active coworker to evaluate our current proposal; we
        #    must propose something at least as good for our mates as before,
        #    and strictly better for at least one agent.
        for co in self.fAgent.activeCoworkers:
            msg = SetsMessage(to=co, body=self.fAgent.currentSigma)  # Instantiate the message
            msg.set_metadata("performative", "request")
            msg.set_metadata("language", "list")
            msg.body = str(self.fAgent.currentSigma)
            await self.send(msg)

        waitingCoworkers = deepcopy(self.fAgent.activeCoworkers)
        matesPropositionsAll = dict((worker, []) for worker in self.fAgent.activeCoworkers)
        matesPropositionsAllCount = dict((worker, -1) for worker in self.fAgent.activeCoworkers)
        matesPropositions = []
        counter = 0
        while len(waitingCoworkers) > 0:
            msg = await self.receive(timeout=5)
            if msg is not None:
                if msg.metadata["performative"] == "inform" and msg.metadata["language"] == "list" and str(msg.sender) in waitingCoworkers:
                    sender = str(msg.sender)
                    if matesPropositionsAllCount[sender] == -1:
                        # The first message from a sender carries the part count.
                        matesPropositionsAllCount[sender] = int(msg.metadata["howMany"])
                    matesPropositionsAll[sender].append(msg)
                    if len(matesPropositionsAll[sender]) == matesPropositionsAllCount[sender]:
                        waitingCoworkers.remove(sender)
                else:
                    self.fAgent.saveMessage(msg)
            else:
                counter = counter + 1
                self.fAgent.logger.log_info("we have not received and msgs from 5s")
                if counter > MAX_TIMES:
                    # Too many silent rounds: complain to the manager and restart.
                    self.fAgent.logger.log_info(f"something is wrong with {len(waitingCoworkers)}")
                    for c in waitingCoworkers:
                        alarmMsg = WatchdogMessage(to=self.fAgent.manager, body=str(WorkingState.COMPLAINT) + " " + c)
                        await self.send(alarmMsg)
                    self.set_next_state(STATE_PROPOSE)
                    return

        # We received all messages: reassemble and parse them.
        allMsg = self.linkMessages(matesPropositionsAll, matesPropositionsAllCount)
        for s in allMsg:
            matesPropositions.append(parseSets(s))

        # matesPropositions[i] == [better_sigmas, equal_sigmas] for coworker i.
        # Keep only elements that appear (better or equal) for every other
        # coworker while being strictly better for the proposing one.
        sigmas = []
        for i in range(len(matesPropositions)):
            for sigma in matesPropositions[i][0]:  # sigmas which are better for coworker i
                toAdd = True
                for j in range(len(matesPropositions)):
                    if i == j:
                        continue  # FIX: previously this set toAdd=False, disabling the search
                    if sigma not in matesPropositions[j][0] and sigma not in matesPropositions[j][1]:
                        toAdd = False
                        break
                if toAdd:
                    sigmas.append(sigma)
        sigmas = removeDuplicats(sigmas)

        # 2. We must not propose anything we proposed previously.
        for prev in self.fAgent.myProposals:
            if prev in sigmas:
                sigmas.remove(prev)

        # 3. Keep only sigmas that lower the risk for at least one coworker.
        others = []
        sigmasWithGoodRisk = []
        for co in self.fAgent.activeCoworkers:
            others.append(self.fAgent.matesProposals[co][len(self.fAgent.matesProposals[co]) - 1])
        for s in sigmas:
            myRisk = self.computeRisk(s, others)
            found = False
            for co in self.fAgent.activeCoworkers:
                body = []
                body.append(s)
                body.append('break')
                body.append(self.fAgent.matesProposals[co][len(self.fAgent.matesProposals[co]) - 1])
                msg = RiskMessage(to=co, body=body)
                msg.set_metadata("performative", "request")
                msg.set_metadata("language", "list")
                await self.send(msg)
                received = False
                newRisk = 0.0
                counter = 0
                while not received:
                    resp = await self.receive(timeout=10)
                    if resp is not None:
                        if resp.metadata["performative"] == "inform" and resp.metadata["language"] == "float":
                            received = True
                            newRisk = float(resp.body)
                        else:
                            self.fAgent.saveMessage(resp)
                    else:
                        counter = counter + 1
                        if counter < MAX_TIMES:
                            await self.send(msg)
                        else:
                            self.fAgent.logger.log_error(f"something is wrong with {co}")
                            alarmMsg = WatchdogMessage(to=self.fAgent.manager, body=str(WorkingState.COMPLAINT) + " " + co)
                            await self.send(alarmMsg)
                            # FIX: was `set_next_sate`, an AttributeError at runtime.
                            self.set_next_state(STATE_PROPOSE)
                            return
                if newRisk < myRisk:
                    # We found something that might be a concession!
                    found = True
                    break
            if found:
                sigmasWithGoodRisk.append(s)

        # 4./5. Among the remaining sigmas, keep only those cheaper than the
        # current proposal and pick the one(s) with the smallest total cost.
        myOldCost = self.fAgent.getCostAll(str(self.fAgent.currentSigma))
        if myOldCost == -1:
            self.fAgent.logger.log_error("Error, my old cost is equal to -1")  # this situation should not occur
        lowestCostFound = myOldCost  # if we find something lower, we pick it
        bestSigmas = []
        for s in sigmasWithGoodRisk:
            tempCost = self.fAgent.getMyCost(str(s), s)
            costAllTemp = self.fAgent.getCostAll(str(s))
            if costAllTemp != -1:
                # Total cost is already cached.
                tempCost = costAllTemp
            else:
                # Ask every coworker for their cost of `s` and sum the answers.
                for co in self.fAgent.coworkers:
                    msg = CostMessage(to=co, body=s)
                    msg.set_metadata("save", "False")
                    await self.send(msg)
                    gotResponse = False
                    counter = 0
                    while not gotResponse:
                        resp = await self.receive(timeout=5)
                        if resp is None:
                            counter = counter + 1
                            if counter < MAX_TIMES:
                                await self.send(msg)
                            else:
                                self.fAgent.logger.log_error("Error")  # TODO: raise a proper exception here
                                alarmMsg = WatchdogMessage(to=self.fAgent.manager, body=str(WorkingState.COMPLAINT) + " " + co)
                                await self.send(alarmMsg)
                                self.set_next_state(STATE_PROPOSE)
                                return
                        else:
                            if resp.metadata["performative"] == "inform" and resp.metadata["language"] == "int" and str(resp.sender) == co:
                                tempCost = tempCost + int(resp.body)
                                gotResponse = True
                            else:
                                self.fAgent.saveMessage(resp)
                self.fAgent.setCostAll(str(s), tempCost)
            if tempCost == lowestCostFound and tempCost < myOldCost:
                bestSigmas.append(s)
            elif tempCost < lowestCostFound:
                bestSigmas.clear()
                bestSigmas.append(s)
                lowestCostFound = tempCost

        # Finally we have the set of true concessions — choose one of them.
        if len(bestSigmas) > 0:
            self.fAgent.logger.log_success("We have true concession!!")
            chosen = random.choice(bestSigmas)
            if chosen in self.fAgent.B0:
                self.fAgent.B0.remove(chosen)
            if self.fAgent.getMyCost(str(self.fAgent.myWorstProposal), self.fAgent.myWorstProposal) < self.fAgent.getMyCost(str(chosen), chosen):
                self.fAgent.myWorstProposal = chosen
            self.fAgent.currentSigma = chosen
            self.set_next_state(STATE_WAIT_FOR_NEXT_ROUND)
        elif len(self.fAgent.B0) > 0:
            self.fAgent.logger.log_warning("No concession found, need to check B0 set")
            chosen = random.choice(self.fAgent.B0)
            self.fAgent.B0.remove(chosen)
            if self.fAgent.getMyCost(str(self.fAgent.myWorstProposal), self.fAgent.myWorstProposal) < self.fAgent.getMyCost(str(chosen), chosen):
                self.fAgent.myWorstProposal = chosen
            self.fAgent.currentSigma = chosen
            self.set_next_state(STATE_WAIT_FOR_NEXT_ROUND)
        else:
            self.set_next_state(STATE_NOT_ACTIVE)
# new state to add!!
| StarcoderdataPython |
169259 | <gh_stars>0
import warnings
import os
import re
import urllib
import requests
import itertools
import json
from tqdm import tqdm
from .photomosaic import options
# Public photo-page URL prefix; also used to build owner attribution links.
PUBLIC_URL = "https://www.flickr.com/photos/"
# Flickr REST API endpoint.
API_URL = 'https://api.flickr.com/services/rest/'
# Static-image URL/file templates, filled with fields from API photo records.
PATH = "http://farm{farm}.staticflickr.com/{server}/"
NAME = "{id}_{secret}_b.jpg"
def _flickr_request(**kwargs):
    """Issue a Flickr REST API call and return its decoded JSON body."""
    params = dict(api_key=options['flickr_api_key'],
                  format='json',
                  nojsoncallback=1,
                  **kwargs)
    return requests.get(API_URL, params=params).json()
def from_search(text, dest, cutoff=4000, license=None):
    """
    Download photos matching a search query and the specified license(s).

    Parameters
    ----------
    text : string
        Search query
    dest : string
        Output directory
    cutoff : integer or None, optional
        Max number of images to download. By default, 4000 (Flickr's own
        limit on search results). If None, all matches up to Flickr's max
        will be downloaded.
    license : list or None
        List of license codes documented by Flickr at
        https://www.flickr.com/services/api/flickr.photos.licenses.getInfo.html
        If None, photomosaic defaults to ``[1, 2, 4, 5, 7, 8]``. See link for
        details.
    """
    dest = os.path.expanduser(dest)
    if license is None:
        license = [1, 2, 4, 5, 7, 8]
    os.makedirs(dest, exist_ok=True)
    # Running download count across pages; note next(total) both reads and
    # advances the counter.
    total = itertools.count(0)
    raw_licenses = _flickr_request(method='flickr.photos.licenses.getInfo')
    # id -> {name, url}, used below to attach attribution metadata.
    licenses = {item.pop('id'): item
                for item in raw_licenses['licenses']['license']}
    for page in itertools.count(1):
        response = _flickr_request(
            method='flickr.photos.search',
            license=','.join(map(str, license)),
            extras='owner_name,license',
            per_page=500,  # the max allowed value, to conserve our queries
            text=text,
            content_type=1,  # photos only
            page=page
        )
        if response.get('stat') != 'ok':
            # If we fail requesting page 1, that's an error. If we fail
            # requesting page > 1, we're just out of photos.
            if page == 1:
                raise RuntimeError("response: {}".format(response))
            break
        photos = response['photos']['photo']
        pbar = tqdm(photos, desc='downloading page {}'.format(page))
        for photo in pbar:
            if (cutoff is not None) and (next(total) > cutoff):
                pbar.close()
                return
            # Download and save image.
            url = (PATH + NAME).format(**photo)
            filename = (NAME).format(**photo)
            filepath = os.path.join(dest, filename)
            _try_retrieve_warn_failure(url, filepath)
            # Save metadata for attribution.
            metapath = os.path.splitext(filepath)[0] + '.json'
            with open(metapath, 'w') as metafile:
                # Collect attribution info as specified by Creative Commons
                # best practices:
                # https://wiki.creativecommons.org/wiki/best_practices_for_attribution#Title.2C_Author.2C_Source.2C_License
                license_id = photo['license']
                attribution = {'title': photo['title'],
                               'owner': photo['owner'],
                               'owner_name': photo['ownername'],
                               'owner_url': PUBLIC_URL + photo['ownername'],
                               'license_url': licenses[license_id]['url'],
                               'license_name': licenses[license_id]['name'],
                               'license': license_id}
                json.dump(attribution, metafile)
def _get_photoset(photoset_id, nsid, dest):
    """Download every photo of a photoset into *dest*, page by page.

    Each image is saved alongside a JSON file containing the raw photo
    record returned by the API (for attribution).
    """
    dest = os.path.expanduser(dest)
    os.makedirs(dest, exist_ok=True)
    for page in itertools.count(1):
        response = _flickr_request(
            method='flickr.photosets.getPhotos',
            photoset_id=photoset_id,
            nsid=nsid,
            content_type=1,  # photos only
            page=page
        )
        if response.get('stat') != 'ok':
            # If we fail requesting page 1, that's an error. If we fail
            # requesting page > 1, we're just out of photos.
            if page == 1:
                raise RuntimeError("response: {}".format(response))
            break
        photos = response['photoset']['photo']
        for photo in tqdm(photos, desc='downloading page {}'.format(page)):
            url = (PATH + NAME).format(**photo)
            filename = (NAME).format(**photo)
            filepath = os.path.join(dest, filename)
            _try_retrieve_warn_failure(url, filepath)
            # Save metadata for attribution.
            metapath = os.path.splitext(filepath)[0] + '.json'
            with open(metapath, 'w') as metafile:
                json.dump(photo, metafile)
def _try_retrieve_warn_failure(url, filepath):
errors = []
for _ in range(3):
try:
urllib.request.urlretrieve(url, filepath)
except urllib.error.HTTPError as error:
errors.append(error)
continue # try again
else:
break
else:
# tried 3 times, failed every time
warnings.warn("Skipping {}: {}".format(url, errors))
def from_url(url, dest):
    """
    Download an album ("photoset") from its url.

    There is no programmatic license-checking here; that is up to the user.

    Parameters
    ----------
    url : string
        e.g., https://www.flickr.com/photos/<username>/sets/<photoset_id>
    dest : string
        Output directory
    """
    dest = os.path.expanduser(dest)
    m = re.match(PUBLIC_URL + "(.*)/sets/([0-9]+)", url)
    if m is None:
        raise ValueError("""Expected URL like:
https://www.flickr.com/photos/<username>/sets/<photoset_id>""")
    username, photoset_id = m.groups()
    # Resolve the public username to Flickr's internal NSID before fetching.
    response = _flickr_request(method="flickr.urls.lookupUser",
                               url=PUBLIC_URL + username)
    nsid = response['user']['username']['_content']
    return _get_photoset(photoset_id, nsid, dest)
| StarcoderdataPython |
177061 | <filename>1. Variables and Data Types/exercises.py
"""
=========== Exercise 1 =============
Using a list, create a shopping list of 5 items. Then
print the whole list, and then each item individually.
"""
shopping_list = [] # Fill in with some values
print(shopping_list) # Print the whole list
print() # Figure out how to print individual values
"""
=========== Exercise 2 =============
Find something that you can eat that has nutrition
facts on the label. Fill in the dictionary below with
the info on the label and try printing specific information.
If you can't find anything nearby you can use this example: https://www.donhummertrucking.com/media/cms/Nutrition_Facts_388AE27A88B67.png
"""
# When ready to work on these exercises uncomment below code
# nutrition_facts = {} # Fill in with the nutrition facts from the label
# print(nutrition_facts) # Print all the nutrition facts
# print(nutrition_facts["value"]) # Uncomment this line and pick a value to print individually
"""
=========== Exercise 3 =============
Python has a function built in to allow you to
take input from the command line and store it.
The function is called input() and it takes one
argument, which is the string to display when
asking the user for input.
Here is an example:
```
>> name = input('What is your name?: ')
>> print(name)
```
Using the information about type casting take an input
from the command line (which is always a string), convert
it to an int and then double it and print it.
i.e. if the user provides 21 then the program should print 42
"""
# When ready to work on these exercises uncomment below code
# age = input('What is your age?: ')
# print(age * 2) # Find a way to convert the age to an int and multiply by 2
| StarcoderdataPython |
129017 | <gh_stars>1-10
from keras.models import Model
from keras.optimizers import SGD, Adam
from keras.layers import Input, Dense, Dropout, Flatten, Lambda, Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.initializers import RandomNormal
def create_model(filter_kernels, dense_outputs, maxlen, vocab_size, nb_filter, cat_output):
    """Build and compile a character-level 1D-CNN text classifier.

    Args:
        filter_kernels: kernel sizes for the six Conv1D layers (len >= 6).
        dense_outputs: width of the two fully-connected layers.
        maxlen: fixed input length (number of character ids per sample).
        vocab_size: character vocabulary size (one-hot depth).
        nb_filter: number of filters used in every conv layer.
        cat_output: number of output classes (softmax width).

    Returns:
        A compiled Keras Model (categorical cross-entropy loss, Adam optimizer).
    """
    initializer = RandomNormal(mean=0.0, stddev=0.05, seed=None)

    # Define what the input shape looks like
    inputs = Input(shape=(maxlen,), dtype='int64')

    # Option one:
    # Uncomment following code to use a lambda layer to create a onehot encoding of a sequence of characters on the fly.
    # Holding one-hot encodings in memory is very inefficient.
    # The output_shape of embedded layer will be: batch x maxlen x vocab_size
    #
    import tensorflow as tf

    def one_hot(x):
        # Expand int64 character ids into float32 one-hot vectors of depth vocab_size.
        return tf.one_hot(x, vocab_size, on_value=1.0, off_value=0.0, axis=-1, dtype=tf.float32)

    def one_hot_outshape(in_shape):
        return in_shape[0], in_shape[1], vocab_size

    embedded = Lambda(one_hot, output_shape=one_hot_outshape)(inputs)

    # Option two:
    # Or, simply use Embedding layer as following instead of use lambda to create one-hot layer
    # Think of it as a one-hot embedding and a linear layer mashed into a single layer.
    # See discussion here: https://github.com/keras-team/keras/issues/4838
    # Note this will introduce one extra layer of weights (of size vocab_size x vocab_size = 69*69 = 4761)
    # embedded = Embedding(input_dim=vocab_size, output_dim=vocab_size)(inputs)

    # All the convolutional layers... (pooling only after layers 0, 1 and 5)
    conv = Convolution1D(filters=nb_filter, kernel_size=filter_kernels[0], kernel_initializer=initializer,
                         padding='valid', activation='relu',
                         input_shape=(maxlen, vocab_size))(embedded)
    conv = MaxPooling1D(pool_size=3)(conv)

    conv1 = Convolution1D(filters=nb_filter, kernel_size=filter_kernels[1], kernel_initializer=initializer,
                          padding='valid', activation='relu')(conv)
    conv1 = MaxPooling1D(pool_size=3)(conv1)

    conv2 = Convolution1D(filters=nb_filter, kernel_size=filter_kernels[2], kernel_initializer=initializer,
                          padding='valid', activation='relu')(conv1)

    conv3 = Convolution1D(filters=nb_filter, kernel_size=filter_kernels[3], kernel_initializer=initializer,
                          padding='valid', activation='relu')(conv2)

    conv4 = Convolution1D(filters=nb_filter, kernel_size=filter_kernels[4], kernel_initializer=initializer,
                          padding='valid', activation='relu')(conv3)

    conv5 = Convolution1D(filters=nb_filter, kernel_size=filter_kernels[5], kernel_initializer=initializer,
                          padding='valid', activation='relu')(conv4)
    conv5 = MaxPooling1D(pool_size=3)(conv5)
    conv5 = Flatten()(conv5)

    # Two dense layers with dropout of .5
    z = Dropout(0.5)(Dense(dense_outputs, activation='relu')(conv5))
    z = Dropout(0.5)(Dense(dense_outputs, activation='relu')(z))

    # Output dense layer with softmax activation
    pred = Dense(cat_output, activation='softmax', name='output')(z)

    model = Model(inputs=inputs, outputs=pred)

    sgd = SGD(lr=0.01, momentum=0.9)
    adam = Adam(lr=0.001)  # Feel free to use SGD above. I found Adam with lr=0.001 is faster than SGD with lr=0.01

    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
| StarcoderdataPython |
161420 | <gh_stars>1-10
# noqa: D100
from dataclasses import dataclass
from typing import Iterable, Optional
import warnings
from .vessel_class import VesselClass
from ._internals import contains_caseless
@dataclass(eq=False)
class VesselClassFilter:
    """A filter used to find specific vessel classes.

    Attributes:
        name_like: Used to find vessel classes by name. When specified, vessel
            classes whose names partially match (contain) the attribute's value
            will be returned. Matching is case-insensitive.
    """

    name_like: Optional[str] = None

    def __post_init__(self) -> None:  # noqa: D105
        warnings.warn(
            "signal_ocean.VesselClassFilter is deprecated and will be removed "
            "in a future version of the SDK. Please use "
            "tonnage_list.VesselClassFilter with tonnage_list.TonnageListAPI "
            "instead.",
            DeprecationWarning,
            stacklevel=3,
        )

    def _apply(
        self, vessel_classes: Iterable[VesselClass]
    ) -> Iterable[VesselClass]:
        # Lazy generator; yields exactly the classes that match this filter.
        return (vc for vc in vessel_classes if self.__does_class_match(vc))

    def __does_class_match(self, vessel_class: VesselClass) -> bool:
        if not self.name_like:
            # An unset filter matches everything.
            return True
        return contains_caseless(self.name_like, vessel_class.name)
| StarcoderdataPython |
1743400 | from ....data.pancreas import load_pancreas
from ....tools.decorators import dataset
from ._utils import generate_synthetic_dataset
@dataset("Pancreas (average)")
def pancreas_average(test=False):
adata = load_pancreas(test=test)
adata.obs["label"] = adata.obs["celltype"]
adata_spatial = generate_synthetic_dataset(adata, sim_type="avg")
return adata_spatial
@dataset("Pancreas (cell)")
def pancreas_cell(test=False):
adata = load_pancreas(test=test)
adata.obs["label"] = adata.obs["celltype"]
adata_spatial = generate_synthetic_dataset(adata, sim_type="cell")
return adata_spatial
| StarcoderdataPython |
1775378 | <reponame>braceal/DeepDriveMD<filename>examples/cvae_dbscan/scripts/cvae.py
import os
import click
import numpy as np
from keras.optimizers import RMSprop
from molecules.utils import open_h5
from molecules.ml.unsupervised import (VAE, EncoderConvolution2D,
DecoderConvolution2D,
EncoderHyperparams,
DecoderHyperparams)
from molecules.ml.unsupervised.callbacks import (EmbeddingCallback,
LossHistory)
from deepdrive.utils.validators import validate_positive
@click.command()
@click.option('-i', '--input', 'input_path', required=True,
              type=click.Path(exists=True),
              help='Path to file containing preprocessed contact matrix data')
@click.option('-o', '--out', 'out_path', required=True,
              type=click.Path(exists=True),
              help='Output directory for model data')
@click.option('-m', '--model_id', required=True,
              help='Model ID in for file naming')
@click.option('-g', '--gpu', default=0, type=int,
              callback=validate_positive,
              help='GPU id')
@click.option('-e', '--epochs', default=100, type=int,
              callback=validate_positive,
              help='Number of epochs to train for')
@click.option('-b', '--batch_size', default=512, type=int,
              callback=validate_positive,
              help='Batch size for training')
@click.option('-d', '--latent_dim', default=3, type=int,
              callback=validate_positive,
              help='Number of dimensions in latent space')
def main(input_path, out_path, model_id, gpu, epochs, batch_size, latent_dim):
    """Train a convolutional VAE on contact-map data and save its artifacts.

    Reads contact maps from an HDF5 file, trains the VAE on an 80/20
    train/validation split, and writes weights, hyperparameters, latent
    embeddings and loss curves into *out_path*, all tagged with *model_id*.
    """
    # Set CUDA environment variables (pin the run to the requested GPU id)
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)

    with open_h5(input_path) as input_file:
        # Access contact matrix data from h5 file
        data = np.array(input_file['contact_maps'])

    # Shuffle data before train validation split
    np.random.shuffle(data)

    # 80-20 train validation split index
    split = int(0.8 * len(data))

    # Partition input data into 80-20 train valid split
    train, valid = data[:split], data[split:]

    # Get shape of an individual contact matrix
    # (ignore total number of matrices)
    input_shape = train.shape[1:]

    # Set model hyperparameters for encoder and decoder
    shared_hparams = {'num_conv_layers': 4,
                      'filters': [64, 64, 64, 64],
                      'kernels': [3, 3, 3, 3],
                      'strides': [1, 2, 1, 1],
                      'num_affine_layers': 1,
                      'affine_widths': [128],
                      'latent_dim': latent_dim
                      }

    affine_dropouts = [0]

    encoder_hparams = EncoderHyperparams(affine_dropouts=affine_dropouts,
                                         **shared_hparams)
    decoder_hparams = DecoderHyperparams(**shared_hparams)

    encoder = EncoderConvolution2D(input_shape=input_shape,
                                   hyperparameters=encoder_hparams)

    # Get shape attributes of the last encoder layer to define the decoder
    encode_conv_shape, num_conv_params = encoder.get_final_conv_params()

    decoder = DecoderConvolution2D(output_shape=input_shape,
                                   enc_conv_params=num_conv_params,
                                   enc_conv_shape=encode_conv_shape,
                                   hyperparameters=decoder_hparams)

    optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

    cvae = VAE(input_shape=input_shape,
               encoder=encoder,
               decoder=decoder,
               optimizer=optimizer)

    # Define callbacks to report model performance for analysis
    embed_callback = EmbeddingCallback(train, cvae)
    loss_callback = LossHistory()

    cvae.train(data=train, validation_data=valid,
               batch_size=batch_size, epochs=epochs,
               callbacks=[embed_callback, loss_callback])

    # Define file paths to store model performance and weights
    ae_weight_path = os.path.join(out_path, f'ae-weight-{model_id}.h5')
    encoder_weight_path = os.path.join(out_path, f'encoder-weight-{model_id}.h5')
    encoder_hparams_path = os.path.join(out_path, f'encoder-hparams-{model_id}.pkl')
    decoder_hparams_path = os.path.join(out_path, f'decoder-hparams-{model_id}.pkl')
    embed_path = os.path.join(out_path, f'embed-{model_id}.npy')
    idx_path = os.path.join(out_path, f'embed-idx-{model_id}.npy')
    loss_path = os.path.join(out_path, f'loss-{model_id}.npy')
    val_loss_path = os.path.join(out_path, f'val-loss-{model_id}.npy')

    # Save weights, hyperparameters, and model performance.
    # Save encoder weights separately so the full model doesn't need to be
    # loaded during the outlier detection stage.
    cvae.save_weights(ae_weight_path)
    encoder.save_weights(encoder_weight_path)
    encoder_hparams.save(encoder_hparams_path)
    decoder_hparams.save(decoder_hparams_path)
    embed_callback.save(embed_path=embed_path, idx_path=idx_path)
    loss_callback.save(loss_path=loss_path, val_loss_path=val_loss_path)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3374311 | <gh_stars>0
# coding=utf-8
from lxml import etree
'''
Super class for csv-files rows. Implemented child classes at the moment are Invoice and InvoiceRow classes.
'''
class Row(object):
    """Base class for CSV-file rows.

    Implemented child classes at the moment are the Invoice and
    InvoiceRow classes.
    """

    def __init__(self, records, xmlRoot):
        self._root = xmlRoot
        self.records = records  # column names, in output order
        self.fields = dict()    # column name -> value parsed from the XML
        self._parse()

    def toCSV(self):
        """Render the row as a semicolon-terminated CSV fragment."""
        return "".join(self.__getRecord(name) + ';' for name in self.records)

    # get the data of given column in the row this specifies.
    def __getRecord(self, record):
        return self.fields.get(record, '')

    def _parse(self):
        """Parse self._root into self.fields; implemented by child classes."""
        pass

    # set data of the XML element `elemName` into the column `recordName`.
    def _setElem(self, elemName, recordName):
        elem = self._root.find(elemName)
        if elem is not None:
            self.fields[recordName] = elem.text
| StarcoderdataPython |
1747610 | from starlette.testclient import TestClient
from starlette.status import HTTP_200_OK
from mydata.personal_data import app
def test_status_should_return_200():
    """The /info endpoint must answer with HTTP 200."""
    response = TestClient(app).get("/info")
    assert response.status_code == HTTP_200_OK
def test_retun_data_in_json():
    """The /info endpoint must declare a JSON content type."""
    response = TestClient(app).get("/info")
    assert response.headers["Content-Type"] == "application/json"
def test_response_should_be_a_list():
    """The /info endpoint must return a JSON array."""
    payload = TestClient(app).get("/info").json()
    assert isinstance(payload, list)
3241196 | from dataclasses import dataclass
import json
from typing import Union
from pathlib import Path
import pytest
from bitcoin_client.ledger_bitcoin import TransportClient, Client, Chain, createClient
from speculos.client import SpeculosClient
import os
import re
import random
random.seed(0) # make sure tests are repeatable
# path with tests
conftest_folder_path: Path = Path(__file__).parent
ASSIGNMENT_RE = re.compile(r'^\s*([a-zA-Z_][a-zA-Z_0-9]*)\s*=\s*(.*)$', re.MULTILINE)
def get_app_version() -> str:
    """Read APPVERSION_{M,N,P} from the app Makefile and join them as "M.N.P"."""
    makefile_path = conftest_folder_path.parent / "Makefile"
    if not makefile_path.is_file():
        raise FileNotFoundError(f"Can't find file: '{makefile_path}'")

    assignments = dict(ASSIGNMENT_RE.findall(makefile_path.read_text()))
    return ".".join(assignments[f"APPVERSION_{part}"] for part in "MNP")
def pytest_addoption(parser):
    """Register this test suite's boolean command-line flags with pytest."""
    for flag in ("--hid", "--headless", "--enableslowtests"):
        parser.addoption(flag, action="store_true")
@pytest.fixture(scope="module")
def sw_h_path():
    """Path to the app's status-word header, failing fast if it is missing."""
    # sw.h should be in src/boilerplate/sw.h
    sw_h_path = conftest_folder_path.parent / "src" / "boilerplate" / "sw.h"
    if not sw_h_path.is_file():
        raise FileNotFoundError(f"Can't find sw.h: '{sw_h_path}'")
    return sw_h_path
@pytest.fixture(scope="module")
def app_version() -> str:
    """App version string parsed from the Makefile (see get_app_version)."""
    return get_app_version()


@pytest.fixture
def hid(pytestconfig):
    # True when the suite should talk to a real device over HID instead of Speculos.
    return pytestconfig.getoption("hid")


@pytest.fixture
def headless(pytestconfig):
    # True to run without any interactive UI.
    return pytestconfig.getoption("headless")


@pytest.fixture
def enable_slow_tests(pytestconfig):
    # Opt-in flag for long-running tests.
    return pytestconfig.getoption("enableslowtests")
@pytest.fixture
def comm(request, hid, app_version: str) -> Union[TransportClient, SpeculosClient]:
    """Yield a transport to the app: HID when --hid is set, else a Speculos
    emulator started from bin/app.elf (stopped again on teardown)."""
    if hid:
        client = TransportClient("hid")
    else:
        # We set the app's name before running speculos in order to emulate the expected
        # behavior of the SDK's GET_VERSION default APDU.
        # The app name is 'Bitcoin' or 'Bitcoin Test' for mainnet/testnet respectively.
        # We leave the speculos default 'app' to avoid relying on that value in tests.
        os.environ['SPECULOS_APPNAME'] = f'app:{app_version}'
        client = SpeculosClient(
            str(conftest_folder_path.parent.joinpath("bin/app.elf")),
            ['--sdk', '2.1']
        )
        client.start()
    # Tests may attach an ``automation_file`` attribute to drive UI approval.
    try:
        automation_file = conftest_folder_path.joinpath(request.function.automation_file)
    except AttributeError:
        automation_file = None
    if automation_file:
        # NOTE(review): the file handle from open() is never closed — consider a with-block.
        rules = json.load(open(automation_file))
        client.set_automation_rules(rules)
    yield client
    client.stop()
@pytest.fixture
def is_speculos(comm: Union[TransportClient, SpeculosClient]) -> bool:
    """True when the transport is the Speculos emulator (not real hardware)."""
    return isinstance(comm, SpeculosClient)


@pytest.fixture
def client(comm: Union[TransportClient, SpeculosClient]) -> Client:
    """High-level Bitcoin app client bound to the active transport (testnet)."""
    return createClient(comm, chain=Chain.TEST, debug=True)
@dataclass(frozen=True)
class SpeculosGlobals:
    """Deterministic wallet material derived from the fixed Speculos seed."""
    seed = "glory promote mansion idle axis finger extra february uncover one trip resource lawn turtle enact monster seven myth punch hobby comfort wild raise skin"
    # TODO: those are for testnet; we could compute them for any network from the seed
    # NOTE(review): the "<KEY>" values below are redacted placeholders, not real key material.
    master_extended_privkey = "<KEY>"
    master_extended_pubkey = "<KEY>"
    master_key_fingerprint = 0xF5ACC2FD
    master_compressed_pubkey = bytes.fromhex(
        "<KEY>"
    )
    wallet_registration_key = bytes.fromhex(
        "7463d6d1a82f4647ead048c625ae0c27fe40b6d0d5f2d24104009ae9d3b7963c"
    )


@pytest.fixture
def speculos_globals() -> SpeculosGlobals:
    # A fresh (frozen) constants object per test.
    return SpeculosGlobals()
| StarcoderdataPython |
3325852 | <reponame>rakhi2001/ecom7<filename>Python3/551.py
__________________________________________________________________________________________________
sample 16 ms submission
class Solution:
    def checkRecord(self, s: str) -> bool:
        """Return True iff attendance record *s* (chars 'A'/'L'/'P') is
        award-eligible: fewer than 2 absences and no 3+ consecutive lates.
        """
        # Direct substring check replaces the original replace('LLL', 'XX')
        # trick, which only worked because valid inputs never contain 'X'.
        return s.count('A') <= 1 and 'LLL' not in s
__________________________________________________________________________________________________
sample 13116 kb submission
class Solution:
    def checkRecord(self, s: str) -> bool:
        """Single pass: reject on a second 'A' or a third consecutive 'L'."""
        absences = 0
        consecutive_lates = 0
        for ch in s:
            if ch == 'L':
                consecutive_lates += 1
                if consecutive_lates > 2:
                    return False
                continue
            # Any non-'L' character breaks a run of lates.
            consecutive_lates = 0
            if ch == 'A':
                absences += 1
                if absences > 1:
                    return False
        return True
__________________________________________________________________________________________________
| StarcoderdataPython |
1750134 | import execjs
import requests
import re
import random
import time
class Reply(object):
    """Automates replying ("bumping") to a Baidu Tieba thread via its web API.

    Requires a logged-in BDUSS cookie and the bundled ``get_form_data.js``
    helper, which computes the anti-bot ``_BSK`` / mouse-trail fields.
    """

    def __init__(self):
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                          ' (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
        }
        # Endpoint that creates a reply in a thread.
        self.add_href = "https://tieba.baidu.com/f/commit/post/add"
        self.session = requests.session()
        self.cookies = {
            # 'BDUSS': 'lJBWmxEYk5lOFUFBJCQAAAAAAAAAAAEAAAAsoIBBg9XS5MTjqnDcg52rgvsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHC6c11wunNdS'
        }
        # Load the JS that reproduces Tieba's client-side form signing.
        with open('get_form_data.js', 'r', encoding='utf-8') as f:
            get_bsk_js = f.read()
        self.ctx = execjs.compile(get_bsk_js)
        # Warm up the session (collects baseline cookies).
        self.session.get('https://tieba.baidu.com/', headers=self.headers)

    def get_html(self, href):
        """GET *href*; return the page HTML, or None for 404/deleted threads."""
        response = self.session.get(href, headers=self.headers)
        if "贴吧404" in response.text:
            print('请输入正确的标题或者该贴已被删除\n')
        elif response.status_code == 200:
            return response.text
        else:
            print('get_html 出问题了')

    def get_detail(self, href):
        """Scrape thread title, forum name/id and the anti-CSRF ``tbs`` token
        from the thread page. Returns None when the page could not be fetched."""
        html = self.get_html(href)
        if html:
            title = re.search("'threadTitle': '([^']+)'", html, re.S).group(1)
            fname = re.search('"forum_name":"([^"]+)"', html).group(1)
            fid = re.search('"forum_id":([\d]+),', html).group(1)
            tbs = re.search("tbs:\s*'([^']+)',", html).group(1)
            return {
                'title': title,
                'fname': fname,
                'fid': fid,
                'tbs': tbs
            }

    def get_form_data(self, tdata):
        """Build the POST form for the reply endpoint, including the
        JS-computed anti-bot fields (_BSK, mouse trail, timestamp)."""
        bsk = self.ctx.call('get_bsk_data', tdata['tbs'])
        data = {
            'ie': 'utf-8',
            'kw': tdata['fname'],  # forum name
            'fid': tdata['fid'],  # forum id
            'tid': tdata['tid'],  # thread id
            'vcode_md5': '',
            'floor_num': '28',  # current floor number; any value works
            'rich_text': '1',
            'tbs': "e5a55a45df7944b41567906433",  # shown on the thread page; requires login
            'content': tdata['content'],  # reply text to post
            'basilisk': '1',
            'files': '[]',
            # mouse trail + timestamp, simulated by the bundled JS
            'mouse_pwd': bsk['mouse_pwd'],
            'mouse_pwd_t': bsk['mouse_pwd_t'],  # timestamp
            'mouse_pwd_isclick': '1',
            'nick_name': '',
            '__type__': 'reply',
            'geetest_success': '1',
            '_BSK': bsk['bsk']
        }
        return data

    def add(self, tdata):
        """Post the reply ``tdata['num']`` times, then prompt whether to keep
        bumping this thread (recurses) or pick a new one (returns)."""
        succ = 0
        self.headers['Referer'] = tdata['href']
        data = self.get_form_data(tdata)
        for x in range(int(tdata['num'])):
            response = requests.post(self.add_href, headers=self.headers, data=data, cookies=self.cookies)
            data['floor_num'] = int(data['floor_num']) + 1
            # Tieba returns {'no': 0} on success.
            if response.json()['no'] == 0:
                succ += 1
            time.sleep(0.5)
            # Vary the content slightly so duplicate posts are not rejected.
            data['content'] += str(random.randint(0, 100))
        print('顶贴成功 {} 次'.format(succ))
        flag = input('如果继续顶这贴请输入 1 ,输入任意键重新选择贴子\n>')
        if flag == '1':
            num = input('输入顶贴次数(一次性回复)\n>')
            while not re.match('^\d+$', num):
                num = input('请输入正确的数字\n>')
            tdata['num'] = num
            self.add(tdata)

    def run(self):
        """Interactive loop: ask for a thread URL, reply text and count,
        then bump. '#' exits."""
        while True:
            href = input('请输入你要进行顶贴的链接(比如:https://tieba.baidu.com/p/6248363819),退出请输入 #\n>')
            if href == '#':
                print('程序已退出,有问题或者建议请联系微信公众号:日常学python 进行改进,谢谢\n')
                break
            tid = re.search('/p/([\d]+)', href)
            if tid:
                tid = tid.group(1)
                # Normalize the URL to the canonical thread form.
                href = "https://tieba.baidu.com/p/" + tid
                tdata = self.get_detail(href)
                print('贴吧:{}\n贴子:{}'.format(tdata['fname'], tdata['title']))
                content = input('输入你的顶贴内容\n>')
                num = input('输入顶贴次数(一次性回复)\n>')
                while not re.match('^\d+$', num):
                    num = input('请输入正确的数字\n>')
                tdata['tid'] = tid
                tdata['href'] = href
                tdata['content'] = content
                tdata['num'] = num
                self.add(tdata)
            else:
                print("请输入正确的链接")


if __name__ == '__main__':
    reply = Reply()
    reply.run()
| StarcoderdataPython |
32410 | ## Built-in packages
import getopt
import json
import os
import sys
## Third-party packages
from PIL import Image
import joblib
import numpy as np
import tqdm
## Tensorflow
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import GlobalMaxPool2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.models import Model
from tensorflow.keras.layers import SeparableConv2D
import tensorflow_addons as tfa
## Global model parameters (DO NOT CHANGE)
# NOTE: the former module-level ``global`` statements were removed — the
# ``global`` keyword is a no-op outside a function body. Functions that
# *assign* these names must declare them ``global`` locally instead.
INPUT_WIDTH = 1500    # model input width
INPUT_HEIGHT = 850    # model input height
FILTER_SIZE = 32      # base number of convolution filters
DENSE_UNITS = 1024    # units in the fully-connected head
DROPOUT = 0.3         # dropout rate
OUTPUT_CLASS = 3      # number of output classes
| StarcoderdataPython |
3277273 | <reponame>mreveil/disaster-monitoring
# Generated by Django 3.2.6 on 2021-08-22 00:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the old ``Aid`` model with ``Fundraiser`` and ``Relief``.

    Auto-generated; do not edit the operations by hand.
    """

    dependencies = [
        ('app', '0017_auto_20210820_2007'),
    ]

    operations = [
        migrations.CreateModel(
            name='Fundraiser',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=250)),
                ('title', models.CharField(max_length=100)),
                ('goal', models.IntegerField(default=0)),
                ('unit', models.CharField(max_length=50)),
                ('status', models.CharField(max_length=50)),
                ('pub_link', models.CharField(max_length=250)),
                ('embed_code', models.CharField(max_length=1500)),
                ('institution', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.institution')),
                ('target_location', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.location')),
            ],
        ),
        migrations.CreateModel(
            name='Relief',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item_type', models.CharField(max_length=50)),
                ('item_subtype', models.CharField(max_length=50)),
                ('quantity', models.IntegerField(default=0)),
                ('unit', models.CharField(max_length=50)),
                ('status', models.CharField(max_length=50)),
                ('pub_link', models.CharField(max_length=250)),
                ('embed_code', models.CharField(max_length=1500)),
                ('institution', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.institution')),
                ('target_location', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.location')),
            ],
        ),
        migrations.DeleteModel(
            name='Aid',
        ),
    ]
| StarcoderdataPython |
4817504 | <filename>parsers/messenger.py<gh_stars>1-10
#!/usr/bin/env python3
import os
import time
import random
import argparse
import pandas as pd
from langdetect import *
from lxml import etree
from parsers import log
from parsers import utils, config
def parse_arguments():
    """Parse CLI options: chat owner name (required), log directory, and the
    maximum number of messages to export."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--own-name', dest='own_name', type=str,
                        help='name of the owner of the chat logs, written as in the logs', required=True)
    parser.add_argument('-f', '--file-path', dest='file_path', help='Facebook chat log file (HTML file)',
                        default=config.DEFAULT_MESSENGER_RAW_FILE)
    parser.add_argument('--max', '--max-exported-messages', dest='max_exported_messages', type=int,
                        default=config.MAX_EXPORTED_MESSAGES, help='maximum number of messages to export')
    args = parser.parse_args()
    return args
def main():
    """Parse all Facebook Messenger HTML exports in the given directory into a
    pandas DataFrame, detect per-conversation language, and export a pickle.

    Assumes the legacy Facebook data-export HTML layout: one ``<h3>`` header
    per file, ``span.user`` / ``span.meta`` sender/date pairs, and message
    bodies in ``<p>`` tags — TODO confirm for newer export formats.
    """
    args = parse_arguments()
    fallbackDateParsing = False
    data = []
    warnedNameChanges = []
    nbInvalidSender = 0
    # make sure we don't crash if chat logs contain exotic characters
    etree.set_default_parser(etree.XMLParser(encoding='utf-8', ns_clean=True, recover=True))
    for filename in os.listdir(args.file_path):
        if not filename.endswith('.html'):
            continue
        document = os.path.join(args.file_path, filename)
        archive = etree.parse(document)
        # Per-conversation state, reset for each file.
        conversationId = filename.replace('.html', '')
        groupConversation = False
        timestamp = ''
        senderName = ''
        conversationWithName = None
        for element in archive.iter():
            tag = element.tag
            className = element.get('class')
            content = element.text
            if tag == 'p':
                # A <p> is one message body; sender/timestamp were set by the
                # preceding span.user / span.meta elements.
                text = content
                if conversationWithName != '' and senderName != '':
                    # handles when the interlocutor's name changed at some point
                    if (senderName != conversationWithName) and (senderName != args.own_name) and \
                            (senderName not in warnedNameChanges) and (not groupConversation):
                        if senderName not in warnedNameChanges:
                            print('\t', 'Assuming', senderName, 'is', conversationWithName)
                            warnedNameChanges.append(senderName)
                        senderName = conversationWithName
                    data += [[timestamp, conversationId, conversationWithName, senderName, text]]
                else:
                    nbInvalidSender = nbInvalidSender + 1
            elif tag == 'span':
                if className == 'user':
                    senderName = content
                elif className == 'meta':
                    # Parse the message date; on the first unexpected format,
                    # permanently switch to the slower inference-based parser.
                    try:
                        if not fallbackDateParsing:
                            timestamp = time.mktime(
                                pd.to_datetime(content, format='%A, %B %d, %Y at %H:%M%p', exact=False).timetuple())
                        else:
                            timestamp = time.mktime(pd.to_datetime(content, infer_datetime_format=True).timetuple())
                    except ValueError:
                        if not fallbackDateParsing:
                            print('Unexpected date format. '
                                  'Falling back to infer_datetime_format, parsing will be slower.')
                            timestamp = time.mktime(pd.to_datetime(content, infer_datetime_format=True).timetuple())
                            fallbackDateParsing = True
                        else:
                            raise
            elif tag == 'div' and className == 'thread':
                # The thread header lists participants comma-separated.
                nbParticipants = str(element.xpath("text()")).count(', ') + 1
                if nbParticipants > 1:
                    groupConversation = True
            elif tag == 'h3':
                if conversationWithName is not None:
                    print('Something is wrong. File format changed? (multiple conversation hearder in a single file)')
                    exit(0)
                else:
                    content = content.replace('Conversation with ', '')
                    conversationWithName = content
        print(conversationId, conversationWithName, "(group?", groupConversation, ")")
        if len(data) >= args.max_exported_messages:
            break
    print(len(data), 'messages parsed.')
    if nbInvalidSender > 0:
        print(nbInvalidSender, 'messages discarded because of bad ID.')
    if len(data) < 1:
        print('Nothing to save.')
        exit(0)
    log.info('Converting to DataFrame...')
    df = pd.DataFrame(data)
    df.columns = config.DATAFRAME_COLUMNS
    df['platform'] = 'messenger'
    log.info('Detecting languages...')
    df['language'] = 'unknown'
    # Language detection on a random sample of up to 100 messages per
    # conversation; conversations with <= 10 messages stay 'unknown'.
    for name, group in df.groupby(df.conversationWithName):
        sample = ''
        df2 = df[df.conversationWithName == name].dropna()
        if len(df2) > 10:
            for x in range(0, min(len(df2), 100)):
                sample = sample + df2.iloc[random.randint(0, len(df2) - 1)]['text']
            print('\t', name, detect(sample), "(", len(df2), "msgs)")
            df.loc[df.conversationWithName == name, 'language'] = detect(sample)
    log.info('Computing dates...')
    df['datetime'] = df['timestamp'].apply(utils.timestamp_to_ordinal)
    print(df.head())
    utils.export_dataframe(df, 'messenger.pkl')
    log.info('Done.')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3206386 | <gh_stars>0
"""
Sum Lists: You have two numbers represented by a linked list, where each node contains a single digit.The digits are stored in reverse order, such that the 1 's digit is at the head of the list. Write a function that adds the two numbers and returns the sum as a linked list.
EXAMPLE
Input:(7-> 1 -> 6) + (5 -> 9 -> 2).Thatis,617 + 295. Output:2 -> 1 -> 9.That is,912.
FOLLOW UP
Suppose the digits are stored in forward order. Repeat the above problem.
EXAMPLE
Input:(6 -> 1 -> 7) + (2 -> 9 -> 5).That is,617 + 295. Output:9 -> 1 -> 2.That is,912.
"""
from linked_list import SinglyLinkedList, SinglyLinkedNode
def inner_step(n1, n2, n3, sum_ll, carry):
    """Add one digit column of the reverse-order sum.

    Consumes the current node of each addend (either may be None when that
    operand is exhausted), appends the resulting digit node to ``sum_ll``
    (``n3`` is its current tail, or None while the list is empty), and
    returns the advanced ``(n1, n2, n3, carry)`` tuple.
    """
    total = carry
    if n1:
        total += n1.value
        n1 = n1.next
    if n2:
        total += n2.value
        n2 = n2.next
    result = total % 10
    carry = total // 10
    new_node = SinglyLinkedNode(result)
    if not n3:
        # First digit: it becomes the head of the result list.
        sum_ll.head = new_node
        n3 = sum_ll.head
    else:
        n3.next = new_node
        n3 = new_node
    return n1, n2, n3, carry
def sum_reverse(self, ll2):
    """Return a new list holding the digit-wise sum of ``self`` and ``ll2``,
    where both operands and the result store the least-significant digit at
    the head.
    """
    sum_ll = SinglyLinkedList()
    carry = 0
    n1, n2, n3 = self.head, ll2.head, sum_ll.head
    # One loop replaces the original's three near-identical while-loops plus
    # a trailing carry step: inner_step already tolerates exhausted operands
    # (None nodes), and carry is at most 1, so the loop terminates.
    while n1 or n2 or carry:
        n1, n2, n3, carry = inner_step(n1, n2, n3, sum_ll, carry)
    return sum_ll


SinglyLinkedList.sum_reverse = sum_reverse
def add_zero_nodes(ll, count):
    """Build ``count`` zero-valued nodes chained in front of ``ll``'s head and
    return the new head node.

    Does not mutate ``ll`` itself — the caller must assign the returned head.
    Assumes ``count >= 1``.
    """
    node = SinglyLinkedNode(0)
    head = node
    for i in range(count - 1):
        node.next = SinglyLinkedNode(0)
        node = node.next
    node.next = ll.head
    return head
def do_sum_forward(node1, node2):
    """Recursively add two equal-length digit lists stored most-significant
    digit first.

    Returns ``(result_node, carry)`` for the sublists starting at the given
    nodes; the recursion bottoms out at the least-significant digits.
    """
    if not node1:
        return None, 0
    elif not node1.next:
        # Deepest level: the least-significant digit column.
        total = node1.value + node2.value
        carry = total // 10
        value = total % 10
        return SinglyLinkedNode(value), carry
    # Sum the less-significant digits first, then fold their carry in here.
    child_node, carry = do_sum_forward(node1.next, node2.next)
    total = node1.value + node2.value + carry
    carry = total // 10
    value = total % 10
    node = SinglyLinkedNode(value)
    node.next = child_node
    return node, carry
def sum_forward(self, ll2):
    """Digit-wise sum of two most-significant-first lists; returns a new list
    (or None when both operands are empty).

    NOTE(review): the shorter operand is padded in place with leading zero
    nodes, so this call mutates its arguments.
    """
    len1, len2 = len(self), len(ll2)
    # Equalize lengths so the recursion can walk both lists in lockstep.
    if len1 > len2:
        head = add_zero_nodes(ll2, len1 - len2)
        ll2.head = head
        len2 = len1
    elif len2 > len1:
        head = add_zero_nodes(self, len2 - len1)
        self.head = head
        len1 = len2
    if len1 == 0:
        return None
    node, carry = do_sum_forward(self.head, ll2.head)
    if carry > 0:
        # A final carry adds one extra most-significant digit.
        head = SinglyLinkedNode(carry)
        node, head.next = head, node
    ll = SinglyLinkedList()
    ll.head = node
    return ll


SinglyLinkedList.sum_forward = sum_forward
if __name__ == "__main__":
import sys
for line in sys.stdin:
ll1, ll2 = line.strip().split("; ")
ll1 = SinglyLinkedList((int(val) for val in ll1.split(', ')))
ll2 = SinglyLinkedList((int(val) for val in ll2.split(', ')))
for node in ll1.sum_reverse(ll2):
print(node.value)
print("")
for node in ll1.sum_forward(ll2):
print(node.value)
| StarcoderdataPython |
126370 | from gym_risk.envs.game.ai import AI
import random
import collections
class BetterAI(AI):
    """
    BetterAI: Thinks about what it is doing a little more - picks a priority
    continent and priorities holding and reinforcing it.
    """
    def start(self):
        """Fix a random preference order over the continents for this game."""
        self.area_priority = list(self.world.areas)
        random.shuffle(self.area_priority)

    def priority(self):
        """Our border territories in the most-preferred continent, or all our
        territories when we have no borders at all."""
        bordering = sorted((t for t in self.player.territories if t.border),
                           key=lambda t: self.area_priority.index(t.area.name))
        if not bordering:
            # BUGFIX: the original evaluated priority[0] before its emptiness
            # check, raising IndexError when no owned territory is a border.
            return list(self.player.territories)
        # Keep only territories in the same (top-priority) area.
        return [t for t in bordering if t.area == bordering[0].area]

    def initial_placement(self, empty):
        """Claim the most-preferred unclaimed territory; once all are claimed,
        reinforce a random priority territory."""
        if empty:
            empty = sorted(empty, key=lambda x: self.area_priority.index(x.area.name))
            return empty[0]
        else:
            return random.choice(self.priority())

    def reinforce(self, available):
        """Spread the available armies randomly over the priority territories."""
        priority = self.priority()
        result = collections.defaultdict(int)
        while available:
            result[random.choice(priority)] += 1
            available -= 1
        return result

    def attack(self):
        """Yield attack orders against neighbours we outnumber by at least 3."""
        for t in self.player.territories:
            if t.forces > 1:
                adjacent = [a for a in t.connect if a.owner != t.owner and t.forces >= a.forces + 3]
                if len(adjacent) == 1:
                    # Single weak neighbour: press the attack while winning.
                    yield (t.name, adjacent[0].name,
                           lambda a, d: a > d, None)
                else:
                    # Several targets: only keep attacking one while we retain
                    # enough forces to beat the remaining neighbours; move a
                    # single army on conquest.
                    total = sum(a.forces for a in adjacent)
                    for adj in adjacent:
                        # BUGFIX: bind ``adj`` via a default argument — the
                        # original lambda captured the loop variable late, so
                        # every yielded predicate saw the last neighbour.
                        yield (t, adj,
                               lambda a, d, adj=adj: a > d + total - adj.forces + 3,
                               lambda a: 1)

    def freemove(self):
        """Shift forces from the strongest interior territory to the front
        (the first priority territory)."""
        srcs = sorted([t for t in self.player.territories if not t.border],
                      key=lambda x: x.forces)
        if srcs:
            src = srcs[-1]
            n = src.forces - 1
            return (src, self.priority()[0], n)
        return None
| StarcoderdataPython |
181613 | <gh_stars>1-10
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Any
import torch
from torch.nn.parallel import DistributedDataParallel
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.overrides.base import _LightningModuleWrapperBase
class LightningDistributedModule(_LightningModuleWrapperBase):
    """DDP-facing wrapper that routes ``forward`` to the LightningModule's
    current step method (training/validation/test/predict)."""

    def __init__(self, pl_module: LightningModule):
        """
        Wraps the user's LightningModule and redirects the forward call to the appropriate
        method, either ``training_step``, ``validation_step``, ``test_step`` or ``predict``.
        This class is used in combination with :class:`~torch.nn.parallel.DistributedDataParallel` as
        shown in the example.

        Example:

            ddp_model = torch.nn.parallel.DistributedDataParallel(
                module=LightningDistributedModule(lightning_module),
                device_ids=[local_rank],
                ...
            )

        Args:
            pl_module: the model to wrap
        """
        super().__init__(pl_module)
def _find_tensors(obj): # pragma: no-cover
r"""
Recursively find all tensors contained in the specified object.
"""
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
# In manual_optimization, we need to call reducer prepare_for_backward.
# Note: Keep track of Pytorch DDP and update if there is a change
# https://github.com/pytorch/pytorch/blob/v1.7.1/torch/nn/parallel/distributed.py#L626-L638
def prepare_for_backward(model: DistributedDataParallel, output: Any):
    """Replicate DDP's internal pre-backward bookkeeping so gradient
    reduction works when ``backward()`` is triggered manually."""
    if torch.is_grad_enabled() and model.require_backward_grad_sync:
        model.require_forward_param_sync = True
        # We'll return the output object verbatim since it is a freeform
        # object. We need to find any tensors in this object, though,
        # because we need to figure out which parameters were used during
        # this forward pass, to ensure we short circuit reduction for any
        # unused parameters. Only if `find_unused_parameters` is set.
        if model.find_unused_parameters:
            model.reducer.prepare_for_backward(list(_find_tensors(output)))
        else:
            model.reducer.prepare_for_backward([])
    else:
        model.require_forward_param_sync = False
| StarcoderdataPython |
3238653 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) BaseDetection, Inc. and its affiliates. All Rights Reserved
from .base_detection_config import BaseDetectionConfig
# Default configuration overrides for R-CNN style detectors; merged into
# BaseDetectionConfig by RCNNConfig below. Values mirror Detectron2 defaults.
_config_dict = dict(
    MODEL=dict(
        # META_ARCHITECTURE='GeneralizedRCNN',
        LOAD_PROPOSALS=False,
        MASK_ON=False,
        KEYPOINT_ON=False,
        ANCHOR_GENERATOR=dict(
            SIZES=[[32, 64, 128, 256, 512]], ASPECT_RATIOS=[[0.5, 1.0, 2.0]],
        ),
        PROPOSAL_GENERATOR=dict(
            # Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
            NAME="RPN",
            MIN_SIZE=0,
        ),
        RPN=dict(
            # HEAD_NAME="StandardRPNHead",
            # Names of the input feature maps to be used by RPN
            # e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
            IN_FEATURES=["res4"],
            # Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
            # Set to -1 or a large value, e.g. 100000, to disable pruning anchors
            BOUNDARY_THRESH=-1,
            # IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
            # Minimum overlap required between an anchor and ground-truth box for the
            # (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
            # ==> positive RPN example: 1)
            # Maximum overlap allowed between an anchor and ground-truth box for the
            # (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
            # ==> negative RPN example: 0)
            # Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
            # are ignored (-1)
            IOU_THRESHOLDS=[0.3, 0.7],
            IOU_LABELS=[0, -1, 1],
            # Total number of RPN examples per image
            BATCH_SIZE_PER_IMAGE=256,
            # Target fraction of foreground (positive) examples per RPN minibatch
            POSITIVE_FRACTION=0.5,
            # Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
            BBOX_REG_WEIGHTS=(1.0, 1.0, 1.0, 1.0),
            # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
            SMOOTH_L1_BETA=0.0,
            LOSS_WEIGHT=1.0,
            # Number of top scoring RPN proposals to keep before applying NMS
            # When FPN is used, this is *per FPN level* (not total)
            PRE_NMS_TOPK_TRAIN=12000,
            PRE_NMS_TOPK_TEST=6000,
            # Number of top scoring RPN proposals to keep after applying NMS
            # When FPN is used, this limit is applied per level and then again to the union
            # of proposals from all levels
            # NOTE: When FPN is used, the meaning of this config is different from Detectron1.
            # It means per-batch topk in Detectron1, but per-image topk here.
            # See "modeling/rpn/rpn_outputs.py" for details.
            POST_NMS_TOPK_TRAIN=2000,
            POST_NMS_TOPK_TEST=1000,
            # NMS threshold used on RPN proposals
            NMS_THRESH=0.7,
            # NMS type for RPN
            # Format: str. (e.g., 'normal' means using normal nms)
            # Allowed values are 'normal', 'softnms-linear', 'softnms-gaussian'
            NMS_TYPE='normal'
        ),
        ROI_HEADS=dict(
            # ROI_HEADS type: "Res5ROIHeads",
            # Names of the input feature maps to be used by ROI heads
            # Currently all heads (box, mask, ...) use the same input feature map list
            # e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
            IN_FEATURES=["res4"],
            # Number of foreground classes
            NUM_CLASSES=80,
            # IOU overlap ratios [IOU_THRESHOLD]
            # Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
            # Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
            IOU_THRESHOLDS=[0.5],
            IOU_LABELS=[0, 1],
            # RoI minibatch size *per image* (number of regions of interest [ROIs])
            # Total number of RoIs per training minibatch =
            #   ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
            # E.g., a common configuration is: 512 * 16 = 8192
            BATCH_SIZE_PER_IMAGE=512,
            # Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
            POSITIVE_FRACTION=0.25,
            # Only used in test mode
            # Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
            # balance obtaining high recall with not having too many low precision
            # detections that will slow down inference post processing steps (like NMS)
            # A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
            # inference.
            SCORE_THRESH_TEST=0.05,
            # Overlap threshold used for non-maximum suppression (suppress boxes with
            # IoU >= this threshold)
            NMS_THRESH_TEST=0.5,
            # If True, augment proposals with ground-truth boxes before sampling proposals to
            # train ROI heads.
            PROPOSAL_APPEND_GT=True,
        ),
        ROI_BOX_HEAD=dict(
            # C4 don't use head name option
            # Options for non-C4 models: FastRCNNConvFCHead,
            # Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
            # These are empirically chosen to approximately lead to unit variance targets
            BBOX_REG_WEIGHTS=(10.0, 10.0, 5.0, 5.0),
            # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
            SMOOTH_L1_BETA=0.0,
            POOLER_RESOLUTION=14,
            POOLER_SAMPLING_RATIO=0,
            # Type of pooling operation applied to the incoming feature map for each RoI
            POOLER_TYPE="ROIAlignV2",
            NUM_FC=0,
            # Hidden layer dimension for FC layers in the RoI box head
            FC_DIM=1024,
            NUM_CONV=0,
            # Channel dimension for Conv layers in the RoI box head
            CONV_DIM=256,
            # Normalization method for the convolution layers.
            # Options: "" (no norm), "GN", "SyncBN".
            NORM="",
            # Whether to use class agnostic for bbox regression
            CLS_AGNOSTIC_BBOX_REG=False,
            # If true, RoI heads use bounding boxes predicted by the box head
            # rather than proposal boxes
            TRAIN_ON_PRED_BOXES=False,
        ),
        ROI_BOX_CASCADE_HEAD=dict(
            # The number of cascade stages is implicitly defined by
            # the length of the following two configs.
            BBOX_REG_WEIGHTS=(
                (10.0, 10.0, 5.0, 5.0),
                (20.0, 20.0, 10.0, 10.0),
                (30.0, 30.0, 15.0, 15.0),
            ),
            IOUS=(0.5, 0.6, 0.7),
        ),
        ROI_MASK_HEAD=dict(
            # NAME="MaskRCNNConvUpsampleHead",
            POOLER_RESOLUTION=14,
            POOLER_SAMPLING_RATIO=0,
            # The number of convs in the mask head
            NUM_CONV=0,
            CONV_DIM=256,
            # Normalization method for the convolution layers.
            # Options: "" (no norm), "GN", "SyncBN".
            NORM="",
            # Whether to use class agnostic for mask prediction
            CLS_AGNOSTIC_MASK=False,
            # Type of pooling operation applied to the incoming feature map for each RoI
            POOLER_TYPE="ROIAlignV2",
        ),
    ),
)
class RCNNConfig(BaseDetectionConfig):
    """BaseDetectionConfig specialized with the R-CNN defaults in _config_dict."""

    def __init__(self, d=None, **kwargs):
        super().__init__(d, **kwargs)
        self._register_configuration(_config_dict)


# Ready-to-use default configuration instance.
config = RCNNConfig()
| StarcoderdataPython |
189036 | <reponame>ped998/scripts<filename>python/unprotectPhysicalServer/unprotectPhysicalServer.py
#!/usr/bin/env python
"""unprotect physical servers"""
# version 2021-12-03
# import pyhesity wrapper module
from pyhesity import *
# command line arguments
import argparse
# command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)
parser.add_argument('-u', '--username', type=str, default='helios')
parser.add_argument('-d', '--domain', type=str, default='local')
parser.add_argument('-i', '--useApiKey', action='store_true')
parser.add_argument('-pwd', '--password', type=str, default=None)
parser.add_argument('-n', '--servername', action='append', type=str)
parser.add_argument('-l', '--serverlist', type=str)
args = parser.parse_args()

vip = args.vip
username = args.username
domain = args.domain
# BUGFIX: this line was a redacted placeholder ("<PASSWORD>"), which is a
# syntax error; restore the intended assignment from the parsed arguments.
password = args.password
useApiKey = args.useApiKey
servernames = args.servername
serverlist = args.serverlist

# Maps a physical protection type to the params key holding its object list.
paramPaths = {'kFile': 'fileProtectionTypeParams', 'kVolume': 'volumeProtectionTypeParams'}

# gather server list (from repeated -n flags and/or the -l file)
if servernames is None:
    servernames = []
if serverlist is not None:
    # Use a with-block so the list file is always closed.
    with open(serverlist, 'r') as f:
        servernames += [s.strip() for s in f.readlines() if s.strip() != '']
if len(servernames) == 0:
    print('no servers specified')
    exit()

# authenticate
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey)

# Track which requested servers were actually found in some protection group.
serverfound = {server: False for server in servernames}

jobs = api('get', 'data-protect/protection-groups?isDeleted=false&isActive=true&environments=kPhysical', v=2)

if 'protectionGroups' in jobs and jobs['protectionGroups'] is not None:
    for job in jobs['protectionGroups']:
        saveJob = False
        paramPath = paramPaths[job['physicalParams']['protectionType']]
        for server in servernames:
            protectedObjectCount = len(job['physicalParams'][paramPath]['objects'])
            # Drop the server from the group's object list (case-insensitive match).
            job['physicalParams'][paramPath]['objects'] = [o for o in job['physicalParams'][paramPath]['objects'] if o['name'].lower() != server.lower()]
            if len(job['physicalParams'][paramPath]['objects']) < protectedObjectCount:
                # BUGFIX: message previously read "removed from from group".
                print('%s removed from group: %s' % (server, job['name']))
                serverfound[server] = True
                saveJob = True
        if saveJob is True:
            if len(job['physicalParams'][paramPath]['objects']) == 0:
                # No objects left: delete the whole protection group.
                print('0 objects left in %s. Deleting...' % job['name'])
                result = api('delete', 'data-protect/protection-groups/%s' % job['id'], v=2)
            else:
                # Otherwise persist the trimmed object list.
                result = api('put', 'data-protect/protection-groups/%s' % job['id'], job, v=2)

for server in servernames:
    if serverfound[server] is False:
        print('%s not found in any physical protection group. * * * * * *' % server)
4836281 | <gh_stars>1-10
from math import sin, cos, ceil
from math import radians as rad
import pygame
import pymunk
# CLASS AND FUNCTION IMPORTS
from bullet import Bullet
from text import text
class Tank:
    """A tank with a pymunk physics body and an aimable turret.

    Arguments:
        team {str} -- Team name ("red" or "blue"); anything else yields a
            neutral grey tank facing direction 0.
    """
    def __init__(self, team):
        # Size Ratio = 9:5 (0.5555555556 == 5/9)
        width = 23
        height = width * 0.5555555556
        # Rectangle hull, origin at the top-left corner.
        self.vertices = (
            (0, 0),
            (width, 0),
            (width, height),
            (0, height)
        )
        mass = width * 0.5555555556
        self.body = pymunk.Body(mass)
        self.body.moment = pymunk.moment_for_poly(mass, self.vertices)
        self.body.center_of_gravity = (width/2, height/2)
        self.shape = pymunk.Poly(self.body, self.vertices)
        self.shape.elasticity = 0.3
        self.shape.friction = 1.0
        # Team decides colour and initial turret direction (degrees).
        if team == "red":
            self.color = (200, 40, 40)
            self.direction = 135
        elif team == "blue":
            self.color = (40, 40, 200)
            self.direction = 225
        else:
            self.color = (200, 200, 200)
            self.direction = 0
        self.turret_size = width * 0.6944444444  # turret barrel length
        self.score = 0
        self.fire_timer = 0       # frames since creation; incremented by update()
        self.fire_power = 50  # in percentage
        self.update()

    def shoot(self):
        """Spawn and return a Bullet leaving the turret tip at a velocity
        proportional to fire_power."""
        tank_size = self.vertices[2]
        bullet_speed = self.fire_power * 12
        start_pos = self.turret_end_pos
        start_vel = (
            sin(rad(self.direction)) * bullet_speed,
            cos(rad(self.direction))*bullet_speed
        )
        radius = tank_size[0] * 0.2173913043
        return Bullet(start_pos, start_vel, radius)

    def turn_turret(self, angle):
        """Rotate the turret by *angle* degrees.

        NOTE(review): values past the 0/360 boundary snap to 359/1 rather than
        wrapping modulo 360 — correct only for small per-step angles.
        """
        self.direction += angle
        if self.direction <= 0:
            self.direction = 359
        elif self.direction >= 360:
            self.direction = 1

    def add_to_space(self, space):
        """Register the tank's body and collision shape with a pymunk space."""
        space.add(self.body, self.shape)

    def update(self):
        """Clamp fire power and recompute the draw rect and turret endpoints
        from the current physics body position."""
        if self.fire_power < 0:
            self.fire_power = 0
        elif self.fire_power > 100:
            self.fire_power = 100
        pos = self.body.position
        size = self.vertices[2]
        self.rect = pygame.Rect(pos[0], pos[1], size[0], size[1])
        # Turret pivots at the top-centre of the hull.
        self.turret_start_pos = (
            self.body.position[0] + size[0] / 2,
            self.body.position[1]
        )
        self.turret_end_pos = (
            sin(rad(self.direction)) * self.turret_size +
            self.turret_start_pos[0],
            cos(rad(self.direction)) *
            self.turret_size + self.turret_start_pos[1]
        )
        self.fire_timer += 1

    def render(self, window):
        """Draw the hull, turret and the angle/power readouts onto *window*."""
        # Render Body
        pygame.draw.rect(window, self.color, self.rect)
        # Render Turret
        pos = self.body.position
        size = self.vertices[2]
        pygame.draw.line(window, (100, 100, 100), self.turret_start_pos,
                         self.turret_end_pos, ceil(size[0] * 0.2))
        # Render Stats
        angle_pos = (pos[0] + size[0]/2, pos[1] + size[1]*2)
        text(window, str(int(self.direction)) + "˚", 25, (255, 255, 255), angle_pos)
        power_pos = (angle_pos[0], angle_pos[1] + size[1] * 2)
        text(window, str(int(self.fire_power)), 25, (255, 255, 255), power_pos)
4816671 | #!/usr/bin/env python
import numpy as np
from pycrazyswarm import *
Z = 1.0  # common hover altitude used by the tests

def setUp():
    """Create a one-Crazyflie simulated swarm; return (allcfs, timeHelper)."""
    crazyflies_yaml = """
    crazyflies:
    - channel: 100
      id: 1
      initialPosition: [1.0, 0.0, 0.0]
    """
    swarm = Crazyswarm(crazyflies_yaml=crazyflies_yaml, args="--sim --vis null")
    timeHelper = swarm.timeHelper
    return swarm.allcfs, timeHelper
def test_cmdFullState_zeroVel():
    """A full-state setpoint with zero velocity/acceleration holds position."""
    allcfs, timeHelper = setUp()
    cf = allcfs.crazyflies[0]
    target = np.array(cf.initialPosition) + np.array([1, 1, Z])
    cf.cmdFullState(target, np.zeros(3), np.zeros(3), 0, np.zeros(3))
    timeHelper.sleep(1.0)
    assert np.allclose(cf.position(), target)
def test_cmdPosition():
    """A position setpoint moves the simulated Crazyflie to the target."""
    allcfs, timeHelper = setUp()
    cf = allcfs.crazyflies[0]
    target = np.array(cf.initialPosition) + np.array([1, 1, Z])
    cf.cmdPosition(target, yaw=0.0)
    timeHelper.sleep(1.0)
    assert np.allclose(cf.position(), target)
def test_cmdVelocityWorld_checkVelocity():
    """A world-frame velocity command shows up in the velocity estimate."""
    allcfs, timeHelper = setUp()
    cf = allcfs.crazyflies[0]
    commanded = np.ones(3)
    cf.cmdVelocityWorld(commanded, yawRate=0)
    timeHelper.sleep(1.0)
    assert np.allclose(cf.velocity(), commanded)
def test_cmdVelocityWorld_checkIntegrate():
    """Position integrates the commanded velocity over one second of sim time."""
    allcfs, timeHelper = setUp()
    cf = allcfs.crazyflies[0]
    commanded = np.ones(3)
    cf.cmdVelocityWorld(commanded, yawRate=0)
    timeHelper.sleep(1.0)
    expected = cf.initialPosition + commanded
    assert np.allclose(cf.position(), expected)
def test_cmdVelocityWorld_disturbance():
    """With simulated disturbance enabled, pure velocity integration must NOT
    land exactly on the undisturbed target position.

    Fix: the original final line carried a fused dataset artifact
    (``| StarcoderdataPython |``) that left a dangling ``|`` — a SyntaxError.
    """
    crazyflies_yaml = """
    crazyflies:
    - channel: 100
      id: 1
      initialPosition: [1.0, 0.0, 0.0]
    """
    swarm = Crazyswarm(crazyflies_yaml=crazyflies_yaml, args="--sim --vis null --disturbance 1.0")
    timeHelper = swarm.timeHelper
    cf = swarm.allcfs.crazyflies[0]
    vel = np.ones(3)
    cf.cmdVelocityWorld(vel, yawRate=0)
    timeHelper.sleep(1.0)
    pos = cf.initialPosition + vel
    assert not np.any(np.isclose(cf.position(), pos))
3225565 | from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils.text import Truncator
class Post(models.Model):
    """Model for posts in the blog"""
    # Headline shown in listings and as str(self).
    title = models.CharField(max_length=254)
    # Set once, when the post is first saved.
    publ_date = models.DateTimeField(auto_now_add=True)
    # Full article body.
    post_text = models.TextField()
    # Teaser text shown before the "read more" cut.
    before_spoiler = models.TextField(default='The post do not have a brief description.')
    def get_absolute_url(self):
        # Canonical URL for this post; Django uses it after create/update.
        return reverse('post', kwargs={'pk': self.pk})
    def __str__(self):
        return self.title
class Comment(models.Model):
    """Model for comments in the blog"""
    # on_delete=models.SET_NULL assigns NULL to the record if the post with
    # this comment is deleted (doesn't work without null=True);
    # related_name enables the reverse relationship (all comments of a post).
    post_id = models.ForeignKey(Post, on_delete=models.SET_NULL, null=True,
                                related_name='post_comments')
    # '+' disables the reverse relationship (we don't need it here).
    # With some DBs on_delete=models.DO_NOTHING may cause referential integrity
    # errors; SQLite is ok about that (as far as I know).
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='+')
    # Set once, when the comment is first saved.
    publ_date = models.DateTimeField(auto_now_add=True)
    comment_text = models.TextField()
    def __str__(self):
        # Truncated preview (50 chars) for admin/listing displays.
        return Truncator(self.comment_text).chars(50)
# After implementing the models, we need to apply these changes to the DB:
# py manage.py makemigrations
# py manage.py migrate
| StarcoderdataPython |
3208189 | import sqlite3
def __sqlite(query: str, params=()):
    """Run one SQL statement against the ttbm database and return all rows.

    :param query: SQL text; use ``?`` placeholders for user-supplied values.
    :param params: values bound to the placeholders (new, defaulted and thus
        backward-compatible; bound parameters prevent SQL injection).
    :returns: ``cursor.fetchall()`` result (list of row tuples).
    """
    con = sqlite3.connect("../resources_manager/ttbm.db")
    try:
        cur = con.cursor()
        cur.execute(query, params)
        result = cur.fetchall()
        con.commit()
    finally:
        # Close even when execute() raises, so no handle is leaked.
        con.close()
    return result
def sqlite_3_add_user(name: str, password: str, id: int):
    """Insert a new row into the ``users`` table."""
    # NOTE(review): values are interpolated into the SQL text — injection-prone
    # and breaks on quotes; prefer `?` placeholders with bound parameters.
    __sqlite(f"INSERT INTO users(name, password, id) VALUES('{name}', '{password}', {id})")
def sqlite_3_select_identity_name(name: str):
    """Return the first ``users`` row matching *name*, or [] if none exists."""
    # NOTE(review): f-string SQL is injection-prone; use `?` placeholders.
    try:
        text = __sqlite(f"SELECT * FROM users WHERE name = '{name}'")[0]
        return text
    except IndexError:
        # fetchall() came back empty, so [0] raised: no such user.
        return []
def sqlite_3_create_statistic(name: str, hours: float, win_rates: float, count_of_wins: int, count_of_plays: int):
    """Create the statistic row for user *name* (raises IndexError if absent)."""
    # NOTE(review): f-string SQL is injection-prone; use `?` placeholders.
    key = __sqlite(f"SELECT key FROM users WHERE name = '{name}'")[0][0]
    __sqlite(f"INSERT INTO statistic(hours, win_rates, count_of_wins, count_of_plays, key) VALUES({hours}, {win_rates}"
             f", {count_of_wins}, {count_of_plays}, {key});")
def sqlite_3_update_statistic(name: str, hours: float, win_rates: float, count_of_wins: int, count_of_plays: int):
    """Overwrite the statistic row for user *name* (raises IndexError if absent)."""
    # NOTE(review): f-string SQL is injection-prone; use `?` placeholders.
    key = __sqlite(f"SELECT key FROM users WHERE name = '{name}'")[0][0]
    __sqlite(f"UPDATE statistic SET hours = {hours}, win_rates = {win_rates}, count_of_wins = {count_of_wins}, "
             f"count_of_plays = {count_of_plays} WHERE key = {key}")
def sqlite_3_get_statistic(name: str):
    """Return (hours, win_rates, count_of_wins, count_of_plays) for *name*.

    Raises IndexError when the user or their statistic row does not exist.
    """
    # NOTE(review): f-string SQL is injection-prone; use `?` placeholders.
    key = __sqlite(f"SELECT key FROM users WHERE name = '{name}'")[0][0]
    statistic = __sqlite(f"SELECT hours, win_rates, count_of_wins, count_of_plays FROM statistic WHERE key = {key}")[0]
    return statistic
def sqlite_3_create_info(name: str, date: str, gender: str, description: str):
    """Create the profile-info row for user *name* (raises IndexError if absent)."""
    # NOTE(review): f-string SQL is injection-prone; use `?` placeholders.
    key = __sqlite(f"SELECT key FROM users WHERE name = '{name}'")[0][0]
    __sqlite(f"INSERT INTO info(date, gender, description, key) VALUES('{date}', '{gender}', '{description}', {key})")
def sqlite_3_get_info(name: str):
    """Return (date, gender, description) for *name* (IndexError if absent)."""
    # NOTE(review): f-string SQL is injection-prone; use `?` placeholders.
    key = __sqlite(f"SELECT key FROM users WHERE name = '{name}'")[0][0]
    info = __sqlite(f"SELECT date, gender, description FROM info WHERE key = {key}")[0]
    return info
def sqlite_3_create_view(table: str):
    """Create a view named *table* joining users with their profile info."""
    # NOTE(review): identifiers cannot be bound as parameters, but *table*
    # should still be validated/whitelisted before interpolation.
    __sqlite(f"CREATE VIEW [{table}] AS SELECT users.key, users.name, users.id, users.password, info.date, "
             f"info.description FROM users INNER JOIN info ON users.key=info.key ORDER BY users.key;")
def sqlite_3_get_view(table: str):
    """Return every row of the view named *table*."""
    # NOTE(review): *table* is interpolated as an identifier — validate it.
    view = __sqlite(f"SELECT * FROM [{table}]")
    return view
def sqlite_3_drop_view(table: str):
    """Drop the view named *table*."""
    # NOTE(review): *table* is interpolated as an identifier — validate it.
    __sqlite(f"DROP VIEW [{table}]")
# print(sqlite_3_select_identity_name('Leshqa_Random'))
# sqlite_3_create_statistic('Leshqa_Random', 0, 0, 0, 0)
# sqlite_3_update_statistic('Leshqa_Random', 0, 50, 1, 2)
# print(sqlite_3_get_statistic('Leshqa_Random'))
# sqlite_3_create_info('Leshqa_Random', '2001-10-18', 'male', 'NULL')
# print(sqlite_3_get_info('Leshqa_Random'))
# sqlite_3_create_view("test")
# print(sqlite_3_get_view("test"))
# sqlite_3_drop_view("test")
| StarcoderdataPython |
141278 | <reponame>bitdotioinc/dagster
from .compute_log_manager import S3ComputeLogManager
from .file_cache import S3FileCache, s3_file_cache
from .file_manager import S3FileHandle, S3FileManager
from .intermediate_storage import S3IntermediateStorage
from .object_store import S3ObjectStore
from .resources import s3_file_manager, s3_resource
from .s3_fake_resource import S3FakeSession, create_s3_fake_resource
from .solids import S3Coordinate, file_handle_to_s3
from .system_storage import (
s3_intermediate_storage,
s3_plus_default_intermediate_storage_defs,
s3_plus_default_storage_defs,
s3_system_storage,
)
from .utils import S3Callback
| StarcoderdataPython |
3267641 | <reponame>Bensuperpc/Environment_Installer<gh_stars>0
#
#
# Qt_installer.py - Commands for KDE env
#
# Created by Benoît(<EMAIL>) 30, April of 2019
# Updated by X for python 3.X
#
# Released into the Public domain with MIT licence
# https://opensource.org/licenses/MIT
#
# Written with Sublime text 3 and python 3.7.3
# Script compatibility : Linux (Ubuntu ad debian based)
#
# ==============================================================================
import os
import platform
class Qt_installer:
    """Installs the system packages needed to build Qt applications on Linux."""

    # Class version, exposed via class_version().
    major = "1"
    minor = "0"
    micro = "0"
    releaselevel = "final"
    name = ""

    def class_version(self):
        """Return the version string, e.g. ``1.0.0:final``."""
        return self.major + "." + self.minor + "." + self.micro + ":" + self.releaselevel

    def Qt_installer(self):
        """Install Qt build prerequisites via apt (Debian/Ubuntu-based only).

        Fixes vs. the original: the method was defined without ``self`` and
        raised TypeError when invoked on an instance; the docstring/printout
        claimed it *removed* KDE, while the commands actually install Qt/
        OpenGL development packages.
        """
        if platform.system() == 'Linux':
            print("Installing Qt build dependencies...")
            os.system('sudo apt-get install libfontconfig1 -y')
            os.system('sudo apt-get install mesa-common-dev -y')
            os.system('sudo apt-get install libglu1-mesa-dev -y')
            os.system('sudo apt-get install cmake -y')
        else:
            print("Wrong OS Only for Linux !")

    def __init__(self):
        self.name = "Qt_installer"
if __name__ == '__main__':
    # Smoke-check: print the class version when run as a script.
    installer = Qt_installer()
    print(installer.class_version())
    # installer.Qt_installer()
| StarcoderdataPython |
4823488 | <gh_stars>1-10
import requests
import datetime
import xml.etree.ElementTree as ET
class blueGroups():
    """Client for managing IBM Bluepages BlueGroups.

    Management calls hit the protected ``groups.wss`` endpoint with HTTP
    basic auth (IBM email + intranet password) and return a
    ``{'Status': ...}`` dict describing the outcome; read-only queries use
    the public XML endpoint.  On an unexpected exception the exception
    object itself is returned (kept from the original implementation so
    existing callers continue to work).

    Fixed vs. the original: the reply text was fed to ``int()``, which
    raised ``ValueError`` on any textual error reply — every error-message
    branch was unreachable and callers got the exception back instead of a
    Status dict.  Replies are now parsed as text.  The duplicated
    space-encoding and reply-handling logic is factored into private
    helpers, a stray debug print in ``renameGroup`` was removed, and a
    commented-out dead method was dropped.
    """
    # Authenticated management endpoint and public XML listing endpoint.
    __ADMIN_URL = "https://bluepages.ibm.com/tools/groups/protect/groups.wss"
    __XML_URL = "https://bluepages.ibm.com/tools/groups/groupsxml.wss"

    @staticmethod
    def __encode(value):
        """Encode spaces as %20 for use in a Bluepages query string."""
        return '%20'.join(value.split(' '))

    @staticmethod
    def __interpret(reply, success, invalid_member=None,
                    fallback='Invalid BlueGroup, verify your BlueGroup it is correct.'):
        """Translate a raw API reply into a ``{'Status': ...}`` dict.

        The endpoint answers ``"0"`` on success.  Error replies look like
        ``"<code> <description>:<details>"``; code 4 flags an invalid member
        id (when *invalid_member* is given), anything else is reported with
        *fallback*.
        """
        body = reply.strip()
        if body == '0':
            return {'Status': success}
        if invalid_member is not None and body.split(' ', 1)[0] == '4':
            return {'Status': invalid_member}
        return {'Status': fallback}

    def addMemberTo(self, group, uid, email, password):
        """Add *uid* (9-digit user id) as a member of BlueGroup *group*."""
        try:
            url = (self.__ADMIN_URL + "?gName=" + self.__encode(group) +
                   "&task=Members&mebox=" + uid + "&Select=Add+Members&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(
                petition.text,
                'User added successfully',
                'Invalid user, verify your UID. it been expect 9 digit.')
        except Exception as error:
            return error

    def deleteMemberTo(self, group, uid, email, password):
        """Remove member *uid* (9-digit user id) from BlueGroup *group*."""
        try:
            url = (self.__ADMIN_URL + "?gName=" + self.__encode(group) +
                   "&task=DelMem&mebox=" + uid + "&Delete=Delete+Checked&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(
                petition.text,
                'User deleted successfully',
                'Invalid user, verify your UID. it been expect 9 digit.')
        except Exception as error:
            return error

    def addAdminTo(self, group, uid, email, password):
        """Add *uid* (9-digit user id) as an administrator of *group*."""
        try:
            url = (self.__ADMIN_URL + "?gName=" + self.__encode(group) +
                   "&task=Administrators&mebox=" + uid +
                   "&Submit=Add+Administrators&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(
                petition.text,
                'Admin added successfully',
                'Invalid user, verify your UID. it been expect 9 digit.')
        except Exception as error:
            return error

    def deleteAdminTo(self, group, uid, email, password):
        """Remove administrator *uid* (9-digit user id) from *group*."""
        try:
            url = (self.__ADMIN_URL + "?gName=" + self.__encode(group) +
                   "&task=DelAdm&mebox=" + uid + "&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(
                petition.text,
                'Admin deleted successfully',
                'Invalid admin, verify your UID. it been expect 9 digit.')
        except Exception as error:
            return error

    def createGroup(self, group, description, access, email, password):
        """Create BlueGroup *group*, expiring one year from today.

        :param description: short description for the new group.
        :param access: visibility type, e.g. ``Everyone`` or ``Owner/Admins``.
        """
        try:
            today = datetime.date.today()
            url = (self.__ADMIN_URL + "?selectOn=" + self.__encode(group) +
                   "&task=GoNew&gDesc=" + self.__encode(description) +
                   "&mode=members&vAcc=" + access +
                   "&Y=" + str(today.year + 1) +
                   "&M=" + str(today.month).zfill(2) +
                   "&D=" + str(today.day).zfill(2) + "&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(
                petition.text,
                'Group created successfully',
                fallback='Invalid information, please verify your information that is correct.')
        except Exception as error:
            return error

    def deleteGroup(self, group, email, password):
        """Delete BlueGroup *group*."""
        try:
            url = (self.__ADMIN_URL + "?gName=" + self.__encode(group) +
                   "&task=GoDel&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(petition.text, 'Group deleted successfully')
        except Exception as error:
            return error

    def memberInGroup(self, group, email):
        """Return True if *email* is listed as a member of *group*."""
        try:
            url = self.__XML_URL + "?task=listMembers&group=" + self.__encode(group)
            root = ET.fromstring(requests.get(url).text)
            return any(node.tag == "member" and node.text == email for node in root)
        except Exception as error:
            return error

    def listGroup(self, group):
        """Return every member of *group* as ``[{'user': <email>}, ...]``."""
        try:
            url = self.__XML_URL + "?task=listMembers&group=" + self.__encode(group)
            root = ET.fromstring(requests.get(url).text)
            return [{'user': node.text} for node in root if node.tag == "member"]
        except Exception as error:
            return error

    def changeOwnerGroup(self, group, uid, email, password):
        """Transfer ownership of *group* to *uid* (9-digit user id)."""
        try:
            url = (self.__ADMIN_URL + "?gName=" + self.__encode(group) +
                   "&task=GoCO&mebox=" + uid + "&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(
                petition.text,
                'The new owner of group is ' + uid,
                'Invalid user, verify your UID. it been expect 9 digit.')
        except Exception as error:
            return error

    def renameGroup(self, group, name, email, password):
        """Rename BlueGroup *group* to *name*."""
        try:
            url = (self.__ADMIN_URL + "?gName=" + self.__encode(group) +
                   "&task=GoCc&selectOn=" + self.__encode(name) + "&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(petition.text, 'Group renamed successfully')
        except Exception as error:
            return error

    def changeDescription(self, group, description, email, password):
        """Replace the description of BlueGroup *group*."""
        try:
            url = (self.__ADMIN_URL + "?gName=" + self.__encode(group) +
                   "&task=GoCc&gDesc=" + self.__encode(description) + "&API=1")
            petition = requests.get(url, auth=(email, password))
            return self.__interpret(
                petition.text,
                'Description changed successfully',
                fallback='Invalid BlueGroup or description verify your information.')
        except Exception as error:
            return error
| StarcoderdataPython |
# filename: extras/forms.py
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from requests.exceptions import HTTPError
from utils.forms import (
APISelectMultiple,
BootstrapMixin,
DynamicModelMultipleChoiceField,
StaticSelect,
add_blank_choice,
)
from .enums import JobResultStatus
from .models import IXAPI, JobResult
class IXAPIForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for an IX-API endpoint; validates credentials live."""
    # Populated into a select widget; choices are filled client-side.
    identity = forms.CharField(widget=StaticSelect)
    class Meta:
        model = IXAPI
        fields = ("name", "url", "api_key", "api_secret", "identity")
    def clean(self):
        """Reject the form unless the submitted URL/key/secret can reach IX-API."""
        cleaned_data = super().clean()
        # Build a transient (unsaved) IXAPI instance just to probe the API.
        ixapi = IXAPI(
            url=cleaned_data["url"],
            api_key=cleaned_data["api_key"],
            api_secret=cleaned_data["api_secret"],
        )
        try:
            # Try to query API and see if it raises an error
            ixapi.get_accounts()
        except HTTPError as e:
            # Fail form validation on HTTP error to provide a feedback to the user
            if e.response.status_code >= 400 and e.response.status_code < 500:
                possible_issue = "make sure the URL, key and secret are correct"
            else:
                possible_issue = "the server is malfunctioning or unavailable"
            raise ValidationError(
                f"Unable to connect to IX-API ({e.response.status_code} {e.response.reason}), {possible_issue}."
            )
class IXAPIFilterForm(BootstrapMixin, forms.Form):
    """Search/filter form for IX-API endpoint list views."""
    model = IXAPI
    q = forms.CharField(required=False, label="Search")
class JobResultFilterForm(BootstrapMixin, forms.Form):
    """Search/filter form for background job results."""
    model = JobResult
    q = forms.CharField(required=False, label="Search")
    name = forms.CharField(required=False)
    # Filter by triggering user; choices are loaded dynamically via the API.
    user_id = DynamicModelMultipleChoiceField(
        queryset=User.objects.all(),
        required=False,
        display_field="username",
        label="User",
        widget=APISelectMultiple(api_url="/api/users/users/"),
    )
    # Filter by job status; the blank choice means "any status".
    status = forms.ChoiceField(
        required=False,
        choices=add_blank_choice(JobResultStatus.choices),
        widget=StaticSelect(),
    )
| StarcoderdataPython |
# -*- coding: utf-8 -*-
import base64
import os
import time
from PIL import Image
from hashlib import sha1
import requests
params = {"token": "", # github 第三方客户端授权token
# 用户
"user": "",
# 项目名称
"project": "",
# 文件路径
"path": "img",
# 提交备注
"message": "img commit",
# 缩小比例, 2为缩小一半
"scale": 1.7
}
def saveImageFromClipboard(file_folder_path, file_name):
    """Save the clipboard image into *file_folder_path* using ``pngpaste``.

    Returns the full path of the written file; raises if pngpaste reports
    failure (e.g. the clipboard holds no image).

    Fix: the path is now quoted, so a home directory containing spaces no
    longer breaks the shell command; the path is joined portably.
    """
    file_path = os.path.join(file_folder_path, file_name)
    data = os.system('/usr/local/bin/pngpaste "' + file_path + '"')
    if data != 0:
        raise Exception("从粘贴板获取图片失败")
    return file_path
def readFile2Base64(file_path):
    """Return the file's contents as a base64 *text* string.

    Fix: the original wrapped the bytes in ``str()``, which under Python 3
    yields ``"b'...'"`` (the repr) and corrupts the upload payload; decoding
    explicitly works on both Python 2 and 3.
    """
    with open(file_path, 'rb') as handle:
        return base64.b64encode(handle.read()).decode('ascii')
def shrink(file_path):
    """Scale the image at *file_path* down by ``params['scale']`` in place.

    The file is re-saved as JPEG regardless of its extension (consistent
    with the upload, which names files ``.jpg``).

    Fix: ``w // 1.7`` is a float, and ``Image.thumbnail`` requires integer
    dimensions in modern Pillow releases — cast explicitly.
    """
    im = Image.open(file_path)
    w, h = im.size
    im.thumbnail((int(w // params['scale']), int(h // params['scale'])))
    return im.save(file_path, 'jpeg')
def getSha1(file_path):
    """Return the hex SHA-1 digest of the file's contents."""
    with open(file_path, 'rb') as handle:
        return sha1(handle.read()).hexdigest()
def sendImage(params, content, sha, file_name):
    """PUT the base64 *content* to the GitHub contents API, creating or
    updating ``<path>/<file_name>`` in the configured repository.

    :param params: config dict (token, user, project, path, message).
    :param content: base64-encoded file body.
    :param sha: blob sha (GitHub requires it when updating an existing file).
    :param file_name: target file name inside ``params['path']``.

    Fix: the response was bound to an unused variable ``r1``; dropped.
    """
    headers = {"content-type": "application/json", "Authorization": "token " + params['token']}
    # NOTE(review): the response status is never checked, so upload failures
    # (bad token, missing repo) pass silently — consider raise_for_status().
    requests.put(
        "https://api.github.com/repos/" + params['user'] + "/" + params['project'] + "/contents/" + params[
            'path'] + "/" + file_name,
        json={
            "message": params['message'],
            "content": content,
            "sha": sha
        }, headers=headers)
def getFileName():
    """Return a millisecond-timestamp-based JPEG file name, e.g. '1700000000000.jpg'."""
    millis = int(round(time.time() * 1000))
    return "{}.jpg".format(millis)
def printImagePath(name):
    # NOTE(review): prints only a blank line and ignores `name` — presumably
    # this was meant to print the uploaded image's URL / markdown snippet.
    # (Python-2 print statement: this file targets Python 2.)
    print ""
# Script entry: grab the clipboard image, shrink it, upload it to GitHub,
# and always remove the temporary local copy afterwards.
save_dir = os.path.expandvars('$HOME').rstrip()
name = getFileName()
try:
    file = saveImageFromClipboard(save_dir, name)
    shrink(file)
    base64_str = readFile2Base64(file)
    sha1Obj = getSha1(file)
    sendImage(params, base64_str, sha1Obj, name)
    printImagePath(name)
except Exception as e:
    # Python-2 print statements with trailing comma (stay on one line).
    print("执行错误"),
    print(e),
finally:
    # Clean up the temporary image file whether or not the upload worked.
    path = save_dir + '/' + name
    if os.path.exists(path):
        os.remove(path)
| StarcoderdataPython |
def f(one, two, three, four, five, six, seven, eight, nine, ten, eleven, twelve):
    """Placeholder accepting twelve positional arguments; does nothing.

    Fix: the def line was corrupted by a fused dataset-artifact prefix
    (``1674652 | ``), which made it a SyntaxError; spacing normalized to PEP 8.
    """
    pass
def g():
    """Return 25 (five squared)."""
    return 25
| StarcoderdataPython |
7210 | <gh_stars>1-10
from __future__ import annotations
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Optional, Tuple
from magicgui.widgets import FunctionGui
from pydantic import BaseModel
class Source(BaseModel):
    """An object to store the provenance of a layer.
    Parameters
    ----------
    path: str, optional
        filpath/url associated with layer
    reader_plugin: str, optional
        name of reader plugin that loaded the file (if applicable)
    sample: Tuple[str, str], optional
        Tuple of (sample_plugin, sample_name), if layer was loaded via
        `viewer.open_sample`.
    widget: FunctionGui, optional
        magicgui widget, if the layer was added via a magicgui widget.
    """
    path: Optional[str] = None
    reader_plugin: Optional[str] = None
    sample: Optional[Tuple[str, str]] = None
    widget: Optional[FunctionGui] = None
    class Config:
        # `widget` (FunctionGui) is not a pydantic-native type.
        arbitrary_types_allowed = True
        # Immutable (and therefore hashable) once created.
        frozen = True
    def __deepcopy__(self, memo):
        """Custom deepcopy implementation.
        this prevents deep copy. `Source` doesn't really need to be copied
        (i.e. if we deepcopy a layer, it essentially has the same `Source`).
        Moreover, deepcopying a widget is challenging, and maybe odd anyway.
        """
        return self
# layer source context management
# Stack-merged kwargs for the `Source` of any layer created in the current
# context; written by `layer_source` and read by `current_source`.
_LAYER_SOURCE: ContextVar[dict] = ContextVar('_LAYER_SOURCE', default={})
@contextmanager
def layer_source(**source_kwargs):
    """Context manager declaring the provenance of layers created within it.

    While the context is active, the module-level `_LAYER_SOURCE` ContextVar
    holds `source_kwargs` merged over any enclosing `layer_source` contexts,
    with the innermost context winning on conflicting keys.  Any routine
    that may create a layer (opening a file, running a plugin, calling a
    magicgui widget) can use this to declare that layers created inside the
    context — even indirectly, in sub-functions — stem from a given source;
    `Layer.__init__` reads the state via :func:`current_source`.

    Parameters
    ----------
    **source_kwargs
        keys/values should be valid parameters for :class:`Source`.

    Examples
    --------
    >>> with layer_source(path='file.ext', reader_plugin='plugin'): # doctest: +SKIP
    ...     points = some_function_that_creates_points()
    ...
    >>> assert points.source == Source(path='file.ext', reader_plugin='plugin') # doctest: +SKIP
    """
    merged = {**_LAYER_SOURCE.get(), **source_kwargs}
    token = _LAYER_SOURCE.set(merged)
    try:
        yield
    finally:
        _LAYER_SOURCE.reset(token)
def current_source():
    """Get the current layer :class:`Source` (inferred from context).
    The main place this function is used is in :meth:`Layer.__init__`.
    """
    # Snapshot the kwargs accumulated by any active `layer_source` contexts.
    return Source(**_LAYER_SOURCE.get())
| StarcoderdataPython |
class Request(object):
    """A single customer's *request* for a ride — the atomic unit in an Action.

    Equality and hashing are based solely on ``request_id``.

    Fixes vs. the original: the class header carried a fused dataset-artifact
    prefix (a SyntaxError); ``__eq__`` now returns ``NotImplemented`` for
    foreign types (Python then falls back to identity, so ``req == other``
    still evaluates False, but reflected comparisons work); comment typo fixed.
    """
    # Service-level limits, in seconds.
    MAX_PICKUP_DELAY: float = 300.0
    MAX_DROPOFF_DELAY: float = 600.0

    def __init__(self,
                 request_id: int,
                 source: int,
                 destination: int,
                 current_time: float,
                 travel_time: float,
                 value: float = 1,
                 ):
        """Create a request.

        :param request_id: unique identifier (defines equality/hash).
        :param source: pickup location id.
        :param destination: dropoff location id.
        :param current_time: time the request was made.
        :param travel_time: direct travel time from pickup to dropoff.
        :param value: request value; in the default case all requests are equal.
        """
        self.request_id = request_id
        self.pickup = source
        self.dropoff = destination
        self.value = value  # in the default case, all requests have equal value
        self.pickup_deadline = current_time + self.MAX_PICKUP_DELAY
        self.dropoff_deadline = current_time + travel_time + self.MAX_DROPOFF_DELAY

    def __deepcopy__(self, memo):
        # Requests are immutable identities; deep copies share the instance.
        return self

    def __str__(self):
        return "{}->{}".format(self.pickup, self.dropoff)

    def __repr__(self):
        return str(self)

    def __hash__(self):
        return hash(self.request_id)

    def __eq__(self, other):
        # Compare by identity key; defer to the other operand for foreign types.
        if isinstance(other, self.__class__):
            return self.request_id == other.request_id
        return NotImplemented
| StarcoderdataPython |
3217733 | from typing import Hashable, Iterable, Union
import pandas_flavor as pf
import pandas as pd
from pandas.api.types import is_list_like
import warnings
from janitor.utils import check, check_column, deprecated_alias
from enum import Enum
@pf.register_dataframe_method
@deprecated_alias(columns="column_names")
def encode_categorical(
df: pd.DataFrame,
column_names: Union[str, Iterable[str], Hashable] = None,
**kwargs,
) -> pd.DataFrame:
"""Encode the specified columns with Pandas' [category dtype][cat].
[cat]: http://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html
It is syntactic sugar around `pd.Categorical`.
This method does not mutate the original DataFrame.
Note: In versions < 0.20.11, this method mutates the original DataFrame.
TODO: The big chunk of examples below
should be moved into a Jupyter notebook.
This will keep the docstring consistent and to-the-point.
Examples:
```python
col1 col2 col3
0 2.0 a 2020-01-01
1 1.0 b 2020-01-02
2 3.0 c 2020-01-03
3 1.0 d 2020-01-04
4 NaN a 2020-01-05
df.dtypes
col1 float64
col2 object
col3 datetime64[ns]
dtype: object
```
Specific columns can be converted to category type:
```python
df = (
pd.DataFrame(...)
.encode_categorical(
column_names=['col1', 'col2', 'col3']
)
)
df.dtypes
col1 category
col2 category
col3 category
dtype: object
```
Note that for the code above, the categories were inferred from
the columns, and is unordered:
df['col3']
0 2020-01-01
1 2020-01-02
2 2020-01-03
3 2020-01-04
4 2020-01-05
Name: col3, dtype: category
Categories (5, datetime64[ns]):
[2020-01-01, 2020-01-02, 2020-01-03, 2020-01-04, 2020-01-05]
Explicit categories can be provided, and ordered via the `kwargs``
parameter:
df = (pd.DataFrame(...)
.encode_categorical(
col1 = ([3, 2, 1, 4], "appearance"),
col2 = (['a','d','c','b'], "sort")
)
)
df['col1']
0 2
1 1
2 3
3 1
4 NaN
Name: col1, dtype: category
Categories (4, int64): [3 < 2 < 1 < 4]
df['col2']
0 a
1 b
2 c
3 d
4 a
Name: col2, dtype: category
Categories (4, object): [a < b < c < d]
When the `order` parameter is "appearance",
the categories argument is used as-is;
if the `order` is "sort",
the categories argument is sorted in ascending order;
if `order` is `None``,
then the categories argument is applied unordered.
A User Warning will be generated if some or all of the unique values
in the column are not present in the provided `categories` argument.
```python
df = (pd.DataFrame(...)
.encode_categorical(
col1 = (
categories = [4, 5, 6],
order = "appearance"
)
)
UserWarning: None of the values in col1 are in [4, 5, 6];
this might create nulls for all your values
in the new categorical column.
df['col1']
0 NaN
1 NaN
2 NaN
3 NaN
4 NaN
Name: col1, dtype: category
Categories (3, int64): [4 < 5 < 6]
```
.. note:: if `categories` is None in the `kwargs` tuple, then the
values for `categories` are inferred from the column; if `order`
is None, then the values for categories are applied unordered.
.. note:: `column_names` and `kwargs` parameters cannot be used at
the same time.
Functional usage syntax:
```python
import pandas as pd
import janitor as jn
    - With `column_names`::
categorical_cols = ['col1', 'col2', 'col4']
df = jn.encode_categorical(
df,
columns = categorical_cols) # one way
    - With `kwargs`::
df = jn.encode_categorical(
df,
col1 = (categories, order),
col2 = (categories = [values],
order="sort" # or "appearance" or None
)
Method chaining syntax:
- With `column_names``::
categorical_cols = ['col1', 'col2', 'col4']
df = (pd.DataFrame(...)
.encode_categorical(columns=categorical_cols)
)
- With `kwargs``::
df = (
pd.DataFrame(...)
.encode_categorical(
col1 = (categories, order),
col2 = (categories = [values]/None,
order="sort" # or "appearance" or None
)
)
:param df: The pandas DataFrame object.
:param column_names: A column name or an iterable (list or
tuple) of column names.
:param kwargs: A pairing of column name to a tuple of (`categories`, `order`).
This is useful in creating categorical columns that are ordered, or
if the user needs to explicitly specify the categories.
:returns: A pandas DataFrame.
:raises ValueError: if both ``column_names`` and ``kwargs`` are provided.
""" # noqa: E501
if all((column_names, kwargs)):
raise ValueError(
"""
Only one of `column_names` or `kwargs`
can be provided.
"""
)
# column_names deal with only category dtype (unordered)
# kwargs takes care of scenarios where user wants an ordered category
# or user supplies specific categories to create the categorical
if column_names is not None:
check("column_names", column_names, [list, tuple, Hashable])
if isinstance(column_names, (list, tuple)):
check_column(df, column_names)
dtypes = {col: "category" for col in column_names}
return df.astype(dtypes)
if isinstance(column_names, Hashable):
check_column(df, [column_names])
return df.astype({column_names: "category"})
return _computations_as_categorical(df, **kwargs)
def _computations_as_categorical(df: pd.DataFrame, **kwargs) -> pd.DataFrame:
    """
    Build categorical dtypes for the requested columns.

    This function handles cases where categorical columns are created with
    an order, or with specific values supplied for the categories. It uses
    a kwarg, where the key is the column name, and the value is a tuple of
    (categories, order). The defaults for the tuple are (None, None) and
    will return a categorical dtype with no order and categories inferred
    from the column.

    :param df: The pandas DataFrame object.
    :param kwargs: A pairing of column name to a tuple of
        (`categories`, `order`); validated by `_as_categorical_checks`.
    :returns: A DataFrame, with categorical columns.
    :raises ValueError: if a column holds no non-null value to infer
        categories from.
    """
    categories_dict = _as_categorical_checks(df, **kwargs)

    categories_dtypes = {}
    for column_name, (
        cat,
        order,
    ) in categories_dict.items():
        error_msg = f"""
                     Kindly ensure there is at least
                     one non-null value in {column_name}.
                     """
        if cat is not None:
            # Explicit categories supplied: order is irrelevant, always ordered.
            cat_dtype = pd.CategoricalDtype(categories=cat, ordered=True)
        elif order is None:
            # Nothing specified: plain unordered categorical dtype.
            cat_dtype = pd.CategoricalDtype()
        else:
            # BUGFIX: the original compared strings with `is`
            # (identity, which relies on CPython string interning)
            # and raised UnboundLocalError for, e.g., "SORT", even
            # though validation accepts any case. Use a normalized
            # equality comparison instead.
            sort_categories = order.lower() == _CategoryOrder.SORT.value
            cat = df[column_name].factorize(sort=sort_categories)[-1]
            if cat.empty:
                raise ValueError(error_msg)
            cat_dtype = pd.CategoricalDtype(categories=cat, ordered=True)
        categories_dtypes[column_name] = cat_dtype

    return df.astype(categories_dtypes)
def _as_categorical_checks(df: pd.DataFrame, **kwargs) -> dict:
    """
    This function raises errors if columns in `kwargs` are
    absent in the dataframe's columns.
    It also raises errors if the tuple in `kwargs`
    has a length greater than 2, or the `order` value,
    if not None, is not one of `appearance` or `sort`.
    Error is raised if the `categories` in the tuple in `kwargs`
    is not a 1-D array-like object.
    This function is executed before proceeding to the computation phase.
    If all checks pass, a dictionary of column names and tuple
    of (categories, order) is returned.
    :param df: The pandas DataFrame object.
    :param kwargs: A pairing of column name
        to a tuple of (`categories`, `order`).
    :returns: A dictionary.
    :raises TypeError: if the value in ``kwargs`` is not a tuple.
    :raises ValueError: if ``categories`` is not a 1-D array.
    :raises ValueError: if ``order`` is not one of
        `sort`, `appearance`, or `None`.
    """
    # column checks
    check_column(df, kwargs)
    categories_dict = {}
    for column_name, value in kwargs.items():
        # type check
        check("Pair of `categories` and `order`", value, [tuple])
        len_value = len(value)
        if len_value != 2:
            raise ValueError(
                f"""
                The tuple of (categories, order) for {column_name}
                should be length 2; the tuple provided is
                length {len_value}.
                """
            )
        cat, order = value
        if cat is not None:
            if not is_list_like(cat):
                raise TypeError(f"{cat} should be list-like.")
            # Wrap plain iterables in an Index so ndim / uniqueness
            # checks below work uniformly on array-likes.
            if not hasattr(cat, "shape"):
                checker = pd.Index([*cat])
            else:
                checker = cat
            arr_ndim = checker.ndim
            if (arr_ndim != 1) or isinstance(checker, pd.MultiIndex):
                raise ValueError(
                    f"""
                    {cat} is not a 1-D array.
                    Kindly provide a 1-D array-like object to `categories`.
                    """
                )
            if not isinstance(checker, (pd.Series, pd.Index)):
                checker = pd.Index(checker)
            if checker.hasnans:
                raise ValueError(
                    "Kindly ensure there are no nulls in `categories`."
                )
            if not checker.is_unique:
                raise ValueError(
                    """
                    Kindly provide unique,
                    non-null values for `categories`.
                    """
                )
            if checker.empty:
                raise ValueError(
                    """
                    Kindly ensure there is at least
                    one non-null value in `categories`.
                    """
                )
            # uniques, without nulls
            uniques = df[column_name].factorize(sort=False)[-1]
            if uniques.empty:
                raise ValueError(
                    f"""
                    Kindly ensure there is at least
                    one non-null value in {column_name}.
                    """
                )
            missing = uniques.difference(checker, sort=False)
            # Warn (don't fail) when the provided categories only partially
            # or entirely miss the column's values: unmatched values become
            # nulls once the categorical dtype is applied.
            if not missing.empty and (uniques.size > missing.size):
                warnings.warn(
                    f"""
                     Values {tuple(missing)} are missing from
                     the provided categories {cat}
                     for {column_name}; this may create nulls
                     in the new categorical column.
                     """,
                    UserWarning,
                    stacklevel=2,
                )
            elif uniques.equals(missing):
                warnings.warn(
                    f"""
                     None of the values in {column_name} are in
                     {cat};
                     this might create nulls for all values
                     in the new categorical column.
                     """,
                    UserWarning,
                    stacklevel=2,
                )
        if order is not None:
            check("order", order, [str])
            category_order_types = [ent.value for ent in _CategoryOrder]
            # Case-insensitive membership test against the enum values.
            if order.lower() not in category_order_types:
                raise ValueError(
                    """
                    `order` argument should be one of
                    "appearance", "sort" or `None`.
                    """
                )
        categories_dict[column_name] = value
    return categories_dict
class _CategoryOrder(Enum):
    """
    Valid ordering strategies for ``encode_categorical``'s ``order`` argument.

    ``SORT`` orders categories ascending; ``APPEARANCE`` orders them by
    first appearance in the column. Values are matched case-insensitively
    in ``_as_categorical_checks``.
    """
    SORT = "sort"
    APPEARANCE = "appearance"
| StarcoderdataPython |
1691509 | <gh_stars>1-10
import biome
from pbrtwriter import PbrtWriter
class Foliage:
    """Writer for foliage blocks: translucent material tinted by the biome's foliage color."""

    def __init__(self, block):
        self.block = block

    def write(self, pbrtwriter: PbrtWriter, face):
        """Emit a translucent pbrt material for this face, tinted per biome."""
        tint = biome.getFoliageColor(self.block.biome_id, 0)
        texture_name = "%s-color" % face["texture"]
        pbrtwriter.material(
            "translucent",
            "texture Kd", texture_name,
            "rgb reflect", tint,
            "rgb transmit", tint)
class Grass:
    """Writer for grass blocks: translucent material tinted by the biome's grass color."""

    def __init__(self, block):
        self.block = block

    def write(self, pbrtwriter: PbrtWriter, face):
        """Emit a translucent pbrt material for this face, tinted per biome."""
        tint = biome.getGrassColor(self.block.biome_id, 0)
        texture_name = "%s-color" % face["texture"]
        pbrtwriter.material(
            "translucent",
            "texture Kd", texture_name,
            "rgb reflect", tint,
            "rgb transmit", tint)
class Matte:
    """Writer for plain diffuse blocks; applies a biome tint map when the face is tintable."""

    def __init__(self, block):
        self.block = block

    def write(self, pbrtwriter: PbrtWriter, face):
        """Emit a matte pbrt material; tinted when the face carries a tintindex."""
        texture_name = "%s-color" % face["texture"]
        if "tintindex" not in face:
            pbrtwriter.material("matte", "texture Kd", texture_name)
            return
        # Leaves use the foliage tint; everything else uses the grass tint.
        if self.block._is("leaves"):
            tint = biome.getFoliageColor(self.block.biome_id, 0)
        else:
            tint = biome.getGrassColor(self.block.biome_id, 0)
        pbrtwriter.material(
            "matte", "texture Kd", texture_name, "rgb tintMap", tint)
class Glass:
    """Writer for glass blocks: specular transmissive material."""

    def __init__(self, block):
        self.block = block

    def write(self, pbrtwriter: PbrtWriter, face):
        """Emit a pbrt glass material keyed to this face's texture."""
        pbrtwriter.material("glass", "texture Kr", "%s-color" % face["texture"])
class Light:
    """Writer for light-emitting blocks: emits a textured area light source."""

    # Emission scale used when the block is at the maximum light level (15).
    FULL_LIGHT = 5.

    def __init__(self, block):
        self.block = block

    def write(self, pbrtwriter: PbrtWriter, face):
        """Emit a textured area light whose intensity scales quadratically with block light."""
        normalized = self.block.getLight() / 15.
        intensity = normalized ** 2 * Light.FULL_LIGHT
        pbrtwriter.areaLightSource(
            "texlight",
            "texture L", "%s-color" % face["texture"],
            "rgb scale", (intensity, intensity, intensity))
| StarcoderdataPython |
3228752 | <gh_stars>0
"""
"abacabad" c
"abacabaabacaba" _
"abcdefghijklmnopqrstuvwxyziflskecznslkjfabe" d
"bcccccccccccccyb" y
"""
def first_not_repeating_char(char_sequence):
    """Return the first character that occurs exactly once in *char_sequence*.

    Returns '_' when every character repeats (or the string is empty),
    matching the examples in the module docstring.

    BUGFIX: the original function had no body at all (the module could not
    even be imported); this implements the documented behavior.
    """
    # Count occurrences in one pass, then scan in order for the first unique.
    counts = {}
    for ch in char_sequence:
        counts[ch] = counts.get(ch, 0) + 1
    for ch in char_sequence:
        if counts[ch] == 1:
            return ch
    return '_'


if __name__ == '__main__':
    # BUGFIX: raw_input is Python 2 only; input() is the Python 3 equivalent.
    char_sequence = input('Escribe una secuencia de caracteres: ')
    result = first_not_repeating_char(char_sequence)
    if result == '_':
        print('Todos los caracteres se repiten.')
    else:
        print('El primer caracter no repetido es: {}'.format(result))
1789383 | from django.urls import path
from django.contrib.auth import views as auth_views
from . import views as api_views
# URL routes for the API app; all views live in api_views.
urlpatterns = [
    # Profile maintenance endpoints.
    path('profile/delete/solution/<int:pk>/', api_views.delete_solution, name="delete_solution"),
    path('profile/delete/problem/<int:pk>/', api_views.delete_problem, name="delete_problem"),
    path('profile/stats/', api_views.ProfileStatsView.as_view(), name="profile_stats"),
    # Code analysis / problem persistence endpoints.
    path("code/analysis/", api_views.AnalysisView.as_view(), name="analysis"),
    path('code/save_problem/', api_views.SaveProblemView.as_view(), name="save_problem"),
    # Course management endpoints.
    path('courses/create_course/', api_views.create_course, name="create_course"),
    path('courses/enrol_course/<str:course_code>/', api_views.enrol_course, name="enrol_course"),
    path('courses/get_course/', api_views.get_course, name="get_course"),
    path('courses/delete/<str:del_type>/<int:del_id>/', api_views.delete_entity, name="delete_entity"),
    path('courses/get_global_problem_stats/<int:problem_id>/<int:course_id>/<str:role>/', api_views.get_global_problem_stats, name="get_global_problem_stats"),
    # path('save_problem/', code_views.SaveProblemView.as_view(), name="save_problem"),
]
| StarcoderdataPython |
156362 | <reponame>ckod3/vfxpipe<gh_stars>10-100
# test_utils.py -- Tests for git test utilities.
# Copyright (C) 2010 Google, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for git test utilities."""
from dulwich.object_store import (
MemoryObjectStore,
)
from dulwich.objects import (
Blob,
)
from dulwich.tests import (
TestCase,
)
from dulwich.tests.utils import (
make_object,
build_commit_graph,
)
class BuildCommitGraphTest(TestCase):
def setUp(self):
super(BuildCommitGraphTest, self).setUp()
self.store = MemoryObjectStore()
def test_linear(self):
c1, c2 = build_commit_graph(self.store, [[1], [2, 1]])
for obj_id in [c1.id, c2.id, c1.tree, c2.tree]:
self.assertTrue(obj_id in self.store)
self.assertEqual([], c1.parents)
self.assertEqual([c1.id], c2.parents)
self.assertEqual(c1.tree, c2.tree)
self.assertEqual([], list(self.store[c1.tree].iteritems()))
self.assertTrue(c2.commit_time > c1.commit_time)
def test_merge(self):
c1, c2, c3, c4 = build_commit_graph(self.store,
[[1], [2, 1], [3, 1], [4, 2, 3]])
self.assertEqual([c2.id, c3.id], c4.parents)
self.assertTrue(c4.commit_time > c2.commit_time)
self.assertTrue(c4.commit_time > c3.commit_time)
def test_missing_parent(self):
self.assertRaises(ValueError, build_commit_graph, self.store,
[[1], [3, 2], [2, 1]])
def test_trees(self):
a1 = make_object(Blob, data='aaa1')
a2 = make_object(Blob, data='aaa2')
c1, c2 = build_commit_graph(self.store, [[1], [2, 1]],
trees={1: [('a', a1)],
2: [('a', a2, 0o100644)]})
self.assertEqual((0o100644, a1.id), self.store[c1.tree]['a'])
self.assertEqual((0o100644, a2.id), self.store[c2.tree]['a'])
def test_attrs(self):
c1, c2 = build_commit_graph(self.store, [[1], [2, 1]],
attrs={1: {'message': 'Hooray!'}})
self.assertEqual('Hooray!', c1.message)
self.assertEqual('Commit 2', c2.message)
def test_commit_time(self):
c1, c2, c3 = build_commit_graph(self.store, [[1], [2, 1], [3, 2]],
attrs={1: {'commit_time': 124},
2: {'commit_time': 123}})
self.assertEqual(124, c1.commit_time)
self.assertEqual(123, c2.commit_time)
self.assertTrue(c2.commit_time < c1.commit_time < c3.commit_time)
| StarcoderdataPython |
1794609 | <reponame>Rajatkhatri7/Project-Milap
#!/usr/bin/env python3
from stack import Stack
def is_balanced(inp_str):
    """Return True if all brackets in *inp_str* are balanced and properly nested.

    Any character that is not an opening bracket is treated as a closing
    token (same semantics as the original is_match-based version): it must
    match the most recently opened bracket, otherwise the string is
    unbalanced.

    Improvements over the original: direct iteration instead of an
    index-while loop, early return instead of a flag variable that
    shadowed the function's own name, and a plain list as the stack.
    """
    # Map each closer to the opener it must pair with.
    closer_to_opener = {")": "(", "}": "{", "]": "["}
    pending = []  # stack of unmatched opening brackets
    for ch in inp_str:
        if ch in "({[":
            pending.append(ch)
        elif not pending or pending.pop() != closer_to_opener.get(ch):
            # Closer with no matching opener, or a mismatched pair.
            return False
    # Balanced only if every opener was consumed.
    return not pending
def is_match(p1, p2):
    """Return True when (p1, p2) form a matching open/close bracket pair."""
    # Table lookup replaces the original if/elif ladder.
    pairs = {"(": ")", "{": "}", "[": "]"}
    return pairs.get(p1) == p2
# Demo: exercise is_balanced on a few sample bracket strings.
print("String : (((({})))) Balanced or not?")
print(is_balanced("(((({}))))"))
print("String : [][]]] Balanced or not?")
print(is_balanced("[][]]]"))
print("String : [][] Balanced or not?")
print(is_balanced("[][]"))
1645440 | from . import db, login_manager
from datetime import datetime
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload the User for the session's user id."""
    return User.query.get(int(user_id))
# """
# Pitch class to define Pitch objects
# """
# all_pitches = []
# def __init__ (self,author, your_pitch, date_posted):
# self.your_pitch = your_pitch
# self.author = author
# self.date_posted = date_posted
# def save_pitch(self):
# Pitch.all_pitches.append(self)
# @classmethod
# def clear_pitches(cls):
# Pitch.all_pitches.clear()
class User (db.Model,UserMixin):
    """Application user; owns pitches via the Pitch.user_id foreign key."""
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String(50), unique = True, nullable = False)
    email = db.Column(db.String(150), unique = True, nullable = False)
    # Profile image filename; defaults to the bundled placeholder.
    image_file = db.Column(db.String(20), nullable = False, default ='mypic.jpg')
    # NOTE(review): nothing in this block hashes the password before storage;
    # the hashing property below is commented out -- confirm callers hash it.
    password = db.Column(db.String(60), nullable = False)
    pitches = db.relationship('Pitch', backref='author', lazy= True)
    # @property
    # def pass_secure(self):
    #     raise AttributeError("You cannot read the passwor attribute")
    # @pass_secure.setter
    # def pass_secure(self, pass_secure):
    #     self.password = generate_password_hash(pass_secure)
    # def verify_pass_secure(self, pass_secure):
    #     return check_password_hash(self.password, pass_secure)
    def __repr__(self):
        return f'User ("{self.username}","{self.email}","{self.image_file}")'
class Pitch (db.Model):
    """A pitch posted by a user; linked back to User through user_id."""
    id = db.Column(db.Integer, primary_key = True)
    # author = db.Column(db.String(50), nullable = False)
    # Timestamp assigned at insert time (UTC).
    date_posted = db.Column(db.DateTime, nullable = False, default= datetime.utcnow)
    your_pitch = db.Column(db.Text, nullable = False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable = False)
    def __repr__(self):
        return f'Pitch ("{self.your_pitch}","{self.date_posted}")'
3322775 | import argparse
import os
import pickle as pk
import keras
import numpy as np
from keras import optimizers
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.layers import (ELU, GRU, LSTM, BatchNormalization, Bidirectional,
Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D,
Reshape, TimeDistributed)
from keras.layers.noise import GaussianNoise
from keras.models import Model, Sequential, load_model
from keras.utils import to_categorical
from sklearn.feature_selection import RFECV
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.utils import shuffle
import models
SampFreq = 256
ChannelNum = 22
def get_parser(argv=None):
    """Parse command-line options for the multiple-win LOPO training jobs.

    Note: despite the name, this returns the parsed arguments
    (an argparse.Namespace), not the parser itself.

    :param argv: optional list of argument strings; defaults to
        sys.argv[1:] (backward compatible with the original no-arg call).
    :return: parsed argparse.Namespace; all values are strings or None.
    """
    parser = argparse.ArgumentParser(description='train multiple win LOPO jobs')
    # BUGFIX: help text typo "floder" -> "folder".
    parser.add_argument('-f', '--folder', help='data folder')
    parser.add_argument('-s', '--save_dir', help='save dir')
    parser.add_argument('-se', '--start_epoch', help='start epoch')
    parser.add_argument('-e', '--epoch', help='train epoch')
    parser.add_argument('-c', '--ckpt_file', help='ckpt file')
    parser.add_argument('-m', '--model', help='model name')
    return parser.parse_args(argv)
def load_data(data, label):
    """Load a data array and its label array from .npy files.

    :param data: path to the .npy file holding the samples.
    :param label: path to the .npy file holding the labels.
    :return: tuple of (data array, label array).
    """
    # BUGFIX: the original opened both files and never closed them;
    # context managers release the descriptors promptly.
    with open(data, 'rb') as data_file:
        t_data = np.load(data_file)
    with open(label, 'rb') as label_file:
        t_label = np.load(label_file)
    return t_data, t_label
def dataset_preprocess(data, label):
    """Shuffle data and labels in unison with a fixed seed for reproducibility.

    Uses sklearn.utils.shuffle (imported at module level), which keeps the
    rows of `data` and `label` aligned.

    :param data: samples array.
    :param label: labels array, same first-dimension length as `data`.
    :return: tuple of (shuffled data, shuffled labels).
    """
    # data shuffle
    s_data, s_label = shuffle(data, label, random_state=2018)
    return s_data, s_label
def main():
    """Train (or resume training of) an EEG model chosen on the command line.

    Loads train/val splits whose filenames embed the last five characters of
    the data folder, one-hot encodes the labels, builds a fresh model (or
    restores one from a checkpoint), then fits with per-epoch checkpointing
    and CSV logging into the save directory.
    """
    args = get_parser()
    data_path = args.folder
    save_dir = args.save_dir
    start_epoch = int(args.start_epoch)
    epoch = int(args.epoch)
    ckpt_path = args.ckpt_file
    model_name = args.model
    # Registry of model builders selectable via -m/--model.
    model_options = {
        'raw_cnn':models.raw_cnn,
        'shallowconv': models.shallow_conv_net,
        'deepconv': models.deep_conv_net,
        'orig_EEG': models.origin_EEG_net,
        'hyper_EEG': models.hyper_tune_EEGnet,
        'swwae': models.SWWAE_model}
    # File names embed an id: the last 5 characters of the data folder path.
    train_data, train_label = load_data(
        os.path.join(
            data_path, '{}-data-train.npy'.format(data_path[-5:])),
        os.path.join(
            data_path, '{}-label-train.npy'.format(data_path[-5:])))
    val_data, val_label = load_data(
        os.path.join(
            data_path, '{}-data-val.npy'.format(data_path[-5:])),
        os.path.join(
            data_path, '{}-label-val.npy'.format(data_path[-5:])))
    print('train_size:', np.shape(train_data))
    print('val_size:', np.shape(val_data))
    data_shape = np.shape(train_data[0])  # NOTE(review): unused variable
    train_label = to_categorical(train_label)
    val_label = to_categorical(val_label)
    train_label = np.asarray(train_label)
    val_label = np.asarray(val_label)
    if not ckpt_path:
        # Fresh model, built as model_options[...](2, 22, 768) -- presumably
        # (classes, channels, samples); ChannelNum = 22 above supports the
        # channel count. TODO confirm against the models module.
        model = model_options[model_name](2, 22, 768)
        model.compile(loss='categorical_crossentropy',
                      optimizer = optimizers.Adagrad(),
                      metrics = ['accuracy'])
    else:
        # Resume from a saved Keras checkpoint (includes optimizer state).
        model = load_model(ckpt_path)
    print(model.summary())
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)
    # Save a checkpoint every epoch, named with epoch number and val loss.
    checkpoint = ModelCheckpoint(
        os.path.join(save_dir, 'model.{epoch:04d}-{val_loss:.2f}.hdf5'))
    logger = CSVLogger(os.path.join(save_dir, "training-{}-{}.log.csv".format(start_epoch, epoch)))
    model.fit(
        train_data, train_label, batch_size=32,
        epochs=epoch, verbose=1,
        validation_data=(val_data, val_label), shuffle=True,
        initial_epoch=start_epoch, callbacks=[checkpoint, logger])
if __name__ == '__main__':
    # Entry point: parse CLI arguments and run the training loop.
    main()
| StarcoderdataPython |
1637815 | # Generate Delphi wrapper for HDF5 library.
# by <NAME>
from __future__ import print_function
import sys
import os.path
import argparse
import networkx as nx
import datetime
import re
from collections import *
from itertools import *
# CLI: a single optional positional argument, the directory containing
# the HDF5 public headers (defaults to the current directory).
parser = argparse.ArgumentParser(description = 'Generate Delphi wrapper for HDF5 library.')
parser.add_argument('srcdir', help = 'directory containing HDF5 *.h files.',
                    nargs = '?', default = '.')
args = parser.parse_args()
def parsedeps(header, graph):
    """Recursively record #include dependencies among HDF5 public headers.

    Appends *header* and each included H5*public.h to graph.onodes (an
    ordered, duplicate-free list attribute expected on *graph*), adds an
    edge header -> include for H5 headers, then recurses into the include.

    NOTE(review): the opened file object is never explicitly closed, and
    there is no visited-set guard on the recursion, so cyclic #includes
    would recurse forever -- presumably the HDF5 headers are acyclic;
    confirm before reusing on other header sets.
    """
    if header.startswith('H5') and header not in graph.onodes:
        graph.onodes.append(header)
    for line in open(os.path.join(args.srcdir, header)):
        m = re.match('#include "(H5.*public.h)".*', line)
        if m:
            include = m.group(1)
            if header.startswith('H5'):
                if include not in graph.onodes:
                    graph.onodes.append(include)
                graph.add_edge(header, include)
            parsedeps(include, graph)
# Accumulators for the generated Pascal source sections; filled in while
# parsing the headers and later substituted into the unit template.
defs = ''
classname = 'THDF5Dll'
types = ''
fields = ''
props = ''
init = ''
cinit = ''
template = \
'''unit hdf5dll;
// Delphi wrapper for HDF5 library.
// Auto-generated {date} by hdf5pas.py.
interface
uses
windows;
{{$ALIGN ON}}
{{$MINENUMSIZE 4}}
type
int32_t = Integer;
Pint32_t = ^int32_t;
uint32_t = Cardinal;
Puint32_t = ^uint32_t;
int64_t = Int64;
Pint64_t = ^int64_t;
uint64_t = UInt64;
Puint64_t = ^uint64_t;
time_t = NativeInt;
Ptime_t = ^time_t;
size_t = NativeUInt;
Psize_t = ^size_t;
ssize_t = NativeInt;
Pssize_t = ^ssize_t;
off_t = NativeInt;
Poff_t = ^off_t;
PFILE = Pointer;
type
hsize_t = UInt64;
Phsize_t = ^hsize_t;
hssize_t = Int64;
Phssize_t = ^hssize_t;
haddr_t = UInt64;
Phaddr_t = ^haddr_t;
const
HADDR_UNDEF = haddr_t(-1);
{defs}
type
{classname} = class
private
type
{types}
private
FHandle: THandle;
{fields}
public
constructor Create(APath: string);
destructor Destroy; override;
{props}
property Handle: THandle read FHandle;
function IsValid: Boolean;
end;
implementation
{{ {classname} }}
constructor {classname}.Create(APath: string);
function GetDllProc(AModule: THandle; AName: string): Pointer;
begin
Result := GetProcAddress(AModule, PChar(AName));
Assert(Assigned(Result));
end;
begin
inherited Create;
FHandle := LoadLibrary(PChar(APath));
{init}
H5open;
{cinit}
end;
destructor {classname}.Destroy;
begin
if FHandle <> 0 then
FreeLibrary(FHandle);
inherited;
end;
function {classname}.IsValid: Boolean;
begin
Result := (FHandle <> 0);
end;
end.
'''
def parse(header):
def smartjoin(sep, *args):
if args[0]:
return sep.join(args)
else:
return args[1]
def stripcomment(s):
return re.sub(' *(\(\*.*\*\))?$', '', s)
def strtoint(value):
value = re.sub('^\(\(.*\)\)$', r'\1', value.strip())
if value.startswith('('):
tokens = re.findall('(\((.*?)\)( *|$))|([^()]+$)', value)
value = (tokens[-1][1] or tokens[-1][3]).strip()
else:
tokens = None
value = value.rstrip('uL')
try:
result = int(value, 0)
if tokens:
for token in reversed(tokens[:-1]):
typ = token[1].strip()
(name, typ) = convnametype('', typ)
result = '{}({})'.format(typ, result)
except ValueError:
m = re.match('(.*) << (.*)', value)
if m:
result = '{} shl {}'.format(m.group(1), int(m.group(2), 0))
else:
return
return result
def strtofloat(value):
try:
value = value.rstrip('f')
return float(value)
except ValueError:
pass
def parseprocdecl(signature, istype):
signature = re.sub('\(\*[^()]*?\*\)', '', signature).replace('*', ' * ')
if istype:
(rettype, name, args) = re.match('(.*) ?\( \* ([^ ]*)\) ?\((.*)\);', signature).groups()
else:
(rettype, name, args) = re.match('(.*) ([^ ]*) ?\((.*)\);', signature).groups()
if args != 'void':
args = [s.strip() for s in args.split(',')]
else:
args = []
varargs = False
for i in range(len(args)):
arg = args[i].strip().split(' ')
if len([p for p in arg if p != '*']) < 2 and args[i] != '...':
arg.append('p')
atyp = ' '.join(arg[:-1])
aname = arg[-1]
(aname, atyp) = convnametype(aname, atyp, arraytypes = False)
if args[i] != '...':
args[i] = '{}: {}'.format(aname, atyp)
else:
args[i] = None
varargs = True
args = [s for s in args if s]
rettype = convnametype('', rettype, arraytypes = False)[-1]
return name, args, rettype, varargs
def getnametype(signature):
while ' ' in signature:
signature = signature.replace(' ', ' ')
m = re.match('([^\[\]]*)(\[(.+)\])?', signature.strip())
lexems = m.group(1).split(' ')
if lexems[0] == 'enum':
lexems = lexems[1:]
arr = m.group(2) or ''
return lexems[-1] + arr, ' '.join(lexems[:-1])
def convnametype(cname, ctype, arraytypes = True):
# Convert C-style variable/constant/field declaration to Delphi-style
def replace(where, olditems, newitem):
items = where
for item in olditems:
if item in items:
items = [s for s in items if s != item]
else:
return where
return items + [newitem]
typ = ctype.replace('*', ' * ')
while ' ' in typ:
typ = typ.replace(' ', ' ')
typ = typ.strip().split(' ')
stars = len([s for s in cname if s == '*'])
name = cname.strip('* ')
typ += ['*']*stars
if name.endswith('[]'):
name = name.rstrip('[]')
typ += ['*']
m = re.match('([^\[\]]*)(\[(.+)\])?', name)
arrsize = m.group(3)
name = m.group(1)
if name == 'type':
name = 'typ'
elif name == 'object':
name = 'obj'
elif name == 'end':
name = 'end_'
elif name == 'file':
name = 'file_'
typ = [s for s in typ if s != 'const']
typ = replace(typ, ['unsigned', 'long', 'long', 'int'], 'UInt64')
typ = replace(typ, ['unsigned', 'long', 'long'], 'UInt64')
typ = replace(typ, ['long', 'long', 'int'], 'Int64')
typ = replace(typ, ['long', 'long'], 'Int64')
typ = replace(typ, ['unsigned', 'long', 'int'], 'Cardinal')
typ = replace(typ, ['unsigned', 'long'], 'Cardinal')
typ = replace(typ, ['long', 'int'], 'Integer')
typ = replace(typ, ['long'], 'Integer')
typ = replace(typ, ['unsigned', 'short', 'int'], 'Word')
typ = replace(typ, ['unsigned', 'short'], 'Word')
typ = replace(typ, ['short', 'int'], 'ShortInt')
typ = replace(typ, ['short'], 'ShortInt')
typ = replace(typ, ['unsigned', 'int'], 'Cardinal')
typ = replace(typ, ['int'], 'Integer')
typ = replace(typ, ['unsigned', 'char'], 'Byte')
typ = replace(typ, ['char'], 'AnsiChar')
typ = replace(typ, ['unsigned'], 'Cardinal')
typ = replace(typ, ['bool'], 'Boolean')
typ = replace(typ, ['double'], 'Double')
if '*' in typ:
typ = replace(typ, ['void'], 'ointer')
stars = len([s for s in typ if s == '*'])
typ = 'P'*stars + ''.join([s for s in typ if s != '*'])
if arrsize:
if arraytypes:
if arrsize.endswith(' + 1'):
typ = 'array[0..{}] of {}'.format(arrsize[0:len(arrsize) - 4], typ)
else:
typ = 'array[0..{} - 1] of {}'.format(arrsize, typ)
else:
typ = 'P' + typ
return (name, typ)
def preprocess(lines):
'''
Parse and strip off pre-processor directives.
Currently all #if/#ifdef/#ifndef are considered as false.
'''
print('{}: Pre-processing...'.format(header), file = sys.stderr)
ifdef = 0
result = []
for line in lines:
line = line.strip('\n').expandtabs()
if line.strip() == '':
line = ''
m = re.match('(.*)(/\*.*)', line)
if m and not re.search('\*/', m.group(2)):
if m.group(1).strip() == '':
sublines = [m.group(2)]
else:
sublines = m.groups()
else:
sublines = [line]
for line in sublines:
line = line.replace('/*', '(*').replace('*/', '*)')
hdef = '_{}_H'.format(os.path.splitext(header)[0])
if re.match('#ifndef {}'.format(hdef), line) or \
re.match('#define {}'.format(hdef), line):
pass
elif line.startswith('#if') or \
line.startswith('#ifdef') or \
line.startswith('#ifndef'):
ifdef += 1
elif line.startswith('#endif'):
ifdef -= 1
elif not ifdef:
if line.startswith('#include') or line.startswith('#undef'):
pass
else:
result.append(line)
print('{}: {} of {} lines left'.format(header, len(result), len(lines)), file = sys.stderr)
return result
lines = open(os.path.join(args.srcdir, header)).readlines()
lines = preprocess(lines)
print('{}: Parsing...'.format(header), file = sys.stderr)
def process(state, stateinfo, comment):
def procdefine(lines):
'''
Process sequence of #define's.
'''
global props
result = ''
comment = False
for line in lines.split('\n'):
m = re.match(' *?(((\(\*)|( \*)).*)', line)
if m:
comment = True
if len(result) > 0:
result += '\n' + m.group(1)
else:
m = re.match(r'#define +(.*?) +([^\\]+)$', stripcomment(line))
if m:
comment = False
mm = re.search('\(\*(.*)\*\)', line.strip())
comment = mm.group(1) if mm else None
(name, value) = m.groups()
value = re.sub('^\((.*)\)$', r'\1', value)
value = re.sub('^H5CHECK ', '', value)
if name.startswith('H5F_ACC_'):
value = re.sub('^H5OPEN ', '', value)
value = value.replace('sizeof', 'SizeOf')
comment = ' '.join(['(*', comment.strip(), '*)'] if comment else '')
if '?' in value or ',' in value:
print('WARN: {}'.format(line), file = sys.stderr)
elif value.startswith('H5OPEN'):
props += ' property {}: hid_t read F{};\n'.format(name, value.split(' ')[-1].strip('_g'))
elif 'SIZEOF' in name:
pass
elif strtoint(value) != None:
result += '\n {} = {}; {}'.format(name, strtoint(value), comment)
elif strtofloat(value) != None:
result += '\n {} = {}; {}'.format(name, strtofloat(value), comment)
elif value.startswith('"') and value.endswith('"'):
result += "\n {} = '{}'; {}".format(name, value.strip('"'), comment)
elif len(value.split('|')) > 1:
result += '\n {} = {}; {}'.format(name,
' or '.join([item.strip()
for item in value.split('|')]),
comment)
elif name.startswith('H5T_INTEL') or \
name.startswith('H5T_ALPHA') or \
name.startswith('H5T_MIPS'):
props += ' property {}: hid_t read F{};\n'.format(name, value)
else:
result += '\n {} = {}; {}'.format(name, value, comment)
elif comment:
result += '\n' + line
else:
print('WARN: {}'.format(line), file = sys.stderr)
return result
def proctypedef(lines):
    '''
    Process a sequence of C typedefs and return the equivalent Pascal type
    declarations as a single string.

    ``lines`` is a newline-joined chunk of C source; each ``typedef enum``,
    ``typedef struct`` or plain ``typedef`` opens a new sub-declaration that
    is accumulated into ``stateinfo`` and emitted by the nested ``process``.

    NOTE: all regex patterns are raw strings — the original used plain
    strings with ``\(`` / ``\[`` escapes, which raises SyntaxWarning on
    Python >= 3.12.
    '''
    def process(prevstate, state, stateinfo):
        '''
        Process one typedef (the accumulated lines in ``stateinfo``).
        '''
        result = ''
        # A one-line "typedef enum/struct X Y;" is really a type synonym.
        if len(stateinfo) == 1:
            if state == 'enum':
                stateinfo[0] = stateinfo[0].replace('typedef enum', 'typedef')
            elif state == 'struct':
                stateinfo[0] = stateinfo[0].replace('typedef struct', 'typedef')
            state = 'other'
        if state == 'enum':
            '''
            Enumerated type declaration.
            '''
            result += '\ntype'
            name = stateinfo[-1].strip('}; ') or stateinfo[0].split(' ')[2]
            result += '\n  P{name} = ^{name};'.format(name = name)
            result += '\n  {} ='.format(name)
            lines = list()
            Line = namedtuple('Line', ['line', 'name', 'value', 'comment'])
            lastname = None
            for line in stateinfo[1:len(stateinfo) - 1]:
                if stripcomment(line).strip() == '{':
                    continue
                # "NAME" or "NAME = VALUE" optionally followed by a comma.
                m = re.match(' *([^ *(),]+)( *= ?([^,]+))?,?', stripcomment(line))
                if m:
                    (name, dummy, value) = m.groups()
                    value = strtoint(value) if value else None
                    mm = re.search(r'\(\*(.*)\*\)', line.strip())
                    comment = mm.group(1) if mm else None
                    comment = ' '.join(['(*', comment.strip(), '*)'] if comment else '')
                    lines.append(Line(line = None, name = name, value = value, comment = comment))
                    lastname = name
                elif not stripcomment(line).strip():
                    lines.append(Line(line = line.strip(), name = None, value = None, comment = None))
                elif re.match(r' *([( ]\*.*)', line):
                    # Comment-only line: keep it, trimmed to the comment text.
                    lines.append(Line(line = re.sub(r' *([( ]\*.*)', r'\1', line), name = None, value = None, comment = None))
                else:
                    print('WARN: {}'.format(line), file = sys.stderr)
            firstline = True
            for line in lines:
                if line.line != None:
                    result += '\n' + line.line
                else:
                    # First member opens the '(' list; the last one closes with ');'.
                    result += '\n    {}{}{}{} {}'.format(
                        '(' if firstline else ' ', line.name, ' = {}'.format(line.value) if line.value else '',
                        ');' if line.name == lastname else ',', line.comment)
                    firstline = False
        elif state == 'struct':
            '''
            Compound type (struct) declaration.
            '''
            result += '\ntype'
            def procstruct(lines, offset, pointertypes = False):
                # Recursively convert a struct/union body; ``offset`` is the
                # Pascal indentation depth, ``pointertypes`` additionally
                # emits P/PP pointer aliases for the top-level record.
                result = ''
                typename = lines[-1].strip('}; ') or lines[0].split(' ')[2]
                if pointertypes:
                    result += '\n{}P{name} = ^{name};'.format(' '*offset, name = typename)
                    result += '\n{}PP{name} = ^P{name};'.format(' '*offset, name = typename)
                    result += '\n{}{} = record'.format(' '*offset, typename)
                else:
                    result += '\n{}{}: record'.format(' '*offset, typename)
                item = ''
                nested = []
                for line in lines[1:len(lines) - 1]:
                    if stripcomment(line).strip() in ('', '{'):
                        continue
                    # Accumulate until the member declaration is complete.
                    item += line.strip()
                    if stripcomment(item).strip()[-1] not in ('{', ';'):
                        continue
                    mm = re.search(r'\(\*(.*)\*\)$', item.strip())
                    comment = ' '.join(['(*', mm.group(1).strip(), '*)'] if mm else '')
                    if item.startswith('struct') or item.startswith('union'):
                        nested += [item]
                    elif nested:
                        nested += [item]
                        if item.startswith('}'):
                            if nested[0].startswith('union'):
                                # Pascal variant record for a C union.
                                result += '\n  {}case Integer of'.format(' '*offset)
                                for n, line in zip(count(1), nested[1:len(nested) - 1]):
                                    mm = re.search(r'\(\*(.*)\*\)$', line.strip())
                                    comment = ' '.join(['(*', mm.group(1).strip(), '*)'] if mm else '')
                                    (cname, ctype) = getnametype(stripcomment(line).rstrip(';'))
                                    (name, typ) = convnametype(cname, ctype)
                                    result += '\n    {}{}: ({}: {}); {}'.format(' '*offset, n, name, typ, comment).rstrip()
                            else:
                                result += procstruct(nested, offset + 2)
                            nested = []
                    else:
                        if item.endswith(');'):
                            # Function-pointer member.
                            name, args, rettype, varargs = parseprocdecl(item, True)
                            if typename == 'H5FD_class_t':
                                # Break the forward-reference cycle on PH5FD_t.
                                args = [arg.replace('PH5FD_t', 'Pointer {PH5FD_t}') for arg in args]
                                rettype = rettype.replace('PH5FD_t', 'Pointer {PH5FD_t}')
                            if args:
                                args = '({})'.format('; '.join(args))
                            else:
                                args = ''
                            if rettype == 'void':
                                result += '\n  {}{}: procedure{}; cdecl; {}'.format(' '*offset, name, args, comment).rstrip()
                            else:
                                result += '\n  {}{}: function{}: {}; cdecl; {}'.format(' '*offset, name, args, rettype, comment).rstrip()
                        else:
                            (cname, ctype) = getnametype(stripcomment(item).rstrip(';'))
                            (name, typ) = convnametype(cname, ctype)
                            if typename == 'H5FD_class_t':
                                typ = typ.replace('array[0..H5FD_MEM_NTYPES - 1]', 'array[H5FD_MEM_DEFAULT..Pred(H5FD_MEM_NTYPES)]')
                            result += '\n  {}{}: {}; {}'.format(' '*offset, name, typ, comment).rstrip()
                    item = ''
                result += '\n{}end;'.format(' '*offset)
                return result
            result += procstruct(stateinfo, 2, True)
        elif state == 'other':
            # Split trailing comment lines from the declaration proper.
            comments = None
            for i in range(len(stateinfo)):
                if stateinfo[i].startswith('(*'):
                    comments = stateinfo[i:]
                    stateinfo = stateinfo[:i]
                    break
            if len(stateinfo) == 1 and re.match('typedef *([^(),]*) +([^(),]*);', stateinfo[0]):
                '''
                Type synonym.
                '''
                (typ, name) = re.match('typedef *([^(),]*) +([^(),]*);', stateinfo[0]).groups()
                (name, typ) = convnametype(name.strip(), typ.strip())
                if name != typ:
                    if prevstate != 'other':
                        result += 'type\n'
                    if name.endswith(']'):
                        result += '  P{} = P{};'.format(re.sub(r'\[.*', '', name), typ)
                    else:
                        result += '  {} = {};'.format(name, typ)
                        result += '\n  P{name} = ^{name};'.format(name = name)
            else:
                '''
                Procedural type declaration.
                '''
                if prevstate != 'other':
                    result += 'type\n'
                signature = ' '.join(stateinfo)
                name, args, rettype, varargs = parseprocdecl(re.match('typedef (.*;)', signature.strip()).group(1), True)
                if rettype == 'void':
                    result += '  {} = procedure({}); cdecl;'.format(name, '; '.join(args))
                else:
                    result += '  {} = function({}): {}; cdecl;'.format(name, '; '.join(args), rettype)
                result += '\n  P{name} = ^{name};'.format(name = name)
            if comments:
                result += '\n'.join([''] + comments)
        return result
    result = ''
    prevstate = None
    state = None
    stateinfo = []
    for line in lines.split('\n'):
        # Bare "enum"/"struct" declarations are handled like typedefs.
        line = re.sub('^enum', 'typedef enum', line)
        line = re.sub('^struct', 'typedef struct', line)
        if line.startswith('typedef enum'):
            result += '\n' + process(prevstate, state, stateinfo)
            prevstate = state
            state = 'enum'
            stateinfo = []
        elif line.startswith('typedef struct'):
            result += '\n' + process(prevstate, state, stateinfo)
            prevstate = state
            state = 'struct'
            stateinfo = []
        elif line.startswith('typedef '):
            result += '\n' + process(prevstate, state, stateinfo)
            prevstate = state
            state = 'other'
            stateinfo = []
        if state:
            stateinfo.append(line)
        else:
            print('WARN: {}'.format(line), file = sys.stderr)
    if state:
        result += '\n' + process(prevstate, state, stateinfo)
    return result
def procexport(lines):
    '''
    Process a sequence of exported symbols (``H5_DLLVAR`` variables and
    ``H5_DLL`` procedures), appending the generated Pascal declarations to
    the module-level accumulators ``types``/``fields``/``props``/``init``/
    ``cinit``.

    NOTE: the comment-stripping regex is now a raw string — the original
    plain string contained the invalid escape ``\*`` (SyntaxWarning on
    Python >= 3.12).
    '''
    global defs, types, fields, props, init, cinit
    signature = None
    for line in lines.split('\n'):
        if line.startswith('(*') or line.startswith(' *'):
            continue
        # Strip inline C "/*...*/" and Pascal "(*...*)" comments.
        line = re.sub(r'[(/]\*.*?\*[)/]', '', line.strip())
        if line.startswith('H5_DLLVAR'):
            '''
            Exported variable.
            '''
            (dummy, ctype, cname) = line.split(' ')
            # NOTE(review): strip('_g;') removes any of '_', 'g', ';' from
            # both ends, not just a trailing '_g;' suffix — confirm no
            # exported name legitimately starts/ends with those characters.
            cname = cname.strip('_g;')
            (cname, ctype) = convnametype(cname, ctype)
            fields += '    F{}: {};\n'.format(cname, ctype)
            cinit += "    F{cname} := P{ctype}(GetDllProc(FHandle, '{cname}_g'))^;\n".format(cname = cname, ctype = ctype)
        else:
            '''
            Exported procedure.
            '''
            # Declarations may span several lines; accumulate until ')'.
            signature = smartjoin(' ', signature, line)
            if not ')' in line:
                continue
            signature = signature.replace(' (', '(')
            fname, args, rettype, varargs = parseprocdecl(re.match('H5_DLL (.*;)', signature.strip()).group(1), False)
            if len(args) > 0:
                fdef = '(' + '; '.join(args) + ')'
            else:
                fdef = ''
            fdef = fdef + ': ' + rettype
            if varargs:
                # Pascal cdecl varargs bindings are emitted commented-out.
                types += '    // T{} = function{}; cdecl; varargs;\n'.format(fname, fdef)
                fields += '    // F{}: T{};\n'.format(fname, fname)
                props += '    // property {}: T{} read {};\n'.format(fname, fname, fname)
                print('ERROR: Ignoring varargs procedure {}.'.format(fname), file = sys.stderr)
            else:
                types += '    T{} = function{}; cdecl;\n'.format(fname, fdef)
                fields += '    F{}: T{};\n'.format(fname, fname)
                props += '    property {}: T{} read F{};\n'.format(fname, fname, fname)
                init += "    @F{0} := GetDllProc(FHandle, '{0}');\n".format(fname)
            signature = None
    # NOTE(review): this looks like the body of a dispatcher function
    # (its `def` line appears to have been lost) that routes one accumulated
    # chunk of header text to procdefine/proctypedef/procexport — confirm
    # against the original source.
    global defs, types, fields, props, init, cinit
    if stateinfo:
        stateinfo = stateinfo.strip('\n')
        if state == 'define':
            # #define block: emitted as a Pascal `const` section.
            newdefs = procdefine(stateinfo).lstrip('\n')
            if len(newdefs) > 0:
                if comment:
                    defs += '\n'
                    defs += comment.strip('\n') + '\n'
                defs += 'const\n'
                defs += newdefs
                defs += '\n'
        elif state == 'typedef':
            # typedef block: proctypedef emits its own `type` keyword.
            newdefs = proctypedef(stateinfo).lstrip('\n')
            if len(newdefs) > 0:
                if comment:
                    defs += '\n'
                    defs += comment.strip('\n') + '\n'
                defs += newdefs
                defs += '\n'
        elif state == 'export':
            # procexport appends to the global accumulators itself.
            newdefs = procexport(stateinfo)
# Module-level parser state: current chunk kind, accumulated text, and the
# pending comment to attach to the next emitted declaration.
global state, stateinfo, comment
state = None
stateinfo = None
comment = None
def setstate(newstate):
    '''
    Flush any accumulated parser input via process() and switch the global
    parser state to *newstate*.

    A chunk ending in a newline (i.e. terminated by a blank line) is always
    flushed; otherwise a flush only happens on an actual state transition.
    '''
    global state, stateinfo, comment
    if stateinfo and stateinfo.endswith('\n'):
        # Blank-line terminator: emit the pending chunk and drop the comment.
        if state:
            process(state, stateinfo, comment)
        comment = None
    elif newstate != state:
        if newstate == 'comment':
            # Never let a trailing comment interrupt an in-progress chunk.
            if state:
                return
        elif state == 'comment':
            # A finished comment becomes the annotation for what follows.
            comment = stateinfo
        process(state, stateinfo, comment)
        if state != 'comment':
            comment = None
    else:
        # Same state, no terminator: keep accumulating.
        return
    state = newstate
    stateinfo = None
# Classify each header line and drive the state machine; the trailing ''
# sentinel forces a final blank-line flush inside setstate().
# NOTE(review): `lines` and `header` are defined out of view — this loop
# presumably sits inside a per-header parse routine; confirm before moving.
for line in lines + ['']:
    if line.startswith('(*') or line.startswith(' *'):
        setstate('comment')
    elif not state and line.lstrip(' ').startswith('(*'):
        # Indented comment only starts a comment chunk when idle.
        setstate('comment')
    elif line.startswith('#define'):
        setstate('define')
    elif line.startswith('typedef') or \
         line.startswith('struct') or \
         line.startswith('enum'):
        setstate('typedef')
    elif line.startswith('H5_DLL'):
        setstate('export')
    elif line and not state:
        # Unrecognised top-level construct: abort with context.
        raise Exception(header, line)
    if state:
        stateinfo = smartjoin('\n', stateinfo, line)
# Flush whatever is still pending at end of input.
setstate(None)
# Terminate the progress output on stderr.
print(file = sys.stderr)
# Build the #include dependency graph rooted at hdf5.h, then parse headers
# in dependency order (fewest reachable nodes first).
graph = nx.DiGraph()
graph.onodes = []
parsedeps('hdf5.h', graph)
# NOTE(review): in networkx >= 2.0 all_pairs_shortest_path_length returns an
# iterator of (node, dict) pairs, so `paths[header]` below would fail —
# wrap in dict(...) if running against a modern networkx.
paths = nx.all_pairs_shortest_path_length(graph)
for header in sorted(graph.onodes, key = lambda header: len(paths[header])):
    parse(header)
# Render the accumulated Pascal sections into the unit template and print it.
for line in template.format(date = datetime.date.today(),
                            defs = defs.strip('\n'),
                            classname = classname,
                            types = types.strip('\n'),
                            fields = fields.strip('\n'),
                            props = props.strip('\n'),
                            init = init.strip('\n'),
                            cinit = cinit.strip('\n')).split('\n'):
    print(line.rstrip())
| StarcoderdataPython |
1787002 | <reponame>bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
__all__ = [
"draw_label",
"bbox_polygon",
"draw_mask",
"as_rgb_tuple",
"get_default_font",
]
from icevision.imports import *
from icevision.utils import *
from matplotlib import patches
from PIL import Image, ImageFont, ImageDraw
import PIL
def draw_label(ax, x, y, name, color, fontsize=18):
    """Draw *name* in white on *ax*, slightly offset from ``(x, y)``,
    inside a rounded box filled and edged with *color*."""
    box_style = {"facecolor": color, "edgecolor": color, "pad": 2, "alpha": 0.9}
    ax.text(x + 1, y - 2, name, fontsize=fontsize, color="white", va="bottom", bbox=box_style)
def bbox_polygon(bbox):
    """Return a matplotlib ``Polygon`` patch covering *bbox* (its ``xywh`` extent)."""
    left, top, width, height = bbox.xywh
    corners = np.array(
        [[left, top], [left, top + height], [left + width, top + height], [left + width, top]]
    )
    return patches.Polygon(corners)
def draw_mask(ax, mask, color):
    """Overlay *mask* on *ax* as a half-transparent tint of *color* and
    outline its contour in the same color."""
    tint = np.ones(mask.shape + (3,)) * color
    alpha_channel = mask * 0.5
    ax.imshow(np.dstack((tint, alpha_channel)))
    ax.contour(mask, colors=[tint[0, 0, :]], alpha=0.4)
def as_rgb_tuple(x: Union[np.ndarray, tuple, list, str]) -> tuple:
    """Convert an RGB specification to a plain tuple for PIL compatibility.

    Args:
        x: An RGB triplet as an ndarray/tuple/list of 3 numbers, or a color
           string understood by ``PIL.ImageColor.getrgb`` (e.g. ``"red"``,
           ``"#ff0000"``).

    Returns:
        tuple: The ``(R, G, B)`` values.

    Raises:
        ValueError: If a sequence input does not have exactly 3 elements, or
            the input type is unsupported.
    """
    if isinstance(x, (np.ndarray, tuple, list)):
        if not len(x) == 3:
            raise ValueError(f"Expected 3 (RGB) numbers, got {len(x)}")
        if isinstance(x, np.ndarray):
            # `np.int` was removed in NumPy 1.24; truncating each channel via
            # builtin `int` matches the old `astype(np.int)` behavior.
            return tuple(int(v) for v in x)
        elif isinstance(x, tuple):
            return x
        elif isinstance(x, list):
            return tuple(x)
    elif isinstance(x, str):
        return PIL.ImageColor.getrgb(x)
    else:
        raise ValueError(f"Expected {{np.ndarray|list|tuple}}, got {type(x)}")
def get_default_font() -> str:
    """Return the filesystem path to the default ``.ttf`` font.

    Ensures ``<root>/fonts`` exists and downloads SpaceGrotesk-Medium.ttf
    into it on first use; subsequent calls reuse the cached file.
    """
    import requests

    fonts_root = get_root_dir() / "fonts"
    fonts_root.mkdir(exist_ok=True)
    target = fonts_root / "SpaceGrotesk-Medium.ttf"
    if not target.exists():
        URL = "https://raw.githubusercontent.com/airctic/storage/master/SpaceGrotesk-Medium.ttf"
        logger.info(
            "Downloading default `.ttf` font file - SpaceGrotesk-Medium.ttf from {} to {}",
            URL,
            target,
        )
        target.write_bytes(requests.get(URL).content)
    return str(target)
| StarcoderdataPython |
41669 | from ..base import OptionsGroup
from ..exceptions import ConfigurationError
from ..utils import KeyValue, filter_locals
from .subscriptions_algos import *
class Subscriptions(OptionsGroup):
    """
    This allows some uWSGI instances to announce their presence to subscriptions managing server,
    which in its turn can address those nodes (e.g. delegate request processing to them)
    and automatically remove dead nodes from the pool.

    Some routers provide subscription server functionality. See `.routing.routers`.

    .. note:: Subscription system in many ways relies on Master Process.

    .. warning:: The subscription system is meant for "trusted" networks.
        All of the nodes in your network can potentially make a total mess with it.

    * http://uwsgi.readthedocs.io/en/latest/SubscriptionServer.html

    """
    class algorithms:
        """Balancing algorithms available to use with ``subscribe``."""

        ip_hash = IpHash
        least_reference_count = LeastReferenceCount
        weighted_least_reference_count = WeightedLeastReferenceCount
        weighted_round_robin = WeightedRoundRobin

    def set_server_params(
            self, client_notify_address=None, mountpoints_depth=None, require_vassal=None,
            tolerance=None, tolerance_inactive=None, key_dot_split=None):
        """Sets subscription server related params.

        :param str client_notify_address: Set the notification socket for subscriptions.
            When you subscribe to a server, you can ask it to "acknowledge" the acceptance of your request.
            pointing address (Unix socket or UDP), on which your instance will bind and
            the subscription server will send acknowledgements to.

        :param int mountpoints_depth: Enable support of mountpoints of certain depth for subscription system.

            * http://uwsgi-docs.readthedocs.io/en/latest/SubscriptionServer.html#mountpoints-uwsgi-2-1

        :param bool require_vassal: Require a vassal field (see ``subscribe``) from each subscription.

        :param int tolerance: Subscription reclaim tolerance (seconds).

        :param int tolerance_inactive: Subscription inactivity tolerance (seconds).

        :param bool key_dot_split: Try to fallback to the next part in (dot based) subscription key.
            Used, for example, in SNI.

        """
        # todo notify-socket (fallback) relation
        self._set('subscription-notify-socket', client_notify_address)
        self._set('subscription-mountpoint', mountpoints_depth)
        self._set('subscription-vassal-required', require_vassal, cast=bool)
        self._set('subscription-tolerance', tolerance)
        self._set('subscription-tolerance-inactive', tolerance_inactive)
        self._set('subscription-dotsplit', key_dot_split, cast=bool)

        return self._section

    def set_server_verification_params(
            self, digest_algo=None, dir_cert=None, tolerance=None, no_check_uid=None,
            dir_credentials=None, pass_unix_credentials=None):
        """Sets peer verification params for subscription server.

        These are for secured subscriptions.

        :param str digest_algo: Digest algorithm. Example: SHA1

            .. note:: Also requires ``dir_cert`` to be set.

        :param str dir_cert: Certificate directory.

            .. note:: Also requires ``digest_algo`` to be set.

        :param int tolerance: Maximum tolerance (in seconds) of clock skew for secured subscription system.
            Default: 24h.

        :param str|int|list[str|int] no_check_uid: Skip signature check for the specified uids
            when using unix sockets credentials.

        :param str|list[str] dir_credentials: Directories to search for subscriptions
            key credentials.

        :param bool pass_unix_credentials: Enable management of SCM_CREDENTIALS in subscriptions UNIX sockets.

        """
        # Both the algorithm and the certificate directory are required to
        # enable signature checking; setting only one is silently ignored.
        if digest_algo and dir_cert:
            self._set('subscriptions-sign-check', f'{digest_algo}:{dir_cert}')

        self._set('subscriptions-sign-check-tolerance', tolerance)
        self._set('subscriptions-sign-skip-uid', no_check_uid, multi=True)
        self._set('subscriptions-credentials-check', dir_credentials, multi=True)
        self._set('subscriptions-use-credentials', pass_unix_credentials, cast=bool)

        return self._section

    def set_client_params(
            self, start_unsubscribed=None, clear_on_exit=None, unsubscribe_on_reload=None,
            announce_interval=None):
        """Sets subscribers related params.

        :param bool start_unsubscribed: Configure subscriptions but do not send them.

            .. note:: Useful with master FIFO.

        :param bool clear_on_exit: Force clear instead of unsubscribe during shutdown.

        :param bool unsubscribe_on_reload: Force unsubscribe request even during graceful reload.

        :param int announce_interval: Send subscription announce at the specified interval. Default: 10 master cycles.

        """
        self._set('start-unsubscribed', start_unsubscribed, cast=bool)
        self._set('subscription-clear-on-shutdown', clear_on_exit, cast=bool)
        self._set('unsubscribe-on-graceful-reload', unsubscribe_on_reload, cast=bool)
        self._set('subscribe-freq', announce_interval)

        return self._section

    def subscribe(
            self, server=None, key=None, address=None, address_vassal=None,
            balancing_weight=None, balancing_algo=None, modifier=None, signing=None, check_file=None, protocol=None,
            sni_cert=None, sni_key=None, sni_client_ca=None):
        """Registers a subscription intent.

        :param str server: Subscription server address (UDP or UNIX socket).

            Examples:
                * 127.0.0.1:7171

        :param str key: Key to subscribe. Generally the domain name (+ optional '/< mountpoint>').

            Examples:
                * mydomain.it/foo
                * mydomain.it/foo/bar (requires ``mountpoints_depth=2``)
                * mydomain.it
                * ubuntu64.local:9090

        :param str|int address: Address to subscribe (the value for the key)
            or zero-based internal socket number (integer).

        :param str address_vassal: Vassal node address.

        :param int balancing_weight: Load balancing value. Default: 1.

        :param balancing_algo: Load balancing algorithm to use. See ``balancing_algorithms``

            .. note:: Since 2.1

        :param Modifier modifier: Routing modifier object. See ``.routing.modifiers``

        :param list|tuple signing: Signing basics, expects two elements list/tuple:
            (signing_algorithm, key).

            Examples:
                * SHA1:idlessh001

        :param str check_file: If this file exists the subscription packet is sent,
            otherwise it is skipped.

        :param str protocol: the protocol to use, by default it is ``uwsgi``.
            See ``.networking.socket_types``.

            .. note:: Since 2.1

        :param str sni_cert: Certificate file to use for SNI proxy management.

            * http://uwsgi.readthedocs.io/en/latest/SNI.html#subscription-system-and-sni

        :param str sni_key: sni_key Key file to use for SNI proxy management.

            * http://uwsgi.readthedocs.io/en/latest/SNI.html#subscription-system-and-sni

        :param str sni_client_ca: Ca file to use for SNI proxy management.

            * http://uwsgi.readthedocs.io/en/latest/SNI.html#subscription-system-and-sni

        """
        # todo params: inactive (inactive slot activation)

        if not any((server, key)):
            raise ConfigurationError('Subscription requires `server` or `key` to be set.')

        address_key = 'addr'
        if isinstance(address, int):
            address_key = 'socket'

        # NOTE: the local names created below (`backup`, `modifier1`,
        # `modifier2`) are intentional — KeyValue picks them up from
        # `locals()` via filter_locals(), so they are not dead assignments.
        if balancing_algo:
            backup = getattr(balancing_algo, 'backup_level', None)

        if signing:
            signing = ':'.join(signing)

        if modifier:
            modifier1 = modifier
            if modifier.submod:
                modifier2 = modifier.submod

        rule = KeyValue(
            filter_locals(locals(), drop=['address_key', 'modifier']),
            aliases={
                'address': address_key,
                'address_vassal': 'vassal',
                'signing': 'sign',
                'check_file': 'check',
                'balancing_weight': 'weight',
                'balancing_algo': 'algo',
                'protocol': 'proto',
                'sni_cert': 'sni_crt',
                'sni_client_ca': 'sni_ca',
            },
        )

        self._set('subscribe2', rule)

        return self._section
| StarcoderdataPython |
137429 | <filename>meshpy/meshpy/image_converter.py
"""
Classes to convert binary images to extruded meshes
Author: <NAME>
"""
import IPython
import logging
import numpy as np
import os
from PIL import Image, ImageDraw
import sklearn.decomposition
import sys
import matplotlib.pyplot as plt
import skimage.morphology as morph
from skimage.transform import resize
from autolab_core import RigidTransform
from meshpy import Mesh3D
from autolab_core import BinaryImage
class ImageToMeshConverter:
    """ Namespace class for converting binary images to SDFs and meshes. """

    @staticmethod
    def binary_image_to_mesh(binary_im, extrusion=1000, scale_factor=1.0):
        """
        Converts a binary image to a 3D extruded polygonal mesh

        Parameters
        ----------
        binary_im : :obj:`perception.BinaryImage`
            binary image for silhouette
        extrusion : float
            amount to extrude the polygon in meters
        scale_factor : float
            amount to rescale the final mesh (from units of pixels to meters)

        Returns
        -------
        :obj:`Mesh3D`
            the resulting mesh

        Raises
        ------
        :obj:`ValueError`
            if the triangulation was not successful due to topology or other factors
        """
        # check valid input
        if not isinstance(binary_im, BinaryImage):
            raise ValueError('Must provide perception.BinaryImage as input')

        # get occupied indices from binary image
        binary_data = binary_im.data
        occ_coords = binary_im.nonzero_pixels()

        # create mesh faces and concatenate: two parallel copies of the
        # silhouette, one at +extrusion/2 and one at -extrusion/2, with
        # opposite winding so both outward normals point away from the solid.
        front_face_depth = extrusion / 2.0
        back_face_depth = -extrusion / 2.0
        front_verts, front_tris, front_ind_map = ImageToMeshConverter.create_mesh_face(occ_coords, front_face_depth,
                                                                                      binary_data.shape, cw=True)
        back_verts, back_tris, back_ind_map = ImageToMeshConverter.create_mesh_face(occ_coords, back_face_depth,
                                                                                    binary_data.shape, cw=False)
        verts, tris = ImageToMeshConverter.join_vert_tri_lists(front_verts, front_tris, back_verts, back_tris)
        num_verts = len(front_verts)
        # offset the back-face index map to account for concatenation order
        back_ind_map = back_ind_map + num_verts

        # connect boundaries
        boundary_im = binary_im.boundary_map()
        ImageToMeshConverter.add_boundary_tris(boundary_im, verts, tris, front_ind_map, back_ind_map)

        # convert to mesh and clean
        m = Mesh3D(verts, tris)
        m.remove_unreferenced_vertices()
        # rotate 90 degrees about z to convert image (row, col) coords into
        # the object frame expected downstream
        T_im_world = RigidTransform(rotation=np.array([[0, 1, 0],
                                                       [-1, 0, 0],
                                                       [0, 0, 1]]),
                                    from_frame='obj',
                                    to_frame='obj')
        m = m.transform(T_im_world)
        m.rescale_dimension(scale_factor, Mesh3D.ScalingTypeRelative)
        return m

    @staticmethod
    def join_vert_tri_lists(verts1, tris1, verts2, tris2):
        """
        Concatenates two lists of vertices and triangles.

        Parameters
        ----------
        verts1 : :obj:`list` of 3-:obj:`list` of float
            first list of vertices
        tris1 : :obj:`list` of 3-:obj`list` of int
            first list of triangles
        verts2 : :obj:`list` of 3-:obj:`list` of float
            second list of vertices
        tris2 : :obj:`list` of 3-:obj`list` of int
            second list of triangles

        Returns
        -------
        verts : :obj:`list` of 3-:obj:`list` of float
            joined list of vertices
        tris : :obj:`list` of 3-:obj`list` of int
            joined list of triangles
        """
        num_verts1 = len(verts1)

        # simple append for verts
        verts = list(verts1)
        verts.extend(verts2)

        # offset and append triangle (vertex indices)
        tris = list(tris1)
        tris2_offset = [[num_verts1 + t[0], num_verts1 + t[1], num_verts1 + t[2]] for t in tris2]
        tris.extend(tris2_offset)
        return verts, tris

    @staticmethod
    def add_boundary_tris(boundary_im, verts, tris, front_ind_map, back_ind_map):
        """
        Connects front and back faces along the boundary, modifying tris IN PLACE.

        NOTE: Right now this only works for points topologically equivalent to a sphere, eg. no holes!
        This can be extended by parsing back over untriangulated boundary points.

        Parameters
        ----------
        boundary_im : :obj:`perception.BinaryImage`
            binary image of the boundary
        verts : :obj:`list` of 3-:obj:`list` of float
            list of vertices
        tris : :obj:`list` of 3-:obj`list` of int
            list of triangles
        front_ind_map : :obj:`numpy.ndarray`
            maps vertex coords to the indices of their front face vertex in list
        back_ind_map : :obj:`numpy.ndarray`
            maps vertex coords to the indices of their back face vertex in list

        Raises
        ------
        :obj:`ValueError`
            triangulation failed
        """
        # TODO: fix multiple connected comps
        # setup variables for boundary coords
        upper_bound = np.iinfo(np.uint8).max
        remaining_boundary = boundary_im.data.copy()
        boundary_ind = np.where(remaining_boundary == upper_bound)
        boundary_coords = list(zip(boundary_ind[0], boundary_ind[1]))
        if len(boundary_coords) == 0:
            raise ValueError('No boundary coordinates')

        # setup inital vars
        tris_arr = np.array(tris)
        visited_map = np.zeros(boundary_im.shape)
        another_visit_avail = True

        # make sure to start with a reffed tri
        visited_marker = 128
        finished = False
        it = 0
        i = 0
        coord_visits = []
        while not finished:
            finished = True
            logging.info('Boundary triangulation iter %d' %(it))
            # scan forward for the next unvisited boundary pixel that is
            # actually referenced by some face triangle
            reffed = False
            while not reffed and i < len(boundary_coords):
                cur_coord = boundary_coords[i]
                if visited_map[cur_coord[0], cur_coord[1]] == 0:
                    visited_map[cur_coord[0], cur_coord[1]] = 1
                    front_ind = front_ind_map[cur_coord[0], cur_coord[1]]
                    back_ind = back_ind_map[cur_coord[0], cur_coord[1]]
                    ref_tris = np.where(tris_arr == front_ind)
                    ref_tris = ref_tris[0]
                    reffed = (ref_tris.shape[0] > 0)
                    remaining_boundary[cur_coord[0], cur_coord[1]] = visited_marker
                i = i+1
            coord_visits.extend([cur_coord])
            cur_dir_angle = np.pi / 2 # start straight down

            # loop around boundary and add faces connecting front and back
            while another_visit_avail:
                front_ind = front_ind_map[cur_coord[0], cur_coord[1]]
                back_ind = back_ind_map[cur_coord[0], cur_coord[1]]
                ref_tris = np.where(tris_arr == front_ind)
                ref_tris = ref_tris[0]
                num_reffing_tris = ref_tris.shape[0]

                # get all possible cadidates from neighboring tris
                # NOTE(review): boundary_im is indexed directly here
                # (boundary_im[v[0], v[1]]) while .data is used above —
                # presumably BinaryImage implements __getitem__; confirm.
                another_visit_avail = False
                candidate_next_coords = []
                for i in range(num_reffing_tris):
                    reffing_tri = tris[ref_tris[i]]
                    for j in range(3):
                        v = verts[reffing_tri[j]]
                        if boundary_im[v[0], v[1]] == upper_bound and visited_map[v[0], v[1]] == 0:
                            candidate_next_coords.append([v[0], v[1]])
                            another_visit_avail = True

                # get the "rightmost" next point
                num_candidates = len(candidate_next_coords)
                if num_candidates > 0:
                    # calculate candidate directions
                    directions = []
                    next_dirs = np.array(candidate_next_coords) - np.array(cur_coord)
                    dir_norms = np.linalg.norm(next_dirs, axis = 1)
                    next_dirs = next_dirs / np.tile(dir_norms, [2, 1]).T

                    # calculate angles relative to positive x axis
                    new_angles = np.arctan(next_dirs[:,0] / next_dirs[:,1])
                    negative_ind = np.where(next_dirs[:,1] < 0)
                    negative_ind = negative_ind[0]
                    new_angles[negative_ind] = new_angles[negative_ind] + np.pi

                    # compute difference in angles
                    angle_diff = new_angles - cur_dir_angle
                    correction_ind = np.where(angle_diff <= -np.pi)
                    correction_ind = correction_ind[0]
                    angle_diff[correction_ind] = angle_diff[correction_ind] + 2 * np.pi

                    # choose the next coordinate with the maximum angle diff (rightmost)
                    next_ind = np.where(angle_diff == np.max(angle_diff))
                    next_ind = next_ind[0]
                    cur_coord = candidate_next_coords[next_ind[0]]
                    cur_dir_angle = new_angles[next_ind[0]]

                    # add triangles (only add if there is a new candidate)
                    next_front_ind = front_ind_map[cur_coord[0], cur_coord[1]]
                    next_back_ind = back_ind_map[cur_coord[0], cur_coord[1]]
                    tris.append([int(front_ind), int(back_ind), int(next_front_ind)])
                    tris.append([int(back_ind), int(next_back_ind), int(next_front_ind)])

                # mark coordinate as visited
                visited_map[cur_coord[0], cur_coord[1]] = 1
                coord_visits.append(cur_coord)
                remaining_boundary[cur_coord[0], cur_coord[1]] = visited_marker

            # add edge back to first coord to close the boundary strip
            cur_coord = coord_visits[0]
            next_front_ind = front_ind_map[cur_coord[0], cur_coord[1]]
            next_back_ind = back_ind_map[cur_coord[0], cur_coord[1]]
            tris.append([int(front_ind), int(back_ind), int(next_front_ind)])
            tris.append([int(back_ind), int(next_back_ind), int(next_front_ind)])

            # check success
            finished = (np.sum(remaining_boundary == upper_bound) == 0) or (i == len(boundary_coords))
            it += 1

    @staticmethod
    def create_mesh_face(occ_coords, depth, index_shape, cw=True):
        """
        Creates a 2D mesh face of vertices and triangles from the given coordinates at a specified depth.

        Parameters
        ----------
        occ_coords : :obj:`list` of 3-:obj:`tuple
            the coordinates of vertices
        depth : float
            the depth at which to place the face
        index_shape : 2-:obj:`tuple`
            the shape of the numpy grid on which the vertices lie
        cw : bool
            clockwise or counterclockwise orientation

        Returns
        -------
        verts : :obj:`list` of 3-:obj:`list` of float
            list of vertices
        tris : :obj:`list` of 3-:obj`list` of int
            list of triangles
        """
        # get mesh vertices
        verts = []
        tris = []
        ind_map = -1 * np.ones(index_shape) # map vertices to indices in vert list
        for coord in occ_coords:
            verts.append([coord[0], coord[1], depth])
            ind_map[coord[0], coord[1]] = len(verts) - 1

        # get mesh triangles
        # rule: vertex adds triangles that it is the 90 degree corner of
        for coord in occ_coords:
            coord_right = [coord[0] + 1, coord[1]]
            coord_left = [coord[0] - 1, coord[1]]
            coord_below = [coord[0], coord[1] + 1]
            coord_above = [coord[0], coord[1] - 1]
            cur_ind = ind_map[coord[0], coord[1]]

            # add tri above left
            if coord_left[0] >= 0 and coord_above[1] >= 0:
                left_ind = ind_map[coord_left[0], coord_left[1]]
                above_ind = ind_map[coord_above[0], coord_above[1]]

                # check if valid vertices and add
                if left_ind > -1 and above_ind > -1:
                    if cw:
                        tris.append([int(cur_ind), int(left_ind), int(above_ind)])
                    else:
                        tris.append([int(cur_ind), int(above_ind), int(left_ind)])
                elif above_ind > -1:
                    # try to patch area with the diagonal neighbor
                    coord_left_above = [coord[0] - 1, coord[1] - 1]
                    if coord_left_above[0] > 0 and coord_left_above[1] > 0:
                        left_above_ind = ind_map[coord_left_above[0], coord_left_above[1]]

                        # check validity
                        if left_above_ind > -1:
                            if cw:
                                tris.append([int(cur_ind), int(left_above_ind), int(above_ind)])
                            else:
                                tris.append([int(cur_ind), int(above_ind), int(left_above_ind)])

            # add tri below right
            # NOTE(review): the axis-0 step (coord_right) is bound-checked
            # against index_shape[1] and the axis-1 step against
            # index_shape[0] — only safe for square grids; confirm intent.
            if coord_right[0] < index_shape[1] and coord_below[1] < index_shape[0]:
                right_ind = ind_map[coord_right[0], coord_right[1]]
                below_ind = ind_map[coord_below[0], coord_below[1]]

                # check if valid vertices and add
                if right_ind > -1 and below_ind > -1:
                    if cw:
                        tris.append([int(cur_ind), int(right_ind), int(below_ind)])
                    else:
                        tris.append([int(cur_ind), int(below_ind), int(right_ind)])
                elif below_ind > -1:
                    # try to patch area with the diagonal neighbor
                    coord_right_below = [coord[0] + 1, coord[1] + 1]
                    if coord_right_below[0] < index_shape[0] and coord_right_below[1] < index_shape[1]:
                        right_below_ind = ind_map[coord_right_below[0], coord_right_below[1]]

                        # check validity
                        if right_below_ind > -1:
                            if cw:
                                tris.append([int(cur_ind), int(right_below_ind), int(below_ind)])
                            else:
                                tris.append([int(cur_ind), int(below_ind), int(right_below_ind)])
        return verts, tris, ind_map
| StarcoderdataPython |
135330 | # coding: utf-8
import json
class StixCyberObservable:
    def __init__(self, opencti, file):
        """Initialize the StixCyberObservable API wrapper.

        :param opencti: OpenCTI API client used to execute GraphQL queries
        :param file: file helper object (NOTE(review): stored but not used in
            the portion of this class visible here — confirm against the
            full file)
        """
        self.opencti = opencti
        self.file = file
        # Default GraphQL projection requested for observables; `list` and
        # friends substitute it unless callers pass `customAttributes`.
        self.properties = """
            id
            standard_id
            entity_type
            parent_types
            spec_version
            created_at
            updated_at
            createdBy {
                ... on Identity {
                    id
                    standard_id
                    entity_type
                    parent_types
                    spec_version
                    name
                    description
                    roles
                    contact_information
                    x_opencti_aliases
                    created
                    modified
                    objectLabel {
                        edges {
                            node {
                                id
                                value
                                color
                            }
                        }
                    }
                }
                ... on Organization {
                    x_opencti_organization_type
                    x_opencti_reliability
                }
                ... on Individual {
                    x_opencti_firstname
                    x_opencti_lastname
                }
            }
            objectMarking {
                edges {
                    node {
                        id
                        standard_id
                        entity_type
                        definition_type
                        definition
                        created
                        modified
                        x_opencti_order
                        x_opencti_color
                    }
                }
            }
            objectLabel {
                edges {
                    node {
                        id
                        value
                        color
                    }
                }
            }
            externalReferences {
                edges {
                    node {
                        id
                        standard_id
                        entity_type
                        source_name
                        description
                        url
                        hash
                        external_id
                        created
                        modified
                    }
                }
            }
            observable_value
            x_opencti_description
            x_opencti_score
            indicators {
                edges {
                    node {
                        id
                        pattern
                        pattern_type
                    }
                }
            }
            ... on AutonomousSystem {
                number
                name
                rir
            }
            ... on Directory {
                path
                path_enc
                ctime
                mtime
                atime
            }
            ... on DomainName {
                value
            }
            ... on EmailAddr {
                value
                display_name
            }
            ... on EmailMessage {
                is_multipart
                attribute_date
                content_type
                message_id
                subject
                received_lines
                body
            }
            ... on Artifact {
                mime_type
                payload_bin
                url
                encryption_algorithm
                decryption_key
                hashes {
                    algorithm
                    hash
                }
            }
            ... on StixFile {
                extensions
                size
                name
                name_enc
                magic_number_hex
                mime_type
                ctime
                mtime
                atime
                hashes {
                    algorithm
                    hash
                }
            }
            ... on X509Certificate {
                is_self_signed
                version
                serial_number
                signature_algorithm
                issuer
                validity_not_before
                validity_not_after
                hashes {
                    algorithm
                    hash
                }
            }
            ... on IPv4Addr {
                value
            }
            ... on IPv6Addr {
                value
            }
            ... on MacAddr {
                value
            }
            ... on Mutex {
                name
            }
            ... on NetworkTraffic {
                extensions
                start
                end
                is_active
                src_port
                dst_port
                protocols
                src_byte_count
                dst_byte_count
                src_packets
                dst_packets
            }
            ... on Process {
                extensions
                is_hidden
                pid
                created_time
                cwd
                command_line
                environment_variables
            }
            ... on Software {
                name
                cpe
                swid
                languages
                vendor
                version
            }
            ... on Url {
                value
            }
            ... on UserAccount {
                extensions
                user_id
                credential
                account_login
                account_type
                display_name
                is_service_account
                is_privileged
                can_escalate_privs
                is_disabled
                account_created
                account_expires
                credential_last_changed
                account_first_login
                account_last_login
            }
            ... on WindowsRegistryKey {
                attribute_key
                modified_time
                number_of_subkeys
            }
            ... on WindowsRegistryValueType {
                name
                data
                data_type
            }
            ... on X509V3ExtensionsType {
                basic_constraints
                name_constraints
                policy_constraints
                key_usage
                extended_key_usage
                subject_key_identifier
                authority_key_identifier
                subject_alternative_name
                issuer_alternative_name
                subject_directory_attributes
                crl_distribution_points
                inhibit_any_policy
                private_key_usage_period_not_before
                private_key_usage_period_not_after
                certificate_policies
                policy_mappings
            }
            ... on XOpenCTICryptographicKey {
                value
            }
            ... on XOpenCTICryptocurrencyWallet {
                value
            }
            ... on XOpenCTIText {
                value
            }
            ... on XOpenCTIUserAgent {
                value
            }
        """
"""
List StixCyberObservable objects
:param types: the array of types
:param filters: the filters to apply
:param search: the search keyword
:param first: return the first n rows from the after ID (or the beginning if not set)
:param after: ID of the first row
:return List of StixCyberObservable objects
"""
def list(self, **kwargs):
types = kwargs.get("types", None)
filters = kwargs.get("filters", None)
search = kwargs.get("search", None)
first = kwargs.get("first", 500)
after = kwargs.get("after", None)
order_by = kwargs.get("orderBy", None)
order_mode = kwargs.get("orderMode", None)
custom_attributes = kwargs.get("customAttributes", None)
get_all = kwargs.get("getAll", False)
with_pagination = kwargs.get("withPagination", False)
if get_all:
first = 500
self.opencti.log(
"info",
"Listing StixCyberObservables with filters " + json.dumps(filters) + ".",
)
query = (
"""
query StixCyberObservables($types: [String], $filters: [StixCyberObservablesFiltering], $search: String, $first: Int, $after: ID, $orderBy: StixCyberObservablesOrdering, $orderMode: OrderingMode) {
stixCyberObservables(types: $types, filters: $filters, search: $search, first: $first, after: $after, orderBy: $orderBy, orderMode: $orderMode) {
edges {
node {
"""
+ (custom_attributes if custom_attributes is not None else self.properties)
+ """
}
}
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
globalCount
}
}
}
"""
)
result = self.opencti.query(
query,
{
"types": types,
"filters": filters,
"search": search,
"first": first,
"after": after,
"orderBy": order_by,
"orderMode": order_mode,
},
)
if get_all:
final_data = []
data = self.opencti.process_multiple(result["data"]["stixCyberObservables"])
final_data = final_data + data
while result["data"]["stixCyberObservables"]["pageInfo"]["hasNextPage"]:
after = result["data"]["stixCyberObservables"]["pageInfo"]["endCursor"]
self.opencti.log("info", "Listing StixCyberObservables after " + after)
result = self.opencti.query(
query,
{
"types": types,
"filters": filters,
"search": search,
"first": first,
"after": after,
"orderBy": order_by,
"orderMode": order_mode,
},
)
data = self.opencti.process_multiple(
result["data"]["stixCyberObservables"]
)
final_data = final_data + data
return final_data
else:
return self.opencti.process_multiple(
result["data"]["stixCyberObservables"], with_pagination
)
"""
Read a StixCyberObservable object
:param id: the id of the StixCyberObservable
:param filters: the filters to apply if no id provided
:return StixCyberObservable object
"""
def read(self, **kwargs):
id = kwargs.get("id", None)
filters = kwargs.get("filters", None)
custom_attributes = kwargs.get("customAttributes", None)
if id is not None:
self.opencti.log("info", "Reading StixCyberObservable {" + id + "}.")
query = (
"""
query StixCyberObservable($id: String!) {
stixCyberObservable(id: $id) {
"""
+ (
custom_attributes
if custom_attributes is not None
else self.properties
)
+ """
}
}
"""
)
result = self.opencti.query(query, {"id": id})
return self.opencti.process_multiple_fields(
result["data"]["stixCyberObservable"]
)
elif filters is not None:
result = self.list(filters=filters, customAttributes=custom_attributes)
if len(result) > 0:
return result[0]
else:
return None
else:
self.opencti.log(
"error",
"[opencti_stix_cyber_observable] Missing parameters: id or filters",
)
return None
"""
Create a Stix-Observable object
:param observableData: the data of the observable (STIX2 structure)
:return Stix-Observable object
"""
def create(self, **kwargs):
observable_data = kwargs.get("observableData", {})
simple_observable_id = kwargs.get("simple_observable_id", None)
simple_observable_key = kwargs.get("simple_observable_key", None)
simple_observable_value = kwargs.get("simple_observable_value", None)
simple_observable_description = kwargs.get(
"simple_observable_description", None
)
x_opencti_score = kwargs.get("x_opencti_score", None)
created_by = kwargs.get("createdBy", None)
object_marking = kwargs.get("objectMarking", None)
object_label = kwargs.get("objectLabel", None)
external_references = kwargs.get("externalReferences", None)
update = kwargs.get("update", False)
create_indicator = (
observable_data["x_opencti_create_indicator"]
if "x_opencti_create_indicator" in observable_data
else kwargs.get("createIndicator", False)
)
attribute = None
if simple_observable_key is not None:
key_split = simple_observable_key.split(".")
type = key_split[0].title()
attribute = key_split[1]
if attribute not in ["hashes", "extensions"]:
observable_data[attribute] = simple_observable_value
else:
type = (
observable_data["type"].title() if "type" in observable_data else None
)
if type is None:
return
if type.lower() == "file":
type = "StixFile"
elif type.lower() == "ipv4-addr":
type = "IPv4-Addr"
elif type.lower() == "ipv6-addr":
type = "IPv6-Addr"
elif type.lower() == "x-opencti-hostname":
type = "X-OpenCTI-Hostname"
elif type.lower() == "x-opencti-cryptocurrency-wallet":
type = "X-OpenCTI-Cryptocurrency-Wallet"
elif type.lower() == "x-opencti-user-agent":
type = "X-OpenCTI-User-Agent"
elif type.lower() == "x-opencti-cryptographic-key":
type = "X-OpenCTI-Cryptographic-Key"
elif type.lower() == "x-opencti-text":
type = "X-OpenCTI-text"
x_opencti_description = (
observable_data["x_opencti_description"]
if "x_opencti_description" in observable_data
else None
)
if simple_observable_description is not None:
x_opencti_description = simple_observable_description
x_opencti_score = (
observable_data["x_opencti_score"]
if "x_opencti_score" in observable_data
else x_opencti_score
)
if simple_observable_description is not None:
x_opencti_description = simple_observable_description
stix_id = observable_data["id"] if "id" in observable_data else None
if simple_observable_id is not None:
stix_id = simple_observable_id
hashes = []
if (
simple_observable_key is not None
and simple_observable_key.lower() == "file.hashes.md5"
):
hashes.append({"algorithm": "MD5", "hash": simple_observable_value})
if (
simple_observable_key is not None
and simple_observable_key.lower() == "file.hashes.sha-1"
):
hashes.append({"algorithm": "SHA-1", "hash": simple_observable_value})
if (
simple_observable_key is not None
and simple_observable_key.lower() == "file.hashes.sha-256"
):
hashes.append({"algorithm": "SHA-256", "hash": simple_observable_value})
if "hashes" in observable_data:
for key, value in observable_data["hashes"].items():
hashes.append({"algorithm": key, "hash": value})
if type is not None:
self.opencti.log(
"info",
"Creating Stix-Cyber-Observable {"
+ type
+ "} with indicator at "
+ str(create_indicator)
+ ".",
)
input_variables = {
"type": type,
"stix_id": stix_id,
"x_opencti_score": x_opencti_score,
"x_opencti_description": x_opencti_description,
"createIndicator": create_indicator,
"createdBy": created_by,
"objectMarking": object_marking,
"objectLabel": object_label,
"externalReferences": external_references,
"update": update,
}
query = """
mutation StixCyberObservableAdd(
$type: String!,
$stix_id: String,
$x_opencti_score: Int,
$x_opencti_description: String,
$createIndicator: Boolean,
$createdBy: String,
$objectMarking: [String],
$objectLabel: [String],
$externalReferences: [String],
$AutonomousSystem: AutonomousSystemAddInput,
$Directory: DirectoryAddInput,
$DomainName: DomainNameAddInput,
$EmailAddr: EmailAddrAddInput,
$EmailMessage: EmailMessageAddInput,
$EmailMimePartType: EmailMimePartTypeAddInput,
$Artifact: ArtifactAddInput,
$StixFile: StixFileAddInput,
$X509Certificate: X509CertificateAddInput,
$IPv4Addr: IPv4AddrAddInput,
$IPv6Addr: IPv6AddrAddInput,
$MacAddr: MacAddrAddInput,
$Mutex: MutexAddInput,
$NetworkTraffic: NetworkTrafficAddInput,
$Process: ProcessAddInput,
$Software: SoftwareAddInput,
$Url: UrlAddInput,
$UserAccount: UserAccountAddInput,
$WindowsRegistryKey: WindowsRegistryKeyAddInput,
$WindowsRegistryValueType: WindowsRegistryValueTypeAddInput,
$X509V3ExtensionsType: X509V3ExtensionsTypeAddInput,
$XOpenCTICryptographicKey: XOpenCTICryptographicKeyAddInput,
$XOpenCTICryptocurrencyWallet: XOpenCTICryptocurrencyWalletAddInput,
$XOpenCTIHostname: XOpenCTIHostnameAddInput
$XOpenCTIText: XOpenCTITextAddInput,
$XOpenCTIUserAgent: XOpenCTIUserAgentAddInput
) {
stixCyberObservableAdd(
type: $type,
stix_id: $stix_id,
x_opencti_score: $x_opencti_score,
x_opencti_description: $x_opencti_description,
createIndicator: $createIndicator,
createdBy: $createdBy,
objectMarking: $objectMarking,
objectLabel: $objectLabel,
externalReferences: $externalReferences,
AutonomousSystem: $AutonomousSystem,
Directory: $Directory,
DomainName: $DomainName,
EmailAddr: $EmailAddr,
EmailMessage: $EmailMessage,
EmailMimePartType: $EmailMimePartType,
Artifact: $Artifact,
StixFile: $StixFile,
X509Certificate: $X509Certificate,
IPv4Addr: $IPv4Addr,
IPv6Addr: $IPv6Addr,
MacAddr: $MacAddr,
Mutex: $Mutex,
NetworkTraffic: $NetworkTraffic,
Process: $Process,
Software: $Software,
Url: $Url,
UserAccount: $UserAccount,
WindowsRegistryKey: $WindowsRegistryKey,
WindowsRegistryValueType: $WindowsRegistryValueType,
X509V3ExtensionsType: $X509V3ExtensionsType,
XOpenCTICryptographicKey: $XOpenCTICryptographicKey,
XOpenCTICryptocurrencyWallet: $XOpenCTICryptocurrencyWallet,
XOpenCTIHostname: $XOpenCTIHostname,
XOpenCTIText: $XOpenCTIText,
XOpenCTIUserAgent: $XOpenCTIUserAgent
) {
id
standard_id
entity_type
parent_types
indicators {
edges {
node {
id
pattern
pattern_type
}
}
}
}
}
"""
if type == "Autonomous-System":
input_variables["AutonomousSystem"] = {
"number": observable_data["number"],
"name": observable_data["name"]
if "name" in observable_data
else None,
"rir": observable_data["rir"] if "rir" in observable_data else None,
}
elif type == "Directory":
input_variables["Directory"] = {
"path": observable_data["path"],
"path_enc": observable_data["path_enc"]
if "path_enc" in observable_data
else None,
"ctime": observable_data["ctime"]
if "ctime" in observable_data
else None,
"mtime": observable_data["mtime"]
if "mtime" in observable_data
else None,
"atime": observable_data["atime"]
if "atime" in observable_data
else None,
}
elif type == "Domain-Name":
input_variables["DomainName"] = {"value": observable_data["value"]}
if attribute is not None:
input_variables["DomainName"][attribute] = simple_observable_value
elif type == "Email-Addr":
input_variables["EmailAddr"] = {
"value": observable_data["value"],
"display_name": observable_data["display_name"]
if "display_name" in observable_data
else None,
}
elif type == "Email-Message":
input_variables["EmailMessage"] = {
"is_multipart": observable_data["is_multipart"]
if "is_multipart" in observable_data
else None,
"attribute_date": observable_data["attribute_date"]
if "date" in observable_data
else None,
"message_id": observable_data["message_id"]
if "message_id" in observable_data
else None,
"subject": observable_data["subject"]
if "subject" in observable_data
else None,
"received_lines": observable_data["received_lines"]
if "received_lines" in observable_data
else None,
"body": observable_data["body"]
if "body" in observable_data
else None,
}
elif type == "Email-Mime-Part-Type":
input_variables["EmailMimePartType"] = {
"body": observable_data["body"]
if "body" in observable_data
else None,
"content_type": observable_data["content_type"]
if "content_type" in observable_data
else None,
"content_disposition": observable_data["content_disposition"]
if "content_disposition" in observable_data
else None,
}
elif type == "Artifact":
input_variables["Artifact"] = {
"hashes": hashes if len(hashes) > 0 else None,
"mime_type": observable_data["mime_type"]
if "mime_type" in observable_data
else None,
"payload_bin": observable_data["payload_bin"]
if "payload_bin" in observable_data
else None,
"url": observable_data["url"] if "url" in observable_data else None,
"encryption_algorithm": observable_data["encryption_algorithm"]
if "encryption_algorithm" in observable_data
else None,
"decryption_key": observable_data["decryption_key"]
if "decryption_key" in observable_data
else None,
}
elif type == "StixFile":
input_variables["StixFile"] = {
"hashes": hashes if len(hashes) > 0 else None,
"extensions": observable_data["extensions"]
if "extensions" in observable_data
else None,
"size": observable_data["size"]
if "size" in observable_data
else None,
"name": observable_data["name"]
if "name" in observable_data
else None,
"name_enc": observable_data["name_enc"]
if "name_enc" in observable_data
else None,
"magic_number_hex": observable_data["magic_number_hex"]
if "magic_number_hex" in observable_data
else None,
"mime_type": observable_data["mime_type"]
if "mime_type" in observable_data
else None,
"mtime": observable_data["mtime"]
if "mtime" in observable_data
else None,
"ctime": observable_data["ctime"]
if "ctime" in observable_data
else None,
"atime": observable_data["atime"]
if "atime" in observable_data
else None,
}
elif type == "X509-Certificate":
input_variables["X509Certificate"] = {
"hashes": hashes if len(hashes) > 0 else None,
"is_self_signed": observable_data["is_self_signed"]
if "is_self_signed" in observable_data
else False,
"version": observable_data["version"]
if "version" in observable_data
else None,
"serial_number": observable_data["serial_number"]
if "serial_number" in observable_data
else None,
"signature_algorithm": observable_data["signature_algorithm"]
if "signature_algorithm" in observable_data
else None,
"issuer": observable_data["issuer"]
if "issuer" in observable_data
else None,
"validity_not_before": observable_data["validity_not_before"]
if "validity_not_before" in observable_data
else None,
"validity_not_after": observable_data["validity_not_after"]
if "validity_not_after" in observable_data
else None,
"subject": observable_data["subject"]
if "subject" in observable_data
else None,
"subject_public_key_algorithm": observable_data[
"subject_public_key_algorithm"
]
if "subject_public_key_algorithm" in observable_data
else None,
"subject_public_key_modulus": observable_data[
"subject_public_key_modulus"
]
if "subject_public_key_modulus" in observable_data
else None,
"subject_public_key_exponent": observable_data[
"subject_public_key_exponent"
]
if "subject_public_key_exponent" in observable_data
else None,
}
elif type == "IPv4-Addr":
input_variables["IPv4Addr"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
elif type == "IPv6-Addr":
input_variables["IPv6Addr"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
elif type == "Mac-Addr":
input_variables["MacAddr"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
elif type == "Mutex":
input_variables["Mutex"] = {
"name": observable_data["name"]
if "name" in observable_data
else None,
}
elif type == "Network-Traffic":
input_variables["NetworkTraffic"] = {
"extensions": observable_data["extensions"]
if "extensions" in observable_data
else None,
"start": observable_data["start"]
if "start" in observable_data
else None,
"end": observable_data["end"] if "end" in observable_data else None,
"is_active": observable_data["is_active"]
if "is_active" in observable_data
else None,
"src_port": observable_data["src_port"]
if "src_port" in observable_data
else None,
"dst_port": observable_data["dst_port"]
if "dst_port" in observable_data
else None,
"protocols": observable_data["protocols"]
if "protocols" in observable_data
else None,
"src_byte_count": observable_data["src_byte_count"]
if "src_byte_count" in observable_data
else None,
"dst_byte_count": observable_data["dst_byte_count"]
if "dst_byte_count" in observable_data
else None,
"src_packets": observable_data["src_packets"]
if "src_packets" in observable_data
else None,
"dst_packets": observable_data["dst_packets"]
if "dst_packets" in observable_data
else None,
}
elif type == "Process":
input_variables["Process"] = {
"extensions": observable_data["extensions"]
if "extensions" in observable_data
else None,
"is_hidden": observable_data["is_hidden"]
if "is_hidden" in observable_data
else None,
"pid": observable_data["pid"] if "pid" in observable_data else None,
"created_time": observable_data["created_time"]
if "created_time" in observable_data
else None,
"cwd": observable_data["cwd"] if "cwd" in observable_data else None,
"command_line": observable_data["command_line"]
if "command_line" in observable_data
else None,
"environment_variables": observable_data["environment_variables"]
if "environment_variables" in observable_data
else None,
}
elif type == "Software":
input_variables["Software"] = {
"name": observable_data["name"]
if "name" in observable_data
else None,
"cpe": observable_data["cpe"] if "cpe" in observable_data else None,
"swid": observable_data["swid"]
if "swid" in observable_data
else None,
"languages": observable_data["languages"]
if "languages" in observable_data
else None,
"vendor": observable_data["vendor"]
if "vendor" in observable_data
else None,
"version": observable_data["version"]
if "version" in observable_data
else None,
}
elif type == "Url":
input_variables["Url"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
elif type == "User-Account":
input_variables["UserAccount"] = {
"extensions": observable_data["extensions"]
if "extensions" in observable_data
else None,
"user_id": observable_data["user_id"]
if "user_id" in observable_data
else None,
"credential": observable_data["credential"]
if "credential" in observable_data
else None,
"account_login": observable_data["account_login"]
if "account_login" in observable_data
else None,
"account_type": observable_data["account_type"]
if "account_type" in observable_data
else None,
"display_name": observable_data["display_name"]
if "display_name" in observable_data
else None,
"is_service_account": observable_data["is_service_account"]
if "is_service_account" in observable_data
else None,
"is_privileged": observable_data["is_privileged"]
if "is_privileged" in observable_data
else None,
"can_escalate_privs": observable_data["can_escalate_privs"]
if "can_escalate_privs" in observable_data
else None,
"is_disabled": observable_data["is_disabled"]
if "is_disabled" in observable_data
else None,
"account_created": observable_data["account_created"]
if "account_created" in observable_data
else None,
"account_expires": observable_data["account_expires"]
if "account_expires" in observable_data
else None,
"credential_last_changed": observable_data[
"credential_last_changed"
]
if "credential_last_changed" in observable_data
else None,
"account_first_login": observable_data["account_first_login"]
if "account_first_login" in observable_data
else None,
"account_last_login": observable_data["account_last_login"]
if "account_last_login" in observable_data
else None,
}
elif type == "Windows-Registry-Key":
input_variables["WindowsRegistryKey"] = {
"attribute_key": observable_data["key"]
if "key" in observable_data
else None,
"modified_time": observable_data["modified_time"]
if "modified_time" in observable_data
else None,
"number_of_subkeys": observable_data["number_of_subkeys"]
if "number_of_subkeys" in observable_data
else None,
}
print(input_variables)
elif type == "Windows-Registry-Value-Type":
input_variables["WindowsRegistryKeyValueType"] = {
"name": observable_data["name"]
if "name" in observable_data
else None,
"data": observable_data["data"]
if "data" in observable_data
else None,
"data_type": observable_data["data_type"]
if "data_type" in observable_data
else None,
}
elif type == "X509-V3-Extensions-Type":
input_variables["X509V3ExtensionsType"] = {
"basic_constraints": observable_data["basic_constraints"]
if "basic_constraints" in observable_data
else None,
"name_constraints": observable_data["name_constraints"]
if "name_constraints" in observable_data
else None,
"policy_constraints": observable_data["policy_constraints"]
if "policy_constraints" in observable_data
else None,
"key_usage": observable_data["key_usage"]
if "key_usage" in observable_data
else None,
"extended_key_usage": observable_data["extended_key_usage"]
if "extended_key_usage" in observable_data
else None,
"subject_key_identifier": observable_data["subject_key_identifier"]
if "subject_key_identifier" in observable_data
else None,
"authority_key_identifier": observable_data[
"authority_key_identifier"
]
if "authority_key_identifier" in observable_data
else None,
"subject_alternative_name": observable_data[
"subject_alternative_name"
]
if "subject_alternative_name" in observable_data
else None,
"issuer_alternative_name": observable_data[
"issuer_alternative_name"
]
if "issuer_alternative_name" in observable_data
else None,
"subject_directory_attributes": observable_data[
"subject_directory_attributes"
]
if "subject_directory_attributes" in observable_data
else None,
"crl_distribution_points": observable_data[
"crl_distribution_points"
]
if "crl_distribution_points" in observable_data
else None,
"inhibit_any_policy": observable_data["inhibit_any_policy"]
if "inhibit_any_policy" in observable_data
else None,
"private_key_usage_period_not_before": observable_data[
"private_key_usage_period_not_before"
]
if "private_key_usage_period_not_before" in observable_data
else None,
"private_key_usage_period_not_after": observable_data[
"private_key_usage_period_not_after"
]
if "private_key_usage_period_not_after" in observable_data
else None,
"certificate_policies": observable_data["certificate_policies"]
if "certificate_policies" in observable_data
else None,
"policy_mappings": observable_data["policy_mappings"]
if "policy_mappings" in observable_data
else None,
}
elif type == "X-OpenCTI-Cryptographic-Key":
input_variables["XOpenCTICryptographicKey"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
elif type == "X-OpenCTI-Cryptocurrency-Wallet":
input_variables["XOpenCTICryptocurrencyWallet"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
elif type == "X-OpenCTI-Hostname":
input_variables["XOpenCTIHostname"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
elif type == "X-OpenCTI-Text":
input_variables["XOpenCTIText"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
elif type == "X-OpenCTI-User-Agent":
input_variables["XOpenCTIUserAgent"] = {
"value": observable_data["value"]
if "value" in observable_data
else None,
}
result = self.opencti.query(query, input_variables)
return self.opencti.process_multiple_fields(
result["data"]["stixCyberObservableAdd"]
)
else:
self.opencti.log("error", "Missing parameters: type")
"""
Update a Stix-Observable object field
:param id: the Stix-Observable id
:param key: the key of the field
:param value: the value of the field
:return The updated Stix-Observable object
"""
def update_field(self, **kwargs):
id = kwargs.get("id", None)
key = kwargs.get("key", None)
value = kwargs.get("value", None)
operation = kwargs.get("operation", "replace")
if id is not None and key is not None and value is not None:
self.opencti.log(
"info", "Updating Stix-Observable {" + id + "} field {" + key + "}."
)
query = """
mutation StixCyberObservableEdit($id: ID!, $input: EditInput!, $operation: EditOperation) {
stixCyberObservableEdit(id: $id) {
fieldPatch(input: $input, operation: $operation) {
id
standard_id
}
}
}
"""
result = self.opencti.query(
query,
{
"id": id,
"input": {"key": key, "value": value},
"operation": operation,
},
)
return self.opencti.process_multiple_fields(
result["data"]["stixCyberObservableEdit"]["fieldPatch"]
)
else:
self.opencti.log(
"error",
"[opencti_stix_cyber_observable_update_field] Missing parameters: id and key and value",
)
return None
"""
Delete a Stix-Observable
:param id: the Stix-Observable id
:return void
"""
def delete(self, **kwargs):
id = kwargs.get("id", None)
if id is not None:
self.opencti.log("info", "Deleting Stix-Observable {" + id + "}.")
query = """
mutation StixCyberObservableEdit($id: ID!) {
stixCyberObservableEdit(id: $id) {
delete
}
}
"""
self.opencti.query(query, {"id": id})
else:
self.opencti.log(
"error", "[opencti_stix_cyber_observable_delete] Missing parameters: id"
)
return None
"""
Update the Identity author of a Stix-Cyber-Observable object (created_by)
:param id: the id of the Stix-Cyber-Observable
:param identity_id: the id of the Identity
:return Boolean
"""
def update_created_by(self, **kwargs):
id = kwargs.get("id", None)
identity_id = kwargs.get("identity_id", None)
if id is not None:
self.opencti.log(
"info",
"Updating author of Stix-Cyber-Observable {"
+ id
+ "} with Identity {"
+ str(identity_id)
+ "}",
)
custom_attributes = """
id
createdBy {
... on Identity {
id
standard_id
entity_type
parent_types
name
x_opencti_aliases
description
created
modified
}
... on Organization {
x_opencti_organization_type
x_opencti_reliability
}
... on Individual {
x_opencti_firstname
x_opencti_lastname
}
}
"""
stix_domain_object = self.read(id=id, customAttributes=custom_attributes)
if stix_domain_object["createdBy"] is not None:
query = """
mutation StixCyberObservableEdit($id: ID!, $toId: String! $relationship_type: String!) {
stixCyberObservableEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": stix_domain_object["createdBy"]["id"],
"relationship_type": "created-by",
},
)
if identity_id is not None:
# Add the new relation
query = """
mutation StixCyberObservableEdit($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCyberObservableEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
variables = {
"id": id,
"input": {
"toId": identity_id,
"relationship_type": "created-by",
},
}
self.opencti.query(query, variables)
else:
self.opencti.log("error", "Missing parameters: id")
return False
"""
Add a Marking-Definition object to Stix-Cyber-Observable object (object_marking_refs)
:param id: the id of the Stix-Cyber-Observable
:param marking_definition_id: the id of the Marking-Definition
:return Boolean
"""
def add_marking_definition(self, **kwargs):
id = kwargs.get("id", None)
marking_definition_id = kwargs.get("marking_definition_id", None)
if id is not None and marking_definition_id is not None:
custom_attributes = """
id
objectMarking {
edges {
node {
id
standard_id
entity_type
definition_type
definition
x_opencti_order
x_opencti_color
created
modified
}
}
}
"""
stix_cyber_observable = self.read(id=id, customAttributes=custom_attributes)
if stix_cyber_observable is None:
self.opencti.log(
"error", "Cannot add Marking-Definition, entity not found"
)
return False
if marking_definition_id in stix_cyber_observable["markingDefinitionsIds"]:
return True
else:
self.opencti.log(
"info",
"Adding Marking-Definition {"
+ marking_definition_id
+ "} to Stix-Cyber-Observable {"
+ id
+ "}",
)
query = """
mutation StixCyberObservableAddRelation($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCyberObservableEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"input": {
"toId": marking_definition_id,
"relationship_type": "object-marking",
},
},
)
return True
else:
self.opencti.log(
"error", "Missing parameters: id and marking_definition_id"
)
return False
"""
Remove a Marking-Definition object to Stix-Cyber-Observable object
:param id: the id of the Stix-Cyber-Observable
:param marking_definition_id: the id of the Marking-Definition
:return Boolean
"""
def remove_marking_definition(self, **kwargs):
id = kwargs.get("id", None)
marking_definition_id = kwargs.get("marking_definition_id", None)
if id is not None and marking_definition_id is not None:
self.opencti.log(
"info",
"Removing Marking-Definition {"
+ marking_definition_id
+ "} from Stix-Cyber-Observable {"
+ id
+ "}",
)
query = """
mutation StixCyberObservableRemoveRelation($id: ID!, $toId: String!, $relationship_type: String!) {
stixCyberObservableEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": marking_definition_id,
"relationship_type": "object-marking",
},
)
return True
else:
self.opencti.log("error", "Missing parameters: id and label_id")
return False
"""
Add a Label object to Stix-Cyber-Observable object
:param id: the id of the Stix-Cyber-Observable
:param label_id: the id of the Label
:return Boolean
"""
def add_label(self, **kwargs):
id = kwargs.get("id", None)
label_id = kwargs.get("label_id", None)
label_name = kwargs.get("label_name", None)
if label_name is not None:
label = self.opencti.label.read(
filters=[{"key": "value", "values": [label_name]}]
)
if label:
label_id = label["id"]
else:
label = self.opencti.label.create(value=label_name)
label_id = label["id"]
if id is not None and label_id is not None:
self.opencti.log(
"info",
"Adding label {" + label_id + "} to Stix-Cyber-Observable {" + id + "}",
)
query = """
mutation StixCyberObservableAddRelation($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCyberObservableEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"input": {
"toId": label_id,
"relationship_type": "object-label",
},
},
)
return True
else:
self.opencti.log("error", "Missing parameters: id and label_id")
return False
"""
Remove a Label object to Stix-Cyber-Observable object
:param id: the id of the Stix-Cyber-Observable
:param label_id: the id of the Label
:return Boolean
"""
def remove_label(self, **kwargs):
id = kwargs.get("id", None)
label_id = kwargs.get("label_id", None)
label_name = kwargs.get("label_name", None)
if label_name is not None:
label = self.opencti.label.read(
filters=[{"key": "value", "values": [label_name]}]
)
if label:
label_id = label["id"]
if id is not None and label_id is not None:
self.opencti.log(
"info",
"Removing label {"
+ label_id
+ "} to Stix-Cyber-Observable {"
+ id
+ "}",
)
query = """
mutation StixCyberObservableRemoveRelation($id: ID!, $toId: String!, $relationship_type: String!) {
stixCyberObservableEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": label_id,
"relationship_type": "object-label",
},
)
return True
else:
self.opencti.log("error", "Missing parameters: id and label_id")
return False
"""
Add a External-Reference object to Stix-Cyber-Observable object (object_marking_refs)
:param id: the id of the Stix-Cyber-Observable
:param marking_definition_id: the id of the Marking-Definition
:return Boolean
"""
def add_external_reference(self, **kwargs):
id = kwargs.get("id", None)
external_reference_id = kwargs.get("external_reference_id", None)
if id is not None and external_reference_id is not None:
custom_attributes = """
id
externalReferences {
edges {
node {
id
standard_id
entity_type
source_name
description
url
hash
external_id
created
modified
}
}
}
"""
stix_domain_object = self.read(id=id, customAttributes=custom_attributes)
if stix_domain_object is None:
self.opencti.log(
"error", "Cannot add External-Reference, entity not found"
)
return False
if external_reference_id in stix_domain_object["externalReferencesIds"]:
return True
else:
self.opencti.log(
"info",
"Adding External-Reference {"
+ external_reference_id
+ "} to Stix-Cyber-Observable {"
+ id
+ "}",
)
query = """
mutation StixCyberObservabletEditRelationAdd($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCyberObservableEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"input": {
"toId": external_reference_id,
"relationship_type": "external-reference",
},
},
)
return True
else:
self.opencti.log(
"error", "Missing parameters: id and external_reference_id"
)
return False
"""
Remove a Label object to Stix-Cyber-Observable object
:param id: the id of the Stix-Cyber-Observable
:param label_id: the id of the Label
:return Boolean
"""
def remove_external_reference(self, **kwargs):
id = kwargs.get("id", None)
external_reference_id = kwargs.get("external_reference_id", None)
if id is not None and external_reference_id is not None:
self.opencti.log(
"info",
"Removing External-Reference {"
+ external_reference_id
+ "} to Stix-Cyber-Observable {"
+ id
+ "}",
)
query = """
mutation StixCyberObservableRemoveRelation($id: ID!, $toId: String!, $relationship_type: String!) {
stixCyberObservableEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": external_reference_id,
"relationship_type": "external-reference",
},
)
return True
else:
self.opencti.log("error", "Missing parameters: id and label_id")
return False
def push_list_export(self, file_name, data, list_filters=""):
query = """
mutation StixCyberObservablesExportPush($file: Upload!, $listFilters: String) {
stixCyberObservablesExportPush(file: $file, listFilters: $listFilters)
}
"""
self.opencti.query(
query,
{
"file": (self.file(file_name, data)),
"listFilters": list_filters,
},
)
| StarcoderdataPython |
1618273 | <reponame>byung-u/HackerRank<gh_stars>0
#!/usr/bin/env python3
import sys
from functools import reduce
'''
terms
1~9 10~99 100~999 ...
9 180 2700 ...
9 * 1 * 10^0 9 * 2 * 10^1 9 * 3 * 10^2 ...
==> 9 * i * (10 ** (i - 1))
'''
for _ in range(int(input().strip())):
    # Each query line holds one or more 1-based positions into the infinite
    # digit string "123456789101112...".
    arr = list(map(int, input().strip().split()))
    result = []
    for a in arr:
        num = a
        for i in range(1, 19): # max 10 ** 18
            # Digits contributed by all i-digit numbers (see module docstring):
            # 9 * i * 10^(i-1).
            term = 9 * i * (10 ** (i - 1))
            if num > term:
                # Position lies beyond the i-digit block; skip the whole block.
                num -= term
                continue
            # d = offset of the containing number within the i-digit block,
            # m = digit index inside that number.
            d, m = divmod(num - 1, i)
            d += 10 ** (i - 1)
            result.append(int(str(d)[m]))
            break
    # Output the product of the digits found at the requested positions.
    print(reduce(lambda x, y: x * y, result))
| StarcoderdataPython |
1753348 | from datetime import datetime
import requests, time, statistics
class BangumiMark(object):
    """Fetch a Bangumi subject's rating data and derive simple statistics."""

    def __init__(self, subnumber):
        """Fetch subject *subnumber* from the Bangumi API and store its JSON."""
        self.subnumber = subnumber
        self.avgmark = 0
        self.standard_deviation = 0
        # One entry per score 1..10: how many people gave that score.
        self.marklist = []
        self.url = 'http://api.bgm.tv/subject/' + str(subnumber)
        response = requests.get(self.url)
        # Deserialized JSON payload returned by the API.
        self.anime_dict = response.json()
        response.close()

    def calculate(self):
        """Compute the weighted average score and its standard deviation."""
        score_counts = self.anime_dict['rating']['count']
        weighted_sum = 0
        for score in range(1, 11):
            voters = score_counts[str(score)]
            self.marklist.append(voters)
            weighted_sum += voters * score
        self.avgmark = weighted_sum / self.anime_dict['rating']['total']
        # Expand the histogram into one entry per individual vote.
        expanded_marks = []
        for score, voters in enumerate(self.marklist, start=1):
            expanded_marks.extend([score] * voters)
        self.standard_deviation = statistics.stdev(expanded_marks)

    def write_file(self):
        """Append a formatted report for this subject to the output file."""
        with open(r'C:\pythonworks\bangumimark\markingsingle.txt', 'a', encoding='utf-8') as f:
            f.write('原名: %s\n' % self.anime_dict['name'])
            f.write('中文名: %s\n' % self.anime_dict['name_cn'])
            f.write('评分: %.4f\n' % self.avgmark)
            f.write('评分人数: %s\n' % self.anime_dict['rating']['total'])
            f.write('排名: %s\n' % self.anime_dict['rank'])
            for n, voterank in enumerate(self.marklist, start=1):
                f.write('打%s分人数:%s; ' % (n, voterank))
            f.write('\n标准差: %.4f\n' % self.standard_deviation)
            updatetime = str(datetime.now())[:-7]
            f.write('更新时间: %s\n' % updatetime)
            f.write('--------------------------------------------\n')
| StarcoderdataPython |
1766935 | <filename>config.py
import inspect
import os
from pathlib import Path
class MissingConfigError(Exception):
    """Raised by Config.check_config() when required settings are unset."""
class Config:
    """Environment-driven configuration for the print file service.

    All values are read from environment variables at class-creation time;
    attributes left as None are considered missing and are reported by
    check_config().
    """

    # Rabbit MQ connection / routing.
    RABBIT_QUEUE = os.getenv('RABBIT_QUEUE', 'Action.Printer')
    RABBIT_EXCHANGE = os.getenv('RABBIT_EXCHANGE', 'action-outbound-exchange')
    RABBIT_HOST = os.getenv('RABBIT_HOST')
    RABBIT_PORT = os.getenv('RABBIT_PORT', 5672)
    RABBIT_VIRTUALHOST = os.getenv('RABBIT_VIRTUALHOST', '/')
    RABBIT_USERNAME = os.getenv('RABBIT_USERNAME')
    RABBIT_PASSWORD = os.getenv('RABBIT_PASSWORD')
    RABBIT_ROUTING_KEY = os.getenv('RABBIT_ROUTING_KEY', 'Action.Printer.binding')

    # Working directories for the print-file pipeline.
    PARTIAL_FILES_DIRECTORY = Path(os.getenv('PARTIAL_FILES_DIRECTORY', 'partial_files/'))
    ENCRYPTED_FILES_DIRECTORY = Path(os.getenv('ENCRYPTED_FILES_DIRECTORY', 'encrypted_files/'))
    QUARANTINED_FILES_DIRECTORY = Path(os.getenv('QUARANTINED_FILES_DIRECTORY', 'quarantined_files/'))
    SORTING_FILES_DIRECTORY = Path(os.getenv('SORTING_FILES', 'sorting_files/'))
    FILE_POLLING_DELAY_SECONDS = int(os.getenv('FILE_POLLING_DELAY_SECONDS', 10))
    READINESS_FILE_PATH = Path(os.getenv('READINESS_FILE_PATH', 'print-file-service-ready'))
    NAME = os.getenv('NAME', 'census-rm-print-file-service')

    # Logging.
    LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')
    LOG_DATE_FORMAT = os.getenv('LOG_DATE_FORMAT', '%Y-%m-%dT%H:%M:%S.%f')
    LOG_LEVEL_PIKA = os.getenv('LOG_LEVEL_PIKA', 'ERROR')
    LOG_LEVEL_PARAMIKO = os.getenv('LOG_LEVEL_PARAMIKO', 'ERROR')

    # Exception manager endpoint (URL is assembled even when the host/port
    # env vars are unset, matching the original behaviour).
    EXCEPTIONMANAGER_CONNECTION_HOST = os.getenv('EXCEPTIONMANAGER_CONNECTION_HOST')
    EXCEPTIONMANAGER_CONNECTION_PORT = os.getenv('EXCEPTIONMANAGER_CONNECTION_PORT')
    EXCEPTION_MANAGER_URL = f'http://{EXCEPTIONMANAGER_CONNECTION_HOST}:{EXCEPTIONMANAGER_CONNECTION_PORT}'

    # SFTP delivery.
    SFTP_HOST = os.getenv('SFTP_HOST')
    SFTP_PORT = os.getenv('SFTP_PORT')
    SFTP_USERNAME = os.getenv('SFTP_USERNAME')
    SFTP_KEY_FILENAME = os.getenv('SFTP_KEY_FILENAME')
    SFTP_PASSPHRASE = os.getenv('SFTP_PASSPHRASE')
    SFTP_PPO_DIRECTORY = os.getenv('SFTP_PPO_DIRECTORY')
    SFTP_QM_DIRECTORY = os.getenv('SFTP_QM_DIRECTORY')

    MAX_FILE_SIZE_BYTES = int(os.getenv('MAX_FILE_SIZE_BYTES', 9 * 10 ** 8))

    # Encryption keys; None when the corresponding env var is unset.
    OUR_PUBLIC_KEY_PATH = Path(os.getenv('OUR_PUBLIC_KEY_PATH')) if os.getenv('OUR_PUBLIC_KEY_PATH') else None
    QM_SUPPLIER_PUBLIC_KEY_PATH = Path(os.getenv('QM_SUPPLIER_PUBLIC_KEY_PATH')) if os.getenv(
        'QM_SUPPLIER_PUBLIC_KEY_PATH') else None
    PPO_SUPPLIER_PUBLIC_KEY_PATH = Path(os.getenv('PPO_SUPPLIER_PUBLIC_KEY_PATH')) if os.getenv(
        'PPO_SUPPLIER_PUBLIC_KEY_PATH') else None

    ENVIRONMENT = os.getenv('ENVIRONMENT', 'PROD')

    SENT_PRINT_FILE_BUCKET = os.getenv('SENT_PRINT_FILE_BUCKET', '')

    @classmethod
    def check_config(cls):
        """Validate that every configuration attribute has a value.

        Raises:
            MissingConfigError: naming every non-dunder, non-callable class
                attribute whose value is None.
        """
        # Fixed: the original passed the whole (name, value) tuple to
        # inspect.isbuiltin/isroutine, which always returned False; the
        # predicates now correctly inspect the attribute value.
        missing_config_items = {
            config_key
            for config_key, config_value in inspect.getmembers(cls)
            if not inspect.isbuiltin(config_value)
            and not inspect.isroutine(config_value)
            and not config_key.startswith('__')
            and not config_key.endswith('__')
            and config_value is None
        }
        if missing_config_items:
            # Sorted for a deterministic, reproducible error message.
            raise MissingConfigError(
                f'Missing config items: {sorted(missing_config_items)}')
class DevConfig(Config):
    """Development overrides: local services, dummy credentials and keys."""
    RABBIT_HOST = os.getenv('RABBIT_HOST', 'localhost')
    RABBIT_PORT = os.getenv('RABBIT_PORT', '6672')
    RABBIT_USERNAME = os.getenv('RABBIT_USERNAME', 'guest')
    RABBIT_PASSWORD = os.getenv('RABBIT_PASSWORD', '<PASSWORD>')
    # Faster polling than PROD for a snappier dev loop.
    FILE_POLLING_DELAY_SECONDS = int(os.getenv('FILE_POLLING_DELAY_SECONDS', 1))
    EXCEPTIONMANAGER_CONNECTION_HOST = os.getenv('EXCEPTIONMANAGER_CONNECTION_HOST', 'localhost')
    EXCEPTIONMANAGER_CONNECTION_PORT = os.getenv('EXCEPTIONMANAGER_CONNECTION_PORT', '8666')
    # Rebuilt here so it picks up the dev host/port defaults above.
    EXCEPTION_MANAGER_URL = f'http://{EXCEPTIONMANAGER_CONNECTION_HOST}:{EXCEPTIONMANAGER_CONNECTION_PORT}'
    SFTP_HOST = os.getenv('SFTP_HOST', 'localhost')
    SFTP_PORT = os.getenv('SFTP_PORT', '122')
    SFTP_USERNAME = os.getenv('SFTP_USERNAME', 'centos')
    SFTP_KEY_FILENAME = os.getenv('SFTP_KEY_FILENAME', 'dummy_keys/dummy_rsa')
    SFTP_PASSPHRASE = os.getenv('SFTP_PASSPHRASE', 'secret')
    SFTP_PPO_DIRECTORY = os.getenv('SFTP_PPO_DIRECTORY', 'ppo_dev/print_services/')
    SFTP_QM_DIRECTORY = os.getenv('SFTP_QM_DIRECTORY', 'qmprint_dev/print_services/')
    # NOTE(review): these use `os.getenv(...) or default`, so an *empty* env
    # var also falls back to the dummy key, unlike the base class's
    # conditional-expression pattern — presumably intentional; confirm.
    OUR_PUBLIC_KEY_PATH = Path(os.getenv('OUR_PUBLIC_KEY_PATH') or Path(__file__).parent.joinpath('dummy_keys')
                               .joinpath('our_dummy_public.asc'))
    QM_SUPPLIER_PUBLIC_KEY_PATH = Path(
        os.getenv('QM_SUPPLIER_PUBLIC_KEY_PATH') or Path(__file__).parent.joinpath('dummy_keys')
        .joinpath('dummy_qm_supplier_public_key.asc'))
    PPO_SUPPLIER_PUBLIC_KEY_PATH = Path(
        os.getenv('PPO_SUPPLIER_PUBLIC_KEY_PATH') or Path(__file__).parent.joinpath('dummy_keys')
        .joinpath('dummy_ppo_supplier_public_key.asc'))
class TestConfig(DevConfig):
    """Test overrides: alternate ports and throwaway temp directories."""
    RABBIT_PORT = os.getenv('RABBIT_PORT', '35672')
    SFTP_PORT = os.getenv('SFTP_PORT', '2222')
    # All working directories live under one disposable tmp tree.
    TMP_TEST_DIRECTORY = Path(__file__).parent.joinpath('tmp_test_files')
    PARTIAL_FILES_DIRECTORY = TMP_TEST_DIRECTORY.joinpath('partial_files/')
    ENCRYPTED_FILES_DIRECTORY = TMP_TEST_DIRECTORY.joinpath('encrypted_files/')
    QUARANTINED_FILES_DIRECTORY = TMP_TEST_DIRECTORY.joinpath('quarantined_files/')
    SORTING_FILES_DIRECTORY = TMP_TEST_DIRECTORY.joinpath('sorting_files/')
    EXCEPTION_MANAGER_URL = 'http://test'
# Use dev or test defaults depending on environment.
# Rebinds the module-level name `Config`, so importers transparently get the
# DEV/TEST subclass; any other ENVIRONMENT value keeps the PROD base class.
if Config.ENVIRONMENT == 'DEV':
    Config = DevConfig
elif Config.ENVIRONMENT == 'TEST':
    Config = TestConfig
| StarcoderdataPython |
108844 | <filename>configure.py
# Thin entry point: delegates straight to setup() from the local setup module.
# NOTE(review): the star import assumes the setup module exposes a `setup`
# callable — confirm against setup.py.
from setup import *
setup()
3290737 | """
bytechomp.serialization
"""
import struct
from typing import Annotated, get_origin, get_args
from dataclasses import is_dataclass, fields
from bytechomp.datatypes.lookups import ELEMENTARY_TYPE_LIST, TYPE_TO_TAG, TYPE_TO_PYTYPE
from bytechomp.byte_order import ByteOrder
def flatten_dataclass(data_object: type) -> tuple[str, list[int | float | str | bytes]]:
    """Flatten a dataclass instance into a struct pattern and a value list.

    Recursively walks the fields of *data_object* and accumulates both a
    ``struct``-style format string describing the binary layout and the flat
    list of primitive values to pack with it.

    Args:
        data_object (type): Dataclass instance to flatten.

    Returns:
        tuple[str, list[int | float | str | bytes]]: (pattern string, values list)

    Raises:
        TypeError: when a field's value does not match its declared type, or
            an annotated length does not match the actual length.
        Exception: for malformed annotations or unsupported field types.
    """
    # pylint: disable=too-many-branches
    # pylint: disable=line-too-long
    # pylint: disable=too-many-nested-blocks
    # pylint: disable=too-many-statements

    if not is_dataclass(data_object):
        raise TypeError("provided object must be a valid dataclass")

    pattern: str = ""
    values: list[int | float | str | bytes] = []

    for field in fields(data_object):
        val = getattr(data_object, field.name)
        val_t = type(val)

        if field.type in ELEMENTARY_TYPE_LIST:
            # Elementary numeric type: one format tag, one value.
            if not isinstance(val, TYPE_TO_PYTYPE[field.type]):  # type: ignore
                raise TypeError(
                    f"{field.name} field contains {val_t} type but requires {field.type}"
                )
            pattern += TYPE_TO_TAG[field.type]
            values.append(val)
        elif is_dataclass(field.type):
            # Nested dataclass: flatten recursively and splice the result in.
            # Fixed: the original tested isinstance(val, val_t), i.e.
            # isinstance(val, type(val)), which is always True and therefore
            # never caught a mismatched field value.
            if not isinstance(val, field.type):
                raise TypeError(
                    f"{field.name} field contains {val_t} type but requires {field.type}"
                )
            nested_pattern, nested_values = flatten_dataclass(val)
            pattern += nested_pattern
            values.extend(nested_values)
        elif get_origin(field.type) == Annotated:
            # Annotated[T, length] — length-carrying str/bytes/list fields.
            args = get_args(field.type)
            if len(args) != 2:
                raise Exception(
                    f"annotated value should only have two arguments (field: {field.name})"
                )
            arg_type = args[0]
            length = args[1]
            if not isinstance(length, int):
                raise TypeError("second annotated argument must be an integer to denote length")

            # deal with string type
            if arg_type == str:
                if not isinstance(val, str):
                    raise TypeError(
                        f"{field.name} field contains {val_t} type but requires {field.type}"
                    )
                if length != len(val):
                    raise TypeError(
                        f"{field.name} string field has a length of {len(val)} but requires a length of {length}"
                    )
                pattern += f"{length}s"
                values.append(val.encode())

            # deal with bytes type
            elif arg_type == bytes:
                if not isinstance(val, bytes):
                    raise TypeError(
                        f"{field.name} field contains {val_t} type but requires {field.type}"
                    )
                if length != len(val):
                    raise TypeError(
                        f"{field.name} bytes field has a length of {len(val)} but requires a length of {length}"
                    )
                # NOTE(review): struct's 'p' is a length-prefixed Pascal
                # string capped at 255 bytes; presumably the matching reader
                # expects this — confirm before changing to 's'.
                pattern += f"{length}p"
                values.append(val)

            # deal with list type
            elif get_origin(arg_type) == list:
                if not isinstance(val, list):
                    raise TypeError(
                        f"{field.name} field contains {val_t} type but requires {field.type}"
                    )
                if length != len(val):
                    raise TypeError(
                        f"{field.name} list field has a length of {len(val)} but requires a length of {length}"
                    )

                list_type_args = get_args(arg_type)
                if len(list_type_args) != 1:
                    raise Exception(
                        f"list must contain only one kind of data type (field: {field.name})"
                    )
                list_type = list_type_args[0]

                if list_type in ELEMENTARY_TYPE_LIST:
                    # Homogeneous list of elementary values: repeat the tag.
                    element_type = TYPE_TO_PYTYPE[list_type]
                    for field_element in val:
                        if not isinstance(field_element, element_type):  # type: ignore
                            raise TypeError(
                                f"{field.name} field contains {val_t} type but requires {field.type}"
                            )
                    pattern += TYPE_TO_TAG[list_type] * length
                    values.extend(val)
                elif is_dataclass(list_type):
                    # List of nested dataclasses: flatten each element.
                    element_type = list_type
                    for field_element in val:
                        if not isinstance(field_element, element_type):  # type: ignore
                            raise TypeError(
                                f"{field.name} field contains {val_t} type but requires {field.type}"
                            )
                        nested_pattern, nested_values = flatten_dataclass(field_element)
                        pattern += nested_pattern
                        values.extend(nested_values)
                else:
                    raise Exception(f"unsupported list type: {list_type} (field: {field.name})")
            else:
                raise Exception(f"unsupported annotated type: {arg_type} (field: {field.name})")
        elif field.type in [list, bytes, str]:
            raise Exception(
                f"annotation needed for list/string/bytes (length required, field: {field.name})"
            )
        else:
            raise Exception(f"unsupported data type ({field.type}) on field {field.name}")

    return pattern, values
def serialize(data_object: type, byte_order: ByteOrder = ByteOrder.NATIVE) -> bytes:
    """Serialize a fully populated dataclass into a byte string.

    Args:
        data_object (type): Dataclass instance to serialize.
        byte_order (ByteOrder): Byte-order prefix applied to the pattern.

    Returns:
        bytes: Packed binary representation of the dataclass.
    """
    if not is_dataclass(data_object):
        raise TypeError("provided object must be a valid dataclass")

    fmt, field_values = flatten_dataclass(data_object)
    return struct.pack(byte_order.to_pattern() + fmt, *field_values)
| StarcoderdataPython |
38930 | <reponame>FitchOpenSource/C4D-To-Unity
import c4d
from c4d import gui, documents
#Welcome to the world of Python
def findNameMaterial(string, materials, cnt):
    """Return a name not present in *materials* by appending numeric suffixes.

    Suffixes accumulate on collisions: "mat" -> "mat_1" -> "mat_1_2" -> ...
    """
    candidate = string
    while True:
        cnt += 1
        candidate = candidate + "_" + str(cnt)
        if materials.count(candidate) == 0:
            return candidate
#Create a unique name for the materials and remove unorthodox characters
def cleanMaterials():
mats = doc.GetMaterials()
materials = []
for x in mats:
string = x.GetName()
if string.find(".") != -1:
string = x.GetName()
string = string.replace(".", "")
string = string.replace("*", "")
if materials.count(string) == 0:
materials.append(string)
else:
string = findNameMaterial(string, materials,0)
materials.append(string)
x.SetName(string)
c4d.documents.SetActiveDocument(doc)
#def findNameObject(string, objects, cnt):
#
#cnt = cnt + 1
#tmp = string;
#string = string + "_" + str(cnt)
#if objects.count(string) == 0:
#return string
#else:
#string = findNameObject(tmp,objects,cnt)
#return string
def iterateChildren(obj, objects):
    """Depth-first walk: clean the tags on *obj*, then recurse into children."""
    CleanTags(obj)
    for node in obj.GetChildren():
        iterateChildren(node, objects)
iterateChildren(child, objects)
def CleanTags(obj):
    """Normalize the texture/UVW tags on *obj* for export to Unity.

    Removes material-less and duplicate unrestricted texture tags, preserves
    locked UVW tags, and regenerates UVW coordinates from the projection.
    The sequence of c4d.CallCommand/SetActiveObject calls is order-dependent.
    """
    doc.SetActiveObject(obj)
    # NOTE(review): `lists` is built but never read — looks like dead code.
    lists = []
    lists.append(obj)
    listTags = obj.GetTags()
    listMultipleTextureTags = []
    # Make current Object Editable (per the original comment, command 12236).
    c4d.CallCommand(12236)
    c4d.documents.SetActiveDocument(doc)
    # Null Objects are ignored — nothing to clean on them.
    if obj.GetType() == c4d.Onull:
        return
    ####################
    # Remove duplicated texture tags (keeps polygon-selection-restricted ones).
    hasUVWLock = False
    for t in listTags:
        # Unrestricted texture tags with a material are duplicate candidates.
        if type(t) == c4d.TextureTag and t.GetMaterial() is not None:
            selection = t[c4d.TEXTURETAG_RESTRICTION]
            if selection == "":
                listMultipleTextureTags.append(t)
        # Texture tags with no material are useless — drop them.
        if type(t) == c4d.TextureTag and t.GetMaterial() is None:
            t.Remove()
    # Keep only the LAST unrestricted texture tag; remove the rest.
    increment = 0
    tag = None
    for tTags in listMultipleTextureTags:
        selection = listMultipleTextureTags[increment][c4d.TEXTURETAG_RESTRICTION]
        if len(listMultipleTextureTags) != (increment + 1):
            listMultipleTextureTags[increment].Remove()
        tag = listMultipleTextureTags[increment]
        increment = increment + 1
    ####################
    # If the UVW tag is locked (UVWTAG_LOCK == True) it is not erased below.
    UVWtag = obj.GetTag(c4d.Tuvw)
    # Lock the existing UVW tag when there is no texture tag or it already
    # uses UVW projection (projection mode 6 per the original comments).
    if UVWtag is not None and (tag is None or tag[c4d.TEXTURETAG_PROJECTION] == 6):
        obj.GetTag(c4d.Tuvw)[c4d.UVWTAG_LOCK] = True
        c4d.EventAdd()
    # Drop every unlocked UVW tag; remember whether a locked one survives.
    listTags = obj.GetTags()
    for t in listTags:
        if type(t) == c4d.UVWTag:
            if t[c4d.UVWTAG_LOCK] == False:
                t.Remove()
            else:
                hasUVWLock = True
    # Generate 2 UVW tags: one for the texture and a second one for lighting.
    if tag is None or tag[c4d.TEXTURETAG_PROJECTION] == 6 or UVWtag is None:
        doc.SetActiveObject(obj)
        if hasUVWLock == False:
            # Tags menu, UVW tags -> "Set from projection" command.
            c4d.CallCommand(1030000, 1030000)
            doc.SetActiveObject(obj)
            if obj.GetTag(c4d.Tuvw) is not None:
                obj.GetTag(c4d.Tuvw)[c4d.UVWTAG_LOCK] = True
        doc.SetActiveObject(obj)
        # Tags menu, UVW tags -> "Set from projection" command (second tag).
        c4d.CallCommand(1030000, 1030000)
    else:
        doc.SetActiveTag(tag)
        # Tags menu, "Generate UVW coordinates" — used when the texture tag's
        # projection is not set to UVW mapping.
        c4d.CallCommand(12235, 12235)
        tag[c4d.TEXTURETAG_TILE]=True
        obj.GetTag(c4d.Tuvw)[c4d.UVWTAG_LOCK] = True
        doc.SetActiveObject(obj)
        # Tags menu, UVW tags -> "Set from projection" command.
        c4d.CallCommand(1030000, 1030000)
    c4d.documents.SetActiveDocument(doc)
    # Move the surviving texture tag to the front of the tag list.
    Ttag = obj.GetTag(c4d.Ttexture)
    if Ttag is not None:
        obj.InsertTag(Ttag, None)
# Create a Unique name for the material (unity needs a unique name for materials)
#def cleanObjects(x,objects):
#string = x.GetName()
#if string.find(".") != -1:
#string = string.replace(".", "")
#if objects.count(string) == 0:
#objects.append(string)
#else:
#string = findNameObject(string, objects,0)
#objects.append(string)
#x.SetName(string)
#c4d.documents.SetActiveDocument(doc)
#Remove Invisble Objects
def iterateChildrenInvisble(obj):
    """Recursively apply removeInvisbleObjects to *obj* and its subtree."""
    removeInvisbleObjects(obj)
    for node in obj.GetChildren():
        iterateChildrenInvisble(node)
def removeInvisbleObjects(obj):
    """Delete *obj* when it is hidden in BOTH the editor and the renderer."""
    hidden_in_editor = obj.GetEditorMode() == 1
    hidden_in_render = obj.GetRenderMode() == 1
    if hidden_in_editor and hidden_in_render:
        obj.Remove()
####################
def main():
    """Run the full scene cleanup: purge hidden objects, then fix names/tags."""
    # Pass 1: delete objects hidden in both the editor and the renderer.
    for root in doc.GetObjects():
        iterateChildrenInvisble(root)

    # Pass 2: make material names unique and strip unorthodox characters.
    cleanMaterials()

    # Pass 3: add/normalize UVW tags and remove useless texture tags.
    seen_names = []
    for root in doc.GetObjects():
        iterateChildren(root, seen_names)

    print("Done")
print("Done")
# Script entry point when run inside Cinema 4D's script manager.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1789608 | <reponame>iflyings/flask-website<gh_stars>0
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
return User.get(user_id)
| StarcoderdataPython |
5104 | # Draw image time series for one or more plots
from jicbioimage.core.image import Image
import dtoolcore
import click
from translate_labels import rack_plot_to_image_plot
from image_utils import join_horizontally, join_vertically
def identifiers_where_match_is_true(dataset, match_function):
return [i for i in dataset.identifiers if match_function(i)]
def generate_image_series_for_plot(rack, plot):
n_image, n_plot = rack_plot_to_image_plot(rack, plot)
# n_image, n_plot = 55, 24
print "{}_{}".format(n_image, n_plot)
dataset_uri = 'file:/Users/hartleym/data_intermediate/separate_plots'
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
plot_number_overlay = dataset.get_overlay('plot_number')
ordering_overlay = dataset.get_overlay('ordering')
date_overlay = dataset.get_overlay('date')
def is_match(i):
try:
ordering_as_int = int(ordering_overlay[i])
except TypeError:
return False
if ordering_as_int != n_image:
return False
if int(plot_number_overlay[i]) != n_plot:
return False
return True
identifiers = identifiers_where_match_is_true(dataset, is_match)
def sort_identifiers_by_date(identifiers):
dates_and_identifiers = [(date_overlay[i], i) for i in identifiers]
sorted_dates_and_identifiers = sorted(dates_and_identifiers)
_, sorted_identifiers = zip(*sorted_dates_and_identifiers)
return(sorted_identifiers)
sorted_identifiers = sort_identifiers_by_date(identifiers)
def identifiers_to_joined_image(identifiers):
images = []
for identifier in identifiers:
image_fpath = dataset.item_content_abspath(identifier)
image = Image.from_file(image_fpath)
images.append(image)
return join_horizontally(images)
result = identifiers_to_joined_image(sorted_identifiers)
output_fname = 'example_from_tobin.png'
with open(output_fname, 'wb') as fh:
fh.write(result.png())
@click.command()
def main():
# Early leaf senescence
# generate_image_series_for_plot(3, 16)
# generate_image_series_for_plot(7, 9)
# generate_image_series_for_plot(9, 1)
# Late leaf senescence
generate_image_series_for_plot(7, 15)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1708235 | <filename>pawpyseed/core/noncollinear.py
from pawpyseed.core.wavefunction import *
class NCLWavefunction(pawpyc.CNCLWavefunction, Wavefunction):
def __init__(self, struct, pwf, cr, dim, symprec=1e-4, setup_projectors=False):
"""
Arguments:
struct (pymatgen.core.Structure): structure that the wavefunction describes
pwf (pawpyc.PWFPointer): holder class for pswf_t and k-points/k-point weights
cr (CoreRegion): Contains the pseudopotentials, with projectors and
partials waves, for the structure
dim (pymatgen.io.vasp.outputs.Outcar OR np.ndarry OR list of length 3):
Outcar object for reading ngf or the dimensions NG* of the FFT grid
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
self.band_props = pwf.band_props.copy(order="C")
super(Wavefunction, self).__init__(pwf)
if not self.ncl:
raise PAWpyError(
"Pseudowavefunction is collinear! Call Wavefunction(...) instead"
)
self.structure = struct
self.cr = cr
self.dim = np.array(dim).astype(np.int32)
if setup_projectors:
self.check_c_projectors()
@staticmethod
def from_files(
struct="CONTCAR",
wavecar="WAVECAR",
cr="POTCAR",
vr="vasprun.xml",
setup_projectors=False,
):
"""
Construct a Wavefunction object from file paths.
Arguments:
struct (str): VASP POSCAR or CONTCAR file path
wavecar (str): VASP WAVECAR file path
cr (str): VASP POTCAR file path
vr (str): VASP vasprun file path
outcar (str): VASP OUTCAR file path
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
vr = Vasprun(vr)
dim = np.array(
[vr.parameters["NGX"], vr.parameters["NGY"], vr.parameters["NGZ"]]
)
symprec = vr.parameters["SYMPREC"]
pwf = pawpyc.PWFPointer(wavecar, vr)
return NCLWavefunction(
Poscar.from_file(struct).structure,
pwf,
CoreRegion(Potcar.from_file(cr)),
dim,
symprec,
setup_projectors,
)
@staticmethod
def from_directory(path, setup_projectors=False):
"""
Assumes VASP output has the default filenames and is located
in the directory specificed by path.
Arguments:
path (str): VASP output directory
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
filepaths = []
for d in ["CONTCAR", "WAVECAR", "POTCAR", "vasprun.xml"]:
filepaths.append(str(os.path.join(path, d)))
args = filepaths + [setup_projectors]
return NCLWavefunction.from_files(*args)
def desymmetrized_copy(self, allkpts=None, weights=None):
raise NotImplementedError()
def write_state_realspace(
self, b, k, s, fileprefix="", dim=None, scale=1, remove_phase=False
):
"""
Writes the real and imaginary parts of a given band to two files,
prefixed by fileprefix
Args:
b (int): band number (0-indexed!)
k (int): kpoint number (0-indexed!)
s (int): spin number (0-indexed!)
fileprefix (string, ""): first part of the file name
dim (numpy array of 3 ints, None): dimensions of the FFT grid
scale (scalar, 1): number to multiply the realspace wavefunction by.
For example, VASP multiplies charge density by the volume
of the structure.
remove_phase (False): If True, removes the e^(ikr) phase
from the wavefunction (this does not necessarily mean
the wavefunction is real). This is useful if you want
to visualize the wavefunction because the e^(ikr) phase
makes the wavefunction non-periodic
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with complex double values for the realspace wavefunction
The wavefunction is written in two files with z the slow index.
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim))
filename_base = "%sB%dK%dS%d" % (fileprefix, b, k, s)
filename1 = "%s_UP_REAL.vasp" % filename_base
filename2 = "%s_UP_IMAG.vasp" % filename_base
filename3 = "%s_DOWN_REAL.vasp" % filename_base
filename4 = "%s_DOWN_IMAG.vasp" % filename_base
res0, res1 = self._write_realspace_state(
filename1,
filename2,
filename3,
filename4,
scale,
b,
k,
s,
remove_phase=remove_phase,
)
self._convert_to_vasp_volumetric(filename1, self.dim)
self._convert_to_vasp_volumetric(filename2, self.dim)
self._convert_to_vasp_volumetric(filename3, self.dim)
self._convert_to_vasp_volumetric(filename4, self.dim)
return res0, res1
def write_density_realspace(self, filename="PYAECCAR.vasp", dim=None, scale=1):
"""
Writes the real and imaginary parts of a given band to two files,
prefixed by fileprefix
Args:
b (int): band number (0-indexed!)
k (int): kpoint number (0-indexed!)
s (int): spin number (0-indexed!)
fileprefix (string, ""): first part of the file name
dim (numpy array of 3 ints, None): dimensions of the FFT grid
scale (scalar, 1): number to multiply the realspace wavefunction by.
For example, VASP multiplies charge density by the volume
of the structure.
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with complex double values for the realspace wavefunction
The charge density is written with z the slow index.
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim))
res = self._write_realspace_density(filename, scale)
self._convert_to_vasp_volumetric(filename, self.dim)
return res
| StarcoderdataPython |
3317653 | <reponame>Soooyeon-Kim/Data-Analysis
import numpy as np
import pandas as pd
print("Masking & query")
df = pd.DataFrame(np.random.rand(5, 2), columns=["A", "B"])
#print(df, "\n")
# A컬럼값이 0.5보다 작고 B컬럼 값이 0.3보다 큰값 출력
# 마스킹 연산 활용
print(df[(df['A']<0.5)&(df['B']>0.3)])
# query 함수 활용
print(df.query("A<0.5 and B>0.3"))
| StarcoderdataPython |
3204968 | <filename>core/views.py<gh_stars>0
from django.http import HttpResponse
from django.shortcuts import render
from django.core.mail import send_mail
def about(request):
return render(request, 'templatepage.html',{})
def contact(request):
if request.method == "POST":
name = request.POST.get('full-name')
email = request.POST.get('email')
subject = request.POST.get('subject')
message = request.POST.get('message')
data = {
'name': name,
'email': email,
'subject': subject,
'message': message,
}
message = '''
New message: {}
From: {}
'''.format(data['message'], data['email'])
send_mail(data['subject'], message, '', ['<EMAIL>'])
return HttpResponse('Thank you for submitting the form, we will be touch soon')
return render(request, 'contact.html', {}) | StarcoderdataPython |
1645038 | from libs.scan.check_line import check_line
from libs.scan.verbosity_levels_usage import verbosity_levels_usage
def extract_log_metrics(lang_logs_list, matching_files, repo_full_name):
found_log_lines = []
files_with_logs_count = 0
for code_file in matching_files:
are_there_logs_in_file = False
with open(code_file, 'rb') as file_lines:
for line in file_lines:
if check_line(line.decode("ISO-8859-1"), lang_logs_list, repo_full_name):
found_log_lines.append(line.decode("ISO-8859-1").strip())
are_there_logs_in_file = True
if are_there_logs_in_file:
files_with_logs_count += 1
verbosity_levels, no_logger_logs_amount = verbosity_levels_usage(found_log_lines)
metrics = {
"logs_total_amount": len(found_log_lines),
"no_logger_logs_amount": no_logger_logs_amount,
"debug_verbosity_level_usage": verbosity_levels["Debug"],
"info_verbosity_level_usage": verbosity_levels["Info"],
"warning_verbosity_level_usage": verbosity_levels["Warning"],
"error_verbosity_level_usage": verbosity_levels["Error"],
"critical_verbosity_level_usage": verbosity_levels["Critical"],
"amount_of_files_which_contains_logs": files_with_logs_count
}
return metrics
| StarcoderdataPython |
3344642 | from django.db import models
from django.core.validators import RegexValidator
import re
from django.db.models.fields import BooleanField, CharField
from django.db.models.signals import post_save
from django.db.models.deletion import CASCADE
class UserManager(models.Manager):
def validate(self, form):
errors = {}
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
if not EMAIL_REGEX.match(form['email']):
errors['email'] = 'Invalid Email Address'
emailCheck = self.filter(email=form['email'])
if emailCheck:
errors['email'] = 'Email Address already in use'
usernameCheck = self.filter(username=form['username'])
if usernameCheck:
errors['username'] = 'Username already in use'
if len(form['password']) < 8:
errors['password'] = 'Password must be at least 8 characters'
if form['password'] != form['confirm']:
errors['password'] = 'Passwords do not match'
return errors
class User(models.Model):
firstName = models.CharField(max_length=45)
lastName = models.CharField(max_length=45)
email = models.EmailField(unique=True)
username = models.CharField(max_length=45, unique=True)
level = models.IntegerField(default=0)
password = models.CharField(max_length=45)
objects = UserManager()
userCreatedAt = models.DateTimeField(auto_now_add=True)
userUpdatedAt = models.DateTimeField(auto_now=True)
def __str__(self):
return self.username
class Profile(models.Model):
discord = models.CharField(max_length=255, blank=True)
user = models.OneToOneField(User, unique=True, on_delete=models.CASCADE)
image = models.ImageField(upload_to='profileImgs', default='bee.jpg')
def __str__(self):
return f'{self.user.username} Profile'
def create_user_profile(sender, instance, created, **kwargs):
if created:
User.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
class Stack(models.Model):
stackName = models.CharField(max_length=255)
class Note(models.Model):
subject = models.CharField(max_length=255)
content = models.TextField()
private = models.BooleanField(default=False)
code = models.TextField(blank=True)
upvote = models.IntegerField(default=0)
resourceLink = models.CharField(max_length=255, blank=True)
author = models.ForeignKey(User, related_name='noteUser', on_delete=CASCADE)
stack = models.ForeignKey(Stack, related_name='noteStack', on_delete=CASCADE)
def __str__(self):
return self.subject
class Upload(models.Model):
uploadName = models.CharField(max_length=255, blank=True)
upload = models.FileField(upload_to='docs', default='bee.jpg')
note = models.OneToOneField(Note, unique=True, on_delete=models.CASCADE)
def __str__(self) -> str:
return f'{self.note.subject} Upload'
def create_note_upload(sender, instance, created, **kwargs):
if created:
Note.objects.create(note=instance)
post_save.connect(create_note_upload, sender=Note)
class Comment(models.Model):
comment = models.TextField()
commentCode = models.TextField(blank=True)
like = models.IntegerField(default=0)
resourceUrl = models.CharField(max_length=255, blank=True)
commenter = models.ForeignKey(User, related_name='commentUser', on_delete=CASCADE)
note = models.ForeignKey(Note, related_name='commentNote', on_delete=CASCADE)
commentCreatedAt = models.DateTimeField(auto_now_add=True)
commentUpdatedAt = models.DateTimeField(auto_now=True) | StarcoderdataPython |
3361121 | from django.contrib.auth.models import Group, User
from django.test import TestCase
from tally_ho.libs.permissions.groups import create_permission_groups, \
create_demo_users_with_groups
class TestGroups(TestCase):
number_of_groups = 14
def setUp(self):
pass
def test_create_permission_groups(self):
count = Group.objects.count()
create_permission_groups()
diff_count = Group.objects.count() - count
self.assertEqual(diff_count, self.number_of_groups)
def test_create_demo_users_with_groups(self):
count = Group.objects.count()
user_count = User.objects.count()
password = '<PASSWORD>'
create_demo_users_with_groups(password)
diff_count = Group.objects.count() - count
self.assertEqual(diff_count, self.number_of_groups)
user_diff_count = User.objects.count() - user_count
self.assertEqual(user_diff_count, self.number_of_groups)
user = User.objects.get(username='administrator')
self.assertTrue(user.check_password(password))
| StarcoderdataPython |
3353871 | <reponame>RemiDesgrange/onegeo-api
# Copyright (c) 2017-2018 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.db import models
from functools import reduce
from onegeo_api.elastic import elastic_conn
from onegeo_api.exceptions import ConflictError
from onegeo_api.exceptions import ElasticError
from onegeo_api.utils import merge_two_objs
from uuid import uuid4
class Analysis(models.Model):
    """Per-user Elasticsearch analysis document.

    ``document`` stores the raw ``settings.analysis`` JSON fragment
    (analyzers, normalizers, filters, ...). :meth:`clean` validates it
    against the user's other ``Analysis`` rows and against a real
    (temporary) Elasticsearch index before the row may be saved.
    """

    class Meta(object):
        verbose_name = 'Analysis document'
        verbose_name_plural = 'Analysis documents'

    title = models.TextField(verbose_name='Title')
    user = models.ForeignKey(User, verbose_name='User', on_delete=models.CASCADE)
    # Raw Elasticsearch "analysis" settings fragment, e.g. {"analyzer": {...}}.
    document = JSONField(verbose_name='Document')

    def clean(self, *args, **kwargs):
        """Validate the analysis document.

        Raises ``ValidationError`` when the user is missing, when a component
        name collides with one of the user's other documents, when merging
        all of the user's documents would be ambiguous, or when Elasticsearch
        itself rejects the settings.
        """
        if not self.user_id:
            raise ValidationError('User is mandatory.')  # NOTE: ugly check (original comment: "C'est caca")

        components = self.get_components(
            user=self.user, exclude_pk=self.pk and [self.pk])

        # Check that the document does not reuse a component name already
        # taken by one of the user's other analysis documents.
        for component in components.keys():
            key = component[:-1]  # plural to singular
            if key in self.document:
                for val in self.document[key].keys():
                    if val in components[component]:
                        raise ValidationError(
                            "The {0} name '{1}' is already taken.".format(key, val))

        # Check that the document does not introduce an ambiguous duplicate:
        # two components with the same name (as can happen with "filters"
        # and "token filters") must be strictly identical, so that compiling
        # the merged "settings" cannot produce inconsistencies.
        try:
            reduce(merge_two_objs, [
                instance.document for instance
                in Analysis.objects.filter(user=self.user)] + [self.document])
        except ConflictError as e:
            raise ValidationError(e.__str__())

        # Check that the document is valid by creating a throwaway index
        # with these settings, then dropping it on success.
        try:
            index = str(uuid4())
            elastic_conn.create_index(
                index, {'settings': {'analysis': self.document}})
        except ElasticError as e:
            raise ValidationError(e.description)
        else:
            elastic_conn.delete_index(index)

    @classmethod
    def get_components(cls, user=None, exclude_pk=None):
        """Return ``{'analyzers': [...], 'normalizers': [...]}`` listing the
        component names declared across the user's analysis documents,
        optionally excluding the rows whose pks are in ``exclude_pk``."""
        data = {'analyzers': [], 'normalizers': []}
        queryset = cls.objects.filter(user=user)
        if exclude_pk:
            queryset = queryset.exclude(pk__in=exclude_pk)
        for instance in queryset:
            document = instance.document
            for component in data.keys():
                key = component[:-1]  # plural to singular
                if key in document:
                    data[component].extend(list(document[key].keys()))
        return data

    @classmethod
    def get_component_by_name(cls, component, name, user=None):
        """Return the first of the user's documents that declares ``name``
        under ``component``; raise ``DoesNotExist`` when none does."""
        for instance in cls.objects.filter(user=user):
            if component in instance.document \
                    and name in instance.document[component]:
                return instance.document
        raise cls.DoesNotExist(
            "Analysis component '{0}' as {1} does not exists.".format(
                name, component))
def get_complete_analysis(user=None, **kwargs):
    """Merge the user's analysis documents providing the requested components.

    ``kwargs`` maps a component kind (e.g. ``analyzer``) to an iterable of
    component names. Returns the merged document, or ``{}`` when no
    component was requested.
    """
    matched = [
        Analysis.get_component_by_name(kind, name, user=user)
        for kind, names in kwargs.items()
        for name in names
    ]
    if not matched:
        return {}
    # reduce() over a single-element list simply returns that element.
    return reduce(merge_two_objs, matched)
| StarcoderdataPython |
43389 | <filename>commons.py<gh_stars>0
# -*- coding: utf-8 -*-
# burden -> n  # load ("нагрузка")
# bandage -> k  # splint ("шина")
import numpy
import collections
class RepresentationBasic(object):
"""
Basic Representation of the current solution
"""
def __init__(self, burdens, bandage_n, zero_assignments=False,
burdens_assignmts=None, bandages_burdens=None, eval_repr=None):
self.burdens = burdens
self.bandage_n = bandage_n
if burdens_assignmts is None:
self.burdens_assignmts = self._burdens_init_zeroassignments(burdens, bandage_n) if zero_assignments \
else self._burdens_init_rndassignments(burdens, bandage_n)
else:
self.burdens_assignmts = burdens_assignmts
assert len(burdens) == len(self.burdens_assignmts)
self.bandages_burdens_mem = bandages_burdens
# self.bandages_burdens = self._get_updated_bandages_burdens() if bandages_burdens is None else bandages_burdens
self._eval_mem = eval_repr
self.duration = None
def _burdens_init_zeroassignments(self, burdens, bandage_n):
return [0]*len(burdens)
def _burdens_init_rndassignments(self, burdens, bandage_n):
return numpy.random.randint(bandage_n, size=len(burdens))
def all_burdens_assigned(self):
return all([a is not None for a in self.burdens_assignmts])
def get_bandages_burdens(self):
if self.bandages_burdens_mem is None:
self.bandages_burdens_mem = self._get_updated_bandages_burdens()
return self.bandages_burdens_mem
def _get_updated_bandages_burdens(self):
bandages_burdens = [0]*self.bandage_n
for i in range(len(self.burdens)):
if self.burdens_assignmts[i] is not None:
bandages_burdens[self.burdens_assignmts[i]] += self.burdens[i]
return bandages_burdens
def eval_repr(self):
if self._eval_mem is None:
self._eval_mem = eval_repr(self.get_bandages_burdens())
return self._eval_mem
def get_bandage_assignmts(self):
bandage_assignmts = collections.defaultdict(list)
for i in range(len(self.burdens_assignmts)):
bandage_assignmts[self.burdens_assignmts[i]].append(i)
return bandage_assignmts
def __str__(self):
return "burdens={}\nbandage_n={}\nburdens_assignmts= {}\nbandage_assignmts= {}\nbandages_burdens= {}\neval: {}" \
.format(self.burdens, self.bandage_n, self.burdens_assignmts,
self.get_bandage_assignmts().items(), self.get_bandages_burdens(),
self.eval_repr()) \
+ ("\nduration= {:.3f}s".format(self.duration) if self.duration is not None else "")
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(frozenset(self.__dict__.iterkeys()))
def eval_repr(bandages_burdens):
    """Score a load distribution: the standard deviation of the per-bandage
    loads (lower means better balanced)."""
    loads = numpy.asarray(bandages_burdens)
    return loads.std()
| StarcoderdataPython |
173493 | # CRIANDO UMA FUNÇÃO
def fahrenheit(x):
    """Return the Celsius temperature ``x`` converted to Fahrenheit."""
    scale = 9 / 5  # each degree Celsius spans 1.8 degrees Fahrenheit
    return x * scale + 32
if __name__ == '__main__':
    # Run the interactive converter only when executed as a script, so that
    # importing this module has no side effects.
    x = float(input('Digite uma temperatura em (C°): '))
    print(f'A temperatura digitada em {x}C°, é igual a {fahrenheit(x)}F°')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.