id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3399174 | <reponame>jmstevens/aesopbot<filename>src/features/genius/song_structure.py
"""From dlarsen5's PyRap song structure script.
https://github.com/dlarsen5/PyRap/blob/master/Song_Structure.py
"""
import itertools
import os
import pickle
import re
import json
# from nltk.corpus import stopwords
# Target artist names for the lyric-scraping pipeline. Many entries read
# '<NAME>' because the dataset this file was published in anonymized personal
# names; the placeholders are kept verbatim.
# NOTE(review): this list is not referenced anywhere in the visible module —
# confirm whether downstream code imports it before removing.
artists = ['<NAME>','Drake','Chance The Rapper','<NAME>','Logic','Future','Chief Keef','Eminem','Kanye West','JAY-Z','Big Sean',
    'Lil Uzi Vert','Tyler, The Creator','Earl Sweatshirt','2 Chainz','G-Eazy','ScHoolboy Q','Young Thug','<NAME>$$', 'Wu Tang Clan',
    'Flatbush Zombies','A$AP Rocky','A$AP Ferg','Dumbfoundead','<NAME>','Waka Flocka Flame','Nas','A Tribe Called Quest','Vic Mensa',
    '$UICIDEBOY$','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','Yonas','<NAME>','<NAME>',
    'Three 6 Mafia','<NAME>','RiFF RAFF','<NAME>','<NAME>','Tyga','<NAME>','<NAME>','<NAME>','<NAME>','Migos','Rihanna',
    '<NAME>','21 Savage','<NAME>','<NAME>','<NAME>','XXXTENTACION','Lil Pump','Ski Mask the Slump God','<NAME>',
    'SmokePurpp','A Boogie Wit Da Hoodie','Playboi Carti','Ugly God','Wiz Khalifa','<NAME>','Beyoncé','<NAME>','Meek Mill', 'Aesop Rock']
def get_lyrics_file():
    """Load the scraped lyrics corpus from disk.

    Returns the parsed JSON object from ``data/raw/artist_lyrics.json``
    (expected to carry an ``'artists'`` list — see ``get_verses``). The path
    is relative to the current working directory.
    """
    with open('data/raw/artist_lyrics.json') as f:
        data = json.load(f)
    return data
def get_verses(data):
    """Collect every verse section found in the lyrics corpus.

    A verse starts at any line that contains ``[`` and whose bracket-stripped,
    lower-cased words include ``verse`` (e.g. ``[Verse 1]``). The section body
    is every following non-empty line up to (not including) the next line
    containing ``[``.

    :param data: Parsed corpus: ``{'artists': [{'songs': [{'lyrics': str}]}]}``.
    :returns: List of sections, each a list of raw lyric lines.
    """
    collected = []
    for artist_entry in data['artists']:
        for song_entry in artist_entry['songs']:
            lines = song_entry['lyrics'].splitlines()
            for idx, raw in enumerate(lines):
                header_words = raw.replace('[', '').replace(']', '').lower().split()
                if '[' not in raw or 'verse' not in header_words:
                    continue
                section = []
                for follow in lines[idx + 1:]:
                    if '[' in follow:
                        break
                    if follow:
                        section.append(follow)
                collected.append(section)
    return collected
def clean_verses(verses):
    """Normalize raw verse sections into token lists.

    For every song (a list of raw lines): bracketed header lines and literal
    ``'\\n'`` entries are dropped; each remaining line is split on whitespace,
    every character is lower-cased and punctuation from the strip set is
    removed; a ``'<eol>'`` marker is appended to each surviving line and a
    ``'<eov>'`` marker terminates each song.

    :param verses: List of songs, each a list of raw lyric lines.
    :returns: List of songs, each a list of token lists ending with '<eov>'.
    """
    stripped = '"\'_()$,.?!—%='
    cleaned_songs = []
    for song in verses:
        cleaned = []
        for line in song:
            if line == '\n' or '[' in line:
                continue
            tokens = []
            for word in line.split():
                normalized = ''.join(ch.lower() for ch in word if ch.lower() not in stripped)
                tokens.append(normalized)
            if tokens:
                cleaned.append(tokens + ['<eol>'])
        cleaned_songs.append(cleaned + ['<eov>'])
    return cleaned_songs
def segment_to_verses(verse_list):
    """Flatten cleaned verses into one space-joined string per song.

    Each element of *verse_list* is a song as produced by ``clean_verses``:
    a list of token lists (lines) and marker strings such as ``'<eov>'``.
    Token lists are joined with spaces; strings are taken as-is; everything
    is concatenated with a leading space per item.

    Bug fix: the loop previously iterated over an undefined global ``clean``
    (NameError at runtime); it now iterates over the ``verse_list`` argument
    as the signature intends.

    :param verse_list: Output of ``clean_verses``.
    :returns: One flattened string per song.
    """
    verses = []
    for song in verse_list:
        verse = ''
        for item in song:
            if isinstance(item, list):
                verse = verse + ' ' + ' '.join(item)
            else:
                verse = verse + ' ' + item
        verses.append(verse)
    return verses
# with open('data/raw/artist_lyrics.json') as f:
# data = json.load(f)
#
# verse_lines = list()
# for k in data['artists']:
# for v in k['songs']:
# song = v['lyrics']
# lines = song.splitlines()
# for l in range(len(lines)):
# title = [x.lower() for x in lines[l].replace('[', '').replace(']', '').split()]
# if '[' in lines[l] and 'verse' in title:
# section_lines = []
# count = l + 1
# done = False
# while count < len(lines) and not done:
# if '[' not in lines[count]:
# if lines[count] != '':
# section_lines.append(lines[count])
# count += 1
# else:
# done = True
# verse_lines.append(section_lines)
#
#
#
#
#
def get_verses(all_lyrics,artist):
    #finds total verses, hooks, bridges, choruses written by a specific artist
    # NOTE(review): this definition shadows the one-argument get_verses(data)
    # defined earlier in the module; only this version survives at import time.
    # NOTE(review): the all_lyrics parameter is overwritten below with the
    # contents of data/raw/artist_lyrics.json, so the argument is ignored.
    one_song_verse_lines = []
    one_song_chorus_lines = []
    one_song_hook_lines = []
    one_song_bridge_lines = []
    total_verse_lines = []
    total_chorus_lines = []
    total_hook_lines = []
    total_bridge_lines = []
    total_lines = []
    Songs = {}
    with open('data/raw/artist_lyrics.json') as f:
        data = json.load(f)
    # First pass: collect every verse section corpus-wide. The result is dead
    # code — verse_lines is never read after this loop.
    verse_lines = list()
    for k in data['artists']:
        for v in k['songs']:
            song = v['lyrics']
            lines = song.splitlines()
            for l in range(len(lines)):
                # Lower-cased words of a potential section header, e.g. "[Verse 1]".
                title = [x.lower() for x in lines[l].replace('[', '').replace(']', '').split()]
                if '[' in lines[l] and 'verse' in title:
                    section_lines = []
                    count = l + 1
                    done = False
                    # Consume non-empty lines until the next bracketed header.
                    while count < len(lines) and not done:
                        if '[' not in lines[count]:
                            if lines[count] != '':
                                section_lines.append(lines[count])
                            count += 1
                        else:
                            done = True
                    verse_lines.append(section_lines)
    all_lyrics = data['artists']
    for _artist in all_lyrics:
        # NOTE(review): iterating .items() rebinds the `artist` parameter to
        # each dict key, so the caller's artist filter is effectively lost
        # from this point on — confirm whether that is intended.
        for artist,songs in _artist.items():
            for _song in songs:
                print(_song)
                if isinstance(_song, dict):
                    song_title = _song['title']
                    song_lyrics = _song['lyrics']
                    art = _song['artist']
                    clean_title = song_title.replace('(','').replace('.','').split()
                    # Solo songs (no 'Ft' in the title): collect every
                    # verse/chorus/hook/bridge section by scanning headers.
                    if art == artist and 'Ft' not in clean_title:
                        lines = song_lyrics.splitlines()
                        for l in range(len(lines)):
                            title = [x.lower() for x in lines[l].replace('[', '').replace(']', '').split()]
                            if '[' in lines[l] and 'verse' in title:
                                section_lines = []
                                count = l + 1
                                done = False
                                while count < len(lines) and not done:
                                    if '[' not in lines[count]:
                                        if lines[count] != '':
                                            section_lines.append(lines[count])
                                        count += 1
                                    else:
                                        done = True
                                total_verse_lines.append(section_lines)
                                one_song_verse_lines.append(section_lines)
                                print(section_lines)
                            elif '[' in lines[l] and 'chorus' in title:
                                section_lines = []
                                count = l + 1
                                done = False
                                while count < len(lines) and not done:
                                    if '[' not in lines[count]:
                                        if lines[count] != '':
                                            section_lines.append(lines[count])
                                        count += 1
                                    else:
                                        done = True
                                total_chorus_lines.append(section_lines)
                                one_song_chorus_lines.append(section_lines)
                            elif '[' in lines[l] and 'hook' in title:
                                section_lines = []
                                count = l + 1
                                done = False
                                while count < len(lines) and not done:
                                    if '[' not in lines[count]:
                                        if lines[count] != '':
                                            section_lines.append(lines[count])
                                        count += 1
                                    else:
                                        done = True
                                total_hook_lines.append(section_lines)
                                one_song_hook_lines.append(section_lines)
                            elif '[' in lines[l] and 'bridge' in title:
                                section_lines = []
                                count = l + 1
                                done = False
                                while count < len(lines) and not done:
                                    if '[' not in lines[count]:
                                        if lines[count] != '':
                                            section_lines.append(lines[count])
                                        count += 1
                                    else:
                                        done = True
                                total_bridge_lines.append(section_lines)
                                one_song_bridge_lines.append(section_lines)
                    artist_first_name = artist.split()[0].lower()
                    # NOTE(review): the per-song accumulators are cleared here,
                    # AFTER the solo-song sections were gathered but BEFORE the
                    # length checks below — so Songs can only be populated from
                    # the featured-song ('Ft') branch. This ordering is kept
                    # verbatim; confirm whether the reset was meant to sit at
                    # the top of the per-song loop instead.
                    total_lines = []
                    one_song_verse_lines = []
                    one_song_chorus_lines = []
                    one_song_hook_lines = []
                    one_song_bridge_lines = []
                    # Featured songs: only keep sections whose header also
                    # mentions this artist's first name.
                    if 'Ft' in clean_title:
                        lines = song_lyrics.splitlines()
                        for l in range(len(lines)):
                            title = [x.lower() for x in lines[l].replace('[','').replace(']','').replace('-','').replace(':','').split()]
                            if '[' in lines[l] and 'verse' in title and artist_first_name in title:
                                section_lines = []
                                count = l + 1
                                done = False
                                while count < len(lines) and not done:
                                    if '[' not in lines[count]:
                                        if lines[count] != '':
                                            section_lines.append(lines[count])
                                        count += 1
                                    else:
                                        done = True
                                total_verse_lines.append(section_lines)
                                one_song_verse_lines.append(section_lines)
                            elif '[' in lines[l] and 'chorus' in title and artist_first_name in title:
                                section_lines = []
                                count = l + 1
                                done = False
                                while count < len(lines) and not done:
                                    if '[' not in lines[count]:
                                        if lines[count] != '':
                                            section_lines.append(lines[count])
                                        count+=1
                                    else:
                                        done = True
                                total_chorus_lines.append(section_lines)
                                one_song_chorus_lines.append(section_lines)
                            elif '[' in lines[l] and 'hook' in title and artist_first_name in title:
                                section_lines = []
                                count = l + 1
                                done = False
                                while count < len(lines) and not done:
                                    if '[' not in lines[count]:
                                        if lines[count] != '':
                                            section_lines.append(lines[count])
                                        count+=1
                                    else:
                                        done = True
                                total_hook_lines.append(section_lines)
                                one_song_hook_lines.append(section_lines)
                    # If this song contributed any section, record the running
                    # totals for that section type (not just this song's part).
                    if len(one_song_verse_lines) > 0:
                        total_lines.append(total_verse_lines)
                    if len(one_song_chorus_lines) > 0:
                        total_lines.append(total_chorus_lines)
                    if len(one_song_hook_lines) > 0:
                        total_lines.append(total_hook_lines)
                    if len(one_song_bridge_lines) > 0:
                        total_lines.append(total_bridge_lines)
                    if len(total_lines) > 0:
                        Songs[song_title] = list(itertools.chain.from_iterable(total_lines))
    #FIXME: Songs has all duplicates
    Lines = {'Verses':total_verse_lines,'Choruses':total_chorus_lines,'Hooks':total_hook_lines,'Bridges':total_bridge_lines}
    return Lines, Songs
def clean_name(song_title):
    """Replace every run of non-ASCII characters in *song_title* with a space."""
    ascii_only = re.sub(r'[^\x00-\x7F]+', ' ', song_title)
    return ascii_only
def clean_song_titles(song_dict):
    """Strip featuring credits out of song titles.

    Title keys are first normalized (non-breaking spaces become plain spaces,
    zero-width spaces are removed). A title containing ``(`` whose text also
    contains ``ft`` (case-insensitive substring — note this matches words like
    'Left' too) is split: the part before the first parenthesis, minus its
    trailing space, becomes the cleaned title, and the text between the first
    two parentheses, minus its closing ``)``, is recorded as the credit.

    :param song_dict: Mapping whose keys are raw song titles.
    :returns: Tuple (cleaned_titles, featured_artists) of lists.
    """
    cleaned_titles = []
    featured_artists = []
    for raw_title in song_dict:
        title = raw_title.replace(u'\xa0', u' ').replace(u'\u200b', u'')
        if '(' in title and 'ft' in title.lower():
            pieces = title.split('(')
            cleaned_titles.append(pieces[0][:-1])
            featured_artists.append(pieces[1][:-1])
        else:
            cleaned_titles.append(title)
    return cleaned_titles, featured_artists
def make_one_list(song_lyrics):
    """Tokenize one song's lyrics into per-line lists of filtered words.

    Each non-header line (no ``[``) is split on whitespace; characters are
    lower-cased and punctuation in ``bad_characters`` is dropped; words found
    in the stop-word list (hand-picked ``bad_words`` plus nltk's English stop
    words when nltk is available) are discarded. Lines whose words are all
    filtered out are skipped.

    Bug fix: the original referenced ``stopwords`` even though the nltk import
    at the top of the module is commented out, so every call raised NameError.
    The corpus is now imported lazily and treated as empty when nltk (or its
    'stopwords' corpus) is unavailable.

    :param song_lyrics: Raw lyrics text of a single song.
    :returns: List of word lists, one per surviving line.
    """
    sentence_list = []
    bad_characters = ['"',"'",'_','(',')','$',',','.','?','!','—']
    bad_words = ['it', 'the', 'you', 'they', 'she', 'he', 'this', 'my', 'to', 'me', 'in', 'like', 'yeah', "you're",
                 "that's", "really", "couldn't",
                 'youre','get','want','come','uh','put','got','one','im',
                 'ran','em','right','gon','need','take','dont','every',
                 'turn','back','lets','better','look','see','til',
                 'aint','tryna','oh','still','yo',"don't","i'm",'gotta',
                 'know','go','yuh']
    try:
        from nltk.corpus import stopwords
        stopword = set(stopwords.words('english'))
    except Exception:
        # nltk missing or its corpus not downloaded — fall back to the
        # hand-picked list only.
        stopword = set()
    total_stop_words = bad_words + list(stopword)
    lines = song_lyrics.splitlines()
    for line in lines:
        if line == '\n':
            continue
        if '[' in line:  # section headers such as "[Verse 1]"
            continue
        new_word = []
        separate = line.split()
        words = []
        for word in separate:
            for character in word:
                character = character.lower()
                if character not in bad_characters:
                    new_word.append(character)
            w = ''.join(new_word)
            if w not in total_stop_words:
                words.append(w)
            new_word = []
        if words != []:
            sentence_list.append(words)
    return sentence_list
def Get_Artist_Lyrics(artist):
    """Load every pickled per-artist lyrics file and extract *artist*'s sections.

    Walks ``Song Lyrics/`` mapping each file name (underscores turned into
    spaces) to its path, unpickles each file, then delegates section
    extraction to ``get_verses``.

    :param artist: Artist name whose sections should be extracted.
    :returns: Tuple (Lines, Songs, all_lyrics) — see ``get_verses`` for the
        first two; all_lyrics maps artist name -> unpickled lyrics object.
    """
    song_lyrics_path = 'Song Lyrics/'
    artist_dict = {}
    for root,dirs,files in os.walk(song_lyrics_path):
        for f in files:
            # NOTE(review): root + f omits os.sep; this only works because
            # song_lyrics_path ends with '/' and there are no subdirectories —
            # consider os.path.join(root, f).
            name = root + f
            artist_dict[f.replace('_',' ')] = name
    all_lyrics = {}
    for art,path in artist_dict.items():
        with open(path,'rb') as f:
            # SECURITY: pickle.load executes arbitrary code from the file —
            # only load pickles produced by this project itself.
            all_lyrics[art] = pickle.load(f)
    Lines, Songs = get_verses(all_lyrics,artist)
    return Lines, Songs, all_lyrics
def Get_Lyrics():
    """Tokenize every pickled lyrics file under ``data/raw/``.

    Walks ``data/raw/``, unpickles each file (expected to map song title ->
    raw lyrics text), tokenizes every song with ``make_one_list`` and
    concatenates the per-line word lists across all songs and artists.

    Bug fix: removed a stray ``json.loads(song_lyrics_path)`` call that tried
    to JSON-parse the directory path string itself and therefore raised
    JSONDecodeError on every invocation. Also stopped rebinding the loop's
    source dict name (``lyrics``) to the tokenized result mid-iteration.

    :returns: Flat list of word lists, one per lyric line.
    """
    song_lyrics_path = 'data/raw/'
    artist_dict = {}
    for root, dirs, files in os.walk(song_lyrics_path):
        for f in files:
            # NOTE(review): root + f assumes song_lyrics_path ends with '/'.
            artist_dict[f.replace('_', ' ')] = root + f
    all_lyrics = []
    for art, path in artist_dict.items():
        with open(path, 'rb') as f:
            # SECURITY: only unpickle files this project created itself.
            songs = pickle.load(f)
        for title, song_lyrics in songs.items():
            all_lyrics += make_one_list(song_lyrics)
    return all_lyrics
def Get_All_Lyrics(all_lyrics):
    """Tokenize every song into one word list per song.

    Each song's text is split on whitespace; characters are lower-cased and
    punctuation from the strip set is removed. Unlike ``make_one_list``, no
    stop-word filtering is applied and each song yields exactly one list.

    :param all_lyrics: Mapping artist -> {song title -> raw lyrics text}.
    :returns: List of word lists, one per song.
    """
    stripped = '"\'_()$,.?!—'
    sentence_list = []
    for artist, songs in all_lyrics.items():
        for title, song in songs.items():
            words = [
                ''.join(ch.lower() for ch in word if ch.lower() not in stripped)
                for word in song.split()
            ]
            sentence_list.append(words)
    return sentence_list
def get_song_structures(artist):
    """Build per-section rhyme scaffolding for *artist*.

    For every collected section (Verses/Choruses/Hooks/Bridges, as returned
    by ``Get_Artist_Lyrics``) keep only the last two whitespace tokens of each
    line — the words that carry the end rhyme.

    :param artist: Artist name passed through to ``Get_Artist_Lyrics``.
    :returns: Dict mapping section name -> list of sections, each a list of
        up-to-two-word lists (one per line).
    """
    Lines, Songs, All_lyrics = Get_Artist_Lyrics(artist)
    song_structures = {}
    for section, lines in Lines.items():
        rhyming_sections = []
        for lines_ in lines:
            # line.split()[-2:] == the final two whitespace tokens of the line
            # (a single-word line yields a one-element list).
            rhyming_words = [line.split()[-2:] for line in lines_ ]
            rhyming_sections.append(rhyming_words)
        song_structures[section] = rhyming_sections
    return song_structures
# Module-level side effect: the full extraction (file walking, unpickling,
# JSON load) runs at import time. NOTE(review): consider guarding this with
# `if __name__ == '__main__':` if the module is ever imported elsewhere.
song_structures = get_song_structures('Kendrick Lamar')
| StarcoderdataPython |
1795958 | <filename>cnn_training/vgg2_2_tfrecords.py
#!/usr/bin/env python
# coding: utf-8
"""
Trains some face recognition baselines using ARC based models
Usage:
vgg2_2_tfrecords.py <vgg-path> <output-path>
vgg2_2_tfrecords.py -h | --help
Options:
-h --help Show this screen.
"""
from docopt import docopt
import numpy as np
import os
import bob.io.image
import bob.io.base
import tensorflow as tf
import sys
from datetime import datetime
def _bytes_feature(value):
    """Wrap a single bytes value in a tf.train Feature (``bytes_list``)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
    """Wrap a single integer value in a tf.train Feature (``int64_list``)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def search_landmark(landmark_path, img_path):
    """Find the 5-point facial landmarks for *img_path* in the VGG2 CSV.

    Scans the landmark CSV line by line (header skipped) and, on the first
    row whose first column contains *img_path*, returns a (5, 2) array of
    (x, y) pairs taken from columns 1-10. Returns None when no row matches
    (``for``/``else``: the else branch runs only after the loop finishes
    without returning).

    :param landmark_path: Path to ``loose_landmark_train.csv``.
    :param img_path: Relative image path (without extension) to look up.
    :returns: numpy array of shape (5, 2), or None if not found.
    """
    with open(landmark_path) as f:
        next(f)  # skip the CSV header row
        for line in f:
            line = line.split(",")
            # Substring match, not equality — NOTE(review): 'n000001/0001'
            # would also match 'n000001/00010'; confirm this is acceptable.
            if img_path in line[0]:
                return np.array(
                    [[float(line[i + 1]), float(line[i + 2])] for i in [0, 2, 4, 6, 8]]
                )
        else:
            return None
from bob.bio.face.preprocessor import FaceCrop
def align(image, annotations, cropped_image_size=(126, 126)):
    """Crop and align a face image around the annotated eye centers.

    :param image: Input image in bob's channels-first layout.
    :param annotations: Dict with 'leye'/'reye' entries as (y, x) tuples.
    :param cropped_image_size: (height, width) of the output crop.
    :returns: Aligned crop converted to matplotlib (channels-last) uint8.
    """
    # NOTE(review): these two locals are currently unused — the FaceCrop call
    # below receives the tuple directly.
    cropped_image_height, cropped_image_width = cropped_image_size
    # Earlier eye-position choices, kept for reference:
    # RIGHT_EYE_POS = (40, 46)
    # LEFT_EYE_POS = (40, 80)
    # cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
    # cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
    # Target (y, x) eye positions inside the crop.
    cropped_positions = {"leye": (55, 81), "reye": (55, 42)}
    cropper = FaceCrop(
        cropped_image_size=cropped_image_size,
        cropped_positions=cropped_positions,
        color_channel="rgb",
        fixed_positions=None,
        annotator=None,
    )
    return bob.io.image.to_matplotlib(
        cropper.transform([image], [annotations])[0].astype("uint8")
    )
def get_id_by_line(line):
    """Return the VGG2 identity: the text before the first '/' of a list line."""
    identity, _, _ = line.partition("/")
    return identity
def generate_tfrecord(
    base_path, landmark_path, file_list, output_tf_record_path, indexes
):
    """Write one TFRecord shard of aligned face crops for the given identities.

    :param base_path: Root directory of the VGG2 images.
    :param landmark_path: CSV with the 5-point landmarks (see search_landmark).
    :param file_list: Text file with one relative image path per line.
    :param output_tf_record_path: Destination ``.tfrecords`` file.
    :param indexes: Dict mapping user id -> integer label; only listed users
        are written.

    Bug fixes vs. the original:
    * ``search_landmark`` may return None; the None check now runs *before*
      the landmarks are indexed (previously the check came after the first
      subscript and could never fire without a TypeError).
    * ``file_name.rstrip(".jpg\\n")`` stripped a *character set*, which also
      eats trailing 'j'/'p'/'g' characters from the stem; replaced with an
      exact suffix removal.
    """

    def write_single_line_tfrecord(writer, image, offset, user_id):
        # Serialize the raw pixel buffer plus its integer label and string key.
        serialized_img = image.tobytes()
        feature = {
            "data": _bytes_feature(serialized_img),
            "label": _int64_feature(offset),
            "key": _bytes_feature(str.encode(user_id)),
        }
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())

    with tf.io.TFRecordWriter(output_tf_record_path) as tf_writer:
        current_id = None
        with open(file_list) as f:
            for file_name in f.readlines():
                user_id = get_id_by_line(file_name)
                if user_id not in indexes:
                    continue
                img = bob.io.base.load(
                    os.path.join(base_path, file_name).rstrip("\n")
                )
                # Exact '.jpg' suffix removal (rstrip(".jpg\n") would strip
                # any trailing run of the characters . j p g \n).
                l_name = file_name.rstrip("\n")
                if l_name.endswith(".jpg"):
                    l_name = l_name[: -len(".jpg")]
                if current_id != user_id:
                    current_id = user_id
                    sys.stdout.write(
                        f"Writing user {current_id}. {str(datetime.now())} \n"
                    )
                    sys.stdout.flush()
                landmarks = search_landmark(landmark_path, l_name)
                if landmarks is None:
                    raise ValueError(f"Landmark for {file_name} not found!")
                # Order the first two points by x so that reye gets the
                # smaller x; annotations are (y, x) pairs.
                if landmarks[0][0] > landmarks[1][0]:
                    annotations = {
                        "reye": (landmarks[1][1], landmarks[1][0]),
                        "leye": (landmarks[0][1], landmarks[0][0]),
                    }
                else:
                    annotations = {
                        "reye": (landmarks[0][1], landmarks[0][0]),
                        "leye": (landmarks[1][1], landmarks[1][0]),
                    }
                aligned_image = align(img, annotations)
                write_single_line_tfrecord(
                    tf_writer, aligned_image, int(indexes[user_id]), user_id
                )
def map_indexes(image_path, n_chunks):
    """Map each VGG2 identity name to a dense integer label, split into chunks.

    Reads the training list, collects the sorted set of identity prefixes
    (text before '/'), numbers them 0..N-1, and splits the mapping into
    *n_chunks* roughly equal dicts. Because the split goes through
    ``numpy.array_split``, the integer labels come back as strings, e.g.::

        [{'n000001': '0'}, {'n000002': '1'}]

    :param image_path: Path to the image list file (one ``id/img`` per line).
    :param n_chunks: Number of chunks to split the mapping into.
    :returns: List of ``n_chunks`` dicts mapping identity -> label string.
    """
    with open(image_path) as fp:
        names = sorted({line.split("/")[0] for line in fp})
    numbered = [(name, position) for position, name in enumerate(names)]
    chunks = np.array_split(numbered, n_chunks)
    return [dict(chunk) for chunk in chunks]
if __name__ == "__main__":
    # Parse CLI arguments declared in the module docstring (docopt).
    args = docopt(__doc__)
    VGG2_PATH = args["<vgg-path>"]
    LANDMARK_PATH = os.path.join(VGG2_PATH, "bb_landmark", "loose_landmark_train.csv")
    # When running under an SGE array job, shard the identity map across
    # tasks; SGE_TASK_ID is 1-based, hence the -1. Standalone runs use a
    # single chunk.
    if "SGE_TASK_LAST" in os.environ:
        TOTAL_CHUNKS = int(os.environ["SGE_TASK_LAST"])
        CURRENT_CHUNK = int(os.environ["SGE_TASK_ID"]) - 1
    else:
        TOTAL_CHUNKS = 1
        CURRENT_CHUNK = 0
    # TOTAL_CHUNKS = 140
    # CURRENT_CHUNK = 0
    TRAINING_LIST = os.path.join(VGG2_PATH, "train_list.txt")
    # TEST_LIST = os.path.join(VGG2_PATH, "test_list.txt")
    # MAP ALL INDEXES
    indexes = map_indexes(TRAINING_LIST, TOTAL_CHUNKS)
    # Write only this task's chunk of identities into its own shard file.
    generate_tfrecord(
        os.path.join(VGG2_PATH, "train"),
        LANDMARK_PATH,
        TRAINING_LIST,
        os.path.join(
            args["<output-path>"], f"train_vgg2_chunk{CURRENT_CHUNK}.tfrecords"
        ),
        indexes[CURRENT_CHUNK],
    )
| StarcoderdataPython |
3219847 | <reponame>Phill240/chrome-remote-interface-py
"""This is an auto-generated file. Modify at your own risk"""
from typing import Awaitable, Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING
if TYPE_CHECKING:
from cripy import ConnectionType, SessionType
__all__ = ["Fetch"]
class Fetch:
    """
    A domain for letting clients substitute browser's network layer with client code.

    Domain Dependencies:
      * Network
      * IO
      * Page

    Status: Experimental

    See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch`
    """

    __slots__ = ["client"]

    def __init__(self, client: Union["ConnectionType", "SessionType"]) -> None:
        """Initialize a new instance of Fetch

        :param client: The client instance to be used to communicate with the remote browser instance
        """
        self.client: Union["ConnectionType", "SessionType"] = client

    def disable(self) -> Awaitable[Dict]:
        """
        Disables the fetch domain.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-disable`

        :return: The results of the command
        """
        return self.client.send("Fetch.disable", {})

    def enable(
        self,
        patterns: Optional[List[Dict[str, Any]]] = None,
        handleAuthRequests: Optional[bool] = None,
    ) -> Awaitable[Dict]:
        """
        Enables issuing of requestPaused events. A request will be paused until client
        calls one of failRequest, fulfillRequest or continueRequest/continueWithAuth.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-enable`

        :param patterns: If specified, only requests matching any of these patterns will produce
         fetchRequested event and will be paused until clients response. If not set,
         all requests will be affected.
        :param handleAuthRequests: If true, authRequired events will be issued and requests will be paused
         expecting a call to continueWithAuth.

        :return: The results of the command
        """
        # Only include optional fields that were explicitly supplied.
        msg = {}
        if patterns is not None:
            msg["patterns"] = patterns
        if handleAuthRequests is not None:
            msg["handleAuthRequests"] = handleAuthRequests
        return self.client.send("Fetch.enable", msg)

    def failRequest(self, requestId: str, errorReason: str) -> Awaitable[Dict]:
        """
        Causes the request to fail with specified reason.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-failRequest`

        :param requestId: An id the client received in requestPaused event.
        :param errorReason: Causes the request to fail with the given reason.

        :return: The results of the command
        """
        return self.client.send(
            "Fetch.failRequest", {"requestId": requestId, "errorReason": errorReason}
        )

    def fulfillRequest(
        self,
        requestId: str,
        responseCode: int,
        responseHeaders: List[Dict[str, Any]],
        body: Optional[str] = None,
        responsePhrase: Optional[str] = None,
    ) -> Awaitable[Dict]:
        """
        Provides response to the request.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-fulfillRequest`

        :param requestId: An id the client received in requestPaused event.
        :param responseCode: An HTTP response code.
        :param responseHeaders: Response headers.
        :param body: A response body.
        :param responsePhrase: A textual representation of responseCode.
         If absent, a standard phrase matching responseCode is used.

        :return: The results of the command
        """
        msg = {
            "requestId": requestId,
            "responseCode": responseCode,
            "responseHeaders": responseHeaders,
        }
        if body is not None:
            msg["body"] = body
        if responsePhrase is not None:
            msg["responsePhrase"] = responsePhrase
        return self.client.send("Fetch.fulfillRequest", msg)

    def continueRequest(
        self,
        requestId: str,
        url: Optional[str] = None,
        method: Optional[str] = None,
        postData: Optional[str] = None,
        headers: Optional[List[Dict[str, Any]]] = None,
    ) -> Awaitable[Dict]:
        """
        Continues the request, optionally modifying some of its parameters.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-continueRequest`

        :param requestId: An id the client received in requestPaused event.
        :param url: If set, the request url will be modified in a way that's not observable by page.
        :param method: If set, the request method is overridden.
        :param postData: If set, overrides the post data in the request.
        :param headers: If set, overrides the request headers.

        :return: The results of the command
        """
        msg = {"requestId": requestId}
        if url is not None:
            msg["url"] = url
        if method is not None:
            msg["method"] = method
        if postData is not None:
            msg["postData"] = postData
        if headers is not None:
            msg["headers"] = headers
        return self.client.send("Fetch.continueRequest", msg)

    def continueWithAuth(
        self, requestId: str, authChallengeResponse: Dict[str, Any]
    ) -> Awaitable[Dict]:
        """
        Continues a request supplying authChallengeResponse following authRequired event.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-continueWithAuth`

        :param requestId: An id the client received in authRequired event.
        :param authChallengeResponse: Response to the authChallenge.

        :return: The results of the command
        """
        return self.client.send(
            "Fetch.continueWithAuth",
            {"requestId": requestId, "authChallengeResponse": authChallengeResponse},
        )

    def getResponseBody(self, requestId: str) -> Awaitable[Dict]:
        """
        Causes the body of the response to be received from the server and
        returned as a single string. May only be issued for a request that
        is paused in the Response stage and is mutually exclusive with
        takeResponseBodyForInterceptionAsStream. Calling other methods that
        affect the request or disabling fetch domain before body is received
        results in an undefined behavior.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-getResponseBody`

        :param requestId: Identifier for the intercepted request to get body for.

        :return: The results of the command
        """
        return self.client.send("Fetch.getResponseBody", {"requestId": requestId})

    def takeResponseBodyAsStream(self, requestId: str) -> Awaitable[Dict]:
        """
        Returns a handle to the stream representing the response body.
        The request must be paused in the HeadersReceived stage.
        Note that after this command the request can't be continued
        as is -- client either needs to cancel it or to provide the
        response body.
        The stream only supports sequential read, IO.read will fail if the position
        is specified.
        This method is mutually exclusive with getResponseBody.
        Calling other methods that affect the request or disabling fetch
        domain before body is received results in an undefined behavior.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-takeResponseBodyAsStream`

        :param requestId: The requestId

        :return: The results of the command
        """
        return self.client.send(
            "Fetch.takeResponseBodyAsStream", {"requestId": requestId}
        )

    def requestPaused(
        self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
    ) -> Any:
        """
        Issued when the domain is enabled and the request URL matches the
        specified filter. The request is paused until the client responds
        with one of continueRequest, failRequest or fulfillRequest.
        The stage of the request can be determined by presence of responseErrorReason
        and responseStatusCode -- the request is at the response stage if either
        of these fields is present and in the request stage otherwise.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#event-requestPaused`

        :param listener: Optional listener function

        :return: If a listener was supplied the return value is a callable that
         will remove the supplied listener otherwise a future that resolves
         with the value of the event
        """
        event_name = "Fetch.requestPaused"
        if listener is None:
            # No listener: resolve a one-shot future with the next event.
            future = self.client.loop.create_future()

            def _listener(event: Optional[Dict] = None) -> None:
                future.set_result(event)

            self.client.once(event_name, _listener)
            return future
        # Listener supplied: subscribe and return an unsubscribe callable.
        self.client.on(event_name, listener)
        return lambda: self.client.remove_listener(event_name, listener)

    def authRequired(
        self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
    ) -> Any:
        """
        Issued when the domain is enabled with handleAuthRequests set to true.
        The request is paused until client responds with continueWithAuth.

        See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#event-authRequired`

        :param listener: Optional listener function

        :return: If a listener was supplied the return value is a callable that
         will remove the supplied listener otherwise a future that resolves
         with the value of the event
        """
        event_name = "Fetch.authRequired"
        if listener is None:
            # No listener: resolve a one-shot future with the next event.
            future = self.client.loop.create_future()

            def _listener(event: Optional[Dict] = None) -> None:
                future.set_result(event)

            self.client.once(event_name, _listener)
            return future
        # Listener supplied: subscribe and return an unsubscribe callable.
        self.client.on(event_name, listener)
        return lambda: self.client.remove_listener(event_name, listener)
| StarcoderdataPython |
1784558 | <filename>code/python/lib/mayavi_utils.py
#
# For licensing see accompanying LICENSE.txt file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
from pylab import *
import mayavi.mlab
def points3d_color_by_scalar(positions, scalars, sizes=None, mode="sphere", scale_factor=1.0, colormap="jet", opacity=1.0):
    """Render a 3-D point cloud colored by a per-point scalar.

    Uses quiver3d with dummy (1, 0, 0) direction vectors instead of
    mlab.points3d so per-point glyph sizes can be controlled independently of
    the scalar used for color.

    :param positions: (N, 3) array of point coordinates.
    :param scalars: Length-N array mapped through *colormap* (required).
    :param sizes: Optional length-N array of per-point glyph scales.
    :param mode: Glyph shape passed to quiver3d.
    :param scale_factor: Global glyph scale.
    :param colormap: Mayavi colormap name.
    :param opacity: Glyph opacity in [0, 1].
    """
    assert scalars is not None
    num_pts = positions.shape[0]
    # Unit x-direction per point; its length only carries the glyph size.
    S = c_[ones(num_pts), zeros(num_pts), zeros(num_pts)]
    if sizes is not None:
        S = S*sizes[:,newaxis]
    pts = mayavi.mlab.quiver3d(positions[:,0], positions[:,1], positions[:,2], S[:,0], S[:,1], S[:,2], scalars=scalars, mode=mode, scale_factor=scale_factor, colormap=colormap, opacity=opacity)
    # Color from the scalars array rather than from the vector norm.
    pts.glyph.color_mode = "color_by_scalar"
    # Center each glyph on its point instead of anchoring it at one end.
    pts.glyph.glyph_source.glyph_source.center = [0,0,0]
def points3d_color_by_rgb_value(positions, colors, sizes=None, mode="sphere", scale_factor=1.0, colormap="jet", opacity=1.0):
    """Render a 3-D point cloud with explicit per-point RGB(A) colors.

    Gives each point a unique scalar index and installs the colors (scaled to
    0-255) as the glyph's lookup table, so index i maps to colors[i].

    :param positions: (N, 3) array of point coordinates.
    :param colors: (N, 3) or (N, 4) array of RGB(A) values in [0, 1].
    :param sizes: Optional length-N array of per-point glyph scales.
    :param mode: Glyph shape passed to quiver3d.
    :param scale_factor: Global glyph scale.
    :param colormap: NOTE(review): accepted for API symmetry with
        points3d_color_by_scalar but unused — the LUT is overwritten below.
    :param opacity: Glyph opacity in [0, 1].
    """
    assert colors is not None
    assert all(colors >= 0.0) and all(colors <= 1.0)
    assert colors.shape[1] == 3 or colors.shape[1] == 4
    num_pts = positions.shape[0]
    # Unit x-direction per point; its length only carries the glyph size.
    S = c_[ones(num_pts), zeros(num_pts), zeros(num_pts)]
    if sizes is not None:
        S = S*sizes[:,newaxis]
    # One distinct scalar per point -> one LUT row per point.
    scalars = arange(num_pts)
    if colors.shape[1] == 3:
        # Add a fully-opaque alpha channel before converting to 0-255.
        colors_cmap = (c_[colors, ones(num_pts)]*255).astype(int32)
    if colors.shape[1] == 4:
        colors_cmap = (colors*255).astype(int32)
    pts = mayavi.mlab.quiver3d(positions[:,0], positions[:,1], positions[:,2], S[:,0], S[:,1], S[:,2], scalars=scalars, mode=mode, scale_factor=scale_factor, opacity=opacity)
    pts.glyph.color_mode = "color_by_scalar"
    pts.glyph.glyph_source.glyph_source.center = [0,0,0]
    # Replace the colormap's lookup table with the explicit colors.
    pts.module_manager.scalar_lut_manager.lut.table = colors_cmap
    mayavi.mlab.draw()
| StarcoderdataPython |
1716305 | from job_board.users.forms import BaseSignupForm
from django import forms
from django.utils.translation import gettext_lazy as _
class EmployerSignupForm(BaseSignupForm):
    """Signup form for employer accounts.

    Extends the project's base signup form with the employer's display name
    and industry. Labels and placeholders go through gettext_lazy for i18n.
    """

    # NOTE(review): "employer_name"/"employer_industry" are not standard HTML
    # autocomplete tokens (e.g. "organization") — confirm they are intended.
    employer_name = forms.CharField(required=True, label=_("Employer Name"), widget=forms.TextInput(
        attrs={"placeholder": _("Employer Name"), "autocomplete": "employer_name"}
    ),)
    employer_industry = forms.CharField(required=True, label=_("Employer Industry"), widget=forms.TextInput(
        attrs={"placeholder": _("Employer Industry"), "autocomplete": "employer_industry"}
    ))
| StarcoderdataPython |
3288958 | #!/usr/bin/python
# coding: utf-8
"""Visual keyboard for Pygame engine. Aims to be easy to use as highly customizable as well.
``VKeyboard`` only require a pygame surface to be displayed on and a text consumer function, as in the following example :
```python
from pygame_vkeyboard import *
# Initializes your window object or surface your want
# vkeyboard to be displayed on top of.
surface = ...
def consume(text):
print(repr('Current text : %s' % text))
# Initializes and activates vkeyboard
layout = VKeyboardLayout(VKeyboardLayout.AZERTY)
keyboard = VKeyboard(window, consumer, layout)
keyboard.enable()
```
"""
import logging
import pygame
from os.path import join, dirname
from pygame.locals import *
pygame.font.init()
# Configure logger.
logging.basicConfig()
logger = logging.getLogger(__name__)
class VKeyboardRenderer(object):
    """A VKeyboardRenderer is in charge of keyboard rendering.

    It handles keyboard rendering properties such as color or padding,
    and provides two rendering methods : one for the keyboard background
    and another one for the key rendering.

    All color tuples are indexed by key state (0 = released, 1 = pressed).

    .. note::
        A DEFAULT style instance is available as class attribute.
    """

    def __init__(self, font, keyboard_background_color, key_background_color, text_color, special_key_background_color=None):
        """VKeyboardStyle default constructor.

        :param font: Used font for rendering key.
        :param keyboard_background_color: Background color use for the keyboard.
        :param key_background_color: Tuple of background color for key (one value per state).
        :param text_color: Tuple of key text color (one value per state).
        :param special_key_background_color: Background color for special key if required.
        """
        self.font = font
        self.keyboard_background_color = keyboard_background_color
        self.key_background_color = key_background_color
        self.special_key_background_color = special_key_background_color
        self.text_color = text_color

    def draw_background(self, surface, position, size):
        """Default drawing method for background.

        Background is drawn as a simple rectangle filled using this
        style background color attribute.

        :param surface: Surface background should be drawn in.
        :param position: Surface relative position the keyboard should be drawn at.
        :param size: Expected size of the drawn keyboard.
        """
        # position + size concatenates the two pairs into a pygame rect tuple.
        pygame.draw.rect(surface, self.keyboard_background_color, position + size)

    def draw_key(self, surface, key):
        """Default drawing method for key.

        Draw the key accordingly to it type (dispatch on the key subclass).

        :param surface: Surface background should be drawn in.
        :param key: Target key to be drawn.
        """
        if isinstance(key, VSpaceKey):
            self.draw_space_key(surface, key)
        elif isinstance(key, VBackKey):
            self.draw_back_key(surface, key)
        elif isinstance(key, VUppercaseKey):
            self.draw_uppercase_key(surface, key)
        elif isinstance(key, VSpecialCharKey):
            self.draw_special_char_key(surface, key)
        else:
            self.draw_character_key(surface, key)

    def draw_character_key(self, surface, key, special=False):
        """Default drawing method for key.

        Key is drawn as a simple rectangle filled using this
        cell style background color attribute. Key value is printed
        into drawn cell using internal font.

        :param surface: Surface background should be drawn in.
        :param key: Target key to be drawn.
        :param special: Boolean flag that indicates if the drawn key should use special background color if available.
        """
        background_color = self.key_background_color
        if special and self.special_key_background_color is not None:
            background_color = self.special_key_background_color
        pygame.draw.rect(surface, background_color[key.state], key.position + key.size)
        # Center the rendered label inside the key cell.
        size = self.font.size(key.value)
        x = key.position[0] + ((key.size[0] - size[0]) / 2)
        y = key.position[1] + ((key.size[1] - size[1]) / 2)
        surface.blit(self.font.render(key.value, 1, self.text_color[key.state], None), (x, y))

    def draw_space_key(self, surface, key):
        """Default drawing method space key.
        Key is drawn as a simple rectangle filled using this
        cell style background color attribute. Key value is printed
        into drawn cell using internal font.

        :param surface: Surface background should be drawn in.
        :param key: Target key to be drawn.
        """
        self.draw_character_key(surface, key, False)

    def draw_back_key(self, surface, key):
        """Default drawing method for back key. Drawn as character key.

        :param surface: Surface background should be drawn in.
        :param key: Target key to be drawn.
        """
        self.draw_character_key(surface, key, True)

    def draw_uppercase_key(self, surface, key):
        """Default drawing method for uppercase key. Drawn as character key.

        :param surface: Surface background should be drawn in.
        :param key: Target key to be drawn.
        """
        # Mutates key.value so the label reflects the current shift mode.
        key.value = u'\u21e7'
        if key.is_activated():
            key.value = u'\u21ea'
        self.draw_character_key(surface, key, True)

    def draw_special_char_key(self, surface, key):
        """Default drawing method for special char key. Drawn as character key.

        :param surface: Surface background should be drawn in.
        :param key: Target key to be drawn.
        """
        # Mutates key.value so the label reflects the current layout mode.
        key.value = u'#'
        if key.is_activated():
            key.value = u'Ab'
        self.draw_character_key(surface, key, True)
""" Default style implementation. """
VKeyboardRenderer.DEFAULT = VKeyboardRenderer(
pygame.font.Font(join(dirname(__file__), 'DejaVuSans.ttf'), 25),
(255, 255, 255),
((255, 255, 255), (0, 0, 0)),
((0, 0, 0), (255, 255, 255)),
((180, 180, 180), (0, 0, 0)),
)
class VKey(object):
    """A single keyboard key.

    Tracks the key's display value, its pressed state (0 = released,
    1 = pressed) and its on-screen geometry (position and size).
    """

    def __init__(self, value):
        """Create a released key at the origin with zero size.

        :param value: Character (or label) this key types and displays.
        """
        self.state = 0
        self.value = value
        self.position = (0, 0)
        self.size = (0, 0)

    def set_size(self, size):
        """Make this key a *size* x *size* square.

        :param size: Edge length of the key.
        """
        self.size = (size, size)

    def is_touched(self, position):
        """Tell whether an event at *position* lands on this key.

        Only the horizontal extent is tested (both edges inclusive); the
        caller is expected to have already resolved the keyboard row.

        :param position: (x, y) event position.
        :returns: True when the x coordinate falls on this key.
        """
        left = self.position[0]
        right = left + self.size[0]
        return left <= position[0] <= right

    def update_buffer(self, buffer):
        """Append this key's value to *buffer*.

        Called internally once a key collision has been detected.

        :param buffer: Text buffer to be updated.
        :returns: Updated buffer value.
        """
        return buffer + self.value
class VSpaceKey(VKey):
    """ Custom key for the space bar. """

    def __init__(self, length):
        """Creates a space key labelled 'Space'.

        :param length: Width of this key, expressed in regular key units.
        """
        super(VSpaceKey, self).__init__('Space')
        self.length = length

    def set_size(self, size):
        """Sets the size of this key; the space bar spans `length` cells.

        :param size: Edge length of a regular key.
        """
        self.size = (size * self.length, size)

    def update_buffer(self, buffer):
        """Appends a single space character to the given buffer.

        :param buffer: Buffer to be updated.
        :returns: Updated buffer value.
        """
        return buffer + ' '
class VBackKey(VKey):
    """ Custom key performing backspace. """

    def __init__(self):
        """Creates a back key labelled with a return-arrow glyph."""
        super(VBackKey, self).__init__(u'\u21a9')

    def update_buffer(self, buffer):
        """Drops the last character of the given buffer.

        :param buffer: Buffer to be updated.
        :returns: Updated buffer value (an empty buffer stays empty).
        """
        return buffer[:-1]
class VActionKey(VKey):
    """A VActionKey is a key that triggers an action rather than
    updating the buffer when pressed.
    """

    def __init__(self, action, state_holder):
        """Creates an action key with an empty label.

        :param action: Delegate action called when this key is pressed.
        :param state_holder: Holder for this key state (activated or not).
        """
        super(VActionKey, self).__init__('')
        self.action = action
        self.state_holder = state_holder

    def update_buffer(self, buffer):
        """Triggers the delegate action and leaves the buffer untouched.

        :param buffer: Not used, just to match parent interface.
        :returns: Buffer provided as parameter, unchanged.
        """
        self.action()
        return buffer
class VUppercaseKey(VActionKey):
    """ Action key toggling uppercase mode. """

    def __init__(self, keyboard):
        """Creates the uppercase switch key.

        :param keyboard: Keyboard whose on_uppercase() is triggered when pressed.
        """
        super(VUppercaseKey, self).__init__(lambda: keyboard.on_uppercase(), keyboard)

    def is_activated(self):
        """Indicates if uppercase mode is currently on.

        :returns: True if activated, False otherwise.
        """
        return self.state_holder.uppercase
class VSpecialCharKey(VActionKey):
    """ Action key toggling the special characters layout. """

    def __init__(self, keyboard):
        """Creates the special characters switch key.

        :param keyboard: Keyboard whose on_special_char() is triggered when pressed.
        """
        super(VSpecialCharKey, self).__init__(lambda: keyboard.on_special_char(), keyboard)

    def is_activated(self):
        """Indicates if the special characters layout is currently shown.

        :returns: True if activated, False otherwise.
        """
        return self.state_holder.special_char
class VKeyRow(object):
    """A VKeyRow defines a keyboard row which is composed of a list of VKey.

    This class aims to be created internally after parsing a keyboard layout
    model. It is used to optimize collision detection, by first checking row
    collision, then internal row key detection.
    """

    def __init__(self):
        """ Default row constructor: empty row with no geometry yet. """
        self.keys = []
        self.y = -1
        self.height = 0
        self.space = None

    def add_key(self, key, first=False):
        """Adds the given key to this row.

        :param key: Key to be added to this row.
        :param first: Boolean flag that indicates if the key is inserted at
            the beginning or appended at the end.
        """
        if first:
            self.keys.insert(0, key)
        else:
            self.keys.append(key)
        if isinstance(key, VSpaceKey):
            self.space = key

    def set_size(self, position, size, padding):
        """Row size setter.

        The size corresponds to the row height, since the row width is
        constrained to the surface width of the associated keyboard. Once
        settled, the size and position of each child key are derived.

        :param position: Position of this row.
        :param size: Size of the row (height).
        :param padding: Padding between keys.
        """
        self.height = size
        self.position = position
        cursor_x = position[0]
        for key in self.keys:
            key.set_size(size)
            key.position = (cursor_x, position[1])
            cursor_x += padding + key.size[0]

    def __contains__(self, position):
        """Indicates if the given position collides with this row.

        :param position: Position to check against this row.
        :returns: True if the given position collides with this row, False otherwise.
        """
        top = self.position[1]
        return top <= position[1] <= top + self.height

    def __len__(self):
        """len() operator overload.

        :returns: Number of keys this row contains.
        """
        return len(self.keys)
class VKeyboardLayout(object):
    """Keyboard layout class.

    A keyboard layout is built using a layout model which consists in a
    list of supported characters. Each list item is a simple string
    containing the characters assigned to one row.

    An erasing key is inserted automatically to the first row.
    If allow_uppercase flag is True, then an upper case key will be inserted at
    the beginning of the second row.
    If allow_special_chars flag is True, then a special characters / number key
    will be inserted at the beginning of the third row. Pressing this key will
    switch the associated keyboard current layout.
    """

    """ Azerty layout. """
    AZERTY = ['1234567890', 'azertyuiop', 'qsdfghjklm', 'wxcvbn']

    """ Number only layout. """
    NUMBER = ['123', '456', '789', '0']

    """ Special characters layout. """
    SPECIAL = [u'&é"\'(§è!çà)', u'°_-^$¨*ù`%£', u',;:=?.@+<>#', u'[]{}/\\|'] # TODO : Insert special characters layout which include number.

    def __init__(self, model, key_size=None, padding=5, allow_uppercase=True, allow_special_chars=True, allow_space=True):
        """Default constructor. Initializes layout rows.

        :param model: Layout model to use.
        :param key_size: Size of the key, if not specified will be computed dynamically.
        :param padding: Padding between keys (applies horizontally and vertically).
        :param allow_uppercase: Boolean flag that indicates usage of upper case switching key.
        :param allow_special_chars: Boolean flag that indicates usage of special char switching key.
        :param allow_space: Boolean flag that indicates usage of space bar.
        :raise ValueError: If the layout model is empty or contains only empty rows.
        """
        self.rows = []
        self.key_size = key_size
        self.padding = padding
        self.allow_space = allow_space
        self.allow_uppercase = allow_uppercase
        self.allow_special_chars = allow_special_chars
        for model_row in model:
            row = VKeyRow()
            for value in model_row:
                row.add_key(VKey(value))
            self.rows.append(row)
        # Fix: guard the empty-model case explicitly. Previously an empty
        # model reached max() on an empty sequence, which raises an
        # unrelated "max() arg is an empty sequence" ValueError instead of
        # the intended error message below.
        if len(self.rows) == 0:
            raise ValueError('Empty layout model provided')
        self.max_length = len(max(self.rows, key=len))
        if self.max_length == 0:
            raise ValueError('Empty layout model provided')

    def configure_specials_key(self, keyboard):
        """Configures special keys (back, uppercase, special chars, space) if needed.

        Special keys are packed into existing rows where room remains,
        alternating between row start and row end; leftovers go into an
        extra bottom row together with the space bar.

        :param keyboard: Keyboard instance this layout belongs to.
        """
        special_row = VKeyRow()
        max_length = self.max_length
        i = len(self.rows) - 1
        current_row = self.rows[i]
        special_keys = [VBackKey()]
        if self.allow_uppercase: special_keys.append(VUppercaseKey(keyboard))
        if self.allow_special_chars: special_keys.append(VSpecialCharKey(keyboard))
        while len(special_keys) > 0:
            first = False
            # Fill shorter rows (bottom-up) with special keys, alternating
            # between appending and prepending to keep the layout balanced.
            while len(special_keys) > 0 and len(current_row) < max_length:
                current_row.add_key(special_keys.pop(0), first=first)
                first = not first
            if i > 0:
                i -= 1
                current_row = self.rows[i]
            else:
                break
        if self.allow_space:
            space_length = len(current_row) - len(special_keys)
            special_row.add_key(VSpaceKey(space_length))
        first = True
        # Add any remaining special keys around the space bar.
        while len(special_keys) > 0:
            special_row.add_key(special_keys.pop(0), first=first)
            first = not first
        if len(special_row) > 0:
            self.rows.append(special_row)

    def configure_bound(self, surface_size):
        """Computes keyboard bounds regarding this layout.

        If key_size is None, it is computed from the given surface_size,
        and reduced if the resulting keyboard would cover more than half
        of the target surface height.

        :param surface_size: Size of the surface this layout will be rendered on.
        """
        r = len(self.rows)
        max_length = self.max_length
        if self.key_size is None:
            self.key_size = (surface_size[0] - (self.padding * (max_length + 1))) / max_length
        height = self.key_size * r + self.padding * (r + 1)
        if height >= surface_size[1] / 2:
            logger.warning('Computed keyboard height outbound target surface, reducing key_size to match')
            self.key_size = ((surface_size[1] / 2) - (self.padding * (r + 1))) / r
            height = self.key_size * r + self.padding * (r + 1)
            logger.warning('Normalized key_size to %spx' % self.key_size)
        self.set_size((surface_size[0], height), surface_size)

    def set_size(self, size, surface_size):
        """Sets the size of this layout, and updates position and rows accordingly.

        The layout is anchored at the bottom of the surface and each row is
        horizontally centered (shifted for rows holding a space bar).

        :param size: Size of this layout.
        :param surface_size: Target surface size on which layout will be displayed.
        """
        self.size = size
        self.position = (0, surface_size[1] - self.size[1])
        y = self.position[1] + self.padding
        max_length = self.max_length
        for row in self.rows:
            r = len(row)
            width = (r * self.key_size) + ((r + 1) * self.padding)
            x = (surface_size[0] - width) / 2
            if row.space is not None:
                # The space bar counts as one key in len(row) but spans
                # several cells; compensate when centering the row.
                x -= ((row.space.length - 1) * self.key_size) / 2
            row.set_size((x, y), self.key_size, self.padding)
            y += self.padding + self.key_size

    def invalidate(self):
        """ Resets all key states to released. """
        for row in self.rows:
            for key in row.keys:
                key.state = 0

    def set_uppercase(self, uppercase):
        """Sets layout uppercase state on all plain character keys.

        Action keys (subclasses of VKey) are deliberately left untouched.

        :param uppercase: True for uppercase, False otherwise.
        """
        for row in self.rows:
            for key in row.keys:
                if type(key) == VKey:
                    if uppercase:
                        key.value = key.value.upper()
                    else:
                        key.value = key.value.lower()

    def get_key_at(self, position):
        """Retrieves the key located at the given position, if any.

        Row collision is checked first, then key collision inside the row.

        :param position: Position to check key at.
        :returns: The located key if any at the given position, None otherwise.
        """
        for row in self.rows:
            if position in row:
                for key in row.keys:
                    if key.is_touched(position):
                        return key
        return None
def synchronizeLayout(primary, secondary, surface_size):
    """Synchronizes given layouts by normalizing height by using
    max height of given layouts to avoid transition dirty effects.

    Both layouts end up with the same key size (the smaller of the two)
    and the same overall height (the larger of the two).

    :param primary: Primary layout used.
    :param secondary: Secondary layout used.
    :param surface_size: Target surface size on which layout will be displayed.
    """
    # NOTE(review): this function logs via the logging module directly while
    # configure_bound uses a module-level `logger` -- presumably both reach
    # the same handlers; confirm module logging setup.
    primary.configure_bound(surface_size)
    secondary.configure_bound(surface_size)
    # Check for key size: shrink the larger layout's keys to match.
    if (primary.key_size < secondary.key_size):
        logging.warning('Normalizing key size from secondary to primary')
        secondary.key_size = primary.key_size
    elif (primary.key_size > secondary.key_size):
        logging.warning('Normalizing key size from primary to secondary')
        primary.key_size = secondary.key_size
    # Grow the shorter layout to the taller layout's height.
    if (primary.size[1] > secondary.size[1]):
        logging.warning('Normalizing layout size from secondary to primary')
        secondary.set_size(primary.size, surface_size)
    elif (primary.size[1] < secondary.size[1]):
        logging.warning('Normalizing layout size from primary to secondary')
        primary.set_size(secondary.size, surface_size)
class VKeyboard(object):
    """Virtual Keyboard class.

    A virtual keyboard consists in a VKeyboardLayout that acts as the keyboard
    model and a VKeyboardRenderer which is in charge of drawing the keyboard
    component to screen.
    """

    def __init__(self, surface, text_consumer, layout, special_char_layout=None, renderer=VKeyboardRenderer.DEFAULT):
        """Default constructor.

        :param surface: Surface this keyboard will be displayed at.
        :param text_consumer: Consumer that processes text for each update.
        :param layout: Layout this keyboard will use.
        :param special_char_layout: Alternative layout to use; a fresh
            VKeyboardLayout(VKeyboardLayout.SPECIAL) is created if not specified.
        :param renderer: Keyboard renderer instance, using VKeyboardRenderer.DEFAULT if not specified.
        """
        if special_char_layout is None:
            # Fix: the special layout used to be a mutable default argument,
            # created once at class definition time and shared by every
            # VKeyboard instance. configure_specials_key() mutates the layout
            # (it appends a special row), so each new keyboard grew the
            # shared default. Build a fresh layout per instance instead.
            special_char_layout = VKeyboardLayout(VKeyboardLayout.SPECIAL)
        self.surface = surface
        self.text_consumer = text_consumer
        self.renderer = renderer
        self.buffer = u''
        self.state = 0
        self.last_pressed = None
        self.uppercase = False
        self.special_char = False
        self.original_layout = layout
        self.original_layout.configure_specials_key(self)
        self.special_char_layout = special_char_layout
        self.special_char_layout.configure_specials_key(self)
        synchronizeLayout(self.original_layout, self.special_char_layout, self.surface.get_size())
        self.set_layout(layout)

    def invalidate(self):
        """ Invalidates keyboard state, resets layout keys and redraws. """
        self.layout.invalidate()
        self.draw()

    def set_layout(self, layout):
        """Sets the layout this keyboard works with.

        Keyboard is invalidated by this action and redraws itself.

        :param layout: Layout to set.
        """
        self.layout = layout
        self.invalidate()

    def enable(self):
        """ Sets this keyboard as active and redraws it. """
        self.state = 1
        self.invalidate()

    def disable(self):
        """ Sets this keyboard as non active (nothing is drawn while disabled). """
        self.state = 0

    def draw(self):
        """ Draws the virtual keyboard into the delegate surface object if enabled. """
        if self.state > 0:
            self.renderer.draw_background(self.surface, self.layout.position, self.layout.size)
            for row in self.layout.rows:
                for key in row.keys:
                    self.renderer.draw_key(self.surface, key)

    def on_uppercase(self):
        """ Uppercase key press handler: toggles case on both layouts. """
        self.uppercase = not self.uppercase
        self.original_layout.set_uppercase(self.uppercase)
        self.special_char_layout.set_uppercase(self.uppercase)
        self.invalidate()

    def on_special_char(self):
        """ Special char key press handler: swaps the active layout. """
        self.special_char = not self.special_char
        if self.special_char:
            self.set_layout(self.special_char_layout)
        else:
            self.set_layout(self.original_layout)
        self.invalidate()

    def on_event(self, event):
        """Pygame event processing callback method.

        Handles mouse press/release; physical keyboard events are not
        mapped to virtual keys yet (see TODOs).

        :param event: Event to process.
        """
        if self.state > 0:
            if event.type == MOUSEBUTTONDOWN:
                key = self.layout.get_key_at(pygame.mouse.get_pos())
                if key is not None:
                    self.on_key_down(key)
            elif event.type == MOUSEBUTTONUP:
                self.on_key_up()
            elif event.type == KEYDOWN:
                value = pygame.key.name(event.key)
                # TODO : Find from layout (consider checking layout key space ?)
            elif event.type == KEYUP:
                value = pygame.key.name(event.key)
                # TODO : Find from layout (consider checking layout key space ?)

    def set_key_state(self, key, state):
        """Sets the key state and redraws it.

        :param key: Key to update state for.
        :param state: New key state (1 pressed, 0 released).
        """
        key.state = state
        self.renderer.draw_key(self.surface, key)

    def on_key_down(self, key):
        """Processes key down event by pressing the given key.

        :param key: Key that receives the key down event.
        """
        self.set_key_state(key, 1)
        self.last_pressed = key

    def on_key_up(self):
        """ Processes key up event by updating the buffer and releasing the key. """
        if (self.last_pressed is not None):
            self.set_key_state(self.last_pressed, 0)
            self.buffer = self.last_pressed.update_buffer(self.buffer)
            self.text_consumer(self.buffer)
            self.last_pressed = None
| StarcoderdataPython |
1655748 | <reponame>z687wang/Hikari
from .serializers import MyTokenObtainPairSerializer
from rest_framework.permissions import AllowAny
from rest_framework_simplejwt.views import TokenObtainPairView
from django.contrib.auth.models import User
from .serializers import RegisterSerializer
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework.response import Response
from rest_framework import status
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework_simplejwt.token_blacklist.models import BlacklistedToken, OutstandingToken
class LogoutAllView(APIView):
    """Blacklists every outstanding refresh token of the current user,
    logging them out of all sessions/devices at once."""
    permission_classes = (IsAuthenticated,)

    def post(self, request):
        user_tokens = OutstandingToken.objects.filter(user_id=request.user.id)
        for outstanding in user_tokens:
            # get_or_create keeps this idempotent for already-blacklisted tokens.
            BlacklistedToken.objects.get_or_create(token=outstanding)
        return Response(status=status.HTTP_205_RESET_CONTENT)
class LogoutView(APIView):
    """Blacklists the single refresh token supplied in the request body,
    logging the user out of the current session."""
    permission_classes = (IsAuthenticated,)

    def post(self, request):
        try:
            # Missing key, malformed token or blacklist failure all map to 400.
            token = RefreshToken(request.data["refresh_token"])
            token.blacklist()
            return Response(status=status.HTTP_205_RESET_CONTENT)
        except Exception:
            return Response(status=status.HTTP_400_BAD_REQUEST)
class MyObtainTokenPairView(TokenObtainPairView):
    """JWT login endpoint using the project's customized token serializer."""
    permission_classes = (AllowAny,)  # login must be reachable without credentials
    serializer_class = MyTokenObtainPairSerializer
class RegisterView(generics.CreateAPIView):
    """User registration endpoint: POST creates a new Django auth User."""
    queryset = User.objects.all()
    permission_classes = (AllowAny,)  # anyone may sign up
    serializer_class = RegisterSerializer
198664 | from typing import Iterable
class InfiniteIterator:
    """Iterator that cycles through *iterable* forever.

    Unlike ``itertools.cycle`` this re-iterates the underlying iterable on
    every pass instead of caching its items, so it also works for
    re-iterable sources whose contents may change between passes.

    :raises StopIteration: from ``next()`` if the iterable yields no items.
    """

    def __init__(self, iterable: Iterable):
        self._iterable = iterable
        self.iterator = iter(self._iterable)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.iterator)
        except StopIteration:
            # Exhausted: restart from a fresh iterator. If the iterable is
            # empty this correctly raises StopIteration -- the original
            # implementation fell out of its retry loop and silently
            # returned None forever for empty iterables.
            self.iterator = iter(self._iterable)
            return next(self.iterator)
1602251 | import unittest
# On a staircase, the i-th step has some non-negative cost cost[i] assigned (0 indexed).
# Once you pay the cost, you can either climb one or two steps.
# You need to find minimum cost to reach the top of the floor,
# and you can either start from the step with index 0, or the step with index 1.
# Example fixture: the expected minimum cost for this staircase is 6
# (take every 1-cost step and skip all the 100-cost ones).
input_values = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
output_value = 6
class funcTest(unittest.TestCase):
    """Checks Solution.minCostClimbingStairs against the module-level fixture."""
    def test(self):
        # Relies on input_values / output_value defined above.
        solution = Solution()
        self.assertEqual(solution.minCostClimbingStairs(input_values), output_value)
class Solution:
    def minCostClimbingStairs(self, cost):
        """Returns the minimum total cost to reach the top of the staircase.

        After paying cost[i] you may climb one or two steps; you may start
        from step 0 or step 1. Dynamic programming from the end: at each
        step the best cost is its own cost plus the cheaper of the two
        steps above it.

        :type cost: List[int]
        :rtype: int
        """
        # reach_here / reach_next are the best costs starting from the
        # current step and the step after it. (The loop variable is renamed
        # from `cost`, which shadowed the parameter in the original code.)
        reach_here, reach_next = 0, 0
        for step_cost in reversed(cost):
            reach_here, reach_next = step_cost + min(reach_here, reach_next), reach_here
        # Starting position may be step 0 or step 1.
        return min(reach_here, reach_next)
if __name__ == '__main__':
    # argv/exit arguments keep unittest runnable inside Jupyter notebooks,
    # which pass their own argv and must not be killed by sys.exit().
    unittest.main(argv=['first-arg-is-ignored'], exit=False) # extra conditions for jupyter notebook
| StarcoderdataPython |
3275138 | <reponame>diefans/python-arangodb
"""Some classes to easy work with arangodb."""
from . import meta, util, query
import logging
LOG = logging.getLogger(__name__)
class QueryMixin(object):
    """Mixin adding AQL query helpers to a collection-backed class."""
    # pylint: disable=E0213
    @util.classproperty
    def alias(cls):
        """A query alias for this collection, named after the collection."""
        return query.Alias(cls.__collection_name__)

    @util.classproperty
    def query(cls):
        """Prepare a query against this collection.

        The default action is to return the alias.
        """
        return query.Query(cls.alias, query.Collection(cls)).action(cls.alias)
class Document(meta.DocumentBase, QueryMixin):
    """Concrete ArangoDB document type with query helpers mixed in."""
    pass

# Edge traversal directions, passed into query.PATHS by Edge.connections_query.
EDGE_DIRECTION_ANY = 'any'
EDGE_DIRECTION_INBOUND = 'inbound'
EDGE_DIRECTION_OUTBOUND = 'outbound'
class Edge(meta.EdgeBase, QueryMixin):
    """An edge between two documents.

    When the edge is loaded the two documents are also loaded.
    """
    def __init__(self, *args, **kwargs):
        """
        call scheme:

            Edge([_from, _to,] [iterable,] **kwargs)

        If _from and _to are not given, they have to be set later before saving!
        """
        # split args: the first two positional args, when present, are the
        # endpoints; everything after them is forwarded unchanged.
        args_len = len(args)
        if args_len < 2:
            # _to and _from must be in kwargs or args
            pass
        else:
            kwargs['_from'], kwargs['_to'] = args[:2]
            args = args[2:]
        super(Edge, self).__init__(*args, **kwargs)

    def __setitem__(self, key, value):
        """Item setter that reduces document endpoints to their `_id`.

        Assigning a document instance to `_from`/`_to` stores only its
        `_id`; any other assignment is passed through unchanged.

        :raise TypeError: If a document without an `_id` is assigned.
        """
        if key in ('_from', '_to') and isinstance(value, meta.BaseDocument):
            # check for documents and reduce to _id
            if '_id' not in value:
                raise TypeError(
                    "The document for setting `{0}` has no `_id`: {1}"
                    .format(key, value)
                )
            super(Edge, self).__setitem__(key, value['_id'])
        else:
            super(Edge, self).__setitem__(key, value)

    @property
    def _from(self):
        """Source endpoint of this edge (id string, or a Document after load())."""
        return self['_from']

    @_from.setter
    def _from(self, value):
        self['_from'] = value

    @property
    def _to(self):
        """Destination endpoint of this edge (id string, or a Document after load())."""
        return self['_to']

    @_to.setter
    def _to(self, value):
        self['_to'] = value

    @classmethod
    def _create(cls, doc):
        """Create a db instance; `doc` must already carry `_from` and `_to`."""
        assert '_from' in doc and '_to' in doc, \
            "You must create an edge ether by calling __init__ " \
            "with _from and _to args or an appropriate dict!"
        return cls.api.create(
            cls.__collection_name__,
            doc['_from'],
            doc['_to'],
            doc)

    @classmethod
    def load(cls, key):
        """Load the edge and connected documents.

        The `_from`/`_to` id strings are replaced by fully loaded Document
        instances.
        """
        edge = super(Edge, cls).load(key)
        edge['_from'] = Document.load(edge['_from'])
        edge['_to'] = Document.load(edge['_to'])
        return edge

    @classmethod
    def connections_query(cls, alias, document, direction=EDGE_DIRECTION_ANY):
        """Builds a PATHS query for direct neighbours of `document`.

        Paths are restricted to length 1 (direct connections) starting at
        the given document; the query yields the destination vertices.

        :param alias: Query alias bound to the path variable.
        :param document: Document (or Edge) the paths start from.
        :param direction: One of the EDGE_DIRECTION_* constants.
        :returns: The prepared query (not yet executed).
        """
        assert isinstance(document, meta.BaseDocument), "document is Document or Edge"
        # pylint: disable=W0212
        q = query.Query(
            alias,
            query.PATHS(
                query.Collection(document),
                query.Collection(cls),
                direction)
        )\
            .filter(alias.source._id == document._id)\
            .filter(query.LENGTH(alias.edges) == 1)\
            .action(alias.destination)
        return q

    @classmethod
    def connections(cls, document, collection=None, direction=EDGE_DIRECTION_ANY):
        """Iterates over documents directly connected to `document`.

        :param document: Document the connections are looked up for.
        :param collection: Optional collection class to restrict results to.
        :param direction: One of the EDGE_DIRECTION_* constants.
        :returns: Iterator over the connected documents.
        """
        alias = query.Alias('p')
        q = cls.connections_query(alias, document, direction)
        if collection is not None:
            # NOTE(review): presumably FIND_FIRST matches the collection name
            # prefix inside the destination `_id` -- confirm against the
            # query module.
            # pylint: disable=W0212
            q = q\
                .filter(query.FIND_FIRST(alias.destination._id, collection.__collection_name__) != -1)
        return q.cursor.iter_documents()

    @classmethod
    def inbounds(cls, document, collection=None):
        """Iterates over documents connected by edges pointing at `document`."""
        return cls.connections(document, collection, direction=EDGE_DIRECTION_INBOUND)

    @classmethod
    def outbounds(cls, document, collection=None):
        """Iterates over documents connected by edges leaving `document`."""
        return cls.connections(document, collection, direction=EDGE_DIRECTION_OUTBOUND)
class Index(meta.IndexBase):
    """An index representation."""
    # Target collection (class or name); set by subclasses or constructors.
    collection = None
    # Index type identifier understood by the API (e.g. "hash").
    index_type = None
    # Whether the index enforces uniqueness.
    unique = False

INDEX_TYPE_HASH = "hash"
class Hash(Index):
    """A hash index over one or more document fields."""
    collection = None
    index_type = INDEX_TYPE_HASH
    # uniqueness may also be enabled at class level (see UniqueHash)
    unique = False

    def __init__(self, *fields, **kwargs):
        """Creates a hash index over *fields*.

        :param fields: Document attributes covered by the index.
        :param unique: Optional keyword overriding the class-level flag.
        :param collection: Optional keyword overriding the class-level target.
        :raise TypeError: If no collection is configured at all.
        """
        self.fields = fields
        for option in ('unique', 'collection'):
            if option in kwargs:
                setattr(self, option, kwargs[option])
        if self.collection is None:
            raise TypeError("No index collection specified!")

    def save(self):
        """Creates this index in the database and returns the API result."""
        target = self.collection
        if isinstance(target, meta.BaseDocument):
            target = target.__collection_name__
        result = self.api.create(target, self.index_type, fields=self.fields, unique=self.unique)
        return result
class UniqueHash(Hash):
    """A hash index that additionally enforces uniqueness of the indexed fields."""
    unique = True
| StarcoderdataPython |
1736516 | from .hover.processor import HoverNetPostProcessor
from .cellpose.processor import CellposePostProcessor
from .drfns.processor import DRFNSPostProcessor
from .dcan.processor import DCANPostProcessor
from .dran.processor import DRANPostProcessor
from .basic.processor import BasicPostProcessor
from .thresholding import *
from .combine_type_inst import *
POST_PROC_LOOKUP = {
"hover":"HoverNetPostProcessor",
"cellpose":"CellposePostProcessor",
"drfns":"DRFNSPostProcessor",
"dcan":"DCANPostProcessor",
"dran":"DRANPostProcessor",
"basic":"BasicPostProcessor",
}
| StarcoderdataPython |
185625 | <filename>paddleseg3d/datasets/preprocess_utils/geometry.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy
import scipy.ndimage
import SimpleITK as sitk
def resample(image,
             spacing=None,
             new_spacing=(1.0, 1.0, 1.0),
             new_shape=None,
             order=1):
    """
    Resample image from the original spacing to new_spacing, e.g. 1x1x1.

    image(numpy array): 3D numpy array of raw HU values from CT series in
        [z, y, x] order.
    spacing(list|tuple): float * 3, raw CT spacing in [z, y, x] order.
        Required when new_shape is None.
    new_spacing(list|tuple): float * 3, new spacing used for resample,
        typically 1x1x1, which means standardizing raw CTs with different
        spacings all into 1x1x1 mm. Ignored when new_shape is given.
    new_shape(list|tuple): explicit shape for the resampled array; takes
        precedence over spacing/new_spacing when provided.
    order(int): spline order passed to scipy.ndimage.zoom (1 = trilinear).
    return: 3D numpy array after resampling. The output shape is the
        rounded target shape, so the effective spacing may differ slightly
        from new_spacing.
    """
    if new_shape is None:
        spacing = np.array([spacing[0], spacing[1], spacing[2]])
        new_shape = np.round(image.shape * spacing / np.asarray(new_spacing))
    else:
        new_shape = np.array(new_shape)
    resize_factor = new_shape / image.shape
    # Fix: scipy.ndimage.interpolation.zoom is deprecated and removed in
    # modern SciPy; scipy.ndimage.zoom is the supported equivalent.
    # (Also: the mutable list default for new_spacing became a tuple.)
    image_new = scipy.ndimage.zoom(
        image, resize_factor, mode='nearest', order=order)
    return image_new
| StarcoderdataPython |
3386694 | <filename>tcx_set_timings.py
#!/usr/bin/env python3
""" Add new timestamps to trackpoints in a TCX file, with an equal amount between each trackpoint """
import argparse
from lxml import etree
import datetime
import math
# Command line: input TCX file, output TCX file, the new start timestamp
# (format %Y-%m-%dT%H:%M:%SZ) and the total duration in seconds to spread
# evenly across all trackpoints.
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
parser.add_argument('starttime')
parser.add_argument('totaltime')
# Timestamp format used by TCX files (UTC, e.g. 2020-01-02T03:04:05Z).
date_str = '%Y-%m-%dT%H:%M:%SZ'
# XML namespace prefix of the Garmin TrainingCenterDatabase schema.
ns1 = '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}'

def set_time(trackpoint, time):
    """Overwrites the <Time> child element(s) of *trackpoint* with *time*,
    formatted using the TCX timestamp format. Other children are untouched."""
    time_tag = ns1 + 'Time'
    for child in trackpoint:
        if child.tag == time_tag:
            child.text = time.strftime(date_str)
if __name__ == '__main__':
    args = parser.parse_args()
    tree = etree.parse(args.input)
    startTime = datetime.datetime.strptime(args.starttime,date_str)
    totaltime = float(args.totaltime)
    # Spread totaltime evenly across all trackpoints in document order.
    # NOTE(review): a file with zero trackpoints would divide by zero here.
    trackpoints = tree.findall('//{:s}Trackpoint'.format(ns1))
    trackpointscount = len(trackpoints)
    addseconds = totaltime / trackpointscount
    time = startTime
    for trackpoint in trackpoints:
        set_time(trackpoint, time)
        time = time + datetime.timedelta(0, addseconds)
    # Keep the Lap metadata consistent with the rewritten trackpoints.
    lap = tree.find('//{:s}Lap'.format(ns1))
    lap.attrib["StartTime"] = startTime.strftime(date_str)
    for child in lap:
        if child.tag == '{:s}TotalTimeSeconds'.format(ns1):
            child.text = '{:.1f}'.format(totaltime)
    tree.write(args.output, xml_declaration=True, encoding='utf-8')
    print('Wrote output XML to', args.output)
| StarcoderdataPython |
191193 | import os, time, json
import numpy as np
from scellseg import models, io, metrics
from scellseg.contrast_learning.dataset import DatasetPairEval
from scellseg.dataset import DatasetShot, DatasetQuery
from torch.utils.data import DataLoader
from scellseg.utils import set_manual_seed, make_folder, process_different_model
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
# --- Evaluation configuration -------------------------------------------
use_GPU = True
num_batch = 8
channel = [2, 1]  # [cytoplasm, nucleus] channel indices passed to the model
flow_threshold = 0.4
cellprob_threshold = 0.5
# Minimum object size in pixels: 5% of the area of a 30-px-diameter circle.
min_size = ((30. // 2) ** 2) * np.pi * 0.05
# Project root = two directories above this file.
project_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+os.path.sep+".")
output_path = os.path.join(project_path, 'output')
make_folder(output_path)
output_excel_path = os.path.join(output_path, 'excels')
make_folder(output_excel_path)
dataset_dir_root = r'G:\Python\9-Project\1-cellseg\scellseg\input\eval'
dataset_names = ['BBBC010_elegans'] # 'BBBC010_elegans', 'mito', 'bv2'
model_name = 'scellseg' # unet2, unet3, hover, cellpose, scellseg, scellseg_sneuro, scellseg_sfluor, scellseg_scell, scellseg_smicro
net_avg = False
finetune_model = r'G:\Python\9-Project\1-cellseg\scellseg-gui\output\fine-tune' # TODO: you can provide the model file or folder_name of model files
pretrained_model = os.path.join(project_path, 'assets', 'pretrained_models', model_name)
task_mode, postproc_mode, attn_on, dense_on, style_scale_on = process_different_model(model_name) # task_mode mean different instance representation
# Evaluate the chosen model on every dataset: run inference on the query
# images guided by the "shot" images, then write AP (over IoU thresholds
# 0.5..1.0) and AJI metrics to an Excel sheet plus segmentation overlays.
for dataset_name in dataset_names:
    dataset_dir = os.path.join(dataset_dir_root, dataset_name)
    shot_data_dir = os.path.join(dataset_dir, 'shot')
    shot_img_names = io.get_image_files(shot_data_dir, '_masks', '_img')
    index_label = ['AP', 'AJI', '']  # row labels for the output spreadsheet
    output = pd.DataFrame()
    save_name = model_name+'_'+dataset_name
    t0 = time.time()
    set_manual_seed(5)  # fixed seed for reproducible evaluation
    # Shot images provide the style / mean-diameter reference; query images are scored.
    shotset = DatasetShot(eval_dir=dataset_dir, class_name=None, image_filter='_img', mask_filter='_masks',
                          channels=channel, task_mode=task_mode, active_ind=None, rescale=True)
    queryset = DatasetQuery(dataset_dir, class_name=None, image_filter='_img', mask_filter='_masks')
    query_image_names = queryset.query_image_names
    query_label_names = queryset.query_label_names
    diameter = shotset.md
    print('>>>> mean diameter of this style,', round(diameter, 3))
    model = models.sCellSeg(pretrained_model=pretrained_model, gpu=use_GPU, nclasses=3,
                            task_mode=task_mode, net_avg=net_avg,
                            attn_on=attn_on, dense_on=dense_on, style_scale_on=style_scale_on,
                            last_conv_on=True, model=None)
    model_dict = model.net.state_dict()  # NOTE(review): unused below -- confirm whether still needed
    model.net.save_name = save_name
    shot_pairs = (np.array(shotset.shot_img_names), np.array(shotset.shot_mask_names), True)  # third element: whether to recompute from the shot set
    masks, flows, styles = model.inference(finetune_model=finetune_model, net_avg=net_avg,
                                           query_image_names=query_image_names, channel=channel, diameter=diameter,
                                           resample=False, flow_threshold=flow_threshold, cellprob_threshold=cellprob_threshold,
                                           min_size=min_size, tile_overlap=0.5, eval_batch_size=16, tile=True,
                                           postproc_mode=postproc_mode, shot_pairs=shot_pairs)
    t1 = time.time()
    print('\033[1;32m>>>> Total Time:\033[0m', t1 - t0, 's')
    show_single = False  # set True to also print per-image AP / AJI
    query_labels = [np.array(io.imread(query_label_name)) for query_label_name in query_label_names]
    image_names = [query_image_name.split('query\\')[-1] for query_image_name in query_image_names]
    query_labels = metrics.refine_masks(query_labels)  # presumably normalizes labels before scoring -- confirm against scellseg.metrics.refine_masks
    # compute AP over IoU thresholds 0.5, 0.55, ..., 1.0
    thresholds = np.arange(0.5, 1.05, 0.05)
    ap, _, _, _, pred_ious = metrics.average_precision(query_labels, masks, threshold=thresholds, return_pred_iou=True)
    if show_single:
        ap_dict = dict(zip(image_names, ap))
        print('\033[1;34m>>>> scellseg - AP\033[0m')
        for k,v in ap_dict.items():
            print(k, v)
    print('\033[1;34m>>>> AP:\033[0m', [round(ap_i, 3) for ap_i in ap.mean(axis=0)])
    # compute AJI (aggregated Jaccard index), one value per image
    aji = metrics.aggregated_jaccard_index(query_labels, masks)
    if show_single:
        aji_dict = dict(zip(image_names, aji))
        print('\033[1;34m>>>> scellseg - AJI\033[0m')
        for k,v in aji_dict.items():
            print(k, v)
    print('\033[1;34m>>>> AJI:\033[0m', aji.mean())
    # make dataframe: one row of mean APs, one row of per-image AJIs, one blank spacer
    output1 = pd.DataFrame([round(ap_i, 3) for ap_i in ap.mean(axis=0)]).T
    output2 = pd.DataFrame(aji).T
    output_blank = pd.DataFrame([' ']).T
    output = pd.concat([output, output1, output2, output_blank], ignore_index=True)
    # save output images (seg files and PNG overlays) next to the query files
    diams = np.ones(len(query_image_names)) * diameter
    imgs = [io.imread(query_image_name) for query_image_name in query_image_names]
    io.masks_flows_to_seg(imgs, masks, flows, diams, query_image_names, [channel for i in range(len(query_image_names))])
    io.save_to_png(imgs, masks, flows, query_image_names, labels=query_labels, aps=None, task_mode=task_mode)
    output.index = index_label * 1
    output.to_excel(os.path.join(output_excel_path, save_name+'.xlsx'))
| StarcoderdataPython |
1707034 | <reponame>tedunderwood/fiction
# select_random_corpus.py
#
# This module imports metadata about volumes
# in a given set of genre(s), as well as a
# given random set, and then helps the user
# select more volumes to balance the sets.
#
# It is loosely based on
#
# /Users/tunder/Dropbox/GenreProject/python/reception/select_poetry_corpus3.py
import csv, os, sys
import SonicScrewdriver as utils
import random
# Module-level accumulators for the interactive selection session.
selecteddates = dict()
selected = list()
selectedmeta = dict()
# Nationalities treated as reliably known when matching volumes.
knownnations = {'us', 'uk'}
def user_added_meta():
    """Interactively prompts the user for author/volume metadata.

    Returns a dict with keys birthdate, gender, nationality and firstpub;
    values are the raw strings the user typed.
    """
    prompts = (
        ('birthdate', 'Authors year of birth? '),
        ('gender', 'Authors gender? '),
        ('nationality', 'Authors nationality? '),
        ('firstpub', 'Date of first publication? '),
    )
    # dict comprehension preserves prompt order (insertion-ordered dicts).
    return {field: input(prompt) for field, prompt in prompts}
def forceint(astring):
    """Coerces *astring* to an int, returning 0 for anything unparseable.

    Fix: the original bare `except:` swallowed every exception including
    KeyboardInterrupt/SystemExit; only the conversion failures int() can
    raise are caught now.

    :param astring: Value to convert (typically a CSV field string).
    :returns: int(astring), or 0 if conversion fails.
    """
    try:
        return int(astring)
    except (TypeError, ValueError):
        return 0
def get_metasets(inmetadata, targettags, randomtag):
    ''' Reads rows from path: inmetadata and identifies
    two groups of volumes: those that carry one of the genre tags in the
    target group, and those that carry the "random" tag.

    Rows dated outside 1700-2000 or tagged 'drop' are skipped. A row
    carrying both a target tag and the random tag is a data error and
    aborts the program.
    '''
    target_rows = []
    random_rows = []
    with open(inmetadata, encoding = 'utf-8') as f:
        for row in csv.DictReader(f):
            firstpub = forceint(row['firstpub'])
            if not 1700 <= firstpub <= 2000:
                continue
            tags = {tag.strip() for tag in row['genretags'].split('|')}
            if 'drop' in tags:
                continue
            is_target = any(tag in tags for tag in targettags)
            is_random = randomtag in tags
            if is_random and is_target:
                # These should never happen at the same time: a random tag
                # should preclude all other tags.
                print("ERROR CONDITION: random tag and target genre")
                print("tags both present for a single volume.")
                sys.exit(0)
            elif is_random:
                random_rows.append(row)
            elif is_target:
                target_rows.append(row)
    return target_rows, random_rows
def closest_idx(targetvollist, row):
    """Index of the volume in targetvollist that best matches *row*.

    Proximity is the absolute gap in first-publication year, plus a 0.1
    penalty each for a confident gender mismatch and a confident
    nationality mismatch (both sides in knownnations). Ties resolve to
    the earliest index; an empty targetvollist raises ValueError, exactly
    like the original min()/index() formulation.
    """
    date = forceint(row['firstpub'])
    gender = row['gender']
    nationality = row['nationality']
    scores = []
    for candidate in targetvollist:
        score = abs(forceint(candidate['firstpub']) - date)
        candidate_gender = candidate['gender']
        if gender != candidate_gender and gender != '' and candidate_gender != '':
            score += 0.1
        candidate_nation = candidate['nationality']
        if nationality != candidate_nation and nationality in knownnations and candidate_nation in knownnations:
            score += 0.1
        scores.append(score)
    # min over indices keyed on score keeps the first-minimum tie-break.
    return min(range(len(scores)), key=scores.__getitem__)
def get_difference(targetvollist, randomvollist):
    ''' Identifies volumes in targetvollist matching the dates of
    randomvollist and subtracts those from targetvollist in order to
    identify a list of dates that remain unmatched.

    Mutates targetvollist in place via pop() and prints each pairing.
    '''
    if len(randomvollist) >= len(targetvollist):
        return []
    for row in randomvollist:
        popped = targetvollist.pop(closest_idx(targetvollist, row))
        print(f"MATCH this: {row['firstpub']} : {row['title']} {row['gender']}")
        print(f"with this: {popped['firstpub']} : {popped['title']} {popped['gender']}")
        print()
    return targetvollist
# START MAIN PROCEDURE
# Columns copied verbatim from ficmeta.csv, and the full output schema.
fieldstocopy = ['recordid', 'oclc', 'locnum', 'author', 'imprint', 'enumcron', 'subjects', 'title']
fieldstowrite = ['docid', 'recordid', 'oclc', 'locnum', 'author', 'imprint', 'date', 'birthdate', 'firstpub', 'enumcron', 'subjects', 'title', 'nationality', 'gender', 'genretags']
# NOTE(review): hard-coded personal path; this script only runs on the
# original author's machine as written.
sourcemetafile = "/Users/tunder/Dropbox/fiction/meta/genremeta.csv"
targetphrase = input("Comma-separated list of target genres: ")
targettags = [x.strip() for x in targetphrase.split(',')]
randomtag = input('Random tag to use for this run? ')
# Split existing metadata into target-genre volumes and random-set volumes,
# then find the target volumes that still lack a random-set counterpart.
targetvollist, randomvollist = get_metasets(sourcemetafile, targettags, randomtag)
unmatchedtargets = get_difference(targetvollist, randomvollist)
# Tally gender and nationality of the still-unmatched target volumes so the
# user can see what balance the new random selections should aim for.
usa = 0
nonusa = 0
male = 0
female = 0
for row in unmatchedtargets:
    gender = row['gender']
    nationality = row['nationality']
    if nationality == 'us':
        usa += 1
    else:
        nonusa += 1
    if gender == 'f':
        female += 1
    elif gender == 'm':
        male += 1
# Index the full fiction metadata: rows by docid, dates by docid, and a
# date -> [docid, ...] map used to draw random candidates per year.
bydate = dict()
fictionmetadata = dict()
datesbydocid = dict()
with open('/Users/tunder/work/genre/metadata/ficmeta.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        docid = utils.clean_pairtree(row['htid'])
        fictionmetadata[docid] = row
        date = utils.date_row(row)
        datesbydocid[docid] = date
        # `selected` is empty as shipped, so nothing is skipped here;
        # presumably a hook for pre-seeding — confirm before relying on it.
        if docid in selected:
            continue
        if date in bydate:
            bydate[date].append(docid)
        else:
            bydate[date] = [docid]
# Interactive loop: for each unmatched target volume, keep proposing random
# fiction volumes from the same first-publication year until the user
# accepts one ('y') or types 'quit' to abandon the run.
controlset = set()
controlmeta = dict()
usedfromselected = list()
print("IN UNMATCHED VOLUMES: ")
print("Male/female ratio: " + str(male) + " / " + str(female))
print("US / nonUS ratio: " + str(usa) + " / " + str(nonusa))
tarfemale = 0
confemale = 0
tarusa = 0
conusa = 0
for row in unmatchedtargets:
    print()
    print("Women in targetvols / women selected: " + str(tarfemale) + " / " + str(confemale))
    print("US in targetvols / US selected: " + str(tarusa)+ " / " + str(conusa) )
    theid = row['docid']
    date = forceint(row['firstpub'])
    usedfromselected.append(theid)
    print(theid)
    print(date)
    print(row['author'])
    print(row['title'])
    print(row['nationality'] + " -- " + row['gender'])
    if row['gender'].strip() == 'f':
        tarfemale += 1
    if row['nationality'] == 'us':
        tarusa += 1
    found = False
    while not found:
        # NOTE(review): raises KeyError if no fiction volume exists for
        # this year; the original tolerated that crash.
        candidates = bydate[date]
        choice = random.sample(candidates, 1)[0]
        print(choice)
        print(fictionmetadata[choice]['author'])
        print(fictionmetadata[choice]['title'])
        acceptable = input("ACCEPT? (y/n): ")
        if acceptable == "y":
            controlset.add(choice)
            found = True
            # Ask the user for the fields ficmeta.csv lacks, then copy the
            # rest across so the row matches fieldstowrite.
            controlmeta[choice] = user_added_meta()
            controlmeta[choice]['docid'] = choice
            controlmeta[choice]['date'] = datesbydocid[choice]
            controlmeta[choice]['genretags'] = randomtag
            for field in fieldstocopy:
                controlmeta[choice][field] = fictionmetadata[choice][field]
            if controlmeta[choice]['gender'] == 'f':
                confemale += 1
            if controlmeta[choice]['nationality'] == 'us':
                conusa += 1
        if acceptable == 'quit':
            break
    if acceptable == 'quit':
        break
# Append every accepted volume to the source metadata file. No header is
# written because the file already exists with the same column order.
with open(sourcemetafile, mode='a', encoding = 'utf-8') as f:
    writer = csv.DictWriter(f, fieldnames = fieldstowrite)
    for docid, row in controlmeta.items():
        writer.writerow(row)
| StarcoderdataPython |
1665808 | <reponame>drednout/locust_on_meetup
from locust import HttpLocust, TaskSet, task
class HttpPingTasks(TaskSet):
    """Locust task set with a single task that pings the service root."""
    @task
    def ping(self):
        """GET '/' so locust records latency/failure for the root endpoint."""
        self.client.get("/")
class HttpPingLocust(HttpLocust):
    """Simulated user that runs HttpPingTasks."""
    task_set = HttpPingTasks
    # Random wait between consecutive tasks, in milliseconds.
    min_wait = 100
    max_wait = 500
| StarcoderdataPython |
80298 | import pytest
from ..encryptor import Encryptor, Decryptor
from secrets import token_bytes, randbelow
@pytest.fixture
def private_key_bytes():
    """PEM-encoded private key generated without password protection."""
    private_key_bytes = Decryptor.generate_private_key(
        password=None
    )
    yield private_key_bytes
@pytest.fixture
def decryptor(private_key_bytes):
    """Decryptor built from the freshly generated private key."""
    decryptor = Decryptor(
        private_key=private_key_bytes,
        password=None
    )
    yield decryptor
@pytest.fixture
def encryptor(decryptor):
    """Encryptor (public half) derived from the decryptor."""
    encryptor = decryptor.get_encryptor()
    yield encryptor
@pytest.fixture
def secret_bytes():
    """Random payload of 512-1023 bytes."""
    secret_bytes = token_bytes(512 + randbelow(512))
    yield secret_bytes
@pytest.fixture
def long_secret_bytes(secret_bytes):
    """Large payload: 512-1023 copies of secret_bytes, each prefixed with a 4-byte counter."""
    long_secret_bytes = b''.join(int.to_bytes(i, length=4, byteorder='big') + secret_bytes for i in range(0, 512 + randbelow(512)))
    yield long_secret_bytes
def test_decryptor_generate_private_key(private_key_bytes):
    """Generated key is PEM-encoded (PKCS#8 header present)."""
    assert private_key_bytes.find(b'-----BEGIN PRIVATE KEY-----') != -1
def test_decryptor_private_key_bytes(private_key_bytes, decryptor):
    """Round-trip: decryptor serializes back to the exact key it was built from."""
    assert decryptor.get_private_bytes(password=None) == private_key_bytes
def test_encryptor_public_key_byptes(encryptor):
    """Public key is PEM-encoded. (Name typo 'byptes' kept: renaming a test changes its id.)"""
    assert encryptor.get_public_bytes().find(b'-----BEGIN PUBLIC KEY-----') != -1
def test_name_hash(encryptor):
    """Encryptor name is the hex digest of its public key bytes."""
    assert encryptor.name == Encryptor.hash(encryptor.get_public_bytes()).hex()
def test_encryption(encryptor, secret_bytes):
    """Ciphertext is non-empty and differs from the plaintext."""
    encrypted, _ = encryptor.encrypt(
        data=secret_bytes
    )
    assert len(encrypted)
    assert encrypted != secret_bytes
def test_decryption(encryptor, secret_bytes, decryptor):
    """encrypt -> decrypt (with metadata) round-trips the plaintext."""
    encrypted, metadata = encryptor.encrypt(
        data=secret_bytes
    )
    decrypted = decryptor.decrypt(
        data=encrypted,
        metadata=metadata
    )
    assert decrypted == secret_bytes
def test_appended_metadata(encryptor, long_secret_bytes, decryptor):
    """Metadata embedded in the ciphertext stream can be stripped and used to decrypt."""
    encrypted = Encryptor.append_metadata(*encryptor.encrypt(
        data=long_secret_bytes
    ))
    decrypted = decryptor.decrypt(*Decryptor.strip_metadata(
        data=encrypted,
    ))
    assert decrypted == long_secret_bytes
def test_key(encryptor):
    """Session keys are non-empty and fresh on every call."""
    key = encryptor.generate_key()
    assert len(key)
    assert key != encryptor.generate_key()
def test_initialization_vector(encryptor):
    """IVs are non-empty and fresh on every call."""
    initialization_vector = encryptor.generate_initialization_vector()
    assert len(initialization_vector)
    assert initialization_vector != encryptor.generate_initialization_vector()
def test_cipher(encryptor, secret_bytes):
    """Symmetric layer: encipher/decipher round-trips, and a wrong key or
    wrong IV must not recover the plaintext (it may either raise or yield
    garbage, so both outcomes are accepted below)."""
    key = encryptor.generate_key()
    initialization_vector = encryptor.generate_initialization_vector()
    enciphered = Encryptor.encipher(
        data=secret_bytes,
        key=key,
        initialization_vector=initialization_vector,
        backend=encryptor.backend
    )
    assert enciphered != secret_bytes
    deciphered = Encryptor.decipher(
        data=enciphered,
        key=key,
        initialization_vector=initialization_vector,
        backend=encryptor.backend
    )
    assert deciphered == secret_bytes
    deciphered_different_key = None
    try:
        deciphered_different_key = Encryptor.decipher(
            data=enciphered,
            key=encryptor.generate_key(),
            initialization_vector=initialization_vector,
            backend=encryptor.backend
        )
    except (KeyboardInterrupt, SystemExit):
        raise
    # Deliberate broad catch: any decipher failure counts as "did not
    # recover the plaintext", which is the property under test.
    except Exception:
        pass
    assert deciphered_different_key is None or deciphered_different_key != secret_bytes
    deciphered_different_initialization_vector = None
    try:
        deciphered_different_initialization_vector = Encryptor.decipher(
            data=enciphered,
            key=key,
            initialization_vector=encryptor.generate_initialization_vector(),
            backend=encryptor.backend
        )
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        pass
    assert deciphered_different_initialization_vector is None or deciphered_different_initialization_vector != secret_bytes
def test_registry(encryptor, long_secret_bytes, decryptor):
    """A registered decryptor can be found by name and used for round-tripping."""
    Decryptor.register_decryptor(decryptor=decryptor)
    encrypted, _ = Encryptor.encrypt_with_registry(
        data=long_secret_bytes,
        name=encryptor.name,
        append_metadata=True
    )
    decrypted = Decryptor.decrypt_with_registry(
        data=encrypted
    )
    assert decrypted == long_secret_bytes
| StarcoderdataPython |
"""
Parametric spline interpolator
Useful when you need to interpolate a curve of values, like points along a flux surface or a chord
"""
import warnings
import numpy as np
from scipy.interpolate import splprep, splev
class ParametricSpline:
    """
    A wrapper class around splprep and splev from scipy.interpolate.
    Uses cubic spline interpolation by default (order=3).
    """

    def __init__(
        self,
        sample_points: list,
        t_points: "np.ndarray | None" = None,
        smoothing: float = 0.0,
        periodic: bool = False,
        order: int = 3,
    ):
        """
        Calculate the knots and coefficients for a parametric spline interpolator.

        sample_points should be a list of 1D arrays, i.e. [x_points, y_points, ...].
        If t_points is given, the arrays in sample_points are parametrised by it;
        otherwise splprep assigns a normalised chord-length parametrisation in [0, 1].
        If smoothing > 0.0 the input points are smoothed (typically smoothing
        required << 1; recommend checking the fit).
        order must be less than the number of sample points.

        Raises AssertionError when sample_points is empty, the coordinate
        arrays differ in length, or there are too few points for `order`.
        (Annotations fixed: `np.array` is a function, not a type.)
        """
        assert sample_points, "sample_points must contain at least one array"
        lengths = {len(sample_array) for sample_array in sample_points}
        assert len(lengths) == 1, "sample_points arrays must all have the same length"
        sample_length = lengths.pop()
        assert sample_length > order, (
            f"Not enough sample points ({sample_length}) for an order ({order}) "
            "ParametricSpline"
        )
        if order < 3 and smoothing > 0.0:
            warnings.warn(
                UserWarning(
                    "Should not use smoothing for order < 3 in ParametricSpline"
                )
            )
        per = 1 if periodic else 0
        if t_points is None:
            tck, self.t_points = splprep(sample_points, s=smoothing, per=per, k=order)
        else:
            tck, self.t_points = splprep(
                sample_points, u=t_points, s=smoothing, per=per, k=order
            )
        # Cache the valid parameter interval; __call__ refuses extrapolation.
        self.t_min, self.t_max = np.min(self.t_points), np.max(self.t_points)
        self.knots, self.coeffs, self.order = tck

    def __call__(self, t_evaluations: "np.ndarray"):
        """
        Return the spline evaluations (one array per coordinate) at t_evaluations.

        Interpolation only: every requested parameter must lie inside
        [self.t_min, self.t_max], else AssertionError.
        """
        assert (
            t_evaluations.min() >= self.t_min and t_evaluations.max() <= self.t_max
        ), f"Requested points in the range\
 {t_evaluations.min()}, {t_evaluations.max()}, which is outside the interval {self.t_min}, {self.t_max}"
        return splev(t_evaluations, (self.knots, self.coeffs, self.order))
| StarcoderdataPython |
96632 | <gh_stars>1-10
import os
# Telegram bot credentials. '<KEY>' is a redacted placeholder — substitute
# a real token before running.
bot_token = '<KEY>'
bot_user_name = 'xxx_bot'
# Public webhook base URL (an ngrok tunnel; changes each time ngrok restarts).
URL = "https://cf45064e05ed.ngrok.io"
| StarcoderdataPython |
3377452 | from nxos_ebay import NexusOSNetConfDriver
| StarcoderdataPython |
1751184 | <gh_stars>1-10
from sklearn.metrics import recall_score
from metrics.metric import Metric
class Recall(Metric):
    """Recall metric wrapper around sklearn's recall_score.

    apply() returns per-label recall keyed by the stringified label index,
    plus 'macro avg', 'micro avg', 'weighted avg' and 'samples avg' entries
    (same key order as before). zero_division=1 keeps labels with no
    positive samples from raising.
    """
    name = 'recall'

    def apply(self, y_true, y_pred):
        """Return a dict of per-label and averaged recall scores."""
        per_label = recall_score(y_true, y_pred, average=None, zero_division=1)
        output = {str(index): score for index, score in enumerate(per_label)}
        for average in ('macro', 'micro', 'weighted', 'samples'):
            output[average + ' avg'] = recall_score(
                y_true, y_pred, average=average, zero_division=1)
        return output


# Fix: the dataset marker "| StarcoderdataPython" was fused onto this line.
METRIC = Recall()
1738125 | <gh_stars>10-100
"""
Example demonstrating how to add DID with the role of Trust Anchor to ledger.
Uses seed to obtain Steward's DID which already exists on the ledger.
Then it generates new DID/Verkey pair for Trust Anchor.
Using Steward's DID, NYM transaction request is built to add Trust Anchor's DID and Verkey
on the ledger with the role of Trust Anchor.
Once the NYM is successfully written on the ledger, it generates new DID/Verkey pair that represents
a client, which are used to create GET_NYM request to query the ledger and confirm Trust Anchor's Verkey.
For the sake of simplicity, a single wallet is used. In the real world scenario, three different wallets
would be used and DIDs would be exchanged using some channel of communication
"""
import asyncio
import json
import pprint
from indy import pool, ledger, wallet, did
from indy.error import IndyError
from src.utils import run_coroutine, PROTOCOL_VERSION
pool_name = 'pool1'
# Genesis transactions file for the local indy pool.
pool_genesis_txn_path = "/home/indy/.indy_client/pool/pool1.txn"
wallet_config = json.dumps({"id": "wallet"})
wallet_credentials = json.dumps({"key": "wallet_key"})
# Set protocol version to 2 to work with the current version of Indy Node
# NOTE(review): this rebinding shadows PROTOCOL_VERSION imported from
# src.utils above — confirm which one downstream code expects.
PROTOCOL_VERSION = 2
def print_log(value_color="", value_noncolor=""):
    """Print value_color highlighted in green, then value_noncolor, on one line."""
    green = '\033[92m'
    reset = '\033[0m'
    print(f"{green}{value_color}{reset}{value_noncolor}")
async def write_nym_and_query_verkey():
    """Tutorial skeleton: set the ledger protocol version, then (in the
    numbered steps to be filled in) write a Trust Anchor NYM and query its
    verkey. Any libindy failure is reported rather than propagated."""
    try:
        await pool.set_protocol_version(PROTOCOL_VERSION)
        # Step 2 code goes here.
        # Step 3 code goes here.
        # Step 4 code goes here.
        # Step 5 code goes here.
    except IndyError as e:
        print('Error occurred: %s' % e)
if __name__ == '__main__':
    # Fix: `time` was used below but never imported anywhere in this module,
    # so the sleep raised NameError once the coroutine finished.
    import time

    run_coroutine(write_nym_and_query_verkey)
    time.sleep(1)  # FIXME waiting for libindy thread complete
| StarcoderdataPython |
1728909 | <reponame>marsggbo/hyperbox
from .constants import NONE, SKIP_CONNECT, CONV_1X1, CONV_3X3, AVG_POOL_3X3, PRIMITIVES
from .model import Nb201TrialStats, Nb201IntermediateStats, Nb201TrialConfig
from .query import query_nb201_trial_stats
| StarcoderdataPython |
3236693 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utility resource methods."""
from typing import List, Tuple, Union
def read_expected_file(path: str) -> List[Tuple[Union[str, float], ...]]:
    """Read a whitespace-separated resource file into (label, value) tuples.

    Each line contributes at most two fields: the first token kept as str,
    the second converted to float (zip truncates to the shorter of the
    converter pair and the token list, so short/blank lines yield shorter
    tuples).
    """
    converters = (str, float)
    rows = []
    with open(path, "r") as handle:
        for line in handle:
            rows.append(tuple(conv(tok) for conv, tok in zip(converters, line.split())))
    return rows
| StarcoderdataPython |
#!/usr/bin/env python
# This is largely adopted from https://github.com/enode-engineering/tesla-oauth2
import base64
import hashlib
import os
import sys
import re
import random
import time
import argparse
import json
from urllib.parse import parse_qs
import requests
MAX_ATTEMPTS = 7  # retries for each auth HTTP round-trip before giving up
# OAuth client id of the official Tesla owner app.
CLIENT_ID = "81527cff06843c8634fdc09e8ac0abefb46ac849f38fe1e431c2ef2106796384"
# Browser and app user agents expected by the Tesla auth endpoints.
UA = "Mozilla/5.0 (Linux; Android 10; Pixel 3 Build/QQ2A.200305.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/85.0.4183.81 Mobile Safari/537.36"
X_TESLA_USER_AGENT = "TeslaApp/3.10.9-433/adff2e065/android/10"
AUTH_URL = "https://auth.tesla.com/oauth2/v3/authorize"
verbose = False  # set True for debug prints of each auth step
def gen_params():
    """Generate a PKCE code_verifier / code_challenge pair plus a random state.

    The verifier is 86 random bytes, base64url-encoded without padding; the
    challenge is the unpadded base64url SHA-256 of the verifier (RFC 7636
    S256 method); state is 16 random bytes, likewise encoded, as str.
    """
    raw = os.urandom(86)
    verifier = base64.urlsafe_b64encode(raw).rstrip(b"=")
    digest = hashlib.sha256(verifier).digest()
    challenge = base64.urlsafe_b64encode(digest).rstrip(b"=")
    state_token = base64.urlsafe_b64encode(os.urandom(16)).rstrip(b"=").decode("utf-8")
    return verifier, challenge, state_token
def login(email, password, mfa_code):
    """Perform the full Tesla OAuth2/PKCE login dance and return a token dict.

    email/password/mfa_code may each be a str or a zero-arg callable that
    returns the str (only password and mfa_code are checked for that here).
    Returns {"refresh_token", "access_token", "created_at", "expires_in"}.
    Raises ValueError when the auth form, MFA verification, or redirect
    cannot be obtained within MAX_ATTEMPTS. Network I/O throughout.
    """
    if type(password) != str:
        # We were not given a password as a string. In that case we
        # assume it's a function which will return the password as a
        # string.
        password = password()
    headers = {
        "User-Agent": UA,
        "x-tesla-user-agent": X_TESLA_USER_AGENT,
        "X-Requested-With": "com.teslamotors.tesla",
    }
    # Step 1: Obtain the login page
    for attempt in range(MAX_ATTEMPTS):
        code_verifier, code_challenge, state = gen_params()
        params = (
            ("client_id", "ownerapi"),
            ("code_challenge", code_challenge),
            ("code_challenge_method", "S256"),
            ("redirect_uri", "https://auth.tesla.com/void/callback"),
            ("response_type", "code"),
            ("scope", "openid email offline_access"),
            ("state", state),
        )
        session = requests.Session()
        resp = session.get(AUTH_URL, headers = headers, params = params)
        if resp.ok and "<title>" in resp.text:
            if verbose:
                print(f"Get auth form success - {attempt + 1} attempt(s).")
            break
        time.sleep(3)
    else:
        raise ValueError(f"Didn't get auth form in {MAX_ATTEMPTS} attempts.")
    # Step 2: Obtain an authorization code
    # Scrape the CSRF token and transaction id out of the HTML form.
    # NOTE(review): re.search(...) returns None if the form layout changes,
    # which would surface here as AttributeError rather than a clear error.
    csrf = re.search(r'name="_csrf".+value="([^"]+)"', resp.text).group(1)
    transaction_id = re.search(r'name="transaction_id".+value="([^"]+)"',
                               resp.text).group(1)
    data = {
        "_csrf": csrf,
        "_phase": "authenticate",
        "_process": "1",
        "transaction_id": transaction_id,
        "cancel": "",
        "identity": email,
        "credential": password,
    }
    for attempt in range(MAX_ATTEMPTS):
        resp = session.post(AUTH_URL, headers = headers, params = params,
                            data = data, allow_redirects = False)
        if resp.ok and (resp.status_code == 302 or "<title>" in resp.text):
            if verbose:
                print(f"Post auth form success - {attempt + 1} attempt(s).")
            break
        time.sleep(3)
    else:
        raise ValueError(f"Didn't post auth form in {MAX_ATTEMPTS} attempts.")
    # Determine if user has MFA enabled. In that case there is no
    # redirect to `https://auth.tesla.com/void/callback` and app shows
    # new form with Passcode / Backup Passcode field
    if resp.status_code == 200 and "/mfa/verify" in resp.text:
        if type(mfa_code) != str:
            # We were not given the mfa code as a string. In that case
            # we assume it's a function which will return the code as
            # a string.
            mfa_code = mfa_code()
        resp = session.get(f"{AUTH_URL}/mfa/factors?transaction_id={transaction_id}",
                           headers = headers)
        # {
        #     "data": [
        #         {
        #             "dispatchRequired": false,
        #             "id": "41d6c32c-b14a-4cef-9834-36f819d1fb4b",
        #             "name": "Device #1",
        #             "factorType": "token:software",
        #             "factorProvider": "TESLA",
        #             "securityLevel": 1,
        #             "activatedAt": "2020-12-07T14:07:50.000Z",
        #             "updatedAt": "2020-12-07T06:07:49.000Z",
        #         }
        #     ]
        # }
        if verbose:
            print(resp.text)
        # Only the first registered MFA factor is used.
        factor_id = resp.json()["data"][0]["id"]
        if len(mfa_code) == 6:
            # Use Passcode
            data = {
                "transaction_id": transaction_id,
                "factor_id": factor_id,
                "passcode": mfa_code
            }
            resp = session.post(f"{AUTH_URL}/mfa/verify", headers = headers,
                                json = data)
            # ^^ Content-Type - application/json
            if verbose:
                print("mfa verify response:")
                print(resp.text)
            # {
            #     "data": {
            #         "id": "63375dc0-3a11-11eb-8b23-75a3281a8aa8",
            #         "challengeId": "c7febba0-3a10-11eb-a6d9-2179cb5bc651",
            #         "factorId": "41d6c32c-b14a-4cef-9834-36f819d1fb4b",
            #         "passCode": "<PASSWORD>",
            #         "approved": true,
            #         "flagged": false,
            #         "valid": true,
            #         "createdAt": "2020-12-09T03:26:31.000Z",
            #         "updatedAt": "2020-12-09T03:26:31.000Z",
            #     }
            # }
            if "error" in resp.text or not resp.json()["data"]["approved"] or \
               not resp.json()["data"]["valid"]:
                raise ValueError("Invalid passcode.")
        elif len(mfa_code) == 9:
            # Use Backup Passcode
            data = {
                "transaction_id": transaction_id,
                "backup_code": mfa_code
            }
            resp = session.post(f"{AUTH_URL}/mfa/backupcodes/attempt",
                                headers = headers, json = data)
            # ^^ Content-Type - application/json
            if verbose:
                print(resp.text)
            # {
            #     "data": {
            #         "valid": true,
            #         "reason": null,
            #         "message": null,
            #         "enrolled": true,
            #         "generatedAt": "2020-12-09T06:14:23.170Z",
            #         "codesRemaining": 9,
            #         "attemptsRemaining": 10,
            #         "locked": false,
            #     }
            # }
            if "error" in resp.text or not resp.json()["data"]["valid"]:
                raise ValueError("Invalid backup passcode.")
        else:
            raise ValueError(f"Expected a 6 or 9 character code, "
                             f"got {mfa_code}")
        data = {
            "transaction_id": transaction_id
        }
    for attempt in range(MAX_ATTEMPTS):
        resp = session.post(AUTH_URL, headers = headers, params = params,
                            data = data, allow_redirects = False)
        if resp.headers.get("location"):
            if verbose:
                print(f"Got location in {attempt + 1} attempt(s).")
            break
    else:
        raise ValueError(f"Didn't get location in {MAX_ATTEMPTS} attempts.")
    # Step 3: Exchange authorization code for bearer token
    # parse_qs on the whole redirect URL leaves the URL prefix fused into the
    # first key; `code` is therefore a list of values as parse_qs returns.
    code = parse_qs(resp.headers["location"]) \
        ["https://auth.tesla.com/void/callback?code"]
    if verbose:
        print("Code -", code)
    headers = {"user-agent": UA, "x-tesla-user-agent": X_TESLA_USER_AGENT}
    payload = {
        "grant_type": "authorization_code",
        "client_id": "ownerapi",
        "code_verifier": code_verifier.decode("utf-8"),
        "code": code,
        "redirect_uri": "https://auth.tesla.com/void/callback",
    }
    resp = session.post("https://auth.tesla.com/oauth2/v3/token",
                        headers = headers, json = payload)
    resp_json = resp.json()
    refresh_token = resp_json["refresh_token"]
    access_token = resp_json["access_token"]
    if verbose:
        print("{\"refresh_token\": \"" + refresh_token + "\"}")
    # Step 4: Exchange bearer token for access token
    headers["authorization"] = "bearer " + access_token
    payload = {
        "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
        "client_id": CLIENT_ID,
    }
    resp = session.post("https://owner-api.teslamotors.com/oauth/token",
                        headers = headers, json = payload)
    # save our tokens
    resp_json = resp.json()
    return {
        "refresh_token": refresh_token,
        "access_token": resp_json["access_token"],
        "created_at": resp_json["created_at"],
        "expires_in": resp_json["expires_in"]
    }
def refresh(auth):
    """Refresh the owner-API token if it expires within 7 days.

    Mutates *auth* in place with the new refresh/access token, created_at
    and expires_in, and returns True; returns False (no network call) when
    the current token is still comfortably valid. Network I/O otherwise.
    """
    # Refresh if expiration is within 7 days of now.
    # NOTE(review): this local shadows the module-level expiration() helper.
    expiration = auth["created_at"] + auth["expires_in"] - (7 * 24 * 3600)
    if time.time() < expiration:
        return False
    headers = {
        "user-agent": UA,
        "x-tesla-user-agent": X_TESLA_USER_AGENT
    }
    payload = {
        "grant_type": "refresh_token",
        "client_id": "ownerapi",
        "refresh_token": auth["refresh_token"],
        "scope": "openid email offline_access",
    }
    session = requests.Session()
    resp = session.post("https://auth.tesla.com/oauth2/v3/token",
                        headers = headers, json = payload).json()
    refresh_token = resp["refresh_token"]
    access_token = resp["access_token"]
    if verbose:
        print("{\"refresh_token\": \"" + refresh_token + "\"}")
    # Step 4: Exchange bearer token for access token
    headers["authorization"] = "bearer " + access_token
    payload = {
        "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
        "client_id": CLIENT_ID,
    }
    resp = session.post("https://owner-api.teslamotors.com/oauth/token",
                        headers = headers, json = payload).json()
    # update auth data
    auth["refresh_token"] = refresh_token
    auth["access_token"] = resp["access_token"]
    auth["created_at"] = resp["created_at"]
    auth["expires_in"] = resp["expires_in"]
    return True
def request_headers(auth):
    """Standard owner-API request headers carrying *auth*'s bearer token."""
    headers = {
        "user-agent": UA,
        "x-tesla-user-agent": X_TESLA_USER_AGENT,
    }
    headers["authorization"] = "bearer " + auth["access_token"]
    return headers
def expiration(auth):
    """Epoch second at which the access token in *auth* expires."""
    return sum(auth[key] for key in ("created_at", "expires_in"))
| StarcoderdataPython |
3300283 | import maya.cmds as cmds
import maya.mel as mel
import os
import ART_rigUtils as utils
reload(utils)
#----------------------------------------------------------------------------------------
# Currently this script is written to work with multiple twists, but if you have more than one the last one is the only one that will work.
def upperArmTwist(color, rollGrpParent, fkArmJoint, ikArmJoint, suffix, name, prefix, upperArm, lowerArm):
print "START Building the "+ suffix +" UPPER arm twists---------------------------------------"
#create a nice name
name = prefix + name + suffix
if name.find("_") == 0:
name = name.partition("_")[2]
# define the joints for this rig.
upperarm = prefix + upperArm + suffix
lowerarm = prefix + lowerArm + suffix
driver_clavicle = "driver_" + prefix + "clavicle" + suffix
driver_upperarm = "driver_deltoid_" + prefix + upperArm + suffix
driver_lowerarm = "driver_" + prefix + lowerArm + suffix
numRolls = 0
for joint in ["_twist_01", "_twist_02", "_twist_03", "_twist_04", "_twist_05", "_twist_06"]:
if cmds.objExists("driver_" + prefix + upperArm + joint + suffix):
numRolls = numRolls + 1
print "...There are a total of " + str(numRolls) + " to build."
for i in range(int(numRolls)):
print "...Building upper arm twist_0" + str(i + 1)
driver_upperarm_twist = "driver_"+prefix+upperArm+"_twist_0"+str(i+1)+suffix
rollGrp = cmds.group(empty=True, name=upperarm+"_roll_grp_0"+str(i+1))
cmds.parent(rollGrp, "arm_sys_grp")
# Move the driver_upperarm_twist joint to its correct position between the driver_upperarm and its driver_lowerarm using a point constraint.
const = cmds.parentConstraint(driver_upperarm, driver_upperarm_twist, n=driver_upperarm+"_temp_parentConst", weight=1)
cmds.delete(const)
const = cmds.pointConstraint(driver_upperarm, driver_lowerarm, driver_upperarm_twist, n=driver_upperarm+"_temp_PointConst", weight=1)
cmds.delete(const)
# Duplicate the driver_upperarm, driver_lowerarm, and driver_upperarm_twist joints
blendJnts=[]
blend_driver_upperarmParent = cmds.duplicate(driver_upperarm, n="Blend_"+driver_upperarm+"_parent_ignore_0"+str(i+1), ic=True, po=True)[0]
blendJnts.append(blend_driver_upperarmParent)
blend_driver_lowerarm = cmds.duplicate(driver_lowerarm, n="Blend_"+driver_lowerarm+"_ignore_0"+str(i+1), ic=True, po =True)[0]
blendJnts.append(blend_driver_lowerarm)
blend_driver_upperarm = cmds.duplicate(driver_upperarm, n="Blend_"+driver_upperarm+"_ignore_0"+str(i+1), po=True)[0]
blendJnts.append(blend_driver_upperarm)
blend_driver_upperarm_twist = cmds.duplicate(driver_upperarm_twist, n="Blend_"+driver_upperarm_twist+"_ignore_0"+str(i+1), ic=True, po= True)[0]
blendJnts.append(blend_driver_upperarm_twist)
cmds.parent(blend_driver_lowerarm, blend_driver_upperarm_twist, blend_driver_upperarmParent)
cmds.parent(blend_driver_upperarm, blend_driver_upperarm_twist)
# Set the parent bone to have zyx rotation order
cmds.setAttr(blend_driver_upperarmParent+".rotateOrder", 2)
originalJnts = [driver_upperarm, driver_lowerarm]
#print originalJnts
# Now we are going to disconnect the constraints that were driving the original driver_upperarm
# and driver_lowerarm joints and re-connect them to the parent_ignore and ignore joints that were duped from them.
# This is so that we can create new constraints on the driver_upperarm and driver_lowerarm joints for the rig.
j = 0
a = 0
#while a < len(originalJnts):
for a in range(0,len(originalJnts)):
# Go through the inputs of the original joints and disconnect them
origConn = cmds.listConnections(originalJnts[a], d=False, c=+True, p=True)
for j in range(0,len(origConn),2):
cmds.disconnectAttr(origConn[j+1], origConn[j])
nameList = origConn[j+1].split(".")[0]
destConn = cmds.listConnections(nameList, s=False)
# Find out if the node that was an input to the original joint is also an output
if originalJnts[a] in destConn:
sourceConn = cmds.listConnections(nameList, d=False, c=True, p=True)
for k in range(0,len(sourceConn),2):
# Get the input connections to the node that are connected to the original joint
nameConnList = sourceConn[k+1].split(".")
if (nameConnList[0] == originalJnts[a]):
# Disconnect from the original joint and connect to the blend joint
cmds.disconnectAttr(sourceConn[k+1], sourceConn[k])
nameConnList[0] = blendJnts[a]
cmds.connectAttr(nameConnList[0] + "." + nameConnList[-1], sourceConn[k])
# create the manual twist control
if i == 0:
twistCtrl = utils.createControl("circle", 15, upperarm + "_twist_anim")
else:
twistCtrl = utils.createControl("circle", 15, upperarm + "_twist"+str(i+1)+"_anim")
cmds.setAttr(twistCtrl + ".ry", -90)
cmds.setAttr(twistCtrl + ".sx", 0.8)
cmds.setAttr(twistCtrl + ".sy", 0.8)
cmds.setAttr(twistCtrl + ".sz", 0.8)
cmds.makeIdentity(twistCtrl, r = 1, s = 1, apply =True)
# move the manual control to the correct location
constraint = cmds.parentConstraint(blend_driver_upperarm_twist, twistCtrl)[0]
cmds.delete(constraint)
# create a group for the manual control and parent the twist to it.
twistCtrlGrp = cmds.group(empty = True, name = twistCtrl + "_grp")
constraint = cmds.parentConstraint(blend_driver_upperarm_twist, twistCtrlGrp)[0]
cmds.delete(constraint)
cmds.parent(twistCtrl, twistCtrlGrp)
cmds.parent(twistCtrlGrp, rollGrp)
cmds.makeIdentity(twistCtrl, t = 1, r = 1, s = 1, apply = True)
cmds.parentConstraint(blend_driver_upperarm_twist, twistCtrlGrp)
# set the manual controls visibility settings
cmds.setAttr(twistCtrl + ".overrideEnabled", 1)
cmds.setAttr(twistCtrl + ".overrideColor", color)
for attr in [".sx", ".sy", ".sz"]:
cmds.setAttr(twistCtrl + attr, lock = True, keyable = False)
cmds.setAttr(twistCtrl + ".v", keyable = False)
# add attr on rig settings for manual twist control visibility
cmds.select("Rig_Settings")
if i == 0:
cmds.addAttr(longName=(name + "twistCtrlVisUpper"), at = 'bool', dv = 0, keyable = True)
cmds.connectAttr("Rig_Settings." + name + "twistCtrlVisUpper", twistCtrl + ".v")
# add attr to rig settings for the twist ammount values
cmds.select("Rig_Settings")
cmds.addAttr(longName= (name+"UpperarmTwist"+str(i+1)+"Amount"), defaultValue=0.5, minValue=0, maxValue=1, keyable = True)
for u in range(int(i+1)):
cmds.setAttr("Rig_Settings."+name+"UpperarmTwist"+str(u+1)+"Amount", (1.0/(i+2.0)*((2.0-u)+(i-1.0))) )
cmds.parent(blend_driver_upperarmParent, rollGrp)
# Constrain the original joints to the blend joints
cmds.orientConstraint(blend_driver_upperarm, driver_upperarm, n=driver_upperarm+"_0"+str(i+1)+"_OrntCnst", mo=True, w=1)
cmds.pointConstraint(blend_driver_upperarm, driver_upperarm, n=driver_upperarm+"_0"+str(i+1)+"_PtCnst", mo=True, w=1)
cmds.orientConstraint(blend_driver_lowerarm, driver_lowerarm, n=driver_lowerarm+"_0"+str(i+1)+"_OrntCnst", mo=True, w=1 )
cmds.pointConstraint(blend_driver_lowerarm, driver_lowerarm, n=driver_lowerarm+"_0"+str(i+1)+"_PtCnst", mo=True, w=1)
cmds.orientConstraint(twistCtrl, driver_upperarm_twist, n=driver_upperarm_twist+"_0"+str(i+1)+"_OrntCnst", mo=True, w=1)
cmds.pointConstraint(twistCtrl, driver_upperarm_twist, n=driver_upperarm_twist+"_0"+str(i+1)+"_PtCnst", mo=True, w=1)
# Create a driver_upperarm_twist multiplier to multiply the driver_upperarm_twist values by -.5 to counter rotate the driver_upperarm_twist and driver_upperarm back from the overall limb's driver_upperarm_twist amount.
twistMult = cmds.shadingNode("multiplyDivide", asUtility=True, name=blend_driver_upperarm_twist+"_0"+str(i+1)+"_MDnode")
twistMultDrive = cmds.shadingNode("multiplyDivide", asUtility=True, name=blend_driver_upperarm_twist+"_0"+str(i+1)+"_Drive_MDnode")
twistMultDriveInv = cmds.shadingNode("multiplyDivide", asUtility=True, name=blend_driver_upperarm_twist+"_0"+str(i+1)+"_DriveInv_MDnode")
twistMultMultiplier = cmds.shadingNode("multiplyDivide", asUtility=True, name=blend_driver_upperarm_twist+"_0"+str(i+1)+"_Multiplier_MDnode")
twistMultMultiplierInv = cmds.shadingNode("multiplyDivide", asUtility=True, name=blend_driver_upperarm_twist+"_0"+str(i+1)+"_MultiplierInv_MDnode")
twistReverse = cmds.shadingNode("reverse", asUtility=True, name=blend_driver_upperarm_twist+"_0"+str(i+1)+"_REVnode")
cmds.connectAttr(blend_driver_upperarmParent+".rotateX", twistMult+".input1X", force=True)
cmds.connectAttr("Rig_Settings."+name+"UpperarmTwist"+str(i+1)+"Amount", twistReverse+".inputX")
cmds.connectAttr("Rig_Settings."+name+"UpperarmTwist"+str(i+1)+"Amount", twistMultMultiplierInv+".input1X")
cmds.connectAttr(twistReverse+".outputX", twistMultMultiplier+".input1X", force=True)
cmds.connectAttr(twistMult+".outputX", twistMultDrive+".input1X", force=True)
cmds.connectAttr(twistMultMultiplier+".outputX", twistMultDrive+".input2X", force=True)
cmds.connectAttr(twistMult+".outputX", twistMultDriveInv+".input1X", force=True)
cmds.connectAttr(twistMultMultiplierInv+".outputX", twistMultDriveInv+".input2X", force=True)
cmds.connectAttr(twistMultDrive+".outputX", blend_driver_upperarm_twist+".rotateX", force=True)
cmds.connectAttr(twistMultDriveInv+".outputX", blend_driver_upperarm+".rotateX", force=True)
cmds.setAttr(twistMult+".input2X", -0.5)
cmds.setAttr(twistMult+".input2X", l=True)
cmds.setAttr(twistMultMultiplier+".input2X", 2)
cmds.setAttr(twistMultMultiplierInv+".input2X", 2)
cmds.select(blend_driver_upperarmParent)
cmds.parentConstraint(driver_clavicle, rollGrp, mo=True)
print ".END Building the "+ suffix +" UPPER arm twists---------------------------------------" | StarcoderdataPython |
1600893 | from sys import stdout, stderr
from core.error import print_error, Errors
from core.parser import TokenType
class Interpreter:
def __init__(
self,
tree,
cell_array_size: int,
filepath: str = None,
silent: bool = False,
print_to_string: bool = False,
exit_on_fail: bool = True,
input_text: str = "",
) -> None:
if print_to_string:
stderr.write("WARNING: print_to_string has not been implemented\n\r")
self.tree = tree
self.filepath = filepath
self.silent = silent
# self.print_to_string = print_to_string
self.exit_on_fail = exit_on_fail
self.input_text = input_text
self.CELL_ARRAY = [0] * cell_array_size
self.CELL_POINTER = 0
self.index = 0
self.line = 0
self.input_text_index = 0
# self.output = ""
def interpret(self) -> None | Errors:
for node in self.tree:
res = self.__interpret_node(node)
if isinstance(res, Errors):
return res
def __interpret_node(self, node) -> None | Errors:
if type(node) == list:
tmp_ptr = self.CELL_POINTER
while self.CELL_ARRAY[tmp_ptr]:
# print(self.CELL_ARRAY[tmp_ptr])
# print(node)
for n in node:
self.__interpret_node(n)
match node:
case TokenType.ADD:
self.CELL_ARRAY[self.CELL_POINTER] += 1
case TokenType.MINUS:
self.CELL_ARRAY[self.CELL_POINTER] -= 1
case TokenType.CELL_SHIFT_LEFT:
if self.CELL_POINTER <= 0:
if not self.silent:
print_error(
Errors.NEGATIVE_CELL_POINTER,
filepath=self.filepath,
index=(self.line, self.index),
)
if self.exit_on_fail:
exit(1)
else:
return Errors.NEGATIVE_CELL_POINTER
self.CELL_POINTER -= 1
case TokenType.CELL_SHIFT_RIGHT:
if self.CELL_POINTER >= len(self.CELL_ARRAY) - 1:
if not self.silent:
print_error(
Errors.OVERFLOW_CELL_POINTER,
filepath=self.filepath,
index=(self.line, self.index),
)
if self.exit_on_fail:
exit(1)
else:
return Errors.OVERFLOW_CELL_POINTER
self.CELL_POINTER += 1
case TokenType.PRINT_CHAR:
if self.CELL_ARRAY[self.CELL_POINTER] < 0:
if not self.silent:
print_error(
Errors.PRINT_NEGATIVE_CELL_VALUE,
filepath=self.filepath,
index=(self.line, self.index),
)
if self.exit_on_fail:
exit(1)
else:
return Errors.PRINT_NEGATIVE_CELL_VALUE
if not self.silent:
stdout.write(chr(self.CELL_ARRAY[self.CELL_POINTER]))
case TokenType.PRINT_NUM:
if not self.silent:
stdout.write(str(self.CELL_ARRAY[self.CELL_POINTER]))
case TokenType.PRINT_NEWLINE:
if not self.silent:
stdout.write("\n")
case TokenType.ALPHA_RESET:
self.CELL_ARRAY[self.CELL_POINTER] = ord("a")
case TokenType.UPPER_ALPHA_RESET:
self.CELL_ARRAY[self.CELL_POINTER] = ord("A")
case TokenType.HARD_RESET:
self.CELL_ARRAY[self.CELL_POINTER] = 0
case TokenType.VALUE_GET:
if not self.input_text:
self.CELL_ARRAY[self.CELL_POINTER] = 0
else:
self.CELL_ARRAY[self.CELL_POINTER] = ord(
self.input_text[self.input_text_index]
)
if self.input_text_index < len(self.input_text):
self.input_text_index += 1
case TokenType.NEWLINE:
self.line += 1
self.index = -1
case TokenType.WHITESPACE:
pass # handled below
self.index += 1
| StarcoderdataPython |
118675 | # from fit import FitCLI, fit, fit_sat
# from . import fit # works weirdly
from .fit import FitCLI, fit, fit_sat
__all__ = ["FitCLI", "fit", "fit_sat"]
| StarcoderdataPython |
3283701 | <gh_stars>10-100
# Copyright 2021 The MLX Contributors
#
# SPDX-License-Identifier: Apache-2.0
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.api_dataset import ApiDataset # noqa: F401,E501
from swagger_server import util
class ApiListDatasetsResponse(Model):
    """Response body for a paginated dataset listing.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, datasets: List[ApiDataset]=None, total_size: int=None, next_page_token: str=None):  # noqa: E501
        """ApiListDatasetsResponse - a model defined in Swagger

        :param datasets: one page of datasets. # noqa: E501
        :type datasets: List[ApiDataset]
        :param total_size: total number of datasets available. # noqa: E501
        :type total_size: int
        :param next_page_token: token to request the following page. # noqa: E501
        :type next_page_token: str
        """
        self.swagger_types = {
            'datasets': List[ApiDataset],
            'total_size': int,
            'next_page_token': str,
        }
        # JSON field names match the attribute names one-to-one.
        self.attribute_map = {name: name for name in self.swagger_types}

        self._datasets = datasets
        self._total_size = total_size
        self._next_page_token = next_page_token

    @classmethod
    def from_dict(cls, dikt) -> 'ApiListDatasetsResponse':
        """Deserialize *dikt* into a model instance.

        :param dikt: A dict.
        :type: dict
        :return: the deserialized ApiListDatasetsResponse.
        :rtype: ApiListDatasetsResponse
        """
        return util.deserialize_model(dikt, cls)

    @property
    def datasets(self) -> List[ApiDataset]:
        """The page of datasets carried by this response.

        :rtype: List[ApiDataset]
        """
        return self._datasets

    @datasets.setter
    def datasets(self, datasets: List[ApiDataset]):
        """Set the page of datasets carried by this response.

        :type datasets: List[ApiDataset]
        """
        self._datasets = datasets

    @property
    def total_size(self) -> int:
        """Total number of datasets available server-side.

        :rtype: int
        """
        return self._total_size

    @total_size.setter
    def total_size(self, total_size: int):
        """Set the total number of datasets available server-side.

        :type total_size: int
        """
        self._total_size = total_size

    @property
    def next_page_token(self) -> str:
        """Opaque token used to request the next page of results.

        :rtype: str
        """
        return self._next_page_token

    @next_page_token.setter
    def next_page_token(self, next_page_token: str):
        """Set the opaque token used to request the next page of results.

        :type next_page_token: str
        """
        self._next_page_token = next_page_token
| StarcoderdataPython |
151055 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import subprocess
import time
import requests
import hcl
import distutils.spawn
from unittest import SkipTest
from tests.utils import get_config_file_path, load_config_file, create_client
logger = logging.getLogger(__name__)
class ServerManager(object):
    """Runs vault process running with test configuration and associates a hvac Client instance with this process."""

    def __init__(self, config_paths, client, use_consul=False):
        """Set up class attributes for managing a vault server process.

        :param config_paths: Full path to the Vault config to use when launching `vault server`.
        :type config_paths: list[str]
        :param client: Hvac Client that is used to initialize the vault server process.
        :type client: hvac.v1.Client
        :param use_consul: When True, also launch a local consul agent before starting Vault.
        :type use_consul: bool
        """
        self.config_paths = config_paths
        self.client = client
        self.use_consul = use_consul

        # Unseal keys and root token; populated by initialize().
        self.keys = None
        self.root_token = None

        # Child processes started by this manager (one vault per config path,
        # plus consul when use_consul is set); terminated in stop().
        self._processes = []

    def start(self):
        """Launch the vault server process and wait until its online and ready."""
        if self.use_consul:
            self.start_consul()

        if distutils.spawn.find_executable('vault') is None:
            raise SkipTest('Vault executable not found')

        # If a vault server is already running then we won't be able to start another one.
        # If we can't start our vault server then we don't know what we're testing against.
        # An exception here means no server answered, which is what we want.
        try:
            self.client.sys.is_initialized()
        except Exception:
            pass
        else:
            raise Exception('Vault server already running')

        cluster_ready = False
        for config_path in self.config_paths:
            command = ['vault', 'server', '-config=' + config_path]
            logger.debug('Starting vault server with command: {cmd}'.format(cmd=command))
            process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self._processes.append(process)
            logger.debug('Spawned vault server with PID {pid}'.format(pid=process.pid))
            attempts_left = 20
            last_exception = None
            # Poll the client until the server answers. Once cluster_ready is
            # set by the first server, later config paths skip the wait loop.
            while attempts_left > 0 and not cluster_ready:
                try:
                    logger.debug('Checking if vault is ready...')
                    self.client.sys.is_initialized()
                    cluster_ready = True
                    break
                except Exception as ex:
                    # poll() returning a code means the child already exited.
                    if process.poll() is not None:
                        raise Exception('Vault server terminated before becoming ready')
                    logger.debug('Waiting for Vault to start')
                    time.sleep(0.5)
                    attempts_left -= 1
                    last_exception = ex
            if not cluster_ready:
                if process.poll() is None:
                    process.kill()
                # Capture the child's output so the failure message shows why
                # the server never became ready.
                stdout, stderr = process.communicate()
                raise Exception(
                    'Unable to start Vault in background:\n{err}\n{stdout}\n{stderr}'.format(
                        err=last_exception,
                        stdout=stdout,
                        stderr=stderr,
                    )
                )

    def start_consul(self):
        """Launch a local consul dev agent and wait until its node reports healthy."""
        if distutils.spawn.find_executable('consul') is None:
            raise SkipTest('Consul executable not found')

        # A reachable catalog endpoint means a consul agent is already up.
        try:
            requests.get('http://127.0.0.1:8500/v1/catalog/nodes')
        except Exception:
            pass
        else:
            raise Exception('Consul service already running')

        command = ['consul', 'agent', '-dev']
        logger.debug('Starting consul service with command: {cmd}'.format(cmd=command))
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self._processes.append(process)
        attempts_left = 20
        last_exception = None
        while attempts_left > 0:
            try:
                catalog_nodes_response = requests.get('http://127.0.0.1:8500/v1/catalog/nodes')
                nodes_list = catalog_nodes_response.json()
                logger.debug('JSON response from request to consul/v1/catalog/noses: {resp}'.format(resp=nodes_list))
                node_name = nodes_list[0]['Node']
                logger.debug('Current consul node name: {name}'.format(name=node_name))
                node_health_response = requests.get('http://127.0.0.1:8500/v1/health/node/{name}'.format(name=node_name))
                node_health = node_health_response.json()
                logger.debug('Node health response: {resp}'.format(resp=node_health))
                assert node_health[0]['Status'] == 'passing', 'Node {name} status != "passing"'.format(name=node_name)
                return True
            except Exception as error:
                if process.poll() is not None:
                    raise Exception('Consul service terminated before becoming ready')
                logger.debug('Unable to connect to consul while waiting for process to start: {err}'.format(err=error))
                time.sleep(0.5)
                attempts_left -= 1
                last_exception = error
        raise Exception('Unable to start consul in background: {0}'.format(last_exception))

    def stop(self):
        """Stop the vault server process being managed by this class."""
        for process_num, process in enumerate(self._processes):
            logger.debug('Terminating vault server with PID {pid}'.format(pid=process.pid))
            if process.poll() is None:
                process.kill()

            # Optionally dump each child's captured output to log files for
            # post-mortem debugging.
            if os.getenv('HVAC_OUTPUT_VAULT_STDERR', False):
                stdout_lines, stderr_lines = process.communicate()
                stderr_filename = 'vault{num}_stderr.log'.format(num=process_num)
                with open(get_config_file_path(stderr_filename), 'w') as f:
                    logger.debug(stderr_lines.decode())
                    f.writelines(stderr_lines.decode())
                stdout_filename = 'vault{num}_stdout.log'.format(num=process_num)
                with open(get_config_file_path(stdout_filename), 'w') as f:
                    logger.debug(stdout_lines.decode())
                    f.writelines(stdout_lines.decode())

    def initialize(self):
        """Perform initialization of the vault server process and record the provided unseal keys and root token."""
        assert not self.client.sys.is_initialized()

        result = self.client.sys.initialize()

        self.root_token = result['root_token']
        self.keys = result['keys']

    def restart_vault_cluster(self, perform_init=True):
        # Full stop/start cycle; re-initializes by default since a fresh
        # server starts uninitialized.
        self.stop()
        self.start()
        if perform_init:
            self.initialize()

    def get_active_vault_addresses(self):
        # Parse each HCL config for an explicit listener address, falling
        # back to Vault's default address when none is configured.
        vault_addresses = []
        for config_path in self.config_paths:
            config_hcl = load_config_file(config_path)
            config = hcl.loads(config_hcl)
            try:
                vault_address = 'https://{addr}'.format(addr=config['listener']['tcp']['address'])
            except KeyError as error:
                logger.debug('Unable to find explict Vault address in config file {path}: {err}'.format(
                    path=config_path,
                    err=error,
                ))
                vault_address = 'https://127.0.0.1:8200'
                logger.debug('Using default address: {addr}'.format(addr=vault_address))
            vault_addresses.append(vault_address)
        return vault_addresses

    def unseal(self):
        """Unseal the vault server process."""
        vault_addresses = self.get_active_vault_addresses()
        for vault_address in vault_addresses:
            create_client(url=vault_address).sys.submit_unseal_keys(self.keys)
| StarcoderdataPython |
1610119 | <gh_stars>1-10
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from collections import defaultdict
import copy
from typing import Dict, List, Tuple
import networkx as nx
import warnings
import sympy
from dace.transformation import transformation as xf
from dace import (data, dtypes, nodes, properties, registry, memlet as mm, subsets, symbolic, symbol, Memlet)
from dace.sdfg import SDFG, SDFGState, utils as sdutil, graph as gr
from dace.libraries.standard import Gearbox
def get_post_state(sdfg: SDFG, state: SDFGState):
    """
    Returns the post state (the state that copies the data back from the FPGA
    device) matching *state*, or None if no such state exists.
    """
    wanted = 'post_' + str(state)
    for nested_sdfg in sdfg.all_sdfgs_recursive():
        for candidate in nested_sdfg.states():
            # Post states are identified purely by their naming convention.
            if str(candidate) == wanted:
                return candidate
    return None
def is_int(i):
    """Return True if *i* is a Python int or a SymPy Integer."""
    # Check the builtin type first so SymPy is only touched when needed.
    if isinstance(i, int):
        return True
    return isinstance(i, sympy.core.numbers.Integer)
def _collect_map_ranges(state: SDFGState,
                        memlet_path: List[gr.MultiConnectorEdge[mm.Memlet]]) -> List[Tuple[str, subsets.Range]]:
    """
    Collects (parameter, range) pairs for every map (entry or exit)
    traversed by the given memlet path.
    """
    collected: List[Tuple[str, subsets.Range]] = []
    # A path containing a MapExit source is an outgoing (write) path.
    if any(isinstance(edge.src, nodes.MapExit) for edge in memlet_path):
        # Walk backwards so ranges are listed from outermost to innermost.
        for edge in reversed(memlet_path):
            if isinstance(edge.src, nodes.MapExit):
                entry = state.entry_node(edge.src)
                collected.extend(zip(entry.params, entry.range))
    else:  # Incoming (read) path: walk forwards through map entries.
        for edge in memlet_path:
            if isinstance(edge.dst, nodes.MapEntry):
                collected.extend(zip(edge.dst.params, edge.dst.range))
    return collected
def _canonicalize_memlet(memlet: mm.Memlet, mapranges: List[Tuple[str, subsets.Range]]) -> Tuple[symbolic.SymbolicType]:
    """
    Rewrites a single-element memlet subset expression so that it no longer
    depends on the surrounding map parameter names (using ``__dace0``,
    ``__dace1``, ... placeholders instead).
    """
    substitutions = {
        symbolic.symbol(param): symbolic.symbol('__dace%d' % position)
        for position, (param, _) in enumerate(mapranges)
    }
    return tuple(begin.subs(substitutions) for begin, _, _ in memlet.subset.ndrange())
def _do_memlets_correspond(memlet_a: mm.Memlet, memlet_b: mm.Memlet, mapranges_a: List[Tuple[str, subsets.Range]],
                           mapranges_b: List[Tuple[str, subsets.Range]]) -> bool:
    """
    Returns True if the two memlets correspond to each other, disregarding
    symbols from equivalent maps.
    """
    # Map A's parameter names onto B's so subsets become comparable.
    rename = {
        symbolic.symbol(param_a): symbolic.symbol(param_b)
        for (param_a, _), (param_b, _) in zip(mapranges_a, mapranges_b)
    }
    for sub_a, sub_b in zip(memlet_a.subset, memlet_b.subset):
        # Both subsets cover a single element, so comparing the start
        # offsets is sufficient.
        if sub_a[0].subs(rename) != sub_b[0]:
            return False
    return True
def _streamify_recursive(node: nodes.NestedSDFG, to_replace: str, desc: data.Stream):
    """ Helper function that changes an array in a nested SDFG to a stream. """
    nsdfg: SDFG = node.sdfg
    newdesc = copy.deepcopy(desc)
    newdesc.transient = False
    nsdfg.arrays[to_replace] = newdesc

    # Rewrite every memlet path touching the replaced array to a
    # single-element stream access, recursing into nested SDFGs.
    for state in nsdfg.nodes():
        for dnode in state.data_nodes():
            if dnode.data != to_replace:
                continue
            for edge in state.all_edges(dnode):
                for path_edge in state.memlet_path(edge):
                    path_edge.data = mm.Memlet(data=to_replace, subset='0',
                                               other_subset=path_edge.data.other_subset)
                    for endpoint, conn in ((path_edge.src, path_edge.src_conn),
                                           (path_edge.dst, path_edge.dst_conn)):
                        if isinstance(endpoint, nodes.NestedSDFG):
                            # Nested accesses make the volume unknowable.
                            path_edge.data.dynamic = True
                            _streamify_recursive(endpoint, conn, newdesc)
@properties.make_properties
class StreamingMemory(xf.SingleStateTransformation):
    """
    Converts a read or a write to streaming memory access, where data is
    read/written to/from a stream in a separate connected component than the
    computation.
    If 'use_memory_buffering' is True, the transformation reads/writes data from memory
    using a wider data format (e.g. 512 bits), and then convert it
    on the fly to the right data type used by the computation:
    """
    # Pattern nodes: an access node adjacent to a map entry (read case)
    # or a map exit adjacent to an access node (write case).
    access = xf.PatternNode(nodes.AccessNode)
    entry = xf.PatternNode(nodes.EntryNode)
    exit = xf.PatternNode(nodes.ExitNode)

    buffer_size = properties.Property(dtype=int, default=1, desc='Set buffer size for the newly-created stream')

    storage = properties.EnumProperty(dtype=dtypes.StorageType,
                                      desc='Set storage type for the newly-created stream',
                                      default=dtypes.StorageType.Default)

    use_memory_buffering = properties.Property(dtype=bool,
                                               default=False,
                                               desc='Set if memory buffering should be used.')

    memory_buffering_target_bytes = properties.Property(
        dtype=int, default=64, desc='Set bytes read/written from memory if memory buffering is enabled.')

    @classmethod
    def expressions(cls) -> List[gr.SubgraphView]:
        # Two patterns: expr_index 0 matches reads (access -> map entry),
        # expr_index 1 matches writes (map exit -> access).
        return [
            sdutil.node_path_graph(cls.access, cls.entry),
            sdutil.node_path_graph(cls.exit, cls.access),
        ]

    def can_be_applied(self, graph: SDFGState, expr_index: int, sdfg: SDFG, permissive: bool = False) -> bool:
        """Check whether the matched access node can be streamified."""
        access = self.access
        # Make sure the access node is only accessed once (read or write),
        # and not at the same time
        if graph.out_degree(access) > 0 and graph.in_degree(access) > 0:
            return False

        # If already a stream, skip
        if isinstance(sdfg.arrays[access.data], data.Stream):
            return False
        # If does not exist on off-chip memory, skip
        if sdfg.arrays[access.data].storage not in [
                dtypes.StorageType.CPU_Heap, dtypes.StorageType.CPU_Pinned, dtypes.StorageType.GPU_Global,
                dtypes.StorageType.FPGA_Global
        ]:
            return False

        # Only free nodes are allowed (search up the SDFG tree)
        curstate = graph
        node = access
        while curstate is not None:
            if curstate.entry_node(node) is not None:
                return False
            if curstate.parent.parent_nsdfg_node is None:
                break
            node = curstate.parent.parent_nsdfg_node
            curstate = curstate.parent.parent

        # Only one memlet path is allowed per outgoing/incoming edge
        edges = (graph.out_edges(access) if expr_index == 0 else graph.in_edges(access))
        for edge in edges:
            mpath = graph.memlet_path(edge)
            if len(mpath) != len(list(graph.memlet_tree(edge))):
                return False

            # The innermost end of the path must have a clearly defined memory
            # access pattern
            innermost_edge = mpath[-1] if expr_index == 0 else mpath[0]
            if (innermost_edge.data.subset.num_elements() != 1 or innermost_edge.data.dynamic
                    or innermost_edge.data.volume != 1):
                return False

            # Check if any of the maps has a dynamic range
            # These cases can potentially work but some nodes (and perhaps
            # tasklets) need to be replicated, which are difficult to track.
            for pe in mpath:
                node = pe.dst if expr_index == 0 else graph.entry_node(pe.src)
                if isinstance(node, nodes.MapEntry) and sdutil.has_dynamic_map_inputs(graph, node):
                    return False

        # If already applied on this memlet and this is the I/O component, skip
        # (components created by apply() are prefixed with '__s').
        if expr_index == 0:
            other_node = self.entry
        else:
            other_node = self.exit
            other_node = graph.entry_node(other_node)
        if other_node.label.startswith('__s'):
            return False

        ## Check Memory Buffering Properties
        if self.use_memory_buffering:

            access = self.access
            desc = sdfg.arrays[access.data]

            # Array has to be global array
            if desc.storage != dtypes.StorageType.FPGA_Global:
                return False

            # Type has to divide target bytes
            if self.memory_buffering_target_bytes % desc.dtype.bytes != 0:
                return False

            # Target bytes has to be >= size of data type
            if self.memory_buffering_target_bytes < desc.dtype.bytes:
                return False

            strides = list(desc.strides)

            # Last stride has to be one
            if strides[-1] != 1:
                return False

            vector_size = int(self.memory_buffering_target_bytes / desc.dtype.bytes)
            strides.pop()  # Remove last element since we already checked it

            # Other strides have to be divisible by vector size
            for stride in strides:
                if is_int(stride) and stride % vector_size != 0:
                    return False

            # Check if map has the right access pattern
            # Stride 1 access by innermost loop, innermost loop counter has to be divisible by vector size
            # Same code as in apply
            state = sdfg.node(self.state_id)
            dnode: nodes.AccessNode = self.access
            if self.expr_index == 0:
                edges = state.out_edges(dnode)
            else:
                edges = state.in_edges(dnode)

            mapping: Dict[Tuple[subsets.Range], List[gr.MultiConnectorEdge[mm.Memlet]]] = defaultdict(list)
            ranges = {}
            for edge in edges:
                mpath = state.memlet_path(edge)
                ranges[edge] = _collect_map_ranges(state, mpath)
                mapping[tuple(r[1] for r in ranges[edge])].append(edge)

            for edges_with_same_range in mapping.values():
                for edge in edges_with_same_range:
                    # Get memlet path and innermost edge
                    mpath = state.memlet_path(edge)
                    innermost_edge = copy.deepcopy(mpath[-1] if self.expr_index == 0 else mpath[0])

                    edge_subset = [a_tuple[0] for a_tuple in list(innermost_edge.data.subset)]

                    if self.expr_index == 0:
                        map_subset = innermost_edge.src.map.params.copy()
                        ranges = list(innermost_edge.src.map.range)
                    else:
                        map_subset = innermost_edge.dst.map.params.copy()
                        ranges = list(innermost_edge.dst.map.range)

                    # Check is correct access pattern
                    # Correct ranges in map
                    if is_int(ranges[-1][1]) and (ranges[-1][1] + 1) % vector_size != 0:
                        return False

                    if ranges[-1][2] != 1:
                        return False

                    # Correct access in array: the innermost map parameter
                    # must appear exactly once in the last subset dimension.
                    if isinstance(edge_subset[-1], symbol) and str(edge_subset[-1]) == map_subset[-1]:
                        pass

                    elif isinstance(edge_subset[-1], sympy.core.add.Add):

                        counter: int = 0

                        for arg in edge_subset[-1].args:
                            if isinstance(arg, symbol) and str(arg) == map_subset[-1]:
                                counter += 1

                        if counter != 1:
                            return False

                    else:
                        return False

        return True

    def apply(self, state: SDFGState, sdfg: SDFG) -> nodes.AccessNode:
        """Rewrite the matched access into stream-based I/O components and
        return the newly-created stream access nodes."""
        dnode: nodes.AccessNode = self.access
        if self.expr_index == 0:
            edges = state.out_edges(dnode)
        else:
            edges = state.in_edges(dnode)

        # To understand how many components we need to create, all map ranges
        # throughout memlet paths must match exactly. We thus create a
        # dictionary of unique ranges
        mapping: Dict[Tuple[subsets.Range], List[gr.MultiConnectorEdge[mm.Memlet]]] = defaultdict(list)
        ranges = {}
        for edge in edges:
            mpath = state.memlet_path(edge)
            ranges[edge] = _collect_map_ranges(state, mpath)
            mapping[tuple(r[1] for r in ranges[edge])].append(edge)

        # Collect all edges with the same memory access pattern
        components_to_create: Dict[Tuple[symbolic.SymbolicType],
                                   List[gr.MultiConnectorEdge[mm.Memlet]]] = defaultdict(list)
        for edges_with_same_range in mapping.values():
            for edge in edges_with_same_range:
                # Get memlet path and innermost edge
                mpath = state.memlet_path(edge)
                innermost_edge = copy.deepcopy(mpath[-1] if self.expr_index == 0 else mpath[0])

                # Store memlets of the same access in the same component
                expr = _canonicalize_memlet(innermost_edge.data, ranges[edge])
                components_to_create[expr].append((innermost_edge, edge))
        components = list(components_to_create.values())

        # Split out components that have dependencies between them to avoid
        # deadlocks
        if self.expr_index == 0:
            ccs_to_add = []
            for i, component in enumerate(components):
                edges_to_remove = set()
                for cedge in component:
                    if any(nx.has_path(state.nx, o[1].dst, cedge[1].dst) for o in component if o is not cedge):
                        ccs_to_add.append([cedge])
                        edges_to_remove.add(cedge)
                if edges_to_remove:
                    components[i] = [c for c in component if c not in edges_to_remove]
            components.extend(ccs_to_add)
        # End of split

        desc = sdfg.arrays[dnode.data]

        # Create new streams of shape 1
        streams = {}
        mpaths = {}
        for edge in edges:

            if self.use_memory_buffering:

                arrname = str(self.access)

                # Add gearbox: converts between the wide memory word and the
                # computation's element type.
                total_size = edge.data.volume
                vector_size = int(self.memory_buffering_target_bytes / desc.dtype.bytes)

                if not is_int(sdfg.arrays[dnode.data].shape[-1]):
                    warnings.warn(
                        "Using the MemoryBuffering transformation is potential unsafe since {sym} is not an integer. There should be no issue if {sym} % {vec} == 0"
                        .format(sym=sdfg.arrays[dnode.data].shape[-1], vec=vector_size))

                for i in sdfg.arrays[dnode.data].strides:
                    if not is_int(i):
                        warnings.warn(
                            "Using the MemoryBuffering transformation is potential unsafe since {sym} is not an integer. There should be no issue if {sym} % {vec} == 0"
                            .format(sym=i, vec=vector_size))

                if self.expr_index == 0:  # Read
                    edges = state.out_edges(dnode)
                    gearbox_input_type = dtypes.vector(desc.dtype, vector_size)
                    gearbox_output_type = desc.dtype
                    gearbox_read_volume = total_size / vector_size
                    gearbox_write_volume = total_size
                else:  # Write
                    edges = state.in_edges(dnode)
                    gearbox_input_type = desc.dtype
                    gearbox_output_type = dtypes.vector(desc.dtype, vector_size)
                    gearbox_read_volume = total_size
                    gearbox_write_volume = total_size / vector_size

                input_gearbox_name, input_gearbox_newdesc = sdfg.add_stream("gearbox_input",
                                                                            gearbox_input_type,
                                                                            buffer_size=self.buffer_size,
                                                                            storage=self.storage,
                                                                            transient=True,
                                                                            find_new_name=True)

                output_gearbox_name, output_gearbox_newdesc = sdfg.add_stream("gearbox_output",
                                                                              gearbox_output_type,
                                                                              buffer_size=self.buffer_size,
                                                                              storage=self.storage,
                                                                              transient=True,
                                                                              find_new_name=True)

                read_to_gearbox = state.add_read(input_gearbox_name)
                write_from_gearbox = state.add_write(output_gearbox_name)

                gearbox = Gearbox(total_size / vector_size)

                state.add_node(gearbox)

                state.add_memlet_path(read_to_gearbox,
                                      gearbox,
                                      dst_conn="from_memory",
                                      memlet=Memlet(input_gearbox_name + "[0]", volume=gearbox_read_volume))
                state.add_memlet_path(gearbox,
                                      write_from_gearbox,
                                      src_conn="to_kernel",
                                      memlet=Memlet(output_gearbox_name + "[0]", volume=gearbox_write_volume))

                if self.expr_index == 0:
                    streams[edge] = input_gearbox_name
                    name = output_gearbox_name
                    newdesc = output_gearbox_newdesc
                else:
                    streams[edge] = output_gearbox_name
                    name = input_gearbox_name
                    newdesc = input_gearbox_newdesc

            else:
                # No memory buffering: a plain element-typed stream suffices.
                name, newdesc = sdfg.add_stream(dnode.data,
                                                desc.dtype,
                                                buffer_size=self.buffer_size,
                                                storage=self.storage,
                                                transient=True,
                                                find_new_name=True)
                streams[edge] = name

                # Add these such that we can easily use output_gearbox_name and input_gearbox_name without using if statements
                output_gearbox_name = name
                input_gearbox_name = name

            mpath = state.memlet_path(edge)
            mpaths[edge] = mpath

            # Replace memlets in path with stream access
            for e in mpath:
                e.data = mm.Memlet(data=name, subset='0', other_subset=e.data.other_subset)
                if isinstance(e.src, nodes.NestedSDFG):
                    e.data.dynamic = True
                    _streamify_recursive(e.src, e.src_conn, newdesc)
                if isinstance(e.dst, nodes.NestedSDFG):
                    e.data.dynamic = True
                    _streamify_recursive(e.dst, e.dst_conn, newdesc)

            # Replace access node and memlet tree with one access
            if self.expr_index == 0:
                replacement = state.add_read(output_gearbox_name)
                state.remove_edge(edge)
                state.add_edge(replacement, edge.src_conn, edge.dst, edge.dst_conn, edge.data)
            else:
                replacement = state.add_write(input_gearbox_name)
                state.remove_edge(edge)
                state.add_edge(edge.src, edge.src_conn, replacement, edge.dst_conn, edge.data)

        if self.use_memory_buffering:

            arrname = str(self.access)
            vector_size = int(self.memory_buffering_target_bytes / desc.dtype.bytes)

            # Vectorize access to global array.
            dtype = sdfg.arrays[arrname].dtype
            sdfg.arrays[arrname].dtype = dtypes.vector(dtype, vector_size)
            new_shape = list(sdfg.arrays[arrname].shape)
            contigidx = sdfg.arrays[arrname].strides.index(1)
            new_shape[contigidx] /= vector_size
            try:
                new_shape[contigidx] = int(new_shape[contigidx])
            except TypeError:
                # Symbolic shape: keep the (symbolic) divided expression.
                pass
            sdfg.arrays[arrname].shape = new_shape

            # Change strides
            new_strides: List = list(sdfg.arrays[arrname].strides)

            for i in range(len(new_strides)):
                if i == len(new_strides) - 1:  # Skip last dimension since it is always 1
                    continue
                new_strides[i] = new_strides[i] / vector_size
            sdfg.arrays[arrname].strides = new_strides

            post_state = get_post_state(sdfg, state)

            if post_state != None:
                # Change subset in the post state such that the correct amount of memory is copied back from the device
                for e in post_state.edges():
                    if e.data.data == self.access.data:
                        new_subset = list(e.data.subset)
                        i, j, k = new_subset[-1]
                        new_subset[-1] = (i, (j + 1) / vector_size - 1, k)
                        e.data = mm.Memlet(data=str(e.src), subset=subsets.Range(new_subset))

        # Make read/write components
        ionodes = []
        for component in components:

            # Pick the first edge as the edge to make the component from
            innermost_edge, outermost_edge = component[0]
            mpath = mpaths[outermost_edge]
            mapname = streams[outermost_edge]
            innermost_edge.data.other_subset = None

            # Get edge data and streams
            if self.expr_index == 0:
                opname = 'read'
                path = [e.dst for e in mpath[:-1]]
                rmemlets = [(dnode, '__inp', innermost_edge.data)]
                wmemlets = []
                for i, (_, edge) in enumerate(component):
                    name = streams[edge]
                    ionode = state.add_write(name)
                    ionodes.append(ionode)
                    wmemlets.append((ionode, '__out%d' % i, mm.Memlet(data=name, subset='0')))
                code = '\n'.join('__out%d = __inp' % i for i in range(len(component)))
            else:
                # More than one input stream might mean a data race, so we only
                # address the first one in the tasklet code
                if len(component) > 1:
                    warnings.warn(f'More than one input found for the same index for {dnode.data}')
                opname = 'write'
                path = [state.entry_node(e.src) for e in reversed(mpath[1:])]
                wmemlets = [(dnode, '__out', innermost_edge.data)]
                rmemlets = []
                for i, (_, edge) in enumerate(component):
                    name = streams[edge]
                    ionode = state.add_read(name)
                    ionodes.append(ionode)
                    rmemlets.append((ionode, '__inp%d' % i, mm.Memlet(data=name, subset='0')))
                code = '__out = __inp0'

            # Create map structure for read/write component
            maps = []
            for entry in path:
                map: nodes.Map = entry.map

                ranges = [(p, (r[0], r[1], r[2])) for p, r in zip(map.params, map.range)]

                # Change ranges of map
                if self.use_memory_buffering:

                    # Find edges from/to map
                    edge_subset = [a_tuple[0] for a_tuple in list(innermost_edge.data.subset)]

                    # Change range of map
                    if isinstance(edge_subset[-1], symbol) and str(edge_subset[-1]) == map.params[-1]:

                        if not is_int(ranges[-1][1][1]):

                            warnings.warn(
                                "Using the MemoryBuffering transformation is potential unsafe since {sym} is not an integer. There should be no issue if {sym} % {vec} == 0"
                                .format(sym=ranges[-1][1][1].args[1], vec=vector_size))

                        ranges[-1] = (ranges[-1][0], (ranges[-1][1][0], (ranges[-1][1][1] + 1) / vector_size - 1,
                                                      ranges[-1][1][2]))

                    elif isinstance(edge_subset[-1], sympy.core.add.Add):

                        for arg in edge_subset[-1].args:
                            if isinstance(arg, symbol) and str(arg) == map.params[-1]:

                                if not is_int(ranges[-1][1][1]):
                                    warnings.warn(
                                        "Using the MemoryBuffering transformation is potential unsafe since {sym} is not an integer. There should be no issue if {sym} % {vec} == 0"
                                        .format(sym=ranges[-1][1][1].args[1], vec=vector_size))

                                ranges[-1] = (ranges[-1][0],
                                              (ranges[-1][1][0], (ranges[-1][1][1] + 1) / vector_size - 1,
                                               ranges[-1][1][2]))

                maps.append(state.add_map(f'__s{opname}_{mapname}', ranges, map.schedule))

            tasklet = state.add_tasklet(
                f'{opname}_{mapname}',
                {m[1]
                 for m in rmemlets},
                {m[1]
                 for m in wmemlets},
                code,
            )
            for node, cname, memlet in rmemlets:
                state.add_memlet_path(node, *(me for me, _ in maps), tasklet, dst_conn=cname, memlet=memlet)
            for node, cname, memlet in wmemlets:
                state.add_memlet_path(tasklet, *(mx for _, mx in reversed(maps)), node, src_conn=cname, memlet=memlet)

        return ionodes
@properties.make_properties
class StreamingComposition(xf.SingleStateTransformation):
    """
    Converts two connected computations (nodes, map scopes) into two separate
    processing elements, with a stream connecting the results. Only applies
    if the memory access patterns of the two computations match.
    """

    # Matched pattern: <first computation> -> <access node> -> <second computation>
    first = xf.PatternNode(nodes.Node)
    access = xf.PatternNode(nodes.AccessNode)
    second = xf.PatternNode(nodes.Node)

    # Configuration of the stream that will replace the intermediate array.
    buffer_size = properties.Property(dtype=int, default=1, desc='Set buffer size for the newly-created stream')
    storage = properties.EnumProperty(dtype=dtypes.StorageType,
                                      desc='Set storage type for the newly-created stream',
                                      default=dtypes.StorageType.Default)

    @classmethod
    def expressions(cls) -> List[gr.SubgraphView]:
        """Match the three-node path first -> access -> second."""
        return [sdutil.node_path_graph(cls.first, cls.access, cls.second)]

    def can_be_applied(self, graph: SDFGState, expr_index: int, sdfg: SDFG, permissive: bool = False) -> bool:
        """Return True only when the intermediate array can safely become a stream.

        Checks, in order: single producer/consumer, not already a stream,
        access node is free (not inside any map scope, searching up through
        nested SDFGs), array unused elsewhere in the state, exactly one memlet
        path per direction, unit-volume/no-WCR innermost memlets, and finally
        that both sides traverse identical map ranges and corresponding
        memlet subsets.
        """
        access = self.access
        # Make sure the access node is only accessed once (read or write),
        # and not at the same time
        if graph.in_degree(access) > 1 or graph.out_degree(access) > 1:
            return False

        # If already a stream, skip
        desc = sdfg.arrays[access.data]
        if isinstance(desc, data.Stream):
            return False

        # If this check is in the code, almost all applications of StreamingComposition must be permissive
        # if not permissive and desc.transient:
        #     return False

        # Only free nodes are allowed (search up the SDFG tree)
        curstate = graph
        node = access
        while curstate is not None:
            if curstate.entry_node(node) is not None:
                return False
            if curstate.parent.parent_nsdfg_node is None:
                break
            node = curstate.parent.parent_nsdfg_node
            curstate = curstate.parent.parent

        # Array must not be used anywhere else in the state
        if any(n is not access and n.data == access.data for n in graph.data_nodes()):
            return False

        # Only one memlet path on each direction is allowed
        # TODO: Relax so that repeated application of
        #       transformation would yield additional streams
        first_edge = graph.in_edges(access)[0]
        second_edge = graph.out_edges(access)[0]
        first_mpath = graph.memlet_path(first_edge)
        second_mpath = graph.memlet_path(second_edge)
        if len(first_mpath) != len(list(graph.memlet_tree(first_edge))):
            return False
        if len(second_mpath) != len(list(graph.memlet_tree(second_edge))):
            return False

        # The innermost ends of the paths must have a clearly defined memory
        # access pattern and no WCR
        first_iedge = first_mpath[0]
        second_iedge = second_mpath[-1]
        if first_iedge.data.subset.num_elements() != 1:
            return False
        if first_iedge.data.volume != 1:
            return False
        if first_iedge.data.wcr is not None:
            return False
        if second_iedge.data.subset.num_elements() != 1:
            return False
        if second_iedge.data.volume != 1:
            return False

        ##################################################################
        # The memory access pattern must be exactly the same

        # Collect all maps and ranges
        ranges_first = _collect_map_ranges(graph, first_mpath)
        ranges_second = _collect_map_ranges(graph, second_mpath)

        # Check map ranges
        for (_, frng), (_, srng) in zip(ranges_first, ranges_second):
            if frng != srng:
                return False

        # Check memlets for equivalence
        if len(first_iedge.data.subset) != len(second_iedge.data.subset):
            return False
        if not _do_memlets_correspond(first_iedge.data, second_iedge.data, ranges_first, ranges_second):
            return False

        return True

    def apply(self, state: SDFGState, sdfg: SDFG) -> nodes.AccessNode:
        """Replace the matched access node with a transient stream.

        Creates a size-1-shaped stream, deletes the original transient array
        when no other state uses it, rewrites every memlet on both paths to
        access element 0 of the stream (recursing into nested SDFGs), and
        splits the access node into a write node and a read node.
        Returns the (write_node, read_node) pair.
        """
        access: nodes.AccessNode = self.access

        # Get memlet paths
        first_edge = state.in_edges(access)[0]
        second_edge = state.out_edges(access)[0]
        first_mpath = state.memlet_path(first_edge)
        second_mpath = state.memlet_path(second_edge)

        # Create new stream of shape 1
        desc = sdfg.arrays[access.data]
        name, newdesc = sdfg.add_stream(access.data,
                                        desc.dtype,
                                        buffer_size=self.buffer_size,
                                        storage=self.storage,
                                        transient=True,
                                        find_new_name=True)

        # Remove transient array if possible
        for ostate in sdfg.nodes():
            if ostate is state:
                continue
            if any(n.data == access.data for n in ostate.data_nodes()):
                break
        else:
            # No other state touches the array; drop its descriptor.
            if desc.transient:
                del sdfg.arrays[access.data]

        # Replace memlets in path with stream access
        for e in first_mpath:
            e.data = mm.Memlet(data=name, subset='0')
            if isinstance(e.src, nodes.NestedSDFG):
                e.data.dynamic = True
                _streamify_recursive(e.src, e.src_conn, newdesc)
            if isinstance(e.dst, nodes.NestedSDFG):
                e.data.dynamic = True
                _streamify_recursive(e.dst, e.dst_conn, newdesc)
        for e in second_mpath:
            e.data = mm.Memlet(data=name, subset='0')
            if isinstance(e.src, nodes.NestedSDFG):
                e.data.dynamic = True
                _streamify_recursive(e.src, e.src_conn, newdesc)
            if isinstance(e.dst, nodes.NestedSDFG):
                e.data.dynamic = True
                _streamify_recursive(e.dst, e.dst_conn, newdesc)

        # Replace array access node with two stream access nodes
        wnode = state.add_write(name)
        rnode = state.add_read(name)
        state.remove_edge(first_edge)
        state.add_edge(first_edge.src, first_edge.src_conn, wnode, first_edge.dst_conn, first_edge.data)
        state.remove_edge(second_edge)
        state.add_edge(rnode, second_edge.src_conn, second_edge.dst, second_edge.dst_conn, second_edge.data)

        # Remove original access node
        state.remove_node(access)

        return wnode, rnode
| StarcoderdataPython |
191833 | <reponame>vindula/collective.ckeditor<filename>collective/ckeditor/tests/__init__.py
# Other packages may find this useful
from collective.ckeditor.tests.base import CKEditorTestCase
| StarcoderdataPython |
1730352 | # -*- coding: utf-8 -*- äöü vim: ts=8 sts=4 sw=4 si et tw=79
"""\
Tools for "brains"
"""
# Python compatibility:
from __future__ import absolute_import
from six import string_types as six_string_types
from six.moves import map
__author__ = "<NAME> <<EMAIL>>"
VERSION = (0,
6, # aufgeräumt
)
__version__ = '.'.join(map(str, VERSION))
# Standard library:
from collections import defaultdict
# visaplan:
from visaplan.tools.minifuncs import NoneOrString
__all__ = [
'make_collector',
]
def make_collector(use, all=None, any=None, separator=" ", empty=None):
    """
    Return a function that extracts the given attributes from a brain
    object and returns them as a string (the usual case) or as a list
    (separator == None).

    Arguments:

      use -- ordered sequence of attribute names.
             Each element of the sequence may be a 2-tuple;
             in that case the first part is the attribute name and the
             second a transformation function applied to the value.

      all -- if given, every attribute named here must have a non-empty
             value; otherwise <empty> is returned

      any -- complementary to <all>: if any one of the attributes named
             here has a non-empty value ...

      separator -- if None, a list is returned; if the all/any
             criterion is not met, an empty list

      empty -- the value returned when the all/any criterion is not
             met.  Ignored when <separator> is None (which means a list
             is returned; in that case an empty list comes back).

    The values for all and any should be subsets of use - otherwise no
    promise is made that they are processed correctly!

    Currently only *either* all *or* any may be given; for exactly one
    contained element the meaning is identical.

    >>> from sys import path
    >>> from os.path import dirname
    >>> path.insert(0, dirname(__file__))
    >>> from mock import MockBrain
    >>> caesar=MockBrain(getAcademicTitle='Dr.', getFirstname='Hase',
    ...                  getLastname='Caesar')
    >>> use=sorted(caesar.keys())
    >>> f_any=make_collector(use=use)
    >>> otto=MockBrain(getFirstname='Otto')

    The function f_any now always returns a string whenever the given
    dict has a value for at least one of these attributes.

    >>> f_any(caesar)
    'Dr. <NAME>'
    >>> f_any(otto)
    'Otto'

    To produce output only when a last name is present
    (a single value may be passed as a string):

    >>> f_all=make_collector(use=use, all='getLastname')
    >>> f_all(caesar)
    'Dr. <NAME>'
    >>> f_all(otto)
    """
    # ----------------------- [ make_collector: check arguments ... [
    def make_set(sos):
        """
        sos -- string or sequence

        Return a set.  If a string is given, add it as a whole:

        >>> make_set('getFirstname') == set(['getFirstname'])
        True

        Other sequences and sets are processed normally:

        >>> make_set(['getFirstname', 'getLastname']) == set(['getFirstname', 'getLastname'])
        True
        """
        if isinstance(sos, six_string_types):
            return set([sos])
        else:
            return set(sos)

    assert use
    if all is None:
        if any is None:
            any = make_set(use)
        else:
            any = make_set(any)
    else:
        assert any is None, ('Es kann nur *entweder* all (%(all)r) *oder*'
                             ' any (%(any)r) angegeben werden!'
                             % locals())
        all = make_set(all)
    if separator is None:
        assert empty is None, 'empty wird nicht verwendet!'

    # Detect the (key, function) tuple form and normalize `use` so every
    # entry is a 2-tuple when at least one tuple is present.
    has_tuples = False
    for key in use:
        if isinstance(key, tuple):
            has_tuples = True
            break
    if has_tuples:
        use2 = []
        for key in use:
            if isinstance(key, tuple):
                use2.append(key)
            else:
                use2.append((key, None))
        use = use2
    # ----------------------- ] ... make_collector: check arguments ]

    # One collector covers all six prior variants (any/all x plain/funcs x
    # string/list).  This also fixes two defects of the original version:
    #  * with separator=None the factory fell off the end and returned None
    #    (the list-returning closures were never handed out), and
    #  * those list closures called `separator.join(res)`, which would have
    #    raised TypeError for separator=None instead of returning the list.
    def collect(o):
        """Extract the configured attributes from *o* and combine them."""
        found = set()
        res = []
        for entry in use:
            if has_tuples:
                key, func = entry
            else:
                key, func = entry, None
            if key in o:
                val = getattr(o, key, None)
                if func is not None:
                    val = func(val)
                if val:
                    res.append(val)
                    found.add(key)
                    continue
            # Attribute absent or empty: a required (`all`) attribute
            # failing means we can give up immediately.
            if all is not None and key in all:
                return [] if separator is None else empty
        if all is None and not found.intersection(any):
            # The `any` criterion is not met -> treat as empty result.
            res = []
        if separator is None:
            return res
        if res:
            return separator.join(res)
        return empty

    return collect
## -------------------------------------------- ... make_collector
if __name__ == '__main__':
    # Standard library:
    import doctest

    # Run the module's embedded doctests.
    # NOTE(review): some expected outputs in the docstrings use Python 2
    # set reprs (e.g. "set(['getFirstname'])") and will fail when this is
    # executed under Python 3 -- confirm the targeted interpreter.
    doctest.testmod()
| StarcoderdataPython |
191290 | import os
import urllib.parse as up
from werkzeug.middleware.proxy_fix import ProxyFix
from flask import url_for, render_template
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from src import api, db, ma, create_app, Config, bp, bcrypt, jwt, admin, login_manager
# config = configs.get(config)
config = Config

# Extensions and blueprints handed to the application factory.
extensions = [api, db, ma, admin, jwt, bcrypt, login_manager]
bps = [bp]

app = create_app(__name__, config, extensions=extensions, blueprints=bps)
# Trust X-Forwarded-* headers from two proxy hops in front of the app.
# NOTE(review): `num_proxies` is the legacy ProxyFix signature; recent
# Werkzeug releases use x_for/x_proto/... -- confirm the pinned version.
app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=2)

# Flask-Script CLI manager plus Flask-Migrate's `db` command group.
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.shell
def _shell_context():
    """Objects preloaded into the interactive `manage.py shell` session."""
    return {
        'app': app,
        'db': db,
        'ma': ma,
        'config': config,
    }
@manager.command
def list_routes():
    """Print every registered URL rule (endpoint, methods, example URL)."""
    rows = []
    for rule in app.url_map.iter_rules():
        # Substitute a placeholder for every dynamic URL argument.
        placeholders = {arg: "[{0}]".format(arg) for arg in rule.arguments}
        methods = ','.join(rule.methods)
        url = url_for(rule.endpoint, **placeholders)
        rows.append(up.unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, url)))
    for row in sorted(rows):
        print(row)
@manager.option('-A', '--application', dest='application', default='', required=True)
@manager.option('-n', '--name', dest='name')
@manager.option('-l', '--debug', dest='debug')
@manager.option('-f', '--logfile', dest='logfile')
@manager.option('-P', '--pool', dest='pool')
@manager.option('-Q', '--queue', dest='queue')
@manager.option('-c', '--concurrency', dest='concurrency', default=2)
def worker(application, concurrency, pool, debug, logfile, name, queue):
    # NOTE(review): `celery` is never imported or defined anywhere in this
    # module, so running this command raises NameError -- confirm the
    # intended Celery app import.  Also note that none of the parsed CLI
    # options above are forwarded to celery.start().
    celery.start()
@app.route('/', methods=['GET'])
@app.route('/home', methods=['GET'])
def home():
    """Render the landing page for both '/' and '/home'."""
    return render_template('home.html')
if __name__ == "__main__":
    # Dispatch to the Flask-Script CLI (e.g. `python manage.py db migrate`).
    manager.run()
| StarcoderdataPython |
3343269 | <filename>tests/test_sdk1.py
from client_sdk_python import Web3, HTTPProvider
from client_sdk_python.eth import PlatON
from hexbytes import HexBytes

# Demo script against a local PlatON node; runs top to bottom on import.

# get blockNumber
w3 = Web3(HTTPProvider("http://localhost:6789"))
platon = PlatON(w3)
block_number = platon.blockNumber
print(block_number)

# get Balance
address = '0x493301712671Ada506ba6Ca7891F436D29185821'
balance = platon.getBalance(address)
print(balance)

# sendtransaction
to = '0xC1f330B214668beAc2E6418Dd651B09C759a4Bf5'
# NOTE(review): hard-coded account password; unlocks the sender for 60s.
w3.personal.unlockAccount(address, "password", 60)
data = {
    "from": address,
    "to": to,
    "value": 0x10909,
}
# Submit the transfer and block until the receipt is available.
transaction_hex = HexBytes(platon.sendTransaction(data)).hex()
result = platon.waitForTransactionReceipt(transaction_hex)
print(result)
1788140 | <gh_stars>0
import re
def increment_string(string):
    """Increment the number carried in *string*, preserving zero padding.

    The first run of digits is incremented by one; the first run of
    non-digits (if any) is kept as the prefix.  A digitless string gets
    "1" appended.  Examples: "foobar001" -> "foobar002",
    "foobar99" -> "foobar100", "foo" -> "foo1", "" -> "1".

    Fixes vs. the original: the helper computation is done once instead
    of calling increment_and_diff() twice, and the regex patterns are
    raw strings (avoids invalid-escape deprecation warnings).
    """
    digit_runs = re.findall(r'\d+', string)
    text_runs = re.findall(r'\D+', string)
    if digit_runs:
        bumped = int(digit_runs[0]) + 1
        # Width lost to the carry (e.g. '001' -> 2 loses two places);
        # restored below with leading zeros.
        pad = len(digit_runs[0]) - len(str(bumped))
    else:
        bumped, pad = 1, 0
    number_part = str(bumped).rjust(pad + 1, '0')
    prefix = text_runs[0] if text_runs else ''
    return prefix + number_part
def increment_and_diff(num):
    """Return [incremented value, width lost to the carry].

    *num* is the list produced by re.findall on the digit runs; only the
    first run matters.  An empty list yields [1, 0].
    """
    if not num:
        return [1, 0]
    bumped = int(num[0]) + 1
    return [bumped, len(num[0]) - len(str(bumped))]
| StarcoderdataPython |
3255966 | <reponame>jfuruness/lib_secure_monitoring_service<filename>lib_secure_monitoring_service/__main__.py
from lib_bgp_simulator import Simulator, BGPAS, Graph
from lib_rovpp import ROVPPV1SimpleAS
from lib_secure_monitoring_service.engine_inputs import V4SubprefixHijack
from lib_secure_monitoring_service.rov_sms import ROVSMS, ROVSMSK1, ROVSMSK2, ROVSMSK3
from lib_secure_monitoring_service.v4_graph import V4Graph
def main():
    """Run the V4 subprefix-hijack simulation.

    Sweeps adoption percentages 0-100 over several defending AS classes
    (ROV++ v1 simple and the ROV-SMS k-variants), 20 trials per point,
    with plain BGPAS as the non-adopting base class.
    """
    Simulator().run(graphs=[V4Graph(percent_adoptions=[0,5,10,20,40,60,80,100],
                                    adopt_as_classes=[ROVPPV1SimpleAS, ROVSMS, ROVSMSK1, ROVSMSK2, ROVSMSK3],
                                    EngineInputCls=V4SubprefixHijack,
                                    num_trials=20,
                                    BaseASCls=BGPAS)]
                    )


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1798458 | <reponame>Asperger/PCRD-DiscordBot
from pickle import load, dump
from os.path import exists, dirname, join
from threading import Thread, RLock
from datetime import datetime
from utils.log import FileLogger
from utils.timer import get_settlement_time_object
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# OAuth scope: full read/write access to the user's spreadsheets.
_scopes = ['https://www.googleapis.com/auth/spreadsheets']
# Files that live next to this module.
_creds_path = join(dirname(__file__), 'credentials.json')
_pickle_path = join(dirname(__file__), 'token.pickle')
_sheet_id_path = join(dirname(__file__), 'sheet.id')

_creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if exists(_pickle_path):
    with open(_pickle_path, 'rb') as token:
        _creds = load(token)
# If there are no (valid) credentials available, let the user log in.
# NOTE(review): this OAuth flow runs at import time and may block on
# interactive console input (flow.run_console()).
if not _creds or not _creds.valid:
    if _creds and _creds.expired and _creds.refresh_token:
        _creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(_creds_path, _scopes)
        _creds = flow.run_console()
    # Save the credentials for the next run
    with open(_pickle_path, 'wb') as token:
        dump(_creds, token)

# Authenticated Sheets API v4 client shared by all helpers below.
_service = build('sheets', 'v4', credentials=_creds)

# Module-level caches, refreshed by switch_sheets().
_spreadsheet_id = ''
_start_date = datetime.now()
_player_list = {}
# Undo/redo stacks of [updated_range, body, description] entries.
_undo = {}
_undo['undostack'] = []
_undo['redostack'] = []
# Serializes sheet writes issued from multiple threads.
_sheet_lock = RLock()
def get_sheets_id():
    """Return the id of the currently active spreadsheet."""
    # No `global` statement needed: the module global is only read here,
    # never rebound (the original declared `global _spreadsheet_id`
    # unnecessarily).
    return _spreadsheet_id
def read_sheet(range_name):
    """Fetch the cell values of *range_name* from the active spreadsheet.

    Returns a list of rows (each a list of cell strings); an empty range
    yields [].  On API failure the error is logged and None is returned.
    """
    try:
        sheets = _service.spreadsheets()
        result = sheets.values().get(spreadsheetId=_spreadsheet_id, range=range_name).execute()
    except Exception as e:
        FileLogger.error(f'Fail to read sheet: ID={_spreadsheet_id}, range={range_name}\n'+ str(e))
        return
    return result.get('values', [])
def write_sheet(range_name, body, option='RAW'):
    """Overwrite *range_name* with *body* ({'values': [[...], ...]}).

    *option* is the Sheets API valueInputOption ('RAW' or 'USER_ENTERED').
    Returns the API response dict, or None on failure (error is logged).
    """
    try:
        sheets = _service.spreadsheets()
        result = sheets.values().update(spreadsheetId=_spreadsheet_id, range=range_name, body=body, valueInputOption=option).execute()
    except Exception as e:
        FileLogger.error(f'Fail to write sheet: ID={_spreadsheet_id}, range={range_name}\n'+ str(e))
        return
    return result
def append_sheet(range_name, body, option='RAW'):
    """Append the rows in *body* after the table located at *range_name*.

    *option* is the Sheets API valueInputOption ('RAW' or 'USER_ENTERED').
    Returns the API response dict, or None on failure (error is logged).
    """
    try:
        sheets = _service.spreadsheets()
        result = sheets.values().append(spreadsheetId=_spreadsheet_id, range=range_name, body=body, valueInputOption=option).execute()
    except Exception as e:
        FileLogger.error(f'Fail to append sheet: ID={_spreadsheet_id}, range={range_name}\n'+ str(e))
        return
    return result
def get_start_date():
    """Read the event start date from cell A1 of the member sheet.

    Updates the module-level _start_date cache and returns it as a
    timezone-aware datetime (using the settlement timezone), or None
    when the cell is empty.  Assumes A1 holds "YYYY/MM/DD" -- TODO
    confirm against the sheet template.
    """
    global _start_date
    values = read_sheet('隊員列表!A1:A1')
    if not values:
        FileLogger.error('No start date found.')
        return None
    else:
        date_tokens = values[0][0].split('/')
        settlement_time = get_settlement_time_object()
        _start_date = datetime(year=int(date_tokens[0]), month=int(date_tokens[1]), day=int(date_tokens[2])).replace(tzinfo=settlement_time.tzinfo)
        return _start_date
def get_player_list():
    """Load the member roster from columns B:C of the member sheet.

    Column B is the nickname and column C the Discord id; the cache maps
    int(discord_id) -> nickname.  Updates the module-level _player_list
    and returns it, or None when the range is empty.
    """
    global _player_list
    values = read_sheet('隊員列表!B2:C')
    if not values:
        FileLogger.error('No player list found.')
        return None
    else:
        _player_list = {}
        for row in values:
            _player_list[int(row[1])] = row[0]
        return _player_list
def switch_sheets(sheet_id):
    """Make *sheet_id* the active spreadsheet and refresh the caches.

    Re-reads the start date and player list, then persists the id to the
    sheet.id file so the selection survives restarts.  Returns the tuple
    (sheet_id, start_date, player_list).
    """
    global _spreadsheet_id
    _spreadsheet_id = sheet_id
    start_date = get_start_date()
    player_list = get_player_list()
    with open(_sheet_id_path, 'w') as f:
        f.write(_spreadsheet_id)
    return _spreadsheet_id, start_date, player_list
def fill_sheet(player_discord_id, description, play_number, boss_tag, damage, play_option, play_miss):
    """Append one battle record to today's daily log sheet.

    The row is [timestamp, nickname, play tag, damage, boss tag, miss flag];
    the play tag is the play number suffixed 'B' for a carry-over ('補')
    attempt and 'A' otherwise, and play_miss > 0 marks a missed hit ('閃').
    The day tab is chosen from the offset between now and _start_date.
    On success the write is pushed onto the undo stack (clearing the redo
    stack) and True is returned; returns False when the player is unknown
    or the API response cannot be parsed.
    """
    if player_discord_id not in _player_list:
        FileLogger.warn(f'Discord ID: {player_discord_id} not found in sheet')
        return False
    player_nickname = _player_list[player_discord_id]
    today = get_settlement_time_object()
    play_tag = f"{play_number}{'B' if play_option == '補' else 'A'}"
    missing_tag = '閃' if play_miss > 0 else ''
    body = {
        'values': [
            [
                today.strftime("%Y/%m/%d %H:%M:%S"), player_nickname, play_tag, damage, boss_tag, missing_tag
            ]
        ]
    }
    play_day_offset = today - _start_date
    range_name = f'Day {play_day_offset.days + 1}-Log!A2:F'
    # `with` guarantees the lock is released even if append_sheet raises
    # (the original acquire()/release() pair leaked the lock on exceptions).
    with _sheet_lock:
        result = append_sheet(range_name, body)
    checkResult = True
    try:
        updates = result.get('updates')
        updated_range = updates.get('updatedRange')
        _undo['undostack'].append([updated_range, body, description])
        _undo['redostack'] = []
    except Exception as e:
        FileLogger.error(f'Fail to get result: {description}\n' + str(e))
        checkResult = False
    return checkResult
def undo():
    """Blank out the most recently logged row.

    Pops the last [range, body, description] entry off the undo stack,
    overwrites that range with empty cells, and on a consistent API
    response pushes the entry onto the redo stack.  Returns the entry's
    description on success, None otherwise.  Raises IndexError when the
    undo stack is empty (unchanged from the original behavior).
    """
    op = _undo['undostack'][-1]
    _undo['undostack'] = _undo['undostack'][0:-1]
    (range_name, body, description) = op
    empty_body = {
        'values': [
            [
                '', '', '', '', '', ''
            ]
        ]
    }
    # `with` guarantees the lock is released even if write_sheet raises
    # (the original acquire()/release() pair leaked the lock on exceptions).
    with _sheet_lock:
        result = write_sheet(range_name, empty_body)
    # Initialized up front: in the original, a failure below left
    # `updated_range` unbound and the final `if` raised NameError.
    updated_range = None
    try:
        updated_range = result.get('updatedRange')
    except Exception as e:
        FileLogger.error(f'Fail to get undo result: {description}\n' + str(e))
    if updated_range and range_name == updated_range:
        _undo['redostack'].append([updated_range, body, description])
        return description
    else:
        FileLogger.error(f'Inconsistent undo result: {description}')
        return None
def redo():
    """Re-apply the most recently undone row.

    Pops the last [range, body, description] entry off the redo stack,
    rewrites that range with the saved body, and on a consistent API
    response pushes the entry back onto the undo stack.  Returns the
    entry's description on success, None otherwise.  Raises IndexError
    when the redo stack is empty (unchanged from the original behavior).
    """
    op = _undo['redostack'][-1]
    _undo['redostack'] = _undo['redostack'][0:-1]
    (range_name, body, description) = op
    # `with` guarantees the lock is released even if write_sheet raises
    # (the original acquire()/release() pair leaked the lock on exceptions).
    with _sheet_lock:
        result = write_sheet(range_name, body)
    # Initialized up front: in the original, a failure below left
    # `updated_range` unbound and the final `if` raised NameError.
    updated_range = None
    try:
        updated_range = result.get('updatedRange')
    except Exception as e:
        FileLogger.error(f'Fail to get redo result: {description}\n' + str(e))
    if updated_range and range_name == updated_range:
        _undo['undostack'].append([updated_range, body, description])
        return description
    else:
        FileLogger.error(f'Inconsistent redo result: {description}')
        return None
# The file sheet.id stores the id of a specific google sheet, and is
# created automatically when the switching happens.
# NOTE(review): this runs at import time, so importing the module may
# issue Sheets API calls via switch_sheets().
if exists(_sheet_id_path):
    with open(_sheet_id_path, 'r') as f:
        switch_sheets(f.read())
# ex-072 - Números por Extenso
# Tuple indexed by the number itself: lista[n] is the Portuguese word for n.
lista = ('Zero', 'Um', 'Dois', 'Três', 'Quatro', 'Cinco',
         'Seis', 'Sete', 'Oito', 'Nove', 'Dez', 'Onze',
         'Doze', 'Treze', 'Quatorze', 'Quinze', 'Dezesseis',
         'Dezessete', 'Dezoito', 'Dezenove', 'Vinte')
while True:
    # Keep asking until the number falls inside the 0..20 table.
    # NOTE(review): non-numeric input makes int() raise ValueError.
    while True:
        num = int(input('Digite um número de 0 a 20: '))
        if 0 <= num <= 20:
            break
        else:
            print('Tente novamente!')
    print(f'O número {num} escrito por extenso é {lista[num]}!')
    # Ask whether to continue; accept only the first letter S or N.
    # NOTE(review): an empty answer raises IndexError ([0] on '').
    while True:
        sair = str(input('Deseja continuar [S/N]: ')).strip().upper()[0]
        if sair not in 'SN':
            print('INVALIDO! ', end='')
        else:
            break
    if sair == 'N':
        break
print('Programa Finalizado!!!')
| StarcoderdataPython |
1647770 | """
Faz a representação de um elemento na lista ligada.
Cada elemento (aqui chamado de nó) terá seu valor representado como inteiro e um apontamento para o próximo elemento da lista.
Keyword arguments:
value -- valor do elemento
"""
class Node:
    """A single linked-list element: an integer value plus a pointer to
    the next element (None when this is the tail)."""

    def __init__(self, value):
        self.value = value
        self.next = None

    def printNode(self):
        """Print this node's value on its own line."""
        print(self.value)
"""
Faz a representação da lista ligada.
"""
class LinkedList:
    """Singly linked list built from Node elements; firstNode is the head
    (None when the list is empty)."""

    def __init__(self):
        self.firstNode = None

    def insertBeginning(self, value):
        """Insert *value* at the head of the list."""
        node = Node(value)
        node.next, self.firstNode = self.firstNode, node

    def show(self):
        """Print every value from head to tail (message when empty)."""
        if self.firstNode is None:
            print('A lista está vazia')
            return None
        node = self.firstNode
        while node is not None:
            node.printNode()
            node = node.next

    def search(self, value):
        """Return the first node holding *value*, or None if absent/empty."""
        if self.firstNode is None:
            print('A lista está vazia')
            return None
        node = self.firstNode
        while node.value != value:
            if node.next is None:
                return None
            node = node.next
        return node

    def deleteBeginning(self):
        """Remove and return the head node (None when the list is empty)."""
        if self.firstNode is None:
            print('A lista está vazia')
            return None
        removed = self.firstNode
        self.firstNode = removed.next
        return removed

    def deleteAtPosition(self, value):
        """Remove and return the first node holding *value* (None if
        the value is absent or the list is empty)."""
        if self.firstNode is None:
            print('A lista está vazia')
            return None
        previous = self.firstNode
        node = self.firstNode
        while node.value != value:
            if node.next is None:
                return None
            previous, node = node, node.next
        if node is self.firstNode:
            self.firstNode = self.firstNode.next
        else:
            previous.next = node.next
        return node
# Testes
# Ad-hoc smoke test (runs on import, output to stdout): builds the list
# 5->4->3->2->1, prints it, then removes the head and the node holding 3.
linkedList = LinkedList()
linkedList.insertBeginning(1)
linkedList.insertBeginning(2)
linkedList.insertBeginning(3)
linkedList.insertBeginning(4)
linkedList.insertBeginning(5)
linkedList.show()
# print("---------")
# pesquisa = linkedList.search(3)
# pesquisa.printNode()
print("---------")
linkedList.deleteBeginning()
linkedList.show()
linkedList.deleteAtPosition(3)
| StarcoderdataPython |
101888 | <reponame>sn0b4ll/Incident-Playbook<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Provides helper functions for gathering fixtures for Django TestCases.
"""
# standard library
import logging
import os.path
# third party
from django.conf import settings
FIXTURES_DIR = 'tests/fixtures/'
FILE_EXTENSION = '.json'
LOGGER = logging.getLogger(__name__)
# dictionary of fixture files and their direct dependencies
# Maps each fixture name (without the .json extension) to the fixtures it
# must be loaded with; get_dependencies() below expands this transitively.
FIXTURE_DEPENDENCIES = {
    'actions': ['destinations'],
    'alerts': ['distilleries', 'watchdogs', 'users'],
    'articles': [],
    'bottles': [],
    'categories': [],
    'codebooks': ['companies'],
    'comments': ['alerts', 'users'],
    'companies': [],
    'containers': ['bottles', 'labels'],
    'contexts': ['distilleries'],
    'couriers': ['passports', 'visas', 'actions', 'users'],
    'datachutes': ['datamungers', 'datasieves', 'pipes'],
    'datacondensers': ['bottles'],
    'datamungers': ['datacondensers', 'distilleries'],
    'datasieves': [],
    'datataggers': ['containers', 'tags'],
    'destinations': [],
    'dispatches': ['alerts', 'stamps'],
    'distilleries': ['categories', 'containers', 'companies',
                     'codebooks', 'tastes', 'warehouses'],
    'filters': ['followees', 'locations', 'searchterms'],
    'followees': ['reservoirs'],
    'funnels': ['bottles', 'datacondensers', 'pipes'],
    'gateways': ['reservoirs', 'pipes'],
    'groups': [],
    'invoices': ['stamps'],
    'inspections': [],
    'labels': ['inspections', 'procedures'],
    'locations': ['users'],
    'logchutes': ['logmungers', 'logsieves'],
    'logcondensers': ['bottles'],
    'logmungers': ['logcondensers', 'distilleries'],
    'logsieves': [],
    'mailchutes': ['mailmungers', 'mailsieves'],
    'mailcondensers': ['bottles'],
    'mailmungers': ['mailcondensers', 'distilleries'],
    'mailsieves': [],
    'monitors': ['distilleries', 'groups'],
    'parameters': ['locations', 'users'],
    'passports': ['users'],
    'pipes': ['reservoirs'],
    'plumbers': ['passports', 'visas', 'pipes', 'users'],
    'procedures': [],
    'records': ['plumbers'],
    'reservoirs': [],
    'samples': ['pipes'],
    'searchterms': [],
    'stamps': ['passports', 'pipes', 'actions', 'users'],
    'streams': ['invoices'],
    'tags': ['alerts', 'comments', 'articles'],
    'tastes': ['containers'],
    'timeframes': [],
    'warehouses': [],
    'watchdogs': ['categories', 'groups'],
    'users': ['companies', 'groups'],
    'visas': []
}
def get_dependencies(dependencies):
    """
    Takes a list of fixture file names (without their extension) and
    returns a list of file names for the fixtures and their dependencies.
    (This list may contain duplicates.)

    Each fixture's transitive dependencies are prepended to everything
    accumulated so far, so prerequisites precede their dependents.
    """
    expanded = []
    for name in dependencies:
        expanded.append(name)
        children = FIXTURE_DEPENDENCIES.get(name, [])
        if children:
            expanded = get_dependencies(children) + expanded
    return expanded
def get_fixtures(dependencies):
    """
    Takes a list of fixture file names (without their .json extension)
    and returns a list of fixture files containing all those fixtures
    and all their dependencies, suitable for a Django TestCase's
    `fixtures` attribute.  Missing files are logged as errors but still
    included.
    """
    assert isinstance(dependencies, list), 'Dependencies must be a list'
    fixtures = []
    for fixture in get_dependencies(dependencies):
        file_name = FIXTURES_DIR + fixture + FILE_EXTENSION
        if file_name in fixtures:
            continue  # already collected via another dependency chain
        if not os.path.isfile(os.path.join(settings.BASE_DIR, file_name)):
            LOGGER.error('Fixture file %s is missing', file_name)
        fixtures.append(file_name)
    return fixtures
| StarcoderdataPython |
1747672 | <filename>rpiRobot/src/vision/domain/iCamera.py
from abc import ABC, abstractmethod
from vision.domain.image import Image
class ICamera(ABC):
    """Abstract camera interface for the vision domain layer."""

    @abstractmethod
    def take_picture(self) -> Image:
        """Capture and return a single frame as a domain Image."""
        pass
| StarcoderdataPython |
# Read two integers, report whether n is divisible by m, and n's parity.
# NOTE(review): m == 0 makes `n%m` raise ZeroDivisionError, and
# non-numeric input makes int() raise ValueError; "dividible" is a typo
# in the output string (left unchanged here).
n=int(input("enter a number"))
m=int(input("enter a number"))
if n%m==0:
    print(n,"is dividible by",m)
else:
    print(n,"is not divisible by",m)
if n%2==0:
    print(n,"is even")
else:
    print(n,"is odd")
| StarcoderdataPython |
class HistoryStatement:
    """A history entry linking an uploaded file hash to its predecessor.

    Serialized layout (byte offsets):
      0..31    hashPrev      previous entry's hash (32 bytes)
      32..63   hashUploaded  uploaded file's hash (32 bytes)
      64..113  username      ASCII, zero-padded to 50 bytes
      114..    comment       ASCII, variable length
    """

    def __init__(self, hashPrev, hashUploaded, username, comment=""):
        self.hashPrev = hashPrev
        self.hashUploaded = hashUploaded
        self.username = username
        self.comment = comment

    def to_bytes(self):
        """Serialize this statement into a bytearray using the layout above."""
        buf = bytearray()
        buf.extend(self.hashPrev)
        buf.extend(self.hashUploaded)
        buf.extend(self.username.encode('ascii'))
        # Zero-pad the username field to its fixed 50-byte width.
        buf.extend(bytes(50 - len(self.username)))
        buf.extend(self.comment.encode('ascii'))
        return buf

    def sign(self, key):
        """Return *key*'s signature over the serialized statement."""
        # (Removed an unreachable `pass` that followed this return.)
        return key.sign(self.to_bytes())

    @staticmethod
    def from_bytes(buf):
        """Deserialize a statement from *buf* (inverse of to_bytes).

        Declared @staticmethod: the original took no `self`, so calling it
        on an instance would have misbound the buffer argument.
        """
        hashPrev = bytearray(buf[:32])
        hashUploaded = bytearray(buf[32:64])
        username_field = buf[64:114]
        username = ""
        i = 0
        # Bounded at 50 so a full-width (unterminated) username cannot
        # index past the field, which raised IndexError in the original.
        while i < 50 and username_field[i] != 0:
            username += chr(username_field[i])
            i += 1
        comment = bytes(buf[114:]).decode('ascii')
        return HistoryStatement(hashPrev, hashUploaded, username, comment)

    def __str__(self):
        return "<HistoryStatement %s uploaded %s, previous was %s, comment: %s>" % (self.username, self.hashUploaded, self.hashPrev, self.comment)
1627764 | """
With these settings, tests run faster.
"""
import logging.config
import os
import re
from django.conf import settings

DEBUG = False

from django.utils.log import DEFAULT_LOGGING

from .base import *

ALLOWED_HOSTS = env.list("ALLOWED_HOSTS")

# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
# NOTE(review): DEBUG is assigned False twice in this module (also above,
# before the base-settings import) -- the duplicate is harmless but redundant.
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env.str("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
# TEST_RUNNER = "django.test.runner.DiscoverRunner"
#
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
        "LOCATION": "",
    }
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = True  # noqa F405
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": env.str("POSTGRES_DB"),
        "USER": env.str("POSTGRES_USER"),
        # Fixed: the env-var name had been mangled to "POST<PASSWORD>" (an
        # anonymisation artefact); "POSTGRES_PASSWORD" matches the sibling
        # POSTGRES_* variables above/below. TODO: confirm against the
        # deployment environment.
        "PASSWORD": env.str("POSTGRES_PASSWORD"),
        "HOST": env.str("POSTGRES_HOST"),
        "PORT": env.str("POSTGRES_PORT"),
    }
}
# Run the test suite through django-nose with spec-style output.
INSTALLED_APPS.append("django_nose")
TEST_RUNNER = "django_nose.NoseTestSuiteRunner"
NOSE_ARGS = [
    "--verbosity=3",
    "--nologcapture",
    "--with-spec",
    "--spec-color",
]
# Disable Django's logging setup
LOGGING_CONFIG = None
LOG_LEVEL = env("LOG_LEVEL")
# Test Data
# Commodity/heading/chapter codes and descriptions used as fixtures by the
# test suite, plus the JSON files and search indices they map onto.
TEST_COMMODITY_CODE = "0101210000"
# Split into (heading+subheading, first pair, second pair) digit groups.
TEST_COMMODITY_CODE_SPLIT = list(
    re.search("([0-9]{6})([0-9]{2})([0-9]{2})", TEST_COMMODITY_CODE).groups()
)
TEST_SUBHEADING_CODE = "0101210000"
TEST_HEADING_CODE = "0101000000"
TEST_HEADING_CODE_SHORT = "0101"
TEST_CHAPTER_CODE = "0100000000"
TEST_SECTION_ID = "1"
TEST_COUNTRY_CODE = "AU"
TEST_COUNTRY_NAME = "Australia"
TEST_COMMODITY_DESCRIPTION = "Pure-bred breeding animals"
TEST_HEADING_DESCRIPTION = "Live horses, asses, mules and hinnies"
TEST_SUBHEADING_DESCRIPTION = "Horses"
TEST_CHAPTER_DESCRIPTION = "Live animals"
TEST_SECTION_DESCRIPTION = "Live animals; animal products"
# Fixture file locations, derived from the codes above.
COMMODITY_DATA = APPS_DIR + "/commodities/tests/commodity_{0}.json".format(
    TEST_COMMODITY_CODE
)
COMMODITY_STRUCTURE = APPS_DIR + "/commodities/tests/structure_{0}.json".format(
    TEST_COMMODITY_CODE
)
SUBHEADING_STRUCTURE = (
    APPS_DIR
    + "/hierarchy/tests/subheading_{0}_structure.json".format(TEST_SUBHEADING_CODE)
)
HEADING_STRUCTURE = APPS_DIR + "/hierarchy/tests/heading_{0}_structure.json".format(
    TEST_HEADING_CODE
)
CHAPTER_STRUCTURE = APPS_DIR + "/hierarchy/tests/chapter_{0}_structure.json".format(
    TEST_CHAPTER_CODE
)
SECTION_STRUCTURE = APPS_DIR + "/hierarchy/tests/section_{}_structure.json".format(
    TEST_SECTION_ID
)
SECTIONJSON_DATA = APPS_DIR + "/trade_tariff_service/import_data/SectionJson.json"
CHAPTERJSON_DATA = APPS_DIR + "/trade_tariff_service/import_data/ChapterJson.json"
HEADINGJSON_DATA = APPS_DIR + "/trade_tariff_service/import_data/HeadingJson.json"
SUBHEADINGJSON_DATA = APPS_DIR + "/trade_tariff_service/import_data/SubHeadingJson.json"
COMMODITYHEADINGJSON_DATA = (
    APPS_DIR + "/trade_tariff_service/import_data/CommodityHeadingJson.json"
)
IMPORTMEASUREJSON_DATA = (
    APPS_DIR + "/trade_tariff_service/import_data/ImportMeasureJson.json"
)
TTS_DATA = APPS_DIR + "/commodities/tests/commodity_0101210000.json"
# Base URLs mocked by request-recording tests.
REQUEST_MOCK_TTS_URL = "https://www.trade-tariff.service.gov.uk/api/v1/"
REQUEST_MOCK_SECTION_URL = (
    "https://www.trade-tariff.service.gov.uk/api/v2/sections/1/section_note"
)
# Dedicated test_* Elasticsearch indices so tests never touch real data.
ELASTICSEARCH_INDEX_NAMES = {
    "search.documents.section": "test_sections",
    "search.documents.chapter": "test_chapters",
    "search.documents.heading": "test_headings",
    "search.documents.subheading": "test_subheadings",
    "search.documents.commodity": "test_commodities",
}
ES_URL = "http://es:9200"
ELASTICSEARCH_DSL = {"default": {"hosts": ES_URL}}
SITE_READ_ONLY = False
# Extra apps enabled only for the test run.
INSTALLED_APPS += [
    "cms",
    "deferred_changes.tests.apps.DeferredChangesTestsConfig",
    "hierarchy.tests.apps.HierarchyTestsConfig",
]
def get_trade_tariff_config():
    """Return the UK/EU trade tariff API endpoints.

    Each region maps the tree API (v2) and the JSON-object API (v1)
    to its base URL.
    """
    base = "https://www.trade-tariff.service.gov.uk/"
    return {
        "UK": {
            "TREE": {"BASE_URL": base + "api/v2/"},
            "JSON_OBJ": {"BASE_URL": base + "api/v1/"},
        },
        "EU": {
            "TREE": {"BASE_URL": base + "xi/api/v2/"},
            "JSON_OBJ": {"BASE_URL": base + "xi/api/v1/"},
        },
    }
# NOTE(review): this deliberately stores the function itself, not its result —
# confirm that consumers call TRADE_TARIFF_CONFIG() rather than reading a dict.
TRADE_TARIFF_CONFIG = get_trade_tariff_config
TRACK_GA_EVENTS = False
# Rules-of-origin S3 access is disabled (empty credentials) in this settings file.
ROO_S3_BUCKET_NAME = ""
ROO_S3_ACCESS_KEY_ID = ""
ROO_S3_SECRET_ACCESS_KEY = ""
| StarcoderdataPython |
# Assignment 8.5 — count the "From " lines in a mailbox file and print the
# sender address (second whitespace-separated field) from each one.
# Default file name: mbox-short.txt
fname = input("Enter file name: ")
if len(fname) < 1:
    fname = "mbox-short.txt"
fh = open(fname)
count = 0
for line in fh:
    line = line.rstrip()
    # Only "From " (with the trailing space) marks a message header line;
    # matching bare "From" would also hit "From:" lines and double-count.
    if line.startswith('From '):
        count = count + 1
        print(line.split()[1])
print("There were", count, "lines in the file with From as the first word")
| StarcoderdataPython |
3207729 | <gh_stars>0
from multiprocessing import Queue, Lock, Process, current_process
import queue, time
'''
where we are adding tasks to the queue, then creating processes and starting them, then using join() to complete the processes. Finally we are printing the log from the second queue
'''
def doJob(task_to_complete_queue, task_done_queue):
    """Drain tasks from the input queue, logging each onto the done queue.

    Each processed task is echoed to stdout and reported on
    *task_done_queue* tagged with the worker process's name. Returns True
    once the input queue is empty.
    """
    while True:
        try:
            current_task = task_to_complete_queue.get_nowait()
        except queue.Empty:
            # No more queued work: this worker is finished.
            return True
        print(current_task)
        task_done_queue.put(current_task + " done by " + current_process().name)
        time.sleep(.5)
def main():
    """Spawn a pool of worker processes that drain a shared task queue.

    Queues up ``number_of_task - 1`` tasks and starts ``number_of_proc - 1``
    workers (both ranges start at 1), waits for every worker to exit, then
    prints the completion log collected on the second queue.
    """
    number_of_task = 20
    number_of_proc = 5
    process_l = []
    task_to_complete_queue = Queue()
    task_done_queue = Queue()
    # Queue all work items before any worker starts.
    for task in range(1, number_of_task):
        task_to_complete_queue.put("Task - " + str(task))
    # Launch the worker processes; each runs doJob until the queue is empty.
    for procs in range(1, number_of_proc):
        prx = Process(target=doJob, args=(task_to_complete_queue, task_done_queue))
        process_l.append(prx)
        prx.start()
    # Wait for every worker to finish.
    for pross in process_l:
        pross.join()
    # Print the completion log produced by the workers.
    while not task_done_queue.empty():
        print(task_done_queue.get())


if __name__ == '__main__':
    main()
158576 | #!/usr/bin/env python
# Usage:
# python parse_taxonomy_for_clustered_subset.py X Y Z
# where X is the fasta file to read target labels from, Y is the taxonomy mapping file starting with a superset of labels from X, and Z is the output taxonomy mapping file.
from cogent.parse.fasta import MinimalFastaParser
from sys import argv
target_fasta = open(argv[1], "U")
taxonomy_mapping = open(argv[2], "U")
taxonomy_outf = open(argv[3], "w")

# Collect the set of sequence labels present in the clustered FASTA subset.
target_ids = {label.strip() for label, seq in MinimalFastaParser(target_fasta)}

# Copy through only the taxonomy lines whose leading ID is in that subset.
for line in taxonomy_mapping:
    if line.split()[0] in target_ids:
        taxonomy_outf.write(line)
3218322 | from pyfluminus.api import name, modules, get_announcements, current_term
from pyfluminus.structs import Module
from pyfluminus.fluminus import get_links_for_module
from app import db
from app.models import User, User_Mods
from app.extra_api import get_class_grps
from datetime import datetime
def get_active_mods(auth):
    """Return the authenticated student's active modules, keyed by module code.

    :param auth: Authentication token issued by Luminus
    :type auth: dict
    :return: mapping of module code to a dict with keys ``name``, ``id``
        and ``term``
    :rtype: dict
    """
    return {
        mod.code: {"name": mod.name, "id": mod.id, "term": mod.term}
        for mod in modules(auth).data
    }
def get_all_announcement(auth):
    """Return every announcement for the authenticated user, grouped by module.

    :param auth: Authentication token issued by Luminus
    :type auth: dict
    :return: mapping of module code to that module's announcement list
    :rtype: dict
    """
    return {
        mod.code: get_announcements(auth, mod.id, False).data
        for mod in modules(auth).data
    }
def get_current_term(auth):
    """Return the current semester of the authenticated student.

    :param auth: Authentication token issued by Luminus
    :type auth: dict
    :return: current term as reported by the Luminus API
    :rtype: dict
    """
    term_result = current_term(auth)
    return term_result.data
def response_json(status, count, data):
    """Build the standard dictionary used for JSON HTTP responses.

    :param status: True when the data is valid and no error occurred
    :type status: bool
    :param count: total number of fields in *data*
    :type count: int
    :param data: the actual response payload
    :type data: dict
    :return: dictionary of all the params, to be serialised as JSON
    :rtype: dict
    """
    # The docstring previously documented a removed ``code`` parameter and
    # mislabelled ``status`` — both fixed; the dead commented-out key is gone.
    return {
        "status": status,
        "count": count,
        "data": data,
    }
def add_mods(auth, uId):
    """Persist the user's active modules (those with a class group) to the DB."""
    for code, info in get_active_mods(auth).items():
        class_grp = get_class_grps(auth, info["id"])
        if class_grp is None:
            continue
        # NOTE(review): debug output kept from the original — consider logging.
        print(class_grp)
        new_mod = User_Mods(code=code, mod_id=info["id"], name=info["name"], class_grp=class_grp, term=info["term"], sem=1, student=uId)
        new_mod.get_timings()
        db.session.add(new_mod)
        # Commit per module, matching the original behaviour.
        db.session.commit()
def update_mods(auth, uId):
    """Replace the user's stored modules with their currently active ones.

    Deletes every existing module row for the user, commits, then
    re-inserts the active modules via :func:`add_mods`.
    """
    # The previous version fetched get_active_mods(auth) here and discarded
    # the result; add_mods performs its own fetch, so that extra API call
    # was redundant and has been removed.
    old_mods = User.query.get(uId).mods
    for mod in old_mods:
        db.session.delete(mod)
    db.session.commit()
    add_mods(auth, uId)
def get_mod_files(auth):
    """Collect the download links for every module of the authenticated user."""
    return [
        get_links_for_module(auth, module)
        for module in modules(auth).data
        if module is not None
    ]
def get_single_mod_files(auth, code):
    """Return the download links for the module matching *code*, or None."""
    for mod in modules(auth).data:
        if mod is not None and mod.code == code:
            return get_links_for_module(auth, mod)
    return None
def get_single_mod_announcements(auth, mod_id):
    """Fetch one module's announcements with datetimes formatted as strings."""
    announcements = get_announcements(auth, mod_id, False).data
    for announcement in announcements:
        announcement['datetime'] = announcement['datetime'].strftime("%a, %d %b %Y, %H:%M:%S")
    return announcements
3338638 | <filename>esmvalcore/cmor/_fixes/obs4mips/airs_2_1.py
"""Fixes for obs4MIPs dataset AIRS-2-1."""
from iris.exceptions import CoordinateNotFoundError
from ..fix import Fix
class AllVars(Fix):
    """Fixes applied to every variable of the AIRS-2-1 dataset."""

    def fix_metadata(self, cubes):
        """Correct the units of the ``air_pressure`` coordinate.

        When the coordinate's first point exceeds 10000, its units
        attribute is set to ``Pa`` (the stored magnitudes are too large
        for hPa). Cubes without an ``air_pressure`` coordinate are left
        untouched.

        Parameters
        ----------
        cubes: iris.cube.CubeList
            Input cubes.

        Returns
        -------
        iris.cube.CubeList
            Fixed cubes.
        """
        for cube in cubes:
            try:
                pressure_coord = cube.coord('air_pressure')
            except CoordinateNotFoundError:
                continue
            if pressure_coord.points[0] > 10000.0:
                pressure_coord.units = 'Pa'
        return cubes
| StarcoderdataPython |
48657 | <reponame>nickswalker/counterpoint-reinforcement-learning
from typing import List, Set
import numpy as np
from rl.action import Action
from rl.state import State
from rl.valuefunction import FeatureExtractor
class PerActionLinearVFA:
    """Linear value-function approximation with one weight vector per action."""

    def __init__(self, num_features, actions: "List[Action]", initial_value=0.0):
        """Create the approximator and initialise every weight to *initial_value*."""
        self.num_features = num_features
        self.actions = actions
        self.weights_per_action = dict()
        # Kept for interface parity with LinearVFA; weights live per action here.
        self.weights = None
        self.reset(initial_value)

    def reset(self, value=0.0):
        """Reinitialise every action's weight vector to *value*.

        Bug fix: *value* was previously ignored and the weights were always
        zeroed regardless of the requested initial value.
        """
        for action in self.actions:
            self.weights_per_action[action] = np.full(self.num_features, value, dtype=float)

    def actionvalue(self, features: np.ndarray, action: "Action") -> float:
        """Return the linear value of *features* under *action*'s weights."""
        return np.dot(self.weightsfor(action), features)

    def statevalue(self, features: List[float]):
        """State values are undefined for a per-action approximator."""
        # Previously raised a bare Exception; NotImplementedError (still an
        # Exception subclass) states the contract explicitly.
        raise NotImplementedError("PerActionLinearVFA defines no state value")

    def bestactions(self, state: "State", extractor: "FeatureExtractor") -> "Set[Action]":
        """Return all actions tied for the highest estimated value in *state*."""
        # The feature vector depends only on the state, so extract it once
        # instead of once per action.
        state_features = extractor.extract(state)
        best_actions = []
        best_value = float("-inf")
        for action in self.actions:
            value = self.actionvalue(state_features, action)
            if value > best_value:
                best_value = value
                best_actions = [action]
            elif value == best_value:
                best_actions.append(action)
        return best_actions

    def weightsfor(self, action: "Action") -> np.ndarray:
        """Return the weight vector currently associated with *action*."""
        return self.weights_per_action[action]

    def updateweightsfor(self, weights: np.ndarray, action: "Action"):
        """Replace the weight vector associated with *action*."""
        self.weights_per_action[action] = weights
class LinearVFA:
    """Linear value-function approximation with a single shared weight vector."""

    def __init__(self, num_features, actions: "List[Action]", initial_value=0.0):
        """Create the approximator and initialise all weights to *initial_value*."""
        self.num_features = num_features
        self.actions = actions
        # reset() performs the (re)initialisation; the previous extra
        # np.zeros assignment here was immediately overwritten.
        self.reset(initial_value)

    def reset(self, value=0.0):
        """Reinitialise the weight vector to *value*.

        Bug fix: *value* was previously ignored and the weights were always
        zeroed regardless of the requested initial value.
        """
        self.weights = np.full(self.num_features, value, dtype=float)

    def value(self, features: np.ndarray) -> float:
        """Return the linear value estimate for *features*."""
        return np.dot(self.weights, features)

    def bestactions(self, state: "State", extractor: "FeatureExtractor") -> "Set[Action]":
        """Return all actions tied for the highest estimated value in *state*.

        Features here depend on both state and action, so extraction stays
        inside the loop.
        """
        best_actions = []
        best_value = float("-inf")
        for action in self.actions:
            phi = extractor.extract(state, action)
            value = self.value(phi)
            if value > best_value:
                best_value = value
                best_actions = [action]
            elif value == best_value:
                best_actions.append(action)
        return best_actions

    def updateweights(self, weights: np.ndarray):
        """Replace the weight vector; its length must match the feature count."""
        assert len(weights) == len(self.weights)
        self.weights = weights
| StarcoderdataPython |
3359728 | <reponame>brian41005/Python-Messenger-Wrapper
import json
import logging
import os
import re
import sys
import time
import unittest
from messenger import login, logout, send
class TestSendMessage(unittest.TestCase):
    """Integration tests for sending personal and group messages."""

    def setUp(self):
        """Log in with the credentials from the test config file."""
        with open('tests/test_config.json') as config_file:
            config = json.load(config_file)
        session_info = login.get_session(config['user'], config['passwd'])
        self.session, self.c_user, self.fb_dtsg = session_info
        group_thread = config['thread'][0]
        self.group_id = group_thread[0]
        self.group_recipient_id = group_thread[1][0]
        self.friend_id = config['thread'][1][0]

    def tearDown(self):
        """End the session created in setUp."""
        logout(self.session, self.fb_dtsg)

    def test_send_personal_msg(self):
        """Send a timestamped message to a single friend thread."""
        body = 'Hello, sad world.[{}]'.format(time.time())
        send.send_msg(self.session, self.c_user, self.fb_dtsg,
                      self.friend_id, body, group=False)

    def test_send_group_msg(self):
        """Send a timestamped message to a group thread."""
        body = 'Hello, sad world.[{}]'.format(time.time())
        send.send_msg(self.session, self.c_user, self.fb_dtsg,
                      self.group_id, body, group=True)

    def test_send_emoji(self):
        """Send an emoji-only message to a group thread."""
        body = '👍'
        send.send_msg(self.session, self.c_user, self.fb_dtsg,
                      self.group_id, body, group=True)
139745 | import unittest
import os
import evacsim.node
import evacsim.edge
import evacsim.disaster
import evacsim.exporter
class TestExporter(unittest.TestCase):
    """Tests functionality in the exporter module. There isn't much to be tested
    here, so it simply tests that a KML file with the proper name is created
    when the export_kml function is called."""

    def test_export_kml(self):
        """Tests the export_kml function"""
        troy = evacsim.node.Node('Troy', 42.727453, -73.691764, 50000, 80000)
        watervliet = evacsim.node.Node('Watervliet', 42.730389, -73.701504, 10000, 15000)
        nodes = {'Troy': troy, 'Watervliet': watervliet}
        edges = [evacsim.edge.Edge(troy, watervliet, 25, 0, 1000)]
        disaster = evacsim.disaster.Disaster('Alfred')
        exporter = evacsim.exporter.Exporter(nodes, edges, disaster, [], 'test.kml')
        exporter.export_kml()
        self.assertTrue(os.path.exists('test.kml'))
        os.remove('test.kml')
| StarcoderdataPython |
3240866 | <filename>LeetCode/0004_Median_of_Two_Sorted_Array.py
class Solution:
    def findMedianSortedArrays(self, nums1, nums2):
        """Return the median of two sorted lists.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float

        At least one list must be non-empty; an IndexError is raised
        otherwise, matching the previous implementation. The duplicated
        three-way merge branching of the original is replaced by a single
        sort: Timsort detects the two pre-sorted runs, so this is
        effectively a linear merge.
        """
        merged = sorted(nums1 + nums2)
        size = len(merged)
        middle = size // 2
        if size % 2:
            # Odd total count: the single middle element is the median.
            return float(merged[middle])
        # Even total count: average the two middle elements.
        return (merged[middle - 1] + merged[middle]) / 2
| StarcoderdataPython |
1772233 | """
Pull an image from a website and save it as a PNG file.
"""
from seleniumbase import BaseCase
class ImageTest(BaseCase):
    """Save page elements as PNG image files."""

    def test_pull_image_from_website(self):
        """Grab the xkcd #1117 comic element and store it as a PNG."""
        self.open("https://xkcd.com/1117/")
        folder = "images_exported"
        file_name = "comicmap.png"
        self.save_element_as_image_file("#comic", file_name, folder)
        print('"%s/%s" has been saved!' % (folder, file_name))

    def test_baidu(self):
        """Save the Baidu logo element."""
        self.open("https://www.baidu.com")
        self.save_element_as_image_file("#lg", "baidu.png", "images_exported")

    def test_baidu1(self):
        """Save the Baidu search box element."""
        self.open("https://www.baidu.com")
        self.save_element_as_image_file("[name='wd']", "baidu1.png", "images_exported")
| StarcoderdataPython |
4800547 | # coding: utf-8
# pylint: disable=no-member, protected-access, unused-import, no-name-in-module
"""Random Number interface of mxnet."""
from __future__ import absolute_import
import ctypes
from .base import _LIB, check_call
from ._ndarray_internal import _sample_uniform as uniform
from ._ndarray_internal import _sample_normal as normal
def seed(seed_state):
    """Seed the random number generators in mxnet.

    This seed will affect behavior of functions in this module,
    as well as results from executors that contains Random number
    such as Dropout operators.

    Parameters
    ----------
    seed_state : int
        The random number seed to set to all devices.

    Raises
    ------
    ValueError
        If *seed_state* is not an int.

    Notes
    -----
    The random number generator of mxnet is by default device specific.
    This means if you set the same seed, the random number sequence
    generated from GPU0 can be different from CPU.
    """
    if not isinstance(seed_state, int):
        # Fixed: the message previously referred to a nonexistent 'sd' parameter.
        raise ValueError('seed_state must be an int')
    seed_state = ctypes.c_int(int(seed_state))
    check_call(_LIB.MXRandomSeed(seed_state))
1724737 | <filename>migrations/versions/7044d2465076_.py
"""empty message
Revision ID: 7044d2465076
Revises:
Create Date: 2018-11-14 01:10:33.897024
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Restored: the stored value was corrupted by credential scrubbing; the real
# ID is given by the module docstring ("Revision ID: 7044d2465076").
revision = '7044d2465076'
down_revision = None  # first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``users`` and ``feature_requests`` tables.

    ``users`` is created first because ``feature_requests.requested_by``
    holds a foreign key into it.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('email', sa.String(), nullable=False),
    sa.Column('hashed_password', sa.Binary(), nullable=False),
    sa.Column('authenticated', sa.Boolean(), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.Column('role', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    op.create_table('feature_requests',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('title', sa.String(), nullable=False),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('client', sa.Enum('client_one', 'client_two', 'client_three', name='clients'), nullable=False),
    sa.Column('client_priority', sa.Integer(), nullable=True),
    sa.Column('assigned_priority', sa.Integer(), nullable=True),
    sa.Column('product_area', sa.Enum('policies', 'billing', 'claims', 'reports', name='productareas'), nullable=False),
    sa.Column('available_on', sa.DateTime(), nullable=True),
    sa.Column('requested_by', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['requested_by'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by :func:`upgrade`.

    ``feature_requests`` is dropped first because it references ``users``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('feature_requests')
    op.drop_table('users')
    # ### end Alembic commands ###
| StarcoderdataPython |
1727610 | """
.. module:: guiclient
:platform: Windows
:synopsis: Instructor GUI tkinter frame
.. moduleauthor:: <NAME>, <NAME>, <NAME>
"""
# Standard Library imports
import tkinter as tk
from tkinter import ttk
from tkinter import Button
from tkinter import Entry
from tkinter import Frame
from tkinter import Label
from tkinter import Radiobutton
from tkinter import StringVar
from tkinter import IntVar
from tkinter import Scale
from tkinter import BooleanVar
# Pip Dependency imports
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Project imports
from transvenous_pacing_gui.signals import Signals
from transvenous_pacing_gui.client import Client
class InstructorGUI(tk.Frame):
    """Instructor GUI frame to be used in the main GUI
    This class contains multiple input widgets for the GUI,
    as well as the Client class used to connect with the
    socket server.
    """
    # Settings: tkinter font specifications shared by all widgets.
    header_1_style = "TkDefaultFont 18 bold"
    header_2_style = "TkDefaultFont 16 bold"
    default_style = "TkDefaultFont 14"

    def __init__(self, parent, *args, **kwargs):
        """Constructor
        Args:
            parent (tk.widget): parent widget to make the frame a child of
            *args: Variable length argument list
            **kwargs: Arbitrary keyword argument list
        """
        ttk.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        # Socket client used to talk to the student GUI.
        self.client = Client(port=25565)
        # ============ GUI Variables ============
        self.host = StringVar(self, value=self.client.get_hostname())
        # Instructor realtime setting variables
        self.hr = IntVar(self, value=80)
        # NOTE(review): threshold is a StringVar initialised with an int while
        # the other numeric settings use IntVar — confirm this is intended.
        self.threshold = StringVar(self, value=20)
        self.hr_paced = IntVar(self, value=80)
        self.pathway_1 = IntVar(self, value=0)
        self.pathway_2 = IntVar(self, value=0)
        # Manual Override variables
        self.position = IntVar(self, value=0)
        self.is_pacing = BooleanVar(self, value=False)
        self.is_pos_overriding = BooleanVar(self, value=False)
        # ============ Main Frame Sides ===========
        # Left side frame
        frame_left = Frame(self, bd=1, relief=tk.SUNKEN)
        frame_left.pack(side=tk.LEFT, padx=10, pady=10)
        Label(frame_left, text="Override Settings", font=self.header_1_style).pack()
        # Middle frame
        frame_mid = Frame(self, bd=1, relief=tk.SUNKEN)
        frame_mid.pack(side=tk.LEFT, padx=10, pady=10)
        Label(frame_mid, text="Real-Time Settings", font=self.header_1_style).pack()
        # Right side frame
        frame_right = Frame(self, bd=1, relief=tk.SUNKEN)
        frame_right.pack(side=tk.RIGHT, padx=10, pady=10)
        Label(frame_right, text="Display Preview", font=self.header_1_style).pack()
        # ============ Position Selection ===============
        frame_position = Frame(frame_left)
        frame_position.pack(pady=5)
        # Sets the position variable to invoke a callback
        self.position.trace('w', self.callback_manual_pos)
        # Radiobutton options for the override (label, position index)
        POSITIONS = [
            ("Superior Vena Cava", 1),
            ("High Right Atrium", 2),
            ("Mid Right Atrium", 3),
            ("Low Right Atrium", 4),
            ("Right Ventricle", 5),
            ("Right Ventricular Wall", 6),
            ("Asystole", 0),
        ]
        Label(frame_position, text="Show Manual Position", font=self.default_style).pack()
        # Creates each radiobutton item
        for button_text, position_value in POSITIONS:
            Radiobutton(frame_position, text=button_text, value=position_value, variable=self.position, font=self.default_style).pack()
        # Creates a toggle button to start/stop the manual override
        self.btn_pos_override = Button(frame_position, text="Start Override", command=self.toggle_pos_override, fg="green", font=self.default_style)
        self.btn_pos_override.pack()
        # ============ Command Sends =============
        frame_command = Frame(frame_left)
        frame_command.pack(pady=5)
        Label(frame_command, text="Commands", font=self.default_style).pack()
        # Button to make the microcontroller recalibrate its sensor values
        btn_recalibrate = Button(frame_command, text="Calibrate Sensors", command=lambda: self.send_command('cal'), fg="green", font=self.default_style)
        btn_recalibrate.pack(side=tk.LEFT)
        # Button to make the student GUI restart the signal at asystole in case it gets stuck
        btn_reset_signal = Button(frame_command, text="Reset Signal (Asystole)", command=lambda: self.send_command('ressig'), fg="green", font=self.default_style)
        btn_reset_signal.pack(side=tk.LEFT)
        # ============ Connection Space ===============
        frame_connection = Frame(frame_mid)
        frame_connection.pack(pady=5)
        # Displays the host machine's IP address for use with the other GUI
        ip_label = "Device IP: {}".format(self.client.get_ip())
        Label(frame_connection, text=ip_label, font=self.default_style).pack()
        Label(frame_connection, text="Hostname", font=self.default_style).pack(side=tk.LEFT)
        # Area to enter the hostname to connect to
        entry_hostname = Entry(frame_connection, textvariable=self.host, font=self.default_style)
        entry_hostname.pack(side=tk.LEFT)
        # Button to start the TCP/IP connection
        btn_connect = Button(frame_connection, text="Connect", command=self.connect, fg="green", font=self.default_style)
        btn_connect.pack(side=tk.LEFT)
        # ============ Customisation Space ===============
        frame_signal = Frame(frame_mid)
        frame_signal.pack(pady=5)
        # Intrinsic heart rate entry area (scale + text entry share self.hr)
        Label(frame_signal, text="Heart Rate", font=self.default_style).grid(row=0, column=0)
        scale_hr = Scale(frame_signal, from_=0, to=100, length=150, variable=self.hr, orient=tk.HORIZONTAL)
        scale_hr.grid(row=0, column=1)
        entry_hr = Entry(frame_signal, textvariable=self.hr, font=self.default_style, width=4)
        entry_hr.grid(row=0, column=2)
        # Pacing threshold entry area
        Label(frame_signal, text="Pacing Threshold", font=self.default_style).grid(row=1, column=0)
        scale_threshold = Scale(frame_signal, from_=0, to=50, length=150, variable=self.threshold, orient=tk.HORIZONTAL)
        scale_threshold.grid(row=1, column=1)
        entry_threshold = Entry(frame_signal, textvariable=self.threshold, font=self.default_style, width=4)
        entry_threshold.grid(row=1, column=2)
        # Paced heart rate entry area
        Label(frame_signal, text="Paced Heart Rate", font=self.default_style).grid(row=2, column=0)
        scale_hr_paced = Scale(frame_signal, from_=0, to=100, length=150, variable=self.hr_paced, orient=tk.HORIZONTAL)
        scale_hr_paced.grid(row=2, column=1)
        entry_hr_paced = Entry(frame_signal, textvariable=self.hr_paced, font=self.default_style, width=4)
        entry_hr_paced.grid(row=2, column=2)
        # Buttons for this area
        frame_signal_buttons = Frame(frame_signal)
        frame_signal_buttons.grid(row=3, columnspan=3)
        # Sends the updated settings (heart rate, pacing threshold, paced heart rate) to the student GUI
        btn_send_customisations = Button(frame_signal_buttons, text="Update ECG", command=self.send_customisations, fg="green", font=self.default_style, pady=5)
        btn_send_customisations.pack(side=tk.LEFT, fill=tk.X)
        # Starts a simulated pacer signal
        self.btn_pacing = Button(frame_signal_buttons, text="Start Pacing", command=self.toggle_pacing, fg="green", font=self.default_style, pady=5)
        self.btn_pacing.pack(side=tk.RIGHT, fill=tk.X)
        # ========== Pathway Selection ==============
        frame_pathway = Frame(frame_mid)
        frame_pathway.pack(pady=5)
        # Sets both pathway variables to invoke a callback
        self.pathway_1.trace('w', self.callback_pathway_1)
        self.pathway_2.trace('w', self.callback_pathway_2)
        # Alternate pathway options; the value 10 is an offset added to the
        # base position index in animate() to select the alternate signal.
        PATHWAYS_1 = [
            ("Low Right Atrium", 0),
            ("Inferior Vena Cava", 10)
        ]
        PATHWAYS_2 = [
            ("Right Ventricular Wall", 0),
            ("Pulmonary Artery", 10)
        ]
        Label(frame_pathway, text="Pathway Selection 1", font=self.header_2_style).pack(pady=5)
        # Create each radiobutton for the first pathway option
        for button_text, pathway_value in PATHWAYS_1:
            Radiobutton(frame_pathway, text=button_text, value=pathway_value, variable=self.pathway_1, font=self.default_style).pack()
        Label(frame_pathway, text="Pathway Selection 2", font=self.header_2_style).pack(pady=5)
        # Create each radiobutton for the second pathway option
        for button_text, pathway_value in PATHWAYS_2:
            Radiobutton(frame_pathway, text=button_text, value=pathway_value, variable=self.pathway_2, font=self.default_style).pack()
        # ======== Display Preview =========
        # Instantiated signals class to generate signals
        self.ecg_signals = Signals()
        # Plot state: accumulated x/y samples and bookkeeping for the
        # beat/flat-span state machine driven by animate().
        self.new_x = [0.0]
        self.new_y = [0.0]
        self.last_x = 0
        self.last_x_lim = 0
        self.position_to_show = 0
        self.variation = 0
        self.flat_span = False
        self.end_flat = 0
        self.flat_span_y = 0
        self.plot_point = 0
        # Creates plotting canvas
        self.fig = plt.Figure(figsize=(10, 4.5), dpi=100,facecolor='k',edgecolor='k')
        canvas = FigureCanvasTkAgg(self.fig, master=frame_right)
        canvas.get_tk_widget().pack()
        # Sets plot customisations
        self.ax = self.fig.add_subplot(111)
        self.ax.set_xlim(self.last_x_lim, 4)
        self.ax.set_ylim(-3.0, 3.0)
        self.ax.set_yticklabels([])
        self.ax.set_xticklabels([])
        self.ax.xaxis.set_tick_params(width=1, top=True)
        self.ax.set_facecolor('black')
        self.line, = self.ax.plot(0, 0)
        self.ax.get_lines()[0].set_color("xkcd:lime")
        # Starts an animated plot for the ECG signal
        self.ani = animation.FuncAnimation(self.fig, self.animate, interval=24, blit=True)

    def animate(self, i):
        """Animation function that is called periodically
        Args:
            i (int): the current frame value (not used)
        Returns:
            line (matplotlib.line): The line to plot with the next value
        """
        # If currently overriding the student GUI, show the display preview
        if self.is_pos_overriding.get():
            # Set the position index value based on which source is responsible for the signal
            position_index = self.position.get()
            # Set initial heart rate to use
            hr_to_use = self.hr.get()
            # Adjust position and heart rate based on alternative pathways and pacer setting
            if position_index == 4:
                position_index = position_index + self.pathway_1.get()
            elif position_index == 6:
                position_index = position_index + self.pathway_2.get()
            # Show the paced signal if pacer override is active.
            # NOTE(review): 16 is position 6 plus the pathway offset 10
            # (pulmonary artery), which is never replaced by the paced signal.
            if not position_index == 16 and self.is_pacing.get():
                position_index = 26
                hr_to_use = self.hr_paced.get()
            else:
                # If no overrides or special settings, just keep the position the same
                position_index = position_index
            # Get the ECG signal values for the corresponding settings
            [x, y] = self.ecg_signals.get_signal(self.ecg_signals.signal_index[position_index], hr_to_use)
            # If not currently traveling between beats
            if not self.flat_span:
                # Set a variable to the potential next value
                x_val = self.last_x + x[self.plot_point]
                # If the potential new value is not going backwards
                if x_val > self.new_x[-1]:
                    # Add the new x and y values to the axis lists
                    self.new_x.append(x_val)
                    self.new_y.append(y[self.plot_point])
                    # Update the line
                    self.line.set_data(self.new_x, self.new_y)  # update the data
                    # If at the end of the beat (beats are 30 samples long)
                    if self.plot_point== 29:
                        # Update where the last x value to build from is
                        self.last_x = self.new_x[-1]
                        # Start plotting for a flat area
                        self.end_flat = (x[-1] - x[-2]) + self.new_x[-1]
                        self.flat_span_y = y[-1]
                        self.flat_span = True
                # Go back to the start of the heart beat if at the end of the beat
                if self.plot_point == 29:
                    self.plot_point = 0
                # Go to the next beat value otherwise
                else:
                    self.plot_point = self.plot_point + 1
            # If current traveling between beats
            else:
                # Add the new x and y values to the axis lists
                self.new_x.append(self.new_x[-1] + 0.05)
                self.new_y.append(self.flat_span_y)
                # Update the line
                self.line.set_data(self.new_x, self.new_y)  # update the data
                # If reached the end of the flat line area between beats
                if self.new_x[-1] >= self.end_flat:
                    # Stop plotting flat
                    self.flat_span = False
                    self.last_x = self.new_x[-1]
            # If at the end of the plotting window
            if self.new_x[-1] >= self.last_x_lim + 5:
                # Shift the plotting window (this is used instead of a reset to allow for future ECG output options)
                self.last_x_lim += 5
                self.ax.set_xlim(self.last_x_lim, self.last_x_lim + 5)
        # Returns the new line to the plot
        return self.line,

    def connect(self):
        """Connects the instructor GUI to the student GUI"""
        # Update the hostname from the entry widget before connecting
        self.client.set_hostname(self.host.get())
        # Actually connect
        self.client.start()

    def send_command(self, message):
        """Sends a message from the instructor GUI to the student GUI
        Args:
            message (str): Message to send through the socket connection
        """
        self.client.send_data(message)

    def send_customisations(self):
        """Sends updated customisation data from instructor GUI to the student GUI"""
        # Command code
        self.client.send_data("update")
        # Data: comma-separated heart rate, threshold, paced heart rate
        self.client.send_data("{},{},{}".format(self.hr.get(), self.threshold.get(), self.hr_paced.get()))

    def toggle_pos_override(self):
        """Sends position override data from the instructor to the student GUI"""
        # Toggle if we are overriding
        self.is_pos_overriding.set(not self.is_pos_overriding.get())
        # If we are now overriding
        if self.is_pos_overriding.get():
            # Start command code
            self.client.send_data("start-pos")
            # Data
            self.client.send_data("%d" % self.position.get())
            # Switch the toggle button
            self.btn_pos_override.config(fg="red", text="Stop Override")
            self.ani.event_source.start()
        # If we are now not overriding
        else:
            # Stop command code
            self.client.send_data("stop-pos")
            # Switch the toggle button
            self.btn_pos_override.config(fg="green", text="Start Override")
            # Stop the preview plotting
            self.ani.event_source.stop()

    def toggle_pacing(self):
        """Toggles whether to be pacing"""
        # Toggles if we are pacing
        self.is_pacing.set(not self.is_pacing.get())
        # If we are now pacing
        if self.is_pacing.get():
            # Start pacing command code
            self.client.send_data("start-pace")
            # Send customisation network update
            self.send_customisations()
            # Switch the toggle button
            self.btn_pacing.config(fg="red", text="Stop Pacing")
        else:
            # Stop pacing command code
            self.client.send_data("stop-pace")
            # Switch the toggle button
            self.btn_pacing.config(fg="green", text="Start Pacing")

    def callback_pathway_1(self, *args):
        """Callback function for when the pathway is switched.
        This will automatically send an updated pathway.
        Args:
            *args: Variable length argument list
        """
        # Command code
        self.client.send_data("chpa1")
        # Data
        self.client.send_data("%d" % self.pathway_1.get())

    def callback_pathway_2(self, *args):
        """Callback function for when the pathway is switched.
        This will automatically send an updated pathway.
        Args:
            *args: Variable length argument list
        """
        # Command code
        self.client.send_data("chpa2")
        # Data
        self.client.send_data("%d" % self.pathway_2.get())

    def callback_manual_pos(self, *args):
        """Callback function for when the pathway is switched.
        This will automatically send an updated pathway.
        Args:
            *args: Variable length argument list
        """
        # If we are actively overriding
        if self.is_pos_overriding.get():
            # Command code
            self.client.send_data("manual-pos")
            # Data
            self.client.send_data("%d" % self.position.get())

    def stop_gui(self):
        """Stops the instructor GUI client"""
        self.client.stop()
| StarcoderdataPython |
1711984 | #!/usr/bin/python
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
Use a SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
from sklearn import svm
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()

# SVM hyper-parameters.
GAMMA = 1000
# can be linear, poly, rbf(default), sigmoid, precomputed, a callable
KERNEL = 'rbf'
C = 100.0

#########################################################
### your code goes here ###
clf = svm.SVC(kernel=KERNEL, gamma=GAMMA, C=C)

# Train on 1% of the data to speed things up. Floor division keeps the
# slice bound an int: len(...)/100 is a float on Python 3 and would raise
# a TypeError when slicing, while // is identical on Python 2.
features_train = features_train[:len(features_train) // 100]
labels_train = labels_train[:len(labels_train) // 100]

t0 = time()
clf.fit(features_train, labels_train)
print("training done in %0.3fs" % (time() - t0))

t0 = time()
print(clf.score(features_test, labels_test))
print("prediction done in %0.3fs" % (time() - t0))
#########################################################
| StarcoderdataPython |
1713379 | <reponame>fossabot/hoard<gh_stars>10-100
import os
from tempfile import TemporaryDirectory
from .hoard_tester import HoardTester
class MissingConfigDirTester(HoardTester):
    """Checks `hoard backup` still works when the config directory is missing."""

    def run_test(self):
        """Run `hoard backup` with XDG_CONFIG_HOME pointed at an empty temp dir.

        The run itself may fail, but it must not fail because the uuid
        file's parent directory does not exist.
        """
        self.reset()
        old_config_path = self.config_file_path()
        with TemporaryDirectory() as tmpdir:
            # NOTE(review): setdefault only takes effect when XDG_CONFIG_HOME
            # is unset; if it was already set, tmpdir is ignored and pop()
            # below removes the pre-existing value — confirm this is intended.
            os.environ.setdefault("XDG_CONFIG_HOME", tmpdir)
            self.args = ["--config-file", old_config_path]
            result = self.run_hoard("backup", allow_failure=True, capture_output=True)
            os.environ.pop("XDG_CONFIG_HOME")
            # Failure mode under test: hoard must not report a missing
            # directory while saving its uuid file, on either stream.
            assert b"error while saving uuid to file" not in result.stderr and b"No such file or directory" not in result.stderr
            assert b"error while saving uuid to file" not in result.stdout and b"No such file or directory" not in result.stdout
| StarcoderdataPython |
3201107 | from __future__ import print_function
import argparse
import os.path
import models.examples as ex
from config import cfg
from generic_op import *
from midap_simulator import *
from midap_software import Compiler, MidapModel
def parse():
    """Build and parse the command-line options for a single-layer run."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('-i', '--input_shape', nargs='+', type=int, required=True)
    add('-oc', '--out_chan', type=int, required=True)
    add('-k', '--kern_info', nargs='+', type=int, required=True)
    add('-l', '--layer_compiler', type=str,
        choices=['MIN_DRAM_ACCESS', 'HIDE_DRAM_LATENCY'],
        default='HIDE_DRAM_LATENCY')
    add('-ib', '--init_banks', type=int, default=0)
    add('-b', '--bus_policy', type=str,
        choices=['WMEM_FIRST', 'FIFO'], default='WMEM_FIRST')
    add('-o', '--output_dir', type=str, default=None)
    add('-da', '--disable_abstract_layer', action="store_true", default=False)
    add('-f', '--fmem_entries', type=int, default=256)
    add('-nb', '--num_banks', type=int, default=4)
    add('--latency', type=int, default=100)
    add('--bandwidth', type=int, default=32)
    return parser.parse_args()
class TestWrapper(object):
    """Drives the MIDAP pipeline: model setup -> compile -> simulate.

    step_checker tracks the three stages as [setup_done, compiled,
    simulated]; completing an earlier stage invalidates later ones.
    """
    def __init__(self):
        self.cv = GenericConvertor()
        self.midap_model = MidapModel()
        self.cm = Compiler()
        self.midap_simulator = MidapManager()
        self.step_checker = [0, 0, 0]
    def setup_from_builder(self, builder):
        # Convert the builder's operator dict and load it into the model.
        odict = builder.get_operator_dict()
        self.cv.operator_dict = odict
        self.cv.post_process()
        self.midap_model.from_generic_op_dict(odict)
        self.step_checker[0] = 1
        # A previous compilation is stale once the model changes.
        if self.step_checker[1] > 0:
            del self.cm
            self.cm = Compiler()
            self.step_checker[1] = 0
    def compile(self, num_init_banks):
        # Guard: requires setup_from_builder to have run first.
        if self.step_checker[0] == 0:
            print("Please setup the model first")
            return
        self.cm.force_setup(num_init_banks)
        static_info = self.cm.compile(self.midap_model)
        self.step_checker[1] = 1
        # A previous simulation is stale once we recompile.
        if self.step_checker[2] > 0:
            del self.midap_simulator
            self.midap_simulator = MidapManager()
            self.step_checker[2] = 0
        return static_info
    def simulate(self):
        # Guards: both setup and compile must have completed.
        if self.step_checker[0] == 0:
            print("Please setup the model first")
            return
        elif self.step_checker[1] == 0:
            print("Please run compile")
            return
        input_tensor_list, path_info = self.cm.control_info
        init_layer_list = self.midap_model.init_layer
        _ = self.midap_simulator.process_network_with_multiple_input(input_tensor_list, init_layer_list, path_info)
        self.step_checker[2] = 1
        return path_info
    def run_all(self, model, output_dir=None, output_option=(True, False, False, False)):
        # NOTE(review): this method looks broken as written:
        #  - self.logger is never defined anywhere in this class
        #    (AttributeError on first use);
        #  - self.compile() omits the required num_init_banks argument;
        #  - simulate() returns a single value (path_info), not the
        #    (sim_instruction, stat) pair unpacked below.
        # Left untouched pending clarification of the intended API.
        self.__init__()
        self.setup_from_builder(model)
        model = model.name
        self.logger.info("[ {} ]".format(model))
        _ = self.compile()
        sim_instruction, stat = self.simulate()
        diff, latency, feature_dram, weight_dram = stat
        # print("check stat(Checking info) of network {}: {}".format(model ,stat), file=sys.stderr)
        if diff > 0:
            self.logger.error(
                "Network Result Diff > 0: Functional Problem may occur, network {}".format(model))
        self.midap_simulator.stats.print_result(sim_instruction.processing_order, model)
args = parse()
# Push the CLI options into the global MIDAP configuration.
cfg.MIDAP.CONTROL_STRATEGY.LAYER_COMPILER = args.layer_compiler
cfg.MIDAP.BUS_POLICY = args.bus_policy
cfg.MODEL.ALLOW_ABSTRACT_DATA = not args.disable_abstract_layer
cfg.MODEL.REDUCTION_LOGIC = True
# Configuration
cfg.MIDAP.SYSTEM_WIDTH = 64
# cfg.MIDAP.FMEM.SIZE = 256 * 1024
cfg.MIDAP.FMEM.NUM_ENTRIES = args.fmem_entries * 1024
cfg.MIDAP.FMEM.NUM = args.num_banks
cfg.SYSTEM.BANDWIDTH = args.bandwidth # GB ( * 10^9 byte) / s
cfg.LATENCY.DRAM_READ = args.latency
output_dir = args.output_dir
tr = TestWrapper()
mb = ex.one_layer_example(args.input_shape, args.out_chan, args.kern_info)
# NOTE(review): this call does not match TestWrapper.run_all(self, model,
# output_dir=None, output_option=...): it passes three positional args
# ("custom", mb, args.init_banks) to a method accepting only one, and
# run_all expects the builder (mb), not a name, as its first argument.
tr.run_all("custom", mb, args.init_banks, output_dir=output_dir)
| StarcoderdataPython |
3331198 | from math import log10
class QueryDictionary:
    """Keeps per-query word occurrence counts.

    Attributes
    ----------
    queries : dict[int, dict[str, int]]
        Maps a query number to a mapping of words to their number of
        occurrences within that query.
    total_queries : int
        Number of distinct queries seen so far.
    """

    def __init__(self):
        self.queries = {}
        self.total_queries = 0

    def createIfNotExists(self, word: str, query_num: int):
        """Register one occurrence of *word* in query *query_num*.

        Creates the query entry on first use and increments the total
        query counter accordingly.

        Parameters
        ----------
        word : str
            The word to be inserted.
        query_num : int
            The number of the query to check.
        """
        if query_num not in self.queries:
            self.queries[query_num] = {}
            self.total_queries += 1
        counts = self.queries[query_num]
        counts[word] = counts.get(word, 0) + 1

    def printDictionary(self):
        """Dump every query id with its word-count mapping to stdout."""
        for query_id, counts in self.queries.items():
            print(f"{query_id} -- {counts}")
class Word:
    """Document-frequency record for a single term.

    Attributes
    ----------
    docs : dict[int, int]
        Maps a document number to how many times the term occurs in it.
    idf : Callable[[int], float]
        Inverse document frequency log10(N / df), truncated to three
        decimals, where N is the corpus size and df is ``length_docs``.
    length_docs : int
        Number of distinct documents containing the term.
    """

    def __init__(self):
        self.docs = {}
        self.length_docs = 0
        # Truncates (not rounds) to three decimal places, matching the
        # int(x * 1000) / 1000 formulation.
        self.idf = lambda N: int(log10(N / self.length_docs) * 1000) / 1000

    def insertDoc(self, doc_num: int):
        """Count one occurrence of the term in document *doc_num*.

        A first occurrence in a new document also bumps ``length_docs``.

        Parameters
        ----------
        doc_num : int
            The number of the document to record.
        """
        if doc_num in self.docs:
            self.docs[doc_num] += 1
        else:
            self.docs[doc_num] = 1
            self.length_docs += 1
class WordsDictionary:
    """
    A class used to represent a dictionary of words.

    Attributes
    ----------
    words : {str : Word}
        A dictionary of words, where the key is a term and the values
        are objects of the class Word.
    id_documents : {int}
        Set of document ids seen so far.
    total_documents : int
        The total number of distinct documents seen.
    total_words : int
        The total number of distinct words.

    Methods
    -------
    createIfNotExists(word: str, doc_num: int)
        Inserts the word in the dictionary if it doesn't exist and
        records the document where it appears.
    """
    def __init__(self):
        self.words = {}
        self.id_documents = set()
        self.total_documents = 0
        self.total_words = 0
    def createIfNotExists(self, word: str, doc_num: int):
        """
        Ensure *word* exists in the dictionary, then count its
        occurrence in document *doc_num*.

        Parameters
        ----------
        word : str
            The term to record.
        doc_num : int
            The number of the document where the term occurred.
        """
        if word not in self.words:
            self.words[word] = Word()
            self.total_words += 1
        self.words[word].insertDoc(doc_num)
        self.id_documents.add(doc_num)
        # BUG FIX: total_documents was initialised to 0 but never updated,
        # so it always read 0; keep it in sync with the id set.
        self.total_documents = len(self.id_documents)
    def printDictionary(self):
        """Dump every term with its per-document counts to stdout."""
        for word, class_word in self.words.items():
            print(f"{word}--{class_word.docs}")
class RelevancesDictionary:
    '''
    Relevance judgements (qrels): query number -> set of relevant doc ids.

    Used to compute:
      Precision = |{Relevant} ∩ {Retrieved}| / |{Retrieved}|
      Recall    = |{Relevant} ∩ {Retrieved}| / |{Relevant}|
    '''

    def __init__(self):
        # {query_num: {doc_id, ...}}
        self.qrels = {}

    def insertDoc(self, query_num: int, relDoc: int):
        '''Mark document *relDoc* as relevant for query *query_num*.'''
        self.qrels.setdefault(query_num, set()).add(relDoc)

    def printDictionary(self):
        '''Print every query with its full set of relevant documents.'''
        print("Relevances dictionary: ")
        for query_id, docs in self.qrels.items():
            print(f"{query_id} --> {docs}")

    def printDictionaryRange(self, lowerLimit: int, upperLimit: int):
        '''Print and return the qrels for queries in [lowerLimit, upperLimit).

        Raises KeyError if a query in the range has no judgements.
        '''
        print("Relevances dictionary: ")
        relevants = {i: self.qrels[i] for i in range(lowerLimit, upperLimit)}
        print(relevants)
        return relevants
| StarcoderdataPython |
1787073 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch
import numpy as np
from PIL import Image
import json
import argparse
import model_factory
from torch.autograd import Variable
def main():
    """CLI entry point: restore the trained model from a checkpoint and
    print the top-k class names with their probabilities for one image."""
    image_path, checkpoint, top_k, category_names, gpu = get_input_args()
    # category_names maps class ids to human-readable flower names.
    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)
    model = model_factory.load_trained_model(checkpoint)
    # Invert the class->index mapping saved during training.
    idx_to_class = {i:k for k, i in model.class_to_idx.items()}
    probs, classes = predict(image_path, model, idx_to_class, top_k, gpu)
    for prob, classe in zip(probs, classes):
        print(cat_to_name[classe], prob)
def get_input_args():
    """Parse command-line arguments for prediction.

    Returns
    -------
    tuple
        (image_path, checkpoint, top_k, category_names, gpu)
    """
    parser = argparse.ArgumentParser(description='description')
    parser.add_argument('image_path',
                        metavar='image_path',
                        type=str,
                        help='Image path')
    # FIX: a positional argument needs nargs='?' for its default to be
    # usable; without it argparse treats 'checkpoint' as required and the
    # default below was dead code.
    parser.add_argument('checkpoint',
                        metavar='checkpoint',
                        nargs='?',
                        default='./cli_checkpoint.pth',
                        type=str,
                        help='checkpoint')
    parser.add_argument('--top_k',
                        type=int,
                        default=5,
                        help='Top k most likely classes')
    parser.add_argument('--category_names',
                        type=str,
                        default='./cat_to_name.json',
                        help='Mapping of categories to real names')
    parser.add_argument('--gpu',
                        action='store_true',
                        help='Use gpu')
    args = parser.parse_args()

    # Downgrade to CPU when CUDA is unavailable; hint when it is unused.
    if args.gpu and not torch.cuda.is_available():
        args.gpu = False
        print("GPU is not available")
    if not args.gpu and torch.cuda.is_available():
        print("GPU available. You should use it")

    return args.image_path, args.checkpoint, args.top_k, args.category_names, args.gpu
def predict(image_path, model, idx_to_class, topk, cuda):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Parameters: image_path (str), model (torch module), idx_to_class
    (dict: output index -> class id), topk (int), cuda (bool).
    Returns (topk_prob, topk_class) ordered from most to least likely.
    '''
    image = Image.open(image_path)
    image = process_image(image)
    # Wrap in a list to add the batch dimension -> (1, 3, 224, 224).
    image = torch.FloatTensor([image])
    if cuda:
        model.cuda()
        image = image.cuda()
    model.eval()
    # NOTE(review): model.forward() bypasses hooks; model(image) is the
    # conventional call. Variable is a deprecated no-op on modern torch.
    output = model.forward(Variable(image))
    # top predictions
    # The model presumably outputs log-probabilities, so exp() recovers
    # probabilities -- TODO confirm against the training loss.
    if cuda:
        all_probs = torch.exp(output).data.cpu().numpy()[0]
    else:
        all_probs = torch.exp(output).data.numpy()[0]
    topk_index = np.argsort(all_probs)[-topk:][::-1]
    topk_class = [idx_to_class[x] for x in topk_index]
    topk_prob = all_probs[topk_index]
    return topk_prob, topk_class
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model.

    Returns a numpy array of shape (3, 224, 224), normalized with the
    ImageNet mean/std and ordered channels-first.
    '''
    # Resize so the SHORTEST side becomes 256, keeping the aspect ratio.
    # FIX: the original computed the landscape branch with the inverted
    # ratio (256 * height / width instead of 256 * width / height) and
    # raised NameError for square images (neither branch taken).
    width, height = image.size
    if width <= height:
        # portrait or square: width is the shortest side
        new_width = 256
        new_height = int(np.floor(256 * height / width))
    else:
        # landscape: height is the shortest side
        new_width = int(np.floor(256 * width / height))
        new_height = 256
    image = image.resize((new_width, new_height))

    # Crop out the center 224x224 patch.
    left = (new_width - 224) / 2
    top = (new_height - 224) / 2
    right = (new_width + 224) / 2
    bottom = (new_height + 224) / 2
    image = image.crop((left, top, right, bottom))

    # Scale to [0, 1] and normalize with the ImageNet statistics.
    image = np.array(image)
    image = image / 255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = (image - mean) / std

    # Reorder to channels-first (C, H, W) as PyTorch expects.
    image = image.transpose(2, 0, 1)
    return image
# Call to main function to run the program
if __name__ == "__main__":
main()
| StarcoderdataPython |
1667396 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2019/10/17 11:14
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : <EMAIL>
-------------------------------------------------
Description : 1003. 检查替换后的词是否有效 显示英文描述
用户通过次数245
用户尝试次数273
通过次数249
提交次数500
题目难度Medium
给定有效字符串 "abc"。
对于任何有效的字符串 V,我们可以将 V 分成两个部分 X 和 Y,使得 X + Y(X 与 Y 连接)等于 V。(X 或 Y 可以为空。)那么,X + "abc" + Y 也同样是有效的。
例如,如果 S = "abc",则有效字符串的示例是:"abc","aabcbc","abcabc","abcabcababcc"。无效字符串的示例是:"abccba","ab","cababc","bac"。
如果给定字符串 S 有效,则返回 true;否则,返回 false。
示例 1:
输入:"aabcbc"
输出:true
解释:
从有效字符串 "abc" 开始。
然后我们可以在 "a" 和 "bc" 之间插入另一个 "abc",产生 "a" + "abc" + "bc",即 "aabcbc"。
示例 2:
输入:"abcabcababcc"
输出:true
解释:
"abcabcabc" 是有效的,它可以视作在原串后连续插入 "abc"。
然后我们可以在最后一个字母之前插入 "abc",产生 "abcabcab" + "abc" + "c",即 "abcabcababcc"。
示例 3:
输入:"abccba"
输出:false
示例 4:
输入:"cababc"
输出:false
提示:
1 <= S.length <= 20000
S[i] 为 'a'、'b'、或 'c'
-------------------------------------------------
"""
import time
import re
from typing import List
__author__ = 'Max_Pengjb'
start_time = time.time()
# The solution code block goes below (translated from Chinese).
class Solution:
    """LeetCode 1003: a string is valid iff it can be reduced to "" by
    repeatedly removing the substring "abc"."""

    def isValid(self, S: str) -> bool:
        """Return True if S can be built by repeated insertions of "abc".

        Uses a stack in O(n) time/space instead of repeatedly re-splitting
        the whole string (the original re.split loop was O(n^2) worst case).
        """
        # A valid string is a whole number of "abc" triples.
        if len(S) % 3 != 0:
            return False
        stack = []
        for ch in S:
            if ch == 'c':
                # 'c' must complete an "abc" whose "ab" is on the stack.
                if stack[-2:] != ['a', 'b']:
                    return False
                del stack[-2:]
            else:
                stack.append(ch)
        # Every character must have been consumed by some "abc".
        return not stack
# Quick self-check: "aaabcbcbc" reduces to "" ("abc" removed 3x) -> True.
s = "aaabcbcbc"
res = Solution().isValid(s)
print(res)
# End of the code block (translated from Chinese).
end_time = time.time()
print('Running time: %s Seconds' % (end_time - start_time))
| StarcoderdataPython |
3342844 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide tools for executing Selenium tests.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
# Bokeh imports
from bokeh.models import Button
from bokeh.util.serialization import make_id
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def COUNT(key):
    """Return JS source that bumps the Bokeh test counter named *key*."""
    return "Bokeh._testing.count(%r);" % (key,)
INIT = 'Bokeh._testing.init();'
def RECORD(key, value):
    """Return JS source that records the expression *value* under *key*."""
    return "Bokeh._testing.record(%r, %s);" % (key, value)
RESULTS = 'return Bokeh._testing.results'
def SCROLL(amt):
    """Return JS that dispatches a wheel event with the given deltaY on
    the Bokeh canvas events layer."""
    template = """
    var elt = document.getElementsByClassName("bk-canvas-events")[0];
    var event = new WheelEvent('wheel', { deltaY: %f, clientX: 100, clientY: 100} );
    elt.dispatchEvent(event);
    """
    return template % amt
class ButtonWrapper(object):
    """Wraps a bokeh Button with a unique CSS class so Selenium can find
    and click it in the rendered page."""
    def __init__(self, label, callback):
        # Unique class name keeps multiple buttons distinguishable.
        self.id = "button-" + make_id()
        self.obj = Button(label=label, css_classes=[self.id])
        self.obj.js_on_event('button_click', callback)
    def click(self, driver):
        # Locate the rendered button by its CSS class and click it.
        button = driver.find_element_by_class_name(self.id)
        button.click()
class element_to_start_resizing(object):
    ''' Selenium wait condition: becomes true once the element's width
    differs from the width captured at construction time.
    '''

    def __init__(self, element):
        self.element = element
        self.previous_width = element.size['width']

    def __call__(self, driver):
        width_now = self.element.size['width']
        if width_now == self.previous_width:
            # No change yet; keep tracking the latest width.
            self.previous_width = width_now
            return False
        return True
class element_to_finish_resizing(object):
    ''' Selenium wait condition: becomes true once the element's width is
    unchanged between two consecutive polls (resize has settled).
    '''

    def __init__(self, element):
        self.element = element
        self.previous_width = element.size['width']

    def __call__(self, driver):
        width_now = self.element.size['width']
        if width_now != self.previous_width:
            # Still resizing; remember the new width for the next poll.
            self.previous_width = width_now
            return False
        return True
def enter_text_in_element(driver, element, text, click=1, enter=True):
    """Click (or double-click) *element*, then type *text*.

    click: 1 = single click, 2 = double click, anything else = no click.
    enter: when True, append the ENTER key after the text.
    """
    actions = ActionChains(driver)
    actions.move_to_element(element)
    if click == 1: actions.click()
    elif click == 2: actions.double_click()
    if enter:
        # \ue007 is the WebDriver key code for ENTER.
        text += u"\ue007" # After the backslash is ENTER key
    actions.send_keys(text)
    actions.perform()
def enter_text_in_cell(driver, cell, text):
    """Double-click a table cell and type *text* followed by ENTER.

    Consistency: this duplicated the body of enter_text_in_element with
    click=2; delegate to it so the two helpers stay in sync.
    """
    enter_text_in_element(driver, cell, text, click=2)
def get_table_cell(driver, row, col):
    # Rows/cols are 1-based; SlickGrid marks column n with CSS class .rn.
    return driver.find_element_by_css_selector('.grid-canvas .slick-row:nth-child(%d) .r%d' % (row, col))
def wait_for_canvas_resize(canvas, test_driver):
    ''' Wait (up to ~1s per phase) for *canvas* to start and then finish
    resizing; tolerate the case where no resize is ever observed.
    '''
    try:
        wait = WebDriverWait(test_driver, 1)
        wait.until(element_to_start_resizing(canvas))
        wait.until(element_to_finish_resizing(canvas))
    except TimeoutException:
        # Resize may or may not happen instantaneously,
        # Put the waits in to give some time, but allow test to
        # try and process.
        pass
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| StarcoderdataPython |
181472 | <reponame>dr4ke616/notifier<gh_stars>1-10
from jinja2 import Environment, PackageLoader
class Render(object):
    """Renders Jinja2 templates bundled with the ``notifier`` package."""

    # Class-level default; the constructor may override it per instance.
    template = 'email.tpl'

    def __init__(self, template=None):
        if template:
            self.template = template
        self.env = Environment(loader=PackageLoader('notifier', 'templates'))

    def execute(self, **kwargs):
        """Render the configured template with *kwargs* as the context."""
        return self.env.get_template(self.template).render(**kwargs)
| StarcoderdataPython |
91536 | from gozokia.core.rules import RuleBase
class Bar(RuleBase):
    """Demo rule: replies 'bar' when the incoming sentence is 'foo'."""
    def __init__(self):
        # Non-reloading: the rule does not re-trigger after completing.
        self.set_reload(False)
    def condition_raise(self, *args, **kwargs):
        # Trigger only on the exact sentence "foo" (case-insensitive).
        super(Bar, self).condition_raise(*args, **kwargs)
        if self.sentence.lower() == 'foo':
            return True
    def condition_completed(self, *args, **kwargs):
        # Complete immediately after firing once.
        self.set_completed()
    def response(self, *args, **kwargs):
        self.response_output = 'bar'
class BarSecond(RuleBase):
    """Second demo rule: also fires on 'foo' but replies 'bar second'."""
    def __init__(self):
        # Non-reloading: the rule does not re-trigger after completing.
        self.set_reload(False)
    def condition_raise(self, *args, **kwargs):
        # Trigger only on the exact sentence "foo" (case-insensitive).
        super(BarSecond, self).condition_raise(*args, **kwargs)
        if self.sentence.lower() == 'foo':
            return True
    def condition_completed(self, *args, **kwargs):
        # Complete immediately after firing once.
        self.set_completed()
    def response(self, *args, **kwargs):
        self.response_output = 'bar second'
| StarcoderdataPython |
1751706 | <reponame>namixoi/NamixDH
import socket
import struct
import time
import logging
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
LOGIN_TEMPLATE = b'\xa0\x00\x00\x60%b\x00\x00\x00%b%b%b%b\x04\x01\x00\x00\x00\x00\xa1\xaa%b&&%b\x00Random:%b\r\n\r\n'
GET_SERIAL = b'\xa4\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00'
GET_CHANNELS = b'\xa8\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00'
GET_SNAPSHOT = b'\x11\x00\x00\x00(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\n\x00\x00\x00%b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00%b\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
JPEG_GARBAGE1 = b'\x0a%b\x00\x00\x0a\x00\x00\x00'
JPEG_GARBAGE2 = b'\xbc\x00\x00\x00\x00\x80\x00\x00%b'
TIMEOUT = 1
request_text_name = {
'url': 'http://{}:{}/cgi-bin/magicBox.cgi?action=getMachineName',
'fields': {'name': 'machine_name'}
}
request_text_config = {
'url' :'http://{}:{}/cgi-bin/configManager.cgi?action=getConfig&name=General',
'fields': {'table.General.LocalNo': 'local_no', 'table.General.MachineAddress': 'address', 'table.General.MachineName': 'general_machine_name'}
}
request_snapshot = {
'url': 'http://{}:{}/cgi-bin/snapshot.cgi?channel=1',
'fields': {'snapshot': 'snapshot'}
}
request_type = {
'url': 'http://{}:{}/cgi-bin/magicBox.cgi?action=getDeviceType',
'fields': {'type': 'type'}
}
request_serial = {
'url': 'http://{}:{}/cgi-bin/magicBox.cgi?action=getSerialNo',
'fields': {'sn': 'serial_no'}
}
request_hardware = {
'url': 'http://{}:{}/cgi-bin/magicBox.cgi?action=getHardwareVersion',
'fields': {'version': 'hw_version'}
}
request_software = {
'url': 'http://{}:{}/cgi-bin/magicBox.cgi?action=getSoftwareVersion',
'fields': {'version': 'sw_version'}
}
request_builddate = {
'url': 'http://{}:{}/cgi-bin/magicBox.cgi?action=getBuildDate',
'fields': {'builddate': 'build_date'}
}
request_system = {
'url': 'http://{}:{}/cgi-bin/magicBox.cgi?action=getSystemInfo',
'fields': {'serialNumber': 'system_serial_no', 'deviceType': 'system_type', 'hardwareVersion': 'system_hw_version', 'processor': 'system_processor', 'appAutoStart': 'system_appstart'}
}
request_ptz_list = {
'url': 'http://{}:{}/cgi-bin/ptz.cgi?action=getProtocolList',
'fields': {'result': 'ptz'}
}
request_vendor = {
'url': 'http://{}:{}/cgi-bin/magicBox.cgi?action=getVendor',
'fields': {'vendor': 'vendor'}
}
request_interfaces = {
'url': 'http://{}:{}/cgi-bin/netApp.cgi?action=getInterfaces',
'fields': {'netInterface[0].Name': 'interface1', 'netInterface[0].Type': 'iftype1',
'netInterface[1].Name': 'interface2', 'netInterface[1].Type': 'iftype2',
'netInterface[2].Name': 'interface3', 'netInterface[2].Type': 'iftype3',
'netInterface[3].Name': 'interface4', 'netInterface[3].Type': 'iftype4',}
}
HTTP_API_REQUESTS = [request_snapshot, request_text_name, request_vendor, request_text_config,request_type, request_serial, request_hardware, request_software, request_system, request_builddate, request_ptz_list, request_interfaces]
HTTP_PORTS = [80, 8080, 81, 88, 8081, 82, 8000, 83, 9000, 8088, 8082, 8888, 8083, 8084, 9080, 9999, 84]
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')
class DahuaController:
    """Minimal client for the Dahua DVR/NVR binary protocol (TCP 37777).

    After construction, ``status`` reports the login outcome:
      0 = logged in, 1 = credentials rejected, 2 = account blocked,
      -1 = unexpected reply. All methods are deliberately best-effort:
    network/parse failures are swallowed and surface as None/defaults
    (this class is used for mass scanning).
    """
    def __init__(self, ip, port, login, password):
        try:
            self.serial = ''
            self.channels_count = -1
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.settimeout(TIMEOUT)
            self.socket.connect((ip, port))
            # Fill the login frame: packed length byte, NUL-padded
            # credentials, then the plaintext pair and a time-based nonce.
            self.socket.send(LOGIN_TEMPLATE % (struct.pack('b', 24 + len(login) + len(password)), login.encode('ascii'),
                                               (8 - len(login)) * b'\x00', password.encode('ascii'),
                                               (8 - len(password)) * b'\x00', login.encode('ascii'),
                                               password.encode('ascii'), str(int(time.time())).encode('ascii')))
            data = self.socket.recv(128)
            if data[8] == 1:
                # FIX: the original assigned status 2 and then
                # unconditionally overwrote it with 1, making the
                # "account blocked" state (sub-code 4) unreachable.
                if data[9] == 4:
                    self.status = 2
                else:
                    self.status = 1
            elif data[8] == 0:
                self.status = 0
            else:
                self.status = -1
            if self.status == 0:
                # Logged in: fetch the serial number and channel count.
                self.socket.send(GET_SERIAL)
                self.serial = self.receive_msg().split(b'\x00')[0].decode('ascii')
                if self.serial == '':
                    self.serial = '<unknown>'
                self.get_channels_count()
        except Exception as e:
            # Best-effort: a failure leaves the object partially set up.
            # print(e, '__init__ ')
            pass

    def get_seriall(self):
        """Fetch the serial number over the existing session; None on error."""
        try:
            self.socket.send(GET_SERIAL)
            self.serialll = self.receive_msg().split(b'\x00')[0].decode('ascii')
            return self.serialll
        except Exception as e:
            # print(e, 'get_seriall ')
            pass

    def get_channels_count(self):
        """Query the channel-name list; count = '&&' separators + 1."""
        try:
            self.socket.send(GET_CHANNELS)
            channels = self.receive_msg()
            self.channels_count = channels.count(b'&&') + 1
            return self.channels_count
        except Exception as e:
            # print(e, 'get_channels_count')
            pass

    def receive_msg(self):
        """Read one frame: 32-byte header whose little-endian uint16 at
        offset 4 gives the payload length; returns the payload or None."""
        try:
            try:
                header = self.socket.recv(32)
                length = struct.unpack('<H', header[4:6])[0]
            except struct.error:
                # Short header -- let the outer handler swallow it.
                raise
            data = self.socket.recv(length)
            return data
        except Exception as e:
            # print(e, 'receive_msg')
            pass

    def get_snapshot(self, channel_id):
        """Request a JPEG snapshot for *channel_id*; returns the image
        bytes, or None when the request or reception fails."""
        # FIX: 'data' was unbound (NameError at 'return data') when the
        # request failed before any bytes were received.
        data = None
        try:
            channel_id = struct.pack('B', channel_id)
            self.socket.send(GET_SNAPSHOT % (channel_id, channel_id))
            # Snapshots can take longer than the control-channel timeout.
            self.socket.settimeout(3)
            data = self.receive_msg_2(channel_id)
            self.socket.settimeout(TIMEOUT)
        except Exception as e:
            # print(e, 'get_snapshot')
            pass
        return data

    def receive_msg_2(self, c_id):
        """Receive a JPEG stream, stripping the in-band protocol headers
        the device interleaves with the image; returns bytes or None."""
        try:
            garbage = JPEG_GARBAGE1 % c_id
            garbage2 = JPEG_GARBAGE2 % c_id
            data = b''
            i = 0
            while True:
                buf = self.socket.recv(1460)
                if i == 0:
                    # Drop the 32-byte frame header of the first chunk.
                    buf = buf[32:]
                data += buf
                # \xff\xd9 is the JPEG end-of-image marker.
                if b'\xff\xd9' in data:
                    break
                i += 1
            # Strip interleaved per-chunk headers (24 bytes + marker).
            while garbage in data:
                t_start = data.find(garbage)
                t_end = t_start + len(garbage)
                t_start -= 24
                trash = data[t_start:t_end]
                data = data.replace(trash, b'')
            while garbage2 in data:
                t_start = data.find(garbage2)
                t_end = t_start + 32
                trash = data[t_start:t_end]
                data = data.replace(trash, b'')
            return data
        except Exception as e:
            # print(e, 'receive_msg_2')
            pass
| StarcoderdataPython |
# Evaluate the trained model on the held-out test images.
data_dir_test = data_dir + 'test/'
N_test = len(os.listdir(data_dir_test + "/test"))

# Only rescaling for the test set -- no augmentation.
test_datagen = kpi.ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(
    data_dir_test,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)

test_prediction = model_VGG_LastConv_fcm.predict_generator(test_generator, N_test // batch_size)

# Show the first nine test images with their predicted class probability.
images_test = [data_dir_test + "/test/" + k for k in os.listdir(data_dir_test + "/test")][:9]
x_test = [kpi.img_to_array(kpi.load_img(image_test)) / 255 for image_test in images_test]

fig = plt.figure(figsize=(10, 10))
for k in range(9):
    ax = fig.add_subplot(3, 3, k + 1)
    ax.imshow(x_test[k], interpolation='nearest')
    pred = test_prediction[k]
    # FIX: corrected the misspelled "Probabiliy" in the displayed titles.
    if pred > 0.5:
        title = "Probability for dog : %.1f" % (pred * 100)
    else:
        title = "Probability for cat : %.1f" % ((1 - pred) * 100)
    ax.set_title(title)
plt.show()
27479 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import UserProfile
class UserRegistrationForm(UserCreationForm):
    """Registration form that hides Django's default help texts and stores
    the submitted email address on save."""

    def __init__(self, *args, **kwargs):
        super(UserRegistrationForm, self).__init__(*args, **kwargs)
        # Suppress the verbose default help texts on the sign-up page.
        self.fields['username'].help_text = ''
        self.fields['password1'].help_text = ''
        self.fields['password2'].help_text = ''

    class Meta:
        model = User
        fields = (
            'username',
            'email',
            'password1',
            'password2'
        )

    def save(self):
        """Create, persist, and return the new user.

        FIX: the password was looked up under a '<PASSWORD>' placeholder
        key (a redaction artifact) instead of the real 'password1' field,
        which would raise KeyError at runtime.
        """
        user = User.objects.create_user(username=self.cleaned_data['username'],
                                        password=self.cleaned_data['password1'])
        user.email = self.cleaned_data['email']
        user.save()
        return user
class UserProfileForm(forms.ModelForm):
    # Form for setting a user's profile picture and bio.
    class Meta:
        model = UserProfile
        fields = ('profile_pic', 'bio')
class ProfileEditForm(forms.ModelForm):
    # Form for editing an existing profile.
    # NOTE(review): functionally identical to UserProfileForm (same model
    # and fields, list vs tuple) -- consider consolidating the two.
    class Meta:
        model = UserProfile
        fields = ['profile_pic', 'bio']
70980 | from functools import wraps
from copy import copy
from collections import OrderedDict
from ..commands import InterfaceObj
from ..core.objects.memory import StackObj, RegObj
def BeforeParse(f):
    """Decorator: forbid calling *f* once the Environment is parsed."""
    @wraps(f)
    def guard(inst, *args, **kwargs):
        # noinspection PyProtectedMember
        if inst._parsed is True:
            # TODO: Create Environment Exception
            raise Exception("Only before Parse")
        return f(inst, *args, **kwargs)
    return guard
def BeforePrecompile(f):
    """Decorator: forbid calling *f* once the Environment is precompiled."""
    @wraps(f)
    def guard(inst, *args, **kwargs):
        # noinspection PyProtectedMember
        if inst._precompiled is True:
            # TODO: Create Environment Exception
            raise Exception("Only before Precompile")
        return f(inst, *args, **kwargs)
    return guard
def BeforeCompile(f):
    """Decorator: forbid calling *f* once the Environment is compiled."""
    @wraps(f)
    def guard(inst, *args, **kwargs):
        # noinspection PyProtectedMember
        if inst._compiled is True:
            # TODO: Create Environment Exception
            raise Exception("Only before Compile")
        return f(inst, *args, **kwargs)
    return guard
class Environment:
    """
    Environment Object used for the MurPy operations.

    Lifecycle: Parse() -> Precompile() -> Compile(); the Before* decorators
    enforce that each mutation happens in the right phase.
    """
    def __init__(self):
        """Create a new Environment independent instance."""
        self._code = None
        self.PseudoCode = []  # Container of the operations to execute
        self._StackColl = OrderedDict()  # Container of the StackObj instances
        self._RegistryColl = OrderedDict()  # Container of the RegObj instances
        self.RoutineDict = {}
        self._parsed = False
        self._precompiled = False
        self._compiled = False
    @property
    def StackColl(self):
        # Shallow copy so callers cannot mutate the internal collection.
        return copy(self._StackColl)
    @property
    def RegistryColl(self):
        # Shallow copy so callers cannot mutate the internal collection.
        return copy(self._RegistryColl)
    @BeforePrecompile
    def ExistStackName(self, name):
        """Return True if a stack variable called *name* already exists."""
        return name in self._StackColl.keys()
    @BeforePrecompile
    def RequestStackName(self, name):
        """Create and return a new StackObj for *name* (must be unused)."""
        if self.ExistStackName(name):
            raise Exception("Required insertion of duplicated Stack name!")
        else:
            tmp = StackObj(name)
            self._StackColl[name] = tmp
            return tmp
    @BeforeCompile
    def RequestRegistry(self):
        """
        Request a new registry slot.
        :return: the new Reg Object.
        """
        regkey = len(self._RegistryColl)
        item = RegObj(regkey)
        self._RegistryColl[regkey] = item
        return item
    @BeforeCompile
    def RequestRegistryArray(self, size):
        """Request *size* new registry slots and return them as a tuple.

        Successive requests currently receive successive, adjacent
        regkeys (FOR NOW -- not a guaranteed contract).
        """
        assert size > 0
        output = tuple([self.RequestRegistry() for _ in range(size)])
        return output
    @BeforeCompile
    def getStackObjByName(self, name):
        """Return the StackObj registered under *name*; raise if missing."""
        if not self.ExistStackName(name):
            raise Exception("Variabile non definita")  # (Italian: "variable not defined")
        return self._StackColl[name]
    @BeforeCompile
    def getStackPosition(self, stackobjs):
        """
        Given a StackObject return the Tape Position of the associated registry.
        :param stackobjs: Identity Object for the stack variable.
        :return: Tape Position of the registry.
        """
        names = list(self._StackColl)
        work = str(stackobjs.name)
        return int(names.index(work))
    @BeforeCompile
    def getRegPosition(self, regobjs):
        """
        Given a RegObject return the Tape Position of the associated registry.
        :param regobjs: Identity Object for the registry.
        :return: Tape Position of the registry.
        """
        # Registries live on the tape after all the stack variables.
        keys = list(self._RegistryColl.keys())
        work = int(regobjs.regkey)
        return len(self._StackColl) + keys.index(work)
    @staticmethod
    def MoveP(start: int, end: int):
        """
        Autogenerate the BFCode for the pointer moving from a
        position to another.
        :param start: Position of start.
        :param end: Position of end.
        :return: The BFCode of the movement.
        """
        if start > end:
            return "<" * (start - end)
        else:
            return ">" * (end - start)
    @staticmethod
    def ClearRegList(startpos: int, reglist: tuple):
        """Emit BFCode zeroing every cell in *reglist* starting from
        *startpos*; returns (code, final pointer position)."""
        code = ""
        pointer = int(startpos)
        for reg in reglist:
            code += Environment.MoveP(pointer, reg) + "[-]"
            pointer = reg
        return code, pointer
    def clear(self):
        # Reset the whole environment to a pristine state.
        self.__init__()
    @BeforeParse
    def addRoutine(self, func):
        """
        Introduce in the Routine Dictionary the specified routine.
        :param func: The Routine to put in the Routine Dictionary.
        """
        # To be sure the input is a named callable
        assert callable(func)
        assert hasattr(func, "__name__")
        self.RoutineDict[func.__name__] = func
    @BeforeParse
    def Parse(self):
        """
        Do on the data previously provided the Parsing process.
        After that all the PseudoCode will be generated into the Environment.
        """
        # MODEL FOR A SINGLE main FUNCTION ONLY
        # TODO: extend the parser dynamically to multi-function cases
        self.RoutineDict["main"]()
        self.PseudoCode = InterfaceObj.BUFFER.GetMainBuffer()
        self._parsed = True
    @BeforePrecompile
    def Precompile(self):
        """
        Do the Precompilation process.
        After that all the Operation in the PseudoCode will have already
        executed his PreCompile method on the Environment for tuning it.
        """
        for op in self.PseudoCode:
            op.PreCompile(self)
        self._precompiled = True
    @BeforeCompile
    def Compile(self):
        """
        Do the Compilation process.
        After the Precompilation and the tuning of the Environment with this
        method the Environment will compute the final BFCode.
        :return: The BFCode compiled.
        """
        self._code = ""
        pointer = 0
        for op in self.PseudoCode:
            code, newpointer = op.GetCode(self, pointer)
            self._code += code
            pointer = newpointer
        self._compiled = True
        return self._code
    @property
    def BFCode(self):
        """Get the BFCode if already generated."""
        return self._code
del BeforeParse, BeforePrecompile, BeforeCompile
| StarcoderdataPython |
3284669 | import torch.nn as nn
import torch as t
import torch.nn.functional as F
class CondConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True,
                 num_experts=1):
        """Conv layer whose shared 2D kernel is modulated per (batch,
        frame) by small Conv3d calibration networks.

        num_experts is stored but not used by the visible methods --
        TODO(review) confirm whether it is consumed elsewhere.
        """
        super(CondConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.num_experts = num_experts
        # Per-frame global spatial average pooling (keeps the temporal dim).
        self.avgpool = nn.AdaptiveAvgPool3d((None,1,1))
        # Temporal (3,1,1) convs producing the calibration signals.
        self.temporalconv=nn.Conv3d(in_channels, in_channels, (3,1,1))
        self.fc=nn.Conv3d(in_channels, 1, (3,1,1))
        # NOTE(review): t.Tensor(...) allocates UNINITIALISED memory and the
        # init loop below only initialises the Conv3d modules, so weight and
        # bias start with garbage values -- confirm whether an explicit
        # kaiming/zero init was intended.
        self.weight = nn.Parameter(
            t.Tensor(1,1,out_channels, in_channels // groups, kernel_size, kernel_size))
        if bias:
            self.bias = nn.Parameter(t.Tensor(1,1,out_channels))
        else:
            self.register_parameter('bias', None)
        # Zero-init the calibration convs so (calibration + 1) starts as
        # an identity scaling of the base weight/bias.
        for m in self.modules():
            if isinstance(m,nn.Conv3d):
                nn.init.constant_(m.weight, 0)
                nn.init.constant_(m.bias, 0)
def generateweight(self,xet):
xet=xet.permute(0,2,1,3,4) #x BxCxLxHxW
xet=self.avgpool(xet) #x BxCxLx1x1
allxet=t.cat((xet[:,:,0,:,:].unsqueeze(2),xet[:,:,0,:,:].unsqueeze(2),xet),2)
calibration=self.temporalconv(allxet)
finalweight=self.weight*(calibration+1).unsqueeze(0).permute(1,3,0,2,4,5)
bias=self.bias*(self.fc(allxet)+1).squeeze().unsqueeze(-1)
return finalweight,bias,allxet
def initset(self,x):
finalweight,finalbias,featset=self.generateweight(x)
b,l, c_in, h, w = x.size()
x=x.reshape(1,-1,h,w)
finalweight=finalweight.reshape(-1,self.in_channels,self.kernel_size ,self.kernel_size )
finalbias=finalbias.view(-1)
if self.bias is not None:
output = F.conv2d(
x, weight=finalweight, bias=finalbias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=b*l)
else:
output = F.conv2d(
x, weight=finalweight, bias=None, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=b*l)
output = output.view(-1, self.out_channels, output.size(-2), output.size(-1))
return output,featset
def combinefeat(self,xet,feat):
xet=xet.permute(0,2,1,3,4) #x BxCxLxHxW
xet=self.avgpool(xet) #x BxCxLx1x1
allxet=t.cat((feat[:,:,-2,:,:].unsqueeze(2),feat[:,:,-1,:,:].unsqueeze(2),xet),2)
calibration=self.temporalconv(allxet)
finalweight=self.weight*(calibration+1).unsqueeze(0).permute(1,3,0,2,4,5)
bias=self.bias*(self.fc(allxet)+1).squeeze().unsqueeze(-1)
return finalweight,bias,allxet
def conti(self,x,feat):
finalweight,finalbias,allxet=self.combinefeat(x,feat)
b,l, c_in, h, w = x.size()
x=x.reshape(1,-1,h,w)
finalweight=finalweight.reshape(-1,self.in_channels,self.kernel_size ,self.kernel_size )
finalbias=finalbias.view(-1)
if self.bias is not None:
output = F.conv2d(
x, weight=finalweight, bias=finalbias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=b*l)
else:
output = F.conv2d(
x, weight=finalweight, bias=None, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=b*l)
output = output.view(-1, self.out_channels, output.size(-2), output.size(-1))
return output,allxet
def forward(self, x): #x B*L*C*W*H
finalweight,finalbias,_=self.generateweight(x)
b,l, c_in, h, w = x.size()
x=x.reshape(1,-1,h,w)
finalweight=finalweight.reshape(-1,self.in_channels,self.kernel_size ,self.kernel_size )
finalbias=finalbias.view(-1)
if self.bias is not None:
output = F.conv2d(
x, weight=finalweight, bias=finalbias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=b*l)
else:
output = F.conv2d(
x, weight=finalweight, bias=None, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=b*l)
output = output.view(-1, self.out_channels, output.size(-2), output.size(-1))
return output
class TemporalAlexNet(nn.Module):
    """
    AlexNet backbone whose last two conv layers are CondConv2d layers,
    giving temporally-calibrated kernels for video clips.

    `init`/`eachtest` are the streaming (tracking) entry points that thread
    the CondConv feature caches; `forward` is the clip-at-once entry point.
    """
    # Channel plan: input + the five conv stages.
    configs = [3, 96, 256, 384, 384, 256]
    #input (B*L)*C*W*H, A1,A2,A3,A4,B1,B2,B3,B4...
    def __init__(self, width_mult=1):
        # Scale every stage (except the 3-channel input) by width_mult.
        configs = list(map(lambda x: 3 if x == 3 else
                           int(x*width_mult), TemporalAlexNet.configs))
        super(TemporalAlexNet, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(configs[0], configs[1], kernel_size=11, stride=2),
            nn.BatchNorm2d(configs[1]),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(configs[1], configs[2], kernel_size=5),
            nn.BatchNorm2d(configs[2]),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
        )
        self.block3 = nn.Sequential(
            nn.Conv2d(configs[2], configs[3], kernel_size=3),
            nn.BatchNorm2d(configs[3]),
            nn.ReLU(inplace=True),
        )
        # Conditionally-parameterized stages replacing conv4/conv5.
        self.temporalconv1 = CondConv2d(configs[3], configs[4], kernel_size=3)
        self.b_f1= nn.Sequential(
            nn.BatchNorm2d(configs[4]),
            nn.ReLU(inplace=True))
        self.temporalconv2 =CondConv2d(configs[4], configs[5], kernel_size=3)
        self.b_f2= nn.BatchNorm2d(configs[5])
        self.feature_size = configs[5]
        # Freeze the early (generic) stages; only the later layers train.
        for param in self.block1.parameters():
            param.requires_grad = False
        for param in self.block2.parameters():
            param.requires_grad = False
    def init(self, xset):
        """First clip of a stream: returns features plus the two CondConv
        caches needed by `eachtest`.  NOTE(review): expects a 4-D
        (frames, C, H, W) input — the unsqueeze(1) adds the batch/time dim."""
        xset = self.block1(xset)
        xset = self.block2(xset)
        xset = self.block3(xset)
        xset=xset.unsqueeze(1)
        xset,feat1 = self.temporalconv1.initset(xset)
        xset = self.b_f1(xset)
        xset=xset.unsqueeze(1)
        xset,feat2 = self.temporalconv2.initset(xset)
        xset = self.b_f2(xset)
        return xset,feat1,feat2
    def eachtest(self, xset,feat1,feat2):
        """Continuation clip: same pipeline but the CondConv layers use the
        cached pooled features for temporal context; returns updated caches."""
        xset = self.block1(xset)
        xset = self.block2(xset)
        xset = self.block3(xset)
        xset=xset.unsqueeze(1)
        xset,feat1 = self.temporalconv1.conti(xset,feat1)
        xset = self.b_f1(xset)
        xset=xset.unsqueeze(1)
        xset,feat2 = self.temporalconv2.conti(xset,feat2)
        xset = self.b_f2(xset)
        return xset,feat1,feat2
    def forward(self, xset):
        """Clip-at-once forward.  xset is (B, L, C, H, W); frames are folded
        into the batch for the 2D stages and restored for the CondConv ones."""
        B,L, _,_,_ = xset.size()
        xset=xset.view(-1,xset.size(-3),xset.size(-2),xset.size(-1))
        xset = self.block1(xset)
        xset = self.block2(xset)
        xset = self.block3(xset)
        xset=xset.view(B,L,xset.size(-3),xset.size(-2),xset.size(-1))
        xset = self.temporalconv1(xset)
        xset = self.b_f1(xset)
        xset=xset.view(B,L,xset.size(-3),xset.size(-2),xset.size(-1))
        xset = self.temporalconv2(xset)
        xset = self.b_f2(xset)
        return xset
| StarcoderdataPython |
1631584 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
import datetime
from concurrent import futures
import parlai.chat_service.utils.logging as log_utils
import parlai.chat_service.utils.misc as utils
class ChatServiceWorldRunner:
    """
    World Runner.

    Launches worlds, overworlds, etc. Helper for ChatServiceManager.
    """

    def __init__(self, opt, world_path, max_workers, manager, is_debug=False):
        self._world_module = utils.get_world_module(world_path)
        self.executor = futures.ThreadPoolExecutor(max_workers=max_workers)
        self.debug = is_debug
        self._log("Found world module: {}".format(self._world_module))
        opt["is_debug"] = is_debug
        self.manager = manager
        self.system_done = False
        self.opt = opt
        self.tasks = {}  # task ID to task
        self.initialized = False

        def _is_done_initializing(fut):
            e = fut.exception()
            if e is not None:
                self._log('`module_initialize` returned with error {}'.format(repr(e)))
                if self.debug:
                    raise e
                # Bug fix: without this early return, fut.result() below
                # re-raised the exception inside the done-callback, and the
                # error was swallowed by the executor instead of just logged.
                return
            if fut.result():
                print(fut.result())
            if self.debug:
                print("DEBUG: Call to `module_initialize` has completed...")
            self.initialized = True

        if hasattr(self._world_module, "module_initialize"):
            self._log("Initializing world module...")
            # perform any module intialization steps
            init_fn = self._world_module.module_initialize
            self.init_fut = self.executor.submit(init_fn, opt, manager)
            self.init_fut.add_done_callback(_is_done_initializing)
        else:
            self._log("World module does not have `module initialize` function")
            self.initialized = True

    def _log(self, text):
        """Print a timestamped debug line when debug mode is enabled."""
        if self.debug:
            # Renamed from `time` to avoid shadowing the imported time module.
            timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            print("{} DEBUG: {}".format(timestamp, text))

    def is_initialized(self):
        """Return whether `module_initialize` (if any) finished successfully."""
        return self.initialized

    def shutdown(self):
        """
        Shutdown the world runner.
        """
        for _, task in self.tasks.items():
            if task.world is not None:
                task.world.shutdown()
        self.system_done = True  # this forces worlds to stop executing parley
        self._log("Executor shutting down.")
        self.executor.shutdown()
        self._log("Shutdown complete.")

    def _run_world(self, task, world_name, agents):
        """
        Run a world until completion.

        :param task:
            TaskState. State of the given task.
        :param world_name:
            string. The name of the world in the module file
        :param agents:
            list. A list of agents that should be in the world.

        :return:
            ret_val: last output of world's parley function. Return None if ERROR
            world_data: data attribute of world, if it has one
        """
        ret_val = None
        world_generator = utils.get_world_fn_attr(
            self._world_module, world_name, "generate_world"
        )
        world = world_generator(self.opt, agents)
        task.world = world
        while not world.episode_done() and not self.system_done:
            ret_val = world.parley()
            time.sleep(0.3)
        world.shutdown()
        world_data = world.data if hasattr(world, "data") else {}
        return ret_val, world_data

    def launch_task_world(self, task_name, world_name, agents):
        """
        Launch a task world.

        Return the job's future.

        :param task_name:
            string. the name of the job thread
        :param world_name:
            string. the name of the task world in the module file
        :param agents:
            list. the list of agents to install in the world

        :return:
            the Futures object corresponding to this launched task
        """
        task = utils.TaskState(task_name, world_name, agents)
        self.tasks[task_name] = task

        def _world_fn():
            log_utils.print_and_log(
                logging.INFO, 'Starting task {}...'.format(task_name)
            )
            return self._run_world(task, world_name, agents)

        fut = self.executor.submit(_world_fn)
        task.future = fut
        return fut

    def launch_overworld(self, task_name, overworld_name, onboard_map, overworld_agent):
        """
        Launch an overworld and a subsequent onboarding world.

        Return the job's future

        :param task_name:
            string. the name of the job thread
        :param overworld_name:
            string. the name of the overworld in the module file
        :param onboard_map:
            map. a mapping of overworld return values to the names
            of onboarding worlds in the module file.
        :param overworld_agent:
            The agent to run the overworld with

        :return:
            the Futures object corresponding to running the overworld
        """
        task = utils.TaskState(
            task_name,
            overworld_name,
            [overworld_agent],
            is_overworld=True,
            world_type=None,
        )
        self.tasks[task_name] = task
        agent_state = self.manager.get_agent_state(overworld_agent.id)

        def _world_function():
            world_generator = utils.get_world_fn_attr(
                self._world_module, overworld_name, "generate_world"
            )
            overworld = world_generator(self.opt, [overworld_agent])
            while not overworld.episode_done() and not self.system_done:
                world_type = overworld.parley()
                if world_type is None:
                    time.sleep(0.5)
                    continue

                if world_type == self.manager.EXIT_STR:
                    self.manager._remove_agent(overworld_agent.id)
                    return world_type

                # perform onboarding
                onboard_type = onboard_map.get(world_type)
                if onboard_type:
                    onboard_id = 'onboard-{}-{}'.format(overworld_agent.id, time.time())
                    agent = self.manager._create_agent(onboard_id, overworld_agent.id)
                    agent.data = overworld_agent.data
                    agent_state.set_active_agent(agent)
                    agent_state.assign_agent_to_task(agent, onboard_id)
                    _, onboard_data = self._run_world(task, onboard_type, [agent])
                    agent_state.onboard_data = onboard_data
                    agent_state.data = agent.data
                self.manager.add_agent_to_pool(agent_state, world_type)
                log_utils.print_and_log(logging.INFO, 'onboarding/overworld complete')

            return world_type

        fut = self.executor.submit(_world_function)
        task.future = fut
        return fut
| StarcoderdataPython |
49086 |
#%%
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
import matplotlib as mpl
import random
import time
import copy
from matplotlib import animation, rc
from IPython.display import HTML
def _update_plot (i,fig,scat,qax) :
    """Animation callback: move the scatter and quiver artists to frame ``i``.

    Relies on the module globals ``P`` (agent positions per frame) and
    ``nV`` (unit velocity vectors per frame) filled in by the __main__
    block below.
    """
    scat.set_offsets(P[i])
    qax.set_offsets(P[i])
    # Transpose the per-agent (vx, vy) pairs into component lists for set_UVC.
    VVV=[list(x) for x in zip(*nV[i])]
    qax.set_UVC(VVV[0],VVV[1])
    return scat,qax
def psi(s, b):
    """Cucker-Smale communication weight: ``(1 + s**2) ** (-b)``.

    Decays with distance ``s``; ``b`` controls how fast the coupling fades.
    """
    return (1 + s ** 2) ** (-b)
def csm(X,V,k):
    """Cucker-Smale interaction term for agent ``k``.

    Returns the average of psi-weighted velocity differences pulling agent
    ``k`` toward the flock's consensus velocity.

    Relies on the module globals ``N`` (number of agents) and ``beta``
    (psi decay exponent) set in the __main__ block below.
    """
    a=[0,0]
    for i in range(N):
        # Euclidean distance between agents k and i.
        s=pow((X[k][0]-X[i][0])**2+(X[k][1]-X[i][1])**2,0.5)
        ps = psi(s,beta)
        a[0]+= ps * (V[i][0]-V[k][0])
        a[1]+= ps * (V[i][1]-V[k][1])
    a[0]=a[0]/N
    a[1]=a[1]/N
    return a
# def ode_rk(X,):
if __name__ == '__main__':
    # Cucker-Smale flocking simulation: N agents, psi exponent beta,
    # T time steps, integrated with classical RK4 and animated at the end.
    N=int(input("N=?"))
    beta=float(input("beta=?"))
    T=int(input("T=?"))
    Pinit=[]
    Vinit=[]
    for n in range(N):
        # Random initial position in [-100, 100]^2.
        x = random.uniform(-100,100)
        y = random.uniform(-100,100)
        Pinit.append([x,y])
        # Random initial speed with random sign per component.
        x = random.uniform(5,70)
        xd=random.randint(0,1)
        y = random.uniform(5,70)
        yd=random.randint(0,1)
        Vinit.append([x*(2*xd-1),y*(2*yd-1)])
    P=[Pinit]
    V=[Vinit]
    nV=[[[0,0] for row in range(N)]]
    print(P[0])
    print(V[0])
    for n in range(N):
        s=pow(V[0][n][0]**2+V[0][n][1]**2,0.5)
        if (s==0) :
            s=1
        nV[0][n][0]=V[0][n][0]/s
        nV[0][n][1]=V[0][n][1]/s
    # Initial positions P and velocities V are set; nV is V normalized to
    # unit length (for the quiver arrows).  Could be simplified with numpy.
    h=0.025
    for t in range(1,T):
        Pnow=copy.deepcopy(P[t-1])
        Vnow=copy.deepcopy(V[t-1])
        nVnow=[[0,0] for row in range(N)]
        K1=[]
        K2=[]
        K3=[]
        K4=[]
        # K1-K4 hold the Runge-Kutta stage values (h*k1 ... h*k4) per agent,
        # each entry paired with the velocity used for the position update.
        Phk1=copy.deepcopy(Pnow)
        Vhk1=copy.deepcopy(Vnow)
        for n in range(N):
            k1=csm(Pnow,Vnow,n)
            k1[0]*=h
            k1[1]*=h
            Phk1[n][0]+=Vnow[n][0]*h/2
            Phk1[n][1]+=Vnow[n][1]*h/2
            Vhk1[n][0]+=k1[0]/2
            Vhk1[n][1]+=k1[1]/2
            K1.append([Vnow[n],k1])
        # Vhk1 = y + h*k1/2 (midpoint state for stage 2)
        Phk2=copy.deepcopy(Pnow)
        Vhk2=copy.deepcopy(Vnow)
        for n in range(N):
            k2=csm(Phk1,Vhk1,n)
            k2[0]*=h
            k2[1]*=h
            Phk2[n][0]+=Vhk1[n][0]*h/2
            Phk2[n][1]+=Vhk1[n][1]*h/2
            Vhk2[n][0]+=k2[0]/2
            Vhk2[n][1]+=k2[1]/2
            K2.append([Vhk1[n],k2])
        # Vhk2 = y + h*k2/2 (midpoint state for stage 3)
        Phk3=copy.deepcopy(Pnow)
        Vhk3=copy.deepcopy(Vnow)
        for n in range(N):
            k3=csm(Phk2,Vhk2,n)
            k3[0]*=h
            k3[1]*=h
            Phk3[n][0]+=Vhk2[n][0]*h
            Phk3[n][1]+=Vhk2[n][1]*h
            Vhk3[n][0]+=k3[0]
            Vhk3[n][1]+=k3[1]
            K3.append([Vhk2[n],k3])
        # Vhk3 = y + h*k3 (endpoint state for stage 4)
        for n in range(N):
            k4=csm(Phk3,Vhk3,n)
            k4[0]*=h
            k4[1]*=h
            K4.append([Vhk3[n],k4])
        # Combine the four stages with the classical RK4 weights 1-2-2-1.
        for n in range(N):
            Pnow[n][0]+=(K1[n][0][0]+2*K2[n][0][0]+2*K3[n][0][0]+K4[n][0][0])*h/6
            Pnow[n][1]+=(K1[n][0][1]+2*K2[n][0][1]+2*K3[n][0][1]+K4[n][0][1])*h/6
            Vnow[n][0]+=(K1[n][1][0]+2*K2[n][1][0]+2*K3[n][1][0]+K4[n][1][0])/6
            Vnow[n][1]+=(K1[n][1][1]+2*K2[n][1][1]+2*K3[n][1][1]+K4[n][1][1])/6
            s=pow(Vnow[n][0]**2+Vnow[n][1]**2,0.5)
            if (s==0):
                s=1
            nVnow[n][0]=Vnow[n][0]/s
            nVnow[n][1]=Vnow[n][1]/s
        P.append(Pnow)
        V.append(Vnow)
        nV.append(nVnow)
    print (Pnow)
    print (Vnow)
    # Animate the trajectories with a scatter plot plus velocity arrows.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim([-1000,1000])
    ax.set_ylim([-1000,1000])
    PP=[list(x) for x in zip(*P[0])]
    VV=[list(x) for x in zip(*nV[0])]
    scat=plt.scatter(PP[0],PP[1],s=20)
    scat.set_alpha(0.2)
    qax=ax.quiver(PP[0],PP[1],VV[0],VV[1],angles='xy',width=0.001,scale=70)
    ani = animation.FuncAnimation(fig,_update_plot,fargs=(fig,scat,qax),frames=T-1,interval=10,save_count=T-1)
    # NOTE: if the interval is too small, saving fails — the written file
    # errors when opened.
    plt.show()
    ani.save('csm-ode45-simu.mp4')
    print("DONE")
# %%
| StarcoderdataPython |
91713 | import numpy as np
from . import stereonet_math
def fit_girdle(*args, **kwargs):
    """
    Fit a plane to a scatter of points on a stereonet (a "girdle").

    Parameters
    ----------
    *args : 2 or 3 sequences of measurements
        Strikes & dips by default; rake measurements need a third sequence.
        Interpretation is controlled by the *measurement* kwarg.
    measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
        How the input arguments are interpreted (default ``"poles"``).
    bidirectional : boolean, optional
        Whether the antipode of each measurement is included (default True).

    Returns
    -------
    strike, dip : floats
        Strike and dip of the best-fit plane.

    Notes
    -----
    The plane's pole is the smallest eigenvector of the covariance matrix
    of the measurements in cartesian 3D space.

    Examples
    --------
    >>> strike = [270, 334, 270, 270]
    >>> dip = [20, 15, 80, 78]
    >>> s, d = mplstereonet.fit_girdle(strike, dip)
    """
    # Smallest eigenvector corresponds to the pole of the girdle's plane.
    return _sd_of_eigenvector(args, vec=0, **kwargs)
def fit_pole(*args, **kwargs):
    """
    Fit the pole to a plane to a "bullseye" of points on a stereonet.

    Parameters
    ----------
    *args : 2 or 3 sequences of measurements
        Strikes & dips by default; rake measurements need a third sequence.
        Interpretation is controlled by the *measurement* kwarg.
    measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
        How the input arguments are interpreted (default ``"poles"``).
    bidirectional : boolean, optional
        Whether the antipode of each measurement is included (default True).

    Returns
    -------
    strike, dip : floats
        Strike and dip of the plane.

    Notes
    -----
    The pole is the largest eigenvector of the covariance matrix of the
    measurements in cartesian 3D space.

    Examples
    --------
    >>> strike = [270, 65, 280, 300]
    >>> dip = [20, 15, 10, 5]
    >>> strike0, dip0 = mplstereonet.fit_pole(strike, dip)
    """
    # Largest eigenvector corresponds to the mean pole.
    return _sd_of_eigenvector(args, vec=-1, **kwargs)
def _sd_of_eigenvector(data, vec, measurement='poles', bidirectional=True):
    """Shared backend of ``fit_pole`` and ``fit_girdle``: the strike/dip
    whose pole is eigenvector ``vec`` of the data's 3D covariance."""
    lon, lat = _convert_measurements(data, measurement)
    _, eigvecs = cov_eig(lon, lat, bidirectional)
    strike, dip = stereonet_math.geographic2pole(
        *stereonet_math.cart2sph(*eigvecs[:, vec]))
    return strike[0], dip[0]
def eigenvectors(*args, **kwargs):
    """
    Eigenvectors and eigenvalues of the 3D covariance matrix of a series
    of geometries.  Useful for plane/pole fitting and shape-fabric
    analysis (e.g. Flinn/Hsu plots).

    Parameters
    ----------
    *args : 2 or 3 sequences of measurements
        Strikes & dips by default; rake measurements need a third sequence.
    measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
        How the input arguments are interpreted (default ``"poles"``).
    bidirectional : boolean, optional
        Whether the antipode of each measurement is included (default True).

    Returns
    -------
    plunges, bearings, values : sequences of 3 floats each
        Eigenvector orientations and eigenvalues, sorted with the largest
        eigenvalue first.

    Examples
    --------
    >>> strikes = [270, 65, 280, 300]
    >>> dips = [20, 15, 10, 5]
    >>> plu, azi, vals = mplstereonet.eigenvectors(strikes, dips)
    """
    measurement = kwargs.get('measurement', 'poles')
    bidirectional = kwargs.get('bidirectional', True)
    lon, lat = _convert_measurements(args, measurement)
    values, vectors = cov_eig(lon, lat, bidirectional)
    plunges, bearings = stereonet_math.geographic2plunge_bearing(
        *stereonet_math.cart2sph(*vectors))
    # cov_eig sorts ascending; reverse so the largest eigenvalue is first.
    return plunges[::-1], bearings[::-1], values[::-1]
def cov_eig(lon, lat, bidirectional=True):
    """Eigen-decomposition (ascending eigenvalue order) of the 3D covariance
    of the cartesian coordinates of the (lon, lat) points.  When
    ``bidirectional``, each measurement's antipode is included as well."""
    lon = np.atleast_1d(np.squeeze(lon))
    lat = np.atleast_1d(np.squeeze(lat))
    if bidirectional:
        anti_lon, anti_lat = stereonet_math.antipode(lon, lat)
        lon = np.hstack([lon, anti_lon])
        lat = np.hstack([lat, anti_lat])
    xyz = np.column_stack(stereonet_math.sph2cart(lon, lat))
    eigvals, eigvecs = np.linalg.eigh(np.cov(xyz.T))
    order = eigvals.argsort()
    return eigvals[order], eigvecs[:, order]
def _convert_measurements(data, measurement):
    """Convert raw input sequences into stereonet (lon, lat) radians
    according to the ``measurement`` convention."""
    if measurement == 'radians':
        # Already in the projection's coordinate system; pass through.
        return tuple(data)
    converters = {'poles': stereonet_math.pole,
                  'lines': stereonet_math.line,
                  'rakes': stereonet_math.rake}
    return converters[measurement](*data)
def find_mean_vector(*args, **kwargs):
    """
    Mean vector for a set of measurements (plunges/bearings by default).

    Parameters
    ----------
    *args : 2 or 3 sequences of measurements
        Interpreted per the *measurement* kwarg; rakes need 3 sequences.
    measurement : {'lines', 'poles', 'rakes', 'radians'}, optional
        How the input arguments are interpreted (default ``"lines"``).

    Returns
    -------
    mean_vector : tuple of two floats
        Plunge and bearing of the mean vector, in degrees.
    r_value : float
        Length of the mean vector (between 0 and 1).
    """
    measurement = kwargs.get('measurement', 'lines')
    lon, lat = _convert_measurements(args, measurement)
    mean_vec, r_value = stereonet_math.mean_vector(lon, lat)
    plunge, bearing = stereonet_math.geographic2plunge_bearing(*mean_vec)
    return (plunge[0], bearing[0]), r_value
def find_fisher_stats(*args, **kwargs):
    """
    Mean vector and Fisher summary statistics for a set of measurements
    (plunges/bearings by default).

    Parameters
    ----------
    *args : 2 or 3 sequences of measurements
        Interpreted per the *measurement* kwarg; rakes need 3 sequences.
    conf : number, optional
        Confidence level (0-100); defaults to 95 (roughly 2 sigma).
    measurement : {'lines', 'poles', 'rakes', 'radians'}, optional
        How the input arguments are interpreted (default ``"lines"``).

    Returns
    -------
    mean_vector : tuple of two floats
        Plunge and bearing of the mean vector, in degrees.
    stats : tuple of three floats
        ``(r_value, confidence, kappa)`` — resultant length (0-1), opening
        angle of the confidence small circle, and the Fisher dispersion
        factor (analogous to a variance/stddev).
    """
    measurement = kwargs.get('measurement', 'lines')
    confidence = kwargs.get('conf', 95)
    lon, lat = _convert_measurements(args, measurement)
    center, stats = stereonet_math.fisher_stats(lon, lat, confidence)
    plunge, bearing = stereonet_math.geographic2plunge_bearing(*center)
    return (plunge[0], bearing[0]), stats
def kmeans(*args, **kwargs):
    """
    Find centers of multi-modal clusters of data using a kmeans approach
    modified for spherical measurements.

    Parameters
    ----------
    *args : 2 or 3 sequences of measurements
        Strikes & dips by default; rake measurements need a third sequence.
        Interpretation is controlled by the *measurement* kwarg.
    num : int
        The number of clusters to find. Defaults to 2.
    bidirectional : bool
        Whether the measurements are bi-directional linear/planar features
        or directed vectors. Defaults to True.
    tolerance : float
        Iteration continues until the centers have not changed by more than
        this amount. Defaults to 1e-5.
    measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
        How the input arguments are interpreted (default ``"poles"``).

    Returns
    -------
    centers : An Nx2 array-like
        Longitude and latitude in radians of the centers of each cluster.
    """
    lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles'))
    num = kwargs.get('num', 2)
    bidirectional = kwargs.get('bidirectional', True)
    tolerance = kwargs.get('tolerance', 1e-5)

    points = lon, lat
    dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional)

    # Random initial centers drawn from the data itself.
    center_lon = np.random.choice(lon, num)
    center_lat = np.random.choice(lat, num)
    # Bug fix: ``zip`` is a one-shot iterator in Python 3; it was exhausted
    # by the first loop pass, so later iterations/comparisons saw no centers.
    # Materialize it so the centers can be iterated on every pass.
    centers = list(zip(center_lon, center_lat))

    while True:
        dists = np.array([dist(item) for item in centers]).T
        closest = dists.argmin(axis=1)

        new_centers = []
        for i in range(num):
            # (Fixed a duplicated ``mask = mask =`` assignment here.)
            mask = closest == i
            # Largest eigenvector of each cluster is its new center.
            _, vecs = cov_eig(lon[mask], lat[mask], bidirectional)
            new_centers.append(stereonet_math.cart2sph(*vecs[:, -1]))

        if np.allclose(centers, new_centers, atol=tolerance):
            break
        centers = new_centers

    return centers
3258679 | <filename>lib/checks/__init__.py
import abc
from os import walk
from os.path import join as path_join
from pkgutil import walk_packages
from inspect import getmembers, isclass
from subprocess import call
from grp import getgrgid
from lib.util import debug, log
class AbstractCheckBase(metaclass=abc.ABCMeta):
    """
    Base class for all checks.

    Concrete subclasses define a ``config_section`` and implement a
    ``_check`` strategy; this base provides user/path expansion helpers
    and simulation-aware execution wrappers.
    """
    order = 1000
    """
    Order when this check should be executed.
    Lower numbers are executed earlier.
    """
    def __init__(self, home_path, users, simulate, options):
        """
        :param home_path: home directory template, may contain the
            placeholders ``$u`` (user), ``$h`` (home), ``$g`` (group).
        :param users: sequence of pwd-style user entries.
        :param simulate: if True, actions are only logged, never executed.
        :param options: configuration options for this check.
        """
        self.home_path = home_path
        """
        Path to users home directory, not expanded yet.
        """
        self.users = users
        """see full config example for explanation"""
        self.simulate = simulate
        """If True, nothing should ever be done."""
        self.options = options
        """
        Configuration options for this check.
        All options from section defined in attribute 'config_section'.
        """
        self.post_init()
        """hook for subclasses"""
    def post_init(self):
        """
        Hook for subclasses.
        So that they do not need to override __init__.
        """
        pass
    @classmethod
    def group_name_for_user(cls, user):
        """
        Returns the group name of a users primary group.
        """
        return getgrgid(user.pw_gid).gr_name
    @classmethod
    def expand_string_for_user(cls, string, user):
        """
        Expands the ``$u``/``$h``/``$g`` placeholders in ``string`` with
        the user's name, home directory and primary group name.
        """
        return string.replace(
            "$u", user.pw_name
        ).replace(
            "$h", user.pw_dir
        ).replace(
            "$g", cls.group_name_for_user(user)
        )
    def get_home_for_user(self, user):
        """
        Expands variables in path to users home path.
        """
        return self.__class__.expand_string_for_user(
            self.home_path,
            user
        )
    @abc.abstractproperty
    def config_section(self):
        """
        Defines which section in the configuration belongs to this check.
        Name clashes are to be avoided ;)
        """
        pass
    def execute_safely(self, function, *args, **kwargs):
        """
        Executes ``function(*args, **kwargs)`` unless simulating, in which
        case the call is only logged.  Returns the function's result, or
        None when simulating.
        """
        def call_as_pretty_string():
            # Render the call as readable "module.func(args, kwargs)" text
            # for the log output.
            return "%s.%s(%s, %s)" % (
                function.__module__,
                function.__name__,
                ', '.join((repr(arg) for arg in args)),
                ', '.join(( "%s=%s" % (repr(k), repr(v))
                            for k, v in kwargs.items())),
            )
        if self.simulate:
            log("simulating - would execute %s otherwise" % (
                call_as_pretty_string()
            ))
            return None
        else:
            log("executing " + call_as_pretty_string())
            return function(*args, **kwargs)
    def execute_subprocess_safely(self, *args, **kwargs):
        """
        Convenience wrapper around ``execute_safely`` to easily call
        another program.
        Arguments as for ``subprocess.Popen``.
        """
        self.execute_safely(call, *args, **kwargs)
    def check(self):
        """
        Executes all checks according to configuration.
        Skips entirely when the 'check' option is disabled.
        """
        if not self.options.get_bool('check'):
            debug("check skipped: disabled in configuration")
            return
        self._check()
    @abc.abstractmethod
    def _check(self):
        """
        Actually implements iteration over objects to check
        (directories, users, …)
        """
        pass
class AbstractPerDirectoryCheck(AbstractCheckBase):
    """
    Executes checks per existing directory in home path.
    """
    def __init__(self, *args, **kwargs):
        """
        Additionally ensures the precondition for this check method to work.
        """
        super(AbstractPerDirectoryCheck, self).__init__(*args, **kwargs)
        self.check_home_path()
    def check_home_path(self):
        """
        Checks if the home_path is compatible with this implementation.

        Only home paths of the exact form ``/path/to/somewhere/$u`` are
        supported at the moment.  Future implementations may support a
        wider variety of directory structures - feel free to improve. :)

        :raises ValueError: if home_path has an unsupported form.
        """
        def home_path_fail():
            # Bug fix: the ValueError was previously constructed but never
            # raised, so invalid home paths were silently accepted.
            raise ValueError(
                "Sorry, at the moment checks for obsolete " +
                "diretories can only be done for home_path's " +
                "in the following form: /path/to/somewhere/$u"
            )
        home_path = self.home_path
        if not (home_path.endswith('$u') or home_path.endswith('$u/')):
            home_path_fail()
        if "$g" in home_path or "$h" in home_path:
            home_path_fail()
    def get_existing_directories(self):
        """
        Collects the directories directly below the base home path.

        :return: an iterable of absolute directory paths; empty if the
                 base path does not exist.
        """
        base_home_path = self.home_path.replace("$u", "")
        assert "$h" not in base_home_path
        assert "$g" not in base_home_path
        for _, directory_names, _ in walk(base_home_path):
            return (path_join(base_home_path, name)
                    for name in directory_names)
        # Bug fix: walk() yields nothing for a missing base path, which
        # previously made this method return None and crash _check().
        return iter(())
    def _check(self):
        """
        For every existing home directory, check if it's correct and
        correct if required and configured.
        """
        for directory in self.get_existing_directories():
            if not self.is_correct(directory):
                if not self.options.get_bool('correct'):
                    debug("correction skipped: disabled in configuration")
                    continue
                self.correct(directory)
    @abc.abstractmethod
    def is_correct(self, directory):
        """
        Checks correctness for a single user home directory.
        """
        pass
    @abc.abstractmethod
    def correct(self, directory):
        """
        Corrects a users home directory.
        """
        pass
class AbstractPerUserCheck(AbstractCheckBase):
    """
    Executes checks per existing user.
    """
    def _check(self):
        """
        Check every user's home directory and correct it when a problem
        is found and corrections are enabled in the configuration.
        """
        for user in self.users:
            if self.is_correct(user):
                continue
            if not self.options.get_bool('correct'):
                debug("correction skipped: disabled in configuration")
                continue
            self.correct(user)

    @abc.abstractmethod
    def is_correct(self, user):
        """
        Checks correctness for a single user home directory.
        """
        pass

    @abc.abstractmethod
    def correct(self, user):
        """
        Corrects a users home directory.
        """
        pass
class AbstractAllUsersAndAllDirectoriesCheck(AbstractPerDirectoryCheck):
    """
    Somehow a hybrid of the per-user and per-directory checks:
    checks and corrections are handed an iterable of users plus the
    list of all existing directories at once.
    Might be memory intensive in large setups.
    """
    def _check(self):
        """
        Check correctness using all users and all existing directories;
        correct afterwards when enabled in the configuration.
        """
        existing = self.get_existing_directories() or []
        directories = list(existing)
        users = self.users
        if self.is_correct(users, directories):
            return
        debug("correction required")
        if self.options.get_bool('correct'):
            self.correct(users, directories)
        else:
            debug("correction skipped: disabled in configuration")

    @abc.abstractmethod
    def is_correct(self, users, directories):
        """
        Checks correctness with a list of users and directories.
        """
        pass

    @abc.abstractmethod
    def correct(self, users, directories):
        """
        Corrects home directory for a list of users and directories.
        """
        pass
"""
Import all checks dynamically
Let's see when it crashes :)
"""
for module_loader, module_name, _ in walk_packages(__path__):
module = module_loader.find_module(module_name).load_module(module_name)
for cls_name, cls in getmembers(module):
if not isclass(cls):
continue
if not issubclass(cls, AbstractCheckBase):
continue
if cls_name.startswith("Abstract"):
continue
exec('from %s import *' % module_name)
| StarcoderdataPython |
1692443 | <gh_stars>1-10
import re
from dataclasses import dataclass
from typing import List
@dataclass
class MonsterInfoReader:
    """
    Incremental parser for a blank-line separated monster-info dump.

    A record consists of one or more name lines, an "===" statistics line
    and one or more detail lines; records are separated by blank lines.
    """
    name_lines: List[str]
    detail_lines: List[str]
    info_line: str = None  # the "=== Num:..." statistics line; None until seen

    def __init__(self):
        # Custom no-argument constructor: a reader always starts empty.
        self.clear()

    def clear(self):
        """Reset all buffered state so the next record can be read."""
        self.name_lines = []
        self.detail_lines = []
        self.info_line = None

    def has_complete_data(self):
        """Return truthy once name, info and detail lines are all buffered."""
        return self.name_lines and self.info_line and self.detail_lines

    def push_line(self, line: str):
        """Classify one input line into the name / info / detail buffers."""
        if line.startswith('==='):
            self.info_line = line
        elif self.info_line:
            self.detail_lines.append(line)
        else:
            self.name_lines.append(line)

    def get_mon_info_list(self, mon_info: str):
        """Yield one parsed dict per monster record found in *mon_info*."""
        lines = mon_info.splitlines()
        for line in lines:
            if not line:
                if self.has_complete_data():
                    yield self.parse()
                    self.clear()
            else:
                self.push_line(line)
        # BUG FIX: splitlines() produces no trailing empty string, so input
        # that does not end with a blank line used to lose its last record.
        if self.has_complete_data():
            yield self.parse()
            self.clear()

    def parse(self):
        """Parse the buffered lines of one record into a result dict."""
        # Parse the monster name block (optional "[x]" unique marker,
        # optional "japanese/" prefix, english name, "(symbol)").
        name_line = '\n'.join(self.name_lines)
        m = re.match(
            r"^(\[.\])?\s*(?:(.+)\/)?(.+)\s*\((.+?)\)$", name_line,
            flags=re.DOTALL)
        # NOTE(review): assumes the "name/english" pair is always present —
        # m[2] is None when there is no '/', which would crash here.
        name = m[2].replace('\n', '')
        english_name = m[3].replace('\n', ' ')
        is_unique = True if m[1] else False
        symbol = m[4].replace('\n', '')
        # Parse the "===" statistics line.
        m = re.match(
            r"^=== Num:(\d+) Lev:(\d+) Rar:(\d+) Spd:(.+) Hp:(.+) Ac:(\d+) Exp:(\d+)",
            self.info_line)
        result = {
            'id': m[1],
            'name': name,
            'english_name': english_name,
            'is_unique': is_unique,
            'symbol': symbol,
            'level': m[2],
            'rarity': m[3],
            'speed': m[4],
            'hp': m[5],
            'ac': m[6],
            'exp': m[7],
            'detail': ''.join(self.detail_lines)
        }
        return result
| StarcoderdataPython |
3240722 | from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()

# Long description shown on PyPI, taken from the project README.
long_description = (here / 'README.md').read_text(encoding='utf-8')

VERSION='1.0.0'

setup(
    name="tinypistats",
    version=VERSION,
    author="<NAME>",
    author_email="<EMAIL>",
    description="View Raspberry Pi stats on a tiny OLED display",
    long_description=long_description,
    license="Apache License, Version 2.0",
    url="https://github.com/avojak/tinypistats",
    packages=find_packages(),
    scripts=['bin/tinypistats'],
    install_requires=[
        'adafruit-circuitpython-ssd1306>=2.11.2',
        'gpiozero>=1.6.2',
        'Pillow>=5.4.1',
        'psutil>=5.8.0'
    ],
    keywords=['python', 'pi', 'raspberry', 'oled', 'stats'],
    classifiers=[
        # BUG FIX: these entries were missing commas, so Python's implicit
        # string concatenation collapsed them into one invalid classifier.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3 :: Only',
    ],
    python_requires='>=3.6, <4',
    project_urls={
        'Bug Reports': 'https://github.com/avojak/tinypistats/issues',
        'Source': 'https://github.com/avojak/tinypistats'
    }
)
| StarcoderdataPython |
90548 | <reponame>tkrsh/pomodoro-cli-python
"pomodoro cli for interactive pomodoro sessions"
import time # for sleep
import os
import sys
from tqdm import tqdm
# Command-line configuration: durations are in minutes (display_timer
# converts to seconds); the last two arguments are cycle counts.
time_cycle = int(sys.argv[1])        # length of one work cycle
time_short_break = int(sys.argv[2])  # short break between cycles
time_long_break = int(sys.argv[3])   # long break after a group of cycles
cycles = int(sys.argv[4])            # work cycles before each long break
total_cycles = int(sys.argv[5])      # number of full groups to run
def display_timer(duration):
    """Count down *duration* minutes on a single terminal line, updating
    once per second, then print a completion message."""
    total_seconds = duration * 60
    out = sys.stdout
    for remaining in range(total_seconds, 0, -1):
        out.write("\r")
        out.write("{:2d} seconds remaining.".format(remaining))
        out.flush()
        time.sleep(1)
    out.write("\rComplete! \n")
def notify(msg):
    """Send a desktop notification (via notify-send) for the given event
    code: 0=cycle, 1=short break, 2=long break, 3=pomodoro completed.
    Unknown codes are ignored."""
    commands = {
        0: "notify-send CycleStarted",
        1: "notify-send Short_Break_Started",
        2: "notify-send Long_Break_Started",
        3: "notify-send Pomodoro_Completed",
    }
    command = commands.get(msg)
    if command is not None:
        os.system(command)
def start_info():
    """Print a summary of the configured pomodoro session to stdout."""
    template = ("\nPomodoro Cycle Has Been Set Sucessfully!"
                "\n\n----------------------------------\n"
                "Time Per Pomodoro Cycle: {} Minutes \nSmall Break Duration : {} "
                "Minutes\nLong Break Duration : {} Minutes\nCycles before Long Break : {}"
                "\nTotal Cycles : {}\n----------------------------------\n\n")
    summary = template.format(
        time_cycle, time_short_break, time_long_break, cycles, total_cycles)
    print(summary, end="", flush=True)
def pomodoro():
    """Run the configured pomodoro session: `total_cycles` groups of
    `cycles` work periods with short breaks, each group followed by a
    long break, sending a desktop notification at the start of each
    period."""
    for i in tqdm(range(total_cycles)):
        for time_range in range(cycles):
            sys.stdout.write(
                "\n\nPomodoro Cycle {} has started !\n".format(time_range + 1))
            # BUG FIX: the "...Started" notifications used to fire *after*
            # the period they announce had already finished; send them
            # before starting the timer, matching the long-break handling.
            notify(0)
            display_timer(time_cycle)
            sys.stdout.write(
                "\n \tShort Break of {} (Minutes) Duration has started\n".format(time_short_break))
            notify(1)
            display_timer(time_short_break)
        sys.stdout.write(
            "\nLong Break of {} (Minutes) Duration has started\n".format(time_long_break))
        notify(2)
        display_timer(time_long_break)
        sys.stdout.write(
            "\nComplete Cycle ({}/{}) of Pomodoro is done\n\n".format(i + 1, total_cycles))
        if i + 1 == total_cycles:
            notify(3)
            # The final message has no placeholders; the stray
            # .format(i+1, total_cycles) call was removed.
            sys.stdout.write("\nComplete Pomodoro Cycle Has Ended !\n\n")
if __name__ == '__main__':
    # Entry point: print the configuration summary, then run the cycles.
    start_info()
    pomodoro()
| StarcoderdataPython |
3355942 | # Generated by Django 2.0.1 on 2018-03-08 20:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Deck model and adds a
    # self-referential many-to-many 'sealed_format' field to Set.
    # Do not hand-edit applied migrations; create a follow-up migration instead.
    dependencies = [
        ('card', '0023_card_in_pack'),
    ]
    operations = [
        migrations.CreateModel(
            name='Deck',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('deck_format', models.CharField(choices=[['ST', 'Standard'], ['SE', 'Sealed'], ['DR', 'Draft']], max_length=50)),
                ('card_list', models.ManyToManyField(to='card.Card')),
            ],
        ),
        migrations.AddField(
            model_name='set',
            name='sealed_format',
            field=models.ManyToManyField(related_name='_set_sealed_format_+', to='card.Set'),
        ),
    ]
| StarcoderdataPython |
100216 | from os.path import join
from numpy import sqrt, pi, linspace, array, zeros
from numpy.testing import assert_almost_equal
from multiprocessing import cpu_count
import pytest
from SciDataTool.Functions.Plot.plot_2D import plot_2D
from pyleecan.Classes.OPdq import OPdq
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.Electrical import Electrical
from pyleecan.Classes.EEC_PMSM import EEC_PMSM
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.VarLoadCurrent import VarLoadCurrent
from pyleecan.Functions.load import load
from pyleecan.Functions.Plot import dict_2D
from pyleecan.definitions import DATA_DIR
from Tests import save_validation_path as save_path
is_show_fig = False
@pytest.mark.long_5s
@pytest.mark.MagFEMM
@pytest.mark.EEC_PMSM
@pytest.mark.IPMSM
@pytest.mark.periodicity
@pytest.mark.SingleOP
def test_EEC_PMSM(nb_worker=int(0.5 * cpu_count())):
    """Validation of the PMSM Electrical Equivalent Circuit by comparing torque with MagFEMM"""
    # NOTE(review): the nb_worker default is evaluated once at import time.
    Toyota_Prius = load(join(DATA_DIR, "Machine", "Toyota_Prius.json"))
    simu = Simu1(name="test_EEC_PMSM", machine=Toyota_Prius)
    # Definition of the input
    simu.input = InputCurrent(OP=OPdq(N0=2000), Nt_tot=8 * 16, Na_tot=2048)
    simu.input.set_Id_Iq(I0=250 / sqrt(2), Phi0=60 * pi / 180)
    # Definition of the magnetic simulation (FEMM reference, with time and
    # angle periodicity enabled to reduce runtime)
    simu_mag = simu.copy()
    simu_mag.mag = MagFEMM(
        is_periodicity_a=True, is_periodicity_t=True, nb_worker=nb_worker, T_mag=60
    )
    # Definition of the electrical simulation (EEC with FEMM flux linkage)
    simu.elec = Electrical()
    simu.elec.eec = EEC_PMSM(
        fluxlink=MagFEMM(
            is_periodicity_t=True,
            is_periodicity_a=True,
            nb_worker=nb_worker,
            T_mag=60,
        ),
    )
    out = simu.run()
    out_mag = simu_mag.run()
    # Reference torque values from Yang et al, 2013
    assert out.elec.Tem_av_ref == pytest.approx(82.1, rel=0.1)
    assert out_mag.mag.Tem_av == pytest.approx(82, rel=0.1)
    # Plot 3-phase current function of time
    if is_show_fig:
        out.elec.get_Is().plot_2D_Data(
            "time",
            "phase[]",
            # save_path=join(save_path, "EEC_FEMM_IPMSM_currents.png"),
            # is_show_fig=False,
            **dict_2D
        )
    return out
@pytest.mark.long_5s
@pytest.mark.long_1m
@pytest.mark.MagFEMM
@pytest.mark.EEC_PMSM
@pytest.mark.IPMSM
@pytest.mark.periodicity
def test_EEC_PMSM_sync_rel(nb_worker=int(0.5 * cpu_count())):
    """Validation of the PMSM Electrical Equivalent Circuit with the Prius machine
    Compute Torque from EEC results and compare with Yang et al, 2013
    """
    Toyota_Prius = load(join(DATA_DIR, "Machine", "Toyota_Prius.json"))
    simu = Simu1(name="test_EEC_PMSM_sync_rel", machine=Toyota_Prius)
    # Definition of the input
    simu.input = InputCurrent(
        OP=OPdq(N0=2000, Tem_av_ref=79), Nt_tot=8 * 16, Na_tot=2048
    )
    simu.input.set_Id_Iq(I0=250 / sqrt(2), Phi0=60 * pi / 180)
    # Definition of the simulation (FEMM)
    simu.elec = Electrical()
    simu.elec.eec = EEC_PMSM(
        fluxlink=MagFEMM(
            is_periodicity_t=True,
            is_periodicity_a=True,
            nb_worker=nb_worker,
            T_mag=60,
        ),
    )
    # Creating the Operating point matrix
    # (reference torques per current angle from Yang et al, 2013)
    Tem_av_ref = array([79, 125, 160, 192, 237, 281, 319, 343, 353, 332, 266, 164, 22])
    N_simu = Tem_av_ref.size
    Phi0_ref = linspace(60 * pi / 180, 180 * pi / 180, N_simu)
    OP_matrix = zeros((N_simu, 4))
    # Set N0 = 2000 [rpm] for all simulation
    OP_matrix[:, 0] = 2000
    # Set I0 = 250/sqrt(2) [A] (RMS) for all simulations
    OP_matrix[:, 1] = 250 / sqrt(2)
    # Set Phi0 from 60 to 180
    OP_matrix[:, 2] = Phi0_ref
    # Set reference torque from Yang et al, 2013
    OP_matrix[:, 3] = Tem_av_ref
    simu.var_simu = VarLoadCurrent(
        is_torque=True, OP_matrix=OP_matrix, type_OP_matrix=0, is_keep_all_output=True
    )
    out = simu.run()
    # Split the overall torque into synchronous and reluctant components
    # and check the decomposition sums back to the EEC torque.
    Tem_eec = [out_ii.elec.Tem_av_ref for out_ii in out.output_list]
    Tem_sync = zeros(N_simu)
    Tem_rel = zeros(N_simu)
    for ii, out_ii in enumerate(out.output_list):
        Tem_sync[ii], Tem_rel[ii] = out_ii.elec.eec.comp_torque_sync_rel()
    Tem2 = Tem_sync + Tem_rel
    assert_almost_equal(Tem_eec - Tem2, 0, decimal=12)
    if is_show_fig:
        plot_2D(
            array([x * 180 / pi for x in out.xoutput_dict["Phi0"].result]),
            [Tem_eec, Tem_av_ref],
            legend_list=["Pyleecan", "Yang et al, 2013"],
            xlabel="Current angle [deg]",
            ylabel="Electrical torque [N.m]",
            title="Electrical torque vs current angle",
            **dict_2D
        )
        plot_2D(
            array([x * 180 / pi for x in out.xoutput_dict["Phi0"].result]),
            [Tem_eec, Tem_sync, Tem_rel],
            legend_list=["Overall", "Synchronous", "Reluctant"],
            xlabel="Current angle [deg]",
            ylabel="Electrical torque [N.m]",
            title="Electrical torque vs current angle",
            **dict_2D
        )
    return out
# To run it without pytest
if __name__ == "__main__":
    out = test_EEC_PMSM()
    # NOTE: the second call rebinds `out`; the first result is discarded.
    out = test_EEC_PMSM_sync_rel()
    print("Done")
| StarcoderdataPython |
1734232 | import pandas as pd
import math
import os.path
import time
from binance.client import Client
from datetime import timedelta, datetime
from dateutil import parser
# from tqdm import tqdm_notebook #(Optional, used for progress-bars)
# SECURITY NOTE: never commit real API credentials; load them from the
# environment or a secrets store instead.
binance_api_key = '<KEY>'    # Enter your own API-key here
binance_api_secret = '<KEY>'  # Enter your own API-secret here
# Candle interval sizes in minutes, used to estimate rows left to fetch.
binsizes = {"1m": 1, "5m": 5, "1h": 60, "1d": 1440}
batch_size = 750  # NOTE(review): appears unused in this module — confirm before removing
binance_client = Client(api_key=binance_api_key, api_secret=binance_api_secret)
### FUNCTIONS
def minutes_of_new_data(symbol, kline_size, data, source):
    """Return (oldest, newest) timestamps bounding the data still to fetch.

    `oldest` is the last timestamp already present in *data* (or
    2017-01-01 when *data* is empty); `newest` is the most recent candle
    available on the exchange. Raises ValueError for unsupported sources.
    """
    if len(data) > 0:
        old = parser.parse(data["timestamp"].iloc[-1])
    elif source == "binance":
        old = datetime.strptime('1 Jan 2017', '%d %b %Y')
    else:
        # BUG FIX: 'old' was left unassigned for unknown sources, causing
        # an UnboundLocalError below; fail fast with a clear message.
        raise ValueError("unsupported data source: %r" % (source,))
    if source == "binance":
        new = pd.to_datetime(binance_client.get_klines(symbol=symbol, interval=kline_size)[-1][0],
                             unit='ms')
    else:
        # BUG FIX: 'new' was likewise unassigned for non-binance sources.
        raise ValueError("unsupported data source: %r" % (source,))
    return old, new
def get_all_binance(symbol, kline_size, save=False):
    """Download all available klines for *symbol* at interval *kline_size*.

    Resumes from a local CSV cache when present; when *save* is True the
    merged result is written back to '<symbol>-<kline_size>-data.csv'.
    Returns the full DataFrame indexed by timestamp.
    """
    filename = '%s-%s-data.csv' % (symbol, kline_size)
    if os.path.isfile(filename):
        data_df = pd.read_csv(filename)
    else:
        data_df = pd.DataFrame()
    oldest_point, newest_point = minutes_of_new_data(symbol, kline_size, data_df, source="binance")
    delta_min = (newest_point - oldest_point).total_seconds() / 60
    available_data = math.ceil(delta_min / binsizes[kline_size])
    if oldest_point == datetime.strptime('1 Jan 2017', '%d %b %Y'):
        print('Downloading all available %s data for %s. Be patient..!' % (kline_size, symbol))
    else:
        print('Downloading %d minutes of new data available for %s, i.e. %d instances of %s data.' % (
            delta_min, symbol, available_data, kline_size))
    klines = binance_client.get_historical_klines(symbol, kline_size, oldest_point.strftime("%d %b %Y %H:%M:%S"),
                                                  newest_point.strftime("%d %b %Y %H:%M:%S"))
    data = pd.DataFrame(klines,
                        columns=['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av',
                                 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])
    data['timestamp'] = pd.to_datetime(data['timestamp'], unit='ms')
    if len(data_df) > 0:
        temp_df = pd.DataFrame(data)
        # FIX: DataFrame.append was deprecated in pandas 1.4 and removed
        # in pandas 2.0; pd.concat is the supported replacement.
        data_df = pd.concat([data_df, temp_df])
    else:
        data_df = data
    data_df.set_index('timestamp', inplace=True)
    if save: data_df.to_csv(filename)
    print('All caught up..!')
    return data_df
if __name__ == "__main__":
# binance_client = Client(binance_api_key, binance_api_secret)
# data = binance_client.get_klines(symbol='ETHUSDT', interval= '1h')
# print(data)
get_all_binance("BTCUSDT", "1m", save=True)
| StarcoderdataPython |
187553 | <gh_stars>0
import day06 as day
INPUTFOLDER = day.get_path()
def test_part_1():
result = day.run_part_1(INPUTFOLDER+"/test1")
assert result == 11
def test_part_1_real():
result = day.run_part_1(INPUTFOLDER+"/input1")
assert result == 6551
def test_part_2():
result = day.run_part_2(INPUTFOLDER+"/test1")
assert result == 6
def test_part_2_real():
result = day.run_part_2(INPUTFOLDER+"/input1")
assert result == 3358
| StarcoderdataPython |
def ans(n):
    """Return the last (smallest) term of the greedy Fibonacci
    decomposition of n, for n >= 1.

    Repeatedly subtracts the largest Fibonacci number <= n until the
    remainder is itself a Fibonacci number, which is returned.
    """
    # Improvement: build the Fibonacci sequence locally instead of
    # depending on the hidden module-level ``fib`` list, and iterate
    # instead of recursing (avoids recursion-depth limits).
    fib = [1, 1]
    while fib[-1] <= n:
        fib.append(fib[-1] + fib[-2])
    while True:
        # find the largest index with fib[i] <= n
        i = 1
        while fib[i + 1] <= n:
            i += 1
        if fib[i] == n:
            return n
        n -= fib[i]
# Precompute the first 100 Fibonacci numbers (fib[0] = fib[1] = 1).
fib=[1]*100
for i in range(2,100):
    fib[i]=fib[i-1]+fib[i-2]
# Read n from stdin and print the final term of its greedy decomposition.
n=int(input())
print(ans(n))
3296191 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from .ast_node import AST_Node
from .ast_values import AST_Ident
import dace
class AST_Assign(AST_Node):
    """AST node for assignments: plain ``=`` and (unsupported for code
    generation) in-place variants such as ``+=``."""

    def __init__(self, context, lhs, rhs, op):
        # for a normal assignment op is "=", but there is also
        # in place modification, i.e., "+="
        AST_Node.__init__(self, context)
        self.lhs = lhs
        self.rhs = rhs
        self.op = op
        self.children = [self.lhs, self.rhs]

    def get_children(self):
        """Return the two child nodes [lhs, rhs]."""
        retval = [self.lhs, self.rhs]
        return retval

    def replace_child(self, old, new):
        """Replace *old* (lhs and/or rhs) with *new*."""
        if old == self.lhs:
            self.lhs = new
        if old == self.rhs:
            self.rhs = new

    def defined_variables(self):
        """Return the list of names newly defined by this node (possibly empty)."""
        # check if this adds something to the scope, if yes add it.
        # assume A is undefined before this node, then:
        # A = expr defines A, A(5) = expr defines A, but
        # A += expr or A(5) += expr is illegal.
        if self.op == "=":
            if isinstance(self.lhs, AST_Ident):
                return [self.lhs.get_name()]
            else:
                return []
        # FIX: previously fell through and returned None for in-place
        # operators, which breaks callers that iterate over the result.
        return []

    def provide_parents(self, parent):
        """Set this node's parent and propagate parenthood to both children."""
        self.parent = parent
        self.lhs.provide_parents(self)
        self.rhs.provide_parents(self)

    def __repr__(self):
        return "AST_Assign(" + str(self.lhs) + ", " + str(self.op) + ", " + str(self.rhs) + ")"

    def print_nodes(self, state):
        """Debug helper: print every node of the given SDFG state."""
        for n in state.nodes():
            print(str(n))
        print("---")

    def generate_code(self, sdfg, state):
        """Generate SDFG dataflow for this assignment in state index *state*.

        Supports ``=`` with an identifier lhs (whole-array assignment) or
        an array-access lhs (partial write, constant or data-dependent
        indices). Raises NotImplementedError otherwise.
        """
        from .ast_arrayaccess import AST_ArrayAccess
        from .ast_values import AST_Constant
        from .ast_loop import AST_ForLoop
        self.rhs.generate_code(sdfg, state)
        s = sdfg.nodes()[state]
        if self.op == "=":
            # We assign to an entire array
            if isinstance(self.lhs, AST_Ident):
                dims = self.rhs.get_dims()
                basetype = self.rhs.get_basetype()
                name = self.lhs.get_name()
                if name not in sdfg.arrays:
                    sdfg.add_array(name, dims, basetype, debuginfo=self.context)
                rhs_datanode = self.rhs.get_datanode(sdfg, state)
                lhs_datanode = self.lhs.get_datanode(sdfg, state)
                s.add_edge(rhs_datanode, None, lhs_datanode, None,
                           dace.memlet.Memlet.from_array(lhs_datanode.data, lhs_datanode.desc(sdfg)))
            # We assign only to a part of an (existing) array, in order to not
            # create cycles we need to add a new data-node, the add_array()
            # interface will make sure it is connected to the same memory than
            # the existing array node.
            elif isinstance(self.lhs, AST_ArrayAccess):
                # get the definition of the array we are assigning to
                # NOTE(review): the return value is unused, but get_datanode()
                # may register an access node in the state — kept deliberately.
                lhs_data = self.lhs.arrayname.get_datanode(sdfg, state)
                vardef = self.search_vardef_in_scope(self.lhs.arrayname.get_name())
                if vardef is None:
                    raise ValueError("No definition found for " + self.lhs.arrayname.get_name() + " searching from " +
                                     str(self))
                dims = vardef.get_dims()
                basetype = vardef.get_basetype()
                if self.lhs.arrayname.get_name() not in sdfg.arrays:
                    sdfg.add_array(self.lhs.arrayname.get_name(), dims, basetype, debuginfo=self.context)
                dn = sdfg.nodes()[state].add_access(self.lhs.arrayname.get_name())
                # check if the write is "out of bounds": this _is_ allowed in
                # matlab, but not in SDFGs, since it would require to
                # dynamically reallocate the array
                # create a memlet which connects the rhs of the assignment to dn
                rhs_datanode = self.rhs.get_datanode(sdfg, state)
                if self.lhs.is_data_dependent_access() == False:
                    msubset = self.lhs.make_range_from_accdims()
                    writem = dace.memlet.Memlet.simple(self.lhs.arrayname.get_name(), msubset, debuginfo=self.context)
                    sdfg.nodes()[state].add_edge(rhs_datanode, None, dn, None, writem)
                else:
                    s = sdfg.nodes()[state]
                    acc_data_nodes = set()
                    acc_dims = []
                    for a in self.lhs.accdims:
                        if isinstance(a, AST_Constant):
                            acc_dims.append(a.get_value())
                        elif isinstance(a, AST_Ident):
                            vardef = self.search_vardef_in_scope(a.get_name())
                            if vardef is None:
                                # BUG FIX: this previously referenced the
                                # undefined name 'acc', raising NameError
                                # instead of the intended ValueError.
                                raise ValueError('No definition found for ' + str(a.get_name()))
                            elif isinstance(vardef, AST_ForLoop):
                                acc_data_nodes.add(vardef.var)
                                acc_dims.append(vardef.var.get_name())
                        else:
                            raise ValueError(str(type(a)) + " in data dependent write not allowed.")
                    mapdict = {}
                    for a in acc_dims:
                        mapdict[a] = str(a)
                    men, mex = s.add_map('datedepwrite', mapdict)
                    men.add_in_connector('IN_1')  # the data to write goes here
                    men.add_out_connector('OUT_1')  # and comes out here
                    for d in acc_data_nodes:
                        dname = d.get_name_in_sdfg(sdfg)
                        men.add_in_connector(dname)
                        datanode = d.get_datanode(sdfg, state)
                        s.add_edge(datanode, None, men, dname,
                                   dace.memlet.Memlet.from_array(datanode.data, datanode.desc(sdfg)))
                    s.add_edge(rhs_datanode, None, men, 'IN_1',
                               dace.memlet.Memlet.from_array(rhs_datanode.data, rhs_datanode.desc(sdfg)))
                    s.add_edge(
                        men, 'OUT_1', dn, None,
                        dace.memlet.Memlet.simple(self.lhs.arrayname.get_name(), ','.join([str(d) for d in acc_dims])))
                    s.add_edge(dn, None, mex, None, dace.memlet.Memlet())
            else:
                raise NotImplementedError("Assignment with lhs of type " + str(type(self.lhs)) +
                                          " has not been implemented yet.")
        else:
            raise NotImplementedError("Assignment operator " + self.op + " has not been implemented yet.")

    __str__ = __repr__
| StarcoderdataPython |
1777268 | <reponame>kirim-Lee/nomadgram_mac<filename>nomadgram/images/urls.py
# from django.urls import path
from django.conf.urls import url
from . import views
app_name="images"
urlpatterns = [
#path("all/",view=views.ListAllImages.as_view(),name="all_images")
url(
regex=r"^all/$",
view=views.ListAllImages.as_view(),
name="all_images"
)
]
| StarcoderdataPython |
1603653 | <gh_stars>1-10
from pandas import read_csv
import cv2
import glob
import os
import numpy as np
import logging
import coloredlogs
import tensorflow as tf
logger = logging.getLogger(__name__)
# Install colored log output globally and for this module's logger.
coloredlogs.install(level='DEBUG')
coloredlogs.install(level='DEBUG', logger=logger)
# Recognized image file extensions.
# NOTE(review): unused in this module as shown — confirm before removing.
IM_EXTENSIONS = ['png', 'jpg', 'jpeg', 'bmp']
def read_img(img_path, img_shape=(128, 128)):
    """
    Load an image file, resize it to *img_shape* and scale pixel values
    into [0, 1] as floats.
    """
    img = cv2.imread(img_path)
    if img is None:
        # BUG FIX: cv2.imread silently returns None for missing/unreadable
        # files, which previously surfaced as a confusing cv2.resize error.
        raise FileNotFoundError("could not read image: %s" % img_path)
    img = cv2.resize(img, img_shape)
    img = img.astype('float')
    img /= 255.
    return img
def append_zero(arr):
    """Prepend a single 0 (class slot) to the 1-D label row *arr*."""
    prefixed = np.append([0], arr)
    return prefixed
def dataloader(dataset_dir, label_path, batch_size=1000, img_shape=(128, 128)):
    """
    Infinite batch generator.

    Yields (images, [class_labels, class_and_location_labels]) where
    images is (batch, H, W, 3) float32 in [0, 1].
    NOTE(review): np.random.choice(..., replace=False) requires
    batch_size <= number of distinct image files — verify for small sets.
    NOTE(review): assumes the label CSV has a 'filename' column followed
    by at least 4 numeric columns (presumably a bounding box) — confirm.
    """
    label_df = read_csv(label_path)
    # Index labels by filename; one file may have several label rows.
    label_idx = label_df.set_index('filename')
    img_files = label_idx.index.unique().values
    numofData = len(img_files) # endwiths(png,jpg ...)
    data_idx = np.arange(numofData)
    while True:
        batch_idx = np.random.choice(data_idx, size=batch_size, replace=False)
        batch_img = []
        batch_label = []
        batch_class = []
        for i in batch_idx:
            img = read_img(dataset_dir + img_files[i], img_shape=img_shape)
            label = label_idx.loc[img_files[i]].values
            label = np.array(label, ndmin=2)
            label = label[:, :4]
            # Prefix every label row with a 0 class slot.
            cls_loc_label = np.apply_along_axis(append_zero, 1, label)
            batch_img.append(img)
            batch_label.append(cls_loc_label) # face + bb
            # print(cls_loc_label[:, 0:1].shape)
            batch_class.append(cls_loc_label[:, 0:1]) # label[:, 0:1]) ---> face
        # yield {'input_1': np.array(batch_img, dtype=np.float32)}, {'clf_output': np.array(batch_class, dtype=np.float32),'bb_output': np.array(batch_label, dtype=np.float32)}
        yield np.array(batch_img, dtype=np.float32), [np.array(batch_class, dtype=np.float32), np.array(batch_label, dtype=np.float32)]
if __name__ == "__main__":
pass
| StarcoderdataPython |
129838 | #!/use/bin/env python3
#-*- coding:utf-8 -*-
# child.py
# A sample child process for receiving messages over a channel
import sys,os
# Make the sibling 'channel' module importable when run directly as a script.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import channel
# Echo loop: receive items from the parent over the channel and answer
# with ("child", item) until the parent closes the pipe.
ch = channel.Channel(sys.stdout, sys.stdin)
while True:
    try:
        item = ch.recv()
        ch.send(("child", item))
    except EOFError:
        # FIX: dropped the unused ``as e`` exception binding.
        break
154070 | # httpServerLogParser.py
#
# Copyright (c) 2016, <NAME>
#
"""
Parser for HTTP server log output, of the form:
192.168.127.12 - - [20/Jan/2003:08:55:36 -0800]
"GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html"
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
127.0.0.1 - <EMAIL> [12/Sep/2006:14:13:53 +0300]
"GET /skins/monobook/external.png HTTP/1.0" 304 - "http://wiki.mysite.com/skins/monobook/main.css"
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.6) Gecko/20060728 Firefox/1.5.0.6"
You can then break it up as follows:
IP ADDRESS - -
Server Date / Time [SPACE]
"GET /path/to/page
HTTP/Type Request"
Success Code
Bytes Sent To Client
Referer
Client Software
"""
import string
from mo_parsing import *
from mo_parsing.helpers import delimited_list, dblQuotedString, remove_quotes
from mo_parsing.utils import nums, alphas
def getCmdFields(t, l, s):
    """Parse action: split the quoted request field of *t* into its
    method / request-URI / protocol-version named tokens."""
    method, request_uri, protocol_version = t[0].strip('"').split()
    t["method"] = method
    t["requestURI"] = request_uri
    t["protocolVersion"] = protocol_version
logLineBNF = None
def getLogLineBNF():
    """Return the (cached) grammar for one Apache/NCSA combined log line."""
    global logLineBNF
    if logLineBNF is None:
        integer = Word(nums)
        # Dotted-quad IP address, kept as one combined token.
        ipAddress = delimited_list(integer, ".", combine=True)
        timeZoneOffset = Word("+-", nums)
        # Three-letter month abbreviation, e.g. "Jan".
        month = Word(string.ascii_uppercase, string.ascii_lowercase, exact=3)
        # "[20/Jan/2003:08:55:36 -0800]" -> (datetime-string, tz-offset)
        serverDateTime = Group(
            Suppress("[")
            + Combine(
                integer
                + "/"
                + month
                + "/"
                + integer
                + ":"
                + integer
                + ":"
                + integer
                + ":"
                + integer
            )
            + timeZoneOffset
            + Suppress("]")
        )
        logLineBNF = (
            ipAddress.set_token_name("ipAddr")
            + Suppress("-")
            # auth field: "-" when anonymous, otherwise a user/email token
            + ("-" | Word(alphas + nums + "@._")).set_token_name("auth")
            + serverDateTime.set_token_name("timestamp")
            # quoted request string; parse action splits it into sub-fields
            + dblQuotedString.set_token_name("cmd").add_parse_action(getCmdFields)
            + (integer | "-").set_token_name("statusCode")
            + (integer | "-").set_token_name("numBytesSent")
            + dblQuotedString.set_token_name("referrer").add_parse_action(remove_quotes)
            + dblQuotedString.set_token_name("clientSfw").add_parse_action(remove_quotes)
        )
    return logLineBNF
testdata = """
192.168.127.12 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
192.168.3.11 - - [16/Feb/2004:04:09:49 -0800] "GET /ads/redirectads/336x280redirect.htm HTTP/1.1" 304 - "http://www.foobarp.org/theme_detail.php?type=vs&cat=0&mid=27512" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
1172.16.31.10 - - [16/Feb/2004:10:35:12 -0800] "GET /ads/redirectads/468x60redirect.htm HTTP/1.1" 200 541 "http://11.11.111.11/adframe.php?n=ad1f311a&what=zone:56" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1) Opera 7.20 [ru\"]"
127.0.0.1 - <EMAIL> [12/Sep/2006:14:13:53 +0300] "GET /skins/monobook/external.png HTTP/1.0" 304 - "http://wiki.mysite.com/skins/monobook/main.css" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.6) Gecko/20060728 Firefox/1.5.0.6"
"""
# Demo: parse every non-empty sample line; results are only parsed,
# not printed (see the commented-out inspection code below).
for line in testdata.split("\n"):
    if not line:
        continue
    fields = getLogLineBNF().parse_string(line)
    # ~ print repr(fields)
    # ~ for k in fields.keys():
    # ~ print "fields." + k + " =", fields[k]
| StarcoderdataPython |
1729794 | '''
@summary: Bootstrapper file to run gui_server, and UI associated with it.
Ensures all routing for pages, error routing, and user sessions are handled.
@author devopsec and mackhendricks
'''
from flask import Flask, render_template, request, jsonify, make_response, url_for, redirect, abort, Markup
import requests #, json
from api.sql.models import *
from datetime import datetime
# Flask application object for the GUI server.
app = Flask('gui_server')
#app.config['DEBUG'] = False
# error handling #
@app.errorhandler(400)
def bad_request(error):
    """Return a JSON 400 body instead of the default HTML error page."""
    payload = jsonify({'error': 'Bad request'})
    return make_response(payload, 400)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of the default HTML error page."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
@app.route('/registration', methods=['POST', 'GET'])
def register_user():
    """Render the registration form (GET) or create a new user (POST).

    BUG FIX: the stray @app.errorhandler(400) decorator was removed —
    Flask calls error handlers with the exception as an argument, which
    this view does not accept, so every 400 raised a TypeError.
    """
    if request.method != 'POST':
        return render_template('user_registration.html')
    username = request.json.get('username')
    password = request.json.get('password')
    if username is None or password is None:
        abort(400)  # missing arguments
    if user_data.query.filter_by(username=username).first() is not None:
        abort(400)  # existing user
    # The abort() calls above are outside the try block so they are no
    # longer swallowed by the broad except handler below.
    try:
        # NOTE(review): computing the id client-side is racy; prefer an
        # autoincrement primary key on the model.
        user_id = user_data.query.count() + 1  # BUG FIX: was user_data.query().count
        firstname = request.json.get('firstname')
        lastname = request.json.get('lastname')
        email = request.json.get('email')
        company_id = request.json.get('company_id')
        status = request.json.get('status')
        phone_number = request.json.get('phone_number')
        USER = user_data(user_id=user_id, username=username, firstname=firstname, lastname=lastname, password=password,
                         email=email, company_id=company_id, status=status, phone_number=phone_number,
                         # BUG FIX: 'datetime' is the class here (from-import),
                         # so datetime.datetime.now().tostring() raised
                         # AttributeError twice over; store an ISO string.
                         lastlogin=datetime.now().isoformat(),
                         account_type='1', notification=None)
        # NOTE(review): was user_data.hash_password(password) — called on the
        # class, not the instance; confirm the model's intended API.
        USER.hash_password(password)
        db.session.add(USER)
        db.session.commit()
        return (jsonify({'username': USER.username}), 201, {'Location': url_for('get_user', id=USER.id, _external=True)})
    except Exception as e:
        return {
            'status': 400,
            'message': 'User creation unsuccessful'
        }
# routes #
@app.route('/')
def index():
    """Render the public landing page."""
    return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate against the API (POST)."""
    error = None
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        # NOTE(review): credentials embedded in the URL path end up in server
        # logs — prefer an authenticated POST to the API. Also the API host is
        # hard-coded; move to configuration.
        url = 'http://0.0.0.0:7777/api/auth/' + username + "/" + password
        try:
            response = requests.get(url)
        except requests.exceptions.RequestException as e:
            error = "Make sure the API Server is started and then try to login again"
            return render_template('login.html',error=error);
        jData = response.json()
        if jData['authentication'] == True:
            return render_template('home.html',username=username)
        else:
            error = "Username or Password was not correct"
    # GET requests and failed logins fall through to the login form.
    return render_template('login.html',error=error)
@app.route('/threats', methods=['GET'])
def threats():
    """Show threats for the selected ?site=... (or the site picker when
    no site is selected)."""
    company = "Flyball-Labs"  # NOTE(review): hard-coded tenant — parameterize
    apiServer = 'http://10.10.10.97:7777'
    # Grab the sites for the company
    url = apiServer + '/api/company/' + company + "/sites"
    site = request.args.get('site')  # FIX: was fetched (and discarded) twice
    response = requests.get(url)
    jData = response.json()
    sites = jData['sites']
    # FIX: removed the unused `params = request.args.items()` local and the
    # redundant `request.method == 'GET'` check (only GET is routed here);
    # `is not None` replaces the non-idiomatic `!= None`.
    if site is not None:
        threatsBySiteURI = '/api/metron/threats/' + site
        assetURI = '/api/assets/' + site
        return render_template('threatsbysite.html', sites=sites, selectedSite=site,
                               apiServer=apiServer, threatsBySiteURI=threatsBySiteURI,
                               assetURI=assetURI)
    return render_template('threatsbysite.html', sites=sites)
@app.route('/datamanagement', methods=['GET'])
def show_UI():
    """Render the data-management landing page."""
    error = None
    return render_template('data_management.html',error=error)
#### TODO - refactoring of code and adding new tables objects #####
@app.route('/datamanagement/tables', methods=['GET', 'POST', 'PATCH', 'DELETE'])
def tables_redirect():
    """Render the table named by ?route=... on the tables UI page.

    Supported routes: notifications, users, companies, assets.
    Unknown routes get a JSON 404.

    BUG FIXES relative to the original:
    - several branches built a table and called render_template without
      ``return``, so the view returned None and Flask errored;
    - the exception handler called the nonexistent ``e.traceback()``;
    - the "databases" branch referenced an undefined ``table`` variable
      (it is now treated as an unknown route);
    - debug print() calls and two large blocks of dead, commented-out
      code were removed.
    NOTE(review): the *_table helpers (notification_table, user_table,
    company_table, asset_table) are not imported in this file as shown —
    confirm they arrive via the models star-import.
    """
    route = request.args.get('route')
    try:
        if route == "notifications":
            table = notification_table(notification_data.query.all())
        elif route == "users":
            table = user_table(user_data.query.all())
        elif route == "companies":
            table = company_table(company_data.query.all())
        elif route == "assets":
            table = asset_table(asset_data.query.all())
        else:
            return {'error': 'Route not available or does not exist'}, 404
        return render_template('tables_ui.html', table=table)
    except Exception as e:
        return {"An Error Occurred": str(e)}, 404
if __name__=='__main__':
    # Dev server bound on all interfaces; debug=True must not be used in
    # production (enables the interactive debugger).
    app.run(host='0.0.0.0', port=8888, debug=True)
mystery_string = "my cat your cat"
#You may modify the lines of code above, but don't move them!
#When you Submit your code, we'll change these lines to
#assign different values to the variables.

#Count and print how many times the character sequence "cat"
#appears in mystery_string. For the string above, it prints 2.

# Slide a window of len(target) characters across the string and compare
# each window against the target. This replaces the original version,
# which iterated the characters with a for-each loop while *also* tracking
# a manual index (the loop variable was effectively unused), and it
# generalizes: change `target` to count any substring, including
# overlapping occurrences.
target = "cat"
result = 0
for start in range(len(mystery_string) - len(target) + 1):
    if mystery_string[start:start + len(target)] == target:
        result += 1
print(result)
| StarcoderdataPython |
def isabn(obj):
    """isabn(string or int) -> True|False

    Validate an ABN (Australian Business Number).
    http://www.ato.gov.au/businesses/content.asp?doc=/content/13187.htm

    Accepts an int or a string of exactly 11 digits and no leading zeroes.
    Digits may be optionally separated with spaces. Any other input raises
    TypeError or ValueError.

    Return True if the argument is a valid ABN, otherwise False.

    >>> isabn('53 004 085 616')
    True
    >>> isabn('93 004 085 616')
    False
    """
    # Normalise an integer argument to its 11-digit string form first.
    if isinstance(obj, int):
        if not 10**10 <= obj < 10**11:
            raise ValueError('int out of range for an ABN')
        obj = str(obj)
    if not isinstance(obj, str):
        raise TypeError('expected a str or int but got %s' % type(obj))
    candidate = obj.replace(' ', '')
    if len(candidate) != 11:
        raise ValueError('ABN must have exactly 11 digits')
    if not candidate.isdigit():
        raise ValueError('non-digit found in ABN')
    if candidate.startswith('0'):
        raise ValueError('leading zero not allowed in ABNs')
    # ATO check-digit scheme: subtract 1 from the first digit, take the
    # weighted sum, and require divisibility by 89.
    weights = (10, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19)
    total = 0
    for position, weight in enumerate(weights):
        digit = int(candidate[position])
        if position == 0:
            digit -= 1
        total += digit * weight
    return total % 89 == 0
| StarcoderdataPython |
3224660 | from setuptools import setup
from os import sep
# Package metadata for the HYdrologic Gauge Network Datamanager.
# NOTE(review): `from os import sep` above is unused, and the `url`,
# `author`, and `author_email` fields are placeholders — fill these in
# before publishing.
setup(name='hygnd',
      version='0.1',
      description='HYdrologic Gauge Network Datamanager',
      url='',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['hygnd'],
      entry_points = {
        'console_scripts': [
            #nutrient_mon_report/__main__.py
            #'bin/hygnd-store = hygnd-store.py.__main__:main',
            #TODO add script for plotting events
            #TODO add script for updating store
        ]
        },
      zip_safe=False)
| StarcoderdataPython |
1767237 | <gh_stars>10-100
import torch
from huggingsound import SpeechRecognitionModel
# Run inference on GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
batch_size = 1

# Pretrained English wav2vec2 speech-recognition model from the hub.
model = SpeechRecognitionModel(
    "jonatasgrosman/wav2vec2-large-xlsr-53-english",
    device=device,
)

# Audio files paired with their expected (ground-truth) transcriptions.
references = [
    {
        "path": "/path/to/sagan.mp3",
        "transcription": "extraordinary claims require extraordinary evidence",
    },
    {
        "path": "/path/to/asimov.wav",
        "transcription": "violence is the last refuge of the incompetent",
    },
]

# Evaluate the model against the references and report the metrics.
evaluation = model.evaluate(references, inference_batch_size=batch_size)
print(evaluation)
1627854 | <filename>Calculadora-em-russo.py<gh_stars>0
lista_mult = [] #doubling column (multiplier), shared by the helper functions below
lista_div = [] #halving column (dividend), shared by the helper functions below
def dividindo_por_dois(n1):
    """Build the halving column of the Russian-peasant multiplication.

    Repeatedly floor-divides `n1` by 2, appending each intermediate value
    (and a final 1) to the module-level `lista_div`, and returns that list.

    BUG FIX: the original used `while True` and only stopped when the
    quotient became exactly 1, so any n1 <= 1 looped forever (1 // 2 == 0,
    which never equals 1 again). This version stops as soon as the value
    reaches 1 and returns [1] for n1 == 1, so 1 * n2 multiplies correctly.
    """
    while n1 > 1:
        lista_div.append(n1)
        n1 = n1 // 2
    lista_div.append(1)  # the halving column always ends with 1
    return lista_div
def multiplicando_por_2(n2):
    """Build the doubling column: one doubled value of n2 per halving entry."""
    remaining = len(lista_div)  # one multiplier row per dividend row
    while True:
        lista_mult.append(n2)
        n2 = n2 * 2
        remaining = remaining - 1
        if remaining == 0:
            break
    return lista_mult
def parametro(lista_div, lista_mult):
    """Drop multiplier rows whose matching halving value is even.

    Russian-peasant multiplication keeps only the doubling-column entries
    whose halving-column partner is odd. `lista_mult` is mutated in place
    (entries popped) and also returned, as in the original.

    Walks the halving column from the end so pops never shift indices that
    are still to be visited.

    BUG FIX: the original `while True` loop indexed `lista_div[-1]` on empty
    input and raised IndexError; an empty pair of lists now returns [].
    """
    for idx in range(len(lista_div) - 1, -1, -1):
        if lista_div[idx] % 2 == 0:
            lista_mult.pop(idx)
    return lista_mult
def resultado(lista_mult):
    """Return the sum of the surviving multiplier rows (the final product)."""
    total = 0
    for value in lista_mult:
        total += value
    return total
# --- interactive driver -----------------------------------------------------
import time

n1 = int(input("Digite o primeiro numero: "))
n2 = int(input("Digite o segundo numero: "))
operacao = input("Digite '+' para soma e '*' para multiplicação")

if operacao == '*':
    t1 = time.time()
    halved = dividindo_por_dois(n1)
    doubled = multiplicando_por_2(n2)
    kept_rows = parametro(halved, doubled)
    # NOTE: rebinds the helper's name with its own result, as the original did.
    resultado = resultado(kept_rows)
    print(resultado)
    tempoExec = time.time() - t1
else:
    print(n1 + n2)
3302414 | <reponame>urbas/py-air-control-exporter<filename>test/test_app.py
from unittest import mock
from py_air_control_exporter import app
@mock.patch("py_air_control_exporter.metrics.PyAirControlCollector")
def test_create_app(mock_collector):
    """
    check that we can create an exporter with explicit host and protocol parameters

    The collector class is patched out, so this only verifies that
    ``create_app`` forwards ``host`` and ``protocol`` to
    ``PyAirControlCollector`` unchanged — no device is contacted.
    """
    app.create_app(host="1.2.3.4", protocol="foobar")
    mock_collector.assert_called_once_with(host="1.2.3.4", protocol="foobar")
4803110 | <filename>main.py
import sqlite3
import time
import random
# Module-level SQLite connection and cursor, shared by every function below.
# Opening happens at import time; 'main.db' is created if it does not exist.
conn = sqlite3.connect('main.db')
c = conn.cursor()
def table():
    """Ensure the credentials table exists (username/password columns)."""
    # NOTE(review): this creates a table named `username`, but login() below
    # queries a table named `login` — one of the two names looks wrong;
    # confirm against the intended main.db schema.
    c.execute("CREATE TABLE IF NOT EXISTS username(username VARCHAR, password VARCHAR)")
# Run at import time so the table exists before any login attempt.
table()
def login():
    """Prompt for credentials and check them against the `login` table.

    Gives the user up to three attempts; on a failed attempt the user may
    quit early by answering 'n'.
    """
    # NOTE(review): this SELECTs from a table named `login`, but table()
    # above only creates `username` — as written this raises
    # sqlite3.OperationalError unless `login` is created elsewhere.
    # NOTE(review): passwords are stored/compared in plaintext — consider
    # hashing before this ships.
    for i in range(3):
        username = input("pls enter your username: ")
        password = input("pls enter pass: ")
        # Fresh connection per attempt; `with` commits/rolls back the
        # transaction but does not close the connection.
        with sqlite3.connect('main.db') as db:
            cursour = db.cursor()
            # Parameterized query — safe against SQL injection.
            find_user = ('SELECT * FROM login WHERE username = ? AND password = ?')
            cursour.execute(find_user,[(username), (password)])
            results = cursour.fetchall()
            if results:
                for bla in results:
                    malli = str(bla)  # NOTE(review): `malli` is never used afterwards
                    print("Welcome ")
                    # NOTE(review): this break only exits the inner for-loop,
                    # so the outer attempt loop keeps prompting even after a
                    # successful login — presumably unintended; confirm.
                    break
            else:
                print("Username and passwored not recognised")
                again = input("Do u want to try again?(y/n): ")
                if again.lower() == "n":
                    print("Good Bye")
                    time.sleep(1)
                    break
# Run the login flow at import time.
login()
def main():
    """Show the account menu once and run the selected action.

    Fixes over the original:
    * the chosen action is now actually dispatched (the original only ever
      invoked its nested `logic_statment` from inside one of its own
      branches, making the call unreachable/recursive);
    * a ValueError while reading the password re-prompts instead of falling
      through to an INSERT with `passwored` unbound (NameError).
    """
    print("this are the categorys we have")
    print('''
    a.new acount
    b.veiw account
    c.add money
    d.sending money
    ''')
    a = input("wich category do you want: ")

    def _create_account():
        # Interactively add one (username, password) row to `names`.
        print("you are creating a new account")
        c.execute("CREATE TABLE IF NOT EXISTS names(username,passwored)")
        while True:
            username = input("enter your name: ")
            if not username.isalpha():
                continue  # names must be purely alphabetic; ask again
            try:
                passwored = int(input("enter passwored: "))
            except ValueError:
                print("sorry i did'nt understand that")
                # BUG FIX: the original fell through to the INSERT below with
                # `passwored` unbound, raising NameError. Re-prompt instead.
                continue
            bankAccountNumber = random.randint(1000,100000)
            print(bankAccountNumber)
            print("THIS IS YOUR BANK ACCOUNT NUBER")
            # NOTE(review): the generated account number is shown but never
            # persisted — confirm whether it should be stored in the table.
            c.execute("INSERT INTO names(username,passwored) VALUES(?,?)",(username,passwored))
            conn.commit()
            print("SUCCESSFULLY ADDED")
            break
        c.execute("SELECT * FROM names")
        for b in c.fetchall():
            print(b)

    def _view_accounts():
        # List every stored account row (creating the table if missing).
        c.execute("CREATE TABLE IF NOT EXISTS names(username,passwored)")
        c.execute("SELECT * FROM names")
        for b in c.fetchall():
            print(b)

    if a == 'a':
        _create_account()
    elif a == 'b':
        _view_accounts()
    # NOTE(review): menu options 'c' (add money) and 'd' (sending money) are
    # advertised but not implemented yet.
main()
118833 | from django.db import transaction
from django.db.models import Q
from analysis.models import TagNode, Analysis, Tag
from analysis.tasks.variant_tag_tasks import analysis_tag_created_task, analysis_tag_deleted_task
def _analysis_tag_nodes_set_dirty(analysis: Analysis, tag: Tag):
    """ Needs to be sync so version is bumped by the time client calls node_versions to see whats dirty """
    # Nodes that carry no tag filter at all, or that filter on this tag.
    matches_tag = Q(tagnodetag__tag__isnull=True) | Q(tagnodetag__tag=tag)
    affected = TagNode.objects.filter(analysis=analysis).filter(matches_tag).distinct()
    for tag_node in affected:
        tag_node.queryset_dirty = True
        # Per-instance save() rather than a bulk update — presumably so
        # model save logic (e.g. version bumping) runs; confirm.
        tag_node.save()
def variant_tag_create(sender, instance, created=False, **kwargs):
    """Signal handler for VariantTag creation: dirty tag nodes, queue async work."""
    if not created:
        return  # plain updates to an existing tag need no handling here
    if instance.analysis:
        _analysis_tag_nodes_set_dirty(instance.analysis, instance.tag)
    # The slower analysis reload + liftover runs async in celery. It is
    # scheduled at transaction commit so the VariantTag row is already in
    # the DB when the worker picks up the job.
    task = analysis_tag_created_task.si(instance.pk)
    transaction.on_commit(lambda: task.apply_async())
def variant_tag_delete(sender, instance, **kwargs):
    """Signal handler for VariantTag deletion: dirty tag nodes, queue async work."""
    if instance.analysis:
        _analysis_tag_nodes_set_dirty(instance.analysis, instance.tag)
    # Deferred to transaction commit; the celery job only needs the ids,
    # which remain valid after the row itself is gone.
    task = analysis_tag_deleted_task.si(instance.analysis_id, instance.tag_id)
    transaction.on_commit(lambda: task.apply_async())
| StarcoderdataPython |
1774719 | <reponame>Zelenyy/phd-code<gh_stars>0
from multiprocessing import Pool
import os
import pickle
from phd.thunderstorm.electric_field import generate_potential
def save_potential(filename):
    """Generate one random-field potential and pickle it to `filename`.

    Returns the filename so imap_unordered callers can report progress.
    """
    potential = generate_potential()
    with open(filename, 'wb') as out_file:
        pickle.dump(potential, out_file)
    return filename
def main():
    """Generate and pickle potentials 001-004 using all available CPU cores."""
    output_dir = '../../data/thunderstorm/random_field'
    targets = ['{}/potentialINR{:03d}.obj'.format(output_dir, i) for i in range(1, 5)]
    with Pool(os.cpu_count()) as pool:
        # Print each file as soon as its worker finishes, in completion order.
        for finished in pool.imap_unordered(save_potential, targets):
            print(finished)


if __name__ == '__main__':
    main()
1737014 | from .site import Site
import os
import socket
from ..minirunner import Node
class CCParallel(Site):
    """Execution site for the CC-IN2P3 compute cluster.

    NOTE(review): the original docstring said "local environment, e.g. a
    laptop", which contradicts the CC-IN2P3-specific setup below (NSLOTS,
    singularity, fixed 16-core nodes); corrected here — confirm intent.
    """

    def command(self, cmd, sec):
        """Generate a complete command line to be run with the specified execution variables.

        This builds up the command from the core and adds any container commands,
        env var settings, or mpirun calls.

        Parameters
        ----------
        cmd: str
            The core command to execute.

        sec: StageExecutionConfig
            sec objects contain info on numbers of processes, threads, etc, and container choices

        Returns
        -------
        full_cmd: str
            The complete decorated command to be executed.
        """
        mpi1 = f"{self.mpi_command} {sec.nprocess} "
        # The stage itself only needs the --mpi flag when run in parallel.
        mpi2 = "--mpi" if sec.nprocess > 1 else ""
        volume_flag = f"--bind {sec.volume} " if sec.volume else ""
        paths = self.config.get("python_paths", [])

        # TODO: allow other container types here, like docker
        if sec.image:
            # If we are setting python paths then we have to modify the executable
            # here. This is because we need the path to be available right from the
            # start, in case the stage is defined in a module added by these paths.
            # The --env flags in docker/shifter overwrites an env var, and there
            # doesn't seem to be a way to just append to one, so we have to be a bit
            # roundabout to make this work, and invoke bash -c instead.
            bash_start = "bash -c ' cd /opt/TXPipe && "
            bash_end = "'"
            paths_start = "PYTHONPATH=$PYTHONPATH:" + (":".join(paths)) if paths else ""
            return (
                f"{mpi1} "
                f"singularity run "
                f"--env OMP_NUM_THREADS={sec.threads_per_process} "
                f"{volume_flag} "
                f"{sec.image} "
                f"{bash_start} "
                f"{paths_start} "
                f"{cmd} {mpi2} "
                f"{bash_end}"
            )
        else:
            # In the non-container case this is much easier
            paths_env = (
                "PYTHONPATH=" + (":".join(paths)) + ":$PYTHONPATH" if paths else ""
            )
            return (
                f"OMP_NUM_THREADS={sec.threads_per_process} "
                f"{paths_env} "
                f"{mpi1} "
                f"{cmd} {mpi2}"
            )

    def configure_for_parsl(self):
        """Parsl execution is not supported at this site."""
        raise ValueError("Parsl not configured for CC IN2P3 in ceci yet")

    def configure_for_mini(self):
        """Partition the allocated cores into minirunner Nodes.

        Reads NSLOTS (presumably set by the batch scheduler — TODO confirm)
        and splits it into 16-core nodes plus one smaller node for any
        remainder, storing the list in self.info["nodes"].

        Fixes over the original: removed an unused ``import psutil`` and
        stopped reusing the name ``nodes`` for both the node *count* and the
        node *list* (confusing shadowing); renamed ``last_node_codes``.
        """
        total_cores = int(os.environ['NSLOTS'])
        cores_per_node = 16  # seems to be the case
        n_full_nodes = total_cores // cores_per_node
        leftover_cores = total_cores % cores_per_node
        nodes = [Node(f"Node_{i}", cores_per_node) for i in range(n_full_nodes)]
        if leftover_cores:
            # Any remainder becomes one final, smaller node.
            nodes.append(Node(f"Node_{len(nodes)}", leftover_cores))
        self.info["nodes"] = nodes

    def configure_for_cwl(self):
        """No CWL-specific configuration is needed for this site."""
        pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.