id
stringlengths
1
7
text
stringlengths
6
1.03M
dataset_id
stringclasses
1 value
1647643
import hypothesis.strategies as st
import torch
from hypothesis import assume
from hypothesis import given

from myrtlespeech.builders.fully_connected import build
from myrtlespeech.model.fully_connected import FullyConnected
from myrtlespeech.protos import fully_connected_pb2

from tests.builders.test_activation import activation_match_cfg
from tests.protos.test_fully_connected import fully_connecteds


# Utilities -------------------------------------------------------------------


def fully_connected_module_match_cfg(
    fully_connected: FullyConnected,
    fully_connected_cfg: fully_connected_pb2.FullyConnected,
    input_features: int,
    output_features: int,
) -> None:
    """Ensures ``FullyConnected`` module matches protobuf configuration.

    Args:
        fully_connected: the built ``FullyConnected`` wrapper to check.
        fully_connected_cfg: protobuf config the module was built from.
        input_features: expected ``in_features`` of the first linear layer.
        output_features: expected ``out_features`` of the last linear layer.

    Raises:
        AssertionError: if the module structure does not match the config.
    """
    fully_connected = fully_connected.fully_connected  # get torch module

    # if no hidden layers then test that the module is Linear with correct
    # sizes, ignore activation
    if fully_connected_cfg.num_hidden_layers == 0:
        assert isinstance(fully_connected, torch.nn.Linear)
        assert fully_connected.in_features == input_features
        assert fully_connected.out_features == output_features
        # BUG FIX: ``HasField`` is a protobuf Message method.  The original
        # called it on ``fully_connected``, which was reassigned to the torch
        # ``nn.Linear`` module above and would raise AttributeError; the
        # intent is to check the *config*.
        assert not fully_connected_cfg.HasField("dropout")
        return

    # otherwise it will be a Sequential of layers
    assert isinstance(fully_connected, torch.nn.Sequential)

    # expected configuration of each layer in Sequential depends on whether
    # both/either of {activation, dropout} are present.
    act_fn_is_none = fully_connected_cfg.activation.HasField("identity")
    dropout_is_none = not fully_connected_cfg.HasField("dropout")
    dropout_is_none = dropout_is_none or fully_connected_cfg.dropout.value == 0

    if act_fn_is_none:
        expected_len = fully_connected_cfg.num_hidden_layers + 1
    else:
        expected_len = 2 * fully_connected_cfg.num_hidden_layers + 1
    if not dropout_is_none:
        expected_len += fully_connected_cfg.num_hidden_layers
    assert len(fully_connected) == expected_len

    # Now check that the linear/activation_fn/dropout layers appear in the
    # expected order.  We set the ``module_idx`` and then check for the
    # following condition:
    #     if module_idx % total_types == <module_type>_idx:
    #         assert isinstance(module, <module_type>)
    linear_idx = 0  # in all cases
    activation_idx = -1  # infeasible value as default
    dropout_idx = -1
    if act_fn_is_none and dropout_is_none:
        total_types = 1  # (linear layers only)
    elif not act_fn_is_none and dropout_is_none:
        total_types = 2  # (linear and activation)
        activation_idx = 1
    elif act_fn_is_none and not dropout_is_none:
        total_types = 2
        dropout_idx = 1
    elif not act_fn_is_none and not dropout_is_none:
        total_types = 3
        activation_idx = 1
        dropout_idx = 2

    for module_idx, module in enumerate(fully_connected):
        if module_idx % total_types == linear_idx:
            assert isinstance(module, torch.nn.Linear)
            assert module.in_features == input_features
            if module_idx == len(fully_connected) - 1:
                assert module.out_features == output_features
            else:
                assert module.out_features == fully_connected_cfg.hidden_size
                # subsequent linear layers consume the hidden size
                input_features = fully_connected_cfg.hidden_size
        elif module_idx % total_types == activation_idx:
            activation_match_cfg(module, fully_connected_cfg.activation)
        elif module_idx % total_types == dropout_idx:
            assert isinstance(module, torch.nn.Dropout)
            assert abs(module.p - fully_connected_cfg.dropout.value) < 1e-8
        else:
            raise ValueError(
                "Check module_idx and total_types assignment. It "
                "**should not** be possible to hit this branch!"
            )


# Tests -----------------------------------------------------------------------


@given(
    fully_connected_cfg=fully_connecteds(),
    input_features=st.integers(min_value=1, max_value=32),
    output_features=st.integers(min_value=1, max_value=32),
)
def test_build_fully_connected_returns_correct_module_structure(
    fully_connected_cfg: fully_connected_pb2.FullyConnected,
    input_features: int,
    output_features: int,
) -> None:
    """Ensures Module returned has correct structure."""
    if fully_connected_cfg.num_hidden_layers == 0:
        # NOTE(review): protobuf scalar/message fields are never ``None`` in
        # Python, so these assumes look like they always fail and filter out
        # every example with num_hidden_layers == 0 — confirm whether
        # ``HasField`` checks were intended here.
        assume(fully_connected_cfg.hidden_size is None)
        assume(fully_connected_cfg.activation is None)
    actual = build(fully_connected_cfg, input_features, output_features)
    fully_connected_module_match_cfg(
        actual, fully_connected_cfg, input_features, output_features
    )
StarcoderdataPython
1797310
# -*- coding: utf-8 -*-

# python imports
from __future__ import unicode_literals

# lib imports
from rest_framework.generics import ListAPIView

# project imports
from contact.models import SocialLink
from contact.serializers.social_link import SocialLinkSerializer


class ListSocialLinkView(ListAPIView):
    """List every non-deleted social link, oldest first."""

    model = SocialLink
    serializer_class = SocialLinkSerializer

    def get_queryset(self):
        # Exclude soft-deleted rows, then order by creation date.
        active_links = self.model.objects.filter_is_delete()
        return active_links.order_by('create_date')
StarcoderdataPython
1748958
from django.apps import AppConfig


class TwitterCloneAppConfig(AppConfig):
    """Django application configuration for ``twitter_clone_app``."""

    name = 'twitter_clone_app'
StarcoderdataPython
1732716
# Generated by Django 2.1.7 on 2019-02-26 12:20

from django.db import migrations


class Migration(migrations.Migration):
    """Merge migration: unifies three divergent 0012 branches of ``auctions``.

    Carries no schema operations of its own.
    """

    dependencies = [
        ('auctions', '0012_auto_20190226_1146'),
        ('auctions', '0012_auto_20190219_1012'),
        ('auctions', '0012_auto_20190222_1304'),
    ]

    operations = []
StarcoderdataPython
60566
import sys
import logging

from face_client import face_client
from face_client.api_proxy import FaceApiProxy
from face_client.camera_controller import CameraController
from face_client.image_displayer import ImageDisplayer


def main(argv):
    """Assemble the face client from its collaborators and run it."""
    controller = CameraController()
    displayer = ImageDisplayer()
    proxy = FaceApiProxy()
    face_client.FaceClient(controller, displayer, proxy).start()


def run_main():  # pylint: disable=invalid-name
    """Entry point: run ``main`` and map any crash to exit status 1."""
    try:
        sys.exit(main(sys.argv))
    except Exception:
        logging.exception('face client crashed...')
        sys.exit(1)
StarcoderdataPython
1665474
#-*- coding:utf-8 -*-
# NOTE: Python 2 module (old-style except/print syntax) providing a thin
# MySQLdb wrapper that transparently reconnects on dropped connections.
import MySQLdb
from rrd import config


def connect_db(host, port, user, password, db):
    # Open a MySQL connection; returns None (after printing) on any failure
    # rather than raising, so callers must cope with a None connection.
    try:
        conn = MySQLdb.connect(
            host=host,
            port=port,
            user=user,
            passwd=password,
            db=db,
            use_unicode=True,
            charset="utf8")
        return conn
    except Exception, e:
        print "Fatal: connect db fail:%s" % e
        return None


class DB(object):
    # Lazy-reconnecting database handle: each operation retries exactly once
    # after re-establishing the connection if MySQL dropped it.

    def __init__(self, host, port, user, password, db):
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.db = db
        # May be None if the initial connect fails; execute() will retry.
        self._conn = connect_db(host, port, user, password, db)

    def connect(self):
        # Re-open the connection using the stored credentials.
        self._conn = connect_db(self.host, self.port, self.user, self.password, self.db)
        return self._conn

    def execute(self, *a, **kw):
        # Run a query; a caller-supplied cursor may be passed via kw['cursor'].
        # AttributeError covers the _conn-is-None case, OperationalError a
        # server-side disconnect — both trigger one reconnect-and-retry.
        cursor = kw.pop('cursor', None)
        try:
            cursor = cursor or self._conn.cursor()
            cursor.execute(*a, **kw)
        except (AttributeError, MySQLdb.OperationalError):
            self._conn and self._conn.close()
            self.connect()
            cursor = self._conn.cursor()
            cursor.execute(*a, **kw)
        return cursor

    def commit(self):
        # Commit, reconnecting and re-committing once on a dropped connection.
        if self._conn:
            try:
                self._conn.commit()
            except MySQLdb.OperationalError:
                self._conn and self._conn.close()
                self.connect()
                self._conn and self._conn.commit()

    def rollback(self):
        # Roll back, reconnecting and retrying once on a dropped connection.
        # NOTE(review): rolling back on a *fresh* connection is a no-op — the
        # in-flight transaction died with the old connection.
        if self._conn:
            try:
                self._conn.rollback()
            except MySQLdb.OperationalError:
                self._conn and self._conn.close()
                self.connect()
                self._conn and self._conn.rollback()


# Module-level shared handles for the dashboard and graph databases.
dashboard_db_conn = DB(
    config.DASHBOARD_DB_HOST, config.DASHBOARD_DB_PORT, config.DASHBOARD_DB_USER,
    config.DASHBOARD_DB_PASSWD, config.DASHBOARD_DB_NAME)

graph_db_conn = DB(
    config.GRAPH_DB_HOST, config.GRAPH_DB_PORT, config.GRAPH_DB_USER,
    config.GRAPH_DB_PASSWD, config.GRAPH_DB_NAME)
StarcoderdataPython
123874
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
"""
# Closed-set SVM classification (one-vs-rest with libSVM RBF kernels).
# NOTE: Python 2 script — uses dict.iteritems() and py2 zip() semantics.
# reset all variable
# like clear all in matlab
from IPython import get_ipython
get_ipython().magic('reset -sf')

__author__ = '<NAME>'

import numpy as np
import os, glob
from scipy import signal

# detect the current working directory and print it
old_path = os.getcwd()
os.chdir('../libsvm-toolbox/python')
from svmutil import *  # libSVM
os.chdir(old_path)

#-------- global definition --------------------
# Load
# Data maps class label -> array of feature vectors (per-class instances).
Data = np.load('../data/DataDic1.npy').item()
CLASSES = Data.keys()

# libsvm constants
rbf =2
# parameters:
param = svm_parameter("-s 0")
param.probability = 1
param.kernel_type = RBF
nr_fold= 5
cross_validation= True
#------------------
# got them from (opt_parameters.py)
# see (hyperparams_opt.txt)
param.C = 150
param.gamma = .025
# --------------------------------------------


def splitdata(Data):
    # Shuffle each class and hold out 20% of its instances for testing.
    trainingData = {}
    testData = {}
    for Clazz in CLASSES:
        cl_data = Data[Clazz]
        np.random.shuffle(cl_data)
        #################################
        testSplit = int(.2 * len(cl_data))
        print("class : %s has %d instances \n" % (Clazz, len(cl_data)))
        train = cl_data[testSplit:]
        test = cl_data[:testSplit]
        ######################
        trainingData[Clazz] = train
        testData[Clazz] = test
    return (trainingData, testData)


## train
def getModels(trainingData, param):
    # Train one one-vs-rest SVM per class.
    models = {}
    for c in CLASSES:
        labels, data = getTrainingData(trainingData, c)
        prob = svm_problem(labels, data)
        m = svm_train(prob, param)
        models[c] = m
    return models


def getTrainingData(trainingData, clazz):
    # Build a shuffled (+1 for clazz, -1 for the rest) labelled training set.
    labeledData = getLabeledDataVector(trainingData, clazz, 1)
    negClasses = [c for c in CLASSES if not c == clazz]
    for c in negClasses:
        ld = getLabeledDataVector(trainingData, c, -1)
        labeledData += ld
    np.random.shuffle(labeledData)
    unzipped = [list(t) for t in zip(*labeledData)]
    labels, data = unzipped[0], unzipped[1]
    return (labels, data)


def getLabeledDataVector(dataset, clazz, label):
    # Pair every instance of clazz with the given label.
    data = dataset[clazz]
    labels = [label] * len(data)
    output = zip(labels, data)  # python2
    #output = list(zip(labels, data)) # python3
    return output


# classify
def classify(models, dataSet):
    # For each true class, count correct predictions and record the
    # predicted label of every test item (for the confusion matrix).
    results = {}
    for trueClazz in CLASSES:
        count = 0
        correct = 0
        pred_list = []
        for item in dataSet[trueClazz]:
            predClazz, prob = framepredict(models, item)
            pred_list.append(predClazz)
            count += 1
            if trueClazz == predClazz:
                correct += 1
        results[trueClazz] = (count, correct, pred_list)
    return results


def framepredict(models, item):
    # Ask every per-class model for its probability and keep the argmax.
    maxProb = 0.0
    bestClass = ""
    pb = np.array([0])
    for clazz, model in models.iteritems():  # py2-only iteration
        output = svm_predict([0], [item], model, "-q -b 1")
        prob = output[2][0][0]
        pb = np.append(pb, prob)
        if prob > maxProb:
            maxProb = prob
            bestClass = clazz
    return (bestClass, maxProb)


def plot_confusionMx(cm, title=None):
    # Render the confusion matrix with per-cell counts.
    if not title:
        title = 'confusion matrix'
    import matplotlib.pyplot as plt
    plt.figure(figsize=(6, 6))  # size of figure
    plt.clf()
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Wistia)
    plt.title(title)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    tick_marks = np.arange(len(CLASSES))
    plt.xticks(tick_marks, CLASSES, rotation=45)
    plt.yticks(tick_marks, CLASSES)
    for i in range(len(CLASSES)):
        for j in range(len(CLASSES)):
            plt.text(j, i, str(cm[i][j]))
    plt.show()


## EOF
######## main
def main():
    try:
        trainingData, testData = splitdata(Data)
        #-------------------
        models = getModels(trainingData, param)
        results = classify(models, testData)

        totalCount = 0
        totalCorrect = 0
        confmatrix = []
        # NOTE(review): the inner loop reuses the name ``clazz`` — the outer
        # loop variable is shadowed, but iteration order is still driven by
        # the outer loop so the matrix rows line up with CLASSES order.
        for clazz in CLASSES:
            count, correct, pred_list = results[clazz]
            totalCount += count
            totalCorrect += correct
            print ("%s : %d , %d , %f" % (clazz, correct, count, (float(correct) / count)))
            row = []
            for clazz in CLASSES:
                row.append(pred_list.count(clazz))
            confmatrix.append(row)
        print("----------------------------------------\n")
        print ("%s %d %d %f" % ("Overall", totalCorrect, totalCount, (float(totalCorrect) / totalCount)))
        plot_confusionMx(confmatrix, title='Confusion matrix')
    except Exception as e:
        # Best-effort: report any failure and return a nonzero sentinel.
        print (e)
        return 5


if __name__ == "__main__":
    main()  #
StarcoderdataPython
1712122
from azure.core.exceptions import AzureError, ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError
from utils.purview_client import get_purview_client
import logging
import json
import azure.functions as func


def get_all_scans_by_ds(client, ds_name: str):
    """Return every scan registered under the given data source.

    On success the HTTP body is JSON with ``nb_scans`` (count) and
    ``scans`` (the list of scan items).  Known Azure errors are mapped to
    their own status code; anything else becomes a 500.
    """
    try:
        result = {}
        response = client.scans.list_by_data_source(ds_name)
    except (ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError) as e:
        logging.warning(f"Error - Status code : {e.status_code} ")
        logging.warning(e.message)
        return func.HttpResponse(
            e.message,
            status_code=e.status_code
        )
    except AzureError as e:
        logging.warning(f"Error")
        logging.warning(e)
        return func.HttpResponse(
            "Internal Server Error",
            status_code=500
        )
    else:
        # Materialize the paged iterator before measuring/serializing it.
        list_items = [item for item in response]
        result["nb_scans"] = len(list_items)
        result["scans"] = list_items
        logging.info(response)
        return func.HttpResponse(body=json.dumps(result),
                                 mimetype="application/json",
                                 status_code=200)


def get_scan_by_name(client, ds_name: str, scan_name: str):
    """Return a single scan of a data source as a JSON HTTP response.

    Same error mapping as :func:`get_all_scans_by_ds`.
    """
    try:
        response = client.scans.get(ds_name, scan_name)
        logging.info(response)
        return func.HttpResponse(body=json.dumps(response),
                                 mimetype="application/json",
                                 status_code=200)
    except (ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError) as e:
        logging.warning(f"Error - Status code : {e.status_code} ")
        logging.warning(e.message)
        return func.HttpResponse(
            e.message,
            status_code=e.status_code
        )
    except AzureError as e:
        logging.warning("Error")
        logging.warning(e)
        return func.HttpResponse(
            "Internal Server Error",
            status_code=500
        )


def main(req: func.HttpRequest) -> func.HttpResponse:
    """Azure Function entry point: dispatch on the presence of ``scan_name``.

    Route params: ``ds_name`` (required by the routes below), ``scan_name``
    (optional — when absent, all scans of the data source are listed).
    """
    logging.info('This HTTP function to get scans executed successfully.')
    ds_name = req.route_params.get('ds_name')
    scan_name = req.route_params.get('scan_name')
    try:
        client = get_purview_client()
    except AzureError as e:
        logging.warning("Error")
        logging.warning(e)
        return func.HttpResponse(
            "Internal Server Error",
            status_code=500
        )
    #Check if we want to list all the scans or check a specific scan
    if scan_name:
        return get_scan_by_name(client, ds_name, scan_name)
    else:
        return get_all_scans_by_ds(client, ds_name)
StarcoderdataPython
121393
import serial
import MySQLdb

# Serial device where the Arduino temperature sensor is attached.
device = '/dev/ttyACM0'

arduino = serial.Serial(device, 9600)

# readline() returns bytes; decode before using it as text.
data = arduino.readline()
# BUG FIX: the original concatenated a str with the bytes object, which
# raises TypeError on Python 3 — format with %r instead.
print('Encoded Serial Databyte: %r' % (data,))
temp = data.decode('UTF-8')
print(temp)

# Make DB connection.  BUG FIX: the original used a PHP-ism
# ``connect(...) or die(...)`` — ``die`` is undefined in Python; fail
# explicitly instead.
try:
    dbConn = MySQLdb.connect("localhost", "root", "password", "<PASSWORD>")
except MySQLdb.Error as e:
    raise SystemExit("Could not connect to the database: %s" % e)
print(dbConn)

# Create the cursor outside the try so the finally-close cannot hit an
# unbound name if cursor creation itself fails.
cursor = dbConn.cursor()
try:
    # BUG FIX: parameterized query — never interpolate raw serial input
    # into SQL with the % operator.
    cursor.execute("INSERT INTO tempLog (Temperature) VALUES (%s)", (temp,))
except MySQLdb.Error as e:
    print(e)
    dbConn.rollback()
else:
    dbConn.commit()
finally:
    cursor.close()
StarcoderdataPython
1688116
# Integration tests for browser_history: fixtures fake the platform and
# home directory so fetch() reads checked-in sample history databases.
import datetime
from pathlib import Path

from .context import browser_history
from .utils import become_linux, become_windows, change_homedir  # pylint: disable=unused-import

# pylint: disable=redefined-outer-name,unused-argument


def test_firefox_linux(become_linux, change_homedir):
    """Test history is correct on Firefox for Linux"""
    f = browser_history.browsers.Firefox()
    output = f.fetch()
    his = output.get()
    # The fixture profile contains exactly one visit (times are IST,
    # UTC+5:30 — timedelta(seconds=19800)).
    assert len(his) == 1
    assert his == [(datetime.datetime(2020, 8, 3, 0, 29, 4,
                                      tzinfo=datetime.timezone(
                                          datetime.timedelta(seconds=19800), 'IST')),
                    'https://www.mozilla.org/en-US/privacy/firefox/')]

    # Same history must be reachable through the per-profile API.
    profs = f.profiles()
    his_path = f.history_path_profile(profs[0])
    assert his_path == Path.home() / '.mozilla/firefox/profile/places.sqlite'
    his = f.history_profiles(profs).get()
    assert len(his) == 1
    assert his == [(datetime.datetime(2020, 8, 3, 0, 29, 4,
                                      tzinfo=datetime.timezone(
                                          datetime.timedelta(seconds=19800), 'IST')),
                    'https://www.mozilla.org/en-US/privacy/firefox/')]


def test_edge_windows(become_windows, change_homedir):
    """Test history is correct for Edge on Windows"""
    e = browser_history.browsers.Edge()
    output = e.fetch()
    his = output.get()
    # test history from all profiles
    assert len(his) == 4
    assert his == [(datetime.datetime(2020, 9, 23, 10, 22, 37,
                                      tzinfo=datetime.timezone(
                                          datetime.timedelta(seconds=19800), 'India Standard Time')),
                    'http://www.google.com/'),
                   (datetime.datetime(2020, 9, 23, 10, 22, 37,
                                      tzinfo=datetime.timezone(
                                          datetime.timedelta(seconds=19800), 'India Standard Time')),
                    'https://www.google.com/?gws_rd=ssl'),
                   (datetime.datetime(2020, 9, 23, 10, 22, 37,
                                      tzinfo=datetime.timezone(
                                          datetime.timedelta(seconds=19800), 'India Standard Time')),
                    'https://www.google.com/?gws_rd=ssl'),
                   (datetime.datetime(2020, 9, 23, 10, 45, 3,
                                      tzinfo=datetime.timezone(
                                          datetime.timedelta(seconds=19800), 'India Standard Time')),
                    'https://pesos.github.io/')]

    # test history from specific profile
    profs = e.profiles()
    assert len(profs) == 2
    his_path = e.history_path_profile("Profile 2")
    assert his_path == Path.home() / \
        'AppData/Local/Microsoft/Edge/User Data/Profile 2/History'
    his = e.history_profiles(["Profile 2"]).get()
    assert len(his) == 1
    assert his == [(datetime.datetime(2020, 9, 23, 10, 45, 3,
                                      tzinfo=datetime.timezone(
                                          datetime.timedelta(seconds=19800), 'India Standard Time')),
                    'https://pesos.github.io/')]
StarcoderdataPython
74509
from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import # Standard imports from future import standard_library standard_library.install_aliases() from builtins import * import sys import datetime import uuid import logging import crontab import optparse import os # Our imports import emission.core.get_database as edb import emission.core.wrapper.trip_old as ecwt import emission.net.ext_service.gmaps as gmaps_lib import emission.net.ext_service.otp.otp as otp def obtain_alternatives(trip_id, user_id): db = edb.get_trip_db() trip = ecwt.E_Mission_Trip.trip_from_json(db.find_one({"trip_id": trip_id, "user_id": user_id})) logging.debug(trip.sections) start_coord = trip.trip_start_location.maps_coordinate() end_coord = trip.trip_end_location.maps_coordinate() logging.debug("Start: %s " % start_coord) logging.debug("End: %s " % end_coord) curr_time = datetime.datetime.now() curr_year = curr_time.year curr_month = curr_time.month curr_day = curr_time.day curr_hour = curr_time.hour curr_minute = curr_time.minute otp_modes = ['CAR', 'WALK', 'BICYCLE', 'TRANSIT'] for mode in otp_modes: try: otp_trip = otp.OTP(os.environ("OTP_SERVER")).route(start_coord, end_coord, mode, write_day(curr_month, curr_day, curr_year), write_time(curr_hour, curr_minute), False) otp_trip = otp_trip.turn_into_trip(None, user_id, trip_id) otp_trip.save_to_db() except otp.PathNotFoundException as e: #modes = ['driving', 'walking', 'bicycling', 'transit'] logging.debug("Got error %s from OTP, defaulting to Google Maps" % e) otp_to_google_mode = {"CAR":"driving", "WALK":"walking", "BICYCLE":"bicycling", "TRANSIT":"transit"} mode = otp_to_google_mode[mode] gmaps = gmaps_lib.googlemaps.GoogleMaps('<KEY>') try: result = gmaps.directions(origin=start_coord, destination=end_coord, mode=mode) gmaps_trip = gmaps_lib.common.google_maps_to_our_trip(result, None, user_id, trip_id, mode, curr_time) gmaps_trip.save_to_db() 
except gmaps_lib.googlemaps.GoogleMapsError as ge: logging.info("No alternatives found in either OTP or google maps, saving nothing") ''' #remove job from cronjob #TODO: make sure that you only remove the cronjob related to your current query, this will remove all cronjobs scheduled at the same time cron = crontab.CronTab() for job in cron: if job.month == curr_month and job.day == curr_day and job.hour == curr_hour and job.minute == curr_minute: cron.remove(job) print("Removed job!") pdb = edb.get_perturbed_trips_db() trip = pdb.find_one({"_id" : _id}) all_alts_finished = True for pert in find_perturbed_trips(trip): pert._id = pert._id.replace('.', '') if [pert._id] == None: all_alts_finished = False if all_alts_finished: trip.getpipelineFlags().finishAlternatives() ''' def write_day(month, day, year): return "%s-%s-%s" % (month, day, year) def write_time(hour, minute): return "%s:%s" % (hour, minute) def commandArgs(argv): parser = optparse.OptionParser(description = '') parser.add_option('--trip-id', dest = 'trip_id', help = 'Trip ID') parser.add_option('--user-id', dest = 'user_id', help = 'User ID') (options, args) = parser.parse_args(argv) if not options.trip_id: raise Exception("No Trip ID given") if not options.user_id: raise Exception("No User ID given") return (options.trip_id, uuid.UUID(options.user_id)) if __name__ == '__main__': (trip_id, user_id) = commandArgs(sys.argv) obtain_alternatives(trip_id, user_id)
StarcoderdataPython
16362
"""Additional GitHub specific tools. """
StarcoderdataPython
8436
# Generated by Django 3.2.3 on 2021-06-13 19:58

from django.db import migrations


class Migration(migrations.Migration):
    """Remove the ``finished_decks`` field from the ``player`` model."""

    dependencies = [
        ('feed', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(model_name='player', name='finished_decks'),
    ]
StarcoderdataPython
77087
# elfstatsd statistics storages.  NOTE: Python 2 module (py2 ``__metaclass__``
# ABC style and the ``counter_backport`` Counter).
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from counter_backport import Counter
from elfstatsd import utils, settings


class Storage():
    """Abstract class used as a parent for statistics storages"""
    __metaclass__ = ABCMeta

    def __init__(self, name):
        # ``name`` doubles as the RawConfigParser section written by dump().
        self.name = name
        # Basic storage structure - dict of dicts, with the first-level dict responsible for
        # storing data related to different access log files, and the second-level dict
        # storing key-value pairs we're interested in.
        self._storage = defaultdict(dict)

    def get(self, storage_key, record_key):
        """
        Get value of a given counter_key associated with given storage_key.
        Create storage_key if missing.
        @param str storage_key: access log-related key to define statistics storage
        @param str record_key: record-related key
        @return value
        @raise KeyError if record_key is not found by storage_key
        """
        return self._storage[storage_key][record_key]

    def set(self, storage_key, record_key, value):
        """
        Set a value of specified key in storage determined by storage_key.
        Create storage_key if missing.
        @param str storage_key: access log-related key to define statistics storage
        @param str record_key: record-related key
        @param value: value to set
        """
        self._storage[storage_key][record_key] = value

    @abstractmethod
    def reset(self, storage_key):
        """
        Properly reset the storage and prepare it for the next round
        @param str storage_key: access log-related key to define statistics storage
        """
        self._storage[storage_key] = {}

    @abstractmethod
    def dump(self, storage_key, parser):
        """
        Dump storage data defined by the storage_key to RawConfigParser instance
        @param str storage_key: access log-related key to define statistics storage
        @param RawConfigParser parser: instance of ConfigParser to store the data
        """
        section = self.name
        if not parser.has_section(section):
            parser.add_section(section)
        # Sorted iteration keeps the emitted file deterministic.
        for record_key in sorted(self._storage[storage_key].keys()):
            value = self.get(storage_key, record_key)
            parser.set(section, str(record_key), utils.format_value_for_munin(value))


class CounterStorage(Storage):
    """Abstract class representing a storage for incrementing counters"""
    def __init__(self, name):
        super(CounterStorage, self).__init__(name)
        # Storage structure - dict of Counters, with the first-level dict responsible for
        # storing data related to different access log files, and the second-level dict
        # being a Counter storing key-value pairs with values being incrementing integers
        self._storage = defaultdict(Counter)

    def inc_counter(self, storage_key, record_key):
        """
        Increment the counter for the given key in storage determined by storage key.
        Create storage_key if missing.
        @param str storage_key: access log-related key to define statistics storage
        @param str record_key: record-related key. If the value for this key is missing,
        it will be set to 1.
        """
        self._storage[storage_key][record_key] += 1

    @abstractmethod
    def reset(self, storage_key):
        """
        Properly reset the storage and prepare it for the next round.
        Save all the keys, but reset the values.
        @param str storage_key: access log-related key to define statistics storage
        """
        # Keys survive the reset so dump() keeps reporting zeroed metrics.
        for record_key in self._storage[storage_key].keys():
            self._storage[storage_key][record_key] = 0


class MetadataStorage(Storage):
    """Simple storage for metadata values, like daemon's version and starting time"""
    def __init__(self):
        super(MetadataStorage, self).__init__('metadata')

    def reset(self, storage_key):
        super(MetadataStorage, self).reset(storage_key)

    def dump(self, storage_key, parser):
        super(MetadataStorage, self).dump(storage_key, parser)

    def update_time(self, storage_key, time):
        """
        Update time-related metrics (first and last record) with given timestamp
        @param str storage_key: access log-related key to define statistics storage
        @param str time: string representation of record time
        """
        # first_record is written once per round; last_record tracks the tail.
        if not 'first_record' in self._storage[storage_key] or \
                not self._storage[storage_key]['first_record']:
            self._storage[storage_key]['first_record'] = time
        self._storage[storage_key]['last_record'] = time


class RecordsStorage(CounterStorage):
    """Storage for records counters, like the number of parsed and skipped records"""
    def __init__(self):
        super(RecordsStorage, self).__init__('records')
        # Fixed set of statuses that is always present in the report.
        self.record_statuses = ['parsed', 'skipped', 'error', 'total']

    def reset(self, storage_key):
        super(RecordsStorage, self).reset(storage_key)
        for status in self.record_statuses:
            self._storage[storage_key][status] = 0

    def dump(self, storage_key, parser):
        super(RecordsStorage, self).dump(storage_key, parser)


class ResponseCodesStorage(CounterStorage):
    """Storage for response codes distribution"""
    def __init__(self):
        super(ResponseCodesStorage, self).__init__('response_codes')
        # Codes that are always reported, even when never seen this round.
        self.permanent_codes = getattr(settings, 'RESPONSE_CODES', [])

    def reset(self, storage_key):
        super(ResponseCodesStorage, self).reset(storage_key)
        for code in self.permanent_codes:
            self.set(storage_key, code, 0)

    def dump(self, storage_key, parser):
        self.flexible_dump(storage_key, parser, self.name)

    def flexible_dump(self, storage_key, parser, section, prefix='rc'):
        """
        Dump storage data defined by the storage_key to RawConfigParser instance
        @param str storage_key: access log-related key to define statistics storage
        @param RawConfigParser parser: instance of RawConfigParser to store the data
        @param str section: name of section to write data
        @param str prefix: prefix to be added to response code
        """
        if not parser.has_section(section):
            parser.add_section(section)
        for code in sorted(self._storage[storage_key].keys()):
            parser.set(section, prefix+str(code),
                       utils.format_value_for_munin(self._storage[storage_key][code]))


class PatternsMatchesStorage(Storage):
    """Storage for additional patterns found in the requests"""
    def __init__(self):
        super(PatternsMatchesStorage, self).__init__('patterns')
        # Storage structure - dict of dicts of Counters, with the first-level dict responsible for
        # storing data related to different access log files, the second-level dict
        # responsible for storing data per pattern found in settings.PATTERNS_TO_EXTRACT and
        # the third-level Counter storing key-value pairs with values being incrementing integers
        # for the specific occurrences of the values extracted using the pattern.
        self._storage = defaultdict(lambda: defaultdict(Counter))

    def set(self, storage_key, record_key, value):
        """
        Increment match counter found in specific pattern defined by record_key
        from specific access log defined by storage_key.
        If storage_key or record_key are not found, they are created automatically.
        If value is not found, it is set to 1.
        @param str storage_key: access log-related key to define statistics storage
        @param record_key: identifier of a matched pattern
        @param str value: value of a matched pattern
        """
        # NOTE: intentionally overrides Storage.set with increment semantics.
        self._storage[storage_key][record_key][value] += 1

    def reset(self, storage_key):
        self._storage[storage_key] = defaultdict(Counter)

    def dump(self, storage_key, parser):
        """
        For each pattern existing in given access log file defined by storage_key,
        dump two values: `pattern.total` with total number of pattern matches,
        and `pattern.distinct` with total number of different matches.
        @param str storage_key: access log-related key to define statistics storage
        @param RawConfigParser parser: instance of RawConfigParser to store the data
        """
        section = self.name
        if not parser.has_section(section):
            parser.add_section(section)
        for record_key in sorted(self._storage[storage_key].keys()):
            total = sum([value for value in self.get(storage_key, record_key).values()])
            parser.set(section, str(record_key)+'.total', utils.format_value_for_munin(total))
            distinct = len(self._storage[storage_key][record_key])
            parser.set(section, str(record_key)+'.distinct', utils.format_value_for_munin(distinct))

        #adding missing patterns by name
        patterns = getattr(settings, 'PATTERNS_TO_EXTRACT', [])
        for pattern in patterns:
            if 'name' in pattern and not parser.has_option(section, pattern['name']+'.total'):
                parser.set(section, pattern['name'] + '.total', utils.format_value_for_munin(0))
                parser.set(section, pattern['name'] + '.distinct', utils.format_value_for_munin(0))
StarcoderdataPython
64447
from PWMparser import *

# Path to the uniPROBE-format PWM file to parse.
PWM_PATH = "/home/hsuj/Downloads/All_PWMs/SCI09/Gcm1_pwm_primary.txt"

# BUG FIX: the original opened the file and never closed it — use a context
# manager so the handle is released even if parsing fails.
with open(PWM_PATH, 'rU') as t:
    index, matrix, size = uniProbe_parse(t)

print(matrix)
print(size)
StarcoderdataPython
3360978
# Auto-generated Himesis graph (SyVOLT / AToM3 export) — do not hand-edit;
# node/edge data below is machine-produced.
from core.himesis import Himesis
import cPickle as pickle
from uuid import UUID


class HSM2SM_partial(Himesis):
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HSM2SM_partial.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        super(HSM2SM_partial, self).__init__(name='HSM2SM_partial', num_nodes=18, edges=[])

        # Add the edges
        self.add_edges([(2, 8), (8, 4), (2, 9), (9, 5), (4, 0), (0, 5), (6, 1), (1, 2), (3, 10), (10, 14), (7, 11), (11, 15), (12, 3), (16, 3), (4, 16), (6, 12), (6, 13), (13, 7), (5, 17), (17, 7)])

        # Set the graph attributes
        # NOTE(review): pickle payload newlines reconstructed from a
        # whitespace-mangled source — confirm against the generator output.
        self["mm__"] = pickle.loads("""(lp1
S'PoliceStationMM'
p2
a.""")
        self["name"] = """SM2SM_partial"""
        self["GUID__"] = UUID('0c348003-dcb8-4c48-ba1a-f1a0add0662b')

        # Set the node attributes
        self.vs[0]["associationType"] = """t_"""
        self.vs[0]["mm__"] = """directLink_T"""
        self.vs[0]["GUID__"] = UUID('a94ebb3e-7f8e-470f-8412-7e401852f504')
        self.vs[1]["mm__"] = """paired_with"""
        self.vs[1]["GUID__"] = UUID('182878dd-fecc-4a6b-9bab-91b53bfe4a95')
        self.vs[2]["mm__"] = """ApplyModel"""
        self.vs[2]["GUID__"] = UUID('5bd125a2-0c58-4b3c-be63-08cc716af633')
        self.vs[3]["name"] = """s_"""
        self.vs[3]["classtype"] = """1"""
        self.vs[3]["mm__"] = """Station_S"""
        self.vs[3]["cardinality"] = """s_"""
        self.vs[3]["GUID__"] = UUID('251ccf29-e8fc-4954-97a4-b8f47272c578')
        self.vs[4]["name"] = """s_"""
        self.vs[4]["classtype"] = """t_"""
        self.vs[4]["mm__"] = """Station_T"""
        self.vs[4]["GUID__"] = UUID('442375fc-2f20-4121-b2a8-ccdd6c17036b')
        self.vs[5]["name"] = """s_"""
        self.vs[5]["classtype"] = """t_"""
        self.vs[5]["mm__"] = """Male_T"""
        self.vs[5]["GUID__"] = UUID('c93f613e-c858-4c94-ae21-5dfebba644a1')
        self.vs[6]["mm__"] = """MatchModel"""
        self.vs[6]["GUID__"] = UUID('046ef3b6-9746-483d-8919-42fc914bc023')
        self.vs[7]["name"] = """s_"""
        self.vs[7]["classtype"] = """1"""
        self.vs[7]["mm__"] = """Male_S"""
        self.vs[7]["cardinality"] = """s_"""
        self.vs[7]["GUID__"] = UUID('c41a737c-44e1-4221-8b84-3c375850b398')
        self.vs[8]["mm__"] = """apply_contains"""
        self.vs[8]["GUID__"] = UUID('be8bb699-3ff6-405f-ae29-4d948abbbff4')
        self.vs[9]["mm__"] = """apply_contains"""
        self.vs[9]["GUID__"] = UUID('1f496c7a-8358-4193-9b6b-95c5d7385c33')
        self.vs[10]["mm__"] = """hasAttr_S"""
        self.vs[10]["GUID__"] = UUID('9e5b0d91-e174-4932-a45f-0f12b7f74b37')
        self.vs[11]["mm__"] = """hasAttr_S"""
        self.vs[11]["GUID__"] = UUID('65dae6a3-694c-4faf-9b3a-a9c387a04717')
        self.vs[12]["mm__"] = """match_contains"""
        self.vs[12]["GUID__"] = UUID('5b3ef3d7-716c-4bf2-aa86-ce5307767395')
        self.vs[13]["mm__"] = """match_contains"""
        self.vs[13]["GUID__"] = UUID('7826ac7d-86fb-404b-96e4-5f8acaebd830')
        self.vs[14]["name"] = """name"""
        self.vs[14]["mm__"] = """Attribute"""
        self.vs[14]["GUID__"] = UUID('9f5154f4-9e0a-4e09-a5b6-b7480d19a009')
        self.vs[15]["name"] = """name"""
        self.vs[15]["mm__"] = """Attribute"""
        self.vs[15]["GUID__"] = UUID('a38c63ae-5279-4f96-bde4-5dc0040d9c71')
        self.vs[16]["mm__"] = """trace_link"""
        self.vs[16]["GUID__"] = UUID('746bc92d-6a35-4e7a-983e-cbfe7da4cab3')
        self.vs[17]["mm__"] = """trace_link"""
        self.vs[17]["GUID__"] = UUID('8a4d899a-30be-473d-ab8d-8f25c70ec1d9')
StarcoderdataPython
1759637
# Inference/evaluation script for a voice-conversion (VC) / TTS model ("Parrot").
# Loads a trained checkpoint, decodes NUM utterances from the validation list,
# writes reconstructed wavs / mel arrays / alignment plots, and reports a
# phoneme error rate (Levenshtein distance over decoded vs. target phonemes).
import matplotlib
matplotlib.use("Agg")  # headless backend: figures are only saved, never shown
import matplotlib.pylab as plt
import os
import librosa
import numpy as np
import torch
from torch.utils.data import DataLoader
import sys
# Make the project modules importable when running from the repo root.
if os.path.isdir(os.path.join(os.getcwd(),'pre-train')):
    sys.path.append('pre-train')
from reader import TextMelIDLoader, TextMelIDCollate, id2ph, id2sp
from hparams import create_hparams
from model import Parrot, lcm
from train import load_model
import scipy.io.wavfile
# Relative paths below (checkpoint, output dir) are resolved against 'pre-train'.
os.chdir('pre-train')

########### Configuration ###########
hparams = create_hparams()

# #generation list
# hlist = '/home/jxzhang/Documents/DataSets/VCTK/list/hold_english.list'
# tlist = '/home/jxzhang/Documents/DataSets/VCTK/list/eval_english.list'

hlist = '/data/evs/VCTK/VCTK-Corpus-0.92/list/audio-txt-nframe-nphone_no-indian_test.txt'
tlist = '/data/evs/VCTK/VCTK-Corpus-0.92/list/audio-txt-nframe-nphone_no-indian_valid.txt'

# use seen (tlist) or unseen list (hlist)
test_list = tlist

# checkpoint_path='outdir/checkpoint_0'
checkpoint_path = 'outdir/vctk/test_orig_bs16/checkpoint_1000000'

# TTS or VC task?
input_text=False
# number of utterances for generation
NUM=10
# True -> model outputs mel spectrograms; False -> linear spectrograms
ISMEL=(not hparams.predict_spectrogram)
#####################################

def plot_data(data, fn, figsize=(12, 4)):
    """Save a row of imshow panels (one per 2-D array in `data`) to file `fn`."""
    fig, axes = plt.subplots(1, len(data), figsize=figsize)
    for i in range(len(data)):
        # plt.subplots returns a bare Axes (not a list) when there is one panel
        if len(data) == 1:
            ax = axes
        else:
            ax = axes[i]
        # origin='bottom' no longer working after matplotlib 3.3.2
        g = ax.imshow(data[i], aspect='auto', origin='lower', interpolation='none')
        plt.colorbar(g, ax=ax)
    plt.savefig(fn)

model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'])
_ = model.eval()

test_set = TextMelIDLoader(test_list, hparams.mel_mean_std, shuffle=True)
sample_list = test_set.file_path_list
collate_fn = TextMelIDCollate(lcm(hparams.n_frames_per_step_encoder,
                                  hparams.n_frames_per_step_decoder))

test_loader = DataLoader(test_set, num_workers=1, shuffle=False,
                         sampler=None,
                         batch_size=1, pin_memory=False,
                         drop_last=True, collate_fn=collate_fn)

# Output directory name encodes checkpoint, task (tts/vc) and seen/unseen split.
task = 'tts' if input_text else 'vc'
path_save = os.path.join(checkpoint_path.replace('checkpoint', 'test'), task)
path_save += '_seen' if test_list == tlist else '_unseen'

if not os.path.exists(path_save):
    print('creating dir: {}'.format(path_save))
    os.makedirs(path_save)
print('path to save: {}'.format(path_save))

def recover_wav(mel, wav_path, ismel=False,
                n_fft=2048, win_length=800, hop_length=200):
    """De-normalize a (mel-)spectrogram, invert it via Griffin-Lim, and write a
    16 kHz PCM wav to `wav_path`. Returns the float waveform.

    Relies on module-level `hparams` for the mean/std normalization files and
    on `float2pcm` defined below. NOTE(review): the 1.2 factor below looks like
    an empirical "sharpening" of the de-normalized spectrogram — confirm intent.
    """
    if ismel:
        mean, std = np.load(hparams.mel_mean_std)
    else:
        mean, std = np.load(hparams.mel_mean_std.replace('mel','spec'))

    mean = mean[:,None]
    std = std[:,None]
    mel = 1.2 * mel * std + mean
    mel = np.exp(mel)  # features were stored as log-magnitudes

    if ismel:
        # Pseudo-invert the mel filterbank to get back a linear spectrogram.
        filters = librosa.filters.mel(sr=16000, n_fft=2048, n_mels=80)
        inv_filters = np.linalg.pinv(filters)
        spec = np.dot(inv_filters, mel)
    else:
        spec = mel

    def _griffin_lim(stftm_matrix, shape, max_iter=50):
        """Iteratively estimate a phase for the magnitude STFT `stftm_matrix`."""
        y = np.random.random(shape)
        for i in range(max_iter):
            stft_matrix = librosa.core.stft(y, n_fft=n_fft, win_length=win_length, hop_length=hop_length)
            stft_matrix = stftm_matrix * stft_matrix / np.abs(stft_matrix)
            y = librosa.core.istft(stft_matrix, win_length=win_length, hop_length=hop_length)
        return y

    shape = spec.shape[1] * hop_length - hop_length + 1
    y = _griffin_lim(spec, shape)

    scipy.io.wavfile.write(wav_path, 16000, float2pcm(y))
    return y

def float2pcm(sig, dtype='int16'):
    """Convert a float signal in [-1, 1] to integer PCM samples of `dtype`."""
    sig = np.asarray(sig)
    i = np.iinfo(dtype)
    abs_max = 2 ** (i.bits-1)
    offset = i.min + abs_max  # 0 for symmetric signed integer types
    sig2 = (sig*abs_max + offset).clip(i.min, i.max).astype(dtype)
    return sig2

# Use the first utterance of the test set as the speaker reference for VC.
text_input, mel, spec, speaker_id = test_set[0]
print(' '.join([id2ph[int(id)] for id in text_input]))
reference_mel = mel.cuda().unsqueeze(0)
ref_sp = id2sp[speaker_id.item()]

def levenshteinDistance(s1, s2):
    """Edit distance between sequences `s1` and `s2` (classic two-row DP)."""
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    distances = list(range(len(s1) + 1))
    for i2, c2 in enumerate(s2):
        distances_ = [i2+1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                distances_.append(distances[i1])
            else:
                distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
        distances = distances_
    return distances[-1]

with torch.no_grad():
    errs = 0
    totalphs = 0

    for i, batch in enumerate(test_loader):
        if i == NUM:
            break

        # sample_id = sample_list[i].split('/')[-1][9:17]
        # First 8 chars of the file name identify the utterance.
        sample_id = sample_list[i].split('/')[-1][:8]
        print(('%d index %s, decoding ...'%(i,sample_id)))

        # x (4 items): text_input_padded, mel_padded, text_lengths, mel_lengths
        # y (5 items): text_input_padded, mel_padded, spc_padded, speaker_id, stop_token_padded
        x, y = model.parse_batch(batch)
        predicted_mel, post_output, predicted_stop, alignments, \
            text_hidden, audio_seq2seq_hidden, audio_seq2seq_phids, audio_seq2seq_alignments, \
            speaker_id = model.inference(x, input_text, reference_mel, hparams.beam_width)

        # batch_size is 1, so index [0] drops the batch dimension everywhere
        post_output = post_output.data.cpu().numpy()[0]  #-> [n_mel_channels, n_frames]
        alignments = alignments.data.cpu().numpy()[0].T
        audio_seq2seq_alignments = audio_seq2seq_alignments.data.cpu().numpy()[0].T

        text_hidden = text_hidden.data.cpu().numpy()[0].T  #-> [hidden_dim, max_text_len]
        audio_seq2seq_hidden = audio_seq2seq_hidden.data.cpu().numpy()[0].T
        audio_seq2seq_phids = audio_seq2seq_phids.data.cpu().numpy()[0]  # [T + 1]
        speaker_id = speaker_id.data.cpu().numpy()[0]  # scalar

        task = 'TTS' if input_text else 'VC'

        recover_wav(post_output,
                    os.path.join(path_save, 'Wav_%s_ref_%s_%s.wav'%(sample_id, ref_sp, task)),
                    ismel=ISMEL)
        post_output_path = os.path.join(path_save, 'Mel_%s_ref_%s_%s.npy'%(sample_id, ref_sp, task))
        np.save(post_output_path, post_output)

        plot_data([alignments, audio_seq2seq_alignments],
                  os.path.join(path_save, 'Ali_%s_ref_%s_%s.pdf'%(sample_id, ref_sp, task)))

        plot_data([np.hstack([text_hidden, audio_seq2seq_hidden])],
                  os.path.join(path_save, 'Hid_%s_ref_%s_%s.pdf'%(sample_id, ref_sp, task)))

        # Drop the trailing (presumably end-of-sequence) id before mapping to phonemes.
        audio_seq2seq_phids = [id2ph[id] for id in audio_seq2seq_phids[:-1]]
        target_text = y[0].data.cpu().numpy()[0]
        target_text = [id2ph[id] for id in target_text[:]]

        # to-do: output text for reference
        print('Sounds like %s'%(id2sp[speaker_id]))
        print('Decoded text: {}'.format(' '.join(audio_seq2seq_phids)))
        print('Target text: {}'.format(' '.join(target_text)))

        err = levenshteinDistance(audio_seq2seq_phids, target_text)
        print(err, len(target_text))
        errs += err
        totalphs += len(target_text)

# Overall phoneme error rate over the decoded utterances.
print(float(errs)/float(totalphs))
StarcoderdataPython
120161
#!/usr/bin/env python
# Flask webhook for a Telegram bus-arrival bot: routes incoming messages
# (text commands, shared locations) and inline-keyboard callbacks to the
# bus/chat controller helpers.
import sys
import os
sys.path.append(os.path.join(os.path.abspath('.'), 'lib'))
import re
from flask import request
import telegram
from actualapp import app
from bot_helper import bot, TOKEN, sendMsg, editMsg, editMsgReplyMarkup, makeInlineKeyboard
from io_helper import serialise, deserialise
from user_text import helpText, helpTextFull
from bus_controller import replyLocation, sendBusStopLoc, replyNextBus, replyBusInfo, processBusStop, shouldCheckQueueUponStart
from chat_controller import editStar, editFav, replyDailyLog, showFav, showHist, showStar, checkQueueUponStart


@app.route('/'+TOKEN+'/HOOK', methods=['POST'])
def webhookHandler():
    """Telegram webhook entry point: dispatch updates, always answer 'ok'."""
    if request.method == "POST":
        # retrieve the message in JSON and then transform it to Telegram object
        update = telegram.Update.de_json(request.get_json(force=True), bot)
        message = update.message
        callback = update.callback_query
        if message:
            chat_id = message.chat.id
            text = message.text
            loc = message.location
            if loc:
                replyLocation(chat_id, loc)
            elif text:  # may be None if it's a sticker or something
                replyCommand(chat_id, text)
        elif callback:
            # Inline-keyboard button press: payload was serialised into callback.data.
            # Keys (presumably): c=command, s=bus stop, r=route, a=action, x=interval.
            message = callback.message
            data = deserialise(callback.data)
            command = data.get("c")
            if command == "constr":
                callbackConstr(message, data)
            elif command in ["star", "unstar"]:
                editStar(message, data.get("s"), command, data.get("n"))
            elif command == "loc":
                sendBusStopLoc(message, data.get("s"))
            elif command == "hide":
                editMsgReplyMarkup(message, reply_markup=None)
                # editMsgReplyMarkup(message, reply_markup=makeInlineKeyboard([]))
            # Acknowledge the callback so the client stops showing a spinner.
            bot.answerCallbackQuery(callback.id)
    return 'ok'


def replyCommand(chat_id, text, message=None):
    """Dispatch a text command (optionally prefixed with '/') to its handler.

    `message` is the originating message for handlers that edit in place;
    unknown commands fall through to the short help text.
    """
    text = text.strip()
    lowerText = text.lower()
    # Commands may arrive with or without the leading slash.
    lowerText = re.sub('^/', '', lowerText)
    if re.match(r'next *', lowerText) or re.match(r'remind *', lowerText) or re.match(r'nag *', lowerText) or re.match(r'tracks? *', lowerText):
        replyNextBus(chat_id, text, 0, False, message)
    elif re.match(r'info *', lowerText):
        replyBusInfo(chat_id, text, message)
    elif re.match(r'save *\|', lowerText) or re.match(r'delete *\|', lowerText):
        editFav(chat_id, text)
    elif re.match(r'counter *', lowerText):
        replyDailyLog(chat_id)
    elif re.match(r'fav *', lowerText):
        showFav(chat_id)
    elif re.match(r'history *', lowerText):
        showHist(chat_id)
    elif re.match(r'starred *', lowerText):
        showStar(chat_id)
    elif re.match(r'help *', lowerText):
        sendMsg(chat_id, helpTextFull)
    else:
        sendMsg(chat_id, helpText)


def callbackConstr(message, data):
    """Multi-step inline-keyboard flow for building a command interactively.

    Depending on how much of the payload is filled in, either executes the
    completed command, or edits the message to ask for the next missing piece
    (interval -> action -> route).
    """
    chat_id = message.chat.id
    busStopNo = data.get("s")
    routeNo = data.get("r")
    action = data.get("a")
    if action:
        if action in ["next", "info"]:
            # All parts known: synthesize the text command and run it.
            text = "/{} {} {}".format(action, busStopNo, routeNo)
            replyCommand(chat_id, text, message)
        elif action in ["remind", "track", "tracks"]:
            interval = data.get("x")
            if interval:
                text = "/{} {} {} {}".format(action, busStopNo, routeNo, interval)
                replyCommand(chat_id, text, message)
            else:
                # Ask for the alert interval via a fresh inline keyboard.
                makeData = lambda x: serialise(dict(data, **{"x": x}))
                intervals = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30]
                reply_markup = makeInlineKeyboard([(a, makeData(a)) for a in intervals], rows=3)
                if action == "track":
                    template = "How many minutes before the next bus {} arrives at stop {} would you like your last alert?"
                else:
                    template = "How many minutes before the next bus {} arrives at stop {} would you like your alert?"
                editMsg(message, template.format(routeNo, busStopNo), reply_markup=reply_markup)
    elif routeNo:
        # Route chosen but no action yet: offer the available actions.
        makeData = lambda x: serialise(dict(data, **{"a": x}))
        actions = ["next", "remind", "track", "tracks", "info"]
        reply_markup = makeInlineKeyboard([(a, makeData(a)) for a in actions], rows=1)
        editMsg(message, "Choose an action (as described in /help) for stop {} and route {}:".format(busStopNo, routeNo), reply_markup=reply_markup)
    else:
        # Only the stop is known: let the controller list its routes.
        processBusStop(chat_id, busStopNo, message)


# Optionally drain any pending reminder queue when the app starts.
if (shouldCheckQueueUponStart):
    checkQueueUponStart()
StarcoderdataPython
152363
<gh_stars>0 # Copyright (c) 2015 Midokura SARL, All Rights Reserved. # # @author: <NAME> <<EMAIL>>, Midokura
StarcoderdataPython
1694448
import json from requests import HTTPError class MailjetError(Exception): def __init__(self, *args, **kwargs): self.email_message = kwargs.pop('email_message', None) self.payload = kwargs.pop('payload', None) if isinstance(self, HTTPError): self.response = kwargs.get('response', None) else: self.response = kwargs.pop('response', None) super(MailjetError, self).__init__(*args, **kwargs) def __str__(self): parts = [ " ".join([str(arg) for arg in self.args]), self.describe_send(), self.describe_response(), ] return "\n".join(filter(None, parts)) def describe_send(self): if self.payload is None: return None description = "Sending a message" try: to_emails = [to['email'] for to in self.payload['message']['to']] description += " to %s" % ','.join(to_emails) except KeyError: pass try: description += " from %s" % self.payload['message']['from_email'] except KeyError: pass return description def describe_response(self): if self.response is None: return None description = "Mailjet API response %d: %s" % (self.response.status_code, self.response.reason) try: json_response = self.response.json() description += "\n" + json.dumps(json_response, indent=2) except (AttributeError, KeyError, ValueError): try: description += " " + self.response.text except AttributeError: pass return description class MailjetAPIError(MailjetError, HTTPError): def __init__(self, *args, **kwargs): super(MailjetAPIError, self).__init__(*args, **kwargs) if self.response is not None: self.status_code = self.response.status_code
StarcoderdataPython
3381883
<reponame>huq-industries/carto-spatial-extension<filename>modules/transformations/redshift/lib/center_lib/helper.py # Copyright (c) 2020, <NAME> (Python3 implementation) # Copyright (c) 2021, CARTO from __future__ import division from math import sqrt def euclidean_distance(p1, p2): return sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
StarcoderdataPython
3298119
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
# Unit tests for InnerEye's SchedulerWithWarmUp: warmup behaviour, checkpoint
# resume, state_dict round-trips, and agreement with the stock PyTorch schedulers.
from typing import Any, Callable, List, Optional, Tuple

import numpy as np
import pytest
import torch
from torch.optim import lr_scheduler
from torch.optim.lr_scheduler import CosineAnnealingLR, ExponentialLR, LambdaLR, MultiStepLR, \
    StepLR, _LRScheduler
from torch.optim.optimizer import Optimizer

from InnerEye.ML.config import SegmentationModelBase
from InnerEye.ML.deep_learning_config import DeepLearningConfig, LRSchedulerType, LRWarmUpType
from InnerEye.ML.utils.lr_scheduler import SchedulerWithWarmUp
from Tests.ML.configs.DummyModel import DummyModel


def enumerate_scheduler(scheduler: _LRScheduler, steps: int) -> List[float]:
    """
    Reads the current learning rate via get_last_lr, run 1 scheduler step, and repeat. Returns the LR values.
    """
    lrs = []
    for _ in range(steps):
        lr = scheduler.get_last_lr()  # type: ignore
        # All tests use a single parameter group, hence exactly one LR.
        assert isinstance(lr, list)
        assert len(lr) == 1
        lrs.append(lr[0])
        scheduler.step()
    return lrs


def test_create_lr_scheduler_last_epoch() -> None:
    """
    Test to check if the lr scheduler is initialized to the correct epoch
    """
    l_rate = 1e-3
    gamma = 0.5
    total_epochs = 5
    # StepLR with step_size=1 decays by `gamma` every epoch.
    expected_lrs_per_epoch = [l_rate * (gamma ** i) for i in range(total_epochs)]
    config = DummyModel()
    config.l_rate = l_rate
    config.l_rate_scheduler = LRSchedulerType.Step
    config.l_rate_step_step_size = 1
    config.l_rate_step_gamma = gamma
    # create lr scheduler
    initial_scheduler, initial_optimizer = _create_lr_scheduler_and_optimizer(config)
    # check lr scheduler initialization step
    initial_epochs = 3
    assert np.allclose(enumerate_scheduler(initial_scheduler, initial_epochs), expected_lrs_per_epoch[:initial_epochs])
    # create lr scheduler for recovery checkpoint
    config.start_epoch = initial_epochs
    recovery_scheduler, recovery_optimizer = _create_lr_scheduler_and_optimizer(config)
    # Both the scheduler and the optimizer need to be loaded from the checkpoint.
    recovery_scheduler.load_state_dict(initial_scheduler.state_dict())
    recovery_optimizer.load_state_dict(initial_optimizer.state_dict())
    assert recovery_scheduler.last_epoch == config.start_epoch
    # check lr scheduler initialization matches the checkpoint epoch
    # as training will start for start_epoch + 1 in this case
    assert np.allclose(enumerate_scheduler(recovery_scheduler, 2), expected_lrs_per_epoch[initial_epochs:])


@pytest.mark.parametrize("lr_scheduler_type", [x for x in LRSchedulerType])
def test_lr_monotonically_decreasing_function(lr_scheduler_type: LRSchedulerType) -> None:
    """
    Tests if LR scheduler is a monotonically decreasing function
    """
    config = DummyModel(num_epochs=10,
                        l_rate_scheduler=lr_scheduler_type,
                        l_rate_exponential_gamma=0.9,
                        l_rate_step_gamma=0.9,
                        l_rate_step_step_size=1,
                        l_rate_multi_step_gamma=0.9,
                        l_rate_multi_step_milestones=[3, 5, 7],
                        l_rate_polynomial_gamma=0.9)

    def non_increasing(L: List) -> bool:
        # True iff each element is >= its successor.
        return all(x >= y for x, y in zip(L, L[1:]))

    # create lr scheduler
    lr_scheduler, _ = _create_lr_scheduler_and_optimizer(config)
    lr_list = enumerate_scheduler(lr_scheduler, config.num_epochs)
    assert non_increasing(lr_list)


@pytest.mark.parametrize("lr_scheduler_type", [x for x in LRSchedulerType])
@pytest.mark.parametrize("warmup_epochs", [0, 3])
def test_warmup_against_original_schedule(lr_scheduler_type: LRSchedulerType, warmup_epochs: int) -> None:
    """
    Tests if LR scheduler with warmup matches the Pytorch implementation after the warmup stage is completed.
    """
    config = DummyModel(num_epochs=6,
                        l_rate=1e-2,
                        l_rate_scheduler=lr_scheduler_type,
                        l_rate_exponential_gamma=0.9,
                        l_rate_step_gamma=0.9,
                        l_rate_step_step_size=2,
                        l_rate_multi_step_gamma=0.9,
                        l_rate_multi_step_milestones=[3, 5, 7],
                        l_rate_polynomial_gamma=0.9,
                        l_rate_warmup=LRWarmUpType.Linear if warmup_epochs > 0 else LRWarmUpType.NoWarmUp,
                        l_rate_warmup_epochs=warmup_epochs)
    # create lr scheduler
    lr_scheduler, optimizer1 = _create_lr_scheduler_and_optimizer(config)

    original_scheduler: Optional[_LRScheduler] = None
    optimizer2 = _create_dummy_optimizer(config)
    # This mimics the code in SchedulerWithWarmUp.get_scheduler and must be in sync
    if lr_scheduler_type == LRSchedulerType.Exponential:
        original_scheduler = ExponentialLR(optimizer=optimizer2, gamma=config.l_rate_exponential_gamma)
    elif lr_scheduler_type == LRSchedulerType.Step:
        original_scheduler = StepLR(optimizer=optimizer2, step_size=config.l_rate_step_step_size,
                                    gamma=config.l_rate_step_gamma)
    elif lr_scheduler_type == LRSchedulerType.Cosine:
        original_scheduler = CosineAnnealingLR(optimizer2, T_max=config.num_epochs, eta_min=config.min_l_rate)
    elif lr_scheduler_type == LRSchedulerType.MultiStep:
        assert config.l_rate_multi_step_milestones is not None  # for mypy
        original_scheduler = MultiStepLR(optimizer=optimizer2, milestones=config.l_rate_multi_step_milestones,
                                         gamma=config.l_rate_multi_step_gamma)
    elif lr_scheduler_type == LRSchedulerType.Polynomial:
        x = config.min_l_rate / config.l_rate
        polynomial_decay: Any = lambda epoch: (1 - x) * (
                (1. - float(epoch) / config.num_epochs) ** config.l_rate_polynomial_gamma) + x
        original_scheduler = LambdaLR(optimizer=optimizer2, lr_lambda=polynomial_decay)
    else:
        raise ValueError("Scheduler has not been added to this test.")

    expected_lr_list = []
    if warmup_epochs == 0:
        pass
    elif warmup_epochs == 3:
        # For the first config.l_rate_warmup_epochs, the learning rate is lower than the initial learning rate by a
        # linear factor
        expected_lr_list.extend([f * config.l_rate for f in [0.25, 0.5, 0.75]])
    else:
        raise NotImplementedError()
    expected_lr_list.extend(enumerate_scheduler(original_scheduler, config.num_epochs - warmup_epochs))
    print(f"Expected schedule with warmup: {expected_lr_list}")

    lr_with_warmup_scheduler = enumerate_scheduler(lr_scheduler, config.num_epochs)
    print(f"Actual schedule: {lr_with_warmup_scheduler}")
    if ((lr_scheduler_type == LRSchedulerType.Polynomial or lr_scheduler_type == LRSchedulerType.Cosine)
            and warmup_epochs > 0):
        # Polynomial and Cosine scheduler will be squashed in time because the number of epochs is reduced
        # (both schedulers take a "length of training" argument, and that is now shorter). Skip comparing those.
        pass
    else:
        assert np.allclose(lr_with_warmup_scheduler, expected_lr_list, rtol=1e-5)


def _create_dummy_optimizer(config: SegmentationModelBase) -> Optimizer:
    """Adam over a throwaway parameter, with the config's initial LR."""
    return torch.optim.Adam([torch.ones(2, 2, requires_grad=True)], lr=config.l_rate)


def _create_lr_scheduler_and_optimizer(config: SegmentationModelBase, optimizer: Optimizer = None) \
        -> Tuple[SchedulerWithWarmUp, Optimizer]:
    """Build a SchedulerWithWarmUp (and dummy optimizer if none given) from `config`."""
    # create dummy optimizer
    if optimizer is None:
        optimizer = _create_dummy_optimizer(config)
    # create lr scheduler
    lr_scheduler = SchedulerWithWarmUp(config, optimizer)
    return lr_scheduler, optimizer


# This construct is to work around an issue where mypy does not think that MultiplicativeLR exists in lr_scheduler
def multiplicative(optimizer: Optimizer) -> _LRScheduler:
    return lr_scheduler.MultiplicativeLR(optimizer, lr_lambda=lambda epoch: 0.5)  # type: ignore


@pytest.mark.parametrize("scheduler_func, expected_values",
                         # A scheduler that reduces learning rate by a factor of 0.5 in each epoch
                         [(multiplicative, [1, 0.5, 0.25, 0.125, 0.0625]),
                          # A scheduler that reduces learning rate by a factor of 0.5 at epochs 2 and 4
                          (lambda optimizer: MultiStepLR(optimizer, [2, 4], gamma=0.5),
                           [1, 1, 0.5, 0.5, 0.25]),
                          (lambda optimizer: MultiStepLR(optimizer, [1, 2, 3, 4, 5], gamma=0.5),
                           [1, 0.5, 0.25, 0.125, 0.0625])
                          ])
def test_built_in_lr_scheduler(scheduler_func: Callable[[Optimizer], _LRScheduler],
                               expected_values: List[float]) -> None:
    """
    A test to check that the behaviour of the built-in learning rate schedulers is still what we think it is.
    """
    initial_lr = 1
    optimizer = torch.optim.Adam([torch.ones(2, 2, requires_grad=True)], lr=initial_lr)
    scheduler = scheduler_func(optimizer)
    lrs = []
    for _ in range(5):
        last_lr = scheduler.get_last_lr()  # type: ignore
        lrs.append(last_lr)
        # get_last_lr should not change the state when called twice
        assert scheduler.get_last_lr() == last_lr  # type: ignore
        scheduler.step()
    # Expected behaviour: First LR should be the initial LR set in the optimizers.
    assert lrs == [[v] for v in expected_values]


@pytest.mark.parametrize("warmup_epochs, expected_values",
                         [(0, [1, 1, 0.5, 0.5]),
                          (1, [0.5, 1, 1, 0.5]),
                          (2, [1 / 3, 2 / 3, 1, 1])])
def test_lr_scheduler_with_warmup(warmup_epochs: int, expected_values: List[float]) -> None:
    """
    Check that warmup is applied correctly to a multistep scheduler
    """
    initial_lr = 1
    optimizer = torch.optim.Adam([torch.ones(2, 2, requires_grad=True)], lr=initial_lr)
    config = DeepLearningConfig(l_rate=initial_lr,
                                l_rate_scheduler=LRSchedulerType.MultiStep,
                                l_rate_multi_step_milestones=[2, 4],
                                l_rate_multi_step_gamma=0.5,
                                l_rate_warmup_epochs=warmup_epochs,
                                l_rate_warmup=LRWarmUpType.Linear,
                                should_validate=False)
    scheduler = SchedulerWithWarmUp(config, optimizer)
    lrs = enumerate_scheduler(scheduler, 4)
    assert lrs == expected_values


# Exclude Polynomial scheduler because that uses lambdas, which we can't save to a state dict
@pytest.mark.parametrize("lr_scheduler_type", [x for x in LRSchedulerType if x != LRSchedulerType.Polynomial])
@pytest.mark.parametrize("warmup_epochs", [0, 3, 4, 5])
def test_resume_from_saved_state(lr_scheduler_type: LRSchedulerType, warmup_epochs: int) -> None:
    """
    Tests if LR scheduler when restarted from an epoch continues as expected.
    """
    restart_from_epoch = 4
    config = DummyModel(num_epochs=7,
                        l_rate_scheduler=lr_scheduler_type,
                        l_rate_exponential_gamma=0.9,
                        l_rate_step_gamma=0.9,
                        l_rate_step_step_size=2,
                        l_rate_multi_step_gamma=0.9,
                        l_rate_multi_step_milestones=[3, 5, 7],
                        l_rate_polynomial_gamma=0.9,
                        l_rate_warmup=LRWarmUpType.Linear if warmup_epochs > 0 else LRWarmUpType.NoWarmUp,
                        l_rate_warmup_epochs=warmup_epochs)
    # This scheduler mimics what happens if we train for the full set of epochs
    scheduler_all_epochs, _ = _create_lr_scheduler_and_optimizer(config)
    expected_lr_list = enumerate_scheduler(scheduler_all_epochs, config.num_epochs)
    # Create a scheduler where training will be recovered
    scheduler1, optimizer1 = _create_lr_scheduler_and_optimizer(config)
    # Scheduler 1 is only run for 4 epochs, and then "restarted" to train the rest of the epochs.
    result_lr_list = enumerate_scheduler(scheduler1, restart_from_epoch)
    # resume state: This just means setting start_epoch in the config
    config.start_epoch = restart_from_epoch
    scheduler_resume, optimizer_resume = _create_lr_scheduler_and_optimizer(config)
    # Load a "checkpoint" for both scheduler and optimizer
    scheduler_resume.load_state_dict(scheduler1.state_dict())
    optimizer_resume.load_state_dict(optimizer1.state_dict())
    result_lr_list.extend(enumerate_scheduler(scheduler_resume, config.num_epochs - restart_from_epoch))
    print(f"Actual schedule: {result_lr_list}")
    print(f"Expected schedule: {expected_lr_list}")
    assert len(result_lr_list) == len(expected_lr_list)
    assert np.allclose(result_lr_list, expected_lr_list)


@pytest.mark.parametrize("lr_scheduler_type", [x for x in LRSchedulerType])
def test_save_and_load_state_dict(lr_scheduler_type: LRSchedulerType) -> None:
    """Round-trip SchedulerWithWarmUp through state_dict/load_state_dict."""

    def object_dict_same(lr1: SchedulerWithWarmUp, lr2: SchedulerWithWarmUp) -> bool:
        """
        Tests to see if two LRScheduler objects are the same.
        This ignores lambdas if one of the schedulers is LambdaLR, since lambdas are not stored to the state dict.
        """
        # ignore the _scheduler and _warmup objects, compare those separately
        dict1 = {key: val for key, val in lr1.__dict__.items() if key != "_scheduler" and key != "_warmup"}
        dict2 = {key: val for key, val in lr2.__dict__.items() if key != "_scheduler" and key != "_warmup"}
        # see if the underlying scheduler object is the same
        scheduler1_dict = {key: val for key, val in lr1._scheduler.__dict__.items() if key != "lr_lambdas"}
        scheduler2_dict = {key: val for key, val in lr2._scheduler.__dict__.items() if key != "lr_lambdas"}
        warmup1_dict = lr1._warmup.__dict__
        warmup2_dict = lr2._warmup.__dict__
        return dict1 == dict2 and scheduler1_dict == scheduler2_dict and warmup1_dict == warmup2_dict

    config = DummyModel(num_epochs=10,
                        l_rate_scheduler=lr_scheduler_type,
                        l_rate_exponential_gamma=0.9,
                        l_rate_step_gamma=0.9,
                        l_rate_step_step_size=2,
                        l_rate_multi_step_gamma=0.9,
                        l_rate_multi_step_milestones=[3, 5, 7],
                        l_rate_polynomial_gamma=0.9,
                        l_rate_warmup=LRWarmUpType.Linear,
                        l_rate_warmup_epochs=4)
    lr_scheduler_1, optimizer = _create_lr_scheduler_and_optimizer(config)
    lr_scheduler_1.step()
    # This is not supported functionality - we are doing this just to change _scheduler from its default state
    lr_scheduler_1._scheduler.step()
    lr_scheduler_1._scheduler.step()
    state_dict = lr_scheduler_1.state_dict()

    lr_scheduler_2, _ = _create_lr_scheduler_and_optimizer(config, optimizer)
    assert not object_dict_same(lr_scheduler_1, lr_scheduler_2)
    lr_scheduler_2.load_state_dict(state_dict)
    assert object_dict_same(lr_scheduler_1, lr_scheduler_2)


def test_cosine_decay_function() -> None:
    """
    Tests Cosine lr decay function at (pi/2) and verifies if the value is correct.
    """
    config = DummyModel(l_rate_scheduler=LRSchedulerType.Cosine,
                        num_epochs=10,
                        min_l_rate=0.0)
    # create lr scheduler
    test_epoch = 5
    lr_scheduler, _ = _create_lr_scheduler_and_optimizer(config)
    for _ in range(test_epoch):
        lr_scheduler.step()
    # Halfway through the cosine cycle (with min_l_rate=0) the LR is exactly half.
    assert lr_scheduler.get_last_lr()[0] == 0.5 * config.l_rate


def test_multistep_lr() -> None:
    """Spot-check the multi-step schedule for warmup lengths 0, 1 and 2."""
    l_rate = 0.3
    config = DummyModel(l_rate_scheduler=LRSchedulerType.MultiStep,
                        l_rate=l_rate,
                        l_rate_multi_step_gamma=0.1,
                        num_epochs=10,
                        l_rate_multi_step_milestones=[2],
                        l_rate_warmup=LRWarmUpType.Linear,
                        l_rate_warmup_epochs=5)

    def check_warmup(expected: List[float]) -> None:
        """Create a scheduler from the current config, check its first 4 LRs."""
        scheduler, _ = _create_lr_scheduler_and_optimizer(config)
        actual = enumerate_scheduler(scheduler, 4)
        assert actual == expected

    # No warmup: multi-step LR with milestone after 2 epochs
    original_schedule = [l_rate, l_rate, l_rate * 0.1, l_rate * 0.1]
    config.l_rate_warmup = LRWarmUpType.Linear
    config.l_rate_warmup_epochs = 0
    check_warmup(original_schedule)

    # 1 epoch warmup: linear function up to the initial learning rate gives a warmup value of half the initial LR
    config.l_rate_warmup_epochs = 1
    check_warmup([l_rate * 0.5] + original_schedule[:3])

    # 2 epochs warmup
    config.l_rate_warmup_epochs = 2
    check_warmup([l_rate / 3, l_rate * 2 / 3] + original_schedule[:2])
1758577
# Repair read pairing in a coordinate-sorted BAM file.
#
# NOTE: this is Python 2 code (`itertools.izip`, iterator `.next()`).
# The script splits the input into read1/read2 files per strand with
# samtools flag filters, re-pairs neighbouring reads whose template
# lengths look plausible, rewrites mate coordinates/names, and finally
# re-sorts the output with sambamba.
import sys
import pysam
import subprocess
from re import sub
import os
from itertools import izip

# Input is a *.sorted.bam; derive the repaired (unsorted and sorted) names.
bamsortfn = sys.argv[1]
bamrepairedfn = sub('.sorted.bam$', ".repaired.bam", bamsortfn)
bamrepairedsortfn = sub('.sorted.bam$', ".repaired.sorted.bam", bamsortfn)

if(os.path.isfile(bamsortfn)):
    inbam = pysam.Samfile(bamsortfn, 'rb')
    outbam = pysam.Samfile(bamrepairedfn, 'wb', template=inbam)
    writtencount = 0
    # positive & negative strands
    strands = ['pos', 'neg']
    for strand in strands:
        read1fn = sub('.bam$', '.read1_' + strand + '.bam', bamsortfn)
        read2fn = sub('.bam$', '.read2_' + strand + '.bam', bamsortfn)
        if(not os.path.isfile(read1fn) or not os.path.isfile(read2fn)):
            # Split the BAM by read number and strand using SAM flag masks
            # (0x63/0x53/0x93/0xA3 select paired, proper-pair, read1/read2
            # with forward/reverse orientation combinations).
            read1_strand1sortfn = sub('.bam$', '.read1_pos.bam', bamsortfn)
            read1_strand2sortfn = sub('.bam$', '.read1_neg.bam', bamsortfn)
            read2_strand1sortfn = sub('.bam$', '.read2_pos.bam', bamsortfn)
            read2_strand2sortfn = sub('.bam$', '.read2_neg.bam', bamsortfn)
            command1 = " ".join(["samtools view -u -h -f 0x0063", bamsortfn, ">", read1_strand1sortfn])
            command2 = " ".join(["samtools view -u -h -f 0x0053", bamsortfn, ">", read1_strand2sortfn])
            command3 = " ".join(["samtools view -u -h -f 0x0093", bamsortfn, ">", read2_strand1sortfn])
            command4 = " ".join(["samtools view -u -h -f 0x00A3", bamsortfn, ">", read2_strand2sortfn])
            subprocess.check_output(command1, shell = True)
            subprocess.check_output(command2, shell = True)
            subprocess.check_output(command3, shell = True)
            subprocess.check_output(command4, shell = True)

        splt1 = pysam.Samfile(read1fn, 'rb')
        splt2 = pysam.Samfile(read2fn, 'rb')
        itr1 = splt1.fetch(until_eof=True)
        itr2 = splt2.fetch(until_eof=True)
        start = True
        # Walk both files in lock-step, considering two candidate cross
        # pairings per step: (read1, read2next) and (read1next, read2).
        for read1, read2 in izip(itr1, itr2):
            try:
                # On the first mismatching pair, shift the read2 stream by
                # one record to re-align the two iterators.
                if(read2.qname != read1.qname and start):
                    read2 = itr2.next()
                    start = False
                    continue
                read1next = itr1.next()
                read2next = itr2.next()
                if(strand == 'pos'):
                    # Candidate template lengths for the two cross pairings,
                    # and the mean of the original template lengths used as a
                    # plausibility reference (accept 0.2x .. 5x of the mean).
                    tlenabs1 = read2next.pos - read1.pos + abs(read2next.qlen)
                    tlenabs2 = read2.pos - read1next.pos + abs(read2.qlen)
                    tlenmean = (abs(read1.tlen) + abs(read1next.tlen))/2
                    if(tlenabs1 > 0.2*tlenmean and tlenabs1 < 5*tlenmean and read2next.qname != read1.qname and tlenabs1 > 0 and not read1.is_duplicate and not read1.is_secondary and not read2next.is_duplicate and not read2next.is_secondary):
                        # Re-pair read1 with read2next: fix tlen, mate
                        # positions, and give both reads the same name.
                        read1.tlen = tlenabs1
                        read2next.tlen = -tlenabs1
                        read1.pnext = read2next.pos
                        read2next.pnext = read1.pos
                        read2next.qname = read1.qname
                        outbam.write(read1)
                        outbam.write(read2next)
                        writtencount = writtencount + 1
                    if(tlenabs2 > 0.2*tlenmean and tlenabs2 < 5*tlenmean and read1next.qname != read2.qname and tlenabs2 > 0 and not read2.is_duplicate and not read2.is_secondary and not read1next.is_duplicate and not read1next.is_secondary):
                        # Re-pair read1next with read2 symmetrically.
                        read1next.tlen = tlenabs2
                        read2.tlen = -tlenabs2
                        read2.pnext = read1next.pos
                        read1next.pnext = read2.pos
                        read2.qname = read1next.qname
                        outbam.write(read1next)
                        outbam.write(read2)
                        writtencount = writtencount + 1
                elif(strand == 'neg'):
                    # Negative strand: template length sign conventions are
                    # mirrored relative to the positive-strand branch.
                    tlenabs1 = read1.pos - read2next.pos + abs(read1.qlen)
                    tlenabs2 = read1next.pos - read2.pos + abs(read1next.qlen)
                    tlenmean = (abs(read1.tlen) + abs(read1next.tlen))/2
                    if(tlenabs1 > 0.2*tlenmean and tlenabs1 < 5*tlenmean and read2next.qname != read1.qname and tlenabs1 > 0 and not read1.is_duplicate and not read1.is_secondary and not read2next.is_duplicate and not read2next.is_secondary):
                        read1.tlen = -tlenabs1
                        read2next.tlen = tlenabs1
                        read1.pnext = read2next.pos
                        read2next.pnext = read1.pos
                        read2next.qname = read1.qname
                        outbam.write(read1)
                        outbam.write(read2next)
                        writtencount = writtencount + 1
                    if(tlenabs2 > 0.2*tlenmean and tlenabs2 < 5*tlenmean and read1next.qname != read2.qname and tlenabs2 > 0 and not read2.is_duplicate and not read2.is_secondary and not read1next.is_duplicate and not read1next.is_secondary):
                        read1next.tlen = -tlenabs2
                        read2.tlen = tlenabs2
                        read2.pnext = read1next.pos
                        read1next.pnext = read2.pos
                        read2.qname = read1next.qname
                        outbam.write(read1next)
                        outbam.write(read2)
                        writtencount = writtencount + 1
            except StopIteration:
                break
        splt1.close(); splt2.close()
        # Remove the per-strand temporary split files.
        os.remove(read1fn)
        os.remove(read2fn)
    inbam.close()
    outbam.close()
    # Sort the repaired BAM and drop the unsorted intermediate.
    command = " ".join(["sambamba sort", bamrepairedfn, "-o", bamrepairedsortfn])
    subprocess.check_output(command, shell = True)
    os.remove(bamrepairedfn)
StarcoderdataPython
1613765
import collections
import re

import numpy as np


class TextProcessor(object):
    """Tokenise raw text and map words onto integer ids over a fixed vocabulary."""

    @staticmethod
    def from_file(input_file):
        """Build a processor from the contents of a UTF-8 text file."""
        with open(input_file, 'r', encoding='utf8') as handle:
            return TextProcessor(handle.read())

    def __init__(self, text):
        # Plain whitespace tokenisation; the heavier _text2words pass exists
        # below but is not applied here.
        self.words = text.split()
        self.id2word = None
        self.word2id = None
        self.vector = None

    def set_vocab(self, word2id):
        """Install an externally built word->id mapping; returns self for chaining."""
        self.word2id = word2id
        return self

    def create_vocab(self, size):
        """Keep the ``size - 1`` most frequent words, replacing the least
        frequent of those with the ``'<unk>'`` sentinel."""
        counts = collections.Counter(self.words)
        print(f'Vocabulary size reduced from {len(counts)} to {size}')
        kept = [word for word, _ in counts.most_common(size - 1)]
        kept[-1] = '<unk>'  # sacrifice the rarest kept word for the unknown marker
        self.id2word = kept
        self.word2id = {word: idx for idx, word in enumerate(kept)}

    def get_vector(self):
        """Convert the token stream to ids, mapping out-of-vocabulary words to '<unk>'."""
        unk_id = self.word2id['<unk>']
        self.vector = [self.word2id.get(word, unk_id) for word in self.words]
        return self.vector

    def save_converted(self, filename):
        """Write the converted id sequence back out as space-separated words."""
        with open(filename, 'w') as handle:
            for word_id in self.vector:
                handle.write(self.id2word[word_id] + ' ')

    @staticmethod
    def _text2words(text):
        """Normalise punctuation into standalone tokens and split into words."""
        ellipsis_re = re.compile(r'\.\.+')
        spaces_re = re.compile(r' +')
        # Protect multi-dot runs before single dots are tokenised.
        text = ellipsis_re.sub(' <3dot> ', text.lower())
        # Ordered punctuation rewrites (order matters: '.' after the
        # ellipsis guard, '<3dot>' restored near the end).
        for old, new in [
            (',', ' , '), ('.', ' . '), ('/', ' . '),
            ('(', ' ( '), (')', ' ) '), ('[', ' ( '), (']', ' ) '),
            (':', ' : '), ("'", " '"), ('?', ' ? '), (';', ' . '),
            ('-', ' -'), ('<3dot>', ' ... '), ('"', ''),
        ]:
            text = text.replace(old, new)
        text = spaces_re.sub(' ', text)
        text = text.replace('\n', ' <nl> ')
        return ['\n' if token == '<nl>' else token for token in text.split()]


def train_iterator(raw_data, batch_size, num_steps):
    """Yield ``(x, y)`` minibatches of shape ``[batch_size, num_steps]`` where
    ``y`` is ``x`` shifted one step ahead.

    Raises ValueError when the data is too short for even one epoch step.
    """
    flat = np.array(raw_data, dtype=np.int32)
    batch_len = len(flat) // batch_size
    # Drop the tail so the data reshapes evenly into batch_size rows.
    data = np.reshape(flat[:batch_size * batch_len], [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")

    for step in range(epoch_size):
        lo = step * num_steps
        x = data[:, lo:lo + num_steps]
        y = data[:, lo + 1:lo + num_steps + 1]
        yield (x, y)
StarcoderdataPython
1748191
#!/home/pi/.virtualenvs/owl/bin/python3
# OWL (OpenWeedLocator) main entry point: detects green weeds on brown
# background frames and fires the matching spray nozzle.
from algorithms import exg, exg_standardised, exg_standardised_hue, hsv, exgr, gndvi
from button_inputs import Selector, Recorder
from image_sampler import image_sample
from imutils.video import VideoStream, FileVideoStream, FPS
from relay_control import Controller
from queue import Queue
from time import strftime
import subprocess
import imutils  # reference PyImageSearch
import shutil
import numpy as np  # reference
import time
import sys
import cv2  # reference
import os


def nothing(x):
    # No-op callback required by cv2.createTrackbar.
    pass


def green_on_brown(image, exgMin=30, exgMax=250, hueMin=30, hueMax=90, brightnessMin=5, brightnessMax=200,
                   saturationMin=30, saturationMax=255, minArea=1, headless=True, algorithm='exg'):
    '''
    Uses a provided algorithm and contour detection to determine green objects in the image.
    Min and Max thresholds are provided.
    :param image: input image (BGR frame) to be analysed
    :param exgMin: minimum kept value of the vegetation-index output
    :param exgMax: values above this are zeroed before adaptive thresholding
    :param hueMin: minimum hue passed to the HSV-based algorithms
    :param hueMax: maximum hue passed to the HSV-based algorithms
    :param brightnessMin: minimum value (V channel) for HSV-based algorithms
    :param brightnessMax: maximum value (V channel) for HSV-based algorithms
    :param saturationMin: minimum saturation for HSV-based algorithms
    :param saturationMax: maximum saturation for HSV-based algorithms
    :param minArea: minimum area for the detection - used to filter out small detections
    :param headless: True: no windows display; False: watch what the algorithm does
    :param algorithm: the algorithm to use. Defaults to ExG if not correct
    :return: tuple (contours, bounding boxes, weed centres, annotated image)
    '''
    # different algorithm options, add in your algorithm here if you make a new one!
    threshedAlready = False
    if algorithm == 'exg':
        output = exg(image)
    elif algorithm == 'exgr':
        output = exgr(image)
    elif algorithm == 'nexg':
        output = exg_standardised(image)
    elif algorithm == 'exhsv':
        output = exg_standardised_hue(image, hueMin=hueMin, hueMax=hueMax,
                                      brightnessMin=brightnessMin, brightnessMax=brightnessMax,
                                      saturationMin=saturationMin, saturationMax=saturationMax)
    elif algorithm == 'hsv':
        # hsv() also reports that its output is already a binary mask
        output, threshedAlready = hsv(image, hueMin=hueMin, hueMax=hueMax,
                                      brightnessMin=brightnessMin, brightnessMax=brightnessMax,
                                      saturationMin=saturationMin, saturationMax=saturationMax)
    elif algorithm == 'gndvi':
        output = gndvi(image)
    else:
        output = exg(image)
        print('[WARNING] DEFAULTED TO EXG')

    if not headless:
        cv2.imshow("Threshold", output)

    # run the thresholds provided
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    # if not a binary image, run an adaptive threshold on the area that fits
    # within the thresholded bounds.
    if not threshedAlready:
        output = np.where(output > exgMin, output, 0)
        output = np.where(output > exgMax, 0, output)
        output = np.uint8(np.abs(output))
        if not headless:
            cv2.imshow("post", output)
        thresholdOut = cv2.adaptiveThreshold(output, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                             cv2.THRESH_BINARY_INV, 31, 2)
        thresholdOut = cv2.morphologyEx(thresholdOut, cv2.MORPH_CLOSE, kernel, iterations=1)

    # if already binary, run morphological operations to remove any noise
    if threshedAlready:
        thresholdOut = cv2.morphologyEx(output, cv2.MORPH_CLOSE, kernel, iterations=5)

    if not headless:
        cv2.imshow("Threshold", thresholdOut)

    # find all the contours on the binary images
    cnts = cv2.findContours(thresholdOut.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    weedCenters = []
    boxes = []

    # loop over all the detected contours and calculate the centres and bounding boxes
    for c in cnts:
        # filter based on total area of contour
        if cv2.contourArea(c) > minArea:
            # calculate the min bounding box
            startX, startY, boxW, boxH = cv2.boundingRect(c)
            endX = startX + boxW
            endY = startY + boxH
            cv2.rectangle(image, (int(startX), int(startY)), (endX, endY), (0, 0, 255), 2)
            # save the bounding box
            boxes.append([startX, startY, boxW, boxH])
            # compute box center
            centerX = int(startX + (boxW / 2))
            centerY = int(startY + (boxH / 2))
            weedCenters.append([centerX, centerY])

    # returns the contours, bounding boxes, centroids and the image on which
    # the boxes have been drawn
    return cnts, boxes, weedCenters, image


# the class
class Owl:
    def __init__(self, video=False, videoFile=None, recording=False, nozzleNum=4, headless=True,
                 exgMin=30, exgMax=180, hueMin=30, hueMax=92, brightnessMin=5, brightnessMax=200,
                 saturationMin=30, saturationMax=255, resolution=(832, 624), framerate=32):
        # different detection parameters
        self.headless = headless
        self.recording = recording
        self.resolution = resolution
        self.framerate = framerate

        # threshold parameters for different algorithms
        self.exgMin = exgMin
        self.exgMax = exgMax
        self.hueMin = hueMin
        self.hueMax = hueMax
        self.saturationMin = saturationMin
        self.saturationMax = saturationMax
        self.brightnessMin = brightnessMin
        self.brightnessMax = brightnessMax

        # setup the track bars if headless is False
        if not self.headless:
            # create trackbars for the threshold calculation
            cv2.namedWindow("Params")
            cv2.createTrackbar("thresholdMin", "Params", self.exgMin, 255, nothing)
            cv2.createTrackbar("thresholdMax", "Params", self.exgMax, 255, nothing)

        # instantiate the recorder if recording is True
        # NOTE(review): when recording is True, self.record/self.saveRecording
        # are only set later by the button thread inside hoot() — confirm.
        if self.recording:
            self.fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            self.writer = None
        else:
            self.record = False
            self.saveRecording = False

        # check if test video or videostream from camera
        if video:
            self.cam = FileVideoStream(videoFile).start()
        # if no video, start the camera with the provided parameters
        else:
            try:
                self.cam = VideoStream(usePiCamera=True,
                                       resolution=self.resolution,
                                       framerate=self.framerate).start()
            except ModuleNotFoundError:
                # no picamera module available - fall back to the default webcam
                self.cam = VideoStream(src=0).start()
            time.sleep(1.0)

        # set the sprayqueue size
        self.sprayQueue = Queue(maxsize=10)

        # nozzleDict maps the reference nozzle number to a boardpin on the embedded device
        self.nozzleDict = {
            0: 13,
            1: 15,
            2: 16,
            3: 18
        }

        ### Data collection only ###
        # algorithmDict maps pins to algorithms for data collection
        self.algorithmDict = {
            "exg": 29,
            "nexg": 31,
            "hsv": 33,
            "exhsv": 35,
        }
        # this is where the recording button can be added. Currently set to pin 37
        if self.recording:
            self.recorderButton = Recorder(recordGPIO=37)
        ############################

        # instantiate the nozzle controller - successful start should beep the buzzer
        self.controller = Controller(nozzleDict=self.nozzleDict)

        # instantiate the logger
        self.logger = self.controller.logger

        # sensitivity and weed size to be added
        self.sensitivity = None
        self.laneCoords = {}

        # add the total number of nozzles. This can be changed easily, but the
        # nozzleDict and physical relays would need to be updated too. Fairly
        # straightforward, so an opportunity for more precise application
        self.nozzleNum = nozzleNum

    def hoot(self, sprayDur, sample=False, sampleDim=400, saveDir='output', camera_name='cam1',
             algorithm='exg', selectorEnabled=False, minArea=10):
        # Main detection/spray loop: read frames, detect weeds, fire nozzles.
        # track FPS and framecount
        fps = FPS().start()

        if selectorEnabled:
            self.selector = Selector(switchDict=self.algorithmDict)

        try:
            while True:
                frame = self.cam.read()

                if selectorEnabled:
                    algorithm, newAlgorithm = self.selector.algorithm_selector(algorithm)
                    if newAlgorithm:
                        self.logger.log_line('[NEW ALGO] {}'.format(algorithm))

                if self.recording:
                    self.record = self.recorderButton.record
                    self.saveRecording = self.recorderButton.saveRecording

                if frame is None:
                    fps.stop()
                    print("[INFO] Stopped. Approximate FPS: {:.2f}".format(fps.fps()))
                    self.stop()
                    break

                if self.record and self.writer is None:
                    saveDir = os.path.join(saveDir, strftime("%Y%m%d-{}-{}".format(camera_name, algorithm)))
                    if not os.path.exists(saveDir):
                        os.makedirs(saveDir)
                    self.baseName = os.path.join(saveDir, strftime("%Y%m%d-%H%M%S-{}-{}".format(camera_name, algorithm)))
                    videoName = self.baseName + '.avi'
                    self.logger.new_video_logfile(name=self.baseName + '.txt')
                    self.writer = cv2.VideoWriter(videoName, self.fourcc, 30,
                                                  (frame.shape[1], frame.shape[0]), True)

                # retrieve the trackbar positions for thresholds
                if not self.headless:
                    self.exgMin = cv2.getTrackbarPos("thresholdMin", "Params")
                    self.exgMax = cv2.getTrackbarPos("thresholdMax", "Params")
                else:
                    # this leaves it open to adding dials for sensitivity.
                    # Static at the moment, but could be dynamic
                    self.update(exgMin=self.exgMin, exgMax=self.exgMax)  # add in update values here

                # pass image, thresholds to green_on_brown function
                cnts, boxes, weedCentres, imageOut = green_on_brown(frame.copy(),
                                                                    exgMin=self.exgMin,
                                                                    exgMax=self.exgMax,
                                                                    hueMin=self.hueMin,
                                                                    hueMax=self.hueMax,
                                                                    saturationMin=self.saturationMin,
                                                                    saturationMax=self.saturationMax,
                                                                    brightnessMin=self.brightnessMin,
                                                                    brightnessMax=self.brightnessMax,
                                                                    headless=self.headless,
                                                                    algorithm=algorithm,
                                                                    minArea=minArea)

                ##### IMAGE SAMPLER #####
                # record sample images if required of weeds detected
                # uncomment if needed
                # if frameCount % 60 == 0 and sample is True:
                #     saveFrame = frame.copy()
                #     sampleThread = Thread(target=image_sample, args=[saveFrame, weedCentres, saveDir, sampleDim])
                #     sampleThread.start()
                #########################

                # activation region limit - once weed crosses this line, nozzle is activated
                self.yAct = int((0.2) * frame.shape[0])
                laneWidth = imageOut.shape[1] / self.nozzleNum

                # calculate lane coords and draw on frame
                for i in range(self.nozzleNum):
                    laneX = int(i * laneWidth)
                    # cv2.line(displayFrame, (laneX, 0), (laneX, imageOut.shape[0]), (0, 255, 255), 2)
                    self.laneCoords[i] = laneX

                # loop over the ID/weed centres from contours
                for ID, centre in enumerate(weedCentres):
                    # if they are in activation region the spray them
                    if centre[1] > self.yAct:
                        sprayTime = time.time()
                        for i in range(self.nozzleNum):
                            # determine which lane needs to be activated
                            if int(self.laneCoords[i]) <= centre[0] < int(self.laneCoords[i] + laneWidth):
                                # log a spray job with the controller using the
                                # nozzle, timestamp and spray duration
                                self.controller.receive(nozzle=i, timeStamp=sprayTime, duration=sprayDur)

                # update the framerate counter
                fps.update()

                if not self.headless:
                    cv2.imshow("Output", imutils.resize(imageOut, width=600))

                if self.record and not self.saveRecording:
                    self.writer.write(frame)

                # recording button released: finalise the current video file
                if self.saveRecording and not self.record:
                    self.writer.release()
                    self.controller.solenoid.beep(duration=0.1)
                    self.recorderButton.saveRecording = False
                    fps.stop()
                    self.writer = None
                    self.logger.log_line_video(
                        "[INFO] {}. Approximate FPS: {:.2f}".format(self.baseName, fps.fps()), verbose=True)
                    fps = FPS().start()

                k = cv2.waitKey(1) & 0xFF
                if k == 27:  # ESC key exits the loop
                    fps.stop()
                    self.logger.log_line_video(
                        "[INFO] Stopped. Approximate FPS: {:.2f}".format(fps.fps()), verbose=True)
                    self.stop()
                    break

        except KeyboardInterrupt:
            fps.stop()
            self.logger.log_line_video(
                "[INFO] Stopped. Approximate FPS: {:.2f}".format(fps.fps()), verbose=True)
            self.stop()

        except Exception as e:
            # any unexpected failure: audible alarm + log, leave hardware loop
            self.controller.solenoid.beep(duration=0.5, repeats=5)
            self.logger.log_line("[CRITICAL ERROR] STOPPED: {}".format(e))

    # still in development
    def update_software(self):
        # NOTE(review): the final shutil.move() call is missing its arguments
        # and would raise TypeError if ever reached - unfinished code.
        USBDir, USBConnected = check_for_usb()
        if USBConnected:
            files = os.listdir(USBDir)
            workingDir = '/home/pi'
            # move old version to version control directory first
            oldVersionDir = strftime(workingDir + "/%Y%m%d-%H%M%S_update")
            os.mkdir(oldVersionDir)
            currentDir = '/home/pi/owl'
            shutil.move(currentDir, oldVersionDir)
            # move new directory to working directory
            for item in files:
                if 'owl' in item:
                    shutil.move()

    def stop(self):
        # Shut everything down cleanly: relays off, camera stopped, windows
        # closed, then exit the process.
        self.controller.running = False
        self.controller.solenoid.all_off()
        self.controller.solenoid.beep(duration=0.1)
        self.controller.solenoid.beep(duration=0.1)
        self.cam.stop()
        if self.record:
            self.writer.release()
            self.recorderButton.running = False
        if not self.headless:
            cv2.destroyAllWindows()
        sys.exit()

    def update(self, exgMin=30, exgMax=180):
        # Update the detection thresholds used by the next hoot() iteration.
        self.exgMin = exgMin
        self.exgMax = exgMax


def check_for_usb():
    """Return (save_directory, usb_connected) — checks /media/pi for a mounted USB drive."""
    try:
        nanoMediaFolder = 'ls /media/pi'
        proc = subprocess.Popen(nanoMediaFolder, shell=True, preexec_fn=os.setsid, stdout=subprocess.PIPE)
        usbName = proc.stdout.readline().rstrip().decode('utf-8')

        if len(usbName) > 0:
            print('[INFO] Saving to {} usb'.format(usbName))
            saveDir = '/media/pi/{}/'.format(usbName)
            return saveDir, True
        else:
            print('[INFO] No USB connected. Saving to videos')
            saveDir = '/home/pi/owl/videos'
            return saveDir, False

    except AttributeError:
        # os.setsid is unavailable on Windows
        print('[INFO] Windows computer detected...')
        saveDir = '/videos/'
        return saveDir, False


# business end of things
if __name__ == "__main__":
    owl = Owl(video=False,
              videoFile=r'',
              headless=True,
              recording=False,
              exgMin=25,
              exgMax=200,
              hueMin=39,
              hueMax=83,
              saturationMin=50,
              saturationMax=220,
              brightnessMin=60,
              brightnessMax=190,
              framerate=32,
              resolution=(416, 320))

    # start the targeting!
    owl.hoot(sprayDur=0.15,
             sample=False,
             sampleDim=1000,
             saveDir='/home/pi',
             algorithm='exhsv',
             selectorEnabled=False,
             camera_name='hsv',
             minArea=10)
StarcoderdataPython
4822543
<reponame>JaumVitor/HOMEWORK-PYTHON n1 = float(input('DIGITE O PRIMEIRO VALOR: ')) n2 = float(input('DIGITE O SEGUNDO VALOR : ')) n3 = float(input('DIGITE O TERCEIRO VALOR: ')) if (n2 < n1 > n3): print('VALOR',n1,'É O MAIOR VALOR') elif (n1 < n2 > n3): print('VALOR',n2,'É O MAIOR VALOR') elif (n1 < n3 > n2 ): print('VALOR',n3,'É O MAIOR VALOR')
StarcoderdataPython
3225125
# -*- coding: utf-8 -*-

# Copyright 2019 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import functools
import hashlib
import json
import logging
import os
import os.path
import sqlite3
import time

import yaml
import requests
from six.moves.urllib import parse

from deep_oc_client.client import modules
from deep_oc_client import exceptions
from deep_oc_client import version


class _JSONEncoder(json.JSONEncoder):
    """JSON encoder used to serialize request payloads and cached metadata."""

    def default(self, o):
        return super(_JSONEncoder, self).default(o)


def cache_request(f):
    """Decorator to cache GET requests in a local SQLite database.

    Responses for GET requests are stored in ``<cache_dir>/cache`` and reused
    until they are older than ``self.cache_seconds``. Non-GET requests are
    passed through unchanged.
    """
    @functools.wraps(f)
    def wrapper(self, url, method, **kwargs):
        if method.lower() == "get":
            cache_db = os.path.join(self.cache_dir, "cache")
            try:
                conn = sqlite3.connect(cache_db)
                with conn:
                    conn.execute(
                        "CREATE TABLE IF NOT EXISTS module "
                        " (id INTEGER PRIMARY KEY, "
                        "  url TEXT UNIQUE, "
                        "  metadata TEXT, "
                        "  timestamp REAL"
                        " )")

                    row = conn.execute(
                        "SELECT metadata, timestamp "
                        "FROM module "
                        "WHERE url = ?", (url,)).fetchone()

                    now = time.time()
                    if not row or now - row[1] > self.cache_seconds:
                        # Cache miss or stale entry: do the real request and
                        # store (or refresh) the serialized content.
                        resp, content = f(self, url, method, **kwargs)
                        if not row:
                            conn.execute(
                                "INSERT INTO module "
                                "(url, metadata, timestamp) "
                                "VALUES (?, ?, ?)",
                                (url, self._json.encode(content), now))
                        else:
                            conn.execute(
                                "UPDATE module SET"
                                " metadata = ?, timestamp = ? "
                                "WHERE url = ?",
                                (self._json.encode(content), now, url))
                    else:
                        # Cache hit: synthesize a successful response object.
                        # BUG FIX: the original bound the class itself
                        # (``resp = requests.Response``) and then mutated the
                        # class attribute ``status_code`` - instantiate it.
                        resp = requests.Response()
                        resp.status_code = 200
                        content = json.loads(row[0])
            except sqlite3.Error:
                raise
            finally:
                conn.close()
            return resp, content
        else:
            return f(self, url, method, **kwargs)
    return wrapper


class DeepOcClient(object):
    """The DEEP OC client class."""

    _catalog_url = ("https://raw.githubusercontent.com/deephdc/"
                    "deephdc.github.io/pelican/project_apps.yml")

    def __init__(self, cache=300, state_dir="~/.deep-oc/", debug=False):
        """Initialization of DeepOcClient object.

        :param int cache: number of seconds a cached GET response stays valid
        :param str state_dir: directory where client state (cache) is kept
        :param bool debug: whether to enable debug logging
        """
        self.url = None
        self.http_debug = debug

        self._modules = modules.Modules(self)

        self._logger = logging.getLogger(__name__)
        if self.http_debug:
            # Logging level is already set on the root logger
            ch = logging.StreamHandler()
            self._logger.addHandler(ch)
            self._logger.propagate = False
            if hasattr(requests, 'logging'):
                rql = requests.logging.getLogger(requests.__name__)
                rql.addHandler(ch)
                # Since we have already setup the root logger on debug, we
                # have to set it up here on WARNING (its original level)
                # otherwise we will get all the requests logging messages
                rql.setLevel(logging.WARNING)

        self._json = _JSONEncoder()

        self.session = requests.Session()

        self.cache_seconds = cache
        self.state_dir = state_dir
        self.cache_dir = None
        self.init_state_dir()

    @property
    def modules(self):
        """Interface to query for modules.

        :return: Modules interface.
        :rtype: deep_oc_client.client.modules.Modules
        """
        return self._modules

    def init_state_dir(self):
        """Expand and create the state/cache directories if missing."""
        self.state_dir = os.path.expanduser(self.state_dir)
        self.cache_dir = os.path.join(self.state_dir, "cache")
        for d in (".", ):
            d = os.path.join(self.state_dir, d)
            if os.path.exists(d):
                if not os.path.isdir(d):
                    raise exceptions.ClientException(
                        message="Cannot use %s, is not a directory" % d
                    )
            else:
                os.mkdir(d)

    @cache_request
    def request(self, url, method, json=None, **kwargs):
        """Send an HTTP request with the specified characteristics.

        Wrapper around `requests.Session.request` to handle tasks such as
        setting headers, JSON encoding/decoding, and error handling.

        Arguments that are not handled are passed through to the requests
        library.

        :param str url: Path or fully qualified URL of the HTTP request. If
                        only a path is provided then the URL will be prefixed
                        with the attribute self.url. If a fully qualified URL
                        is provided then self.url will be ignored.
        :param str method: The http method to use. (e.g. 'GET', 'POST')
        :param json: Some data to be represented as JSON. (optional)
        :param kwargs: any other parameter that can be passed to
                       :meth:`requests.Session.request` (such as `headers`).
                       Except:

                       - `data` will be overwritten by the data in the `json`
                         param.
                       - `allow_redirects` is ignored as redirects are handled
                         by the session.

        :returns: The response to the request.
        """
        method = method.lower()
        kwargs.setdefault('headers', kwargs.get('headers', {}))
        kwargs["headers"]["User-Agent"] = "orpy-%s" % version.user_agent
        kwargs["headers"]["Accept"] = "application/json"
        if json is not None:
            kwargs["headers"].setdefault('Content-Type', 'application/json')
            kwargs['data'] = self._json.encode(json)

        url = parse.urljoin(self.url, url)

        self.http_log_req(method, url, kwargs)
        resp = self.session.request(method, url, **kwargs)
        self.http_log_resp(resp)

        if resp.status_code >= 400:
            raise exceptions.from_response(resp, resp.json(), url, method)

        try:
            content = resp.json().get("content", resp.json())
        except Exception:
            # Not JSON (e.g. the catalog YAML file) - fall back to YAML.
            content = yaml.safe_load(resp.text)

        return resp, content

    def _get_links_from_response(self, response):
        """Return the (self, next, last) pagination links of a response."""
        d = {}
        for link in response.json().get("links", []):
            d[link["rel"]] = link["href"]
        return d.get("self"), d.get("next"), d.get("last")

    def http_log_req(self, method, url, kwargs):
        """Log the outgoing request as an equivalent curl command (debug only)."""
        if not self.http_debug:
            return

        string_parts = ['curl -g -i']

        if not kwargs.get('verify', True):
            string_parts.append(' --insecure')

        string_parts.append(" '%s'" % url)
        string_parts.append(' -X %s' % method)

        headers = copy.deepcopy(kwargs['headers'])
        self._redact(headers, ['Authorization'])
        # because dict ordering changes from 2 to 3
        keys = sorted(headers.keys())
        for name in keys:
            value = headers[name]
            header = ' -H "%s: %s"' % (name, value)
            string_parts.append(header)

        if 'data' in kwargs:
            data = json.loads(kwargs['data'])
            string_parts.append(" -d '%s'" % json.dumps(data))
        self._logger.debug("REQ: %s" % "".join(string_parts))

    def http_log_resp(self, resp):
        """Log status, headers and (redacted) body of a response (debug only)."""
        if not self.http_debug:
            return

        if resp.text and resp.status_code != 400:
            try:
                body = json.loads(resp.text)
                self._redact(body, ['access', 'token', 'id'])
            except ValueError:
                body = None
        else:
            body = None

        self._logger.debug("RESP: [%(status)s] %(headers)s\nRESP BODY: "
                           "%(text)s\n",
                           {'status': resp.status_code,
                            'headers': resp.headers,
                            'text': json.dumps(body)})

    def _redact(self, target, path, text=None):
        """Replace the value of a key in `target`.

        The key can be at the top level by specifying a list with a single
        key as the path. Nested dictionaries are also supported by passing a
        list of keys to be navigated to find the one that should be replaced.
        In this case the last one is the one that will be replaced.

        :param dict target: the dictionary that may have a key to be redacted;
                            modified in place
        :param list path: a list representing the nested structure in `target`
                          that should be redacted; modified in place
        :param string text: optional text to use as a replacement for the
                            redacted key. if text is not specified, the
                            default text will be sha1 hash of the value being
                            redacted
        """
        key = path.pop()

        # move to the most nested dict
        for p in path:
            try:
                target = target[p]
            except KeyError:
                return

        if key in target:
            if text:
                target[key] = text
            elif target[key] is not None:
                # because in python3 byte string handling is ... ug
                value = target[key].encode('utf-8')
                sha1sum = hashlib.sha1(value)  # nosec
                target[key] = "{SHA1}%s" % sha1sum.hexdigest()

    def head(self, url, **kwargs):
        """Perform a HEAD request.

        This calls :py:meth:`.request()` with ``method`` set to ``HEAD``.
        """
        return self.request(url, 'HEAD', **kwargs)

    def get(self, url, **kwargs):
        """Perform a GET request.

        This calls :py:meth:`.request()` with ``method`` set to ``GET``.
        """
        return self.request(url, 'GET', **kwargs)

    def post(self, url, **kwargs):
        """Perform a POST request.

        This calls :py:meth:`.request()` with ``method`` set to ``POST``.
        """
        return self.request(url, 'POST', **kwargs)

    def put(self, url, **kwargs):
        """Perform a PUT request.

        This calls :py:meth:`.request()` with ``method`` set to ``PUT``.
        """
        return self.request(url, 'PUT', **kwargs)

    def delete(self, url, **kwargs):
        """Perform a DELETE request.

        This calls :py:meth:`.request()` with ``method`` set to ``DELETE``.
        """
        return self.request(url, 'DELETE', **kwargs)

    def patch(self, url, **kwargs):
        """Perform a PATCH request.

        This calls :py:meth:`.request()` with ``method`` set to ``PATCH``.
        """
        return self.request(url, 'PATCH', **kwargs)
50382
""" Train Fashion MNIST CNN Code borrowed from http://danialk.github.io/blog/2017/09/29/range-of- convolutional-neural-networks-on-fashion-mnist-dataset/ """ # Specify visible cuda device import os os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "7" import numpy as np from keras.preprocessing.image import ImageDataGenerator from lib.keras_utils import build_vgg_fmnist from lib.utils import load_dataset_fmnist batch_size = 512 # Load f-mnist, find mean and std x_train, y_train, x_test, y_test = load_dataset_fmnist() mean = x_train.mean().astype(np.float32) std = x_train.std().astype(np.float32) # Build Keras model cnn = build_vgg_fmnist(mean, std) # Data augmentation gen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3, height_shift_range=0.08, zoom_range=0.08) batches = gen.flow(x_train, y_train, batch_size=batch_size) val_batches = gen.flow(x_test, y_test, batch_size=batch_size) cnn.fit_generator(batches, steps_per_epoch=60000//batch_size, epochs=50, validation_data=val_batches, validation_steps=10000//batch_size, use_multiprocessing=True) score = cnn.evaluate(x_train, y_train, verbose=0) print('Train loss:', score[0]) print('Train accuracy:', score[1]) score = cnn.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # Save trained weight cnn.save_weights('./tmp/weights/fmnist_vgg_smxe.h5')
StarcoderdataPython
1689550
# -*- coding: utf-8 -*-
# Initial schema migration for the django-newsletter app (auto-generated;
# field definitions below are unchanged from the generated output).
from __future__ import unicode_literals

from django.db import migrations, models
import django.utils.timezone
import django.contrib.sites.managers
import sorl.thumbnail.fields
from django.conf import settings
import newsletter.utils
import django.db.models.manager


class Migration(migrations.Migration):

    dependencies = [
        ('sites', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Article: a content block inside a Message, ordered by sortorder.
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('sortorder', models.PositiveIntegerField(verbose_name='sort order', db_index=True, help_text='Sort order determines the order in which articles are concatenated in a post.')),
                ('title', models.CharField(verbose_name='title', max_length=200)),
                ('text', models.TextField(verbose_name='text')),
                ('url', models.URLField(verbose_name='link', blank=True, null=True)),
                ('image', sorl.thumbnail.fields.ImageField(verbose_name='image', blank=True, null=True, upload_to='newsletter/images/%Y/%m/%d')),
            ],
            options={
                'verbose_name': 'article',
                'verbose_name_plural': 'articles',
                'ordering': ('sortorder',),
            },
        ),
        # Message: a draft e-mail belonging to one Newsletter.
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('title', models.CharField(verbose_name='title', max_length=200)),
                ('slug', models.SlugField(verbose_name='slug')),
                ('date_create', models.DateTimeField(verbose_name='created', auto_now_add=True)),
                ('date_modify', models.DateTimeField(verbose_name='modified', auto_now=True)),
            ],
            options={
                'verbose_name': 'message',
                'verbose_name_plural': 'messages',
            },
        ),
        # Newsletter: a mailing list with sender information, tied to sites.
        migrations.CreateModel(
            name='Newsletter',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('title', models.CharField(verbose_name='newsletter title', max_length=200)),
                ('slug', models.SlugField(unique=True)),
                ('email', models.EmailField(verbose_name='e-mail', max_length=254, help_text='Sender e-mail')),
                ('sender', models.CharField(verbose_name='sender', max_length=200, help_text='Sender name')),
                ('visible', models.BooleanField(verbose_name='visible', db_index=True, default=True)),
                ('send_html', models.BooleanField(verbose_name='send html', default=True, help_text='Whether or not to send HTML versions of e-mails.')),
                ('site', models.ManyToManyField(default=newsletter.utils.get_default_sites, to='sites.Site')),
            ],
            options={
                'verbose_name': 'newsletter',
                'verbose_name_plural': 'newsletters',
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', django.contrib.sites.managers.CurrentSiteManager()),
            ],
        ),
        # Submission: a scheduled/sent instance of a Message.
        migrations.CreateModel(
            name='Submission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('publish_date', models.DateTimeField(verbose_name='publication date', blank=True, null=True, db_index=True, default=django.utils.timezone.now)),
                ('publish', models.BooleanField(verbose_name='publish', db_index=True, default=True, help_text='Publish in archive.')),
                ('prepared', models.BooleanField(verbose_name='prepared', db_index=True, default=False, editable=False)),
                ('sent', models.BooleanField(verbose_name='sent', db_index=True, default=False, editable=False)),
                ('sending', models.BooleanField(verbose_name='sending', db_index=True, default=False, editable=False)),
                ('message', models.ForeignKey(verbose_name='message', to='newsletter.Message')),
                ('newsletter', models.ForeignKey(verbose_name='newsletter', editable=False, to='newsletter.Newsletter')),
            ],
            options={
                'verbose_name': 'submission',
                'verbose_name_plural': 'submissions',
            },
        ),
        # Subscription: a (possibly anonymous) subscriber of a Newsletter.
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('name_field', models.CharField(verbose_name='name', max_length=30, blank=True, null=True, help_text='optional', db_column='name')),
                ('email_field', models.EmailField(verbose_name='e-mail', max_length=254, blank=True, null=True, db_index=True, db_column='email')),
                ('ip', models.GenericIPAddressField(verbose_name='IP address', blank=True, null=True)),
                ('create_date', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('activation_code', models.CharField(verbose_name='activation code', max_length=40, default=newsletter.utils.make_activation_code)),
                ('subscribed', models.BooleanField(verbose_name='subscribed', db_index=True, default=False)),
                ('subscribe_date', models.DateTimeField(verbose_name='subscribe date', blank=True, null=True)),
                ('unsubscribed', models.BooleanField(verbose_name='unsubscribed', db_index=True, default=False)),
                ('unsubscribe_date', models.DateTimeField(verbose_name='unsubscribe date', blank=True, null=True)),
                ('newsletter', models.ForeignKey(verbose_name='newsletter', to='newsletter.Newsletter')),
                ('user', models.ForeignKey(verbose_name='user', blank=True, null=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'subscription',
                'verbose_name_plural': 'subscriptions',
            },
        ),
        migrations.AddField(
            model_name='submission',
            name='subscriptions',
            field=models.ManyToManyField(verbose_name='recipients', blank=True, db_index=True, help_text='If you select none, the system will automatically find the subscribers for you.', to='newsletter.Subscription'),
        ),
        migrations.AddField(
            model_name='message',
            name='newsletter',
            field=models.ForeignKey(verbose_name='newsletter', to='newsletter.Newsletter'),
        ),
        migrations.AddField(
            model_name='article',
            name='post',
            field=models.ForeignKey(verbose_name='message', related_name='articles', to='newsletter.Message'),
        ),
        migrations.AlterUniqueTogether(
            name='subscription',
            unique_together=set([('user', 'email_field', 'newsletter')]),
        ),
        migrations.AlterUniqueTogether(
            name='message',
            unique_together=set([('slug', 'newsletter')]),
        ),
    ]
StarcoderdataPython
3344222
#!/usr/bin/env python # license removed for brevity import rospy from std_msgs.msg import Float64 def servo_cmd(servo_val, flag): pub = rospy.Publisher('/simple_model/base_to_second_joint_position_controller/command', Float64, queue_size=40) pub2 = rospy.Publisher('/simple_model/base_to_first_joint_position_controller/command', Float64, queue_size=40) rospy.init_node('servo_cmd', anonymous=True) rate = rospy.Rate(50) # 40hz while not rospy.is_shutdown(): pub.publish(0.2) pub2.publish(0.2) rate.sleep() servo_val = 0.0 flag = 0.0 if __name__ == '__main__': try: servo_cmd(servo_val, flag) except rospy.ROSInterruptException: pass
StarcoderdataPython
124556
from typing import List ''' Easy ''' class Solution_1: def minArray(self, numbers: List[int]) -> int: for i in range(1, len(numbers)): if numbers[i - 1] > numbers[i]: return numbers[i] return numbers[0] # 二分查找 # 作者:LeetCode-Solution # 链接:https://leetcode-cn.com/problems/xuan-zhuan-shu-zu-de-zui-xiao-shu-zi-lcof/solution/xuan-zhuan-shu-zu-de-zui-xiao-shu-zi-by-leetcode-s/ # 考虑数组中的最后一个元素 x: # 在最小值右侧的元素,它们的值一定都小于等于(<=) x; # 而在最小值左侧的元素,它们的值一定都大于等于(>=) x。 class Solution_2: def minArray(self, numbers: List[int]) -> int: low, high = 0, len(numbers) - 1 while low < high: pivot = low + (high - low) // 2 if numbers[pivot] < numbers[high]: high = pivot elif numbers[pivot] > numbers[high]: low = pivot + 1 else: high -= 1 return numbers[low]
StarcoderdataPython
4828593
<gh_stars>0 import docker import os import copy # from threading import Thread import concurrent.futures class DockerContainer: def __init__(self): self.client = docker.from_env() self.containers = [] self.available_containers = [] def create_containers(self,no_containers): try: # Check if run-code-image exists image = self.client.images.get('run-code-image') except docker.errors.ImageNotFound: print("run-code-image not found") try: print("Trying to build image please wait..") image,log = self.client.images.build(path = "./",tag="run-code-image",quiet=False) # for line in log: # print(line) except: print("Docker build error") exit(0) except: print("Docker Service not found") exit(0) mount_path= {os.getcwd():{'bind': '/tmp', 'mode': 'ro'}} # Create containers if not present for c_no in range(no_containers): c_name = "Alpha-Code-Container%d"%(c_no) if len(self.client.containers.list(all=True, filters={"name":c_name})) == 0: container = self.client.containers.create('run-code-image', volumes=mount_path,stdin_open = True , tty = True, working_dir="/root", detach=True,mem_limit='50M', name="Alpha-Code-Container%d"%(c_no)) print("Created container %d"%(c_no+1)) else: container = self.client.containers.get(c_name) print("Container %s is already created"%(c_name)) self.containers.append(container) self.available_containers = copy.copy(self.containers) def start_all(self): for index,container in enumerate(self.containers): print("Starting Container %d"%(index+1)) container.start() print("Container %d started"%(index+1)) def stop_all(self): for index,container in enumerate(self.containers): print("Stoping Container %d"%(index+1)) container.stop(timeout=0) # container.remove(force=True) print("Container %d stopped"%(index+1)) def run_file(self,container,fname): command = 'python3 /tmp/RunServer.py %s'%(fname) res = container.exec_run(cmd=command,workdir="/root") return res.output.decode() def allocate_container(self,fname): TIMEOUT = 5 container = 
self.available_containers.pop(0) with concurrent.futures.ThreadPoolExecutor() as executor: future = executor.submit(self.run_file,container,fname) try: output = future.result(timeout=TIMEOUT) except: output = "Time limit Exceeded" container.stop(timeout=0) container.start() self.available_containers.append(container) return output def execute_task(self,fname): TIMEOUT = 5 if len(self.available_containers) == 0: return "No container is available" output = "Docker: Un-known Error" with concurrent.futures.ThreadPoolExecutor() as executor: future = executor.submit(self.allocate_container,fname) try: output = future.result(timeout=TIMEOUT) except: output = "Time limit Exceeded" return output if __name__ == "__main__": container = DockerContainer() # coderunnernet = container.client.networks.create("coderunnernet", driver="bridge") container.create_containers(5) container.start_all() # for i in container.containers: # print("connecting: %s",i) # coderunnernet.connect(i) for cont in container.containers: res = cont.exec_run(cmd="echo hello;",workdir="/root") print(res) print(res.output) res = input("Stop containers [y/n] :") if res == 'y': container.stop_all() # container.client.networks.prune() #Alphine reuirements #gcc #libc-dev #python3
StarcoderdataPython
25602
<filename>tests/test_day5.py from aoc_2020.day5 import part1, get_pos, part2 data = """BFFFBBFRRR FFFBBBFRRR BBFFBBFRLL """ def test_get_pos(): assert get_pos("FBFBBFFRLR") == (44, 5) def test_part1(): assert part1(data) == 820 def test_part2(): assert part2(data) is None
StarcoderdataPython
3341722
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri May 7 21:28:46 2021 @author: marco """ import sim #import sys import numpy as np #import math #import matplotlib.pyplot as plt from inference_by_python import inference #import time #import cv2 '''******IMPOTRANT****** simRemoteApi.start(19999) -- input this line in Lua command of simulation firstly''' sim.simxFinish(-1) # Just in case, close all opened connections sim_client = sim.simxStart('127.0.0.1', 19999, True, True, 5000, 5) if sim_client == -1: print('Failed to connect to simulation (V-REP remote API server). Exiting.') exit() else: print('Connected to simulation.') # Synchronously Running the client and server sim.simxSynchronous(sim_client,True); # Enable the synchronous mode (Blocking function call) sim.simxStartSimulation(sim_client,sim.simx_opmode_oneshot) sim.simxSynchronousTrigger(sim_client) #trigger the simulation sim_ret, rgb_cam = sim.simxGetObjectHandle(sim_client, "kinect_rgb", sim.simx_opmode_blocking) sim_ret, depth_cam = sim.simxGetObjectHandle(sim_client, "kinect_depth", sim.simx_opmode_blocking) #Wait for Signal objectNumber err, objectNumber=sim.simxGetIntegerSignal(sim_client,'objectNumber',sim.simx_opmode_streaming) while err != sim.simx_return_ok: err,objectNumber=sim.simxGetIntegerSignal(sim_client,'objectNumber',sim.simx_opmode_buffer) print(r'objectNumber: %i.'%(objectNumber)) sim.simxClearIntegerSignal(sim_client,'objectNumber',sim.simx_opmode_oneshot) #x=[0.375,0.250,0.35] #y=[0.050,0.050,0.15] #angle=[30*math.pi/180,0,0] for i in range(objectNumber): #Wait for Signal sendImages err2, sendImages=sim.simxGetStringSignal(sim_client,'sendImages',sim.simx_opmode_streaming) while err2 != sim.simx_return_ok: err2,sendImages=sim.simxGetStringSignal(sim_client,'sendImages',sim.simx_opmode_buffer) print(r'sendImages: %s' %(sendImages)) sim.simxClearStringSignal(sim_client,'sendImages', sim.simx_opmode_oneshot) # Acquire RGB Image sim_ret, resolution, raw_image = 
sim.simxGetVisionSensorImage(sim_client, rgb_cam, 0, sim.simx_opmode_blocking) color_img = np.asarray(raw_image) color_img.shape = (resolution[1], resolution[0],3) color_img = color_img.astype(np.float) color_img[color_img < 0] += 255 color_img = np.flipud(color_img) color_img = color_img.astype(np.uint8) # Gain Depth Image sim_ret, resolution, depth_buffer = sim.simxGetVisionSensorDepthBuffer(sim_client, depth_cam, sim.simx_opmode_blocking) depth_img = np.asarray(depth_buffer) #depth_img= cv2.rgb2grey depth_img.shape = (resolution[1], resolution[0]) depth_img = np.flipud(depth_img) depth_img = depth_img * 255 #zNear = 0.01 #zFar = 2 #depth_img = depth_img0 * (zFar - zNear) + zNear #Inference by Deep Learning model #X=x[i] #Y=y[i] #graspAngle=angle[i] args_network='/home/marco/vrep_python/trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98' args_use_depth=True args_use_rgb=True args_n_grasps=1 args_save=True args_force_cpu=False grasps=inference(args_network, color_img,depth_img,args_use_depth,args_use_rgb, args_n_grasps,args_save,args_force_cpu) X=grasps[0].center[0]*0.5/223 Y=grasps[0].center[1]*0.5/223 graspAngle=grasps[0].angle local_depth_min=np.max(depth_img) for a in range(grasps[0].center[0]-6,grasps[0].center[0]+7): row_min=np.min(depth_img[a][(grasps[0].center[1]-6):(grasps[0].center[1]+7)]) if row_min<local_depth_min: local_depth_min=row_min #depth_grasp=depth_img[grasps[0].center[0]][grasps[0].center[1]] #graspCenterZ=(0.46175-0.02)*(1-np.min(depth_img)/np.max(depth_img))-0.05 graspCenterZ=(0.46175-0.02)*(1-local_depth_min/np.max(depth_img))-0.05 #grasps.quality, grasps.width, grasps.length # send grasps to CoppeliaSim that should be received and evaluated at the same time sim.simxPauseCommunication(sim_client,True) sim.simxSetFloatSignal(sim_client,'graspCenterX', X, sim.simx_opmode_oneshot) sim.simxSetFloatSignal(sim_client,'graspCenterY', Y, sim.simx_opmode_oneshot) sim.simxSetFloatSignal(sim_client,'graspAngle', graspAngle, 
sim.simx_opmode_oneshot) sim.simxSetFloatSignal(sim_client,'graspCenterZ', graspCenterZ, sim.simx_opmode_oneshot) sim.simxSetStringSignal(sim_client,'sendGrasps', 'start', sim.simx_opmode_oneshot) sim.simxPauseCommunication(sim_client,False) #Above's 3 values will be received on the CoppeliaSim side at the same time ''' plt.figure(2*i) plt.imshow(color_img) #saveimg plt.figure(2*i+1) plt.imshow(depth_img) # #saveimg '''
StarcoderdataPython
3224402
<filename>DB/Entities/Process.py # Copyright © 2017 <NAME> <<EMAIL>> # Copyright © 2017 <NAME> <<EMAIL>> from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import String from sqlalchemy import TIMESTAMP from DB.Entities import Base class Process(Base): """ Object model for a submission """ __tablename__ = 'processes' id = Column(Integer, primary_key=True) name = Column(String) last_report_time = Column(TIMESTAMP) status = Column(Integer)
StarcoderdataPython
1733496
""" FRODO: a FRamework for Open/Distributed Optimization Copyright (C) 2008-2013 <NAME>, <NAME> & <NAME> FRODO is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. FRODO is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. How to contact the authors: <http://frodo2.sourceforge.net/> """ # @todo Write documentation (in which format?) import os import time import signal import sys import subprocess import math # Global variables interrupted = False terminated = False java = [] generator = "" algos = [] timeout = -1 output = "" outFile = None javaProcess = None print("""FRODO Copyright (C) 2008-2013 <NAME>, <NAME> & <NAME> This program comes with ABSOLUTELY NO WARRANTY. 
This is free software, and you are welcome to redistribute it under certain conditions.\n"""); drawGraphs = False try: import matplotlib import matplotlib.pyplot as plt drawGraphs = True except ImportError: sys.stderr.write("Could not find the matplotlib module; no graphs will be drawn\n") # The array to compute the 95%-confidence interval for the median # @author <NAME> conf = [ [], # for n = 0 data point, it doesn't make sense [1, 1], # n = 1 ] + [ [int(math.floor(n / 2.0)), int(math.floor(n / 2.0))] for n in range(2, 6) # no confidence interval for n < 6 ] + [ [1, 6], # n = 6 [1, 7], [1, 7], # n = 7..8 [2, 8], # n = 9 [2, 9], # n = 10 [2, 10], # n = 11 [3, 10], # n = 12 [3, 11], [3, 11], # n = 13..14 [4, 12], [4, 12], # n = 15..16 [5, 13], # n = 17 [5, 14], # n = 18 [5, 15], # n = 19 [6, 15], # n = 20 [6, 16], [6, 16], # n = 21..22 [7, 17], [7, 17], # n = 23..24 [8, 18], # n = 25 [8, 19], # n = 26 [8, 20], # n = 27 [9, 20], # n = 28 [9, 21], # n = 29 [10, 21], # n = 30 [10, 22], [10, 22], # n = 31..32 [11, 23], [11, 23], # n = 33..34 [12, 24], [12, 24], # n = 35..36 [13, 25], # n = 37 [13, 26], # n = 38 [13, 27], # n = 39 [14, 27], # n = 40 [14, 28], # n = 41 [15, 28], # n = 42 [15, 29], # n = 43 [16, 29], # n = 44 [16, 30], [16, 30], # n = 45..46 [17, 31], [17, 31], # n = 47..48 [18, 32], [18, 32], # n = 49..50 [19, 33], # n = 51 [19, 34], # n = 52 [19, 35], # n = 53 [20, 35], # n = 54 [20, 36], # n = 55 [21, 36], # n = 56 [21, 37], # n = 57 [22, 37], # n = 58 [22, 38], # n = 59 [23, 39], [23, 39], # n = 60..61 [24, 40], [24, 40], [24, 40], # n = 62..64 [25, 41], [25, 41], # n = 65..66 [26, 42], # n = 67 [26, 43], # n = 68 [26, 44], # n = 69 [27, 44], # n = 70 ] def ignoreInterruption (): signal.signal(signal.SIGINT, signal.SIG_IGN) def interruptionHandler (signal, frame): global interrupted, terminated, outFile if not interrupted: print(""" Interruption signal caught. Waiting for all algorithms to finish solving the current problem instance... Why? 
Because, statistically, you are more likely to interrupt long runs, introducing an experimental bias. In other words, the algorithm you just tried to interrupt might take a long time to terminate or time out. Interrupting it and discarding this run could make the algorithm appear to perform better than it actually does. Hit CTRL+C again if you really want to abruptly interrupt the experiment. All experimental results for the current problem instance will be discarded. """) interrupted = True elif not terminated: print("\nAbruptly interrupting the experiment. Killing the Java process... ") if not javaProcess is None: javaProcess.kill() javaProcess.wait() terminated = True else: if not outFile is None: outFile.close() sys.exit(0) def checkAlgoNameUnicity (): global algos if len(algos) == 0: sys.stderr.write("Error: no algorithm specified") sys.exit(1) algoNames = [] for name in [ algo[0] for algo in algos ]: if name in algoNames: sys.stderr.write("Error: two algorithms have the same name `" + name + "'") sys.exit(1) algoNames += [name] def getLowMedHigh (data): """ Returns the median and the bounds of the 95%-confidence intervals: [low, med, high] @param data the list of data points; each data point is a tuple [timeout, value] """ # First sort the data and look up the median index data = sorted(data) size = len(data) medI = int(math.floor(size / 2.0)) if size < 71: [lowI, highI] = conf[size] else: lowI = int(math.floor(size / 2.0 - 0.980 * math.sqrt(size))) highI = int(math.ceil(size / 2.0 + 1 + 0.980 * math.sqrt(size))) # Check whether the median corresponds to a timeout if data[medI-1][0] == 1: # timeout return [ float("NaN"), float("NaN"), float("NaN") ] else: return [ data[lowI-1][1], data[medI-1][1], data[highI-1][1] ] def runAtDepth (depth, indent, genParams): global interrupted, terminated, java, generator, algos, timeout, output, outFile, javaProcess # Check if all options have been set if depth == len(genParams): print(indent + "Generating a problem instance 
using the following Java arguments:") print(indent + str([generator] + genParams)) subprocess.call(java + [generator] + genParams, stdout = -1) # First write experimental results for the current problem instance a temporary file tmpFileName = ".current_run.csv" if os.path.exists(tmpFileName): os.remove(tmpFileName) # Run each algorithm indent += "\t" for algo in algos: # Run the algorithm print(indent + time.strftime("%H:%M:%S", time.localtime()) + " Starting " + algo[0]) javaProcess = subprocess.Popen(java + [algo[1]] + algo + [str(timeout), tmpFileName], preexec_fn = ignoreInterruption) javaProcess.wait() javaProcess = None if terminated: return # Copy the results to the overall output file (skipping irrelevant timeouts) and delete the tmp file needsHeader = not os.path.exists(output) outFile = open(output, "a+") tmpFile = open(tmpFileName, "r") line = tmpFile.readline() if needsHeader: outFile.write(line) line = tmpFile.readline() while line != "": # Compare the algorithms and the problem instances on this line and the next; # if they are the same, skip this line (it is the timeout line) nextLine = tmpFile.readline() if nextLine == "\n": # EOF outFile.write(line) break thisSplit = line.split('\t') nextSplit = nextLine.split('\t') if thisSplit[0] != nextSplit[0] or thisSplit[2] != nextSplit[2]: # the algorithms or problem instances differ outFile.write(line) line = nextLine else: # same algorithm on the same problem instance; skip the first timeout line outFile.write(nextLine) line = tmpFile.readline() outFile.close() # Delete the tmp file tmpFile.close() os.remove(tmpFileName) return # Check whether we need to iterate on the depth-th option optList = genParams[depth] if not isinstance(optList, list): genParams[depth] = str(optList) runAtDepth(depth+1, indent, genParams) return optBefore = genParams[0:depth] optAfter = genParams[depth+1:] # Iterate on the possible values for this option for opt in optList: if interrupted: return print(indent + "Picking " + 
str(opt) + " from " + str(optList)) runAtDepth(depth+1, indent+"\t", optBefore + [str(opt)] + optAfter) def run (java_i, javaParams_i, generator_i, genParams, nbrProblems, algos_i, timeout_i, output_i): """Starts the experiment @param java_i the command line to call Java @param javaParams_i the list of parameters to be passed to the JVM. Example: ["-Xmx2G", "-classpath", "my/path"] @param generator_i the class name for the random problem generator @param genParams_i the list of options for the random problem generator. Each option is either a value, or a list of values. @param nbrProblems the number of runs @param algos_i the list of algorithms; each algorithm is [display name, solver class name, agent configuration file, problem file] @param timeout_i the timeout in seconds @param output_i the CSV file to which the statistics should be written """ # @todo Eventually remove the solver from the algorithm description (after standardizing the stats gatherer) # @todo How to show and update the graphs as the experiment is running? 
# @todo Introduce an option to keep all the XCSP files # @todo Support the use of a set of XCSP files as an input instead of a random problem generator # (this could be a fake problem generator that takes the first XCSP file from a folder, copies to a file with a predefined name, # and then moves the file to a ./done/ subfolder) # @todo It should be possible to run some of the algorithms from a JAR, others from the src folder and plot pairwise difference to compare FRODO versions # Set the values of the global variables global interrupted, java, generator, algos, timeout, output java = [java_i] + javaParams_i generator = generator_i algos = algos_i timeout = timeout_i output = output_i checkAlgoNameUnicity() # Catch interruptions to let the algorithms finish on the current problem instance signal.signal(signal.SIGINT, interruptionHandler) for run in range(1, nbrProblems+1): if interrupted: return; print("Run " + str(run) + "/" + str(nbrProblems)) runAtDepth(0, "\t", genParams) def plot (resultsFile, xCol, yCol): """ Plots the results @param resultsFile the CSV file containing the experimental results @param xCol the index of the column in the CSV file to be used for the x axis (the first column has index 0) @param yCol the index of the column in the CSV file to be used for the y axis (the first column has index 0) """ # @todo Allow to input column names rather than column indexes, and allow to input lists to get multiple graphs # @todo Make it possible to set figure size, dpi, rcParams, xticks, colors fmts, ylim, ncol, the order of the algorithms in the legend global drawGraphs file = open(resultsFile) # Read the column names headers = file.readline().split('\t') xIndex = xCol yIndex = yCol xName = headers[xIndex] yName = "median " + headers[yIndex] # if drawGraphs: results = { algoName : { xValue : [[timeout1, yValue1], ..., [timeoutN, yValueN]] } } # else: results = { xValue : { algoName : [[timeout1, yValue1], ..., [timeoutN, yValueN]] } } results = dict() # 
Read the file line by line xMin = float("infinity") xMax = float("-infinity") while True: line = file.readline() if line == "": break # Parse the algorithm name and the (x, y) values lineSplit = line.split('\t') algoName = lineSplit[0] xValue = lineSplit[xIndex] yValue = float(lineSplit[yIndex]) timeout = int(lineSplit[1]) # 0 = no timeout; 1 = timeout x = float(xValue) xMin = min(xMin, x) xMax = max(xMax, x) if drawGraphs: # Get the data for this algorithm, or initialize it if necessary # data = { xValue : [[timeout1, yValue1], ..., [timeoutN, yValueN]] } if algoName in results: data = results[algoName] else: data = dict() results[algoName] = data # Get the data for this xValue, or initialize it if necessary # yValues = [[timeout1, yValue1], ..., [timeoutN, yValueN]] if xValue in data: yValues = data[xValue] else: yValues = [] data[xValue] = yValues else: # Get the data for this xValue, or initialize it if necessary # data = { algoName : [[timeout1, yValue1], ..., [timeoutN, yValueN]] } if xValue in results: data = results[xValue] else: data = dict() results[xValue] = data # Get the data for this algorithm, or initialize it if necessary # yValues = [[timeout1, yValue1], ..., [timeoutN, yValueN]] if algoName in data: yValues = data[algoName] else: yValues = [] data[algoName] = yValues # Record the value yValues += [[timeout, yValue]] if drawGraphs: plotData(results, xMin, xMax, xName, yName) else: saveData(resultsFile, results, xName, yName) def plotData (results, xMin, xMax, xName, yName): """ @param results { algoName : { xValue : [[timeout1, yValue1], ..., [timeoutN, yValueN]] } } """ plt.axes().set_yscale('log') # Compute the margins on the x-axis to make the confidence intervals visible margin = .025 * (xMax - xMin) xMin -= margin xMax += margin plt.axes().set_xlim(xMin, xMax) # Compute and plot the median and confidence intervals for algoName, data in results.items(): # plotData = [ [xValue1, yMin1, yMed1, yPlus1], [xValue2,... 
] ] plotData = [] for xValue, yValues in data.items(): [yLow, yMed, yHigh] = getLowMedHigh(yValues) plotData += [ [float(xValue), yMed - yLow, yMed, yHigh - yMed] ] plotData = sorted(plotData) xValues = [ xValue for [xValue, yMin, yMed, yPlus] in plotData ] yMeds = [ yMed for [xValue, yMin, yMed, yPlus] in plotData ] yMins = [ yMin for [xValue, yMin, yMed, yPlus] in plotData ] yPluses = [ yPlus for [xValue, yMin, yMed, yPlus] in plotData ] plt.errorbar(xValues, yMeds, yerr = [yMins, yPluses], label = algoName) # @todo Move the legend outside of the graph plt.legend(loc='best', numpoints=1, columnspacing=1, labelspacing=.5, handletextpad=0.5) plt.grid(which="major") plt.axes().xaxis.grid(False) plt.xlabel(xName) plt.ylabel(yName) plt.show() def saveData (resultsFile, results, xName, yName): """ @param results { xValue : { algoName : [[timeout1, yValue1], ..., [timeoutN, yValueN]] } } """ # Converts the results to the format [ [ xValue1, { algoName1 : [[timeout1, yValue1], ..., [timeoutN, yValueN]] } ], [ xValue2,... 
resultsList = sorted([ [float(xValue), data] for xValue, data in results.items() ]) print(resultsList) # Open the output file outFilePath = "figure_data_" + resultsFile; outFile = open(outFilePath, 'w') # Write the y-axis label outFile.write("y axis label:\t" + yName + "\n") # Get the list of all algorithms allAlgos = [] for [xValue, data] in resultsList: for algoName in data: if algoName not in allAlgos: allAlgos += [algoName] allAlgos = sorted(allAlgos) # Write the header outFile.write(xName) yNegSuff = " length of below confidence half-interval" yPosSuff = " length of above confidence half-interval" for algoName in allAlgos: outFile.write("\t" + algoName + "\t" + algoName + yNegSuff + "\t" + algoName + yPosSuff) outFile.write("\n") # Write the median an confidence intervals for each x value for [xValue, data] in resultsList: outFile.write(str(xValue)) for algoName in allAlgos: if algoName not in data: outFile.write("\t\t\t") else: [yLow, yMed, yHigh] = getLowMedHigh(data[algoName]) outFile.write("\t" + str(yMed) + "\t" + str(yMed - yLow) + "\t" + str(yHigh - yMed)) outFile.write("\n") print("(Over)wrote " + outFilePath) outFile.close()
StarcoderdataPython
3333733
<reponame>andreycizov/python-xrpc import logging import multiprocessing from subprocess import Popen import shutil import signal import subprocess import sys import unittest from contextlib import ExitStack from datetime import timedelta, datetime from itertools import count from os import environ from tempfile import mkdtemp from time import sleep from typing import Any, Optional, Dict from dataclasses import field, dataclass from xrpc.actor import run_server from xrpc.logging import LoggerSetup, LL, logging_setup from xrpc.popen import PopenStack, cov, popen, _popen_defn, PopenStackException, argv_decode from xrpc.trace import trc from xrpc.util import time_now def helper_main(ls, fn, *args, **kwargs): with logging_setup(ls), cov(): try: fn(*args, **kwargs) except: defn = _popen_defn() tb = None if defn is None else defn.traceback if tb: logging.getLogger('helper_main').exception('From %s %s %s\nPopen-called from:\n%s', fn, args, kwargs, tb) else: logging.getLogger('helper_main').exception('From %s %s %s', fn, args, kwargs) raise def server_main(factory_fn, addr, *args, **kwargs): logging.getLogger(__name__ + '.server_main').debug('%s %s %s %s', factory_fn, addr, args, kwargs) try: tp, rpc = factory_fn(addr, *args, **kwargs) run_server(tp, rpc, [addr]) finally: logging.getLogger('server_main').exception('Exited with: %s %s', factory_fn, sys.exc_info()) def server_main_new(factory_fn, addrs, *args, **kwargs): logging.getLogger(__name__ + '.server_main').debug('%s %s %s %s', factory_fn, addrs, args, kwargs) try: tp, rpc = factory_fn(*args, **kwargs) run_server(tp, rpc, addrs) finally: logging.getLogger('server_main').exception('Exited with: %s %s', factory_fn, sys.exc_info()) def wait_items(waiting, max_wait=40): wait_till = time_now() + timedelta(seconds=max_wait) waiting = list(waiting) while wait_till > time_now() and len(waiting): to_remove = [] for x in waiting: try: x.wait(0) to_remove.append(x) except multiprocessing.context.TimeoutError: pass except 
subprocess.TimeoutExpired: pass for x in to_remove: waiting.remove(x) sleep(0.03) if len(waiting) and wait_till > time_now(): raise TimeoutError(f'{waiting}') @dataclass(frozen=False) class Timer: started: datetime = field(default_factory=time_now) max: Optional[float] = None def get(self, now=None) -> timedelta: if now is None: now = time_now() elapsed = now - self.started if self.max and elapsed.total_seconds() > self.max: raise TimeoutError() return elapsed def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.get() def sleep(self, seconds=0): sleep(seconds) return self.get() DEFAULT_LEVEL = logging.INFO if environ.get('DEBUG', None): DEFAULT_LEVEL = logging.DEBUG @dataclass(frozen=False) class ProcessHelper: ls: LoggerSetup = field(default_factory=lambda: LoggerSetup(LL(None, DEFAULT_LEVEL), [], ['stream:///stderr'])) es: ExitStack = field(default_factory=ExitStack) ps: PopenStack = field(default_factory=lambda: PopenStack(10)) ms: Dict[Any, Popen] = field(default_factory=dict) def popen(self, fn, *args, **kwargs): b = popen(helper_main, self.ls, fn, *args, **kwargs) self.ps.add(b) return b def wait(self, items, max_wait=10): return wait_items(items, max_wait) def timer(self, max: Optional[float] = 5.) 
-> Timer: return Timer(max=max) def __enter__(self): self.es.enter_context(logging_setup(self.ls)) self.es.enter_context(self.ps) return self def __exit__(self, *args): try: self.es.__exit__(*args) except PopenStackException as e: if e.abuser: fn = e.abuser.args[-2] decoded = argv_decode(e.abuser.args[-1]) raise ValueError(f'`{fn}` `{decoded}`') raise class ProcessHelperCase(unittest.TestCase): def _get_ls(self) -> LoggerSetup: return LoggerSetup(LL(None, DEFAULT_LEVEL), [ ], ['stream:///stderr']) def step(self, s=None): trc(f'STEP.{next(self.steps)}', depth=2).warning('%s', s if s else '') def make_ph(self): r = ProcessHelper(self._get_ls()) return r def signal_handler(self, signal, frame): self.tearDown() def setUp(self): self.steps = count() self.ps = self.make_ph().__enter__() self.dtemp = mkdtemp() signal.signal(signal.SIGINT, self.signal_handler) def tearDown(self): self.ps.__exit__(*sys.exc_info()) shutil.rmtree(self.dtemp)
StarcoderdataPython
1681583
<reponame>newlikehalo/pytroch-ctpn-Retina # -*- coding:utf-8 -*- # ''' # Created on 18-12-11 上午10:03 # # @Author: <NAME>(laygin) # ''' import os os.environ['CUDA_VISIBLE_DEVICES'] = '' import cv2 import numpy as np import glob import torch import torch.nn.functional as F from ctpn_model import CTPN_Model from ctpn_utils import gen_anchor, bbox_transfor_inv, nms, clip_box, filter_bbox, TextProposalConnectorOriented from ctpn_utils import resize import config import ipdb import time import math from lib.text_proposal_connector import TextProposalConnector import copy # from lib.fast_rcnn.nms_wrapper import nms def cutstr(string): return string.split('/')[-1].split('.')[0] def ifdir(dir): # 判断是不是有这个目录 if not os.path.exists(dir): os.mkdir(dir) ALL_DIR = "/home/like/data/ctpnresult" EPOCH = "epoch_12_b" EPOCH_DIR = os.path.join(ALL_DIR, EPOCH) newepoch = os.path.join(EPOCH_DIR, str(config.IOU_SELECT)) ifdir(newepoch) EPOCH_IMAGE = os.path.join(newepoch, "imageresult") EPOCH_TXT = os.path.join(newepoch, "pthfile") ifdir(EPOCH_DIR) ifdir(EPOCH_IMAGE) ifdir(EPOCH_TXT) prob_thresh = config.IOU_SELECT width = 600 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') weights = glob.glob(os.path.join(EPOCH_DIR, '*.tar'))[0] # torch.save(weights,weights) ipdb.set_trace() # img_path = '/home/like/data/pic/image/without_label/37.JPG' # weights = "/home/like/pytorch_ctpn/checkpoints/ctpn_ep02_0.0727_0.0568_0.1295.pth.tar" ipdb.set_trace() model = CTPN_Model() model.load_state_dict(torch.load(weights, map_location=device)['model_state_dict']) model.to(device) model.eval() def dis(image): cv2.imshow('image', image) cv2.waitKey(0) cv2.destroyAllWindows() # new work def anaiou(line, inds): boxs = [] for i in inds: bbox = line[i, :4] x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3] boxs.append(bbox) newbox = copy.deepcopy(boxs) for i, mbox in enumerate(boxs): for j, nbox in enumerate(boxs): if i != j: marea = (mbox[2] - mbox[0]) * (mbox[3] - mbox[1]) narea = 
(nbox[2] - nbox[0]) * (nbox[3] - nbox[1]) # print(mbox,nbox,marea, narea) x1 = max(mbox[0], nbox[0]) x2 = min(mbox[2], nbox[2]) y1 = max(mbox[1], nbox[1]) y2 = min(mbox[3], nbox[3]) intersection = max(x2 - x1, 0) * max(y2 - y1, 0) if intersection / marea > 0.7: bx1 = min(mbox[0], nbox[0]) bx2 = max(mbox[2], nbox[2]) by1 = min(mbox[1], nbox[1]) by2 = max(mbox[3], nbox[3]) newbox[i] = [0, 0, 0, 0] newbox[j] = [bx1, by1, bx2, by2] elif intersection / narea > 0.7: bx1 = min(mbox[0], nbox[0]) bx2 = max(mbox[2], nbox[2]) by1 = min(mbox[1], nbox[1]) by2 = max(mbox[3], nbox[3]) newbox[j] = [0, 0, 0, 0] newbox[i] = [bx1, by1, bx2, by2] nnbox = [] for i in newbox: if not (i[0] == 0 and i[1] == 0 and i[2] == 0 and i[3] == 0): # print(i) nnbox.append(i) else: print("1<i") # ipdb.set_trace() return nnbox def save_results(image_name, line, thresh): im = cv2.imread(image_name) inds = np.where(line[:, -1] >= thresh)[0] if len(inds) == 0: return newimage_name = image_name.split('/')[-1].split('.')[0] all_list = [] nnbox = anaiou(line, inds) for bbox in nnbox: # bbox = line[i, :4] # score = line[i, -1] cv2.rectangle( im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0, 0, 255), thickness=1) all_list.append([bbox[0], bbox[1], bbox[2], bbox[3]]) save_path = os.path.join(EPOCH_TXT, newimage_name + '.txt') file = open(save_path, 'w') file.write(str(all_list)) file.close() image_name = image_name.split('/')[-1] cv2.imwrite(os.path.join(EPOCH_IMAGE, image_name), im) def connect_proposal(text_proposals, scores, im_size): cp = TextProposalConnector() line = cp.get_text_lines(text_proposals, scores, im_size) return line def test(img_path): image = cv2.imread(img_path) """gray""" isize = image.shape # ipdb.set_trace() image_c = image.copy() # h1,w1,c=image_c.shape # oddnumber=w1/width # image = resize(image, width=width) h, w = image.shape[:2] image = image.astype(np.float32) - config.IMAGE_MEAN image = torch.from_numpy(image.transpose(2, 0, 1)).unsqueeze(0).float() with torch.no_grad(): 
image = image.to(device) cls, regr = model(image) cls_prob = F.softmax(cls, dim=-1).cpu().numpy() regr = regr.cpu().numpy() anchor = gen_anchor((math.ceil(h / 16), math.ceil(w / 16)), 16) bbox = bbox_transfor_inv(anchor, regr) bbox = clip_box(bbox, [h, w]) fg = np.where(cls_prob[0, :, 1] > prob_thresh)[0] boxes = bbox[fg, :] # 可用的框格 scores = cls_prob[0, fg, 1] select_anchor = boxes.astype(np.float32) keep_index = filter_bbox(select_anchor, 16) # nsm boxes = select_anchor[keep_index] scores = scores[keep_index] NMS_THRESH = 0.3 dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32) keep = nms(dets, NMS_THRESH) dets = dets[keep, :] keep = np.where(dets[:, 4] >= 0.7)[0] dets = dets[keep, :] line = connect_proposal(dets[:, 0:4], dets[:, 4], isize) save_results(img_path, line, thresh=0.9) if __name__ == '__main__': DATA_DIR = "/home/like/data/ctpnresult/testdata/pic" im_names = glob.glob(os.path.join(DATA_DIR, '*.png')) + \ glob.glob(os.path.join(DATA_DIR, '*.jpg')) im_names.sort() start = time.time() # im_names=["/home/like/data/ctpnresult/testdata/111.png"] for im_name in im_names: print(im_name) test(im_name) # ctpn(sess, net, im_name) end = time.time() print(end - start)
StarcoderdataPython
6350
import os import warnings from django.conf import settings CAPTCHA_FONT_PATH = getattr(settings, 'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf'))) CAPTCHA_FONT_SIZE = getattr(settings, 'CAPTCHA_FONT_SIZE', 22) CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35, 35)) CAPTCHA_BACKGROUND_COLOR = getattr(settings, 'CAPTCHA_BACKGROUND_COLOR', '#ffffff') CAPTCHA_FOREGROUND_COLOR = getattr(settings, 'CAPTCHA_FOREGROUND_COLOR', '#001100') CAPTCHA_CHALLENGE_FUNCT = getattr(settings, 'CAPTCHA_CHALLENGE_FUNCT', 'captcha.helpers.random_char_challenge') CAPTCHA_NOISE_FUNCTIONS = getattr(settings, 'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs', 'captcha.helpers.noise_dots',)) CAPTCHA_FILTER_FUNCTIONS = getattr(settings, 'CAPTCHA_FILTER_FUNCTIONS', ('captcha.helpers.post_smooth',)) CAPTCHA_WORDS_DICTIONARY = getattr(settings, 'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words') CAPTCHA_PUNCTUATION = getattr(settings, 'CAPTCHA_PUNCTUATION', '''_"',.;:-''') CAPTCHA_FLITE_PATH = getattr(settings, 'CAPTCHA_FLITE_PATH', None) CAPTCHA_SOX_PATH = getattr(settings, 'CAPTCHA_SOX_PATH', None) CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5) # Minutes CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4)) # Chars # CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings, 'CAPTCHA_IMAGE_BEFORE_FIELD', True) CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MIN_LENGTH', 0) CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MAX_LENGTH', 99) CAPTCHA_IMAGE_SIZE = getattr(settings, 'CAPTCHA_IMAGE_SIZE', None) CAPTCHA_IMAGE_TEMPLATE = getattr(settings, 'CAPTCHA_IMAGE_TEMPLATE', 'captcha/image.html') CAPTCHA_HIDDEN_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_HIDDEN_FIELD_TEMPLATE', 'captcha/hidden_field.html') CAPTCHA_TEXT_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_TEXT_FIELD_TEMPLATE', 'captcha/text_field.html') if getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None): 
msg = ("CAPTCHA_FIELD_TEMPLATE setting is deprecated in favor of widget's template_name.") warnings.warn(msg, DeprecationWarning) CAPTCHA_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None) if getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None): msg = ("CAPTCHA_OUTPUT_FORMAT setting is deprecated in favor of widget's template_name.") warnings.warn(msg, DeprecationWarning) CAPTCHA_OUTPUT_FORMAT = getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None) CAPTCHA_MATH_CHALLENGE_OPERATOR = getattr(settings, 'CAPTCHA_MATH_CHALLENGE_OPERATOR', '*') CAPTCHA_GET_FROM_POOL = getattr(settings, 'CAPTCHA_GET_FROM_POOL', False) CAPTCHA_GET_FROM_POOL_TIMEOUT = getattr(settings, 'CAPTCHA_GET_FROM_POOL_TIMEOUT', 5) CAPTCHA_TEST_MODE = getattr(settings, 'CAPTCHA_TEST_MODE', False) # Failsafe if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH: CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH def _callable_from_string(string_or_callable): if callable(string_or_callable): return string_or_callable else: return getattr(__import__('.'.join(string_or_callable.split('.')[:-1]), {}, {}, ['']), string_or_callable.split('.')[-1]) def get_challenge(generator=None): return _callable_from_string(generator or CAPTCHA_CHALLENGE_FUNCT) def noise_functions(): if CAPTCHA_NOISE_FUNCTIONS: return map(_callable_from_string, CAPTCHA_NOISE_FUNCTIONS) return [] def filter_functions(): if CAPTCHA_FILTER_FUNCTIONS: return map(_callable_from_string, CAPTCHA_FILTER_FUNCTIONS) return []
StarcoderdataPython
1704412
<gh_stars>0 # ------------------------------------------------------------------------------ # IMPORTS # ------------------------------------------------------------------------------ from sys import version_info from sys import path as syspath from os import path import json _CURRENT_DIRECTORY = syspath[0] try: import util # if you have problems visit: # https://gist.github.com/pinxau1000/8817d4ef0ed766c78bac8e6feafc8b47 # https://github.com/pinxau1000/ except ModuleNotFoundError: from urllib import request print("'util.py' not found on the same folder as this script!") _url_utilpy = "https://gist.githubusercontent.com/pinxau1000/8817d4ef0ed766c78bac8e6feafc8b47/raw/util.py" print("Downloading util.py from:\n" + _url_utilpy) # https://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3 request.urlretrieve(_url_utilpy, "util.py") print("Downloading finished!") import util try: import cv2 as cv except ModuleNotFoundError: util.install("opencv-python") import cv2 as cv try: from matplotlib import pyplot as plt except ModuleNotFoundError: util.install("matplotlib") from matplotlib import pyplot as plt try: import numpy as np except ModuleNotFoundError: util.install("numpy>=1.19,<1.19.4") import numpy as np try: from packaging import version except ModuleNotFoundError: util.install("packaging") from packaging import version try: import click except ModuleNotFoundError: util.install("click") import click # ------------------------------------------------------------------------------ # REQUIREMENTS CHECK # ------------------------------------------------------------------------------ assert version_info.major >= 3 and \ version_info.minor >= 5, \ "This script requires Python 3.5.0 or above!" assert version.parse(cv.__version__).major >= 4 and \ version.parse(cv.__version__).minor >= 4, \ "This script requires OpenCV 4.4.0 or above!" 
assert version.parse(plt.matplotlib.__version__).major >= 3 and \ version.parse(plt.matplotlib.__version__).minor >= 3, \ "This script requires MatPlotLib 3.3.0 or above!" assert version.parse(np.__version__).major >= 1 and \ version.parse(np.__version__).minor >= 19 and \ version.parse(np.__version__).micro < 4, \ "This script requires Numpy version >= 1.19.0 and < 1.19.4 !" assert version.parse(click.__version__).major >= 7 and \ version.parse(click.__version__).minor >= 1, \ "This script requires Click 7.1.0 or above!" # ------------------------------------------------------------------------------ # Load Default Pictures # ------------------------------------------------------------------------------ _PATH_2_DATA = path.join(_CURRENT_DIRECTORY, "../../data/") _IMG_ORIG_NAME = "img05.jpg" _IMG_NOISE_NAME = "img05_noise.jpg" _IMG_HARRIS_NAME = "Harris.jpg" _FULL_PATH_ORIG = path.join(_PATH_2_DATA, _IMG_ORIG_NAME) _FULL_PATH_NOISE = path.join(_PATH_2_DATA, _IMG_NOISE_NAME) _FULL_PATH_HARRIS = path.join(_PATH_2_DATA, _IMG_HARRIS_NAME) # ------------------------------------------------------------------------------ # Functions # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # Plot Original Pictures # ------------------------------------------------------------------------------ # PASSED @click.command() @click.option('--orig', default=_FULL_PATH_ORIG, type=str, help="The path to the original image") @click.option('--noisy', default=_FULL_PATH_NOISE, type=str, help='he path to the original image w/ noise') @click.option("--save", default="output_OriginalPictures", type=str, help="The save name(s) of the output figure(s)") @click.option("--dpi", default=None, type=int, help="Quality of the figure window generated. If None its the " "default 100 dpi.") @click.option("--num", default=None, type=int, help="Number of the figure window generated. 
If None its " "cumulative.") def original_pictures(orig, noisy, save, dpi, num): orig = util.load_image_RGB(orig) noisy = util.load_image_RGB(noisy) fig = util.plotImages([orig, noisy], ["Original", "Noisy"], show=True, main_title="Loaded Images", cols=2, num=num, dpi=dpi) # Saves the figure. if save != "None": fig.savefig(save) # Wait for a key press to close figures input("Press Enter to continue...") # ------------------------------------------------------------------------------ # Mean Filter # ------------------------------------------------------------------------------ # PASSED @click.command() @click.option("--image", prompt="Path", default=_FULL_PATH_NOISE, type=str, help="The path to the image") @click.option("--kernels", default=str(list(range(3, 8, 2))), help="List of kernel dimensions") @click.option("--save", default="output_MeanFilter", type=str, help="The save name(s) of the output figure(s). If None didn't " "save the figure window.") @click.option("--dpi", default=None, type=int, help="Quality of the figure window generated. If None its the " "default 100 dpi.") @click.option("--num", default=None, type=int, help="Number of the figure window generated. If None its " "cumulative.") def mean_filter(image, kernels, save, dpi, num): image = util.load_image_RGB(image) kernels = json.loads(kernels) # Initialize the mean_images as list. Values are assigned on the for-loop mean_images = [] titles_images = [] for k in kernels: mean_images.append(cv.blur(image, (k, k))) titles_images.append(f"Kernel {k}x{k}") # Copy the arrays with the images generated in the for-loop and adds the # original noisy image for comparison. Also copies and adds the titles. plot_images = mean_images plot_images.insert(0, image) plot_titles = titles_images plot_titles.insert(0, "Noisy Image") # Plots the images. fig = util.plotImages(plot_images, plot_titles, show=True, main_title="Mean Filter - cv.blur", num=num, dpi=dpi) # Saves the figure. 
if save != "None": fig.savefig(save) # Wait for a key press to close figures input("Press Enter to continue...") # ------------------------------------------------------------------------------ # Mean Filter Anchor # ------------------------------------------------------------------------------ # PASSED @click.command() @click.option("--image", prompt="Path", default=_FULL_PATH_NOISE, type=str, help="The path to the image") @click.option("--kernel", default=10, help="Kernel aperture") @click.option("--crop_corner", default=str([480, 110]), help="The upper left corner of the crop area as list. The point " "with coordinates x=480 and y=110 is passed as [480,110].") @click.option("--crop_size", default=64, type=int, help="The size of the crop area") @click.option("--save", default="output_MeanFilter_Anchor", type=str, help="The save name(s) of the output figure(s)") @click.option("--dpi", default=None, type=int, help="Quality of the figure window generated. If None its the " "default 100 dpi.") @click.option("--num", default=None, type=int, help="Number of the figure window generated. If None its " "cumulative.") def mean_filter_anchor(image, kernel, crop_corner, crop_size, save, dpi, num): image = util.load_image_RGB(image) crop_corner = json.loads(crop_corner) # Initialize the anchor_images as list. Values are assigned on the for-loop # Initializes the kernel_max which is the maximum kernel value from Mean # Filter section above. anchor_images = [] titles_images = [] for a in range(0, kernel, round((kernel - 1) / 2) - 1): anchor_images.append(cv.blur(image, (kernel, kernel), anchor=(a, a))) titles_images.append(f"Anchor at ({a}, {a})") # Crop blurred images to better see the effect of anchor. anchor_images_crop = [] for i in range(len(anchor_images)): anchor_images_crop.append(anchor_images[i] [crop_corner[1]:crop_corner[1] + crop_size, crop_corner[0]:crop_corner[0] + crop_size]) # Saves individual the images. 
# util.saveImages(anchor_images_crop, titles_images, dpi=300, # save_name=save) # Saves an animation. util.animateImages(images=anchor_images_crop, titles=titles_images, save_name=save, frame_interval=120, verbose=True) # Plots the images. fig = util.plotImages(anchor_images_crop, titles_images, show=True, main_title=f"Mean Filter Anchor @ Kernel " f"{kernel}x{kernel} - cv.blur", num=num, dpi=dpi) # Saves the figure. if save != "None": fig.savefig(save) # Wait for a key press to close figures input("Press Enter to continue...") # ------------------------------------------------------------------------------ # Median filter # ------------------------------------------------------------------------------ # PASSED @click.command() @click.option("--image", prompt="Path", default=_FULL_PATH_NOISE, type=str, help="The path to the image") @click.option("--kernels", default=str(list(range(5, 10, 2))), help="List with kernel dimensions") @click.option("--save", default="output_MedianFilter", type=str, help="The save name(s) of the output figure(s)") @click.option("--dpi", default=None, type=int, help="Quality of the figure window generated. If None its the " "default 100 dpi.") @click.option("--num", default=None, type=int, help="Number of the figure window generated. If None its " "cumulative.") def median_filter(image, kernels, save, dpi, num): image = util.load_image_RGB(image) kernels = json.loads(kernels) # Initialize the median_images as list. Values are assigned on the for-loop median_images = [] titles_images = [] for k in kernels: median_images.append(cv.medianBlur(image, k)) titles_images.append(f"Kernel {k}x{k}") # Copy the arrays with the images generated in the for-loop and adds the # original noisy image for comparison. Also copies and adds the titles. plot_images = median_images plot_images.insert(0, image) plot_titles = titles_images plot_titles.insert(0, "Noisy Image") # Plots the images. 
fig = util.plotImages(plot_images, plot_titles, show=True, main_title="Median Filter - cv.medianBlur", num=num, dpi=dpi) # Saves the figure. if save != "None": fig.savefig(save) # Wait for a key press to close figures input("Press Enter to continue...") # ------------------------------------------------------------------------------ # Gaussian Filter # ------------------------------------------------------------------------------ # PASSED @click.command() @click.option("--image", prompt="Path", default=_FULL_PATH_NOISE, type=str, help="The path to the image") @click.option("--kernels", default=str(list(range(5, 10, 2))), help="List with kernel dimensions") @click.option("--save", default="output_GaussianFilter", type=str, help="The save name(s) of the output figure(s)") @click.option("--dpi", default=None, type=int, help="Quality of the figure window generated. If None its the " "default 100 dpi.") @click.option("--num", default=None, type=int, help="Number of the figure window generated. If None its " "cumulative.") def gaussian_filter(image, kernels, save, dpi, num): image = util.load_image_RGB(image) kernels = json.loads(kernels) # Initialize the gaussian_images as list. # Values are assigned on the for-loop gaussian_images = [] titles_images = [] for k in kernels: # SigmaX and SigmaY is 0 so they are calculated from kernel gaussian_images.append(cv.GaussianBlur(image, (k, k), 0, 0)) titles_images.append(f"Kernel {k}x{k}") # Copy the arrays with the images generated in the for-loop and adds the # original noisy image for comparison. Also copies and adds the titles. plot_images = gaussian_images plot_images.insert(0, image) plot_titles = titles_images plot_titles.insert(0, "Noisy Image") # Plots the images. fig = util.plotImages(plot_images, plot_titles, show=True, main_title="Gaussian Filter - cv.GaussianBlur", num=num, dpi=dpi) # Saves the figure. 
if save != "None": fig.savefig(save) # Wait for a key press to close figures input("Press Enter to continue...") # ------------------------------------------------------------------------------ # Gaussian Filter Sigma # ------------------------------------------------------------------------------ # PASSED @click.command() @click.option("--image", prompt="Path", default=_FULL_PATH_NOISE, type=str, help="The path to the image") @click.option("--sigma_x", default=str(list(np.arange(0.5, 1.1, 0.5))), help="The sigmaX values to be evaluated") @click.option("--sigma_y", default=str(list(np.arange(0.5, 1.1, 0.5))), help="The sigmaY values to be evaluated") @click.option("--crop_corner", default=str([480, 110]), help="The upper left corner of the crop area as list. The point " "with coordinates x=480 and y=110 is passed as [480,110].") @click.option("--crop_size", default=64, type=int, help="The size of the crop area") @click.option("--save", default="output_GaussianFilter_Sigma", type=str, help="The save name(s) of the output figure(s)") @click.option("--dpi", default=None, type=int, help="Quality of the figure window generated. If None its the " "default 100 dpi.") @click.option("--num", default=None, type=int, help="Number of the figure window generated. If None its " "cumulative.") def gaussian_filter_sigma(image, sigma_x, sigma_y, crop_corner, crop_size, save, dpi, num): image = util.load_image_RGB(image) sigma_x = json.loads(sigma_x) sigma_y = json.loads(sigma_y) crop_corner = json.loads(crop_corner) # Initialize the gaussian_images_sigma as list. # Values are assigned on the for-loop. 
gaussian_images_sigmaX = [] titles_images_X = [] for sigX in sigma_x: gaussian_images_sigmaX.append(cv.GaussianBlur(image, (9, 9), sigmaX=sigX, sigmaY=0.1)) titles_images_X.append(f"SigmaX = {sigX}") gaussian_images_sigmaY = [] titles_images_Y = [] for sigY in sigma_y: # SigmaX and SigmaY is 0 so they are calculated from kernel gaussian_images_sigmaY.append(cv.GaussianBlur(image, (9, 9), sigmaX=0.1, sigmaY=sigY)) titles_images_Y.append(f"SigmaY = {sigY}") # Crop filtered images to better see the effect of Sigma. gaussian_images_sigmaX_crop = [] for i in range(len(gaussian_images_sigmaX)): gaussian_images_sigmaX_crop.append(gaussian_images_sigmaX[i] [crop_corner[1]: crop_corner[1] + crop_size, crop_corner[0]: crop_corner[0] + crop_size]) gaussian_images_sigmaY_crop = [] for i in range(len(gaussian_images_sigmaY)): gaussian_images_sigmaY_crop.append(gaussian_images_sigmaY[i] [crop_corner[1]: crop_corner[1] + crop_size, crop_corner[0]: crop_corner[0] + crop_size]) # Concat the arrays of sigmaX and sigmaY to plot plot_images = gaussian_images_sigmaX_crop + gaussian_images_sigmaY_crop # Concat the titles arrays of sigmaX and sigmaY to plot plot_titles = titles_images_X + titles_images_Y # Plots the images. fig = util.plotImages(plot_images, plot_titles, show=True, main_title="Gaussian Filter Sigmas @ Kernel 9x9 - " "cv.GaussianBlur", num=num, dpi=dpi) # Saves the figure. if save != "None": fig.savefig(save) # Wait for a key press to close figures input("Press Enter to continue...") # ------------------------------------------------------------------------------ # Bilateral Filter # ------------------------------------------------------------------------------ # PASSED # See https://bit.ly/35X9VhK for recommended values. 
@click.command() @click.option("--image", prompt="Path", default=_FULL_PATH_NOISE, type=str, help="The path to the image") @click.option("--diameters", default=str(list(range(5, 16, 5))), help="Diameter of each pixel neighborhood used during " "filtering. If it is non-positive it is computed from " "sigmaSpace.") @click.option("--sigma_c", default=80, type=int, help="Filter sigma in the color space. A larger value of the " "parameter means that farther colors within the pixel " "neighborhood (see sigmaSpace) will be mixed together, " "resulting in larger areas of semi-equal color.") @click.option("--sigma_s", default=80, type=int, help="Filter sigma in the coordinate space. A larger value of " "the parameter means that farther pixels will influence " "each other as long as their colors are close enough (see " "sigmaColor). When d>0, it specifies the neighborhood " "size regardless of sigmaSpace.Otherwise, " "d is proportional to sigmaSpace.") @click.option("--save", default="output_BilateralFilter", type=str, help="The save name(s) of the output figure(s)") @click.option("--dpi", default=None, type=int, help="Quality of the figure window generated. If None its the " "default 100 dpi.") @click.option("--num", default=None, type=int, help="Number of the figure window generated. If None its " "cumulative.") def bilateral_filter(image, diameters, sigma_c, sigma_s, save, dpi, num): image = util.load_image_RGB(image) diameters = json.loads(diameters) # Initialize the bilateral_images as list. # Values are assigned on the for-loop bilateral_images = [] titles_images = [] for d in diameters: bilateral_images.append(cv.bilateralFilter(src=image, d=d, sigmaColor=sigma_c, sigmaSpace=sigma_s)) titles_images.append(f"D = {d}") # Copy the arrays with the images generated in the for-loop and adds the # original noisy image for comparison. Also copies and adds the titles. 
plot_images = bilateral_images plot_images.insert(0, image) plot_titles = titles_images plot_titles.insert(0, "Noisy Image") # Plots the images. fig = util.plotImages(plot_images, plot_titles, show=True, main_title=f"Bilateral Filter @ sigmaC = {sigma_c}, " f"sigmaS = {sigma_s} - cv.bilateralFilter", num=num, dpi=dpi) # Saves the figure. if save != "None": fig.savefig(save) # Wait for a key press to close figures input("Press Enter to continue...") # ------------------------------------------------------------------------------ # Bilateral Filter Sigma # ------------------------------------------------------------------------------ # PASSED # See https://bit.ly/35X9VhK for recommended values. @click.command() @click.option("--image", prompt="Path", default=_FULL_PATH_NOISE, type=str, help="The path to the image") @click.option("--diameter", default=9, type=int, help="Diameter of each pixel neighborhood used during " "filtering. If it is non-positive it is computed from " "sigmaSpace.") @click.option("--sigma_c", default=str(list(range(10, 251, 120))), help="Filter sigma in the color space. A larger value of the " "parameter means that farther colors within the pixel " "neighborhood (see sigmaSpace) will be mixed together, " "resulting in larger areas of semi-equal color.") @click.option("--sigma_s", default=str(list(range(3, 16, 6))), help="Filter sigma in the coordinate space. A larger value of " "the parameter means that farther pixels will influence " "each other as long as their colors are close enough (see " "sigmaColor). When d>0, it specifies the neighborhood " "size regardless of sigmaSpace.Otherwise, " "d is proportional to sigmaSpace.") @click.option("--crop_corner", default=str([480, 110]), help="The upper left corner of the crop area as list. 
The point " "with coordinates x=480 and y=110 is passed as [480,110].") @click.option("--crop_size", default=64, type=int, help="The size of the crop area") @click.option("--save", default="output_BilateralFilter_Sigma", type=str, help="The save name(s) of the output figure(s)") @click.option("--dpi", default=None, type=int, help="Quality of the figure window generated. If None its the " "default 100 dpi.") @click.option("--num", default=None, type=int, help="Number of the figure window generated. If None its " "cumulative.") def bilateral_filter_sigma(image, diameter, sigma_c, sigma_s, crop_corner, crop_size, save, dpi, num): image = util.load_image_RGB(image) sigma_c = json.loads(sigma_c) sigma_s = json.loads(sigma_s) crop_corner = json.loads(crop_corner) # Initialize the bilateral_images as list. # Values are assigned on the for-loop bilateral_images_sigmaC = [] titles_images_sigmaC = [] _sigS = min(sigma_s) for sigC in sigma_c: bilateral_images_sigmaC.append(cv.bilateralFilter(src=image, d=diameter, sigmaColor=sigC, sigmaSpace=_sigS)) titles_images_sigmaC.append(f"SigmaC = {sigC}") bilateral_images_sigmaS = [] titles_images_sigmaS = [] _sigC = max(sigma_c) for sigS in sigma_s: bilateral_images_sigmaS.append(cv.bilateralFilter(src=image, d=diameter, sigmaColor=_sigC, sigmaSpace=sigS)) titles_images_sigmaS.append(f"SigmaS = {sigS}") # Crop filtered images to better see the effect of Sigma. 
bilateral_images_sigmaC_crop = [] for i in range(len(bilateral_images_sigmaC)): bilateral_images_sigmaC_crop.append(bilateral_images_sigmaC[i] [crop_corner[1]: crop_corner[1] + crop_size, crop_corner[0]: crop_corner[0] + crop_size]) bilateral_images_sigmaS_crop = [] for i in range(len(bilateral_images_sigmaC)): bilateral_images_sigmaS_crop.append(bilateral_images_sigmaS[i] [crop_corner[1]: crop_corner[1] + crop_size, crop_corner[0]: crop_corner[0] + crop_size]) plot_images = bilateral_images_sigmaC_crop + bilateral_images_sigmaS_crop plot_titles = titles_images_sigmaC + titles_images_sigmaS # Plots the images. fig = util.plotImages(plot_images, plot_titles, show=True, main_title=f"Bilateral Filter Sigma @" f"D = {diameter} - cv.bilateralFilter", num=num, dpi=dpi) # Saves the figure. if save != "None": fig.savefig(save) # Wait for a key press to close figures input("Press Enter to continue...") # region @click.group() def entry_point(): pass entry_point.add_command(original_pictures) entry_point.add_command(mean_filter) entry_point.add_command(mean_filter_anchor) entry_point.add_command(median_filter) entry_point.add_command(gaussian_filter) entry_point.add_command(gaussian_filter_sigma) entry_point.add_command(bilateral_filter) entry_point.add_command(bilateral_filter_sigma) if __name__ == "__main__": entry_point() # endregion
StarcoderdataPython
1621531
<gh_stars>1-10 # # PySNMP MIB module DLINK-3100-CLI-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DLINK-3100-CLI-MIB # Produced by pysmi-0.3.4 at Wed May 1 12:48:06 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection") rnd, = mibBuilder.importSymbols("DLINK-3100-MIB", "rnd") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") IpAddress, Counter32, TimeTicks, Gauge32, ObjectIdentity, Unsigned32, MibIdentifier, iso, Integer32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter64, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Counter32", "TimeTicks", "Gauge32", "ObjectIdentity", "Unsigned32", "MibIdentifier", "iso", "Integer32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter64", "Bits") TextualConvention, TruthValue, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "DisplayString") rlCli = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 52)) rlCli.setRevisions(('2007-01-02 00:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: rlCli.setRevisionsDescriptions(('Initial revision.',)) if mibBuilder.loadTexts: rlCli.setLastUpdated('200701020000Z') if mibBuilder.loadTexts: rlCli.setOrganization('Dlink, Inc. 
Dlink Semiconductor, Inc.') if mibBuilder.loadTexts: rlCli.setContactInfo('www.dlink.com') if mibBuilder.loadTexts: rlCli.setDescription('This private MIB module defines CLI private MIBs.') rlCliMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 52, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: rlCliMibVersion.setStatus('current') if mibBuilder.loadTexts: rlCliMibVersion.setDescription("MIB's version, the current version is 1.") rlCliPassword = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 52, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite") if mibBuilder.loadTexts: rlCliPassword.setStatus('current') if mibBuilder.loadTexts: rlCliPassword.setDescription('CLI Password') rlCliTimer = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 52, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 3600))).setMaxAccess("readwrite") if mibBuilder.loadTexts: rlCliTimer.setStatus('current') if mibBuilder.loadTexts: rlCliTimer.setDescription('CLI Timer') rlCliFileEnable = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 52, 4), TruthValue()).setMaxAccess("readonly") if mibBuilder.loadTexts: rlCliFileEnable.setStatus('current') if mibBuilder.loadTexts: rlCliFileEnable.setDescription('CLI File Enable/Disable') rlCliFileEnableAfterReset = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 52, 5), TruthValue()).setMaxAccess("readwrite") if mibBuilder.loadTexts: rlCliFileEnableAfterReset.setStatus('current') if mibBuilder.loadTexts: rlCliFileEnableAfterReset.setDescription('CLI File Enable/Disable After Reset') mibBuilder.exportSymbols("DLINK-3100-CLI-MIB", rlCliFileEnable=rlCliFileEnable, rlCliMibVersion=rlCliMibVersion, rlCliPassword=rlCliPassword, rlCli=rlCli, PYSNMP_MODULE_ID=rlCli, rlCliFileEnableAfterReset=rlCliFileEnableAfterReset, rlCliTimer=rlCliTimer)
StarcoderdataPython
3362414
<filename>sdk/python/pulumi_gcp/compute/ha_vpn_gateway.py<gh_stars>100-1000 # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['HaVpnGatewayArgs', 'HaVpnGateway'] @pulumi.input_type class HaVpnGatewayArgs: def __init__(__self__, *, network: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, vpn_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['HaVpnGatewayVpnInterfaceArgs']]]] = None): """ The set of arguments for constructing a HaVpnGateway resource. :param pulumi.Input[str] network: The network this VPN gateway is accepting traffic for. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region this gateway should sit in. :param pulumi.Input[Sequence[pulumi.Input['HaVpnGatewayVpnInterfaceArgs']]] vpn_interfaces: A list of interfaces on this VPN gateway. Structure is documented below. 
""" pulumi.set(__self__, "network", network) if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if project is not None: pulumi.set(__self__, "project", project) if region is not None: pulumi.set(__self__, "region", region) if vpn_interfaces is not None: pulumi.set(__self__, "vpn_interfaces", vpn_interfaces) @property @pulumi.getter def network(self) -> pulumi.Input[str]: """ The network this VPN gateway is accepting traffic for. """ return pulumi.get(self, "network") @network.setter def network(self, value: pulumi.Input[str]): pulumi.set(self, "network", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
""" return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ The region this gateway should sit in. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @property @pulumi.getter(name="vpnInterfaces") def vpn_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HaVpnGatewayVpnInterfaceArgs']]]]: """ A list of interfaces on this VPN gateway. Structure is documented below. """ return pulumi.get(self, "vpn_interfaces") @vpn_interfaces.setter def vpn_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HaVpnGatewayVpnInterfaceArgs']]]]): pulumi.set(self, "vpn_interfaces", value) @pulumi.input_type class _HaVpnGatewayState: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, self_link: Optional[pulumi.Input[str]] = None, vpn_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['HaVpnGatewayVpnInterfaceArgs']]]] = None): """ Input properties used for looking up and filtering HaVpnGateway resources. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] network: The network this VPN gateway is accepting traffic for. 
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region this gateway should sit in. :param pulumi.Input[str] self_link: The URI of the created resource. :param pulumi.Input[Sequence[pulumi.Input['HaVpnGatewayVpnInterfaceArgs']]] vpn_interfaces: A list of interfaces on this VPN gateway. Structure is documented below. """ if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if network is not None: pulumi.set(__self__, "network", network) if project is not None: pulumi.set(__self__, "project", project) if region is not None: pulumi.set(__self__, "region", region) if self_link is not None: pulumi.set(__self__, "self_link", self_link) if vpn_interfaces is not None: pulumi.set(__self__, "vpn_interfaces", vpn_interfaces) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def network(self) -> Optional[pulumi.Input[str]]: """ The network this VPN gateway is accepting traffic for. 
""" return pulumi.get(self, "network") @network.setter def network(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ The region this gateway should sit in. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @property @pulumi.getter(name="selfLink") def self_link(self) -> Optional[pulumi.Input[str]]: """ The URI of the created resource. """ return pulumi.get(self, "self_link") @self_link.setter def self_link(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "self_link", value) @property @pulumi.getter(name="vpnInterfaces") def vpn_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HaVpnGatewayVpnInterfaceArgs']]]]: """ A list of interfaces on this VPN gateway. Structure is documented below. 
""" return pulumi.get(self, "vpn_interfaces") @vpn_interfaces.setter def vpn_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HaVpnGatewayVpnInterfaceArgs']]]]): pulumi.set(self, "vpn_interfaces", value) class HaVpnGateway(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, vpn_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HaVpnGatewayVpnInterfaceArgs']]]]] = None, __props__=None): """ Represents a VPN gateway running in GCP. This virtual device is managed by Google, but used only by you. This type of VPN Gateway allows for the creation of VPN solutions with higher availability than classic Target VPN Gateways. To get more information about HaVpnGateway, see: * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/vpnGateways) * How-to Guides * [Choosing a VPN](https://cloud.google.com/vpn/docs/how-to/choosing-a-vpn) * [Cloud VPN Overview](https://cloud.google.com/vpn/docs/concepts/overview) ## Example Usage ### Ha Vpn Gateway Basic ```python import pulumi import pulumi_gcp as gcp network1 = gcp.compute.Network("network1", auto_create_subnetworks=False) ha_gateway1 = gcp.compute.HaVpnGateway("haGateway1", region="us-central1", network=network1.id) ``` ### Ha Vpn Gateway Gcp To Gcp ```python import pulumi import pulumi_gcp as gcp network1 = gcp.compute.Network("network1", routing_mode="GLOBAL", auto_create_subnetworks=False) ha_gateway1 = gcp.compute.HaVpnGateway("haGateway1", region="us-central1", network=network1.id) network2 = gcp.compute.Network("network2", routing_mode="GLOBAL", auto_create_subnetworks=False) ha_gateway2 = gcp.compute.HaVpnGateway("haGateway2", region="us-central1", 
network=network2.id) network1_subnet1 = gcp.compute.Subnetwork("network1Subnet1", ip_cidr_range="10.0.1.0/24", region="us-central1", network=network1.id) network1_subnet2 = gcp.compute.Subnetwork("network1Subnet2", ip_cidr_range="10.0.2.0/24", region="us-west1", network=network1.id) network2_subnet1 = gcp.compute.Subnetwork("network2Subnet1", ip_cidr_range="192.168.1.0/24", region="us-central1", network=network2.id) network2_subnet2 = gcp.compute.Subnetwork("network2Subnet2", ip_cidr_range="192.168.2.0/24", region="us-east1", network=network2.id) router1 = gcp.compute.Router("router1", network=network1.name, bgp=gcp.compute.RouterBgpArgs( asn=64514, )) router2 = gcp.compute.Router("router2", network=network2.name, bgp=gcp.compute.RouterBgpArgs( asn=64515, )) tunnel1 = gcp.compute.VPNTunnel("tunnel1", region="us-central1", vpn_gateway=ha_gateway1.id, peer_gcp_gateway=ha_gateway2.id, shared_secret="a secret message", router=router1.id, vpn_gateway_interface=0) tunnel2 = gcp.compute.VPNTunnel("tunnel2", region="us-central1", vpn_gateway=ha_gateway1.id, peer_gcp_gateway=ha_gateway2.id, shared_secret="a secret message", router=router1.id, vpn_gateway_interface=1) tunnel3 = gcp.compute.VPNTunnel("tunnel3", region="us-central1", vpn_gateway=ha_gateway2.id, peer_gcp_gateway=ha_gateway1.id, shared_secret="a secret message", router=router2.id, vpn_gateway_interface=0) tunnel4 = gcp.compute.VPNTunnel("tunnel4", region="us-central1", vpn_gateway=ha_gateway2.id, peer_gcp_gateway=ha_gateway1.id, shared_secret="a secret message", router=router2.id, vpn_gateway_interface=1) router1_interface1 = gcp.compute.RouterInterface("router1Interface1", router=router1.name, region="us-central1", ip_range="169.254.0.1/30", vpn_tunnel=tunnel1.name) router1_peer1 = gcp.compute.RouterPeer("router1Peer1", router=router1.name, region="us-central1", peer_ip_address="169.254.0.2", peer_asn=64515, advertised_route_priority=100, interface=router1_interface1.name) router1_interface2 = 
gcp.compute.RouterInterface("router1Interface2", router=router1.name, region="us-central1", ip_range="169.254.1.2/30", vpn_tunnel=tunnel2.name) router1_peer2 = gcp.compute.RouterPeer("router1Peer2", router=router1.name, region="us-central1", peer_ip_address="169.254.1.1", peer_asn=64515, advertised_route_priority=100, interface=router1_interface2.name) router2_interface1 = gcp.compute.RouterInterface("router2Interface1", router=router2.name, region="us-central1", ip_range="169.254.0.2/30", vpn_tunnel=tunnel3.name) router2_peer1 = gcp.compute.RouterPeer("router2Peer1", router=router2.name, region="us-central1", peer_ip_address="169.254.0.1", peer_asn=64514, advertised_route_priority=100, interface=router2_interface1.name) router2_interface2 = gcp.compute.RouterInterface("router2Interface2", router=router2.name, region="us-central1", ip_range="169.254.1.1/30", vpn_tunnel=tunnel4.name) router2_peer2 = gcp.compute.RouterPeer("router2Peer2", router=router2.name, region="us-central1", peer_ip_address="169.254.1.2", peer_asn=64514, advertised_route_priority=100, interface=router2_interface2.name) ``` ### Compute Ha Vpn Gateway Encrypted Interconnect ```python import pulumi import pulumi_gcp as gcp network = gcp.compute.Network("network", auto_create_subnetworks=False) address1 = gcp.compute.Address("address1", address_type="INTERNAL", purpose="IPSEC_INTERCONNECT", address="192.168.1.0", prefix_length=29, network=network.self_link) router = gcp.compute.Router("router", network=network.name, encrypted_interconnect_router=True, bgp=gcp.compute.RouterBgpArgs( asn=16550, )) attachment1 = gcp.compute.InterconnectAttachment("attachment1", edge_availability_domain="AVAILABILITY_DOMAIN_1", type="PARTNER", router=router.id, encryption="IPSEC", ipsec_internal_addresses=[address1.self_link]) address2 = gcp.compute.Address("address2", address_type="INTERNAL", purpose="IPSEC_INTERCONNECT", address="192.168.2.0", prefix_length=29, network=network.self_link) attachment2 = 
gcp.compute.InterconnectAttachment("attachment2", edge_availability_domain="AVAILABILITY_DOMAIN_2", type="PARTNER", router=router.id, encryption="IPSEC", ipsec_internal_addresses=[address2.self_link]) vpn_gateway = gcp.compute.HaVpnGateway("vpn-gateway", network=network.id, vpn_interfaces=[ gcp.compute.HaVpnGatewayVpnInterfaceArgs( id=0, interconnect_attachment=attachment1.self_link, ), gcp.compute.HaVpnGatewayVpnInterfaceArgs( id=1, interconnect_attachment=attachment2.self_link, ), ]) ``` ## Import HaVpnGateway can be imported using any of these accepted formats ```sh $ pulumi import gcp:compute/haVpnGateway:HaVpnGateway default projects/{{project}}/regions/{{region}}/vpnGateways/{{name}} ``` ```sh $ pulumi import gcp:compute/haVpnGateway:HaVpnGateway default {{project}}/{{region}}/{{name}} ``` ```sh $ pulumi import gcp:compute/haVpnGateway:HaVpnGateway default {{region}}/{{name}} ``` ```sh $ pulumi import gcp:compute/haVpnGateway:HaVpnGateway default {{name}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] network: The network this VPN gateway is accepting traffic for. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region this gateway should sit in. 
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HaVpnGatewayVpnInterfaceArgs']]]] vpn_interfaces: A list of interfaces on this VPN gateway. Structure is documented below. """ ... @overload def __init__(__self__, resource_name: str, args: HaVpnGatewayArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Represents a VPN gateway running in GCP. This virtual device is managed by Google, but used only by you. This type of VPN Gateway allows for the creation of VPN solutions with higher availability than classic Target VPN Gateways. To get more information about HaVpnGateway, see: * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/vpnGateways) * How-to Guides * [Choosing a VPN](https://cloud.google.com/vpn/docs/how-to/choosing-a-vpn) * [Cloud VPN Overview](https://cloud.google.com/vpn/docs/concepts/overview) ## Example Usage ### Ha Vpn Gateway Basic ```python import pulumi import pulumi_gcp as gcp network1 = gcp.compute.Network("network1", auto_create_subnetworks=False) ha_gateway1 = gcp.compute.HaVpnGateway("haGateway1", region="us-central1", network=network1.id) ``` ### Ha Vpn Gateway Gcp To Gcp ```python import pulumi import pulumi_gcp as gcp network1 = gcp.compute.Network("network1", routing_mode="GLOBAL", auto_create_subnetworks=False) ha_gateway1 = gcp.compute.HaVpnGateway("haGateway1", region="us-central1", network=network1.id) network2 = gcp.compute.Network("network2", routing_mode="GLOBAL", auto_create_subnetworks=False) ha_gateway2 = gcp.compute.HaVpnGateway("haGateway2", region="us-central1", network=network2.id) network1_subnet1 = gcp.compute.Subnetwork("network1Subnet1", ip_cidr_range="10.0.1.0/24", region="us-central1", network=network1.id) network1_subnet2 = gcp.compute.Subnetwork("network1Subnet2", ip_cidr_range="10.0.2.0/24", region="us-west1", network=network1.id) network2_subnet1 = gcp.compute.Subnetwork("network2Subnet1", ip_cidr_range="192.168.1.0/24", region="us-central1", network=network2.id) 
network2_subnet2 = gcp.compute.Subnetwork("network2Subnet2", ip_cidr_range="192.168.2.0/24", region="us-east1", network=network2.id) router1 = gcp.compute.Router("router1", network=network1.name, bgp=gcp.compute.RouterBgpArgs( asn=64514, )) router2 = gcp.compute.Router("router2", network=network2.name, bgp=gcp.compute.RouterBgpArgs( asn=64515, )) tunnel1 = gcp.compute.VPNTunnel("tunnel1", region="us-central1", vpn_gateway=ha_gateway1.id, peer_gcp_gateway=ha_gateway2.id, shared_secret="a secret message", router=router1.id, vpn_gateway_interface=0) tunnel2 = gcp.compute.VPNTunnel("tunnel2", region="us-central1", vpn_gateway=ha_gateway1.id, peer_gcp_gateway=ha_gateway2.id, shared_secret="a secret message", router=router1.id, vpn_gateway_interface=1) tunnel3 = gcp.compute.VPNTunnel("tunnel3", region="us-central1", vpn_gateway=ha_gateway2.id, peer_gcp_gateway=ha_gateway1.id, shared_secret="a secret message", router=router2.id, vpn_gateway_interface=0) tunnel4 = gcp.compute.VPNTunnel("tunnel4", region="us-central1", vpn_gateway=ha_gateway2.id, peer_gcp_gateway=ha_gateway1.id, shared_secret="a secret message", router=router2.id, vpn_gateway_interface=1) router1_interface1 = gcp.compute.RouterInterface("router1Interface1", router=router1.name, region="us-central1", ip_range="169.254.0.1/30", vpn_tunnel=tunnel1.name) router1_peer1 = gcp.compute.RouterPeer("router1Peer1", router=router1.name, region="us-central1", peer_ip_address="169.254.0.2", peer_asn=64515, advertised_route_priority=100, interface=router1_interface1.name) router1_interface2 = gcp.compute.RouterInterface("router1Interface2", router=router1.name, region="us-central1", ip_range="169.254.1.2/30", vpn_tunnel=tunnel2.name) router1_peer2 = gcp.compute.RouterPeer("router1Peer2", router=router1.name, region="us-central1", peer_ip_address="169.254.1.1", peer_asn=64515, advertised_route_priority=100, interface=router1_interface2.name) router2_interface1 = gcp.compute.RouterInterface("router2Interface1", 
router=router2.name, region="us-central1", ip_range="169.254.0.2/30", vpn_tunnel=tunnel3.name) router2_peer1 = gcp.compute.RouterPeer("router2Peer1", router=router2.name, region="us-central1", peer_ip_address="169.254.0.1", peer_asn=64514, advertised_route_priority=100, interface=router2_interface1.name) router2_interface2 = gcp.compute.RouterInterface("router2Interface2", router=router2.name, region="us-central1", ip_range="169.254.1.1/30", vpn_tunnel=tunnel4.name) router2_peer2 = gcp.compute.RouterPeer("router2Peer2", router=router2.name, region="us-central1", peer_ip_address="169.254.1.2", peer_asn=64514, advertised_route_priority=100, interface=router2_interface2.name) ``` ### Compute Ha Vpn Gateway Encrypted Interconnect ```python import pulumi import pulumi_gcp as gcp network = gcp.compute.Network("network", auto_create_subnetworks=False) address1 = gcp.compute.Address("address1", address_type="INTERNAL", purpose="IPSEC_INTERCONNECT", address="192.168.1.0", prefix_length=29, network=network.self_link) router = gcp.compute.Router("router", network=network.name, encrypted_interconnect_router=True, bgp=gcp.compute.RouterBgpArgs( asn=16550, )) attachment1 = gcp.compute.InterconnectAttachment("attachment1", edge_availability_domain="AVAILABILITY_DOMAIN_1", type="PARTNER", router=router.id, encryption="IPSEC", ipsec_internal_addresses=[address1.self_link]) address2 = gcp.compute.Address("address2", address_type="INTERNAL", purpose="IPSEC_INTERCONNECT", address="192.168.2.0", prefix_length=29, network=network.self_link) attachment2 = gcp.compute.InterconnectAttachment("attachment2", edge_availability_domain="AVAILABILITY_DOMAIN_2", type="PARTNER", router=router.id, encryption="IPSEC", ipsec_internal_addresses=[address2.self_link]) vpn_gateway = gcp.compute.HaVpnGateway("vpn-gateway", network=network.id, vpn_interfaces=[ gcp.compute.HaVpnGatewayVpnInterfaceArgs( id=0, interconnect_attachment=attachment1.self_link, ), gcp.compute.HaVpnGatewayVpnInterfaceArgs( id=1, 
interconnect_attachment=attachment2.self_link, ), ]) ``` ## Import HaVpnGateway can be imported using any of these accepted formats ```sh $ pulumi import gcp:compute/haVpnGateway:HaVpnGateway default projects/{{project}}/regions/{{region}}/vpnGateways/{{name}} ``` ```sh $ pulumi import gcp:compute/haVpnGateway:HaVpnGateway default {{project}}/{{region}}/{{name}} ``` ```sh $ pulumi import gcp:compute/haVpnGateway:HaVpnGateway default {{region}}/{{name}} ``` ```sh $ pulumi import gcp:compute/haVpnGateway:HaVpnGateway default {{name}} ``` :param str resource_name: The name of the resource. :param HaVpnGatewayArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(HaVpnGatewayArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, vpn_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HaVpnGatewayVpnInterfaceArgs']]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = 
HaVpnGatewayArgs.__new__(HaVpnGatewayArgs) __props__.__dict__["description"] = description __props__.__dict__["name"] = name if network is None and not opts.urn: raise TypeError("Missing required property 'network'") __props__.__dict__["network"] = network __props__.__dict__["project"] = project __props__.__dict__["region"] = region __props__.__dict__["vpn_interfaces"] = vpn_interfaces __props__.__dict__["self_link"] = None super(HaVpnGateway, __self__).__init__( 'gcp:compute/haVpnGateway:HaVpnGateway', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, self_link: Optional[pulumi.Input[str]] = None, vpn_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HaVpnGatewayVpnInterfaceArgs']]]]] = None) -> 'HaVpnGateway': """ Get an existing HaVpnGateway resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
:param pulumi.Input[str] network: The network this VPN gateway is accepting traffic for. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region this gateway should sit in. :param pulumi.Input[str] self_link: The URI of the created resource. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HaVpnGatewayVpnInterfaceArgs']]]] vpn_interfaces: A list of interfaces on this VPN gateway. Structure is documented below. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _HaVpnGatewayState.__new__(_HaVpnGatewayState) __props__.__dict__["description"] = description __props__.__dict__["name"] = name __props__.__dict__["network"] = network __props__.__dict__["project"] = project __props__.__dict__["region"] = region __props__.__dict__["self_link"] = self_link __props__.__dict__["vpn_interfaces"] = vpn_interfaces return HaVpnGateway(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ An optional description of this resource. """ return pulumi.get(self, "description") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @property @pulumi.getter def network(self) -> pulumi.Output[str]: """ The network this VPN gateway is accepting traffic for. 
""" return pulumi.get(self, "network") @property @pulumi.getter def project(self) -> pulumi.Output[str]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @property @pulumi.getter def region(self) -> pulumi.Output[str]: """ The region this gateway should sit in. """ return pulumi.get(self, "region") @property @pulumi.getter(name="selfLink") def self_link(self) -> pulumi.Output[str]: """ The URI of the created resource. """ return pulumi.get(self, "self_link") @property @pulumi.getter(name="vpnInterfaces") def vpn_interfaces(self) -> pulumi.Output[Sequence['outputs.HaVpnGatewayVpnInterface']]: """ A list of interfaces on this VPN gateway. Structure is documented below. """ return pulumi.get(self, "vpn_interfaces")
StarcoderdataPython
100880
from django.core.management.base import BaseCommand
from mixer.backend.django import mixer

from situation_report_app.models import Post
from users_app.models import AppUser


class Command(BaseCommand):
    """Reseed the database with randomly generated ``Post`` fixtures.

    Deletes every non-superuser ``AppUser`` first, then creates ``count``
    posts via ``mixer.blend``, printing progress as it goes.
    """

    def handle(self, *args, **options):
        # Keep superusers so admin accounts survive the reseed.
        AppUser.objects.filter(is_superuser=False).delete()
        count = 500
        for i in range(count):
            # Progress as a percentage of the total to create.
            p = (i / count) * 100
            print(f'{i}) {p} %')
            new_post = mixer.blend(Post)
            print(new_post)
            # BUGFIX: the original called mixer.blend(Post) a second time
            # here, silently creating 2 * count posts instead of count.
        print('end')
StarcoderdataPython
79476
import IPython.lib.demo as ipd

# Usage, from an ipython session:
#
#   In [1]: %run Demos.py
#   In [2]: d = ImageDemo()
#   In [3]: d()   # advance the demo one block
#   In [4]: d()


def _demo(script):
    """Build a ClearIPDemo that steps through the given tutorial script."""
    return ipd.ClearIPDemo(script)


def ImageDemo():
    """Demo for the basic image tutorial."""
    return _demo('BasicTutorial1/Image.py')


def InputOutputDemo():
    """Demo for image reading/writing."""
    return _demo('BasicTutorial1/InputOutput.py')


def MemoryManagementDemo():
    """Demo for memory management."""
    return _demo('BasicTutorial1/MemoryManagement.py')


def FiltersDemo():
    """Demo for image filters."""
    return _demo('BasicTutorial2/Filters.py')


def MorphologyDemo():
    """Demo for morphological operations."""
    return _demo('BasicTutorial2/Morphology.py')


def MeasureRegionsDemo():
    """Demo for region measurement."""
    return _demo('InteractiveTutorial/MeasureRegions.py')


def BorderChangeDemo():
    """Demo for border handling."""
    return _demo('InteractiveTutorial/05-01-BorderChange.py')


def NumpyDemo():
    """Demo for numpy interoperability."""
    return _demo('InteractiveTutorial/05-02-Numpy.py')


def RidgeDetectionDemo():
    """Demo for ridge detection."""
    return _demo('InteractiveTutorial/05-04-RidgeDetection.py')
StarcoderdataPython
116381
# <gh_stars>10-100  (dataset artifact, not part of the original file)
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'IMWindow.ui'
#
# Created: Mon Oct 09 13:21:20 2006
#      by: PyQt4 UI code generator 4.0.1
#
# WARNING! All changes made in this file will be lost!

import sys
from PyQt4 import QtCore, QtGui


class Ui_IMWindow(object):
    """Generated UI definition for the CSpace instant-messaging window.

    Lays out a read-only chat log above a chat input box, with a status
    label at the bottom. Apply with ``setupUi(widget)``; regenerated from
    IMWindow.ui, so do not edit by hand.
    """

    def setupUi(self, IMWindow):
        """Build and attach all child widgets of *IMWindow*."""
        IMWindow.setObjectName("IMWindow")
        # Initial size 401x308, but never smaller than the layout's minimum.
        IMWindow.resize(QtCore.QSize(QtCore.QRect(0,0,401,308).size()).expandedTo(IMWindow.minimumSizeHint()))
        IMWindow.setWindowIcon(QtGui.QIcon(":/images/cspace32.png"))
        self.vboxlayout = QtGui.QVBoxLayout(IMWindow)
        self.vboxlayout.setMargin(9)
        self.vboxlayout.setSpacing(6)
        self.vboxlayout.setObjectName("vboxlayout")
        # Read-only chat history view (gets 4x the vertical stretch of the
        # input box below).
        self.chatLogView = QtGui.QTextEdit(IMWindow)
        # NOTE(review): Policy(7) is presumably QSizePolicy.Expanding —
        # confirm against the PyQt4 enum values.
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Policy(7),QtGui.QSizePolicy.Policy(7))
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(4)
        sizePolicy.setHeightForWidth(self.chatLogView.sizePolicy().hasHeightForWidth())
        self.chatLogView.setSizePolicy(sizePolicy)
        # 10pt "MS Shell Dlg", normal weight (50), no decorations.
        font = QtGui.QFont(self.chatLogView.font())
        font.setFamily("MS Shell Dlg")
        font.setPointSize(10)
        font.setWeight(50)
        font.setItalic(False)
        font.setUnderline(False)
        font.setStrikeOut(False)
        font.setBold(False)
        self.chatLogView.setFont(font)
        # Log is click-to-focus only and cannot be edited by the user.
        self.chatLogView.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.chatLogView.setReadOnly(True)
        self.chatLogView.setObjectName("chatLogView")
        self.vboxlayout.addWidget(self.chatLogView)
        # Message-composition box (vertical stretch 1, plain text only).
        self.chatInputEdit = QtGui.QTextEdit(IMWindow)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Policy(7),QtGui.QSizePolicy.Policy(7))
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.chatInputEdit.sizePolicy().hasHeightForWidth())
        self.chatInputEdit.setSizePolicy(sizePolicy)
        # Same font setup as the log view.
        font = QtGui.QFont(self.chatInputEdit.font())
        font.setFamily("MS Shell Dlg")
        font.setPointSize(10)
        font.setWeight(50)
        font.setItalic(False)
        font.setUnderline(False)
        font.setStrikeOut(False)
        font.setBold(False)
        self.chatInputEdit.setFont(font)
        self.chatInputEdit.setAcceptRichText(False)
        self.chatInputEdit.setObjectName("chatInputEdit")
        self.vboxlayout.addWidget(self.chatInputEdit)
        # Status line at the bottom of the window.
        self.statusLabel = QtGui.QLabel(IMWindow)
        self.statusLabel.setObjectName("statusLabel")
        self.vboxlayout.addWidget(self.statusLabel)
        self.retranslateUi(IMWindow)
        QtCore.QMetaObject.connectSlotsByName(IMWindow)
        # Tab cycles from the input box to the log view.
        IMWindow.setTabOrder(self.chatInputEdit,self.chatLogView)

    def retranslateUi(self, IMWindow):
        """Set all user-visible (translatable) strings on *IMWindow*."""
        IMWindow.setWindowTitle(QtGui.QApplication.translate("IMWindow", "CSpace IM", None, QtGui.QApplication.UnicodeUTF8))
StarcoderdataPython
1723233
import random
import json


def _users_file(txt):
    """Map a member category to its backing file.

    :param txt: ``'members'`` or ``'inline_members'``.
    :raises ValueError: for any other category (the original silently
        produced a ``NameError`` / did nothing).
    """
    if txt == 'members':
        return 'Members.txt'
    if txt == 'inline_members':
        return 'Inline_Members.txt'
    raise ValueError(f'unknown member category: {txt!r}')


def RandomUSERagent():
    """Return a random User-Agent string from ``user_agents.txt``.

    One user agent per line; blank lines are skipped (the original could
    pick an empty string). The file handle is closed via ``with`` (the
    original leaked it).
    """
    with open('user_agents.txt', 'r') as fp:
        agents = [line for line in fp.read().split('\n') if line]
    return random.choice(agents)


def Get_users_count(txt):
    """Return the number of non-blank user IDs stored for category *txt*."""
    with open(_users_file(txt), 'r') as file:
        users = file.read().split('\n')
    return sum(1 for user in users if user)


def Get_total_users():
    """Return the combined count of regular and inline members."""
    return Get_users_count('members') + Get_users_count('inline_members')


def Write_userID(userid, txt):
    """Append *userid* (one per line) to the file for category *txt*."""
    with open(_users_file(txt), 'a') as file:
        file.write(str(userid) + '\n')


def is_user(userid, txt):
    """Return True iff *userid* is recorded in category *txt*.

    BUGFIX: the original tested ``str(userid) in file_contents``, a
    substring match, so e.g. id ``11`` matched a stored ``111``. IDs are
    now compared line-by-line, exactly.
    """
    uid = str(userid)
    with open(_users_file(txt)) as file:
        return any(line.strip() == uid for line in file)


def Get_lyrics(query):
    """Fetch lyrics for *query* from the unofficial LyricsFreak API."""
    # Imported lazily so the file-based helpers above work even when the
    # third-party `requests` package is not installed.
    from requests import get
    url = f'https://lyricsfk-api.herokuapp.com/search-lyrics/{query}?format=json'
    req = get(url).text
    lyrics = json.loads(req)
    return lyrics
StarcoderdataPython
1630865
class SpLinkStatus(basestring):
    """String value describing the physical link state of a Service
    Processor network interface.

    Possible values:
    <ul>
    <li> "up"       - Link is connected,
    <li> "down"     - Link is severed,
    <li> "disabled" - Link has been disabled by user,
    <li> "unknown"  - Link status is no known
    </ul>
    """

    @staticmethod
    def get_api_name():
        # Field name used on the wire by the NetApp API.
        return "sp-link-status"
StarcoderdataPython
194972
from __future__ import annotations


def n31(a: int) -> tuple[list[int], int]:
    """
    Return the Collatz sequence of any positive integer and its length.

    >>> n31(4)
    ([4, 2, 1], 3)

    Raises:
        TypeError: if ``a`` is not an ``int``.
        ValueError: if ``a`` is less than 1.
    """
    if not isinstance(a, int):
        raise TypeError("Must be int, not {}".format(type(a).__name__))
    if a < 1:
        # The guard accepts 1, so the message must ask for a *positive*
        # integer (the original said "greater than 1", contradicting `a < 1`).
        raise ValueError(f"Given integer must be positive, not {a}")

    path = [a]
    while a != 1:
        # Halve even values; apply 3n + 1 to odd values.
        a = a // 2 if a % 2 == 0 else 3 * a + 1
        path.append(a)
    return path, len(path)


def test_n31():
    """
    >>> test_n31()
    """
    assert n31(4) == ([4, 2, 1], 3)
    assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15)
    # The expected sequence below originally omitted 107 between 214 and 322
    # (214 -> 107 -> 322), leaving only 106 elements despite the asserted
    # length of 107; the missing element is restored.
    assert n31(31) == ([31,94,47,142,71,214,107,322,161,484,242,121,364,182,91,274,137,412,206,103,310,155,466,233,700,350,175,526,263,790,395,1186,593,1780,890,
        445,1336,668,334,167,502,251,754,377,1132,566,283,850,425,1276,638,319,958,479,1438,719,2158,1079,3238,1619,4858,2429,7288,3644,1822,911,
        2734,1367,4102,2051,6154,3077,9232,4616,2308,1154,577,1732,866,433,1300,650,325,976,488,244,122,61,184,92,46,23,70,35,106,53,160,80,40,
        20,10,5,16,8,4,2,1,],107)


if __name__ == "__main__":
    num = 4
    path, length = n31(num)
    print(f"The Collatz sequence of {num} took {length} steps. \nPath: {path}")
StarcoderdataPython
3200303
'''
Created on Apr, 2017

@author: hugo
'''
import numpy as np


def calc_ranks(x):
    """Given a list of items, return a list (in ndarray type) of 1-based
    ranks, where rank 1 is the largest item.  Ties keep their original
    order (stable sort), matching the original ``sorted(..., reverse=True)``.
    """
    x = np.asarray(x)
    n = len(x)
    # The original did `zip(*sorted(...))[0]`, which raises TypeError on
    # Python 3 because `zip` returns an iterator; a stable argsort of the
    # negated values yields the same largest-first index order.
    index = np.argsort(-x, kind="stable")
    rank = np.zeros(n)
    rank[index] = np.arange(1, n + 1)
    return rank


def rank_bank_topic(bank_doc_map, doc_topic_dist):
    """Rank topics for banks.

    Args:
        bank_doc_map: dict mapping bank id -> list of doc ids.
        doc_topic_dist: dict mapping doc id -> per-topic scores (array-like).

    Returns:
        dict mapping bank id -> mean reciprocal rank per topic (ndarray).
    """
    bank_topic_ranks = {}
    for each_bank in bank_doc_map:
        rank = [calc_ranks(doc_topic_dist[each_doc])
                for each_doc in bank_doc_map[each_bank]]
        rank = np.r_[rank]  # stack per-doc rank rows into a 2-D array
        # compute ranking score (mean reciprocal rank over the bank's docs)
        bank_topic_ranks[each_bank] = np.mean(1. / rank, axis=0)
    return bank_topic_ranks


if __name__ == '__main__':
    n = 10
    bank_doc_map = {'bank_0': ['doc_0', 'doc_1'], 'bank_1': ['doc_2', 'doc_3', 'doc_4']}
    doc_topic_dist = dict([('doc_%s' % i, np.random.randn(n)) for i in range(5)])
    rank = rank_bank_topic(bank_doc_map, doc_topic_dist)
StarcoderdataPython
3394625
<filename>test/test_filequeue.py<gh_stars>0
# Tests for the file-backed TaskQueue ("fq://") implementation.
import json
import os
import time

from six.moves import range

import pytest

import taskqueue
from taskqueue import RegisteredTask, TaskQueue, MockTask, PrintTask, LocalTaskQueue
from taskqueue.paths import ExtractedPath, mkpath

# Throwaway file-backed queue living under /tmp.
FILE_QURL = 'fq:///tmp/removeme/taskqueue/fq'

# Number of tasks inserted by crtq().
N = 1000


def crtq():
    """Create a fresh queue: wipe leftovers, reset counters, enqueue N tasks."""
    tq = TaskQueue(FILE_QURL)
    tq.purge()
    tq.rezero()
    tq.insert(( PrintTask(i) for i in range(N) ))
    return tq


def test_release_all():
    """Leasing stamps a future expiry on each task file; release_all rewinds it."""
    tq = crtq()
    for _ in range(tq.enqueued):
        task = tq.lease(seconds=3600)
    now = int(time.time())
    # Queue files are named "<lease-expiry>--<id>"; every lease is in the future.
    for fname in os.listdir(tq.api.queue_path):
        assert int(fname.split('--')[0]) > now
    tq.release_all()
    now = int(time.time())
    # After release_all every task is immediately leasable again.
    for fname in os.listdir(tq.api.queue_path):
        assert int(fname.split('--')[0]) <= now
    tq.purge()


def test_count_completions():
    """`completed` is only tracked when poll() is called with tally=True."""
    tq = crtq()
    executed = tq.poll(stop_fn=lambda executed: N <= executed)
    assert tq.completed == 0
    tq = crtq()
    tq.poll(stop_fn=lambda executed: N <= executed, tally=True)
    assert tq.completed == N
    tq.purge()


def test_count_insertions():
    """`inserted` counts enqueued tasks and is cleared by rezero()."""
    tq = crtq()
    assert tq.inserted == N
    tq.rezero()
    assert tq.inserted == 0
    tq.purge()


def test_count_leases():
    """`leased` tracks outstanding leases and drops to zero on release_all()."""
    tq = crtq()
    assert tq.leased == 0
    tq.lease(seconds=10000)
    assert tq.leased == 1
    tq.lease(seconds=10000)
    tq.lease(seconds=10000)
    tq.lease(seconds=10000)
    assert tq.leased == 4
    tq.release_all()
    assert tq.leased == 0
    tq.purge()


def test_renew():
    """renew() pushes a task's lease expiry forward without changing its id."""
    tq = TaskQueue(FILE_QURL)
    tq.purge()
    tq.insert(PrintTask('hello'))
    # Helpers to split the "<lease-expiry>--<id>" file name.
    ts = lambda fname: int(fname.split('--')[0])
    ident = lambda fname: fname.split('--')[1]
    filenames = os.listdir(tq.api.queue_path)
    assert len(filenames) == 1
    filename = filenames[0]
    # A freshly inserted, unleased task carries expiry 0.
    assert ts(filename) == 0
    identity = ident(filename)
    now = time.time()
    tq.renew(filename, 1)
    filenames = os.listdir(tq.api.queue_path)
    assert len(filenames) == 1
    filename = filenames[0]
    # Expiry moved at least 1s into the future; identity is preserved.
    assert ts(filename) >= int(time.time()) + 1
    assert ident(filename) == identity


def test_enumerating_tasks():
    """tasks() yields every enqueued task with its payload intact."""
    tq = TaskQueue(FILE_QURL)
    tq.purge()
    for _ in range(10):
        tq.insert(PrintTask('hello'))
        tq.insert(PrintTask('world'))
    lst = list(tq.tasks())
    assert len(lst) == 20
    hello = 0
    world = 0
    for task in lst:
        hello += int(task.txt == "hello")
        world += int(task.txt == "world")
    assert hello == 10
    assert world == 10
StarcoderdataPython
1636462
from django.apps import AppConfig


class InfraConfig(AppConfig):
    """Django application configuration for the ``infra`` app."""

    # Dotted module path Django uses to register this application.
    name = "infra"
StarcoderdataPython
1657995
""" Scene creation functions Listing 60 from <NAME>'s Ray Tracing in a Weekend: https://raytracing.github.io/books/RayTracingInOneWeekend.html <NAME> -- 2020 """ import math from random import random, uniform from pathlib import Path from PIL import Image import colorcet as cc from stl import mesh # numpy-stl from geometry_classes import Vec3, GeometryList, Camera from geometry_classes import random_on_unit_sphere, get_color from material_classes import Lambertian, Metal, Dielectric from texture_classes import SolidColor, CheckerBoard, ImageTexture, NoiseTexture from primitives_classes import Sphere, Plane, Triangle, Disc, STLMesh from light_classes import PointLight, AreaLight from scene import Scene from perlin import value_noise, turbulent_noise, fractal_noise, wood_pattern, marble_pattern def create_simple_world(settings=None): color_1 = SolidColor(Vec3(0.7, 0.3, 0.3)) color_2 = SolidColor(Vec3(0.8, 0.8, 0)) color_3 = SolidColor(Vec3(0.8,0.6,0.2)) diffuse_1 = Lambertian(color_1, name="diffuse_1") diffuse_2 = Lambertian(color_2, name="diffuse_2") metal_1 = Metal(color_3, fuzziness=0.3, name="metal_1") dielectric_1 = Dielectric(1.5, name="dielectric_1") world = GeometryList() world.add(Sphere(Vec3(0,0,-1), 0.5, diffuse_1)) world.add(Sphere(Vec3(0,-100.5,-1), 100, diffuse_2)) world.add(Sphere(Vec3(1.25,0,-1), 0.5, metal_1)) world.add(Sphere(Vec3(-1.25,0,-1),0.5, dielectric_1)) world.add(Sphere(Vec3(-1.25,0,-1),-0.45, dielectric_1)) # hollow sphere ambient = Vec3(0.7, 0.7, 0.7) background = SolidColor(Vec3(0.5, 0.7, 1.0)) # light_1 = PointLight(pos=Vec3(0, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) # light directly above sphere light_1 = PointLight(pos=Vec3(-1, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) # light_2 = PointLight(pos=Vec3(0, 10, 5.0), color=Vec3(0.1, 0.1, 0.25)) # blue light to the left lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) #camera = Camera(look_from=Vec3(-0.5, 1, 5), look_at=Vec3(0, 0, -1), 
vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.1, camera = Camera(look_from=Vec3(-0.5, 1, 5), look_at=Vec3(0, 0, -1), vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.0, focus_dist=20) return {'scene': scene, 'camera': camera} def create_simple_world_2(settings=None): # use a plane instead of a big sphere! color_3 = SolidColor(Vec3(0.2, 0.2, 0.7)) color_4 = SolidColor(Vec3(0.8,0.6,0.2)) color_5 = SolidColor(Vec3(0.4,0.4,0.4)) diffuse_3 = Lambertian(color_3, name="diffuse_3") metal_1 = Metal(color_4, fuzziness=0.3, name="metal_1") metal_2 = Metal(color_5, fuzziness=0.0, name="metal_2") world = GeometryList() world.add(Sphere(Vec3(0,0,-1), 1.5, metal_1)) plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0,-3,0), normal=Vec3(0,1,0), material=diffuse_3) plane_2 = Plane.plane_from_point_and_normal(pt=Vec3(0,0,-10), normal=Vec3(0,0,1), material=metal_2) world.add(plane_1) world.add(plane_2) ambient = Vec3(0.6, 0.6, 0.6) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(-2, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) # light directly above sphere lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(-0.5, 1, 10), look_at=Vec3(0, 0, -1), vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.0, focus_dist=20) return {'scene': scene, 'camera': camera} def create_simple_world_3(settings=None): # add triangles color_1 = SolidColor(Vec3(0.7, 0.3, 0.3)) color_2 = SolidColor(Vec3(0.2, 0.2, 0.7)) color_3 = SolidColor(Vec3(0.4,0.4,0.4)) diffuse_1 = Lambertian(color_1) diffuse_3 = Lambertian(color_2) metal_2 = Metal(color_3, fuzziness=0.0) dielectric_1 = Dielectric(1.5) world = GeometryList() world.add(Sphere(Vec3(0,0,-1), 1.5, metal_2)) v0 = Vec3(-1.8, -0.5, 1.5) v1 = Vec3(-1.0, 0.5, 1.5) v2 = Vec3(-0.2, -0.5, 1.5) world.add(Triangle(v0,v1,v2,diffuse_1)) v0 = Vec3(1.8, -0.5, 1.5) v1 = Vec3(1.0, 0.5, 1.5) v2 = Vec3(0.2, -0.5, 1.5) world.add(Triangle(v0, v1, v2, metal_2)) v0 = Vec3(-1.0, 0.8, 1.5) v1 = 
Vec3(0.0, 2.5, 0.75) v2 = Vec3(1.0, 0.8, 1.5) world.add(Triangle(v0, v1, v2, dielectric_1)) plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0,-3,0), normal=Vec3(0,1,0), material=diffuse_3) world.add(plane_1) ambient = Vec3(0.6, 0.6, 0.6) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(-2, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) # light directly above sphere lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) # camera = Camera(look_from=Vec3(-0.5, 1, 13), look_at=Vec3(0, 0, -1), vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.1, focus_dist=20) camera = Camera(look_from=Vec3(1, 1, 13), look_at=Vec3(0, 0, -1), vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.0, focus_dist=20) return {'scene': scene, 'camera': camera} def create_random_world(settings=None): world = GeometryList() color_1 = SolidColor(Vec3(0.5,0.5,0.5)) ground_material = Lambertian(color_1) glass_material = Dielectric(1.5) center_offset = Vec3(4, 0.2, 9) world.add(Sphere(Vec3(0,-1000,0), 1000, ground_material)) for a in range(-11, 11): for b in range(-11, 11): choose_mat = random() center = Vec3(a+0.9*random(), 0.2, b+0.9*random()) if (center - center_offset).length() > 0.9: if choose_mat < 0.8: # diffuse r = random()*random() g = random()*random() b = random()*random() albedo = SolidColor(Vec3(r,g,b)) sphere_material = Lambertian(albedo) elif choose_mat < 0.95: # metal a = uniform(0.5, 1.0) fuzz = uniform(0.0, 0.5) albedo = SolidColor(Vec3(a,a,a)) sphere_material = Metal(albedo, fuzz) else: # glass sphere_material = glass_material world.add(Sphere(center, 0.2, sphere_material)) material_1 = Dielectric(1.5) world.add(Sphere(Vec3(0,1,0), 1.0, material_1)) color_2 = SolidColor(Vec3(0.4, 0.2, 0.1)) material_2 = Lambertian(color_2) world.add(Sphere(Vec3(-4, 1, 0), 1.0, material_2)) color_3 = SolidColor(Vec3(0.7,0.6,0.5)) material_3 = Metal(color_3, 0.0) world.add(Sphere(Vec3(4, 1, 0), 1.0, material_3)) ambient = Vec3(0.6, 0.6, 0.6) background 
= SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(18,12,5), color=Vec3(0.25, 0.25, 0.25)) # light directly above sphere lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(13, 2, 3), look_at=Vec3(0, 0, 0), vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.1, focus_dist=20) return {'scene': scene, 'camera': camera} def create_random_world2(settings=None): def random_material(): choose_mat = random() if choose_mat < 0.8: # diffuse r = random() * random() g = random() * random() b = random() * random() albedo = SolidColor(Vec3(r, g, b)) material = Lambertian(albedo) elif choose_mat < 0.95: # metal a = uniform(0.5, 1.0) albedo = SolidColor(Vec3(a,a,a)) fuzz = uniform(0.0, 0.5) material = Metal(albedo, fuzz) else: # glass material = glass_material return material # a ground plane, a metal sphere and random triangles... world = GeometryList() ground_color = SolidColor(Vec3(0.2,0.6,0.2)) ground_material = Lambertian(ground_color) metal_1 = Metal(SolidColor(Vec3(0.7,0.6,0.5)), fuzziness=0.0) metal_2 = Metal(SolidColor(Vec3(0.4,0.4,0.4)), fuzziness=0.3) glass_material = Dielectric(1.5) center_offset = Vec3(4, 0.2, 9) plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0, -3, 0), normal=Vec3(0, 1, 0), material=ground_material) world.add(plane_1) world.add(Sphere(Vec3(0,0,-1), 1.5, metal_1)) for a in range(-12, 12): for b in range(-12, 12): center = Vec3(a+0.9*random(), 3*random()+0.3, b+0.9*random()) if (center - center_offset).length() > 0.9: material = random_material() v0 = random_on_unit_sphere().mul_val(0.7) + center v1 = random_on_unit_sphere().mul_val(0.7) + center v2 = random_on_unit_sphere().mul_val(0.7) + center triangle = Triangle(v0,v1,v2, material) world.add(triangle) ambient = Vec3(0.6, 0.6, 0.6) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(18, 10, 5), color=Vec3(0.25, 0.25, 0.25)) # light directly above sphere lights = [light_1] scene = Scene(world, 
ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(13, 2, 3), look_at=Vec3(0, 0, 0), vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.1, focus_dist=20) return {'scene': scene, 'camera': camera} def create_checkerboard_world(settings=None): color_1 = SolidColor(Vec3(0.7, 0.3, 0.3)) color_2 = SolidColor(Vec3(0.8,0.6,0.2)) odd_color = SolidColor(Vec3(0.2,0.3,0.1)) even_color = SolidColor(Vec3(0.9,0.9,0.9)) checker_board = CheckerBoard(even_color, odd_color, spacing=3) other_odd_color = SolidColor(Vec3(0.1, 0.1, 0.1)) other_even_color = SolidColor(Vec3(0.9, 0.1, 0.1)) checker_board_2 = CheckerBoard(other_even_color, other_odd_color, spacing=8) diffuse_1 = Lambertian(color_1, name="diffuse_1") diffuse_2 = Lambertian(checker_board, name="diffuse_checkerboard") diffuse_3 = Lambertian(checker_board_2, name="diffuse_checkerboard_2") metal_1 = Metal(color_2, fuzziness=0.3, name="metal_1") dielectric_1 = Dielectric(1.5, name="dielectric_1") world = GeometryList() world.add(Sphere(Vec3(0,0,-1), 0.5, diffuse_1)) v0 = Vec3(-2, 0.1, -2.5) v1 = Vec3(2, 0.1, -2.5) v2 = Vec3(0, 1.5, -2.0) world.add(Triangle(v0, v1, v2, diffuse_3, uv0=(0.5,1), uv1=(1,0), uv2=(0,0))) plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0, -3, 0), normal=Vec3(0, 1, 0), material=diffuse_2) world.add(plane_1) world.add(Sphere(Vec3(1.2,0,-1), 0.5, metal_1)) world.add(Sphere(Vec3(-1.2,0,-1),0.5, dielectric_1)) world.add(Sphere(Vec3(-1.2,0,-1),-0.45, dielectric_1)) # hollow sphere ambient = Vec3(0.6, 0.6, 0.6) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(-2, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) # light directly above sphere lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(-0.5, 1, 5), look_at=Vec3(0, 0, -1), vup=Vec3(0, 1, 0), vert_fov=30) return {'scene': scene, 'camera': camera} def create_checkerboard_world_2(settings=None): odd_color = SolidColor(Vec3(0.2,0.3,0.1)) 
even_color = SolidColor(Vec3(0.9,0.9,0.9)) checker_board = CheckerBoard(even_color, odd_color, spacing=3) diffuse_1 = Lambertian(checker_board, name="diffuse_checkerboard") metal_1 = Metal(checker_board, fuzziness=0.1, name="metal_checkerboard") world = GeometryList() world.add(Sphere(Vec3(0,-10,0), 10, diffuse_1)) world.add(Sphere(Vec3(0,10,0), 10, metal_1)) ambient = Vec3(0.6, 0.6, 0.6) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(-2, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) # light directly above sphere lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(-0.5, 1, 15), look_at=Vec3(0, 0, -1), vup=Vec3(0, 1, 0), vert_fov=20) return {'scene': scene, 'camera': camera} def create_image_texture_world(settings=None): silver = SolidColor(Vec3(0.7, 0.7, 0.7)) image_1 = Image.open(Path("./textures/earthlights_dmsp_big.jpg")) image_2 = Image.open(Path("./textures/george harrison (1 bit).bmp")) image_texture_1 = ImageTexture(image_1, "earthlights") odd_color = ImageTexture(image_2) even_color = SolidColor(Vec3(0.1, 0.1, 0.1)) checker_board = CheckerBoard(even_color, odd_color, spacing=3) diffuse_1 = Lambertian(image_texture_1, name="diffuse_1") diffuse_2 = Lambertian(checker_board, name="checkerboard") metal_1 = Metal(silver, name="metal_1") world = GeometryList() world.add(Sphere(Vec3(-0.75, 0, -1), 1.0, diffuse_1)) world.add(Sphere(Vec3(1.5, 0, -2.25), 1.0, metal_1)) plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0, -3, 0), normal=Vec3(0, 1, 0), material=diffuse_2) world.add(plane_1) ambient = Vec3(0.6, 0.6, 0.6) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(-2, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) # light directly above sphere lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(-0.5, 1, 7), look_at=Vec3(0, 0, -0.5), vup=Vec3(0, 1, 0), vert_fov=20, 
aperature=0.1, focus_dist=20) return {'scene': scene, 'camera': camera} def create_canonical_1(settings=None): # sphere over a checkerboard! silver = SolidColor(Vec3(0.7, 0.7, 0.7)) # image_1 = Image.open(Path("./textures/earthlights_dmsp_big.jpg")) image_2 = Image.open(Path("./textures/IO_logo.png")) # image_texture_1 = ImageTexture(image_1, "earthlights") logo = ImageTexture(image_2) # images are across the entire checkerboard, not a single square? # odd_color = ImageTexture(image_2) # images are across the entire checkerboard, not a single square? odd_color = SolidColor(Vec3(0.2, 0.75, 0.2)) even_color = SolidColor(Vec3(0.1, 0.1, 0.1)) checker_board = CheckerBoard(even_color, odd_color, spacing=2.0) if True: # use checkerboard vs image texture diffuse_2 = Lambertian(checker_board, name="checkerboard") # diffuse_2 = Lambertian(odd_color, name="odd_color") else: diffuse_2 = Lambertian(logo, name="io_logo'") metal_1 = Metal(silver, name="metal_1") world = GeometryList() world.add(Sphere(Vec3(0, 1.25, 0.35), 1.0, metal_1)) # world.add(Sphere(Vec3(0, 1.0001, 0.35), 1.0, metal_1)) if True: # use plane vs triangles plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0, -1, 0), normal=Vec3(0, 1, 0), material=diffuse_2) world.add(plane_1) else: plane_x = 2 plane_y = -1 back_plane_z = -2 front_plane_z = 3 v0 = Vec3(-plane_x, plane_y, front_plane_z) uv0 = (0,0) v1 = Vec3(-plane_x ,plane_y,back_plane_z) uv1 = (0, 1) v2 = Vec3(plane_x, plane_y, front_plane_z) uv2 = (1, 0) v3 = Vec3(plane_x, plane_y,back_plane_z) uv3 = (1, 1) triangle = Triangle(v0, v1, v2, diffuse_2, uv0, uv1, uv2) world.add(triangle) triangle = Triangle(v1, v2, v3, diffuse_2, uv1, uv2, uv3) world.add(triangle) ambient = Vec3(0.5,0.5,0.5) # ambient = Vec3(0.3,0.3,0.3) background = SolidColor(Vec3(0.5, 0.7, 1.0)) # background = SolidColor(Vec3(0,0,0)) # light_1 = PointLight(pos=Vec3(11,10,3), color=Vec3(0.25, 0.25, 0.25)) geom = Disc(center=Vec3(11,10,3), normal=Vec3(0,-1,0), radius=1.5, 
material=SolidColor(Vec3(0.7, 0.7, 0.7))) if settings and 'SAMPLES_PER_LIGHT' in settings: samples = settings['SAMPLES_PER_LIGHT'] else: samples = 25 light_2 = AreaLight(geom=geom, color=Vec3(0.6, 0.6, 0.6), num_samples=samples) lights = [light_2] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(8.5, 4, 0), look_at=Vec3(0, 1, 0), vup=Vec3(0, 1, 0), vert_fov=25) return {'scene': scene, 'camera': camera} def create_canonical_2(settings=None): """ teapot time! TEAPOT_thingiverse.stl -- bbox=AABB(vmin=(-15.000, -10.005, -9.088), vmax=(16.371, 10.005, 7.162)), num_triangles=87298 """ spacing=0.5 stl_filename = Path("models/TEAPOT_thingiverse.stl") rot_axis = [1, 0, 0] rot_rads = math.pi / 2.0 # 45 deg look_from = Vec3(0, 15, 60) look_at = Vec3(0, -1.5, 0) plane_y = -9.1 plane_x = 25 back_plane_z = -25 front_plane_z = 15 light1_pos = Vec3(11, 10, 3) silver = SolidColor(Vec3(0.7, 0.7, 0.7)) light_gray = SolidColor(Vec3(0.85, 0.85, 0.85)) odd_color = SolidColor(Vec3(0.2, 0.75, 0.2)) even_color = SolidColor(Vec3(0.1, 0.1, 0.1)) checker_board = CheckerBoard(even_color, odd_color, spacing=spacing) diffuse_1 = Lambertian(checker_board, name="checkerboard") # diffuse_2 = Lambertian(silver, name="silver_matte") diffuse_2 = Lambertian(light_gray, name="silver_matte") fuzz = 0.2 metal_1 = Metal(silver, fuzziness=fuzz, name="chrome") world = GeometryList() my_mesh = mesh.Mesh.from_file(stl_filename) my_mesh.rotate(rot_axis, rot_rads) # teapot_matl = metal_1 teapot_matl = diffuse_2 stl_mesh = STLMesh(my_mesh, teapot_matl, name="teapot") print(f'stl_mesh {stl_filename} -- bbox={stl_mesh.bounding_box(None, None)}, num_triangles={stl_mesh.num_triangles}') world.add(stl_mesh) if False: # use plane vs triangles plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0, plane_y, 0), normal=Vec3(0, 1, 0), material=diffuse_1) world.add(plane_1) else: v0 = Vec3(-plane_x, plane_y, front_plane_z) uv0 = (0,0) v1 = Vec3(-plane_x 
,plane_y,back_plane_z) uv1 = (0, 1) v2 = Vec3(plane_x, plane_y, front_plane_z) uv2 = (1, 0) v3 = Vec3(plane_x, plane_y,back_plane_z) uv3 = (1, 1) triangle = Triangle(v0, v1, v2, diffuse_1, uv0, uv1, uv2) world.add(triangle) triangle = Triangle(v1, v2, v3, diffuse_1, uv1, uv2, uv3) world.add(triangle) # ambient = Vec3(0.6,0.6,0.6) ambient = Vec3(0.5,0.5,0.5) background = SolidColor(Vec3(0.5, 0.7, 1.0)) # light_1 = PointLight(pos=light1_pos, color=Vec3(0.35, 0.35, 0.35)) light_1 = PointLight(pos=light1_pos, color=Vec3(0.5, 0.5, 0.5)) lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=look_from, look_at=look_at, vup=Vec3(0, 1, 0), vert_fov=25) return {'scene': scene, 'camera': camera} def create_perlin_1(settings=None): # sphere over a plane! # dragon over a plane! green = SolidColor(Vec3(0.2,0.7,0.2)) brown = SolidColor(Vec3(0.7,0.5,0.3)) # point_scale = 1.0 # wood_point_scale = 100.0 wood_point_scale = 20.0 wood, wood_name = (cc.CET_D6[155:240], 'wood3') wood_colormap = [get_color(i, wood) for i in range(len(wood))] kwargs = {'frequency': 0.01, 'frequency_mult': 10, } translate = 1.0 scale = 0.5 wood_texture = NoiseTexture(wood_colormap, point_scale=wood_point_scale, translate=translate, scale=scale, name=wood_name, eval_func=wood_pattern, eval_kwargs=kwargs) jade, jade_name = (cc.CET_D13[135:240], 'jade2') jade_colormap = [get_color(i, jade) for i in range(len(jade))] kwargs = {'frequency': 0.024, 'frequency_mult': 2.5, 'amplitude_mult': 0.5, 'layers': 7, 'displace_x': 200} translate = 0.20 scale = 1.0 # jade_point_scale = 600.0 jade_point_scale = 6.0 jade_texture = NoiseTexture(jade_colormap, point_scale=jade_point_scale, translate=translate, scale=scale, name=jade_name, eval_func=marble_pattern, eval_kwargs=kwargs) # diffuse_1 = Lambertian(wood_texture, name="wood'") diffuse_2 = Lambertian(jade_texture, name="jade") diffuse_3 = Lambertian(green, name="solid green") diffuse_4 = Lambertian(brown, 
name="solid brown") metal_1 = Metal(wood_texture, name="shiny wood", fuzziness=0.2) metal_2 = Metal(jade_texture, name="metal_1", fuzziness=0.3) ground_matl = metal_1 # ground_matl = diffuse_4 object_matl = diffuse_2 # object_matl = metal_2 # object_matl = diffuse_3 world = GeometryList() if False: # if True: world.add(Sphere(Vec3(0, 0.0, 0.0), 8.0, object_matl)) settings = {'look_from': Vec3(0.0, 10, 40), 'look_at': Vec3(0, 0.25, 0), 'plane_x': 24, 'plane_y': -8.0, 'back_plane_z': -25, 'front_plane_z': 20, 'rot_axis': [1, 0, 0], 'rot_rads': math.pi / 2, 'translate': [0, 0, -12.5], 'show_walls': True} else: stl_filename = "models/dragon_65.stl" settings = {'look_from': Vec3(0.0, 10, 40), 'look_at': Vec3(0, 0.25, 0), 'plane_x': 24, 'plane_y': -7.221, 'back_plane_z': -25, 'front_plane_z': 20, 'rot_axis': [1,0,0], 'rot_rads': math.pi/2, 'translate': [0, 0, -12.5], 'show_walls': True} # if False: if True: my_mesh = mesh.Mesh.from_file(stl_filename) if 'translate' in settings: settings['translate'][0] my_mesh.translate([settings['translate'][0], settings['translate'][1], settings['translate'][2]]) if 'rot_axis' in settings and settings['rot_axis'] is not None: rot_axis = settings['rot_axis'] rot_rads = settings['rot_rads'] my_mesh.rotate(rot_axis, rot_rads) stl_mesh = STLMesh(my_mesh, object_matl, name="mesh_1") print(f'stl_mesh {stl_filename} -- bbox={stl_mesh.bounding_box(None, None)}, num_triangles={stl_mesh.num_triangles}') world.add(stl_mesh) if True: # if True: # use plane vs triangles if False: plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0, -1, 0), normal=Vec3(0, 1, 0), material=ground_matl) world.add(plane_1) else: plane_x = settings['plane_x'] plane_y = settings['plane_y'] back_plane_z = settings['back_plane_z'] front_plane_z = settings['front_plane_z'] v0 = Vec3(-plane_x, plane_y, front_plane_z) uv0 = (0,0) v1 = Vec3(-plane_x ,plane_y,back_plane_z) uv1 = (0, 1) v2 = Vec3(plane_x, plane_y, front_plane_z) uv2 = (1, 0) v3 = Vec3(plane_x, 
plane_y,back_plane_z) uv3 = (1, 1) triangle = Triangle(v0, v1, v2, ground_matl, uv0, uv1, uv2) world.add(triangle) triangle = Triangle(v1, v2, v3, ground_matl, uv1, uv2, uv3) world.add(triangle) ambient = Vec3(0.6,0.6,0.6) background = SolidColor(Vec3(0.5, 0.7, 1.0)) geom = Disc(center=Vec3(3,10,-3), normal=Vec3(0,-1,0), radius=1.5, material=SolidColor(Vec3(0.7, 0.7, 0.7))) if settings and 'SAMPLES_PER_LIGHT' in settings: samples = settings['SAMPLES_PER_LIGHT'] else: samples = 25 # light_1 = PointLight(pos=Vec3(-10.0, 100, 80), color=Vec3(0.2, 0.3, 0.2)) light_1 = PointLight(pos=Vec3(-10.0, 100, 80), color=Vec3(0.6, 0.6, 0.6)) light_2 = AreaLight(geom=geom, color=Vec3(0.6, 0.6, 0.6), num_samples=samples) # lights = [light_1] lights = [light_2] # lights = [light_1, light_2] scene = Scene(world, ambient=ambient, lights=lights, background=background) # camera = Camera(look_from=Vec3(8.5, 4, 0), look_at=Vec3(0, 1, 0), vup=Vec3(0, 1, 0), vert_fov=25) camera = Camera(look_from=settings['look_from'], look_at=settings['look_at'], vup=Vec3(0, 1, 0), vert_fov=25) return {'scene': scene, 'camera': camera} def create_stl_mesh(settings=None): """ TODO: scale the model """ silver = SolidColor(Vec3(0.7, 0.7, 0.7)) green = SolidColor(Vec3(0.1, 0.5, 0.1)) blue = SolidColor(Vec3(0.1, 0.1, 0.5)) red = SolidColor(Vec3(0.5, 0.2, 0.2)) purple = SolidColor(Vec3(0.4, 0.1, 0.4)) gray = SolidColor(Vec3(0.2, 0.2, 0.2)) med_gray = SolidColor(Vec3(0.4, 0.4, 0.4)) light_gray = SolidColor(Vec3(0.9, 0.9, 0.9)) dark_gray = SolidColor(Vec3(0.1, 0.1, 0.1)) black = SolidColor(Vec3(0.0, 0.0, 0.0)) # rotated by 90 deg on the X axis... 
bbox=AABB(vmin=(-48.551, 5.275, -45.792), vmax=(59.196, 113.167, 42.010)), num_triangles=112402 stl_filename = "models/Bunny.stl" settings = {'look_from': Vec3(0.0, 100, 350), 'look_at': Vec3(0, 50.0, 0), 'plane_x': 120, 'plane_y': 5.275, 'back_plane_z': -85, 'front_plane_z': 350, 'rot_axis': [1,0,0], 'rot_rads': math.pi/2, 'translate': [-25, 0, 0], 'show_walls': True} image_2 = Image.open(Path("./textures/IO_logo.png")) logo = ImageTexture(image_2) # images are across the entire checkerboard, not a single square? checked = CheckerBoard(dark_gray, light_gray, spacing=0.1) diffuse_red = Lambertian(red, name="red'") diffuse_blue = Lambertian(blue, name="blue'") diffuse_gray = Lambertian(gray, name="gray'") diffuse_med_gray = Lambertian(med_gray, name="med_gray'") diffuse_light_gray = Lambertian(light_gray, name="light_gray'") metal_1 = Metal(silver, name="metal_1") logo_matl = Lambertian(logo, name="logo") # dielectric_1 = Dielectric(1.5, name="dielectric_1") checkerboard = Lambertian(checked, name="gray'") dielectric = Dielectric(1.0, "dielectric") # object_matl = metal_1 # object_matl = diffuse_gray object_matl = diffuse_light_gray # object_matl = dielectric ground_matl = diffuse_gray # ground_matl = checkerboard # ground_matl = logo_matl # right_wall_matl = diffuse_red # right_wall_matl = metal_1 right_wall_matl = diffuse_light_gray # left_wall_matl = diffuse_blue # left_wall_matl = metal_1 left_wall_matl = diffuse_light_gray # back_wall_matl = logo_matl # back_wall_matl = metal_1 back_wall_matl = checkerboard world = GeometryList() if True: plane_x = settings['plane_x'] plane_y = settings['plane_y'] back_plane_z = settings['back_plane_z'] front_plane_z = settings['front_plane_z'] if True: # ground plane v0 = Vec3(-plane_x, plane_y, front_plane_z) uv0 = (0,1) v1 = Vec3(-plane_x ,plane_y,back_plane_z) uv1 = (0,0) v2 = Vec3(plane_x, plane_y, front_plane_z) uv2 = (1,1) v3 = Vec3(plane_x, plane_y,back_plane_z) uv3 = (1,0) triangle = Triangle(v0, v1, v2, ground_matl, 
uv0, uv1, uv2) world.add(triangle) triangle = Triangle(v1, v2, v3, ground_matl, uv1, uv2, uv3) world.add(triangle) height = 2 * plane_x if settings['show_walls'] is True: # right wall v0 = Vec3(plane_x, plane_y, front_plane_z) uv0 = (1, 1) v1 = Vec3(plane_x, plane_y, back_plane_z) uv1 = (0, 1) v2 = Vec3(plane_x, plane_y+height, back_plane_z) uv2 = (0,0) v3 = Vec3(plane_x, plane_y+height, front_plane_z) uv3 = (1, 0) triangle = Triangle(v0, v1, v2, right_wall_matl, uv0, uv1, uv2) world.add(triangle) triangle = Triangle(v0, v2, v3, right_wall_matl, uv0, uv2, uv3) world.add(triangle) # left wall v0 = Vec3(-plane_x, plane_y, front_plane_z) uv0 = (0, 1) v1 = Vec3(-plane_x, plane_y, back_plane_z) uv1 = (1, 1) v2 = Vec3(-plane_x, plane_y + height, back_plane_z) uv2 = (1, 0) v3 = Vec3(-plane_x, plane_y + height, front_plane_z) uv3 = (0, 0) triangle = Triangle(v0, v1, v2, left_wall_matl, uv0, uv1, uv2) world.add(triangle) triangle = Triangle(v0, v2, v3, left_wall_matl, uv0, uv2, uv3) world.add(triangle) # back wall v0 = Vec3(-plane_x, plane_y, back_plane_z) uv0 = (0, 1) v1 = Vec3(-plane_x, plane_y + height, back_plane_z) uv1 = (0, 0) v2 = Vec3(plane_x, plane_y + height, back_plane_z) uv2 = (1, 0) v3 = Vec3(plane_x, plane_y, back_plane_z) uv3 = (1, 1) triangle = Triangle(v0, v1, v2, back_wall_matl, uv0, uv1, uv2) world.add(triangle) triangle = Triangle(v0, v2, v3, back_wall_matl, uv0, uv2, uv3) world.add(triangle) my_mesh = mesh.Mesh.from_file(stl_filename) if 'translate' in settings: settings['translate'][0] my_mesh.translate([settings['translate'][0], settings['translate'][1], settings['translate'][2]]) if 'rot_axis' in settings and settings['rot_axis'] is not None: rot_axis = settings['rot_axis'] rot_rads = settings['rot_rads'] my_mesh.rotate(rot_axis, rot_rads) stl_mesh = STLMesh(my_mesh, object_matl, name="mesh_1") print(f'stl_mesh {stl_filename} -- bbox={stl_mesh.bounding_box(None, None)}, num_triangles={stl_mesh.num_triangles}') world.add(stl_mesh) ambient = Vec3(0.5, 
0.5, 0.5) # ambient = Vec3(0.3, 0.3, 0.3) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(-10.0, 100, 80), color=Vec3(0.2, 0.2, 0.2)) # disc_1 = Disc(center=Vec3(-15,110,20), normal=Vec3(-1,-20,-0.5), radius=5.0, material=diffuse_gray) # disc_1 = Disc(center=Vec3(-15,110,20), normal=Vec3(0,-1,0), radius=15.0, material=diffuse_gray) disc_1 = Disc(center=Vec3(-30,130,25), normal=Vec3(0.15, -1, 0.15), radius=8.0, material=diffuse_med_gray) light_2 = AreaLight(geom=disc_1, color=Vec3(0.5, 0.5, 0.5)) # lights = [light_1] lights = [light_2] # lights = [light_1, light_2] scene = Scene(world, ambient=ambient, lights=lights, background=background) # camera = Camera(look_from=Vec3(8.5, 4, 0), look_at=Vec3(0, 1, 0), vup=Vec3(0, 1, 0), vert_fov=25) # camera = Camera(look_from=Vec3(12, 3, 0), look_at=Vec3(0, 0.5, 0), vup=Vec3(0, 1, 0), vert_fov=25) look_from = settings['look_from'] look_at = settings['look_at'] camera = Camera(look_from=look_from, look_at=look_at, vup=Vec3(0, 1, 0), vert_fov=25) return {'scene': scene, 'camera': camera} def create_quad_world(settings=None): color_1 = SolidColor(Vec3(0.7, 0.3, 0.3)) color_2 = SolidColor(Vec3(0.8, 0.8, 0)) color_3 = SolidColor(Vec3(0.8,0.6,0.2)) image_1 = Image.open(Path("./textures/earthlights_dmsp_big.jpg")) image_texture_1 = ImageTexture(image_1, "earthlights") texture_1 = Lambertian(image_texture_1, name="texture_1") diffuse_1 = Lambertian(color_1, name="diffuse_1") diffuse_2 = Lambertian(color_2, name="diffuse_2") diffuse_3 = Lambertian(color_3, name="diffuse_3") # metal_1 = Metal(color_3, fuzziness=0.3, name="metal_1") # dielectric_1 = Dielectric(1.5, name="dielectric_1") world = GeometryList() v0 = Vec3(-1,0,0) v1 = Vec3(-1,1,0) v2 = Vec3(1,1,0) # world.add(Quad(v0,v1,v2, diffuse_1)) # XY world.add(Quad(v0,v1,v2, texture_1)) # XY v0 = Vec3(-1, 0, -1) # XZ v1 = Vec3(-1, 0, -2) v2 = Vec3(1, 0, -1) world.add(Quad(v0, v1, v2, diffuse_2)) v0 = Vec3(-0.75, 3, -0.75) v1 = Vec3(-1, 2, -0.35) v2 = 
Vec3(0.3, 2.75, -0.5) world.add(Quad(v0, v1, v2, diffuse_3)) ambient = Vec3(0.7, 0.7, 0.7) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(-1, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(1.5, 3, 5), look_at=Vec3(0, 0, -1), vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.0, # camera = Camera(look_from=Vec3(0, 30, 0), look_at=Vec3(0, 0, 0), vup=Vec3(-1, 0, 0), vert_fov=20, aperature=0.0, focus_dist=20) return {'scene': scene, 'camera': camera} def create_disc_test_world(settings=None): color_1 = SolidColor(Vec3(0.7, 0.3, 0.3)) color_2 = SolidColor(Vec3(0.8, 0.8, 0)) color_3 = SolidColor(Vec3(0.8,0.6,0.2)) # image_1 = Image.open(Path("./textures/earthlights_dmsp_big.jpg")) image_1 = Image.open(Path("./textures/george harrison (1 bit).bmp")) image_texture_1 = ImageTexture(image_1, "earthlights") diffuse_1 = Lambertian(color_1, name="diffuse_1") diffuse_2 = Lambertian(color_2, name="diffuse_2") diffuse_3 = Lambertian(image_texture_1, name="diffuse_3") metal_1 = Metal(color_3, fuzziness=0.0, name="metal_1") dielectric_1 = Dielectric(1.5, name="dielectric_1") world = GeometryList() world.add(Disc(center=Vec3(-1.5,1.5,0), normal=Vec3(0,0,1), radius=0.5, material=diffuse_1)) world.add(Disc(center=Vec3(1.5,1.5,0), normal=Vec3(0,0,1), radius=1.0, material=diffuse_2)) world.add(Disc(center=Vec3(0,1.0,0), normal=Vec3(0,-1,1), radius=0.75, material=diffuse_3)) world.add(Disc(center=Vec3(0,0,0), normal=Vec3(0,1,0), radius=5.0, material=metal_1)) ambient = Vec3(0.7, 0.7, 0.7) background = SolidColor(Vec3(0.5, 0.7, 1.0)) light_1 = PointLight(pos=Vec3(-1, 10, 0.35), color=Vec3(0.25, 0.25, 0.25)) lights = [light_1] scene = Scene(world, ambient=ambient, lights=lights, background=background) camera = Camera(look_from=Vec3(-0.5, 3, 10), look_at=Vec3(0, 1.0, 0), vup=Vec3(0, 1, 0), vert_fov=20, aperature=0.0, focus_dist=20) return {'scene': 
scene, 'camera': camera}
StarcoderdataPython
1609564
import hashlib from copy import deepcopy from secrets import token_bytes from ec import (G1FromBytes, G1Generator, G1Infinity, G2FromBytes, G2Generator, G2Infinity, JacobianPoint, default_ec, default_ec_twist, sign_Fq2, twist, untwist, y_for_x) from fields import Fq, Fq2, Fq6, Fq12 from hash_to_field import expand_message_xmd from hkdf import expand, extract from op_swu_g2 import g2_map from pairing import ate_pairing from private_key import PrivateKey from schemes import AugSchemeMPL, BasicSchemeMPL, PopSchemeMPL G1Element = JacobianPoint G2Element = JacobianPoint def test_hkdf(): def test_one_case( ikm_hex, salt_hex, info_hex, prk_expected_hex, okm_expected_hex, L ): prk = extract(bytes.fromhex(salt_hex), bytes.fromhex(ikm_hex)) okm = expand(L, prk, bytes.fromhex(info_hex)) assert len(bytes.fromhex(prk_expected_hex)) == 32 assert L == len(bytes.fromhex(okm_expected_hex)) assert prk == bytes.fromhex(prk_expected_hex) assert okm == bytes.fromhex(okm_expected_hex) test_case_1 = ( "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", "000102030405060708090a0b0c", "f0f1f2f3f4f5f6f7f8f9", "077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5", "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865", 42, ) test_case_2 = ( "000102030405060708090a0b0c0d0e0f" "101112131415161718191a1b1c1d1e1f" "202122232425262728292a2b2c2d2e2f" "303132333435363738393a3b3c3d3e3f" "404142434445464748494a4b4c4d4e4f", "606162636465666768696a6b6c6d6e6f" "707172737475767778797a7b7c7d7e7f" "808182838485868788898a8b8c8d8e8f" "909192939495969798999a9b9c9d9e9f" "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf", "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" "e0e1e2e3e4e5e6e7e8e9eaebecedeeef" "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", "06a6b88c5853361a06104c9ceb35b45cef760014904671014a193f40c15fc244", "b11e398dc80327a1c8e7f78c596a4934" "4f012eda2d4efad8a050cc4c19afa97c" "59045a99cac7827271cb41c65e590e09" 
"da3275600c2f09b8367793a9aca3db71" "cc30c58179ec3e87c14c01d5c1f3434f" "1d87", 82, ) test_case_3 = ( "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", "", "", "<KEY>", "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8", 42, ) test_case_4 = ( "<KEY>", "53d8e19b", "", "<KEY>", "<KEY>", 64, ) test_one_case(*test_case_1) test_one_case(*test_case_2) test_one_case(*test_case_3) test_one_case(*test_case_4) def test_eip2333(): def test_one_case(seed_hex, master_sk_hex, child_sk_hex, child_index): master = BasicSchemeMPL.key_gen(bytes.fromhex(seed_hex)) child = BasicSchemeMPL.derive_child_sk(master, child_index) assert len(bytes(master)) == 32 assert len(bytes(child)) == 32 assert bytes(master) == bytes.fromhex(master_sk_hex) assert bytes(child) == bytes.fromhex(child_sk_hex) test_case_1 = ( "3141592653589793238462643383279502884197169399375105820974944592", "4ff5e145590ed7b71e577bb04032396d1619ff41cb4e350053ed2dce8d1efd1c", "<KEY>", 3141592653, ) test_case_2 = ( "0099FF991111002299DD7744EE3355BBDD8844115566CC55663355668888CC00", "<KEY>", "<KEY>", 4294967295, ) test_case_3 = ( "<KEY>", "614d21b10c0e4996ac0608e0e7452d5720d95d20fe03c59a3321000a42432e1a", "08de7136e4afc56ae3ec03b20517d9c1232705a747f588fd17832f36ae337526", 42, ) test_case_intermediate = ( "c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", "<KEY>", "1a1de3346883401f1e3b2281be5774080edb8e5ebe6f776b0f7af9fea942553a", 0, ) test_one_case(*test_case_1) test_one_case(*test_case_2) test_one_case(*test_case_3) test_one_case(*test_case_intermediate) def test_fields(): a = Fq(17, 30) b = Fq(17, -18) c = Fq2(17, a, b) d = Fq2(17, a + a, Fq(17, -5)) e = c * d f = e * d assert f != e e_sq = e * e e_sqrt = e_sq.modsqrt() assert pow(e_sqrt, 2) == e_sq a2 = Fq( 172487123095712930573140951348, 3012492130751239573498573249085723940848571098237509182375, ) b2 = Fq(172487123095712930573140951348, 
3432984572394572309458723045723849) c2 = Fq2(172487123095712930573140951348, a2, b2) assert b2 != c2 g = Fq6(17, c, d, d * d * c) h = Fq6(17, a + a * c, c * b * a, b * b * d * Fq(17, 21)) i = Fq12(17, g, h) assert ~(~i) == i assert (~(i.root)) * i.root == Fq6.one(17) x = Fq12(17, Fq6.zero(17), i.root) assert (~x) * x == Fq12.one(17) j = Fq6(17, a + a * c, Fq2.zero(17), Fq2.zero(17)) j2 = Fq6(17, a + a * c, Fq2.zero(17), Fq2.one(17)) assert j == (a + a * c) assert j2 != (a + a * c) assert j != j2 # Test frob_coeffs one = Fq(default_ec.q, 1) two = one + one a = Fq2(default_ec.q, two, two) b = Fq6(default_ec.q, a, a, a) c = Fq12(default_ec.q, b, b) for base in (a, b, c): for expo in range(1, base.extension): assert base.qi_power(expo) == pow(base, pow(default_ec.q, expo)) def test_ec(): q = default_ec.q g = G1Generator() assert g.is_on_curve() assert 2 * g == g + g assert (3 * g).is_on_curve() assert 3 * g == g + g + g g2 = G2Generator() assert g2.x * (Fq(q, 2) * g2.y) == Fq(q, 2) * (g2.x * g2.y) assert g2.is_on_curve() s = g2 + g2 assert untwist(twist(s.to_affine())) == s.to_affine() assert untwist(5 * twist(s.to_affine())) == (5 * s).to_affine() assert 5 * twist(s.to_affine()) == twist((5 * s).to_affine()) assert s.is_on_curve() assert g2.is_on_curve() assert g2 + g2 == 2 * g2 assert g2 * 5 == (g2 * 2) + (2 * g2) + g2 y = y_for_x(g2.x, default_ec_twist, Fq2) assert y == g2.y or -y == g2.y g_j = G1Generator() g2_j = G2Generator() g2_j2 = G2Generator() * 2 assert g.to_affine().to_jacobian() == g assert (g_j * 2).to_affine() == g.to_affine() * 2 assert (g2_j + g2_j2).to_affine() == g2.to_affine() * 3 def test_edge_case_sign_Fq2(): q = default_ec.q a = Fq(q, 62323) test_case_1 = Fq2(q, a, Fq(q, 0)) test_case_2 = Fq2(q, -a, Fq(q, 0)) assert sign_Fq2(test_case_1) != sign_Fq2(test_case_2) test_case_3 = Fq2(q, Fq(q, 0), a) test_case_4 = Fq2(q, Fq(q, 0), -a) assert sign_Fq2(test_case_3) != sign_Fq2(test_case_4) def test_xmd(): msg = token_bytes(48) dst = token_bytes(16) ress 
= {} for length in range(16, 8192): result = expand_message_xmd(msg, dst, length, hashlib.sha512) assert length == len(result) key = result[:16] ress[key] = ress.get(key, 0) + 1 assert all(x == 1 for x in ress.values()) def test_swu(): dst_1 = b"QUUX-V01-CS02-with-BLS12381G2_XMD:SHA-256_SSWU_RO_" msg_1 = b"abcdef0123456789" res = g2_map(msg_1, dst_1).to_affine() assert ( res.x[0].value == 0x121982811D2491FDE9BA7ED31EF9CA474F0E1501297F68C298E9F4C0028ADD35AEA8BB83D53C08CFC007C1E005723CD0 ) assert ( res.x[1].value == 0x190D119345B94FBD15497BCBA94ECF7DB2CBFD1E1FE7DA034D26CBBA169FB3968288B3FAFB265F9EBD380512A71C3F2C ) assert ( res.y[0].value == 0x05571A0F8D3C08D094576981F4A3B8EDA0A8E771FCDCC8ECCEAF1356A6ACF17574518ACB506E435B639353C2E14827C8 ) assert ( res.y[1].value == 0x0BB5E7572275C567462D91807DE765611490205A941A5A6AF3B1691BFE596C31225D3AABDF15FAFF860CB4EF17C7C3BE ) def test_elements(): i1 = int.from_bytes(bytes([1, 2]), byteorder="big") i2 = int.from_bytes(bytes([3, 1, 4, 1, 5, 9]), byteorder="big") b1 = i1 b2 = i2 g1 = G1Generator() g2 = G2Generator() u1 = G1Infinity() u2 = G2Infinity() x1 = g1 * b1 x2 = g1 * b2 y1 = g2 * b1 y2 = g2 * b2 # G1 assert x1 != x2 assert x1 * b1 == b1 * x1 assert x1 * b1 != x1 * b2 left = x1 + u1 right = x1 assert left == right assert x1 + x2 == x2 + x1 assert x1 + x1.negate() == u1 assert x1 == G1FromBytes(bytes(x1)) copy = deepcopy(x1) assert x1 == copy x1 += x2 assert x1 != copy # G2 assert y1 != y2 assert y1 * b1 == b1 * y1 assert y1 * b1 != y1 * b2 assert y1 + u2 == y1 assert y1 + y2 == y2 + y1 assert y1 + y1.negate() == u2 assert y1 == G2FromBytes(bytes(y1)) copy = deepcopy(y1) assert y1 == copy y1 += y2 assert y1 != copy # pairing operation pair = ate_pairing(x1, y1) assert pair != ate_pairing(x1, y2) assert pair != ate_pairing(x2, y1) copy = deepcopy(pair) assert pair == copy pair = None assert pair != copy sk = 728934712938472938472398074 pk = sk * g1 Hm = y2 * 12371928312 + y2 * 12903812903891023 sig = Hm * sk assert 
ate_pairing(g1, sig) == ate_pairing(pk, Hm) def test_achi_vectors_1(): seed1: bytes = bytes([0x00] * 32) seed2: bytes = bytes([0x01] * 32) msg1: bytes = bytes([7, 8, 9]) msg2: bytes = bytes([10, 11, 12]) sk1 = BasicSchemeMPL.key_gen(seed1) sk2 = BasicSchemeMPL.key_gen(seed2) assert ( bytes(sk1).hex() == "4a353be3dac091a0a7e640620372f5e1e2e4401717c1e79cac6ffba8f6905604" ) assert ( bytes(sk1.get_g1()).hex() == "<KEY>" ) sig1 = BasicSchemeMPL.sign(sk1, msg1) sig2 = BasicSchemeMPL.sign(sk2, msg2) assert ( bytes(sig1).hex() == "b8faa6d6a3881c9fdbad803b170d70ca5cbf1e6ba5a586262df368c75acd1d1ffa3ab6ee21c71f844494659878f5eb230c958dd576b08b8564aad2ee0992e85a1e565f299cd53a285de729937f70dc176a1f01432129bb2b94d3d5031f8065a1" ) assert bytes(sig2).hex() == ( "a9c4d3e689b82c7ec7e838dac2380cb014f9a08f6cd6ba044c263746e39a8f7a60ffee4afb7" "8f146c2e421360784d58f0029491e3bd8ab84f0011d258471ba4e87059de295d9aba845c044e" "e83f6cf2411efd379ef38bf4cf41d5f3c0ae1205d" ) agg_sig_1 = BasicSchemeMPL.aggregate([sig1, sig2]) assert bytes(agg_sig_1).hex() == ( "aee003c8cdaf3531b6b0ca354031b0819f7586b5846796615aee8108fec75ef838d181f9d24" "4a94d195d7b0231d4afcf06f27f0cc4d3c72162545c240de7d5034a7ef3a2a03c0159de982fb" "c2e7790aeb455e27beae91d64e077c70b5506dea3" ) assert BasicSchemeMPL.aggregate_verify( [sk1.get_g1(), sk2.get_g1()], [msg1, msg2], agg_sig_1 ) msg3: bytes = bytes([1, 2, 3]) msg4: bytes = bytes([1, 2, 3, 4]) msg5: bytes = bytes([1, 2]) sig3 = BasicSchemeMPL.sign(sk1, msg3) sig4 = BasicSchemeMPL.sign(sk1, msg4) sig5 = BasicSchemeMPL.sign(sk2, msg5) agg_sig_2 = BasicSchemeMPL.aggregate([sig3, sig4, sig5]) assert BasicSchemeMPL.aggregate_verify( [sk1.get_g1(), sk1.get_g1(), sk2.get_g1()], [msg3, msg4, msg5], agg_sig_2 ) assert bytes(agg_sig_2).hex() == ( "a0b1378d518bea4d1100adbc7bdbc4ff64f2c219ed6395cd36fe5d2aa44a4b8e710b607afd9" "65e505a5ac3283291b75413d09478ab4b5cfbafbeea366de2d0c0bcf61deddaa521f6020460f" "d547ab37659ae207968b545727beba0a3c5572b9c" ) def test_achi_vectors_3(): seed1: 
bytes = bytes([0x04] * 32) sk1 = PopSchemeMPL.key_gen(seed1) proof = PopSchemeMPL.pop_prove(sk1) assert ( bytes(proof).hex() == "84f709159435f0dc73b3e8bf6c78d85282d19231555a8ee3b6e2573aaf66872d9203fefa1ef" "700e34e7c3f3fb28210100558c6871c53f1ef6055b9f06b0d1abe22ad584ad3b957f3018a8f5" "8227c6c716b1e15791459850f2289168fa0cf9115" ) def test_pyecc_vectors(): ref_sig1Basic = b"\x96\xba4\xfa\xc3<\x7f\x12\x9d`*\x0b\xc8\xa3\xd4?\x9a\xbc\x01N\xce\xaa\xb75\x91F\xb4\xb1P\xe5{\x80\x86Es\x8f5g\x1e\x9e\x10\xe0\xd8b\xa3\x0c\xabp\x07N\xb5\x83\x1d\x13\xe6\xa5\xb1b\xd0\x1e\xeb\xe6\x87\xd0\x16J\xdb\xd0\xa8d7\n|\"*'h\xd7pM\xa2T\xf1\xbf\x18#f[\xc26\x1f\x9d\xd8\xc0\x0e\x99" ref_sig2Basic = b'\xa4\x02y\t2\x13\x0fvj\xf1\x1b\xa7\x16Sf\x83\xd8\xc4\xcf\xa5\x19G\xe4\xf9\x08\x1f\xed\xd6\x92\xd6\xdc\x0c\xac[\x90K\xee^\xa6\xe2Ui\xe3m{\xe4\xcaY\x06\x9a\x96\xe3K\x7fp\x07X\xb7\x16\xf9IJ\xaaY\xa9nt\xd1J;U*\x9ak\xc1)\xe7\x17\x19[\x9d`\x06\xfdm\\\xefGh\xc0"\xe0\xf71j\xbf' ref_sigABasic = b"\x98|\xfd;\xcdb(\x02\x87\x02t\x83\xf2\x9cU$^\xd81\xf5\x1d\xd6\xbd\x99\x9ao\xf1\xa1\xf1\xf1\xf0\xb6Gw\x8b\x01g5\x9cqPUX\xa7n\x15\x8ef\x18\x1e\xe5\x12Y\x05\xa6B$k\x01\xe7\xfa^\xe5=h\xa4\xfe\x9b\xfb)\xa8\xe2f\x01\xf0\xb9\xadW}\xdd\x18\x87js1|!n\xa6\x1fC\x04\x14\xecQ\xc5" ref_sig1Aug = b'\x81\x80\xf0,\xcbr\xe9"\xb1R\xfc\xed\xbe\x0e\x1d\x19R\x105Opp6X\xe8\xe0\x8c\xbe\xbf\x11\xd4\x97\x0e\xabj\xc3\xcc\xf7\x15\xf3\xfb\x87m\xf9\xa9yz\xbd\x0c\x1a\xf6\x1a\xae\xad\xc9,,\xfe\\\nV\xc1F\xcc\x8c?qQ\xa0s\xcf_\x16\xdf8$g$\xc4\xae\xd7?\xf3\x0e\xf5\xda\xa6\xaa\xca\xed\x1a&\xec\xaa3k' ref_sig2Aug = b'\x99\x11\x1e\xea\xfbA-\xa6\x1eL7\xd3\xe8\x06\xc6\xfdj\xc9\xf3\x87\x0eT\xda\x92"\xbaNIH"\xc5\xb7eg1\xfazdY4\xd0KU\x9e\x92a\xb8b\x01\xbb\xeeW\x05RP\xa4Y\xa2\xda\x10\xe5\x1f\x9c\x1aiA)\x7f\xfc]\x97\nUr6\xd0\xbd\xeb|\xf8\xff\x18\x80\x0b\x08c8q\xa0\xf0\xa7\xeaB\xf4t\x80' ref_sigAAug = 
b"\x8c]\x03\xf9\xda\xe7~\x19\xa5\x94Z\x06\xa2\x14\x83n\xdb\x8e\x03\xb8QR]\x84\xb9\xded@\xe6\x8f\xc0\xcas\x03\xee\xed9\r\x86<\x9bU\xa8\xcfmY\x14\n\x01\xb5\x88G\x88\x1e\xb5\xafgsMD\xb2UVF\xc6al9\xab\x88\xd2S)\x9a\xcc\x1e\xb1\xb1\x9d\xdb\x9b\xfc\xbev\xe2\x8a\xdd\xf6q\xd1\x16\xc0R\xbb\x18G" ref_sig1Pop = b"\x95P\xfbN\x7f~\x8c\xc4\xa9\x0b\xe8V\n\xb5\xa7\x98\xb0\xb20\x00\xb6\xa5J!\x17R\x02\x10\xf9\x86\xf3\xf2\x81\xb3v\xf2Y\xc0\xb7\x80b\xd1\xeb1\x92\xb3\xd9\xbb\x04\x9fY\xec\xc1\xb0:pI\xebf^\r\xf3d\x94\xaeL\xb5\xf1\x13l\xca\xee\xfc\x99X\xcb0\xc33==C\xf0qH\xc3\x86)\x9a{\x1b\xfc\r\xc5\xcf|" ref_sig2Pop = b"\xa6\x906\xbc\x11\xae^\xfc\xbfa\x80\xaf\xe3\x9a\xdd\xde~'s\x1e\xc4\x02W\xbf\xdc<7\xf1{\x8d\xf6\x83\x06\xa3N\xbd\x10\xe9\xe3*5%7P\xdf\\\x87\xc2\x14/\x82\x07\xe8\xd5eG\x12\xb4\xe5T\xf5\x85\xfbhF\xff8\x04\xe4)\xa9\xf8\xa1\xb4\xc5ku\xd0\x86\x9e\xd6u\x80\xd7\x89\x87\x0b\xab\xe2\xc7\xc8\xa9\xd5\x1e{*" ref_sigAPop = b"\xa4\xeat+\xcd\xc1U>\x9c\xa4\xe5`\xbe~^ln\xfajd\xdd\xdf\x9c\xa3\xbb(T#=\x85\xa6\xaa\xc1\xb7n\xc7\xd1\x03\xdbN3\x14\x8b\x82\xaf\x99#\xdb\x05\x93Jn\xce\x9aq\x01\xcd\x8a\x9dG\xce'\x97\x80V\xb0\xf5\x90\x00!\x81\x8cEi\x8a\xfd\xd6\xcf\x8ako\x7f\xee\x1f\x0bCqoU\xe4\x13\xd4\xb8z`9" secret1 = bytes([1] * 32) secret2 = bytes([x * 314159 % 256 for x in range(32)]) sk1 = PrivateKey.from_bytes(secret1) sk2 = PrivateKey.from_bytes(secret2) msg = bytes([3, 1, 4, 1, 5, 9]) sig1Basic = BasicSchemeMPL.sign(sk1, msg) sig2Basic = BasicSchemeMPL.sign(sk2, msg) sigABasic = BasicSchemeMPL.aggregate([sig1Basic, sig2Basic]) sig1Aug = AugSchemeMPL.sign(sk1, msg) sig2Aug = AugSchemeMPL.sign(sk2, msg) sigAAug = AugSchemeMPL.aggregate([sig1Aug, sig2Aug]) sig1Pop = PopSchemeMPL.sign(sk1, msg) sig2Pop = PopSchemeMPL.sign(sk2, msg) sigAPop = PopSchemeMPL.aggregate([sig1Pop, sig2Pop]) assert bytes(sig1Basic) == ref_sig1Basic print(bytes(sig1Basic).hex()) assert bytes(sig2Basic) == ref_sig2Basic assert bytes(sigABasic) == ref_sigABasic assert bytes(sig1Aug) == ref_sig1Aug assert bytes(sig2Aug) == 
ref_sig2Aug assert bytes(sigAAug) == ref_sigAAug assert bytes(sig1Pop) == ref_sig1Pop assert bytes(sig2Pop) == ref_sig2Pop assert bytes(sigAPop) == ref_sigAPop def test_vectors_invalid(): # Invalid inputs from https://github.com/algorand/bls_sigs_ref/blob/master/python-impl/serdesZ.py invalid_inputs_1 = [ # infinity points: too short "c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", # infinity points: not all zeros "c00000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000", # bad tags "3a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa", "<KEY>", "<KEY>", # wrong length for compresed point "<KEY>", "<KEY>", # invalid x-coord "<KEY>", # invalid elm of Fp --- equal to p (must be strictly less) "9a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", ] invalid_inputs_2 = [ # infinity points: too short "c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", # infinity points: not all zeros "c00000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000", # bad tags "3a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 
"7a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "fa0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", # wrong length for compressed point "9a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "9a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", # invalid x-coord "<KEY>", # invalid elm of Fp --- equal to p (must be strictly less) "9a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "<KEY>", ] for s in invalid_inputs_1: bytes_ = bytes.fromhex(s) try: g1 = G1FromBytes(bytes_) assert g1 is not None assert False, "Failed to disallow creation of G1 element." except Exception: pass for s in invalid_inputs_2: bytes_ = bytes.fromhex(s) try: g2 = G2FromBytes(bytes_) assert g2 is not None assert False, "Failed to disallow creation of G2 element." 
except Exception: pass def test_readme(): seed: bytes = bytes( [ 0, 50, 6, 244, 24, 199, 1, 25, 52, 88, 192, 19, 18, 12, 89, 6, 220, 18, 102, 58, 209, 82, 12, 62, 89, 110, 182, 9, 44, 20, 254, 22, ] ) sk: PrivateKey = AugSchemeMPL.key_gen(seed) pk: G1Element = sk.get_g1() message: bytes = bytes([1, 2, 3, 4, 5]) signature: G2Element = AugSchemeMPL.sign(sk, message) ok: bool = AugSchemeMPL.verify(pk, message, signature) assert ok sk_bytes: bytes = bytes(sk) # 32 bytes pk_bytes: bytes = bytes(pk) # 48 bytes signature_bytes: bytes = bytes(signature) # 96 bytes print(sk_bytes.hex(), pk_bytes.hex(), signature_bytes.hex()) sk = PrivateKey.from_bytes(sk_bytes) assert sk is not None pk = G1FromBytes(pk_bytes) assert pk is not None signature: G2Element = G2FromBytes(signature_bytes) seed = bytes([1]) + seed[1:] sk1: PrivateKey = AugSchemeMPL.key_gen(seed) seed = bytes([2]) + seed[1:] sk2: PrivateKey = AugSchemeMPL.key_gen(seed) message2: bytes = bytes([1, 2, 3, 4, 5, 6, 7]) pk1: G1Element = sk1.get_g1() sig1: G2Element = AugSchemeMPL.sign(sk1, message) pk2: G1Element = sk2.get_g1() sig2: G2Element = AugSchemeMPL.sign(sk2, message2) agg_sig: G2Element = AugSchemeMPL.aggregate([sig1, sig2]) ok = AugSchemeMPL.aggregate_verify([pk1, pk2], [message, message2], agg_sig) assert ok seed = bytes([3]) + seed[1:] sk3: PrivateKey = AugSchemeMPL.key_gen(seed) pk3: G1Element = sk3.get_g1() message3: bytes = bytes([100, 2, 254, 88, 90, 45, 23]) sig3: G2Element = AugSchemeMPL.sign(sk3, message3) agg_sig_final: G2Element = AugSchemeMPL.aggregate([agg_sig, sig3]) ok = AugSchemeMPL.aggregate_verify( [pk1, pk2, pk3], [message, message2, message3], agg_sig_final ) assert ok pop_sig1: G2Element = PopSchemeMPL.sign(sk1, message) pop_sig2: G2Element = PopSchemeMPL.sign(sk2, message) pop_sig3: G2Element = PopSchemeMPL.sign(sk3, message) pop1: G2Element = PopSchemeMPL.pop_prove(sk1) pop2: G2Element = PopSchemeMPL.pop_prove(sk2) pop3: G2Element = PopSchemeMPL.pop_prove(sk3) ok = 
PopSchemeMPL.pop_verify(pk1, pop1) assert ok ok = PopSchemeMPL.pop_verify(pk2, pop2) assert ok ok = PopSchemeMPL.pop_verify(pk3, pop3) assert ok pop_sig_agg: G2Element = PopSchemeMPL.aggregate([pop_sig1, pop_sig2, pop_sig3]) ok = PopSchemeMPL.fast_aggregate_verify([pk1, pk2, pk3], message, pop_sig_agg) assert ok pop_agg_pk: G1Element = pk1 + pk2 + pk3 ok = PopSchemeMPL.verify(pop_agg_pk, message, pop_sig_agg) assert ok pop_agg_sk: PrivateKey = PrivateKey.aggregate([sk1, sk2, sk3]) ok = PopSchemeMPL.sign(pop_agg_sk, message) == pop_sig_agg assert ok master_sk: PrivateKey = AugSchemeMPL.key_gen(seed) child: PrivateKey = AugSchemeMPL.derive_child_sk(master_sk, 152) grandchild: PrivateKey = AugSchemeMPL.derive_child_sk(child, 952) assert grandchild is not None master_pk: G1Element = master_sk.get_g1() child_u: PrivateKey = AugSchemeMPL.derive_child_sk_unhardened(master_sk, 22) grandchild_u: PrivateKey = AugSchemeMPL.derive_child_sk_unhardened(child_u, 0) child_u_pk: G1Element = AugSchemeMPL.derive_child_pk_unhardened(master_pk, 22) grandchild_u_pk: G1Element = AugSchemeMPL.derive_child_pk_unhardened(child_u_pk, 0) ok = grandchild_u_pk == grandchild_u.get_g1() assert ok test_hkdf() test_eip2333() test_fields() test_ec() test_xmd() test_swu() test_edge_case_sign_Fq2() test_elements() test_achi_vectors_1() test_achi_vectors_3() test_pyecc_vectors() test_vectors_invalid() test_readme()
StarcoderdataPython
1637071
# -*- coding: utf-8 -*-
"""Class-based views for Django's project-wide error handlers.

Each view pairs a conventional ``<status>.html`` template with a response
class carrying the matching HTTP status code, so they can be assigned to
``handler400`` / ``handler403`` / ``handler404`` / ``handler500`` in the
root URLconf.
"""
from __future__ import unicode_literals

from django.views.generic.base import TemplateView

from groundworks.response import (
    TemplateBadRequestResponse, TemplateForbiddenResponse,
    TemplateNotFoundResponse, TemplateServerErrorResponse
)


class BadRequestView(TemplateView):
    """A class based view for use as a handler400."""
    template_name = "400.html"
    response_class = TemplateBadRequestResponse


class ForbiddenView(TemplateView):
    """A class based view for use as a handler403.

    BUGFIX: this previously advertised itself as a "handler402" and used
    ``402.html``.  Django defines no ``handler402``; HTTP 402 is "Payment
    Required", while "Forbidden" — which is what
    ``TemplateForbiddenResponse`` returns — is status 403.
    """
    template_name = "403.html"
    response_class = TemplateForbiddenResponse


class NotFoundView(TemplateView):
    """A class based view for use as a handler404."""
    template_name = "404.html"
    response_class = TemplateNotFoundResponse


class ServerErrorView(TemplateView):
    """A class based view for use as a handler500."""
    template_name = "500.html"
    response_class = TemplateServerErrorResponse
StarcoderdataPython
1654919
from django.contrib import admin

from .models import HeroInfo, BookInfo


@admin.register(BookInfo)
class BookInfoAdmin(admin.ModelAdmin):
    """Change-list configuration for books: id, title, publish date, counters."""
    list_display = ['id', 'btitle', 'bpub_date', 'bread', 'bcomment']


@admin.register(HeroInfo)
class HeroInfoAdmin(admin.ModelAdmin):
    """Change-list configuration for heroes: id, name, gender, comment, book FK."""
    list_display = ['id', 'hname', 'hgender', 'hcomment', 'hbook_id']
StarcoderdataPython
1720270
import numpy as np def normalize(v): norm = np.sqrt((v**2).sum()) if norm == 0: return v return v / norm
StarcoderdataPython
1658192
#!/usr/bin/python3
"""Fuzzy-logic obstacle-avoiding / food-seeking robot for PySimbot.

Each frame, ``MyRobot.update`` evaluates a list of fuzzy rules; every rule
contributes ``membership * move`` and ``membership * turn`` to the summed
motion command.
"""
import os, platform

from kivy.app import App
from numpy.core.defchararray import count  # NOTE: shadowed by the module-level `count` below

if platform.system() == "Linux" or platform.system() == "Darwin":
    os.environ["KIVY_VIDEO"] = "ffpyplayer"

from pysimbotlib.core import PySimbotApp, Robot, Simbot
from kivy.logger import Logger
from kivy.config import Config
import numpy as np
import time

# Force the program to show user's log only for "info" level or more.
# The debug log will be disabled.
Config.set('kivy', 'log_level', 'info')

# START POINT (pixels).
START_POINT = (20, 560)

# Update robot every 1/15 seconds (15 frames per sec).
REFRESH_INTERVAL = 1 / 15

# Frame counter plus anti-stuck bookkeeping (rotage_var / count_struck are
# only referenced by code that has been retired; kept for compatibility).
count = 1
rotage_var = 1
count_struck = 0


class MyRobot(Robot):
    def __init__(self):
        super(MyRobot, self).__init__()
        self.pos = START_POINT

    def update(self):
        """Evaluate all fuzzy rules for this frame and issue one turn/move.

        ``self.ir_values`` holds distance-sensor readings indexed relative to
        the heading (0 = front; negative/positive indices are presumably the
        left/right side sensors and 3..5 the rear — TODO confirm against the
        pysimbotlib sensor layout).  ``self.smell()`` is the angle to the
        food target in degrees.
        """
        global count, rotage_var, count_struck
        self.ir_values = self.distance()
        self.target = self.smell()
        Logger.info("Distance: {0}".format(self.distance()))
        Logger.info("Stuck: {0}".format(self.stuck))

        # Parallel lists: rules[i] is a fuzzy membership in [0, 1] that
        # weights the contribution of (moves[i], turns[i]).
        rules = list()
        turns = list()
        moves = list()

        # --- obstacle rules ---
        # Move forward fast when front and both front diagonals are clear.
        rules.append(self.far_ir(0) * self.far_ir(-2) * self.far_ir(2))
        moves.append(15)
        turns.append(0)
        # Front clear but a diagonal is close: advance more carefully.
        rules.append(self.far_ir(0) * np.min([self.near_ir(-2), self.near_ir(2)]))
        moves.append(8)
        turns.append(0)
        rules.append(self.near_ir(0))
        moves.append(3)
        turns.append(0)
        # Back off for safety when the front is blocked.
        rules.append(self.near_ir(0))
        moves.append(-5)
        turns.append(0)
        rules.append(self.near_ir(0) * self.near_ir(-1) * self.near_ir(1))
        moves.append(-8)
        turns.append(5)
        # Stuck against something mid-range ahead: turn around.
        rules.append(self.stuck * np.max([self.mid_ir(0), self.mid_ir(1), self.mid_ir(-1)]))
        moves.append(0)
        turns.append(180)
        # Rear sensors close and front clear: creep forward.
        rules.append(self.near_ir(3) * self.near_ir(4) * self.near_ir(5) * self.far_ir(0))
        moves.append(3)
        turns.append(0)
        # Left side close, right clear: steer right.
        rules.append(self.near_ir(-2) * self.far_ir(2))
        moves.append(3)
        turns.append(20)
        rules.append(self.near_ir(-1) * self.far_ir(1))
        moves.append(3)
        turns.append(10)
        # Right side close, left clear: steer left.
        rules.append(self.far_ir(-2) * self.near_ir(2))
        moves.append(3)
        turns.append(-20)
        rules.append(self.far_ir(-1) * self.near_ir(1))
        moves.append(3)
        turns.append(-10)

        # --- food rules ---
        # Food straight ahead and path clear: head for it.
        rules.append(self.smell_center() * self.far_ir(0))
        moves.append(10)
        turns.append(0)
        # Food ahead but front blocked: back out and pivot.
        rules.append(self.smell_center() * np.min([self.far_ir(-2), self.far_ir(2), self.near_ir(0)]))
        moves.append(-20)
        turns.append(10)
        rules.append(self.smell_left() * self.far_ir(0) * np.min([self.far_ir(-2), self.far_ir(2)]))
        moves.append(0)
        turns.append(-12)
        rules.append(self.smell_right() * self.far_ir(0) * np.min([self.far_ir(-2), self.far_ir(2)]))
        moves.append(0)
        turns.append(15)
        rules.append(self.smell_back() * self.far_ir(4) * self.far_ir(0))
        moves.append(0)
        turns.append(180)

        # --- anti-stuck jiggle: every 20 frames add a small reverse-and-turn ---
        if count % 20 == 0:
            rules.append(0.5)
            moves.append(-3)
            turns.append(10)

        # Weighted sum of all rule contributions (zero-order Sugeno style).
        ans_turn = 0.0
        ans_move = 0.0
        for r, t, m in zip(rules, turns, moves):
            ans_turn += t * r
            ans_move += m * r
        self.turn(int(ans_turn))
        self.move(int(ans_move))
        count += 1

    def near_ir(self, x):
        """Fuzzy membership in [0, 1] of "sensor *x* sees a NEAR obstacle"."""
        self.ir = self.ir_values[x]
        # Even-index (axis-aligned) sensors use wider thresholds than the
        # odd-index (diagonal) ones.
        if x % 2 == 0:
            self.x1 = 8.0
            self.x2 = 40.0
        else:
            self.x1 = 14.0
            self.x2 = 30.0
        if self.ir <= self.x1:
            return 1.0
        elif self.ir >= self.x2:
            return 0.0
        # BUGFIX: ramp DOWN from 1 at x1 to 0 at x2.  The original returned
        # (ir - x1) / (x2 - x1), which rose toward 1 at the FAR end and
        # contradicted the two endpoint branches above.
        return (self.x2 - self.ir) / (self.x2 - self.x1)

    def mid_ir(self, x):
        """Triangular-ish membership of "sensor *x* sees a MID-range obstacle"."""
        self.ir = self.ir_values[x]
        self.x1 = 10.0
        self.x2 = 20.0
        self.x3 = 40.0
        if self.ir <= self.x1:
            return 0.0
        elif self.ir > self.x1 and self.ir < self.x2:
            return (self.ir - self.x1) / (self.x2 - self.x1)
        elif self.ir >= self.x2 and self.ir < self.x3:
            return 1.0 - (self.ir - self.x2) / (self.x3 - self.x2)
        else:
            return 0.0

    def far_ir(self, x):
        """Complement of near_ir: membership of "sensor *x* is clear"."""
        return 1.0 - self.near_ir(x)

    def smell_right(self):
        """Membership of "food is to the right" (target angle in degrees)."""
        target = self.smell()
        LB = 5
        RB = 135
        if target <= LB:
            return 0.0
        elif target >= RB:
            return 1.0
        # BUGFIX: ramp UP from 0 at LB to 1 at RB; the original formula
        # abs((RB - target) / (RB - LB)) decreased, contradicting the
        # endpoint branches above (smell_left already had it right).
        return (target - LB) / (RB - LB)

    def smell_center(self):
        """Triangular membership of "food is straight ahead": 1 at 0 deg."""
        target = self.smell()
        LB = -15
        MB = 0
        RB = 15
        # BUGFIX: the original first branch (`if target >= LB: return 0.0`
        # with LB = -15) returned 0.0 for every angle >= -15 deg, so this
        # membership was identically zero and the move-toward-food rules
        # never fired.
        if target <= LB or target >= RB:
            return 0.0
        if target == MB:
            return 1.0
        if target > MB:
            return (RB - target) / (RB - MB)
        return (target - LB) / (MB - LB)

    def smell_left(self):
        """Membership of "food is to the left" (target angle in degrees)."""
        target = self.smell()
        LB = -135
        RB = -5
        if target <= LB:
            return 1.0
        elif target >= RB:
            return 0.0
        else:
            return abs((target - RB) / (LB - RB))

    def smell_back(self):
        """Membership of "food is behind": 1 at +/-180 deg, 0 inside +/-135."""
        target = self.smell()
        LB = -135
        RB = 135
        # BUGFIX: the original returned 0.0 whenever target >= -135 — i.e.
        # for every possible angle except the unreachable `target == 180`
        # equality branch — so this membership never fired.
        if target >= RB:
            # 135..180: ramp up to 1 at 180.
            return (target - RB) / (180 - RB)
        if target <= LB:
            # -180..-135: ramp up to 1 at -180 ((LB + 180) == 45).
            return (LB - target) / (LB + 180)
        return 0.0


if __name__ == '__main__':
    app = PySimbotApp(map="default", robot_cls=MyRobot, num_robots=1,
                      interval=REFRESH_INTERVAL, enable_wasd_control=True,
                      save_wasd_history=True)
    app.run()
1637211
"""Interpolate a SOWFA velocity slice onto a regular grid and plot it in 3-D.

Reads a raw along-wind slice (x, y, z, u, v, w columns), computes the
velocity magnitude |U|, grids it with cubic interpolation, and renders it
via the project-local PlottingTool helpers.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.mlab as mlab
from scipy.interpolate import griddata

caseName = 'ALM_N_H'
# './ABL_N_H/Slices/20000.9038025/U_alongWind_Slice.raw'
# Raw slice: two header lines, then columns x, y, z, u, v, w.
data = np.genfromtxt('I:/SOWFA Data/' + caseName + '/Slices/20500.9078025/U_alongWind_Slice.raw',
                     skip_header=2)
x, y, z = data[:, 0], data[:, 1], data[:, 2]
# Scattered (x, z) sample locations for the 2-D interpolation below.
pointsXZ = np.vstack((data[:, 0], data[:, 2])).T
u, v, w = data[:, 3], data[:, 4], data[:, 5]

# Report any rows containing NaN (corrupt samples in the raw file).
# BUGFIX: the original used `if np.nan in row:` which is always False —
# NaN never compares equal to itself, so a membership test cannot find it.
nan_rows = np.isnan(data).any(axis=1)
for row in data[nan_rows]:
    print(row)

# Velocity magnitude |U| per sample, kept as an (N, 1) column like the
# original (vectorized instead of the per-row Python loop).
UmagSlice = np.sqrt((data[:, 3:6] ** 2).sum(axis=1, keepdims=True))

# Regular target grids: 1500 x 500 points spanning the data extents.
grid_x, grid_z = np.mgrid[x.min():x.max():1500j, z.min():z.max():500j]
grid_y, _ = np.mgrid[y.min():y.max():1500j, z.min():z.max():500j]
Uinterp = griddata(pointsXZ, UmagSlice.ravel(), (grid_x, grid_z), method='cubic')

from PlottingTool import Plot2D_InsetZoom, PlotSurfaceSlices3D
# myplot = Plot2D_InsetZoom(grid_x, grid_z, zoomBox = (1000, 2500, 0, 500), z2D = Uinterp, equalAxis = True, name = caseName + '_slice', figDir = 'R:/')
# myplot.initializeFigure()
# myplot.plotFigure(contourLvl = 100)
# myplot.finalizeFigure()

myplot2 = PlotSurfaceSlices3D(grid_x, grid_y, grid_z, Uinterp,
                              name=caseName + '_3d', figDir='R:/',
                              xLim=(0, 3000), yLim=(0, 3000), zLim=(0, 1000),
                              show=False,
                              xLabel=r'\textit{x} [m]',
                              yLabel=r'\textit{z} [m]',
                              zLabel=r'\textit{U} [m/s]')
myplot2.initializeFigure()
myplot2.plotFigure()
myplot2.finalizeFigure()
StarcoderdataPython
94100
"""Webcam black-object detector: masks near-black pixels, smooths the mask,
finds contours, and draws one bounding rectangle around everything detected.
Press 'q' in any window to quit."""
import cv2
import numpy
import time

cap = cv2.VideoCapture(0)

# Inclusive BGR bounds for "black" pixels (loop-invariant, so built once).
# orangeLower = numpy.array([5, 50, 150], dtype="uint8")
# orangeUpper = numpy.array([100, 200, 255], dtype="uint8")
blackLower = numpy.array([0, 0, 0])
blackUpper = numpy.array([50, 50, 50])

while True:
    # Capture frame-by-frame.
    ret, frame = cap.read()
    if not ret:
        # Camera unplugged / stream ended: frame is None and every cv2 call
        # below would raise, so stop cleanly.
        break

    cv2.imshow('live feed', frame)

    # Mask of all near-black pixels, mapped back onto the image.
    mask = cv2.inRange(frame, blackLower, blackUpper)
    output = cv2.bitwise_and(frame, frame, mask=mask)

    # Smooth over larger objects and drop speckle false positives.
    output = cv2.cvtColor(output, cv2.COLOR_BGR2HSV)
    output = cv2.erode(output, None, iterations=2)
    output = cv2.dilate(output, None, iterations=2)
    cv2.imshow('dilated', output)

    # Edge map for contour extraction.
    blurred = cv2.GaussianBlur(output, (7, 7), 0)
    edged = cv2.Canny(blurred, 50, 150)
    contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    # Accumulate one rectangle covering every contour's bounding box.
    minx, maxx, miny, maxy = 800, 0, 800, 0
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        minx = min(minx, x)
        maxx = max(maxx, x + w)
        miny = min(miny, y)
        maxy = max(maxy, y + h)
    if contours:
        # BUGFIX: cv2.rectangle takes two corner POINTS; the original passed
        # (maxx - minx, maxy - miny) — a width/height pair — as the second
        # corner, drawing the box in the wrong place.
        cv2.rectangle(output, (minx, miny), (maxx, maxy), (0, 255, 0), 2)

    cv2.imshow('Final with drawn rectangles', output)

    # Detection pass over the filtered image.
    for row in output:
        for pixel in row:
            # BUGFIX: the original tested `pixel is not [0, 0, 0]`, an
            # identity comparison against a fresh list literal, which is
            # always True.  A non-black pixel is one with any nonzero
            # channel.
            if pixel.any():
                # print("detected!")
                pass

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture.
cap.release()
cv2.destroyAllWindows()
StarcoderdataPython
3295623
__version__ = "0.7.2+snapshot"
StarcoderdataPython
3300496
#---------------------------------------------#
#
# Mailer will queue up emails, Try to send them
# and keep track of if they are sent or not.
# Should be executed with a cron job.
#
#---------------------------------------------#
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
import datetime
import logging
import os
logger = logging.getLogger(__name__)
from django.utils import timezone
from django.db import models
from django.core.mail import EmailMultiAlternatives
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from . import defaults
from .utils import get_storage


class MailerMessageManager(models.Manager):
    """Manager providing the batch operations the cron job / Celery task use."""

    def send_queued(self, limit=None):
        """Attempt delivery of up to ``limit`` unsent messages.

        ``limit`` defaults to ``settings.MAILQUEUE_LIMIT`` (falling back to
        the package default).  Each message handles and records its own
        success/failure via ``send_mail``.
        """
        if limit is None:
            limit = getattr(settings, 'MAILQUEUE_LIMIT', defaults.MAILQUEUE_LIMIT)
        for email in self.filter(sent=False)[:limit]:
            email.send_mail()

    def clear_sent_messages(self, offset=None):
        """ Deletes sent MailerMessage records """
        if offset is None:
            offset = getattr(settings, 'MAILQUEUE_CLEAR_OFFSET',
                             defaults.MAILQUEUE_CLEAR_OFFSET)
        # An int offset is interpreted as a number of hours.
        if type(offset) is int:
            offset = datetime.timedelta(hours=offset)
        delete_before = timezone.now() - offset
        # Only delivered messages older than the cutoff are purged; unsent
        # messages are kept so they can be retried.
        self.filter(sent=True, last_attempt__lte=delete_before).delete()


@python_2_unicode_compatible
class MailerMessage(models.Model):
    """A queued outbound e-mail (plain text plus optional HTML alternative).

    ``to_address`` and ``bcc_address`` hold comma-separated recipient lists.
    ``sent``/``last_attempt`` track delivery state; both are maintained by
    ``_send`` and are not user-editable.
    """
    subject = models.CharField(_('Subject'), max_length=250, blank=True)
    to_address = models.TextField(_('To'))
    bcc_address = models.TextField(_('BCC'), blank=True)
    from_address = models.EmailField(_('From'), max_length=250)
    content = models.TextField(_('Content'), blank=True)
    html_content = models.TextField(_('HTML Content'), blank=True)
    app = models.CharField(_('App'), max_length=250, blank=True)
    sent = models.BooleanField(_('Sent'), default=False, editable=False)
    last_attempt = models.DateTimeField(_('Last attempt'), auto_now=False,
                                        auto_now_add=False, blank=True,
                                        null=True, editable=False)

    objects = MailerMessageManager()

    class Meta:
        verbose_name = _('Message')
        verbose_name_plural = _('Messages')

    def __str__(self):
        return self.subject

    def add_attachment(self, attachment):
        """
        Takes a Django `File` object and creates an attachment for this
        mailer message.
        """
        # The message must exist in the DB before an Attachment can point at
        # it; save without triggering the post_save send.
        if self.pk is None:
            self._save_without_sending()
        Attachment.objects.create(email=self, file_attachment=attachment)

    def _save_without_sending(self, *args, **kwargs):
        """
        Saves the MailerMessage instance without sending the e-mail. This
        ensures other models (e.g. `Attachment`) have something to relate
        to in the database.
        """
        # The `do_not_send` flag is consumed (and reset) by the post_save
        # receiver below.
        self.do_not_send = True
        super(MailerMessage, self).save(*args, **kwargs)

    def send_mail(self):
        """
        Public api to send mail.  Makes the determination of using celery
        or not and then calls the appropriate methods.
        """
        if getattr(settings, 'MAILQUEUE_CELERY', defaults.MAILQUEUE_CELERY):
            # Imported lazily so the tasks module (and Celery) is only
            # required when the setting is enabled.
            from mailqueue.tasks import send_mail
            send_mail.delay(self.pk)
        else:
            self._send()

    def _send(self):
        """Build the EmailMultiAlternatives and attempt delivery once.

        On success ``sent`` becomes True; on failure the exception is logged
        and the message stays queued for a later retry.  ``last_attempt`` is
        updated either way.
        """
        if not self.sent:
            self.last_attempt = timezone.now()

            subject, from_email = self.subject, self.from_address
            text_content = self.content
            msg = EmailMultiAlternatives(subject, text_content, from_email)

            if self.html_content:
                html_content = self.html_content
                msg.attach_alternative(html_content, "text/html")

            # Recipient fields are comma-separated; blanks are dropped.
            msg.to = [email.strip() for email in self.to_address.split(',') if email.strip()]
            msg.bcc = [email.strip() for email in self.bcc_address.split(',') if email.strip()]

            # Add any additional attachments
            for attachment in self.attachment_set.all():
                msg.attach_file(os.path.join(settings.MEDIA_ROOT,
                                             attachment.file_attachment.name))

            try:
                msg.send()
                self.sent = True
            except Exception as e:
                # Failure is recorded, not raised: the queue is best-effort
                # and the cron job will retry unsent messages.
                self.do_not_send = True
                logger.error('Mail Queue Exception: {0}'.format(e))
            self.save()


@python_2_unicode_compatible
class Attachment(models.Model):
    """A file attached to a queued message, stored via the configured storage."""
    file_attachment = models.FileField(storage=get_storage(),
                                       upload_to='mail-queue/attachments',
                                       blank=True, null=True)
    # NOTE(review): ForeignKey without on_delete implies this targets
    # Django < 2.0 (consistent with python_2_unicode_compatible above);
    # on_delete=models.CASCADE would be required on 2.0+ — confirm the
    # supported Django versions before upgrading.
    email = models.ForeignKey(MailerMessage, blank=True, null=True)

    class Meta:
        verbose_name = _('Attachment')
        verbose_name_plural = _('Attachments')

    def __str__(self):
        return self.file_attachment.name


@receiver(post_save, sender=MailerMessage)
def send_post_save(sender, instance, signal, *args, **kwargs):
    # One-shot suppression: a save flagged with `do_not_send` (set by
    # _save_without_sending or a failed _send) clears the flag and skips
    # delivery.
    if getattr(instance, "do_not_send", False):
        instance.do_not_send = False
        return
    if not getattr(settings, 'MAILQUEUE_QUEUE_UP', defaults.MAILQUEUE_QUEUE_UP):
        # If mail queue up is set, wait for the cron or management command
        # to send any email; otherwise send immediately on save.
        instance.send_mail()
StarcoderdataPython
1762479
<filename>src/common.py import discord #the client itself client = discord.Client()
StarcoderdataPython
1660281
<reponame>ryotaro/neural_style_transfer from torch import unsqueeze from torch.nn import Parameter from .container import ImageContainer class ImageParameter(ImageContainer): def __init__(self, path, imsize=256): super().__init__(path, imsize) self.parameter = Parameter(self.variable.data) def feed(self, net): return net(self.parameter) def clamp_(self): self.parameter.data.clamp_(0, 1)
StarcoderdataPython
3259666
<gh_stars>1-10 import pandas as pd import os import urllib.request import sys utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils') if utils_path not in sys.path: sys.path.append(utils_path) import util_files import util_cloud import util_carto from zipfile import ZipFile import logging import datetime # Set up logging # Get the top-level logger object logger = logging.getLogger() for handler in logger.handlers: logger.removeHandler(handler) logger.setLevel(logging.INFO) # make it print to the console. console = logging.StreamHandler() logger.addHandler(console) logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') # name of table on Carto where you want to upload data # this should be a table name that is not currently in use dataset_name = 'com_028_rw1_effect_of_ag_prices_on_commodity_prices' logger.info('Executing script for dataset: ' + dataset_name) # create a new sub-directory within your specified dir called 'data' # within this directory, create files to store raw and processed data data_dir = util_files.prep_dirs(dataset_name) ''' Download data and save to your data directory ''' logger.info('Downloading raw data') # insert the url used to download the data from the source website url = 'http://www.ag-incentives.org/sites/default/files/data/AgIncentivesNRP.zip' # download the data from the source raw_data_file = os.path.join(data_dir, 'AgIncentivesNRP.zip') urllib.request.urlretrieve(url, raw_data_file) # unzip source data raw_data_file_unzipped = raw_data_file.split('.')[0] zip_ref = ZipFile(raw_data_file, 'r') zip_ref.extractall(raw_data_file_unzipped) zip_ref.close() ''' Process data ''' # read the data into a pandas dataframe df = pd.read_csv(os.path.join(raw_data_file_unzipped, 'AgIncentivesNRP.csv')) # convert the column names to lowercase to match the column name requirements of Carto df.columns = [x.lower() for x in df.columns] # convert the years in the 'year' column to datetime objects and 
store them in a new column 'datetime' df['datetime'] = [datetime.datetime(x, 1, 1) for x in df.year] # extract subset to only retain 'COUNTRY_TOTAL' (aggregate of all products at country level) in the 'category' column df = df.loc[df['category'] == 'COUNTRY_TOTAL'] # remove column 'notes' since it only contains indexes instead of actual data # remove column 'productcode' since it contains the same information as the column 'productname' # remove column 'source' since it contains the same information as the column 'sourceversion' df = df.drop(columns = ['notes','productcode','source']) # save dataset to csv processed_data_file = os.path.join(data_dir, dataset_name+'_edit.csv') df.to_csv(processed_data_file, index=False) ''' Upload processed data to Carto ''' logger.info('Uploading processed data to Carto.') util_carto.upload_to_carto(processed_data_file, 'LINK') ''' Upload original data and processed data to Amazon S3 storage ''' # initialize AWS variables aws_bucket = 'wri-public-data' s3_prefix = 'resourcewatch/' logger.info('Uploading original data to S3.') # Upload raw data file to S3 # Copy the raw data into a zipped file to upload to S3 raw_data_dir = os.path.join(data_dir, dataset_name+'.zip') with ZipFile(raw_data_dir,'w') as zip: zip.write(raw_data_file, os.path.basename(raw_data_file)) # Upload raw data file to S3 uploaded = util_cloud.aws_upload(raw_data_dir, aws_bucket, s3_prefix+os.path.basename(raw_data_dir)) logger.info('Uploading processed data to S3.') # Copy the processed data into a zipped file to upload to S3 processed_data_dir = os.path.join(data_dir, dataset_name+'_edit.zip') with ZipFile(processed_data_dir,'w') as zip: zip.write(processed_data_file, os.path.basename(processed_data_file)) # Upload processed data file to S3 uploaded = util_cloud.aws_upload(processed_data_dir, aws_bucket, s3_prefix+os.path.basename(processed_data_dir))
StarcoderdataPython
3230554
<reponame>jasonadu/Python-2.5 from test import test_support import StringIO # SF bug 480215: softspace confused in nested print f = StringIO.StringIO() class C: def __str__(self): print >> f, 'a' return 'b' print >> f, C(), 'c ', 'd\t', 'e' print >> f, 'f', 'g' # In 2.2 & earlier, this printed ' a\nbc d\te\nf g\n' test_support.vereq(f.getvalue(), 'a\nb c d\te\nf g\n')
StarcoderdataPython
13228
<reponame>stinvi/dava.engine import os import shutil import build_utils def get_supported_targets(platform): if platform == 'win32': return ['win32'] elif platform == 'darwin': return ['macos'] elif platform == 'linux': return ['linux'] else: return [] def get_dependencies_for_target(target): if target == 'win32': return ['zlib'] else: return [] def build_for_target(target, working_directory_path, root_project_path): if target == 'win32': _build_win32(working_directory_path, root_project_path) elif target == 'macos': _build_macos(working_directory_path, root_project_path) elif target == 'linux': _build_linux(working_directory_path, root_project_path) def get_download_info(): return 'https://sourceforge.net/projects/libpsd/files/libpsd/0.9/libpsd-0.9.zip' def _download_and_extract(working_directory_path): source_folder_path = os.path.join(working_directory_path, 'libpsd_source') url = get_download_info() build_utils.download_and_extract( url, working_directory_path, source_folder_path, build_utils.get_url_file_name_no_ext(url)) return source_folder_path @build_utils.run_once def _patch_sources(source_folder_path, working_directory_path): build_utils.apply_patch( os.path.abspath('patch_v0.9.diff'), working_directory_path) shutil.copyfile( 'CMakeLists.txt', os.path.join(source_folder_path, 'CMakeLists.txt')) def _build_win32(working_directory_path, root_project_path): source_folder_path = _download_and_extract(working_directory_path) _patch_sources(source_folder_path, working_directory_path) cmake_flags = ['-DZLIB_INCLUDE_DIR=' + os.path.join(working_directory_path, '../zlib/zlib_source/')] build_utils.build_and_copy_libraries_win32_cmake( os.path.join(working_directory_path, 'gen'), source_folder_path, root_project_path, 'psd.sln', 'psd', 'psd.lib', 'psd.lib', 'libpsd.lib', 'libpsd.lib', 'libpsd.lib', 'libpsd.lib', cmake_flags, static_runtime=False) _copy_headers(source_folder_path, root_project_path) def _build_macos(working_directory_path, root_project_path): 
source_folder_path = _download_and_extract(working_directory_path) _patch_sources(source_folder_path, working_directory_path) build_utils.build_and_copy_libraries_macos_cmake( os.path.join(working_directory_path, 'gen'), source_folder_path, root_project_path, 'psd.xcodeproj', 'psd', 'libpsd.a', 'libpsd.a') _copy_headers(source_folder_path, root_project_path) def _build_linux(working_directory_path, root_project_path): source_folder_path = _download_and_extract(working_directory_path) _patch_sources(source_folder_path, working_directory_path) build_utils.build_and_copy_libraries_linux_cmake( gen_folder_path=os.path.join(working_directory_path, 'gen'), source_folder_path=source_folder_path, root_project_path=root_project_path, target="all", lib_name='libpsd.a') _copy_headers(source_folder_path, root_project_path) def _copy_headers(source_folder_path, root_project_path): include_path = os.path.join(root_project_path, 'Libs/include/libpsd') build_utils.copy_files_by_name( os.path.join(source_folder_path, 'include'), include_path, ['libpsd.h', 'psd_color.h', 'psd_types.h'])
StarcoderdataPython
1711187
# Generated by Django 4.0.1 on 2022-01-23 21:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('utils', '0014_constellation_historical_image'), ] operations = [ migrations.AddField( model_name='objecttype', name='map_symbol_type', field=models.CharField(choices=[('Marker', 'marker'), ('Ellipse', 'ellipse'), ('Open Circle', 'open-circle'), ('Gray Circle', 'gray circle'), ('Circle in Square', 'circle-in-square'), ('Open Square', 'square'), ('Gray Square', 'gray-square'), ('Circle in Gray Square', 'circle-in-gray-square')], default='marker', max_length=30, verbose_name='Map Symbol Type'), ), ]
StarcoderdataPython
1708108
<gh_stars>1-10 from ujson import loads as load_json from websocket import create_connection from ..base import _wrap from ...base import StreamNone, StreamEnd from ...thread import run def WebSocket(url, *args, **kwargs): return AsyncWebSocket(url, *args, **kwargs) def AsyncWebSocket(url, json=False, wrap=False): async def _listen(url, json, wrap): ws = create_connection(url) for x in run(ws.recv): if isinstance(x, StreamNone): continue elif not x or isinstance(x, StreamEnd): break if json: x = load_json(x) if wrap: x = [x] yield x return _wrap(_listen, dict(url=url, json=json, wrap=wrap), name='WebSocket')
StarcoderdataPython
47750
def action(ctx): return 'move left'
StarcoderdataPython
1750976
import csv import pandas as pd import geopandas as gpd import dash_html_components as html def seismic_reporting_data(occurence): ''' Seismic Reporting Function Collects the additional features of the disaster like 'Tsunami Alerts', 'Danger Alerts' Depends on the occurence type that is being chosen Parameter : `occurence` Return : `pandas DataFrame` ''' # geojson file reading eq_geojson = 'https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/' + str(occurence) +'.geojson' eq_geodata = gpd.read_file(eq_geojson) # conversion of geojson to pandas dataframe eq_geocsv = pd.DataFrame(eq_geodata) # take longitude geometric value longitude = eq_geodata['geometry'].x.tolist() # take latitude geometric value latitude = eq_geodata['geometry'].y.tolist() eq_geocsv['latitude'] = latitude eq_geocsv['longitude'] = longitude eq_geocsv.to_csv('additional_geojson.csv') data_quake = pd.read_csv('additional_geojson.csv', index_col=0) return data_quake def get_all_felts(occurence, mag_value, region_name): ''' Felt Reports Parameter : `occurence`, `mag_value`, `region_name` Return : `list - [ (), (), () ]` ''' data_quakes = seismic_reporting_data(occurence) felt_specific = data_quakes[['title', 'mag', 'felt']] felt_specific = felt_specific[felt_specific['mag'] > mag_value] felt_specific.dropna(inplace=True) felt_specific = felt_specific.sort_values(by='felt', ascending=False) title_felt = list(zip(felt_specific['title'], felt_specific['felt'])) region_felt = [regionf for regionf in title_felt if region_name in regionf[0]] if region_name == 'Worldwide': return title_felt else: return region_felt def get_all_alerts(occurence, mag_value, region_name): ''' Alert Reports Parameter : `occurence`, `mag_value`, `region_name` Return : `list - [ (), (), () ]` ''' data_quakes = seismic_reporting_data(occurence) alert_specific = data_quakes[['title', 'mag', 'alert']] alert_specific = alert_specific[alert_specific['mag'] > mag_value] alert_specific.dropna(inplace=True) alert_specific = 
alert_specific.sort_values(by='alert', ascending=False) title_alert = list(zip(alert_specific['title'], alert_specific['alert'])) region_alert = [regiona for regiona in title_alert if region_name in regiona[0]] if region_name == 'Worldwide': return title_alert else: return region_alert def get_all_tsunamis(occurence, mag_value, region_name): ''' Tsunami Reports Parameter : `occurence`, `mag_value`, `region_name` Return : `list - [ (), (), () ]` ''' data_quakes = seismic_reporting_data(occurence) tsunami_specific = data_quakes[['title', 'mag', 'tsunami']] tsunami_specific = tsunami_specific[tsunami_specific['mag'] > mag_value] tsunami_event = tsunami_specific[tsunami_specific['tsunami'] > 0] tsunami_event = tsunami_event.sort_values(by='mag', ascending=False) title_tsunami = list(zip(tsunami_event['title'], tsunami_event['tsunami'])) region_tsunami = [regiont for regiont in title_tsunami if region_name in regiont[0]] if region_name == 'Worldwide': return title_tsunami else: return region_tsunami def make_seismic_report(report_list, loc_color, report_color): ''' Displaying Seismic Report (people felt and tsunami report) Parameter : `report_list` from any of the get_ functions `loc_color` location color `report_color` report color Return : `html.Div([])` list ''' report_content = [] for trs in report_list: report_content.append( html.Div([ html.P('Location: ' + str(trs[0]), style={'color' : str(loc_color)}), html.P('Report found: ' + str(trs[1]), style={'color' : str(report_color)}), html.P('-'*25) ]) ) if len(report_content) == 0: return html.Div([ html.P('Everything seems clear...', style={'textAlign' : 'center', 'margin-top' : 40, 'margin-bottom' : 40}) ]) else: return report_content def make_alert_report(report_list): ''' Displaying only Alert Color Report Parameter : `report_list` from any of the get_ functions Return : `html.Div([])` list ''' alert_colors = {'green' : '#018014', 'yellow' : '#f1c40f', 'orange' : '#f39c12', 'red' : '#de1a0a'} report_content = [] 
for colr in alert_colors: for trs in report_list: if trs[1] == colr: report_content.append( html.Div([ html.P(str(trs[0]), style={'color' : alert_colors[colr]}), html.P('-'*25) ]) ) if len(report_content) == 0: return html.Div([ html.P('Everything seems clear...', style={'textAlign' : 'center', 'margin-top' : 40, 'margin-bottom' : 40}) ]) else: return report_content
StarcoderdataPython
56829
<reponame>sirmammingtonham/droneee import tensorflow as tf # Get the variables def find_trainable_variables(key): with tf.variable_scope(key): return tf.trainable_variables() # Make directory def make_path(f): # exist_ok: if the folder already exist makes no exception error return os.makedirs(f, exist_ok=True) def discount_with_dones(rewards, dones, gamma): discounted = [] r = 0 for reward, done in zip(rewards[::-1], dones[::-1]): r = reward + gamma*r*(1.-done) # fixed off by one bug discounted.append(r) return discounted[::-1]
StarcoderdataPython
1628595
<reponame>fuzzball81/pipenv<filename>tests/integration/test_project.py # -*- coding=utf-8 -*- import pytest import os from pipenv.project import Project from pipenv.utils import temp_environ from pipenv.patched import pipfile @pytest.mark.project @pytest.mark.sources @pytest.mark.environ def test_pipfile_envvar_expansion(PipenvInstance): with PipenvInstance(chdir=True) as p: with temp_environ(): with open(p.pipfile_path, 'w') as f: f.write(""" [[source]] url = 'https://${TEST_HOST}/simple' verify_ssl = false name = "pypi" [packages] pytz = "*" """.strip()) os.environ['TEST_HOST'] = 'localhost:5000' project = Project() assert project.sources[0]['url'] == 'https://localhost:5000/simple' assert 'localhost:5000' not in str(pipfile.load(p.pipfile_path)) @pytest.mark.project @pytest.mark.sources @pytest.mark.parametrize('lock_first', [True, False]) def test_get_source(PipenvInstance, pypi, lock_first): with PipenvInstance(pypi=pypi, chdir=True) as p: with open(p.pipfile_path, 'w') as f: contents = """ [[source]] url = "{0}" verify_ssl = false name = "testindex" [[source]] url = "https://pypi.python.org/simple" verify_ssl = "true" name = "pypi" [packages] pytz = "*" six = {{version = "*", index = "pypi"}} [dev-packages] """.format(os.environ['PIPENV_TEST_INDEX']).strip() f.write(contents) if lock_first: # force source to be cached c = p.pipenv('lock') assert c.return_code == 0 project = Project() sources = [ ['pypi', 'https://pypi.python.org/simple'], ['testindex', os.environ.get('PIPENV_TEST_INDEX')] ] for src in sources: name, url = src source = [s for s in project.pipfile_sources if s.get('name') == name] assert source source = source[0] assert source['name'] == name assert source['url'] == url assert sorted(source.items()) == sorted(project.get_source(name=name).items()) assert sorted(source.items()) == sorted(project.get_source(url=url).items()) assert sorted(source.items()) == sorted(project.find_source(name).items()) assert sorted(source.items()) == 
sorted(project.find_source(url).items())
StarcoderdataPython
9657
<reponame>prettyirrelevant/zeta-python-sdk class InvalidSideException(Exception): """Invalid side""" class NotSupportedException(Exception): """Not supported by dummy wallet""" class InvalidProductException(Exception): """Invalid product type""" class OutOfBoundsException(Exception): """Attempt to access memory outside buffer bounds"""
StarcoderdataPython
64237
# -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest test_records = [ dict(doctype='Item', item_code='Food Item 1', item_group='Products', is_stock_item=0), dict(doctype='Item', item_code='Food Item 2', item_group='Products', is_stock_item=0), dict(doctype='Item', item_code='Food Item 3', item_group='Products', is_stock_item=0), dict(doctype='Item', item_code='Food Item 4', item_group='Products', is_stock_item=0), dict(doctype='Restaurant Menu', restaurant='Test Restaurant 1', name='Test Restaurant 1 Menu 1', items = [ dict(item='Food Item 1', rate=400), dict(item='Food Item 2', rate=300), dict(item='Food Item 3', rate=200), dict(item='Food Item 4', rate=100), ]), dict(doctype='Restaurant Menu', restaurant='Test Restaurant 1', name='Test Restaurant 1 Menu 2', items = [ dict(item='Food Item 1', rate=450), dict(item='Food Item 2', rate=350), ]) ] class TestRestaurantMenu(unittest.TestCase): def test_price_list_creation_and_editing(self): menu1 = frappe.get_doc('Restaurant Menu', 'Test Restaurant 1 Menu 1') menu1.save() menu2 = frappe.get_doc('Restaurant Menu', 'Test Restaurant 1 Menu 2') menu2.save() self.assertTrue(frappe.db.get_value('Price List', 'Test Restaurant 1 Menu 1')) self.assertEqual(frappe.db.get_value('Item Price', dict(price_list = 'Test Restaurant 1 Menu 1', item_code='Food Item 1'), 'price_list_rate'), 400) self.assertEqual(frappe.db.get_value('Item Price', dict(price_list = 'Test Restaurant 1 Menu 2', item_code='Food Item 1'), 'price_list_rate'), 450) menu1.items[0].rate = 401 menu1.save() self.assertEqual(frappe.db.get_value('Item Price', dict(price_list = 'Test Restaurant 1 Menu 1', item_code='Food Item 1'), 'price_list_rate'), 401) menu1.items[0].rate = 400 menu1.save()
StarcoderdataPython
3302538
from mod2 import * class A: a = 1 def f(x): print x.a def g(y): print y+ o = B() print B.a
StarcoderdataPython
3289626
<reponame>RingoIngo/gluon-ts from pts.dataset.repository.datasets import get_dataset, dataset_recipes from pts.dataset.utils import to_pandas import numpy as np import pandas as pd from pts.dataset import ListDataset import torch from pts.model.simple_feedforward import SimpleFeedForwardEstimator import pickle import json import random def group_exchangerate_cv( num_ts=10, num_groups=14, context_length=15, prediction_length=10, file_name="default", ): dataset = get_dataset("exchange_rate") len_sample = context_length + prediction_length dataset_group = [[] for i in range(num_groups)] train_full_data = [] test_full_data = [] ret = dict() train_it = iter(dataset.train) test_it = iter(dataset.test) # num_ts = int(dataset.metadata.feat_static_cat[0].cardinality) date_checkpoint = ["1994-01-01", "1998-01-01", "2002-01-01"] for i in range(num_ts): train_entry = next(train_it) unsplit_ts = train_entry["target"] unsplit_start = train_entry["start"] for ts_sample_start in range( 0, len(unsplit_ts) - len_sample, prediction_length ): for j, date_ckpt in enumerate(date_checkpoint): if unsplit_start < pd.Timestamp(date_ckpt): sid = j break elif unsplit_start > pd.Timestamp(date_checkpoint[-1]): sid = len(date_checkpoint) break gid = i * 4 + sid ts_slice = unsplit_ts[ ts_sample_start : ts_sample_start + len_sample ] train_full_data.append( { "target": ts_slice, "start": unsplit_start, "feat_static_cat": train_entry["feat_static_cat"], } ) dataset_group[gid].append( { "target": ts_slice, "start": unsplit_start, "feat_static_cat": train_entry["feat_static_cat"], } ) unsplit_start += pd.Timedelta("1D") * prediction_length # get ready the test data for i in range(int(num_ts * 0.2)): test_entry = next(test_it) unsplit_ts = test_entry["target"] unsplit_start = test_entry["start"] for ts_sample_start in range( 0, len(unsplit_ts) - len_sample, prediction_length ): ts_slice = unsplit_ts[ ts_sample_start : ts_sample_start + len_sample ] test_full_data.append( { "target": ts_slice, 
"start": unsplit_start, "feat_static_cat": test_entry["feat_static_cat"], } ) print("total number of training examples: ", len(train_full_data)) ret["group_ratio"] = [len(i) / len(train_full_data) for i in dataset_group] print("ratio for each group: ", ret["group_ratio"]) random.shuffle(train_full_data) ret["whole_data"] = ListDataset( train_full_data, freq=dataset.metadata.freq ) random.shuffle(test_full_data) ret["val_data"] = ListDataset(test_full_data, freq=dataset.metadata.freq) group_data_list = [] for group in dataset_group: random.shuffle(group) group_data_list.append(ListDataset(group, freq=dataset.metadata.freq)) ret["group_data"] = group_data_list with open("../dataset/" + file_name + "_data.csv", "wb") as output: pickle.dump(ret, output) return True
StarcoderdataPython
120769
<reponame>kchng/Quantum_machine_learning # Author: <NAME> # (c) 2016 # San Jose State University import numpy as np import random import time import sys class insert_file_info : def __init__(self, full_file_path, filenumber, batch_size = 50, use_random_seed = False, include_validation_data = False, load_test_data_only = False) : """ full_file_path : full file path of the shuffled data filenumber : An array of file number """ self.filename = full_file_path.rsplit('\\', 1)[-1] self.filename = self.filename.rsplit('/', 1)[-1] self.filenumber = filenumber self.full_file_path = full_file_path self.include_validation_data = include_validation_data self.nrows = 0 self.ncols = 0 self.nfile = len(filenumber) self.batch_size = batch_size self.current_index = 0 self.load_test_data_only = load_test_data_only if self.load_test_data_only : self.include_validation_data = False self.delimiter = [1 for i in xrange(self.ncols)] class DataSet(object) : file_info = None def __init__(self, images, labels, temps, signs, nrows, nfile_train, nfile_test, nfile_val, full_file_path, data_type) : #self.file_into = insert_file_info() #super(DataSet,self).__init__() #self.insert_file_info = insert_file_info self._epochs_completed = 0 self._file_index = 1 self._images = images self._index_in_datafile = 0 self._index_in_epoch = 0 self._labels = labels self._ndata = 0 self._temps = temps self._signs = signs self.batch_size = 0 self.data_type = data_type self.full_file_path = full_file_path self.nrows = nrows self.shuffle_index_dose = np.arange(0,self.nrows,1) if self.data_type == 'train' : self.start_file_index = 1 self.end_file_index = nfile_train self._ndata = nfile_train*self.nrows self.convert_to_one_hot = True self.shuffle_index = np.arange(0,self._ndata,1) elif self.data_type == 'test' : self.start_file_index = nfile_train + 1 self.end_file_index = nfile_train + nfile_test self._ndata = nfile_test*self.nrows self.convert_to_one_hot = True self.shuffle_index = np.arange(0,self._ndata,1) elif 
self.data_type == 'validation' : self.start_file_index = nfile_train + nfile_test + 1 self.end_file_index = nfile_train + nfile_test + nfile_val self._ndata = nfile_val*self.nrows self.convert_to_one_hot = False self.shuffle_index = np.arange(0,self._ndata,1) #@staticmethod #def feed_self(self, batch_size, nrows) : # self.batch_size = batch_size # self.nrows = nrows #print self.batch_size, self.nrows @property def images(self): return self._images @property def labels(self): return self._labels @property def temps(self): return self._temps @property def signs(self): return self._signs @property def ndata(self): return self._ndata @property def epochs_completed(self): return self._epochs_completed def next_batch(self, batch_size = 50) : start = self._index_in_epoch if ( self._epochs_completed == 0 ) and ( start == 0 ) : self.batch_size = batch_size while np.modf(float(self._ndata)/self.batch_size)[0] > 0.0 : print 'Warning! Number of data/ batch size must be an integer.' print 'number of data: %d' % self._ndata print 'batch size: %d' % self.batch_size self.batch_size = int(input('Input new batch size: ')) print 'batch size : %d' % self.batch_size print 'number of data: %d' % self._ndata self._index_in_epoch += self.batch_size if self._index_in_epoch > self._ndata : # Number of training epochs completed self._epochs_completed += 1 # Shuffle data random.shuffle(self.shuffle_index) self._images = self._images[self.shuffle_index] self._labels = self._labels[self.shuffle_index] # Reinitialize conunter start = 0 self._index_in_epoch = self.batch_size assert self.batch_size <= self._ndata end = self._index_in_epoch return self._images[start:end], self._labels[start:end] def next_dose(self, batch_size = 50) : def convert_to_one_hot( label ) : label_one_hot = np.zeros((len(label),2)) for i in range(len(label)) : label_one_hot[i,label[i]] = 1 return label_one_hot start = self._index_in_datafile if ( self._file_index == self.start_file_index ) and ( start == 0 ) : 
self.batch_size = batch_size while np.modf(float(self.nrows)/self.batch_size)[0] > 0.0 : print 'Warning! Number of data per file/ dose size must be an integer.' print 'number of data per file: %d' % self.nrows print 'dose size: %d' % self.batch_size self.batch_size = int(input('Input new dose size: ')) print 'dose size : %d' % self.batch_size print 'number of data: %d' % self._ndata # Read in one file at a time data = np.genfromtxt(self.full_file_path%(self._file_index) ,dtype=int, skip_header=0, skip_footer=0) self._images = data[:,:-1].astype('int') labels = data[:,-1:].astype('int') if self.convert_to_one_hot : self._labels = convert_to_one_hot(labels) self._index_in_datafile += self.batch_size if self._index_in_datafile > self.nrows : self._file_index += 1 start = 0 self._index_in_datafile = self.batch_size assert self.batch_size <= self.nrows # Read in one file at a time data = np.genfromtxt(self.full_file_path%(self._file_index) ,dtype=int, skip_header=0, skip_footer=0) self._images = data[:,:-1].astype('int') labels = data[:,-1:].astype('int') if self.convert_to_one_hot : self._labels = convert_to_one_hot(labels) # Shufle data random.shuffle(self.shuffle_index_dose) self._images = self._images[self.shuffle_index_dose] self._labels = self._labels[self.shuffle_index_dose] if self._file_index > self.end_file_index : # Number of training epochs completed self._epochs_completed += 1 self._file_index = self.start_file_index # Reinitialize conunter start = 0 self._index_in_datafile = self.batch_size end = self._index_in_datafile return self._images[start:end], self._labels[start:end] def next_dose_old(self, batch_size = 50) : def convert_to_one_hot( label ) : label_one_hot = np.zeros((len(label),2)) for i in range(len(label)) : label_one_hot[i,label[i]] = 1 return label_one_hot start = self._index_in_datafile if ( self._file_index == self.start_file_index ) and ( start == 0 ) : self.batch_size = batch_size while np.modf(float(self.nrows)/self.batch_size)[0] > 0.0 : 
print 'Warning! Number of data per file/ dose size must be an integer.' print 'number of data per file: %d' % self.nrows print 'dose size: %d' % self.batch_size self.batch_size = int(input('Input new dose size: ')) print 'dose size : %d' % self.batch_size print 'number of data: %d' % self._ndata self.shuffle_index_dose_old = np.arange(0,self.batch_size,1) self._index_in_datafile += self.batch_size if self._index_in_datafile > self.nrows : self._file_index += 1 start = 0 self._index_in_datafile = self.batch_size assert self.batch_size <= self.nrows if self._file_index > self.end_file_index : # Number of training epochs completed self._epochs_completed += 1 self._file_index = self.start_file_index # Reinitialize conunter start = 0 self._index_in_datafile = self.batch_size end = self._index_in_datafile # Read in small dosage of data data = np.genfromtxt(self.full_file_path%(self._file_index) ,dtype=int, skip_header=start, skip_footer=self.nrows-end) self._images = data[:,:-1].astype('int') labels = data[:,-1:].astype('int') if self.convert_to_one_hot : self._labels = convert_to_one_hot(labels) # Shufle data random.shuffle(self.shuffle_index_dose_old) self._images = self._images[self.shuffle_index_dose_old] self._labels = self._labels[self.shuffle_index_dose_old] return self._images, self._labels def categorize_data(self, convert_test_labels_to_one_hot = True, make_spin_down_negative = False) : class DataSets(object): pass data_sets = DataSets() def convert_to_one_hot( label ) : label_one_hot = np.zeros((len(label),2)) for i in range(len(label)) : label_one_hot[i,label[i]] = 1 return label_one_hot def reindex_data( in_data, L=200 ) : nrows, ncols = data_shape = np.shape(in_data) n_x = int(round((float(ncols)/L)**(1/3.))) index = range(ncols) new_index = np.zeros(ncols) count=0 for j in range(L) : for i in range(n_x**3) : new_index[count] = index[j+i*L] count+=1 output_data = np.zeros(np.shape(in_data)) for i in range(ncols) : output_data[:,int(new_index[i])] = 
in_data[:,i] return output_data data = np.loadtxt(self.full_file_path%1) self.nrows, self.ncols = np.shape(data) self.nrows, self.ncols = int(self.nrows), int(self.ncols) if np.modf(float(self.nrows)/self.batch_size)[0] > 0.0 : self.batch_size = int(float(self.nrows)/20) if self.include_validation_data : # Use 10% of the data each for testing and validating, the remaining for # training nfile_train = int(self.nfile*.8) nfile_test = int(self.nfile*.1) nfile_val = nfile_test else : # Use 15% of the data for testing, the remaining for training nfile_train = int(self.nfile*.85) nfile_test = int(self.nfile*.15) nfile_val = 0 n_data_check = self.nfile - ( nfile_train + nfile_test + nfile_val ) if n_data_check > 0 : nfile_train += n_data_check elif n_data_check < 0 : nfile_train -= n_data_check start_time = time.time() if not(self.load_test_data_only) : TRAIN_DATA = np.zeros((nfile_train*self.nrows,self.ncols)) #train_images = np.zeros((nfile_train*self.nrows,self.ncols-1)) #train_labels = np.zeros((nfile_train*self.nrows,1)) print 'Loading %d/%d files for training data...' % (nfile_train,self.nfile) for i in range(nfile_train) : print '%.1fs. Loading file %d.' % (time.time()-start_time, i+1) TRAIN_DATA[i*self.nrows:(i+1)*self.nrows,:] = np.loadtxt(self.full_file_path%(i+1)) train_images = reindex_data(TRAIN_DATA[:,:-2]).astype('int') if make_spin_down_negative : train_images[train_images==0] = -1 train_labels = TRAIN_DATA[:,-2].astype('int') train_labels = convert_to_one_hot(train_labels) train_temps = [] train_signs = [] print 'Loading %d/%d files for test data...' % (nfile_test,self.nfile) TEST_DATA = np.zeros((nfile_test*self.nrows,self.ncols)) #test_images = np.zeros((nfile_test*self.nrows,self.ncols-1)) #test_labels = np.zeros((nfile_test*self.nrows,1)) for i in range(nfile_test) : print '%.1fs. Loading file %d.' 
% (time.time()-start_time, i+1) TEST_DATA[i*self.nrows:(i+1)*self.nrows,:] = np.loadtxt(self.full_file_path%(i+1+nfile_train)) test_images = reindex_data(TEST_DATA[:,:-2]).astype('int') if make_spin_down_negative : test_images[test_images==0] = -1 test_labels = TEST_DATA[:,-2].astype('int') if convert_test_labels_to_one_hot : test_labels = convert_to_one_hot(test_labels) test_temps = TEST_DATA[:,-1].astype('int') test_signs = [] if self.include_validation_data : print 'Loading %d/%d files for validation data...' % (nfile_val,self.nfile) VALIDATION_DATA = np.zeros((nfile_val*self.nrows,self.ncols)) #validation_images = np.zeros((nfile_val*self.nrows,self.ncols-1)) #validation_labels = np.zeros((nfile_val*self.nrows,1)) for i in range(nfile_test) : print '%.1fs. Loading file %d.' % (time.time()-start_time, i+1) VALIDATION_DATA[i*self.nrows:(i+1)*self.nrows,:] = np.loadtxt(self.full_file_path%(i+1+nfile_train+nfile_test)) validation_images = reindex_data(VALIDATION_DATA[:,:-2]).astype('int') if make_spin_down_negative : validation_images[validation_images==0] = -1 validation_labels = VALIDATION_DATA[:,-2].astype('int') validation_temps = VALIDATION_DATA[:,-1].astype('int') validation_signs = [] if not(self.load_test_data_only) : data_sets.train = insert_file_info.DataSet(train_images, train_labels, train_temps, train_signs, self.nrows, nfile_train, nfile_test, nfile_val, self.full_file_path, data_type = 'train') data_sets.test = insert_file_info.DataSet(test_images, test_labels, test_temps, test_signs, self.nrows, nfile_train, nfile_test, nfile_val, self.full_file_path, data_type = 'test') if self.include_validation_data : data_sets.validation = insert_file_info.DataSet(validation_images, validation_labels, validation_temps, validation_signs, self.nrows, nfile_train, nfile_test, nfile_val, self.full_file_path, data_type = 'validation') return data_sets def categorize_dose_of_data(self) : class DataSets(object): pass data_sets = DataSets() data = 
np.loadtxt(self.full_file_path%1) self.nrows, self.ncols = np.shape(data) self.nrows, self.ncols = int(self.nrows), int(self.ncols) if np.modf(float(self.nrows)/self.batch_size)[0] > 0.0 : self.batch_size = int(float(self.nrows)/20) if self.include_validation_data : # Use 10% of the data each for testing and validating, the remaining for # training nfile_train = int(self.nfile*.8) nfile_test = int(self.nfile*.1) nfile_val = nfile_test else : # Use 10% of the data each for testing, the remaining for training nfile_train = int(self.nfile*.85) nfile_test = int(self.nfile*.15) nfile_val = 0 n_data_check = self.nfile - ( nfile_train + nfile_test + nfile_val ) if n_data_check > 0 : nfile_train += n_data_check elif n_data_check < 0 : nfile_train -= n_data_check if not(self.load_test_data_only) : train_images = np.array([]).astype('int') train_labels = np.array([]).astype('int') train_temps = [] train_signs = [] start_time = time.time() print 'Loading %d/%d files for test data...' % (nfile_test,self.nfile) TEST_DATA = np.zeros((nfile_test*self.nrows,self.ncols)) test_images = np.zeros((nfile_test*self.nrows,self.ncols-1)) test_labels = np.zeros((nfile_test*self.nrows,1)) for i in range(nfile_test) : print '%.1fs. Loading file %d.' % (time.time()-start_time, i+1) TEST_DATA[i*self.nrows:(i+1)*self.nrows,:] = np.loadtxt(self.full_file_path%(i+1+nfile_train)) test_images = reindex_data(TEST_DATA[:,:-2]).astype('int') test_labels = TEST_DATA[:,-2].astype('int') if convert_test_labels_to_one_hot : test_labels = convert_to_one_hot(test_labels) test_temps = TEST_DATA[:,-1].astype('int') test_signs = [] if self.include_validation_data : print 'Loading %d/%d files for validation data...' % (nfile_val,self.nfile) VALIDATION_DATA = np.zeros((nfile_val*self.nrows,self.ncols)) validation_images = np.zeros((nfile_val*self.nrows,self.ncols-1)) validation_labels = np.zeros((nfile_val*self.nrows,1)) for i in range(nfile_test) : print '%.1fs. Loading file %d.' 
% (time.time()-start_time, i+1) VALIDATION_DATA[i*self.nrows:(i+1)*self.nrows,:] = np.loadtxt(self.full_file_path%(i+1+nfile_train+nfile_test)) validation_images = reindex_data(VALIDATION_DATA[:,:-2]).astype('int') validation_labels = VALIDATION_DATA[:,-2].astype('int') validation_temps = VALIDATION_DATA[:,-1].astype('int') validation_signs = [] #test_images = np.array([]).astype('int') #test_labels = np.array([]).astype('int') #test_temps = np.array([]).astype('int') #if self.include_validation_data : # validation_images = np.array([]).astype('int') # validation_labels = np.array([]).astype('int') # validation_temps = np.array([]).astype('int') if not(self.load_test_data_only) : data_sets.train = insert_file_info.DataSet(train_images, train_labels, train_temps, train_signs, self.nrows, nfile_train, nfile_test, nfile_val, self.full_file_path, data_type = 'train') data_sets.test = insert_file_info.DataSet(test_images, test_labels, test_temps, test_signs, self.nrows, nfile_train, nfile_test, nfile_val, self.full_file_path, data_type = 'test') if self.include_validation_data : data_sets.validation = insert_file_info.DataSet(validation_images, validation_labels, validation_temps, validation_signs, self.nrows, nfile_train, nfile_test, nfile_val, self.full_file_path, data_type = 'validation') return data_sets def load_classification_data(self, nrows = 1000, ncols=12800, SkipHeader = 0, load_ndata_per_file = 1000, include_sign=False, make_spin_down_negative = False) : class DataSets(object): pass data_sets = DataSets() def reindex_data( in_data, L=200 ) : nrows, ncols = data_shape = np.shape(in_data) n_x = int(round((float(ncols)/L)**(1/3.))) index = range(ncols) new_index = np.zeros(ncols) count=0 for j in range(L) : for i in range(n_x**3) : new_index[count] = index[j+i*L] count+=1 output_data = np.zeros(np.shape(in_data)) for i in range(ncols) : output_data[:,int(new_index[i])] = in_data[:,i] return output_data start_time = time.time() self.ncols = ncols self.nrows = 
nrows self.delimiter = [1 for i in xrange(self.ncols)] #if SkipHeader == 0 : # load_ndata_per_file = self.nrows SkipFooter = self.nrows - SkipHeader - load_ndata_per_file while load_ndata_per_file > self.nrows : print 'Number of classification data used per temperature must be smaller than number of data per temnperature.' print 'Number of data per temnperature : %d' % self.nrows print 'Classification data used per temperature: %d' % load_ndata_per_file load_ndata_per_file = input('Input new classification data used per temperature: ') classification_images = np.zeros((self.nfile*load_ndata_per_file,self.ncols)) print 'Loading %d files for classfication data...' % (self.nfile) for i in range(self.nfile) : print '%.1fs. Loading file %d.' % (time.time()-start_time, i+1) classification_images[i*load_ndata_per_file:(i+1)*load_ndata_per_file,:] = np.genfromtxt(self.full_file_path%self.filenumber[i], dtype = int, delimiter=self.delimiter, skip_header=SkipHeader, skip_footer=SkipFooter) classification_images = reindex_data(classification_images).astype('int') if make_spin_down_negative : classification_images[classification_images==0] = -1 classification_labels = [] classification_temps = [] if include_sign : classification_signs = np.zeros(self.ncols) for i in range(self.nfile) : classification_signs[i*load_ndata_per_file:(i+1)*load_ndata_per_file] = np.loadtxt(self.full_file_path%self.filenumber[i])[SkipHeader:(self.nrows-SkipFooter),-1] else : classification_signs = [] data_sets.classification = insert_file_info.DataSet(classification_images, classification_labels, classification_temps, classification_signs, 0, 0, 0, 0, self.full_file_path, data_type='classification') return data_sets
StarcoderdataPython
1732878
<reponame>PressLabs/zinc from .zone import ZoneAdmin from .ip import IPAdmin from .policy import PolicyAdmin from .policy_record import PolicyRecordAdmin
StarcoderdataPython
105381
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

from . import conf

try:
    from django.utils.deprecation import MiddlewareMixin
except ImportError:
    # Django < 1.10 has no MiddlewareMixin; a plain object base works there
    # because old-style middleware only needs the process_* hooks.
    MiddlewareMixin = object


__all__ = ['DjConfigMiddleware']


class DjConfigMiddleware(MiddlewareMixin):
    """
    Populate the cache using the database.\
    Reload the cache *only* if it is not up\
    to date with the config model
    """

    def process_request(self, request):
        # Refresh the cached config values if the database holds a newer
        # version; no-op when the cache is already current.
        conf.reload_maybe()


# Backward compatibility: old code imported the middleware under this name.
DjConfigLocMemMiddleware = DjConfigMiddleware
StarcoderdataPython
1748013
def structure_features(element):
    '''
    Calculates the structure features for a single atom.

    Parameters
    ----------
    element : str
        Atom symbol, e.g. "C" or "N".

    Returns
    -------
    list of int
        One-hot encoding ``[is_carbon, is_nitrogen]``; ``[0, 0]`` for any
        other element.

    Examples
    --------
    >>> structure_features("C")
    [1, 0]
    '''
    # The original stub referenced `sfeatures` and `atom` without defining
    # them (NameError at runtime); define the vector and use the parameter.
    sfeatures = [0, 0]
    if element == "C":
        sfeatures[0] = 1
    elif element == "N":
        sfeatures[1] = 1
    return sfeatures


def other_features(atom):
    '''
    Placeholder for additional per-atom features.

    Returns
    -------
    list
        Currently always empty.
    '''
    # TODO: implement real features.  The original stub returned an
    # undefined name (`other_features`), which would raise NameError.
    return []


def all_features(smiles):
    '''
    Collect structure and other features for every atom in *smiles*.

    Parameters
    ----------
    smiles : iterable of str
        Sequence of atom symbols (e.g. a SMILES string iterated per char).

    Returns
    -------
    list of list
        Per-atom concatenation of structure and other features.
    '''
    # Original was syntactically invalid (missing colon, no arguments passed,
    # bare `return`); rebuilt as the obvious per-atom feature collection.
    return [structure_features(atom) + other_features(atom) for atom in smiles]
StarcoderdataPython
3289338
# Copyright (c) 2016-2018 <NAME>, <NAME>.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.

from bson.objectid import ObjectId
from datetime import datetime

from pymongo import ASCENDING, MongoClient, ReturnDocument


class MongoDriver(object):
    """MongoDB persistence layer for fuzzing issues and per-fuzzer stats."""

    def __init__(self, uri):
        self.uri = uri

    @property
    def _db(self):
        # A fresh client per access keeps the driver fork/thread friendly.
        return MongoClient(self.uri).get_default_database()

    def init_db(self, sut_fuzzer_pairs):
        """
        Creates a 'fuzzinator_issues' collection with appropriate indexes
        (if not existing already), and initializes a 'fuzzinator_stats'
        collection for (sut, fuzzer) pairs (with 0 exec and issue counts if
        not existing already).
        """
        db = self._db

        issues = db.fuzzinator_issues
        issues.create_index([('sut', ASCENDING), ('id', ASCENDING)])

        stats = db.fuzzinator_stats
        for sut, fuzzer in sut_fuzzer_pairs:
            if stats.find({'sut': sut, 'fuzzer': fuzzer}).count() == 0:
                stats.insert_one({'sut': sut, 'fuzzer': fuzzer, 'exec': 0, 'crashes': 0})

    def add_issue(self, issue):
        """Upsert *issue*; return True iff it was newly inserted."""
        # MongoDB assumes that dates and times are in UTC, hence it must
        # be used in the `first_seen` field, too.
        now = datetime.utcnow()
        result = self._db.fuzzinator_issues.find_one_and_update(
            {'id': issue['id'], 'sut': issue['sut']},
            {'$setOnInsert': dict(issue, first_seen=now),
             '$set': dict(last_seen=now),
             '$inc': dict(count=1)},
            upsert=True,
            return_document=ReturnDocument.AFTER,
        )
        issue.update(result)
        # `first_seen` and `last_seen` values cannot be compared to `now` due
        # to some rounding in pymongo, the returning values can be slightly
        # different from the value stored in `now` (on nanosecond level).
        return issue['first_seen'] == issue['last_seen']

    def all_issues(self):
        return list(self._db.fuzzinator_issues.find({}))

    def find_issue_by_id(self, id):
        id = ObjectId(id)
        return self._db.fuzzinator_issues.find_one({'_id': id})

    def find_issues_by_suts(self, suts):
        return list(self._db.fuzzinator_issues.find({'sut': {'$in': suts}}))

    def update_issue(self, issue, _set):
        self._db.fuzzinator_issues.update_one({'id': issue['id'], 'sut': issue['sut']}, {'$set': _set})

    def remove_issue_by_id(self, _id):
        _id = ObjectId(_id)
        self._db.fuzzinator_issues.delete_one({'_id': _id})

    def stat_snapshot(self, fuzzers):
        """Aggregate unique/exec/issue counts per fuzzer (all if *fuzzers* is falsy)."""
        db = self._db
        stat = dict()
        match = {'$or': [{'fuzzer': fuzzer} for fuzzer in fuzzers]} if fuzzers else {}

        if db.fuzzinator_issues.count() > 0:
            issues_stat = db.fuzzinator_issues.aggregate([
                {'$match': match},
                {'$group': {'_id': {'fuzzer': '$fuzzer'}, 'unique': {'$sum': 1}}},
                {'$project': {'fuzzer': 1, 'unique': 1}}
            ])
            for document in issues_stat:
                fuzzer = document['_id']['fuzzer']
                stat[fuzzer] = dict(fuzzer=fuzzer, unique=document['unique'], issues=0, exec=0)

        fuzzers_stat = db.fuzzinator_stats.aggregate([
            {'$match': match},
            {'$group': {'_id': {'fuzzer': '$fuzzer'}, 'exec': {'$sum': '$exec'}, 'crashes': {'$sum': '$crashes'}}},
            {'$project': {'fuzzer': 1, 'exec': 1, 'crashes': 1}}
        ])
        for document in fuzzers_stat:
            fuzzer = document['_id']['fuzzer']
            data = dict(fuzzer=fuzzer, exec=document['exec'], issues=document['crashes'])
            if fuzzer in stat:
                stat[fuzzer].update(data)
            else:
                # No issues recorded yet for this fuzzer; seed unique=0.
                stat[fuzzer] = dict(unique=0, **data)
        return stat

    def update_stat(self, sut, fuzzer, batch, issues):
        self._db.fuzzinator_stats.find_one_and_update({'sut': sut, 'fuzzer': fuzzer},
                                                      {'$inc': {'exec': int(batch), 'crashes': issues}},
                                                      upsert=True)
StarcoderdataPython
1659578
import os, sys, time

from psychopy import visual, core, data, logging

from .task_base import Task
from ..shared import config

# Timing constants, in seconds.
STIMULI_DURATION = 4   # how long each word/text is displayed
BASELINE_BEGIN = 5     # rest period before the first trial
BASELINE_END = 5       # rest period after the last trial
ISI = 4                # inter-stimulus interval between trials


class Speech(Task):
    # Overt-reading task: text stimuli are shown one at a time and the
    # participant reads each out loud.  Frame-by-frame drawing is driven by
    # yielding from generators (the Task framework flips the window per
    # yield — presumably; confirm against task_base).

    DEFAULT_INSTRUCTION = """You will be presented text that you need to read out loud right when you see it."""

    def __init__(self, words_file, *args, **kwargs):
        # words_file: path to a conditions file (csv/xlsx) with a 'text'
        # column, loaded via psychopy's importConditions.
        super().__init__(**kwargs)
        if os.path.exists(words_file):
            self.words_file = words_file
            self.words_list = data.importConditions(self.words_file)
        else:
            raise ValueError('File %s does not exists' % words_file)

    def instructions(self, exp_win, ctl_win):
        # Display the instruction text for INSTRUCTION_DURATION seconds.
        # self.instruction is provided by the Task base class — presumably
        # initialised from DEFAULT_INSTRUCTION; verify in task_base.
        screen_text = visual.TextStim(
            exp_win,
            text=self.instruction,
            alignHoriz="center",
            color='white',
            wrapWidth=config.WRAP_WIDTH)

        for frameN in range(config.FRAME_RATE * config.INSTRUCTION_DURATION):
            screen_text.draw(exp_win)
            if ctl_win:
                # Mirror the stimulus on the experimenter's control window.
                screen_text.draw(ctl_win)
            yield ()

    def _run(self, exp_win, ctl_win):
        # Present all trials in random order, logging onsets/offsets into
        # the TrialHandler so they can be saved afterwards.
        self.trials = data.TrialHandler(self.words_list, 1, method='random')
        text = visual.TextStim(
            exp_win,
            text='',
            alignHoriz="center",
            color='white')

        exp_win.logOnFlip(level=logging.EXP, msg='speech: task starting at %f' % time.time())

        # Initial baseline: blank screen.
        for frameN in range(config.FRAME_RATE * BASELINE_BEGIN):
            yield ()

        for trial in self.trials:
            text.text = trial['text']
            exp_win.logOnFlip(level=logging.EXP, msg='speech: %s' % text.text)
            # self.task_timer comes from the Task base class — assumed to be
            # a clock started at task onset; TODO confirm.
            trial['onset'] = self.task_timer.getTime()

            # Draw the stimulus for STIMULI_DURATION seconds.
            for frameN in range(config.FRAME_RATE * STIMULI_DURATION):
                text.draw(exp_win)
                if ctl_win:
                    text.draw(ctl_win)
                yield ()

            trial['offset'] = self.task_timer.getTime()
            trial['duration'] = trial['offset'] - trial['onset']

            # Inter-stimulus rest (nothing drawn).
            exp_win.logOnFlip(level=logging.EXP, msg='speech: rest')
            for frameN in range(config.FRAME_RATE * ISI):
                yield ()

        # Final baseline.
        for frameN in range(config.FRAME_RATE * BASELINE_END):
            yield ()

    def save(self):
        # Persist trial timings; _generate_tsv_filename is provided by the
        # Task base class — TODO confirm its output path convention.
        self.trials.saveAsWideText(self._generate_tsv_filename())
StarcoderdataPython
54662
import threading
import calendar
from datetime import date, datetime

import app.models
import app.view
from app.models import session
from app.models.booking import Booking
from app.models.returns import Returns


def init_database():
    """Ensure the app.models package has been imported/initialised."""
    # NOTE(review): this is an attribute access, not a call — it does nothing
    # beyond referencing the already-imported package.  If the intent was to
    # run initialisation code, it should probably be a call; confirm against
    # app.models before changing behaviour.
    app.models.__init__


def init_gui():
    """Ensure the app.view package has been imported/initialised."""
    # NOTE(review): attribute access, not a call — see init_database().
    app.view.__init__


def generate_invoices():
    """Generate end-of-month invoices (not yet implemented).

    The original draft logic is kept below for reference.
    """
    # today = date.today()
    # this_month = today.strftime("%m")
    # if int(today.strftime("%d")) is int(get_last_day_of_month(int(this_month))):
    #     _returns = session.query(Returns).filter(Returns.date < get_last_day_of_month(int(this_month))).all()
    #     _bookings = []
    #     for returns in _returns:
    #         amount = 0
    #         booking = session.query(Booking).filter(Booking.id == returns.booking_id)
    #         # returns.booking_id
    #         if returns.date > booking.booked_date + datetime.timedelta(days=float(booking.duration_of_booking)):
    #             amount += booking.daily_price
    pass


def get_last_day_of_month(month, year=2020):
    """Return the number of the last day of *month*.

    Parameters
    ----------
    month : int
        Month number, 1-12.
    year : int, optional
        Calendar year.  Defaults to 2020, preserving the original
        hard-coded behaviour, but may now be supplied explicitly so the
        helper works for any year (leap years included).
    """
    return calendar.monthrange(year, month)[1]


if __name__ == '__main__':
    # using separate threads for each package to improve the performance
    # NOTE(review): all three threads are daemons and the main thread falls
    # off the end immediately, so the interpreter may terminate them before
    # they finish — confirm whether join() calls were intended.
    t = threading.Thread(target=init_database, args=())
    t.daemon = True
    t.start()

    t = threading.Thread(target=init_gui, args=())
    t.daemon = True
    t.start()

    t = threading.Thread(target=generate_invoices, args=())
    t.daemon = True
    t.start()
StarcoderdataPython
1648277
# Python - 2.7.6 test.assert_equals( balance_statement('ZNGA 1300 2.66 B, CLH15.NYM 50 56.32 B, OWW 1000 11.623 B, OGG 20 580.1 B'), 'Buy: 29499 Sell: 0' )
StarcoderdataPython
1710710
<reponame>dirble/streamscrobbler-python<gh_stars>10-100 from streamscrobbler import *
StarcoderdataPython
69883
import dash_bootstrap_components as dbc
from dash import html

from .util import make_subheading

# Demo "Form" section: a username field and a password field, each with a
# help-text link underneath.
form = html.Div(
    [
        make_subheading("Form", "form"),
        dbc.Form(
            [
                html.Div(
                    [
                        dbc.Label("Username"),
                        dbc.Input(
                            placeholder="Enter your username",
                            type="text",
                        ),
                        dbc.FormText(
                            [
                                "Can't remember your username? ",
                                html.A(
                                    "Click here.",
                                    href="#",
                                    className="text-muted",
                                    style={"textDecoration": "underline"},
                                ),
                            ]
                        ),
                    ]
                ),
                html.Div(
                    [
                        # BUG FIX: this label read "Username" (copy-paste from
                        # the field above) although the input, placeholder and
                        # help text are all for the password.
                        dbc.Label("Password"),
                        dbc.Input(
                            placeholder="Enter your password",
                            type="password",
                        ),
                        dbc.FormText(
                            [
                                "Can't remember your password? ",
                                html.A(
                                    "Click here.",
                                    href="#",
                                    className="text-muted",
                                    style={"textDecoration": "underline"},
                                ),
                            ]
                        ),
                    ]
                ),
            ]
        ),
    ],
    className="mb-4",
)
StarcoderdataPython
3379681
'''
NORMALISE - Normalises image values to 0-1, or to a desired mean and variance.

n = normalize(im)
    Offsets and rescales the image so that the minimum value is 0 and the
    maximum value is 1.  If the image is colour (3-D array) it is converted
    to HSV and only the value/intensity component is normalised to 0-1
    before being converted back to RGB.

n = normalize(im, reqmean, reqvar)
    Offsets and rescales the image so that it has mean `reqmean` and
    variance `reqvar`.  Colour images cannot be normalised in this manner.

Ported to Python from the MATLAB implementation by Peter Kovesi,
School of Computer Science & Software Engineering,
The University of Western Australia (BSD-style permission notice applies).
'''
import numpy as np


def normalize(im, reqmean=None, reqvar=None):
    """Normalise *im* to 0-1, or to mean *reqmean* and variance *reqvar*.

    Parameters
    ----------
    im : ndarray
        Grey-level image (2-D) or colour image (3-D, RGB in 0-1 range).
    reqmean, reqvar : float, optional
        Required mean and variance.  Supply both or neither — passing only
        one raises a TypeError downstream (unchanged original behaviour).

    Returns
    -------
    ndarray
        The normalised image.

    Raises
    ------
    ValueError
        If a colour image is combined with reqmean/reqvar.
    """
    if reqmean is None and reqvar is None:
        # Normalise to the 0-1 range.
        if im.ndim == 3:
            # Colour image: normalise only the value channel in HSV space.
            # Imported lazily so grey-scale use does not require skimage.
            from skimage.color import rgb2hsv, hsv2rgb
            hsv = rgb2hsv(im)
            v = hsv[:, :, 2]
            v = v - np.min(v)       # Just normalise value component
            v = v / np.max(v)       # NOTE(review): divides by zero for a constant channel
            hsv[:, :, 2] = v
            n = hsv2rgb(hsv)
        else:
            n = im - np.min(im)
            n = n / np.max(n)       # NOTE(review): divides by zero for a constant image
    else:
        # Normalise to the desired mean and variance.
        if im.ndim == 3:
            # BUG FIX: the original called the undefined MATLAB leftover
            # `error(...)`, which raised NameError instead of a clear error.
            raise ValueError('cannot normalise colour image to desired mean and variance')
        im = im - np.mean(im)
        im = im / np.std(im)        # Zero mean, unit std dev
        n = reqmean + im * np.sqrt(reqvar)
    return n
StarcoderdataPython
3213045
# Environment smoke test: verify the computer-vision stack is installed by
# importing each library and printing its version.

# import the libraries
import cv2
import dlib
import face_recognition

# printing the versions
print(cv2.__version__)
print(dlib.__version__)
print(face_recognition.__version__)
StarcoderdataPython
128974
# Copyright (c) 2018 <NAME>.
# Uranium is released under the terms of the LGPLv3 or higher.

import functools  # For partial to update files that were changed.
import os.path  # To watch files for changes.
import threading
from typing import Callable, List, Optional, Set

from PyQt5.QtCore import QFileSystemWatcher  # To watch files for changes.

from UM.Decorators import deprecated
from UM.Logger import Logger
from UM.Mesh.ReadMeshJob import ReadMeshJob  # To reload a mesh when its file was changed.
from UM.Message import Message  # To display a message for reloading files that were changed.
from UM.Scene.Camera import Camera
from UM.Scene.Iterator.BreadthFirstIterator import BreadthFirstIterator
from UM.Scene.SceneNode import SceneNode
from UM.Signal import Signal, signalemitter
from UM.i18n import i18nCatalog
from UM.Platform import Platform

i18n_catalog = i18nCatalog("uranium")


##  Container object for the scene graph
#
#   The main purpose of this class is to provide the root SceneNode.
@signalemitter
class Scene:
    def __init__(self) -> None:
        super().__init__()

        from UM.Scene.SceneNode import SceneNode
        self._root = SceneNode(name = "Root")
        self._root.setCalculateBoundingBox(False)
        self._connectSignalsRoot()
        self._active_camera = None  # type: Optional[Camera]
        self._ignore_scene_changes = False
        self._lock = threading.Lock()

        # Watching file for changes.
        self._file_watcher = QFileSystemWatcher()
        self._file_watcher.fileChanged.connect(self._onFileChanged)

        self._reload_message = None  # type: Optional[Message]

        # Need to keep these in memory. This is a memory leak every time you
        # refresh, but a tiny one.
        self._callbacks = set()  # type: Set[Callable]

    def _connectSignalsRoot(self) -> None:
        self._root.transformationChanged.connect(self.sceneChanged)
        self._root.childrenChanged.connect(self.sceneChanged)
        self._root.meshDataChanged.connect(self.sceneChanged)

    def _disconnectSignalsRoot(self) -> None:
        self._root.transformationChanged.disconnect(self.sceneChanged)
        self._root.childrenChanged.disconnect(self.sceneChanged)
        self._root.meshDataChanged.disconnect(self.sceneChanged)

    def setIgnoreSceneChanges(self, ignore_scene_changes: bool) -> None:
        if self._ignore_scene_changes != ignore_scene_changes:
            self._ignore_scene_changes = ignore_scene_changes
            if self._ignore_scene_changes:
                self._disconnectSignalsRoot()
            else:
                self._connectSignalsRoot()

    ##  Gets the global scene lock.
    #
    #   Use this lock to prevent any read or write actions on the scene from other threads,
    #   assuming those threads also properly acquire the lock. Most notably, this
    #   prevents the rendering thread from rendering the scene while it is changing.
    def getSceneLock(self) -> threading.Lock:
        return self._lock

    ##  Get the root node of the scene.
    def getRoot(self) -> "SceneNode":
        return self._root

    ##  Change the root node of the scene
    def setRoot(self, node: "SceneNode") -> None:
        if self._root != node:
            if not self._ignore_scene_changes:
                self._disconnectSignalsRoot()
            self._root = node
            if not self._ignore_scene_changes:
                self._connectSignalsRoot()
            self.rootChanged.emit()

    rootChanged = Signal()

    ##  Get the camera that should be used for rendering.
    def getActiveCamera(self) -> Optional[Camera]:
        return self._active_camera

    def getAllCameras(self) -> List[Camera]:
        cameras = []
        for node in BreadthFirstIterator(self._root):
            if isinstance(node, Camera):
                cameras.append(node)
        return cameras

    ##  Set the camera that should be used for rendering.
    #   \param name The name of the camera to use.
    def setActiveCamera(self, name: str) -> None:
        camera = self.findCamera(name)
        if camera and camera != self._active_camera:
            if self._active_camera:
                self._active_camera.perspectiveChanged.disconnect(self.sceneChanged)
            self._active_camera = camera
            self._active_camera.perspectiveChanged.connect(self.sceneChanged)
        else:
            Logger.log("w", "Couldn't find camera with name [%s] to activate!" % name)

    ##  Signal that is emitted whenever something in the scene changes.
    #   \param object The object that triggered the change.
    sceneChanged = Signal()

    ##  Find an object by id.
    #
    #   \param object_id The id of the object to search for, as returned by the python id() method.
    #
    #   \return The object if found, or None if not.
    def findObject(self, object_id: int) -> Optional["SceneNode"]:
        for node in BreadthFirstIterator(self._root):
            if id(node) == object_id:
                return node
        return None

    def findCamera(self, name: str) -> Optional[Camera]:
        for node in BreadthFirstIterator(self._root):
            if isinstance(node, Camera) and node.getName() == name:
                return node
        return None

    ##  Add a file to be watched for changes.
    #   \param file_path The path to the file that must be watched.
    def addWatchedFile(self, file_path: str) -> None:
        # The QT 5.10.0 issue, only on Windows. Cura crashes after loading a stl file from USB/sd-card/Cloud-based drive
        if not Platform.isWindows():
            self._file_watcher.addPath(file_path)

    ##  Remove a file so that it will no longer be watched for changes.
    #   \param file_path The path to the file that must no longer be watched.
    def removeWatchedFile(self, file_path: str) -> None:
        # The QT 5.10.0 issue, only on Windows. Cura crashes after loading a stl file from USB/sd-card/Cloud-based drive
        if not Platform.isWindows():
            self._file_watcher.removePath(file_path)

    ##  Triggered whenever a file is changed that we currently have loaded.
    def _onFileChanged(self, file_path: str) -> None:
        if not os.path.isfile(file_path) or os.path.getsize(file_path) == 0:  # File doesn't exist any more, or it is empty
            return

        # Multiple nodes may be loaded from the same file at different stages. Reload them all.
        from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator  # To find which nodes to reload when files have changed.
        modified_nodes = [node for node in DepthFirstIterator(self.getRoot()) if node.getMeshData() and node.getMeshData().getFileName() == file_path]  # type: ignore

        if modified_nodes:
            # Hide the message if it was already visible
            if self._reload_message is not None:
                self._reload_message.hide()

            # BUG FIX: the message string lacked the "{filename}" placeholder,
            # so .format(filename=...) was a no-op and the dialog never showed
            # which file changed.
            self._reload_message = Message(i18n_catalog.i18nc("@info", "Would you like to reload {filename}?").format(filename = os.path.basename(file_path)),
                                           title = i18n_catalog.i18nc("@info:title", "File has been modified"))
            self._reload_message.addAction("reload", i18n_catalog.i18nc("@action:button", "Reload"), icon = "", description = i18n_catalog.i18nc("@action:description", "This will trigger the modified files to reload from disk."))
            self._reload_callback = functools.partial(self._reloadNodes, modified_nodes)
            self._reload_message.actionTriggered.connect(self._reload_callback)
            self._reload_message.show()

    ##  Reloads a list of nodes after the user pressed the "Reload" button.
    #   \param nodes The list of nodes that needs to be reloaded.
    #   \param message The message that triggered the action to reload them.
    #   \param action The button that triggered the action to reload them.
    def _reloadNodes(self, nodes: List["SceneNode"], message: str, action: str) -> None:
        if action != "reload":
            return
        if self._reload_message is not None:
            self._reload_message.hide()
        for node in nodes:
            meshdata = node.getMeshData()
            if meshdata:
                filename = meshdata.getFileName()
                if not filename or not os.path.isfile(filename):  # File doesn't exist any more.
                    continue
                job = ReadMeshJob(filename)
                reload_finished_callback = functools.partial(self._reloadJobFinished, node)

                # Store it so it won't get garbage collected. This is a memory
                # leak, but just one partial per reload so it's not much.
                self._callbacks.add(reload_finished_callback)

                job.finished.connect(reload_finished_callback)
                job.start()

    ##  Triggered when reloading has finished.
    #
    #   This then puts the resulting mesh data in the node.
    def _reloadJobFinished(self, replaced_node: SceneNode, job: ReadMeshJob) -> None:
        for node in job.getResult():
            mesh_data = node.getMeshData()
            if mesh_data:
                replaced_node.setMeshData(mesh_data)
            else:
                Logger.log("w", "Could not find a mesh in reloaded node.")
StarcoderdataPython
1670844
"""Simple example on how to move the robot.""" import robot_interfaces import robot_fingers def move_up_and_down( frontend: robot_fingers.TriFingerPlatformFrontend, episode_length: int ): """Move up and down multiple times using fixed goal positions. Args: frontend: Frontend of the TriFingerPro platform. Used to control the robot. episode_length: Number of time steps in the episode. Used to ensure that the limit is not exceeded. """ position_down = [-0.08, 0.84, -1.2] * 3 position_up = [0.5, 1.2, -2.4] * 3 target_positions = [position_down, position_up] i = 0 while True: print("Iteration {}".format(i)) action = robot_interfaces.trifinger.Action( position=target_positions[i % 2] ) i += 1 for _ in range(500): t = frontend.append_desired_action(action) frontend.wait_until_timeindex(t) # make sure to not exceed the number of allowed actions if t >= episode_length - 1: return robot_observation = frontend.get_robot_observation(t) print("Finger positions:", robot_observation.position) camera_observation = frontend.get_camera_observation(t) print("Object position:", camera_observation.object_pose.position)
StarcoderdataPython
147574
# Arithmetic
# +, -, *, /, %, all work with numbers and variables holding numbers
# +=, -=, *=, /=, i.e x += 1 is the same as x = x + 1

x = 1 + 1  # assign 1 + 1 to x

y = 0
y += x  # add x to y (augmented assignment)
StarcoderdataPython
1630569
<filename>tacker/db/vm/vm_db.py # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2013, 2014 Intel Corporation. # Copyright 2013, 2014 <NAME> <isaku.yamahata at intel com> # <isaku.yamahata at gmail com> # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc as orm_exc from tacker.api.v1 import attributes from tacker import context as t_context from tacker.db import api as qdbapi from tacker.db import db_base from tacker.db import model_base from tacker.db import models_v1 from tacker.extensions import vnfm from tacker import manager from tacker.openstack.common import log as logging from tacker.openstack.common import uuidutils from tacker.plugins.common import constants LOG = logging.getLogger(__name__) _ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE) _ACTIVE_UPDATE_ERROR_DEAD = ( constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE, constants.ERROR, constants.DEAD) ########################################################################### # db tables class DeviceTemplate(model_base.BASE, models_v1.HasId, models_v1.HasTenant): """Represents template to create hosting device.""" # Descriptive name name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) # service type that this service vm provides. # At first phase, this includes only single service # In future, single service VM may accomodate multiple services. 
service_types = orm.relationship('ServiceType', backref='template') # driver to create hosting device. e.g. noop, nova, heat, etc... infra_driver = sa.Column(sa.String(255)) # driver to communicate with service managment mgmt_driver = sa.Column(sa.String(255)) # (key, value) pair to spin up attributes = orm.relationship('DeviceTemplateAttribute', backref='template') class ServiceType(model_base.BASE, models_v1.HasId, models_v1.HasTenant): """Represents service type which hosting device provides. Since a device may provide many services, This is one-to-many relationship. """ template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id'), nullable=False) service_type = sa.Column(sa.String(255), nullable=False) class DeviceTemplateAttribute(model_base.BASE, models_v1.HasId): """Represents attributes necessary for spinning up VM in (key, value) pair key value pair is adopted for being agnostic to actuall manager of VMs like nova, heat or others. e.g. image-id, flavor-id for Nova. The interpretation is up to actual driver of hosting device. """ template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id'), nullable=False) key = sa.Column(sa.String(255), nullable=False) value = sa.Column(sa.TEXT(65535), nullable=True) class Device(model_base.BASE, models_v1.HasTenant): """Represents devices that hosts services. Here the term, 'VM', is intentionally avoided because it can be VM or other container. """ id = sa.Column(sa.String(255), primary_key=True, default=uuidutils.generate_uuid) template_id = sa.Column(sa.String(36), sa.ForeignKey('devicetemplates.id')) template = orm.relationship('DeviceTemplate') name = sa.Column(sa.String(255), nullable=True) description = sa.Column(sa.String(255), nullable=True) # sufficient information to uniquely identify hosting device. # In case of service VM, it's UUID of nova VM. instance_id = sa.Column(sa.String(255), nullable=True) # For a management tool to talk to manage this hosting device. # opaque string. 
# e.g. (driver, mgmt_url) = (ssh, ip address), ... mgmt_url = sa.Column(sa.String(255), nullable=True) attributes = orm.relationship("DeviceAttribute", backref="device") status = sa.Column(sa.String(255), nullable=False) class DeviceAttribute(model_base.BASE, models_v1.HasId): """Represents kwargs necessary for spinning up VM in (key, value) pair. key value pair is adopted for being agnostic to actuall manager of VMs like nova, heat or others. e.g. image-id, flavor-id for Nova. The interpretation is up to actual driver of hosting device. """ device_id = sa.Column(sa.String(255), sa.ForeignKey('devices.id'), nullable=False) key = sa.Column(sa.String(255), nullable=False) # json encoded value. example # "nic": [{"net-id": <net-uuid>}, {"port-id": <port-uuid>}] value = sa.Column(sa.TEXT(65535), nullable=True) class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin): @property def _core_plugin(self): return manager.TackerManager.get_plugin() def subnet_id_to_network_id(self, context, subnet_id): subnet = self._core_plugin.get_subnet(context, subnet_id) return subnet['network_id'] def __init__(self): qdbapi.register_models() super(VNFMPluginDb, self).__init__() def _get_resource(self, context, model, id): try: return self._get_by_id(context, model, id) except orm_exc.NoResultFound: if issubclass(model, DeviceTemplate): raise vnfm.DeviceTemplateNotFound(device_tempalte_id=id) elif issubclass(model, ServiceType): raise vnfm.ServiceTypeNotFound(service_type_id=id) if issubclass(model, Device): raise vnfm.DeviceNotFound(device_id=id) else: raise def _make_attributes_dict(self, attributes_db): return dict((attr.key, attr.value) for attr in attributes_db) def _make_service_types_list(self, service_types): return [{'id': service_type.id, 'service_type': service_type.service_type} for service_type in service_types] def _make_template_dict(self, template, fields=None): res = { 'attributes': self._make_attributes_dict(template['attributes']), 'service_types': 
self._make_service_types_list( template.service_types) } key_list = ('id', 'tenant_id', 'name', 'description', 'infra_driver', 'mgmt_driver') res.update((key, template[key]) for key in key_list) return self._fields(res, fields) def _make_dev_attrs_dict(self, dev_attrs_db): return dict((arg.key, arg.value) for arg in dev_attrs_db) def _make_device_dict(self, device_db, fields=None): LOG.debug(_('device_db %s'), device_db) LOG.debug(_('device_db attributes %s'), device_db.attributes) res = { 'device_template': self._make_template_dict(device_db.template), 'attributes': self._make_dev_attrs_dict(device_db.attributes), } key_list = ('id', 'tenant_id', 'name', 'description', 'instance_id', 'template_id', 'status', 'mgmt_url') res.update((key, device_db[key]) for key in key_list) return self._fields(res, fields) @staticmethod def _infra_driver_name(device_dict): return device_dict['device_template']['infra_driver'] @staticmethod def _mgmt_driver_name(device_dict): return device_dict['device_template']['mgmt_driver'] @staticmethod def _instance_id(device_dict): return device_dict['instance_id'] def create_device_template(self, context, device_template): template = device_template['device_template'] LOG.debug(_('template %s'), template) tenant_id = self._get_tenant_id_for_create(context, template) infra_driver = template.get('infra_driver') mgmt_driver = template.get('mgmt_driver') service_types = template.get('service_types') if (not attributes.is_attr_set(infra_driver)): LOG.debug(_('hosting device driver unspecified')) raise vnfm.InfraDriverNotSpecified() if (not attributes.is_attr_set(mgmt_driver)): LOG.debug(_('mgmt driver unspecified')) raise vnfm.MGMTDriverNotSpecified() if (not attributes.is_attr_set(service_types)): LOG.debug(_('service types unspecified')) raise vnfm.ServiceTypesNotSpecified() with context.session.begin(subtransactions=True): template_id = str(uuid.uuid4()) template_db = DeviceTemplate( id=template_id, tenant_id=tenant_id, 
name=template.get('name'), description=template.get('description'), infra_driver=infra_driver, mgmt_driver=mgmt_driver) context.session.add(template_db) for (key, value) in template.get('attributes', {}).items(): attribute_db = DeviceTemplateAttribute( id=str(uuid.uuid4()), template_id=template_id, key=key, value=value) context.session.add(attribute_db) for service_type in (item['service_type'] for item in template['service_types']): service_type_db = ServiceType( id=str(uuid.uuid4()), tenant_id=tenant_id, template_id=template_id, service_type=service_type) context.session.add(service_type_db) LOG.debug(_('template_db %(template_db)s %(attributes)s '), {'template_db': template_db, 'attributes': template_db.attributes}) return self._make_template_dict(template_db) def update_device_template(self, context, device_template_id, device_template): with context.session.begin(subtransactions=True): template_db = self._get_resource(context, DeviceTemplate, device_template_id) template_db.update(device_template['device_template']) return self._make_template_dict(template_db) def delete_device_template(self, context, device_template_id): with context.session.begin(subtransactions=True): # TODO(yamahata): race. 
prevent from newly inserting hosting device # that refers to this template devices_db = context.session.query(Device).filter_by( template_id=device_template_id).first() if devices_db is not None: raise vnfm.DeviceTemplateInUse( device_template_id=device_template_id) context.session.query(ServiceType).filter_by( template_id=device_template_id).delete() context.session.query(DeviceTemplateAttribute).filter_by( template_id=device_template_id).delete() template_db = self._get_resource(context, DeviceTemplate, device_template_id) context.session.delete(template_db) def get_device_template(self, context, device_template_id, fields=None): template_db = self._get_resource(context, DeviceTemplate, device_template_id) return self._make_template_dict(template_db) def get_device_templates(self, context, filters, fields=None): return self._get_collection(context, DeviceTemplate, self._make_template_dict, filters=filters, fields=fields) def choose_device_template(self, context, service_type, required_attributes=None): required_attributes = required_attributes or [] LOG.debug(_('required_attributes %s'), required_attributes) with context.session.begin(subtransactions=True): query = ( context.session.query(DeviceTemplate). filter( sa.exists(). where(sa.and_( DeviceTemplate.id == ServiceType.template_id, ServiceType.service_type == service_type)))) for key in required_attributes: query = query.filter( sa.exists(). where(sa.and_( DeviceTemplate.id == DeviceTemplateAttribute.template_id, DeviceTemplateAttribute.key == key))) LOG.debug(_('statements %s'), query) template_db = query.first() if template_db: return self._make_template_dict(template_db) def _device_attribute_update_or_create( self, context, device_id, key, value): arg = (self._model_query(context, DeviceAttribute). filter(DeviceAttribute.device_id == device_id). 
filter(DeviceAttribute.key == key).first()) if arg: arg.value = value else: arg = DeviceAttribute( id=str(uuid.uuid4()), device_id=device_id, key=key, value=value) context.session.add(arg) # called internally, not by REST API def _create_device_pre(self, context, device): device = device['device'] LOG.debug(_('device %s'), device) tenant_id = self._get_tenant_id_for_create(context, device) template_id = device['template_id'] name = device.get('name') device_id = device.get('id') or str(uuid.uuid4()) attributes = device.get('attributes', {}) with context.session.begin(subtransactions=True): template_db = self._get_resource(context, DeviceTemplate, template_id) device_db = Device(id=device_id, tenant_id=tenant_id, name=name, description=template_db.description, instance_id=None, template_id=template_id, status=constants.PENDING_CREATE) context.session.add(device_db) for key, value in attributes.items(): arg = DeviceAttribute( id=str(uuid.uuid4()), device_id=device_id, key=key, value=value) context.session.add(arg) return self._make_device_dict(device_db) # called internally, not by REST API # intsance_id = None means error on creation def _create_device_post(self, context, device_id, instance_id, mgmt_url, device_dict): LOG.debug(_('device_dict %s'), device_dict) with context.session.begin(subtransactions=True): query = (self._model_query(context, Device). filter(Device.id == device_id). filter(Device.status == constants.PENDING_CREATE). one()) query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url}) if instance_id is None or device_dict['status'] == constants.ERROR: query.update({'status': constants.ERROR}) for (key, value) in device_dict['attributes'].items(): self._device_attribute_update_or_create(context, device_id, key, value) def _create_device_status(self, context, device_id, new_status): with context.session.begin(subtransactions=True): (self._model_query(context, Device). filter(Device.id == device_id). 
filter(Device.status == constants.PENDING_CREATE). update({'status': new_status})) def _get_device_db(self, context, device_id, current_statuses, new_status): try: device_db = ( self._model_query(context, Device). filter(Device.id == device_id). filter(Device.status.in_(current_statuses)). with_lockmode('update').one()) except orm_exc.NoResultFound: raise vnfm.DeviceNotFound(device_id=device_id) if device_db.status == constants.PENDING_UPDATE: raise vnfm.DeviceInUse(device_id=device_id) device_db.update({'status': new_status}) return device_db def _update_device_pre(self, context, device_id): with context.session.begin(subtransactions=True): device_db = self._get_device_db( context, device_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE) return self._make_device_dict(device_db) def _update_device_post(self, context, device_id, new_status, new_device_dict=None): with context.session.begin(subtransactions=True): (self._model_query(context, Device). filter(Device.id == device_id). filter(Device.status == constants.PENDING_UPDATE). update({'status': new_status})) dev_attrs = new_device_dict.get('attributes', {}) (context.session.query(DeviceAttribute). filter(DeviceAttribute.device_id == device_id). filter(~DeviceAttribute.key.in_(dev_attrs.keys())). delete(synchronize_session='fetch')) for (key, value) in dev_attrs.items(): self._device_attribute_update_or_create(context, device_id, key, value) def _delete_device_pre(self, context, device_id): with context.session.begin(subtransactions=True): device_db = self._get_device_db( context, device_id, _ACTIVE_UPDATE_ERROR_DEAD, constants.PENDING_DELETE) return self._make_device_dict(device_db) def _delete_device_post(self, context, device_id, error): with context.session.begin(subtransactions=True): query = ( self._model_query(context, Device). filter(Device.id == device_id). filter(Device.status == constants.PENDING_DELETE)) if error: query.update({'status': constants.ERROR}) else: (self._model_query(context, DeviceAttribute). 
filter(DeviceAttribute.device_id == device_id).delete()) query.delete() # reference implementation. needs to be overrided by subclass def create_device(self, context, device): device_dict = self._create_device_pre(context, device) # start actual creation of hosting device. # Waiting for completion of creation should be done backgroundly # by another thread if it takes a while. instance_id = str(uuid.uuid4()) device_dict['instance_id'] = instance_id self._create_device_post(context, device_dict['id'], instance_id, None, device_dict) self._create_device_status(context, device_dict['id'], constants.ACTIVE) return device_dict # reference implementation. needs to be overrided by subclass def update_device(self, context, device_id, device): device_dict = self._update_device_pre(context, device_id) # start actual update of hosting device # waiting for completion of update should be done backgroundly # by another thread if it takes a while self._update_device_post(context, device_id, constants.ACTIVE) return device_dict # reference implementation. needs to be overrided by subclass def delete_device(self, context, device_id): self._delete_device_pre(context, device_id) # start actual deletion of hosting device. # Waiting for completion of deletion should be done backgroundly # by another thread if it takes a while. 
self._delete_device_post(context, device_id, False) def get_device(self, context, device_id, fields=None): device_db = self._get_resource(context, Device, device_id) return self._make_device_dict(device_db, fields) def get_devices(self, context, filters=None, fields=None): devices = self._get_collection(context, Device, self._make_device_dict, filters=filters, fields=fields) # Ugly hack to mask internaly used record return [device for device in devices if uuidutils.is_uuid_like(device['id'])] def _mark_device_status(self, device_id, exclude_status, new_status): context = t_context.get_admin_context() with context.session.begin(subtransactions=True): try: device_db = ( self._model_query(context, Device). filter(Device.id == device_id). filter(~Device.status.in_(exclude_status)). with_lockmode('update').one()) except orm_exc.NoResultFound: LOG.warning(_('no device found %s'), device_id) return False device_db.update({'status': new_status}) return True def _mark_device_error(self, device_id): return self._mark_device_status( device_id, [constants.DEAD], constants.ERROR) def _mark_device_dead(self, device_id): exclude_status = [ constants.DOWN, constants.PENDING_CREATE, constants.PENDING_UPDATE, constants.PENDING_DELETE, constants.INACTIVE, constants.ERROR] return self._mark_device_status( device_id, exclude_status, constants.DEAD) # used by failure policy def rename_device_id(self, context, device_id, new_device_id): # ugly hack... context = t_context.get_admin_context() with context.session.begin(subtransactions=True): device_db = self._get_resource(context, Device, device_id) new_device_db = Device( id=new_device_id, tenant_id=device_db.tenant_id, template_id=device_db.template_id, name=device_db.name, description=device_db.description, instance_id=device_db.instance_id, mgmt_url=device_db.mgmt_url, status=device_db.status) context.session.add(new_device_db) (self._model_query(context, DeviceAttribute). filter(DeviceAttribute.device_id == device_id). 
update({'device_id': new_device_id})) context.session.delete(device_db) def get_vnfs(self, context, filters=None, fields=None): return self.get_devices(context, filters, fields) def get_vnf(self, context, vnf_id, fields=None): return self.get_device(context, vnf_id, fields) def delete_vnfd(self, context, vnfd_id): self.delete_device_template(context, vnfd_id) def get_vnfd(self, context, vnfd_id, fields=None): return self.get_device_template(context, vnfd_id, fields) def get_vnfds(self, context, filters=None, fields=None): return self.get_device_templates(context, filters, fields)
StarcoderdataPython
1738682
"""Asynchronous Python client for SolarEnergy devices.""" import aiohttp import asyncio import async_timeout from yarl import URL from xml.parsers.expat import ExpatError from typing import Any from .__version__ import __version__ from .const import ( DEFAULT_PORT, DEFAULT_TIMEOUT, ) from .exceptions import ( SolarEnergyError, SolarEnergyConnectionError, SolarEnergyTimeoutError, SolarEnergyClientError, SolarEnergyResponseError, SolarEnergyParseError, SolarEnergyContentTypeError, SolarEnergyAttributeError, ) from .parser import parse_xml_to_json class SolarEnergyInverter: """Main class for handling connections with SolarEnergy devices.""" def __init__( self, host: str = None, port: int = DEFAULT_PORT, username: str = None, password: str = <PASSWORD>, request_timeout: int = DEFAULT_TIMEOUT, session: aiohttp.ClientSession = None, tls: bool = False, verify_ssl: bool = False, user_agent: str = None, ) -> None: """Initialize connection with SolarEnergy device.""" self.host = host self.port = port self.username = username self.password = password self.request_timeout = request_timeout self._session = session self._close_session = False self.tls = tls self.verify_ssl = verify_ssl self.user_agent = user_agent if user_agent is None: self.user_agent = f"PythonSolarEnergy/{__version__}" async def _request(self, endpoint: str) -> aiohttp.ClientResponse: """Handle a request to an SolarEnergy device.""" method = "GET" if self.host is None: url = URL.build( path=endpoint ) else: scheme = "https" if self.tls else "http" url = URL.build( scheme=scheme, host=self.host, port=self.port, path=endpoint ) auth = None if self.username and self.password: auth = aiohttp.BasicAuth(self.username, self.password) headers = { "User-Agent": self.user_agent, "Content-Type": "text/xml", "Accept": "text/xml, text/plain, */*", } if self._session is None: self._session = aiohttp.ClientSession() self._close_session = True try: with async_timeout.timeout(self.request_timeout): response = await 
self._session.request( method, url, auth=auth, headers=headers, ssl=self.verify_ssl, raise_for_status=True ) except asyncio.TimeoutError as exc: raise SolarEnergyTimeoutError( "Timeout occurred while connecting to SolarEnergy inverter. Maybe it's on standby." ) from exc except (aiohttp.ClientError, RuntimeError) as exc: raise SolarEnergyClientError( "Error occurred while communicating with SolarEnergy inverter." ) from exc except NotImplementedError as exc: raise SolarEnergyConnectionError( "Unknown error occurred while communicating with SolarEnergy inverter." ) from exc return response async def _execute( self, endpoint: str, root_element: str = None ) -> Any: """Send a request message to the SolarEnergy device.""" # Todo: return_type: xml, dict, etc content_type = "text/xml" response = await self._request(endpoint) try: data = await response.json( loads=parse_xml_to_json, content_type=content_type ) return data if root_element is None else data[root_element] except ExpatError as exc: raise SolarEnergyParseError( "Received malformed xml from inverter." ) from exc except aiohttp.ContentTypeError as exc: raise SolarEnergyContentTypeError( "Received unexpected mime type from inverter." ) from exc except (KeyError, TypeError) as exc: raise SolarEnergyAttributeError( "Received invalid data from inverter." ) from exc except NotImplementedError as exc: raise SolarEnergyResponseError( "Unknown error occurred with response of SolarEnergy inverter." 
) from exc async def get_info(self) -> Any: return await self._execute( endpoint="/equipment_data.xml", root_element="equipment_data" ) async def get_data(self) -> Any: return await self._execute( endpoint="/real_time_data.xml", root_element="real_time_data" ) async def get_network(self) -> Any: return await self._execute( endpoint="/network_data.xml", root_element="network_data" ) async def close_session(self) -> None: """Close open client session.""" if self._session and self._close_session: await self._session.close() async def __aenter__(self) -> "SolarEnergyInverter": """Async enter.""" return self async def __aexit__(self, *exc_info) -> None: """Async exit.""" await self.close_session()
StarcoderdataPython
3384196
#!/usr/bin/env python3 import fileinput mem = [int(n.strip()) for n in next(fileinput.input()).split()] size = len(mem) states = set() states.add('.'.join(str(n) for n in mem)) part2 = None steps = 0 while True: i = mem.index(max(mem)) x = mem[i] mem[i] = 0 while x > 0: i += 1 mem[i % size] += 1 x -= 1 steps += 1 statehash = '.'.join(str(n) for n in mem) if statehash in states: if not part2: print("Part 1:", steps) part2 = statehash part1_steps = steps else: if statehash == part2: print("Part 2:", steps - part1_steps) break else: states.add(statehash)
StarcoderdataPython
1725477
import unittest

import utils


# O(n^2) time. O(n) space. Space-optimized DP.
class Solution:
    def minimumTotal(self, triangle):
        """Return the minimum top-to-bottom path sum through the triangle.

        From position j of a row you may step to index j or j + 1 of the
        next row.  A single dp row is reused across iterations (top-down).

        :type triangle: List[List[int]]
        :rtype: int
        """
        n = len(triangle)
        # dp[i][j]: the minimum path sum to reach triangle[i][j]
        # (only one row is kept; dp[j] holds the previous row's value until
        # it is overwritten within the same sweep).
        dp = [0] * n
        for i in range(n):
            # Sentinel acting as +infinity for the j == 0 edge, where no
            # upper-left parent exists.
            # NOTE(review): assumes all partial path sums stay below 2**31-1
            # (true for LeetCode's constraints) -- confirm if inputs change.
            prev = 0x7FFFFFFF
            for j in range(i + 1):
                curr = triangle[i][j]
                if i >= 1:
                    # Entering column j, ``prev`` holds dp[i-1][j-1] (the
                    # upper-left parent; the sentinel at j == 0).  The upper
                    # parent dp[i-1][j] only exists when j < i.
                    if j < i:
                        prev = min(prev, dp[j])
                    curr += prev
                    # Save the not-yet-overwritten dp[i-1][j]: it is the
                    # upper-left parent of column j + 1.
                    prev = dp[j]
                dp[j] = curr
        # The answer ends at any position of the bottom row.
        return min(dp)


class Test(unittest.TestCase):
    def test(self):
        # Data-driven cases loaded from the JSON fixture next to this file.
        cases = utils.load_test_json(__file__).test_cases
        for case in cases:
            args = str(case.args)
            actual = Solution().minimumTotal(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=args)


if __name__ == '__main__':
    unittest.main()
StarcoderdataPython
3296006
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
#   <NAME> - B-Open - https://bopen.eu
#

import functools
import logging
import pkgutil
import typing as T  # noqa

import cffi

LOG = logging.getLogger(__name__)

ffi = cffi.FFI()
# Declare the ecCodes C API to cffi from the header snippets bundled with
# the package, so the shared library can be called without compiling an
# extension module at install time.
ffi.cdef(
    pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8')
    + pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)


class RaiseOnAttributeAccess(object):
    """Placeholder bound to ``lib`` when the ecCodes library fails to load.

    Importing this module still succeeds; any attribute access on the
    placeholder raises ``RuntimeError`` chained to the original load error,
    so the failure only surfaces when ecCodes is actually used.
    """

    def __init__(self, exc, message):
        self.message = message
        self.exc = exc

    def __getattr__(self, attr):
        raise RuntimeError(self.message) from self.exc


# Try the platform-dependent spellings of the ecCodes library name in turn;
# the first successful dlopen wins.  If every attempt fails, ``lib`` is left
# as the lazy-raising placeholder created in the last iteration.
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
    try:
        lib = ffi.dlopen(libname)
        LOG.info("ecCodes library found using name '%s'.", libname)
        break
    except OSError as exc:
        # lazy exception
        lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
        LOG.info("ecCodes library not found using name '%s'.", libname)


# default encoding for ecCodes strings
ENC = 'ascii'

#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """

# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e100
GRIB_MISSING_LONG = 2147483647

CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG # # Helper values to discriminate key types # CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED CODES_TYPE_LONG = lib.GRIB_TYPE_LONG CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE CODES_TYPE_STRING = lib.GRIB_TYPE_STRING CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING KEYTYPES = {1: int, 2: float, 3: str} CODES_KEYS_ITERATOR_ALL_KEYS = 0 CODES_KEYS_ITERATOR_SKIP_READ_ONLY = 1 << 0 CODES_KEYS_ITERATOR_SKIP_OPTIONAL = 1 << 1 CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = 1 << 2 CODES_KEYS_ITERATOR_SKIP_CODED = 1 << 3 CODES_KEYS_ITERATOR_SKIP_COMPUTED = 1 << 4 CODES_KEYS_ITERATOR_SKIP_DUPLICATES = 1 << 5 CODES_KEYS_ITERATOR_SKIP_FUNCTION = 1 << 6 CODES_KEYS_ITERATOR_DUMP_ONLY = 1 << 7 # # Helper functions for error reporting # def grib_get_error_message(code): # type: (int) -> str message = lib.grib_get_error_message(code) return ffi.string(message).decode(ENC) class GribInternalError(Exception): def __init__(self, code, message=None, *args): self.code = code self.eccode_message = grib_get_error_message(code) if message is None: message = '%s (%s).' 
% (self.eccode_message, code) super(GribInternalError, self).__init__(message, code, *args) class KeyValueNotFoundError(GribInternalError): """Key/value not found.""" class ReadOnlyError(GribInternalError): """Value is read only.""" class FileNotFoundError(GribInternalError): """File not found.""" ERROR_MAP = {-18: ReadOnlyError, -10: KeyValueNotFoundError, -7: FileNotFoundError} def check_last(func): @functools.wraps(func) def wrapper(*args): code = ffi.new('int *') args += (code,) retval = func(*args) if code[0] != lib.GRIB_SUCCESS: if code[0] in ERROR_MAP: raise ERROR_MAP[code[0]](code[0]) else: raise GribInternalError(code[0]) return retval return wrapper def check_return(func): @functools.wraps(func) def wrapper(*args): code = func(*args) if code != lib.GRIB_SUCCESS: if code in ERROR_MAP: raise ERROR_MAP[code](code) else: raise GribInternalError(code) return wrapper # # CFFI reimplementation of gribapi.py functions with codes names # def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None): if context is None: context = ffi.NULL try: retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind) if retval == ffi.NULL: raise EOFError("End of file: %r" % fileobj) else: return retval except GribInternalError as ex: if ex.code == lib.GRIB_END_OF_FILE: raise EOFError("End of file: %r" % fileobj) raise codes_new_from_file = codes_grib_new_from_file def codes_clone(handle): # type: (cffi.FFI.CData) -> cffi.FFI.CData cloned_handle = lib.codes_handle_clone(handle) if cloned_handle is ffi.NULL: raise GribInternalError(lib.GRIB_NULL_POINTER) return cloned_handle codes_release = lib.codes_handle_delete _codes_get_size = check_return(lib.codes_get_size) def codes_get_size(handle, key): # type: (cffi.FFI.CData, str) -> int """ Get the number of coded value from a key. If several keys of the same name are present, the total sum is returned. 
:param bytes key: the keyword to get the size of :rtype: int """ size = ffi.new('size_t *') _codes_get_size(handle, key.encode(ENC), size) return size[0] _codes_get_length = check_return(lib.codes_get_length) def codes_get_string_length(handle, key): # type: (cffi.FFI.CData, str) -> int """ Get the length of the string representation of the key. If several keys of the same name are present, the maximum length is returned. :param bytes key: the keyword to get the string representation size of. :rtype: int """ size = ffi.new('size_t *') _codes_get_length(handle, key.encode(ENC), size) return size[0] _codes_get_bytes = check_return(lib.codes_get_bytes) def codes_get_bytes_array(handle, key, size): # type: (cffi.FFI.CData, str, int) -> T.List[int] """ Get unsigned chars array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: List(int) """ values = ffi.new('unsigned char[]', size) size_p = ffi.new('size_t *', size) _codes_get_bytes(handle, key.encode(ENC), values, size_p) return list(values) _codes_get_long_array = check_return(lib.codes_get_long_array) def codes_get_long_array(handle, key, size): # type: (cffi.FFI.CData, str, int) -> T.List[int] """ Get long array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: List(int) """ values = ffi.new('long[]', size) size_p = ffi.new('size_t *', size) _codes_get_long_array(handle, key.encode(ENC), values, size_p) return list(values) _codes_get_double_array = check_return(lib.codes_get_double_array) def codes_get_double_array(handle, key, size): # type: (cffi.FFI.CData, str, int) -> T.List[float] """ Get double array values from a key. 
:param bytes key: the keyword whose value(s) are to be extracted :rtype: T.List(float) """ values = ffi.new('double[]', size) size_p = ffi.new('size_t *', size) _codes_get_double_array(handle, key.encode(ENC), values, size_p) return list(values) _codes_get_string_array = check_return(lib.codes_get_string_array) def codes_get_string_array(handle, key, size, length=None): # type: (cffi.FFI.CData, str, int, int) -> T.List[bytes] """ Get string array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: T.List[bytes] """ if length is None: length = codes_get_string_length(handle, key) values_keepalive = [ffi.new('char[]', length) for _ in range(size)] values = ffi.new('char*[]', values_keepalive) size_p = ffi.new('size_t *', size) _codes_get_string_array(handle, key.encode(ENC), values, size_p) return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])] def codes_get_long(handle, key): # type: (cffi.FFI.CData, str) -> int value = ffi.new('long *') _codes_get_long = check_return(lib.codes_get_long) _codes_get_long(handle, key.encode(ENC), value) return value[0] def codes_get_double(handle, key): # type: (cffi.FFI.CData, str) -> int value = ffi.new('double *') _codes_get_long = check_return(lib.codes_get_double) _codes_get_long(handle, key.encode(ENC), value) return value[0] def codes_get_string(handle, key, length=None): # type: (cffi.FFI.CData, str, int) -> str """ Get string element from a key. It may or may not fail in case there are more than one key in a message. Outputs the last element. 
:param bytes key: the keyword to select the value of :param int length: (optional) length of the string :rtype: bytes """ if length is None: length = codes_get_string_length(handle, key) values = ffi.new('char[]', length) length_p = ffi.new('size_t *', length) _codes_get_string = check_return(lib.codes_get_string) _codes_get_string(handle, key.encode(ENC), values, length_p) return ffi.string(values, length_p[0]).decode(ENC) _codes_get_native_type = check_return(lib.codes_get_native_type) def codes_get_native_type(handle, key): # type: (cffi.FFI.CData, str) -> int grib_type = ffi.new('int *') _codes_get_native_type(handle, key.encode(ENC), grib_type) return KEYTYPES.get(grib_type[0], grib_type[0]) def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG): # type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any if key_type is None: key_type = codes_get_native_type(handle, key) if size is None: size = codes_get_size(handle, key) if key_type == int: return codes_get_long_array(handle, key, size) elif key_type == float: return codes_get_double_array(handle, key, size) elif key_type == str: return codes_get_string_array(handle, key, size, length=length) elif key_type == CODES_TYPE_BYTES: return codes_get_bytes_array(handle, key, size) else: log.warning("Unknown GRIB key type: %r", key_type) def codes_get(handle, key, key_type=None, length=None, log=LOG): # type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any if key_type is None: key_type = codes_get_native_type(handle, key) if key_type == int: return codes_get_long(handle, key) elif key_type == float: return codes_get_double(handle, key) elif key_type == str: return codes_get_string(handle, key, length=length) else: log.warning("Unknown GRIB key type: %r", key_type) def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None): # type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData if namespace is None: bnamespace = ffi.NULL else: bnamespace = 
namespace.encode(ENC) codes_keys_iterator_new = lib.codes_keys_iterator_new return codes_keys_iterator_new(handle, flags, bnamespace) def codes_keys_iterator_next(iterator_id): return lib.codes_keys_iterator_next(iterator_id) def codes_keys_iterator_get_name(iterator): ret = lib.codes_keys_iterator_get_name(iterator) return ffi.string(ret).decode(ENC) def codes_keys_iterator_delete(iterator_id): codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete) codes_keys_iterator_delete(iterator_id) def codes_get_api_version(): """ Get the API version. Returns the version of the API as a string in the format "major.minor.revision". """ ver = lib.codes_get_api_version() patch = ver % 100 ver = ver // 100 minor = ver % 100 major = ver // 100 return "%d.%d.%d" % (major, minor, patch) def portable_handle_new_from_samples(samplename, product_kind): # # re-implement codes_grib_handle_new_from_samples in a portable way. # imports are here not to pollute the head of the file with (hopefully!) 
temporary stuff # import os.path import platform handle = ffi.NULL if platform.platform().startswith('Windows'): samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL)).decode('utf-8') sample_path = os.path.join(samples_folder, samplename + '.tmpl') try: with open(sample_path, 'rb') as file: handle = codes_grib_new_from_file(file, product_kind) except Exception: logging.exception("creating empty message from sample failed") return handle def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB): # type: (str, int) -> cffi.FFI.CData # work around an ecCodes bug on Windows, hopefully this will go away soon handle = portable_handle_new_from_samples(samplename, product_kind) if handle != ffi.NULL: return handle # end of work-around if product_kind == CODES_PRODUCT_GRIB: handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC)) elif product_kind == CODES_PRODUCT_BUFR: handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC)) else: raise NotImplementedError("product kind not supported: %r" % product_kind) if handle == ffi.NULL: raise ValueError("sample not found: %r" % samplename) return handle def codes_set_long(handle, key, value): # type: (cffi.FFI.CData, str, int) -> None codes_set_long = check_return(lib.codes_set_long) codes_set_long(handle, key.encode(ENC), value) def codes_set_double(handle, key, value): # type: (cffi.FFI.CData, str, float) -> None codes_set_double = check_return(lib.codes_set_double) codes_set_double(handle, key.encode(ENC), value) def codes_set_string(handle, key, value): # type: (cffi.FFI.CData, str, str) -> None size = ffi.new('size_t *', len(value)) codes_set_string = check_return(lib.codes_set_string) codes_set_string(handle, key.encode(ENC), value.encode(ENC), size) def codes_set(handle, key, value): """""" if isinstance(value, int): codes_set_long(handle, key, value) elif isinstance(value, float): codes_set_double(handle, key, value) elif isinstance(value, str): 
codes_set_string(handle, key, value) else: raise TypeError("Unsupported type %r" % type(value)) def codes_set_double_array(handle, key, values): # type: (cffi.FFI.CData, str, T.List[float]) -> None size = len(values) c_values = ffi.new("double []", values) codes_set_double_array = check_return(lib.codes_set_double_array) codes_set_double_array(handle, key.encode(ENC), c_values, size) def codes_set_long_array(handle, key, values): # type: (cffi.FFI.CData, str, T.List[int]) -> None size = len(values) c_values = ffi.new("long []", values) codes_set_long_array = check_return(lib.codes_set_long_array) codes_set_long_array(handle, key.encode(ENC), c_values, size) def codes_set_array(handle, key, values): # type: (cffi.FFI.CData, str, T.List[T.Any]) -> None if len(values) > 0: if isinstance(values[0], float): codes_set_double_array(handle, key, values) elif isinstance(values[0], int): codes_set_long_array(handle, key, values) else: raise TypeError("Unsupported value type: %r" % type(values[0])) else: raise ValueError("Cannot set an empty list.") def codes_grib_multi_support_on(context=None): if context is None: context = ffi.NULL lib.codes_grib_multi_support_on(context) def codes_grib_multi_support_off(context=None): if context is None: context = ffi.NULL lib.codes_grib_multi_support_off(context) def codes_write(handle, outfile): # type: (cffi.FFI.CData, T.BinaryIO) -> None """ Write a coded message to a file. If the file does not exist, it is created. :param str path: (optional) the path to the GRIB file; defaults to the one of the open index. """ mess = ffi.new('const void **') mess_len = ffi.new('size_t*') codes_get_message = check_return(lib.codes_get_message) codes_get_message(handle, mess, mess_len) message = ffi.buffer(mess[0], size=mess_len[0]) outfile.write(message)
StarcoderdataPython
3338518
# NOTE: cv2 is imported inside main() so the subtitle-parsing helpers can be
# used (and tested) without OpenCV installed.
import numpy as np
import re


def return_seconds(line):
    """Convert the start time of an SRT timing line to whole seconds.

    *line* looks like "00:01:30,500 --> 00:01:32,000"; only the part before
    "-->" is used.  Milliseconds are rounded and the total is truncated to
    an int (original behaviour preserved).
    """
    timeValues = line[:line.index("-->")].strip().replace(",", ":").split(":")
    timeValues = list(map(int, timeValues))
    hours_to_seconds = timeValues[0] * 3600
    minutes_to_seconds = timeValues[1] * 60
    seconds = timeValues[2]
    milliseconds_to_seconds = round(timeValues[3] / 1000, 2)
    total_seconds = hours_to_seconds + minutes_to_seconds + seconds + milliseconds_to_seconds
    return int(total_seconds)


def find_key_time(srt_path, key):
    """Return the start time (seconds) of the first subtitle containing *key*.

    In SRT the timing line precedes the text line, so the previously read
    line holds the timestamp.  Returns None when the phrase is not found
    (the original crashed with a NameError in that case).
    """
    previous_line = ""
    with open(srt_path, 'r') as srt_file:
        for line in srt_file:
            if key in line:
                return return_seconds(previous_line)
            previous_line = line
    return None


def main():
    import cv2

    key = input("ENTER THE KEY PHRASE :")
    t = find_key_time('tcs.srt', key)
    if t is None:
        print("ERROR: key phrase not found in subtitles")
        return

    cap = cv2.VideoCapture('tcs.mp4')
    # Check if the video opened successfully
    if not cap.isOpened():
        print("Error opening video file")
        return

    fps = cap.get(cv2.CAP_PROP_FPS)
    # Jump straight to the frame at t seconds.  The original scanned every
    # frame and compared `t == count/fps` -- an int-vs-float equality that
    # frequently never matched, and the loop never broke after a hit.
    f_count = int(round(t * fps))
    print("time stamp of current frame:", f_count / fps)
    print("Press Q to quit")
    cap.set(cv2.CAP_PROP_POS_FRAMES, f_count)

    # Play from the matched frame until the video ends or Q is pressed.
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imshow('Frame', frame)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    # Release the capture and close all display windows.
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
StarcoderdataPython
1628138
import sympy maxLimit = 10000 for n in range(maxLimit+1): print "true" if sympy.isprime(n) else "false"
StarcoderdataPython
3221608
# <gh_stars>1-10  (dataset residue preserved as a comment)
# NOTE(review): this module is Python 2 only (`thread`, `raw_input`,
# `xrange`, print statements).
import hashlib
import random
import requests
import sys
import time
import thread
import json
import binascii
import math
import threading
from bitarray import *
from multiprocessing.pool import ThreadPool
from ast import literal_eval
from UST import *
from user import *
from conversation import *
from server import *
from client_crypto import *

# Master URL
MASTER_URL = 'http://localhost:5000'
# Errors
ERROR = "Error"
# Constants
SUCCESS = "Success"
FAILED = "Failed"
TRUE = "True"
FALSE = "False"
# Routes
SERVER = 'server'
SUBSCRIBE = 'subscribe'
PUSH = 'push'
PULL = 'pull'
UPDATE_USER_TABLE = 'update_user_table'
UPDATE_NEW_CONVERSATION_TABLE = 'update_new_conversations_table'
UPDATE_SERVER_VIEW = 'update_server_view'
INITIATE = 'initiate'
DELETE = 'delete'
RESERVE = 'reserve'
CONNECT_TO_SLAVE = 'connect_to_slave'
# Wait Times
NEW_CLIENT_WAIT = 3.000         # Wait 3 seconds
NEW_CONVERSATION_WAIT = 3.000   # Wait 3 second
NEW_MESSAGE_WAIT = 1.000        # Wait 1 second
SERVER_UPDATE_WAIT = 5.000      # Wait 5 seconds
SLAVE_RETRY_TIME = 0.500        # Wait 0.5 second
SLAVE_WAIT_TIME = 0.500         # Wait 0.5 second


class Client:
    """Interactive chat client.

    Talks to a master server (and per-shard slave servers) over HTTP,
    authenticating each request with a UST (blind-signature token --
    see the UST module; semantics assumed from usage here).
    Background threads refresh the user table, conversation table,
    message slots and server/shard view while the main thread runs a
    command prompt.
    """

    def main(self, username):
        """Entry point: initialise state, subscribe, then block on the REPL."""
        self.server_table = {}          # url -> Server
        self.shard_table = {}           # (lo, hi) slot range -> Server
        self.ust_table = {}             # url -> UST token for that server
        self.user_table = {}            # username -> User
        self.conversations = {}         # peer username -> Conversation
        self.conversation_lock = threading.Lock()
        self.rsa = None                 # encryption keypair, set in gen_keys()
        self.rsa_sign = None            # signing keypair, set in gen_keys()
        self.username = username
        self.connect_server()
        self.subscribe()
        # Upon successfully suscribing begin updates
        self.updates()
        self.client_input()

    def connect_server(self):
        """Fetch the master server's public key and seed the server table."""
        r = send_request(MASTER_URL, SERVER, {})
        n = r['server_pk_n']
        e = r['server_pk_e']
        self.server_table = {MASTER_URL: Server(MASTER_URL, n, e)}

    def updates(self):
        """Start the four background polling threads (daemon-style)."""
        try:
            thread.start_new_thread(self.user_update, ())
            thread.start_new_thread(self.conversation_update, ())
            thread.start_new_thread(self.message_update, ())
            thread.start_new_thread(self.server_update, ())
        except:
            # NOTE(review): bare except; any failure here is fatal anyway.
            print "ERRROR: unable to start client threads"
            print "FATAL: client unable to update"
            sys.exit(0)

    # -------------------------[ INPUT ]------------------------- #
    def client_input(self):
        """Blocking REPL: read a command line and dispatch it, forever."""
        while True:
            cmd = raw_input("[Please enter your next command]\n>> ")
            self.handle_input(cmd)

    def handle_input(self, cmd):
        """Parse one command line and invoke the matching action.

        Commands: 1/ut, 2/ct, 3/c <user>, 4/mu <user>, 5/m <user> <msg>, H/help.
        Returns an error string on malformed input (the REPL ignores it).
        """
        parts = cmd.split(' ', 1)
        cmd_type = parts[0]
        if cmd_type == "1" or cmd_type == "ut":
            self.print_user_table()
        elif cmd_type == "2" or cmd_type == "ct":
            self.print_conversation_table()
        elif cmd_type == "3" or cmd_type == "c":
            if len(parts) < 2:
                return "ERROR: please enter a valid command"
            cmd_args = parts[1]
            self.init_conversation(cmd_args)
        elif cmd_type == "4" or cmd_type == "mu":
            if len(parts) < 2:
                return "ERROR: please enter a valid command"
            cmd_args = parts[1]
            self.print_conversation(cmd_args)
        elif cmd_type == "5" or cmd_type == "m":
            if len(parts) < 2:
                return "ERROR: please enter a valid command"
            cmd_args = parts[1]
            split = cmd_args.split(' ', 1)
            if len(split) < 2:
                return "ERROR: please enter a valid command"
            username = split[0]
            message = split[1]
            self.send_message(username, message)
        elif cmd_type == "H" or cmd_type == "h" or cmd_type == "Help" or cmd_type == "help":
            print " 1: [1,ut]                       - Print Local User Table"
            print " 2: [2,ct]                       - Print Local Conversation Table"
            print " 3: [3,c] <username>             - Start Conversation with 'username'"
            print " 4: [4,mu] <username>            - Print Conversation with 'username'"
            print " 5: [5,m] <username> <message>   - Send 'message' to 'username'"
            print " H: [H,h,Help,help]              - Print this help message"

    # -------------------------[ PRINTING ]------------------------- #
    def print_user_table(self):
        """Print all known usernames, sorted."""
        print "=== Local User Table ==="
        usernames = sorted(self.user_table.keys())
        for username in usernames:
            print " %-24s" % username
        print "\n",

    def print_conversation_table(self):
        """Print the peers we have conversations with, sorted."""
        print "=== Local Conversation Table ==="
        recipients = sorted(self.conversations.keys())
        for recipient in recipients:
            # NOTE(review): prints the whole `recipients` list on every
            # iteration; almost certainly meant `recipient`.
            print " %-24s" % recipients
        print "\n",

    def print_conversation(self, username):
        """Print the transcript of the conversation with *username*."""
        if username not in self.user_table:
            # NOTE(review): missing `return` -- execution falls through and
            # the lookup below can raise KeyError.
            print "ERROR: user '" + username + "' does not exist\n>> ",
        if username not in self.conversations:
            # NOTE(review): same missing `return` as above.
            print "ERROR: you have not started a conversation with " + username
        conversation = self.conversations[username]
        print "\n" + conversation.get_conversation() + "\n>> ",

    # -------------------------[ SUBSCRIBE ]------------------------- #
    def gen_keys(self):
        """Generate the encryption (4096-bit) and signing (1024-bit) RSA keys."""
        self.rsa = RSA_gen(4096)
        self.n, self.e, self.d = RSA_keys(self.rsa)
        self.rsa_sign = RSA_gen(1024)
        self.n_sign, self.e_sign, self.d_sign = RSA_keys(self.rsa_sign)

    def subscribe(self):
        """Register this username and public keys with the master server.

        Exits the process if the request fails or the username is taken
        (detected by the server echoing back someone else's keys).
        """
        print "Subscribing please wait..."
        self.gen_keys()
        self.ust_table[MASTER_URL] = UST(self.server_table[MASTER_URL])
        ust = self.ust_table[MASTER_URL]
        ust.lock.acquire()
        ust.prepare()
        args = {"blinded_nonce" : ust.blinded_nonce,
                "client_username" : self.username,
                "client_pk_n" : self.n,
                "client_pk_e" : self.e,
                "client_sign_pk_n" : self.n_sign,
                "client_sign_pk_e" : self.e_sign}
        r = send_request(MASTER_URL, SUBSCRIBE, args)
        if r == ERROR:
            print "ERROR: could not subscribe"
            sys.exit(0)
        ust.receive(r['blinded_sign'])
        ust.lock.release()
        user = r['user']
        # If the server stored *our* keys, the name was free; otherwise it
        # already belonged to someone else.
        if user['client_pk_n'] == self.n and user['client_pk_e'] == self.e \
           and user['client_sign_pk_n'] == self.n_sign \
           and user['client_sign_pk_e'] == self.e_sign:
            pass
        else:
            print "Username is taken, please try again"
            sys.exit(0)
        self.user_id = user['client_user_id']
        self.user_table_ptr = 0             # resume point for user_update()
        self.conversations_table_ptr = 0    # resume point for conversation_update()
        print "Hello " + self.username + ", welcome to Traceless!"
        return

    # -------------------------[ SERVER UPDATE ]------------------------- #
    def server_update(self):
        """Background thread: poll the master for the shard -> server map."""
        while True:
            ust = self.ust_table[MASTER_URL]
            ust.lock.acquire()
            ust.prepare()
            args = {"nonce" : ust.nonce,
                    "signature" : ust.signature,
                    "blinded_nonce" : ust.blinded_nonce}
            r = send_request(MASTER_URL, UPDATE_SERVER_VIEW, args)
            ust.receive(r['blinded_sign'])
            ust.lock.release()
            shards = r['shards']
            for shard_name, server_data in shards.items():
                # Shard keys arrive as stringified (lo, hi) tuples.
                shard_range = literal_eval(shard_name)
                if server_data == None:
                    # Shard no longer maps to a server
                    if shard_range in self.shard_table:
                        del self.shard_table[shard_range]
                    continue
                server = Server(server_data['url'],
                                server_data['server_pk_n'],
                                server_data['server_pk_e'])
                if shard_range not in self.shard_table:
                    # Shard is new
                    self.shard_table[shard_range] = server
                    self.add_new_server(server)
                elif self.shard_table[shard_range].equals(server) == False:
                    # New server is resposible for this shard
                    self.shard_table[shard_range] = server
                    self.add_new_server(server)
            time.sleep(SERVER_UPDATE_WAIT)
        return

    def add_new_server(self, server):
        """Register a slave server and obtain a UST for it via the master."""
        # Check if the server table does not have identifcal server under url
        if server.url in self.server_table:
            if self.server_table[server.url].equals(server):
                return
        self.server_table[server.url] = server
        slave_ust = UST(self.server_table[server.url])
        self.ust_table[server.url] = slave_ust
        slave_ust.lock.acquire()
        slave_ust.prepare()
        ust = self.ust_table[MASTER_URL]
        ust.lock.acquire()
        ust.prepare()
        # The master blind-signs a nonce on the slave's behalf so we can
        # authenticate to the slave without revealing who we are.
        args = {"nonce" : ust.nonce,
                "signature" : ust.signature,
                "blinded_nonce" : ust.blinded_nonce,
                "blinded_slave_nonce" : slave_ust.blinded_nonce,
                "slave_url" : server.url}
        r = send_request(MASTER_URL, CONNECT_TO_SLAVE, args)
        ust.receive(r['blinded_sign'])
        ust.lock.release()
        slave_ust.receive(r['blinded_slave_sign'])
        slave_ust.lock.release()

    # -------------------------[ USER UPDATE ]------------------------- #
    def user_update(self):
        """Background thread: incrementally pull new users from the master."""
        while True:
            ust = self.ust_table[MASTER_URL]
            ust.lock.acquire()
            ust.prepare()
            args = {"nonce" : ust.nonce,
                    "signature" : ust.signature,
                    "blinded_nonce" : ust.blinded_nonce,
                    "client_user_table_ptr" : self.user_table_ptr}
            r = send_request(MASTER_URL, UPDATE_USER_TABLE, args)
            ust.receive(r['blinded_sign'])
            ust.lock.release()
            new_users = r['new_users']
            for new_user in new_users:
                username = new_user['client_username']
                if username not in self.user_table:
                    user_id = new_user['client_user_id']
                    pk_n, pk_e, pk_sign_n, pk_sign_e = (new_user['client_pk_n'],
                                                        new_user['client_pk_e'],
                                                        new_user['client_sign_pk_n'],
                                                        new_user['client_sign_pk_e'])
                    user = User(username, user_id, pk_n, pk_e, pk_sign_n, pk_sign_e)
                    self.user_table[username] = user
                    # Advance the cursor so the next poll only fetches newer rows.
                    self.user_table_ptr = user_id
            time.sleep(NEW_CLIENT_WAIT)
        return

    # -------------------------[ RESERVATIONS ]------------------------- #
    def reserve_slot(self):
        """Reserve a random 128-bit message slot on the master.

        Loops until the master accepts a slot id; returns
        (slot_id, deletion_nonce, deletion_signature).
        NOTE(review): on failure the master-UST lock is re-acquired next
        iteration without a sleep -- this is a busy retry loop.
        """
        while True:
            # Consider who to resever from, master is default
            ust = self.ust_table[MASTER_URL]
            ust.lock.acquire()
            ust.prepare()
            slot_id = random.getrandbits(128)
            # Deletion nonce embeds the slot id in its upper 128 bits.
            delete_nonce = (slot_id << 128) + random.getrandbits(128)
            ust_delete = UST(self.server_table[MASTER_URL])
            ust_delete.prepare(delete_nonce)
            args = {"nonce" : ust.nonce,
                    "signature" : ust.signature,
                    "blinded_nonce" : ust.blinded_nonce,
                    "slot_id" : slot_id,
                    "blinded_deletion_nonce" : ust_delete.blinded_nonce}
            r = send_request(MASTER_URL, RESERVE, args)
            ust.receive(r['blinded_sign'])
            ust.lock.release()
            if r['success'] == True:
                ust_delete.receive(r['blinded_deletion_sign'])
                sig = ust_delete.signature
                return slot_id, delete_nonce, sig

    def reserve_slot_forced(self):
        """Pick a random slot id that falls inside a known shard, locally.

        Unlike reserve_slot() this never contacts the server, so the
        deletion nonce/signature slots of the returned tuple are None.
        """
        slot_id = None
        while True:
            slot_id = random.getrandbits(128)
            if self.get_shard_from_slot(slot_id)[0]:
                break
        return slot_id, None, None

    # -------------------------[ SHARD HELPERS ]------------------------- #
    def get_shard_from_slot(self, slot_id):
        """Return (True, shard_range) for the shard containing *slot_id*."""
        for shard in self.shard_table:
            if self.slot_in_shard(slot_id, shard):
                return True, shard
        return False, None

    def slot_in_shard(self, slot_id, shard):
        """True iff shard[0] <= slot_id < shard[1]."""
        if shard[0] <= slot_id < shard[1]:
            return True
        return False

    def shards_available(self):
        """True iff at least one shard is known."""
        if len(self.shard_table) == 0:
            return False
        return True

    def get_slave_from_slot(self, slot_id):
        """Return the slave URL serving *slot_id*, or False if unmapped."""
        ok, shard_range = self.get_shard_from_slot(slot_id)
        if not ok:
            return False
        shard = self.shard_table[shard_range]
        return shard.url

    # -------------------------[ CONVERSATIONS ]------------------------- #
    def init_conversation(self, recipient):
        """Start a conversation with *recipient*.

        Packs (my_name, their_name, read_slot, write_slot) into one big
        integer P, signs it, encrypts both under the recipient's key and
        posts it to the master's conversation table.
        """
        if recipient == self.username:
            print "ERROR: Please enter a username that is not your own"
            return
        if recipient not in self.user_table:
            print "ERROR: Please enter a username that is available"
            return
        # Reserve Read/Write slot
        read_slot_id, read_nonce, read_slot_sig = self.reserve_slot_forced()
        write_slot_id, write_nonce, write_slot_sig = self.reserve_slot_forced()
        # Usernames become 256-bit binary fields (left-justified bit strings).
        x = bin(int(binascii.hexlify(self.username), 16))
        my_username = x.ljust(256)
        y = bin(int(binascii.hexlify(recipient), 16))
        recipient_username = y.ljust(256)
        # Layout of P: [me:256][them:256][read_slot:128][write_slot:128]
        P = (int(my_username, 2) << 512) + \
            (int(recipient_username, 2) << 256) + \
            (read_slot_id << 128) + \
            write_slot_id
        sign = PKCS1_sign(str(P), self.rsa_sign)
        rsa_recipient = RSA_gen_user(self.user_table[recipient])
        sign_enc = RSA_encrypt(sign, rsa_recipient)
        P_enc = RSA_encrypt(str(P), rsa_recipient)
        enc_M = sign_enc + "*****" + P_enc
        ust = self.ust_table[MASTER_URL]
        ust.lock.acquire()
        ust.prepare()
        args = {"nonce" : ust.nonce,
                "signature" : ust.signature,
                "blinded_nonce" : ust.blinded_nonce,
                "message" : enc_M}
        r = send_request(MASTER_URL, INITIATE, args)
        ust.receive(r['blinded_sign'])
        ust.lock.release()
        # NOTE(review): assumes our own entry is already in user_table
        # (populated by user_update) -- KeyError otherwise.
        conversation_obj = Conversation(self.user_table[self.username],
                                        self.user_table[recipient],
                                        read_slot_id, write_slot_id)
        self.conversation_lock.acquire()
        self.conversations[recipient] = conversation_obj
        self.conversation_lock.release()
        return

    def conversation_update(self):
        """Background thread: pull and decode conversation invitations."""
        while True:
            ust = self.ust_table[MASTER_URL]
            ust.lock.acquire()
            ust.prepare()
            args = {"nonce" : ust.nonce,
                    "signature" : ust.signature,
                    "blinded_nonce" : ust.blinded_nonce,
                    "client_new_conversations_table_ptr" : self.conversations_table_ptr}
            r = send_request(MASTER_URL, UPDATE_NEW_CONVERSATION_TABLE, args)
            ust.receive(r['blinded_sign'])
            ust.lock.release()
            new_conversations = r['new_conversations']
            for conversation in new_conversations:
                conversation_id = conversation['conversation_id']
                enc_M = conversation['message']
                self.conversations_table_ptr = conversation_id + 1
                ciphertext = enc_M.split("*****")
                sign = RSA_decrypt(ciphertext[0], self.rsa)
                P = RSA_decrypt(ciphertext[1], self.rsa)
                try:
                    # Undo the packing done in init_conversation(): pad to
                    # 768 bits, then slice the four fields back out.
                    b = bin(int(P))[2:]
                    b2 = (768 - len(b)) * '0' + b
                    sender = (''.join(chr(int(b2[i:i+8], 2)) for i in xrange(0, 256, 8))).replace('\x00', '')
                    recipient = (''.join(chr(int(b2[i:i+8], 2)) for i in xrange(256, 512, 8))).replace('\x00', '')
                    write_slot_id = int(b2[512:640], 2)
                    read_slot_id = int(b2[640:], 2)
                    if recipient != self.username:
                        continue
                    rsa_sign_sender = RSA_gen_user_sign(self.user_table[sender])
                    # TODO, fix verification, assume no spoofs atm
                    # if PKCS1_verify(sign, str(P), rsa_sign_sender):
                    #     print "VERIFIED!", recipient
                    conversation_obj = Conversation(self.user_table[self.username],
                                                    self.user_table[sender],
                                                    read_slot_id, write_slot_id)
                    self.conversation_lock.acquire()
                    self.conversations[sender] = conversation_obj
                    self.conversation_lock.release()
                    print "\nConversation started with: ", sender, "\n>> ",
                except:
                    # NOTE(review): bare except silently drops undecodable
                    # (e.g. not-for-us) invitations.
                    continue
            time.sleep(NEW_CONVERSATION_WAIT)
        return

    # -------------------------[ MESSAGES ]------------------------- #
    def send_message(self, username, text):  # , slot_id, next_block, ND, ND_signed):
        """Encrypt *text* and push it to the current write slot for *username*.

        Each message carries the id of the next write slot, forming a
        linked chain of slots the recipient follows.
        """
        if len(text) > 256:
            print "\nERROR: message too long\n>> ",
            return
        if username not in self.user_table:
            print "\nERROR: user " + username + " does not exist\n>> ",
            return
        if username not in self.conversations:
            print "\nERROR: please start conversation with " + username + " first\n>> ",
            return
        if not self.shards_available():
            print "\nERROR: server cannot accept messages at this time\n>> ",
            return
        conversation = self.conversations[username]
        write_slot_id = conversation.write_slot_id
        # Reserve the *next* link in the slot chain before sending.
        new_write_slot_id, new_write_nonce, new_write_slot_sig = self.reserve_slot_forced()
        conversation.update_write_slot(new_write_slot_id)
        msg = text.ljust(128)
        x = bin(int(binascii.hexlify(msg), 16))
        new_text = int(x, 2)
        # P = (new_text << 2432) + (next_block << 2304) + (ND << 2048) + (ND_signed)
        # Layout of P: [text][next_write_slot:128]
        P = (new_text << 128) + new_write_slot_id
        signature = PKCS1_sign(str(P), self.rsa_sign)
        recipient = self.user_table[username]
        rsa_recipient = RSA_gen_user(recipient)
        ciphertext = RSA_encrypt(signature, rsa_recipient) + \
                     "*****" + RSA_encrypt(str(P), rsa_recipient)
        slave_url = self.get_slave_from_slot(write_slot_id)
        if slave_url == False:
            print "\nERROR: server cannot accept messages at this time\n>> ",
            return
        if slave_url not in self.ust_table:
            print "\nERROR: please retry your message again\n>> ",
            return
        ust = self.ust_table[slave_url]
        ust.lock.acquire()
        ust.prepare()
        args = {"nonce" : ust.nonce,
                "signature" : ust.signature,
                "blinded_nonce" : ust.blinded_nonce,
                "slot_id" : write_slot_id,
                "message" : ciphertext}
        r = send_request(slave_url, PUSH, args)
        print r
        ust.receive(r['blinded_sign'])
        while r['success'] == False:
            # Failed request, retry -- the responsible slave may have changed.
            if ust.lock.locked():
                ust.lock.release()
            slave_url = self.get_slave_from_slot(write_slot_id)
            if slave_url not in self.ust_table:
                time.sleep(SLAVE_WAIT_TIME)
                continue
            ust = self.ust_table[slave_url]
            ust.lock.acquire()
            ust.prepare()
            args["nonce"] = ust.nonce
            args["signature"] = ust.signature
            args["blinded_nonce"] = ust.blinded_nonce
            r = send_request(slave_url, PUSH, args)
            print r
            ust.receive(r['blinded_sign'])
            time.sleep(SLAVE_RETRY_TIME)
        if ust.lock.locked():
            ust.lock.release()
        conversation.add_write_text(text)
        print "\n" + conversation.get_conversation() + "\n>> ",
        return

    def message_update(self):
        """Background thread: poll each conversation's read slot for messages."""
        while True:
            for sender, conversation in self.conversations.items():
                read_slot_id = conversation.read_slot_id
                slave_url = self.get_slave_from_slot(read_slot_id)
                if slave_url == False or slave_url not in self.ust_table:
                    continue
                ust = self.ust_table[slave_url]
                ust.lock.acquire()
                ust.prepare()
                args = {"nonce" : ust.nonce,
                        "signature" : ust.signature,
                        "blinded_nonce" : ust.blinded_nonce,
                        "slot_id" : read_slot_id}
                r = send_request(slave_url, PULL, args)
                ust.receive(r['blinded_sign'])
                while r['success'] == False:
                    # Failed request, retry -- same pattern as send_message().
                    if ust.lock.locked():
                        ust.lock.release()
                    slave_url = self.get_slave_from_slot(read_slot_id)
                    if slave_url not in self.ust_table:
                        time.sleep(SLAVE_WAIT_TIME)
                        continue
                    ust = self.ust_table[slave_url]
                    ust.lock.acquire()
                    ust.prepare()
                    args["nonce"] = ust.nonce
                    args["signature"] = ust.signature
                    args["blinded_nonce"] = ust.blinded_nonce
                    r = send_request(slave_url, PULL, args)
                    ust.receive(r['blinded_sign'])
                    time.sleep(SLAVE_RETRY_TIME)
                if ust.lock.locked():
                    ust.lock.release()
                messages = r['messages']
                for message in messages:
                    try:
                        ciphertext = message.split("*****")
                        sign = RSA_decrypt(ciphertext[0], self.rsa)
                        P = RSA_decrypt(ciphertext[1], self.rsa)
                        # Unpack [text][next_slot:128] (see send_message()).
                        b = bin(int(P))[2:]
                        new_read_slot_id = int(b[-128:], 2)
                        rest = '0' + str(b[:-128])
                        # Yeah... don't worry about this...
                        text = (''.join(chr(int(rest[i:i+8], 2)) for i in xrange(0, len(rest), 8))).strip()
                        # Need to verify message sender
                        # Can use just username
                        conversation.update_read_slot(new_read_slot_id)
                        conversation.add_read_text(text)
                        print "\n" + conversation.get_conversation() + "\n>> ",
                    except:
                        # NOTE(review): bare except hides decode errors.
                        continue
            time.sleep(NEW_MESSAGE_WAIT)
        return


def send_request(server, route, args, retry=True):
    """POST *args* as JSON to server/route and return the decoded reply.

    The request runs in a one-shot thread pool; if it has not completed
    after RETRY_TICKS * 10ms and *retry* is set, the pool is torn down
    and the request reissued.
    """
    RETRY_TICKS = 100
    # if 'nonce' in args:
    #     print args['nonce'] % 10000, route, server
    headers = {'content-type': 'application/json'}
    data = json.dumps(args)
    url = server + "/" + route
    pool = ThreadPool(processes=1)
    response_handle = pool.apply_async(send, (url, headers, data))
    # Handle Timeouts
    ticks = 0
    while True:
        ok = response_handle.ready()
        if not ok:
            ticks += 1
            if retry and ticks > RETRY_TICKS:
                pool.terminate()
                pool = ThreadPool(processes=1)
                response_handle = pool.apply_async(send, (url, headers, data))
                ticks = 0
            else:
                time.sleep(0.01)
        else:
            pool.terminate()
            pool = None
            break
    response = response_handle.get()
    if not (200 <= response.status_code < 300):
        raise Exception(response.text)
        # NOTE(review): unreachable -- the raise above always fires first.
        return ERROR
    return json.loads(response.text)


# ===== Do Not Change ======
# Simulate successful or dropped packet
UNRELIABLE = False
def send(url, headers, data):
    """POST helper; when UNRELIABLE, a "dropped" packet hangs forever."""
    DROP_RATE = 0.01
    if UNRELIABLE and random.random() < DROP_RATE:
        while True:
            # The world keeps spinning
            pass
    response = requests.post(url, headers=headers, data=data)
    return response


# ====== Handle Boot =======
# Usage: python client.py <username>
if len(sys.argv) < 2:
    print "ERROR: Please start client with an input username"
    sys.exit(0)
client = Client()
username_in = sys.argv[1]
client.main(username_in)
StarcoderdataPython
135438
# <gh_stars>0  (dataset residue preserved as a comment)
class Solution:
    def getModifiedArray(self, length: int, updates: List[List[int]]) -> List[int]:
        """Apply range-increment updates to a zero array of size *length*.

        Uses a difference array: each update (start, end, inc) is recorded
        at its endpoints, and a single running-sum pass materialises the
        final values in O(length + len(updates)).
        """
        delta = [0] * (length + 1)
        for start, end, amount in updates:
            delta[start] += amount
            delta[end + 1] -= amount

        result = []
        running = 0
        for change in delta[:length]:
            running += change
            result.append(running)
        return result
StarcoderdataPython