text
string
size
int64
token_count
int64
######################################################################## # This script contains all the data analysis functions # ######################################################################## from __future__ import division from pylab import * import scipy, scipy.stats import tables import os from tempfile import TemporaryFile def avalanches(activity, variable, value, Threshold = 'percent', Theta_percent = 15, Transient = 0, fullS = False, binsize = False): # Threhold indicates which kind of activity treshold are we using # If FALSE, Theta_percent percentile is used # if 'half', 'half' the mean activity is used # if '1std', mean activity minus one std is used a_duration_all = []; a_area_all = [] # Theta is by default given by 25 percentile # Theta MUST be a int if Threshold == 'percent': Theta = percentile(activity, Theta_percent) elif Threshold == 'half': Theta = int(activity.mean()/2.) elif Threshold == '1std': Theta = int(activity.mean()-activity.std()) else: Theta = Threshold for data_file in range(len(activity)): sys.stdout.write('\rcalculating avalanches %d%%' \ %(100*(data_file+1)/len(activity))), sys.stdout.flush() # make prettier if binsize is not False and binsize != 1: avalan_stable = activity[data_file] if binsize == 2: avalan_stable = avalan_stable[::2] + avalan_stable[1::2] avalan_stable /= 2. if binsize == 5: avalan_stable = avalan_stable[::5] + avalan_stable[1::5] +\ avalan_stable[2::5] + avalan_stable[3::5] +\ avalan_stable[4::5] avalan_stable /= 5. if binsize == 10: avalan_stable = avalan_stable[::10] + avalan_stable[1::10] +\ avalan_stable[2::10] + avalan_stable[3::10] +\ avalan_stable[4::10] + avalan_stable[5::10] +\ avalan_stable[6::10] + avalan_stable[7::10] +\ avalan_stable[8::10] + avalan_stable[9::10] avalan_stable /= 10. 
avalan_stable = np.floor(avalan_stable - Theta) else: # to avoid empty array error if len(activity.shape) > 1: avalan_stable = activity[data_file] - Theta else: avalan_stable = activity - Theta size, area = 0, 0 ### TODO: make it prettier - this should include only the avalanches ### before the transient transient_end = 0 new_range = len(avalan_stable) if Transient is not 0: for i in range(Transient, new_range): if avalan_stable[i] == 0: transient_end = i + 1 break new_range = transient_end #### for i in range(new_range): if avalan_stable[i] > 0: size += 1 if not fullS: area += int(avalan_stable[i]) else: area += int(activity[data_file][i]) elif size != 0: a_duration_all.append(size) a_area_all.append(area) size, area = 0, 0 # convert to np.array cause it is easier for the other functions a_duration_all = asarray(a_duration_all) a_area_all = asarray(a_area_all) print '...done' return a_duration_all, a_area_all ### distribution of the total activity # Return the average activity (as a array), and std def mean_activity(activity, variable, value): distribution = zeros((len(activity), activity.max()+1)) for data_file in range(len(activity)): # print the % in the terminal sys.stdout.write('\rcalculating activity %d%%' \ %(100*(data_file+1)/len(activity))), sys.stdout.flush() total_steps = activity[data_file].size for i in range(total_steps): distribution[data_file, activity[data_file, i]] += 1 distribution[data_file, :] /= distribution[data_file, :].sum() dist_mean = distribution.mean(0) dist_std = distribution.std(0) print '...done' return dist_mean, dist_std ### calculate the size average as a function of the duration # receives the non-sorted arrays with measures of size and duration # returns two non-sorted arrays containing the duration and average # avalanche size. 
def area_X_duration(a_dur, a_area):
    """Average avalanche area as a function of avalanche duration.

    Receives the (non-sorted) per-avalanche duration and area arrays and
    returns two arrays: the distinct durations (in order of first
    occurrence, matching the original implementation) and the mean area
    of the avalanches having each duration.

    The original implementation rescanned the whole duration array with
    ``np.where`` for every distinct duration (O(n * u) with a linear
    membership test on top); this version groups in a single O(n) pass.

    Parameters
    ----------
    a_dur : array-like of int
        Duration of each avalanche.
    a_area : array-like
        Area (size) of each avalanche, aligned with ``a_dur``.

    Returns
    -------
    (ndarray, ndarray)
        Distinct durations and the corresponding mean areas.
    """
    sums = {}
    counts = {}
    order = []  # distinct durations in first-occurrence order
    for dur, area in zip(a_dur, a_area):
        if dur not in sums:
            order.append(dur)
            sums[dur] = 0.0
            counts[dur] = 0
        sums[dur] += area
        counts[dur] += 1
    T_avg = np.asarray(order)
    S_avg = np.asarray([sums[d] / counts[d] for d in order])
    return T_avg, S_avg
4,923
1,558
import eventlet

# Must run before any other network-using import so sockets are green.
eventlet.monkey_patch()
import time
from datetime import datetime, timedelta, timezone
import pytz
from email.utils import parsedate_tz
import json
from flask import Flask, request, render_template
from threading import Thread
from tweepy import OAuthHandler, API, Stream, Cursor
from flask_socketio import (
    SocketIO,
    emit,
    join_room,
    leave_room,
    close_room,
    rooms,
    disconnect,
)
from darksky import forecast

# Module-level singletons: the Socket.IO server and the two background
# worker threads (one per data feed), created lazily on first request.
socketio = SocketIO()
thread = None
thread2 = None

from edwin.tweets import StdOutListener


def create_app():
    """Application factory: build the Flask app, wire up Socket.IO, the
    Twitter streaming client and the DarkSky weather poller, and register
    the single index route.

    Returns the configured :class:`~flask.Flask` instance.
    """
    app = Flask(__name__)
    app.config.from_object("config")
    app.config["SECRET_KEY"] = "secret!"
    with app.app_context():
        socketio.init_app(app, async_mode="eventlet")
        CONSUMER_KEY = app.config["TWITTER_CONSUMER_KEY"]
        CONSUMER_SECRET = app.config["TWITTER_CONSUMER_SECRET"]
        ACCESS_TOKEN = app.config["TWITTER_ACCESS_TOKEN"]
        ACCESS_TOKEN_SECRET = app.config["TWITTER_ACCESS_TOKEN_SECRET"]
        TWITTER_SCREEN_NAME = app.config["TWITTER_SCREEN_NAME"]
        DARKSKY_KEY = app.config["DARKSKY_KEY"]
        # These config variables come from 'config.py'
        auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
        auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
        # NOTE(review): these keyword arguments match the tweepy 3.x API;
        # tweepy 4.x renamed/removed them — confirm the pinned version.
        api = API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
        # IDs of the accounts this screen name follows; the stream below
        # filters on them.
        ids = api.friends_ids(screen_name=TWITTER_SCREEN_NAME, stringify_ids="true")
        try:
            # Hard-coded coordinates — presumably Washington, DC; verify.
            dc = forecast(DARKSKY_KEY, 38.9159, -77.0446)
        except:
            # NOTE(review): bare except silently leaves `dc` undefined;
            # darksky_thread's own try/except then reports "Connection Lost".
            print("failed connection to darksky")

        @app.route("/", methods=["GET"])
        def index():
            """Serve the page and lazily start both feed threads once."""
            global thread
            global thread2
            if thread is None:
                thread = Thread(target=twitter_thread, daemon=True)
                thread.start()
            if thread2 is None:
                thread2 = Thread(target=darksky_thread, daemon=True)
                thread2.start()
            return render_template("index.html")

        def twitter_thread():
            """Connect to the Twitter streaming API and push tweets to the
            client via the StdOutListener bound below."""
            # `listener` is assigned after these defs but before any
            # request can invoke this thread, so the closure resolves.
            stream = Stream(auth, listener)
            _follow = ["15736341", "1"]  # unused; kept from development
            stream.filter(follow=ids, filter_level="low")

        def darksky_thread():
            """Poll DarkSky every 120 s and emit temp/sunrise/sunset on the
            'darksky_channel' Socket.IO channel."""
            while True:
                try:
                    dc.refresh(extend='daily')
                    sunrise = convert_unix_ts(dc['daily']['data'][0]['sunriseTime'])
                    sunset = convert_unix_ts(dc['daily']['data'][0]['sunsetTime'])
                    # convert to int for a nice round whole number temperture
                    temp = int(dc.temperature)
                except:
                    # Any failure (including `dc` never having been created)
                    # degrades to placeholder values rather than killing the loop.
                    print("break")
                    sunrise = "_"
                    sunset = "-"
                    temp = "Connection Lost"
                socketio.emit(
                    "darksky_channel",
                    {"temp": temp, "sunrise": sunrise, "sunset": sunset},
                    namespace="/darksky_streaming",
                )
                time.sleep(120)

        # Tweet listener used by twitter_thread's stream.
        listener = StdOutListener()
    return app


def convert_unix_ts(ts):
    """Format a Unix timestamp as a local 'H:MM' clock string.

    Note: '%-I' (no leading zero) is a glibc extension — not portable to
    Windows strftime.
    """
    ts = int(ts)
    return datetime.fromtimestamp(ts).strftime('%-I:%M')
3,404
1,032
from django import forms
from django.contrib import admin

from .models import Attendance, Diet, Participant, Troop
from payment.admin import DiscountInline, PaymentInline


class AttendanceInline(admin.TabularInline):
    """Read-only inline listing the participants linked to an attendance day."""

    model = Participant.attendance.through
    readonly_fields = ("participant",)
    can_delete = False

    def has_add_permission(self, request, obj=None):
        # Attendance links are managed from the participant side only.
        return False


@admin.register(Attendance)
class AttendanceAdmin(admin.ModelAdmin):
    """Attendance days with their participant links inlined."""

    inlines = (AttendanceInline,)
    list_display = ("date", "is_main")


class DietInline(admin.TabularInline):
    """Read-only inline listing the participants linked to a diet."""

    model = Participant.diet.through
    readonly_fields = ("participant",)
    can_delete = False

    def has_add_permission(self, request, obj=None):
        # Diet links are managed from the participant side only.
        return False


@admin.register(Diet)
class DietAdmin(admin.ModelAdmin):
    """Diets with their participant links inlined."""

    inlines = (DietInline,)


@admin.register(Participant)
class ParticipantAdmin(admin.ModelAdmin):
    """Participants with their discounts inlined."""

    inlines = (DiscountInline,)
    list_display = (
        "troop",
        "first_name",
        "last_name",
        "birthday",
        "age_section",
        "is_leader",
    )
    list_display_links = ("first_name", "last_name", "birthday")

    def formfield_for_dbfield(self, db_field, **kwargs):
        """Render the free-text comment field with a textarea widget."""
        formfield = super().formfield_for_dbfield(db_field, **kwargs)
        if db_field.name == "comment":
            formfield.widget = forms.Textarea(attrs=formfield.widget.attrs)
        return formfield


class ParticipantInline(admin.TabularInline):
    """Read-only participant summary shown on the troop page."""

    model = Participant
    fields = ("first_name", "last_name", "birthday")
    readonly_fields = ("first_name", "last_name", "birthday")
    can_delete = False
    show_change_link = True

    def has_add_permission(self, request, obj=None):
        # Participants are created through their own admin page.
        return False


@admin.register(Troop)
class TroopAdmin(admin.ModelAdmin):
    """Troops with their participants and payments inlined."""

    inlines = (ParticipantInline, PaymentInline)
    list_display = ("number", "name")
    list_display_links = ("name",)
2,255
724
from conf import celery_settings

from .app import app


@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Celery post-configure hook; no periodic tasks are registered yet."""
    pass
142
46
from flask import Flask
from flask import render_template
import plotly.express as px
from plotly.offline import plot

app = Flask("Datenvisualisierung")


def data():
    """Return the gapminder rows for Switzerland."""
    gapminder = px.data.gapminder()
    return gapminder[gapminder.country == 'Switzerland']


def viz():
    """Build the Swiss population bar chart and return it as an HTML div."""
    fig = px.bar(
        data(),
        x='year',
        y='pop',
        hover_data=['lifeExp', 'gdpPercap'],
        color='lifeExp',
        labels={
            'pop': 'Einwohner der Schweiz',
            'year': 'Jahrzehnt'
        },
        height=400
    )
    return plot(fig, output_type="div")


@app.route("/")
def index():
    """Render the index page with the embedded chart."""
    return render_template('index.html', viz_div=viz())


if __name__ == '__main__':
    app.run(debug=True, port=5000)
829
295
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['DatabaseConnectionPoolArgs', 'DatabaseConnectionPool'] @pulumi.input_type class DatabaseConnectionPoolArgs: def __init__(__self__, *, cluster_id: pulumi.Input[str], db_name: pulumi.Input[str], mode: pulumi.Input[str], size: pulumi.Input[int], user: pulumi.Input[str], name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a DatabaseConnectionPool resource. :param pulumi.Input[str] cluster_id: The ID of the source database cluster. Note: This must be a PostgreSQL cluster. :param pulumi.Input[str] db_name: The database for use with the connection pool. :param pulumi.Input[str] mode: The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement. :param pulumi.Input[int] size: The desired size of the PGBouncer connection pool. :param pulumi.Input[str] user: The name of the database user for use with the connection pool. :param pulumi.Input[str] name: The name for the database connection pool. """ pulumi.set(__self__, "cluster_id", cluster_id) pulumi.set(__self__, "db_name", db_name) pulumi.set(__self__, "mode", mode) pulumi.set(__self__, "size", size) pulumi.set(__self__, "user", user) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="clusterId") def cluster_id(self) -> pulumi.Input[str]: """ The ID of the source database cluster. Note: This must be a PostgreSQL cluster. 
""" return pulumi.get(self, "cluster_id") @cluster_id.setter def cluster_id(self, value: pulumi.Input[str]): pulumi.set(self, "cluster_id", value) @property @pulumi.getter(name="dbName") def db_name(self) -> pulumi.Input[str]: """ The database for use with the connection pool. """ return pulumi.get(self, "db_name") @db_name.setter def db_name(self, value: pulumi.Input[str]): pulumi.set(self, "db_name", value) @property @pulumi.getter def mode(self) -> pulumi.Input[str]: """ The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: pulumi.Input[str]): pulumi.set(self, "mode", value) @property @pulumi.getter def size(self) -> pulumi.Input[int]: """ The desired size of the PGBouncer connection pool. """ return pulumi.get(self, "size") @size.setter def size(self, value: pulumi.Input[int]): pulumi.set(self, "size", value) @property @pulumi.getter def user(self) -> pulumi.Input[str]: """ The name of the database user for use with the connection pool. """ return pulumi.get(self, "user") @user.setter def user(self, value: pulumi.Input[str]): pulumi.set(self, "user", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name for the database connection pool. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class _DatabaseConnectionPoolState: def __init__(__self__, *, cluster_id: Optional[pulumi.Input[str]] = None, db_name: Optional[pulumi.Input[str]] = None, host: Optional[pulumi.Input[str]] = None, mode: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, password: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, private_host: Optional[pulumi.Input[str]] = None, private_uri: Optional[pulumi.Input[str]] = None, size: Optional[pulumi.Input[int]] = None, uri: Optional[pulumi.Input[str]] = None, user: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering DatabaseConnectionPool resources. :param pulumi.Input[str] cluster_id: The ID of the source database cluster. Note: This must be a PostgreSQL cluster. :param pulumi.Input[str] db_name: The database for use with the connection pool. :param pulumi.Input[str] host: The hostname used to connect to the database connection pool. :param pulumi.Input[str] mode: The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement. :param pulumi.Input[str] name: The name for the database connection pool. :param pulumi.Input[str] password: Password for the connection pool's user. :param pulumi.Input[int] port: Network port that the database connection pool is listening on. :param pulumi.Input[str] private_host: Same as `host`, but only accessible from resources within the account and in the same region. :param pulumi.Input[str] private_uri: Same as `uri`, but only accessible from resources within the account and in the same region. :param pulumi.Input[int] size: The desired size of the PGBouncer connection pool. :param pulumi.Input[str] uri: The full URI for connecting to the database connection pool. 
:param pulumi.Input[str] user: The name of the database user for use with the connection pool. """ if cluster_id is not None: pulumi.set(__self__, "cluster_id", cluster_id) if db_name is not None: pulumi.set(__self__, "db_name", db_name) if host is not None: pulumi.set(__self__, "host", host) if mode is not None: pulumi.set(__self__, "mode", mode) if name is not None: pulumi.set(__self__, "name", name) if password is not None: pulumi.set(__self__, "password", password) if port is not None: pulumi.set(__self__, "port", port) if private_host is not None: pulumi.set(__self__, "private_host", private_host) if private_uri is not None: pulumi.set(__self__, "private_uri", private_uri) if size is not None: pulumi.set(__self__, "size", size) if uri is not None: pulumi.set(__self__, "uri", uri) if user is not None: pulumi.set(__self__, "user", user) @property @pulumi.getter(name="clusterId") def cluster_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the source database cluster. Note: This must be a PostgreSQL cluster. """ return pulumi.get(self, "cluster_id") @cluster_id.setter def cluster_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cluster_id", value) @property @pulumi.getter(name="dbName") def db_name(self) -> Optional[pulumi.Input[str]]: """ The database for use with the connection pool. """ return pulumi.get(self, "db_name") @db_name.setter def db_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "db_name", value) @property @pulumi.getter def host(self) -> Optional[pulumi.Input[str]]: """ The hostname used to connect to the database connection pool. """ return pulumi.get(self, "host") @host.setter def host(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input[str]]: """ The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement. 
""" return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "mode", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name for the database connection pool. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def password(self) -> Optional[pulumi.Input[str]]: """ Password for the connection pool's user. """ return pulumi.get(self, "password") @password.setter def password(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "password", value) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ Network port that the database connection pool is listening on. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="privateHost") def private_host(self) -> Optional[pulumi.Input[str]]: """ Same as `host`, but only accessible from resources within the account and in the same region. """ return pulumi.get(self, "private_host") @private_host.setter def private_host(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "private_host", value) @property @pulumi.getter(name="privateUri") def private_uri(self) -> Optional[pulumi.Input[str]]: """ Same as `uri`, but only accessible from resources within the account and in the same region. """ return pulumi.get(self, "private_uri") @private_uri.setter def private_uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "private_uri", value) @property @pulumi.getter def size(self) -> Optional[pulumi.Input[int]]: """ The desired size of the PGBouncer connection pool. 
""" return pulumi.get(self, "size") @size.setter def size(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "size", value) @property @pulumi.getter def uri(self) -> Optional[pulumi.Input[str]]: """ The full URI for connecting to the database connection pool. """ return pulumi.get(self, "uri") @uri.setter def uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "uri", value) @property @pulumi.getter def user(self) -> Optional[pulumi.Input[str]]: """ The name of the database user for use with the connection pool. """ return pulumi.get(self, "user") @user.setter def user(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "user", value) class DatabaseConnectionPool(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, cluster_id: Optional[pulumi.Input[str]] = None, db_name: Optional[pulumi.Input[str]] = None, mode: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, size: Optional[pulumi.Input[int]] = None, user: Optional[pulumi.Input[str]] = None, __props__=None): """ Provides a DigitalOcean database connection pool resource. ## Example Usage ### Create a new PostgreSQL database connection pool ```python import pulumi import pulumi_digitalocean as digitalocean postgres_example = digitalocean.DatabaseCluster("postgres-example", engine="pg", version="11", size="db-s-1vcpu-1gb", region="nyc1", node_count=1) pool_01 = digitalocean.DatabaseConnectionPool("pool-01", cluster_id=postgres_example.id, mode="transaction", size=20, db_name="defaultdb", user="doadmin") ``` ## Import Database connection pools can be imported using the `id` of the source database cluster and the `name` of the connection pool joined with a comma. For example ```sh $ pulumi import digitalocean:index/databaseConnectionPool:DatabaseConnectionPool pool-01 245bcfd0-7f31-4ce6-a2bc-475a116cca97,pool-01 ``` :param str resource_name: The name of the resource. 
:param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] cluster_id: The ID of the source database cluster. Note: This must be a PostgreSQL cluster. :param pulumi.Input[str] db_name: The database for use with the connection pool. :param pulumi.Input[str] mode: The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement. :param pulumi.Input[str] name: The name for the database connection pool. :param pulumi.Input[int] size: The desired size of the PGBouncer connection pool. :param pulumi.Input[str] user: The name of the database user for use with the connection pool. """ ... @overload def __init__(__self__, resource_name: str, args: DatabaseConnectionPoolArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Provides a DigitalOcean database connection pool resource. ## Example Usage ### Create a new PostgreSQL database connection pool ```python import pulumi import pulumi_digitalocean as digitalocean postgres_example = digitalocean.DatabaseCluster("postgres-example", engine="pg", version="11", size="db-s-1vcpu-1gb", region="nyc1", node_count=1) pool_01 = digitalocean.DatabaseConnectionPool("pool-01", cluster_id=postgres_example.id, mode="transaction", size=20, db_name="defaultdb", user="doadmin") ``` ## Import Database connection pools can be imported using the `id` of the source database cluster and the `name` of the connection pool joined with a comma. For example ```sh $ pulumi import digitalocean:index/databaseConnectionPool:DatabaseConnectionPool pool-01 245bcfd0-7f31-4ce6-a2bc-475a116cca97,pool-01 ``` :param str resource_name: The name of the resource. :param DatabaseConnectionPoolArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(DatabaseConnectionPoolArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, cluster_id: Optional[pulumi.Input[str]] = None, db_name: Optional[pulumi.Input[str]] = None, mode: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, size: Optional[pulumi.Input[int]] = None, user: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = DatabaseConnectionPoolArgs.__new__(DatabaseConnectionPoolArgs) if cluster_id is None and not opts.urn: raise TypeError("Missing required property 'cluster_id'") __props__.__dict__["cluster_id"] = cluster_id if db_name is None and not opts.urn: raise TypeError("Missing required property 'db_name'") __props__.__dict__["db_name"] = db_name if mode is None and not opts.urn: raise TypeError("Missing required property 'mode'") __props__.__dict__["mode"] = mode __props__.__dict__["name"] = name if size is None and not opts.urn: raise TypeError("Missing required property 'size'") __props__.__dict__["size"] = size if user is None and not opts.urn: raise TypeError("Missing required property 'user'") __props__.__dict__["user"] = user __props__.__dict__["host"] = None __props__.__dict__["password"] = None __props__.__dict__["port"] = None 
__props__.__dict__["private_host"] = None __props__.__dict__["private_uri"] = None __props__.__dict__["uri"] = None super(DatabaseConnectionPool, __self__).__init__( 'digitalocean:index/databaseConnectionPool:DatabaseConnectionPool', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, cluster_id: Optional[pulumi.Input[str]] = None, db_name: Optional[pulumi.Input[str]] = None, host: Optional[pulumi.Input[str]] = None, mode: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, password: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, private_host: Optional[pulumi.Input[str]] = None, private_uri: Optional[pulumi.Input[str]] = None, size: Optional[pulumi.Input[int]] = None, uri: Optional[pulumi.Input[str]] = None, user: Optional[pulumi.Input[str]] = None) -> 'DatabaseConnectionPool': """ Get an existing DatabaseConnectionPool resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] cluster_id: The ID of the source database cluster. Note: This must be a PostgreSQL cluster. :param pulumi.Input[str] db_name: The database for use with the connection pool. :param pulumi.Input[str] host: The hostname used to connect to the database connection pool. :param pulumi.Input[str] mode: The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement. :param pulumi.Input[str] name: The name for the database connection pool. :param pulumi.Input[str] password: Password for the connection pool's user. :param pulumi.Input[int] port: Network port that the database connection pool is listening on. 
:param pulumi.Input[str] private_host: Same as `host`, but only accessible from resources within the account and in the same region. :param pulumi.Input[str] private_uri: Same as `uri`, but only accessible from resources within the account and in the same region. :param pulumi.Input[int] size: The desired size of the PGBouncer connection pool. :param pulumi.Input[str] uri: The full URI for connecting to the database connection pool. :param pulumi.Input[str] user: The name of the database user for use with the connection pool. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _DatabaseConnectionPoolState.__new__(_DatabaseConnectionPoolState) __props__.__dict__["cluster_id"] = cluster_id __props__.__dict__["db_name"] = db_name __props__.__dict__["host"] = host __props__.__dict__["mode"] = mode __props__.__dict__["name"] = name __props__.__dict__["password"] = password __props__.__dict__["port"] = port __props__.__dict__["private_host"] = private_host __props__.__dict__["private_uri"] = private_uri __props__.__dict__["size"] = size __props__.__dict__["uri"] = uri __props__.__dict__["user"] = user return DatabaseConnectionPool(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="clusterId") def cluster_id(self) -> pulumi.Output[str]: """ The ID of the source database cluster. Note: This must be a PostgreSQL cluster. """ return pulumi.get(self, "cluster_id") @property @pulumi.getter(name="dbName") def db_name(self) -> pulumi.Output[str]: """ The database for use with the connection pool. """ return pulumi.get(self, "db_name") @property @pulumi.getter def host(self) -> pulumi.Output[str]: """ The hostname used to connect to the database connection pool. """ return pulumi.get(self, "host") @property @pulumi.getter def mode(self) -> pulumi.Output[str]: """ The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement. 
""" return pulumi.get(self, "mode") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name for the database connection pool. """ return pulumi.get(self, "name") @property @pulumi.getter def password(self) -> pulumi.Output[str]: """ Password for the connection pool's user. """ return pulumi.get(self, "password") @property @pulumi.getter def port(self) -> pulumi.Output[int]: """ Network port that the database connection pool is listening on. """ return pulumi.get(self, "port") @property @pulumi.getter(name="privateHost") def private_host(self) -> pulumi.Output[str]: """ Same as `host`, but only accessible from resources within the account and in the same region. """ return pulumi.get(self, "private_host") @property @pulumi.getter(name="privateUri") def private_uri(self) -> pulumi.Output[str]: """ Same as `uri`, but only accessible from resources within the account and in the same region. """ return pulumi.get(self, "private_uri") @property @pulumi.getter def size(self) -> pulumi.Output[int]: """ The desired size of the PGBouncer connection pool. """ return pulumi.get(self, "size") @property @pulumi.getter def uri(self) -> pulumi.Output[str]: """ The full URI for connecting to the database connection pool. """ return pulumi.get(self, "uri") @property @pulumi.getter def user(self) -> pulumi.Output[str]: """ The name of the database user for use with the connection pool. """ return pulumi.get(self, "user")
24,336
6,998
# -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models

Usage:
  get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]

  <model>       Directory name of the model whose predictions are analyzed
  -h, --help    Print this help message
"""
import pandas as pd
import numpy as np
from scipy import stats
from docopt import docopt
import os
import glob
from sklearn.metrics import mean_squared_error
import seaborn as sns
import matplotlib.pyplot as plt

sns.set()


def get_results_filename(basepath):
    """Return the '<start_year>-<end_year>' season prefix of the first
    '*-prediction.csv' file found in `basepath`.

    Raises FileNotFoundError (instead of a bare IndexError) when no
    prediction file exists.
    """
    files = glob.glob(os.path.join(basepath, "*-prediction.csv"))
    if not files:
        raise FileNotFoundError(
            "no *-prediction.csv file found in {}".format(basepath)
        )
    # basename looks like '<start>-<end>-prediction.csv'
    start, end = os.path.basename(files[0]).split("-")[:2]
    return "{}-{}".format(start, end)


if __name__ == "__main__":
    args = docopt(__doc__)
    model = args["<model>"]
    base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
    country = args["--country"] if args["--country"] else "italy"
    future = "no-future" if args["--no-future"] else "future"

    # Locate the prediction file of the requested model.
    model_path = os.path.join(base_dir, args["<model>"], future, country)
    season_years = get_results_filename(model_path)
    model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))

    # Load the data
    data = pd.read_csv(model_file)

    # Keep only the weeks between start_year and end_year (inclusive);
    # the end bound is expressed in the following calendar year.
    start_year = "2007-42" if not args["--start-year"] else args["--start-year"]
    end_year = "2019-15" if not args["--end-year"] else args["--end-year"]
    start_season = data["week"] >= start_year
    end_season = data["week"] <= str(int(end_year.split("-")[0]) + 1) + "-" + end_year.split("-")[1]
    total = start_season & end_season
    data = data[total]

    # Describe the data
    print("")
    print("[*] Describe the given dataset {}".format(model_file))
    print(data.describe())

    # Generate residuals
    print("")
    print("[*] Describe the residuals")
    residuals = data["incidence"] - data["prediction"]
    print(residuals.describe())

    # Mean per-season Pearson correlation; seasons are consecutive
    # 26-week chunks of the prediction series.
    print("")
    total_pearson = 0
    for i in np.arange(0, len(data["prediction"]), 26):
        total_pearson += stats.pearsonr(data["prediction"][i:i + 26],
                                        data["incidence"][i:i + 26])[0]
    print("Pearson Correlation (value/p): ", total_pearson / (len(data["prediction"]) / 26))
    print("")
    print("Mean Squared Error: ", mean_squared_error(data["prediction"], data["incidence"]))
    print("")

    if not args["--no-graph"]:
        ax = sns.distplot(residuals, label="Residual")
        plt.figure()
        ax = sns.distplot(data["incidence"], label="Incidence")
        ax = sns.distplot(data["prediction"], label="Prediction")
        plt.legend()
        plt.show()
2,940
1,039
"""Query-driven extractive summarization on top of document-level Gismo."""
from scipy.sparse import vstack
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

from sisu.preprocessing.tokenizer import is_relevant_sentence, make_sentences, sanitize_text
from gismo.gismo import Gismo, covering_order
from gismo.common import auto_k
from gismo.parameters import Parameters
from gismo.corpus import Corpus
from gismo.embedding import Embedding

from sisu.embedding_idf import IdfEmbedding


def cosine_order(projection, sentences, query):
    """
    Order relevant sentences by cosine similarity to the query.

    Parameters
    ----------
    projection: callable
        A function that converts a text into a tuple whose first element is an embedding
        (typically a Gismo :meth:`~gismo.embedding.Embedding.query_projection`).
    sentences: :class:`list` of :class:`dict`
        Sentences as output by :func:`~sisu.summarizer.extract_sentences`.
    query: :class:`str`
        Target query

    Returns
    -------
    :class:`list` of :class:`int`
        Ordered list of indexes of relevant sentences, sorted by cosine similarity
    """
    # Only sentences flagged 'relevant' take part in the ordering.
    relevant_indices = [s['index'] for s in sentences if s['relevant']]
    projected_query = projection(query)[0]
    projected_sentences = vstack([projection(sentences[i]['sanitized'])[0] for i in relevant_indices])
    # argsort of the negated similarities = descending similarity.
    order = np.argsort(- cosine_similarity(projected_sentences, projected_query)[:, 0])
    return [relevant_indices[i] for i in order]


def extract_sentences(source, indices, getter=None, tester=None):
    """
    Pick up the entries of the source corresponding to indices and build a list of sentences out of that.

    Each sentence is a dictionary with the following keys:

    - `index`: position of the sentence in the returned list
    - `sentence`: the actual sentence
    - `relevant`: a boolean that tells if the sentence is eligible for being part of the summary
    - `sanitized`: for relevant sentences, a simplified version to be fed to the embedding

    Parameters
    ----------
    source: :class:`list`
        list of objects
    indices: iterable of :class:`int`
        Indexes of the source items to select
    getter: callable, optional
        Tells how to convert a source entry into text.
    tester: callable, optional
        Tells if the sentence is eligible for being part of the summary.

    Returns
    -------
    list of dict

    Examples
    --------
    >>> doc1 = ("This is a short sentence! This is a sentence with reference to the url http://www.ix.com! "
    ...         "This sentence is not too short and not too long, without URL and without citation. "
    ...         "I have many things to say in that sentence, to the point "
    ...         "I do not know if I will stop anytime soon but don't let it stop "
    ...         "you from reading this meaninless garbage and this goes on and "
    ...         "this goes on and this goes on and this goes on and this goes on and "
    ...         "this goes on and this goes on and this goes on and this goes on "
    ...         "and this goes on and this goes on and this goes on and this goes "
    ...         "on and this goes on and this goes on and this goes on and this goes "
    ...         "on and this goes on and that is all.")
    >>> doc2 = ("This is a a sentence with some citations [3, 7]. "
    ...         "This sentence is not too short and not too long, without URL and without citation. "
    ...         "Note that the previous sentence is already present in doc1. "
    ...         "The enzyme cytidine monophospho-N-acetylneuraminic acid hydroxylase (CMAH) catalyzes "
    ...         "the synthesis of Neu5Gc by hydroxylation of Neu5Ac (Schauer et al. 1968).")
    >>> extract_sentences([doc1, doc2], [1, 0]) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    [{'index': 0, 'sentence': 'This is a a sentence with some citations [3, 7].', 'relevant': False, 'sanitized': ''},
    {'index': 1, 'sentence': 'This sentence is not too short and not too long, without URL and without citation.',
    'relevant': True, 'sanitized': 'This sentence is not too short and not too long without URL and without citation'},
    {'index': 2, 'sentence': 'Note that the previous sentence is already present in doc1.', 'relevant': True,
    'sanitized': 'Note that the previous sentence is already present in doc'},
    {'index': 3, 'sentence': 'The enzyme cytidine monophospho-N-acetylneuraminic acid hydroxylase (CMAH) catalyzes
    the synthesis of Neu5Gc by hydroxylation of Neu5Ac (Schauer et al. 1968).', 'relevant': False, 'sanitized': ''},
    {'index': 4, 'sentence': 'This is a short sentence!', 'relevant': False, 'sanitized': ''},
    {'index': 5, 'sentence': 'This is a sentence with reference to the url http://www.ix.com!', 'relevant': False, 'sanitized': ''},
    {'index': 6, 'sentence': 'This sentence is not too short and not too long, without URL and without citation.', 'relevant': False, 'sanitized': ''},
    {'index': 7, 'sentence': "I have many things to say in that sentence...", 'relevant': False, 'sanitized': ''}]
    """
    if getter is None:
        getter = str
    if tester is None:
        tester = is_relevant_sentence
    # Split every selected document into sentences, in the order given by `indices`.
    sentences = [{'index': i, 'sentence': sent, 'relevant': tester(sent)}
                 for i, sent in enumerate([sent for j in indices
                                           for sent in make_sentences(getter(source[j]))])]
    # De-duplicate: a sentence already seen earlier loses its 'relevant' flag.
    used = set()
    for s in sentences:
        if s['sentence'] in used and s['relevant']:
            s['relevant'] = False
        else:
            used.add(s['sentence'])
        s['sanitized'] = sanitize_text(s['sentence']) if s['relevant'] else ""
    return sentences


default_summarizer_parameters = {
    'order': 'rank',
    'text_getter': None,
    'sentence_tester': is_relevant_sentence,
    'itf': True,
    'post_processing': lambda summa, i: summa.sentences_[i]['sentence'],
    'sentence_gismo_parameters': {'post': False, 'resolution': .99},
    'num_documents': None,
    'num_query': None,
    'num_sentences': None,
    'max_chars': None}
"""
List of parameters for the summarizer with their default values.

Parameters
-----------
order: :class:`str`
    Sorting function.
text_getter: callable
    Extraction of text from corpus item. If not specified, the to_text of the :class:`~gismo.corpus.Corpus` will be used.
sentence_tester: callable
    Function that estimates if a sentence is eligible to be part of the summary
itf: :class:`bool`
    Use of ITF normalization in the sentence-level Gismo
post_processing: callable
    post_processing transformation. Signature is (:class:`~sisu.summarizer.Summarizer`, :class:`int`) -> :class:`str`
sentence_gismo_parameters: :class:`dict`
    Tuning of sentence-level gismo. `post` MUST be set to False.
num_documents: :class:`int` or None
    Number of documents to pre-select
num_query: :class:`int` or None
    Number of features to use in generic query
num_sentences: :class:`int` or None
    Number of sentences to return
max_chars: :class:`int` or None
    Maximal number of characters to return
"""


class Summarizer:
    """
    Summarizer class.

    Parameters
    ----------
    gismo: :class:`~gismo.gismo.Gismo`
        Gismo of the documents to analyze.
    kwargs: :class:`dict`
        Parameters of the summarizer (see :obj:`~sisu.summarizer.default_summarizer_parameters` for details).

    Attributes
    ----------
    query_: :class:`str`
        Query used to summarize.
    sentences_: :class:`list` of :class:`dict`
        Selected sentences. Each sentence is a dictionary with the following keys:

        - `index`: position of the sentence in the returned list
        - `sentence`: the actual sentence
        - `relevant`: a boolean that tells if the sentence is eligible for being part of the summary
        - `sanitized`: for relevant sentences, a simplified version to be fed to the embedding
    order_: :class:`numpy.ndarray`
        Proposed incomplete ordering of the :class:`~sisu.summarizer.Summarizer.sentences_`
    sentence_gismo_: :class:`~gismo.gismo.Gismo`
        Gismo running at sentence level.
    parameters: :class:`~gismo.parameters.Parameters`
        Handler of parameters.

    Examples
    --------
    The package contains a data folder with a toy gismo with articles related to Covid-19. We load it.

    >>> gismo = Gismo(filename="toy_gismo", path="data")

    Then we build a summarizer out of it. We tell to fetch the sentences from the content of the articles.

    >>> summa = Summarizer(gismo, text_getter = lambda d: d['content'])

    Ask for a summary on *bat* with a maximal budget of 500 characters, using pure TF-IDF sentence embedding.

    >>> summa('bat', max_chars=500, itf=False) # doctest: +NORMALIZE_WHITESPACE
    ['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3) with Bat SARS-like
    coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate Bat-SL-CoVZXC21, the latter two were
    shown to share 89.1% and 88.6% sequence identity to 2019-nCoV S-protein (supplementary figure 1) .',
    'Within our bat-hemoplasma network, genotype sharing was restricted to five host communities, 380 whereas six
    genotypes were each restricted to a single bat species (Fig. 5A ).']

    Now a summary based on the *cosine* ordering, using the content of abstracts and pure TF-IDF sentence embedding.

    >>> summa('bat', max_chars=500, order='cosine', text_getter = lambda d: d['abstract']) # doctest: +NORMALIZE_WHITESPACE
    ['Bat dipeptidyl peptidase 4 (DPP4) sequences were closely related to 38 those of human and non-human primates
    but distinct from dromedary DPP4 sequence.',
    'The multiple sequence alignment data correlated with already published reports on SARS-CoV-2 indicated that it
    is closely related to Bat-Severe Acute Respiratory Syndrome like coronavirus (Bat CoV SARS-like) and wellstudied
    Human SARS.',
    '(i.e., hemoplasmas) across a species-rich 40 bat community in Belize over two years.']

    Now 4 sentences using a *coverage* ordering.

    >>> summa('bat', num_sentences=4, order='coverage') # doctest: +NORMALIZE_WHITESPACE
    ['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3) with Bat SARS-like
    coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate Bat-SL-CoVZXC21, the latter two were
    shown to share 89.1% and 88.6% sequence identity to 2019-nCoV S-protein (supplementary figure 1) .',
    'However, we have not done the IDPs analysis for ORF10 from the Bat-SL-CoVZC45 strain since we have taken
    different strain of Bat CoV (reviewed strain HKU3-1) in our study.',
    'To test the dependence of the hemoplasma 290 phylogeny upon the bat phylogeny and thus assess evidence of
    evolutionary codivergence, we 291 applied the Procrustes Approach to Cophylogeny (PACo) using distance matrices
    and the paco 292 We used hemoplasma genotype assignments to create a network, with each node representing a 299
    bat species and edges representing shared genotypes among bat species pairs.',
    'However, these phylogenetic patterns in prevalence were decoupled from those describing bat 526 species
    centrality in sharing hemoplasmas, such that genotype sharing was generally restricted 527 by bat phylogeny.']

    As you can see, there are some ``However, '' in the answers. A bit of NLP post_processing can take care of those.

    >>> import spacy
    >>> nlp = spacy.load("en_core_web_sm")
    >>> post_nlp = PostNLP(nlp)
    >>> summa('bat', num_sentences=4, order='coverage', post_processing=post_nlp) # doctest: +NORMALIZE_WHITESPACE
    ['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3) with Bat SARS-like
    coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate Bat-SL-CoVZXC21, the latter two were
    shown to share 89.1% and 88.6% sequence identity to 2019-nCoV S-protein (supplementary figure 1) .',
    'We have not done the IDPs analysis for ORF10 from the Bat-SL-CoVZC45 strain since we have taken different
    strain of Bat CoV (reviewed strain HKU3-1) in our study.',
    'To test the dependence of the hemoplasma 290 phylogeny upon the bat phylogeny and thus assess evidence of
    evolutionary codivergence, we 291 applied the Procrustes Approach to Cophylogeny (PACo) using distance matrices
    and the paco 292 We used hemoplasma genotype assignments to create a network, with each node representing a 299
    bat species and edges representing shared genotypes among bat species pairs.',
    'These phylogenetic patterns in prevalence were decoupled from those describing bat 526 species centrality in
    sharing hemoplasmas, such that genotype sharing was generally restricted 527 by bat phylogeny.']
    """
    def __init__(self, gismo, **kwargs):
        self.gismo = gismo
        self.query_ = None
        self.sentences_ = None
        self.order_ = None
        self.sentence_gismo_ = None
        self.parameters = Parameters(parameter_list=default_summarizer_parameters, **kwargs)
        # Default text extraction falls back to the corpus' own to_text.
        if self.parameters.text_getter is None:
            self.parameters.text_getter = self.gismo.corpus.to_text

    def rank_documents(self, query, num_query=None):
        """
        Perform a Gismo query at document-level. If the query fails, builds a generic query instead.
        The :attr:`~sisu.summarizer.Summarizer.gismo` and :attr:`~sisu.summarizer.Summarizer.query_`
        attributes are updated.

        Parameters
        ----------
        query: :class:`str`
            Input text
        num_query: :class:`int`
            Number of words of the generic query, if any

        Returns
        -------
        None
        """
        if num_query is None:
            num_query = self.parameters.num_query
        success = self.gismo.rank(query)
        if success:
            self.query_ = query
        else:
            # Fallback: build a generic query from the top-ranked features.
            self.query_ = " ".join(self.gismo.get_features_by_rank(k=num_query))
            self.gismo.rank(self.query_)

    def build_sentence_source(self, num_documents=None, getter=None, tester=None):
        """
        Creates the corpus of sentences (:attr:`~sisu.summarizer.Summarizer.sentences_`)

        Parameters
        ----------
        num_documents: :class:`int`, optional
            Number of documents to select (if not, Gismo will automatically decide).
        getter: callable
            Extraction of text from corpus item. If not specified, the to_text of the
            :class:`~gismo.corpus.Corpus` will be used.
        tester: callable
            Function that estimates if a sentence is eligible to be part of the summary.

        Returns
        -------
        None
        """
        if num_documents is None:
            num_documents = self.parameters.num_documents
        if getter is None:
            getter = self.parameters.text_getter
        if tester is None:
            tester = self.parameters.sentence_tester
        self.sentences_ = extract_sentences(source=self.gismo.corpus,
                                            indices=self.gismo.get_documents_by_rank(k=num_documents, post=False),
                                            getter=getter, tester=tester)

    def build_sentence_gismo(self, itf=None, s_g_p=None):
        """
        Creates the Gismo of sentences (:attr:`~sisu.summarizer.Summarizer.sentence_gismo_`)

        Parameters
        ----------
        itf: :class:`bool`, optional
            Applies TF-ITF embedding. If False, TF-IDF embedding is used.
        s_g_p: :class:`dict`
            Parameters for the sentence Gismo.

        Returns
        -------
        None
        """
        if itf is None:
            itf = self.parameters.itf
        if s_g_p is None:
            s_g_p = self.parameters.sentence_gismo_parameters
        sentence_corpus = Corpus(source=self.sentences_, to_text=lambda s: s['sanitized'])
        sentence_embedding = Embedding() if itf else IdfEmbedding()
        # Reuse the vocabulary/features of the document-level embedding.
        sentence_embedding.fit_ext(embedding=self.gismo.embedding)
        sentence_embedding.transform(sentence_corpus)
        self.sentence_gismo_ = Gismo(sentence_corpus, sentence_embedding, **s_g_p)

    def build_coverage_order(self, k):
        """
        Populate :attr:`~sisu.summarizer.Summarizer.order_` with a covering order
        with target number of sentences *k*. The actual number of indices is
        stretched by the sentence Gismo stretch factor.

        Parameters
        ----------
        k: :class:`int`
            Number of optimal covering sentences.

        Returns
        -------
        :class:`numpy.ndarray`
            Covering order.
        """
        p = self.sentence_gismo_.parameters(post=False)
        cluster = self.sentence_gismo_.get_documents_by_cluster(k=int(k * p['stretch']), **p)
        return covering_order(cluster, wide=p['wide'])

    def summarize(self, query="", **kwargs):
        """
        Performs a full run of all summary-related operations:

        - Rank a query at document level, fallback to a generic query if the query fails;
        - Extract sentences from the top documents
        - Order sentences by one of the three methods proposed, *rank*, *coverage*, and *cosine*
        - Apply post-processing and return list of selected sentences.

        Note that calling a :class:`~sisu.summarizer.Summarizer` will call its
        :meth:`~sisu.summarizer.Summarizer.summarize` method.

        Parameters
        ----------
        query: :class:`str`
            Query to run.
        kwargs: :class:`dict`
            Runtime specific parameters
            (see :obj:`~sisu.summarizer.default_summarizer_parameters` for possible arguments).

        Returns
        -------
        :class:`list` of :class:`str`
            Summary.
        """
        # Instantiate parameters for the call
        p = self.parameters(**kwargs)
        # Perform query, fallback to generic query in case of failure
        self.rank_documents(query=query, num_query=p['num_query'])
        # Extract and preprocess sentences
        self.build_sentence_source(num_documents=p['num_documents'],
                                   getter=p['text_getter'],
                                   tester=p['sentence_tester'])
        # Order sentences
        if p['order'] == 'cosine':
            self.order_ = cosine_order(self.gismo.embedding.query_projection, self.sentences_, self.query_)
        elif p['order'] in {'rank', 'coverage'}:
            self.build_sentence_gismo(itf=p['itf'], s_g_p=p['sentence_gismo_parameters'])
            self.sentence_gismo_.rank(query)
            if p['num_sentences'] is None:
                # Let Gismo pick a sensible number of sentences.
                p['num_sentences'] = auto_k(data=self.sentence_gismo_.diteration.x_relevance,
                                            order=self.sentence_gismo_.diteration.x_order,
                                            max_k=self.sentence_gismo_.parameters.max_k,
                                            target=self.sentence_gismo_.parameters.target_k)
            if p['order'] == 'rank':
                self.order_ = self.sentence_gismo_.diteration.x_order
            else:
                self.order_ = self.build_coverage_order(p['num_sentences'])
        if p['max_chars'] is None:
            # Fixed sentence budget: post-process and drop emptied sentences.
            results = [p['post_processing'](self, i) for i in self.order_[:p['num_sentences']]]
            return [txt for txt in results if len(txt) > 0]
        else:
            # Character budget: greedily accept sentences while they fit.
            results = []
            length = 0
            # Maximal number of sentences that will be processed
            # (heuristic: assumes sentences average at least ~50 chars).
            max_sentences = int(p['max_chars'] / 50)
            for i in self.order_[:max_sentences]:
                txt = p['post_processing'](self, i)
                l = len(txt)
                if l > 0 and length + l < p['max_chars']:
                    results.append(txt)
                    length += l
                    if length > .98 * p['max_chars']:
                        # Budget nearly exhausted; stop early.
                        break
            return results

    def __call__(self, query="", **kwargs):
        return self.summarize(query, **kwargs)


class PostNLP:
    """
    Post-processor for the :class:`~sisu.summarizer.Summarizer` that leverages a spacy NLP engine.

    - Discard sentences with no verb.
    - Remove adverbs and punctuations that starts a sentence (e.g. "However, we ..." -> "We ...").
    - Optionally, if the engine supports co-references, resolve them.

    Parameters
    ----------
    nlp: callable
        A Spacy nlp engine.
    coref: :class:`bool`
        Resolve co-references if the nlp engine supports it.
    """
    def __init__(self, nlp, coref=False):
        self.nlp = nlp
        self.coref = coref

    def __call__(self, summa, i):
        nlp_sent = self.nlp(summa.sentences_[i]['sentence'])
        tags = {token.tag_ for token in nlp_sent}
        # No verb tag (VB*) at all: drop the sentence from the summary.
        if not any([t.startswith("VB") for t in tags]):
            summa.sentences_[i]['relevant'] = False
            return ""
        # Strip leading adverbs and a following punctuation token.
        # NOTE(review): `nlp_sent[0]` is tested before the length guard; this
        # is safe only because a verb was found above, so the span cannot be
        # emptied by removing adverbs — confirm if this invariant changes.
        while nlp_sent[0].pos_ == "ADV" and len(nlp_sent) > 0:
            nlp_sent = nlp_sent[1:]
            if nlp_sent[0].pos_ == "PUNCT":
                nlp_sent = nlp_sent[1:]
        txt = nlp_sent.text
        # Re-capitalize the (possibly new) first character.
        summa.sentences_[i]['sentence'] = f"{txt[0].upper()}{txt[1:]}"
        # Optional co-reference resolution over a 3-sentence context window.
        if "PRP" in tags and self.coref and hasattr(nlp_sent._, 'has_coref'):
            extract_str = " ".join([s['sentence'] for s in summa.sentences_[max(0, i - 2): i + 1]])
            extract = self.nlp(extract_str)
            if extract._.has_coref:
                resolved_extract = extract._.coref_resolved
                summa.sentences_[i]['sentence'] = make_sentences(resolved_extract)[-1]
        return summa.sentences_[i]['sentence']
21,887
6,667
import numpy as np
import os
from automr import dump_mat
from functools import partial, reduce

# Flush prints immediately so interleaved mol.stdout output stays ordered.
print = partial(print, flush=True)
einsum = partial(np.einsum, optimize=True)


def print_mol(mol):
    """Dump geometry and basis-set information of a PySCF ``Mole`` object.

    Mirrors the ``[INPUT] BASIS SET`` section PySCF prints at high verbosity,
    writing to ``mol.stdout``.
    """
    print(mol._basis)
    print(mol.atom)
    print(mol._atom)
    print(mol.aoslice_by_atom())
    print(mol.ao_labels())
    #if mol.verbose >= logger.DEBUG:
    mol.stdout.write('[INPUT] ---------------- BASIS SET ---------------- \n')
    mol.stdout.write('[INPUT] l, kappa, [nprim/nctr], '
                     'expnt, c_1 c_2 ...\n')
    for atom, basis_set in mol._basis.items():
        mol.stdout.write('[INPUT] %s\n' % atom)
        for b in basis_set:
            # Basis entry layout: [l, (kappa,) [expnt, c1, c2, ...], ...]
            if isinstance(b[1], int):
                kappa = b[1]
                b_coeff = b[2:]
            else:
                kappa = 0
                b_coeff = b[1:]
            nprim = len(b_coeff)
            nctr = len(b_coeff[0]) - 1
            if nprim < nctr:
                # BUGFIX: the original called `logger.warn(...)` but `logger`
                # was never imported, so this path raised NameError.
                mol.stdout.write('Warn: num. primitives smaller than '
                                 'num. contracted basis\n')
            mol.stdout.write('[INPUT] %d %2d [%-5d/%-4d] '
                             % (b[0], kappa, nprim, nctr))
            for k, x in enumerate(b_coeff):
                if k == 0:
                    mol.stdout.write('%-15.12g ' % x[0])
                else:
                    mol.stdout.write(' '*32 + '%-15.12g ' % x[0])
                for c in x[1:]:
                    mol.stdout.write(' %4.12g' % c)
                mol.stdout.write('\n')


def py2qchem(mf, basename, is_uhf=False):
    """Export PySCF SCF orbitals as a Q-Chem ``53.0`` scratch (guess) file.

    Writes alpha/beta MO coefficients (AO-overlap renormalized) followed by
    zeroed orbital energies to ``/tmp/qchem/<basename>/53.0``, then creates the
    matching Q-Chem input via :func:`create_qchem_in`.

    Parameters
    ----------
    mf : PySCF SCF object (RHF or UHF)
    basename : str
        Job name; also names the scratch directory and input file.
    is_uhf : bool
        True when ``mf.mo_coeff`` holds separate alpha/beta sets.
    """
    if is_uhf:
        mo_coeffa = mf.mo_coeff[0]
        mo_coeffb = mf.mo_coeff[1]
        #mo_enea = mf.mo_energy[0]
        #mo_eneb = mf.mo_energy[1]
    else:
        # Restricted case: alpha and beta share the same coefficients.
        mo_coeffa = mf.mo_coeff
        mo_coeffb = mf.mo_coeff
        #mo_enea = mf.mo_energy
        #mo_eneb = mf.mo_energy
    # Orbital energies are not needed for a guess read; write zeros.
    mo_enea = np.zeros(len(mo_coeffa))
    mo_eneb = np.zeros(len(mo_coeffa))
    # Rescale rows by sqrt of the AO overlap diagonal, then transpose to
    # the row-major layout Q-Chem expects.
    Sdiag = mf.get_ovlp().diagonal()**(0.5)
    mo_coeffa = einsum('ij,i->ij', mo_coeffa, Sdiag).T
    mo_coeffb = einsum('ij,i->ij', mo_coeffb, Sdiag).T
    #dump_mat.dump_mo(mf.mol, mo_coeffa, ncol=10)
    guess_file = np.vstack([mo_coeffa, mo_coeffb, mo_enea, mo_eneb]).flatten()
    tmpbasename = '/tmp/qchem/' + basename
    # BUGFIX/idiom: os.makedirs replaces `os.system('mkdir -p ' + ...)`
    # (no shell involved, works with spaces in basename).
    os.makedirs(tmpbasename, exist_ok=True)
    # BUGFIX: tofile(sep='') writes raw binary, so the file must be opened
    # in binary mode ('wb'), not text mode.
    with open(tmpbasename + '/53.0', 'wb') as f:
        guess_file.tofile(f, sep='')
    create_qchem_in(mf, basename)


def create_qchem_in(mf, basename, uhf=False, sph=True):
    """Write a Q-Chem input file ``<basename>.in`` for a GVB/PP job.

    Parameters
    ----------
    mf : PySCF SCF object
        Supplies charge, spin and geometry (via ``mf.mol``).
    basename : str
        Output file name stem.
    uhf : bool
        Emit ``unrestricted = true`` when True.
    sph : bool
        Spherical (purecart 1111) vs cartesian (purecart 2222) functions.
    """
    # Geometry in Angstrom (unit=1).
    atom = mf.mol.format_atom(mf.mol.atom, unit=1)
    with open(basename + '.in', 'w') as f:
        f.write('$molecule\n')
        f.write(' %d %d\n' % (mf.mol.charge, mf.mol.spin + 1))
        for a in atom:
            f.write(' %s %12.6f %12.6f %12.6f\n' % (a[0], a[1][0], a[1][1], a[1][2]))
        f.write('$end\n\n')
        # Kept for reference: plain-HF two-step template (inactive).
        '''f.write('$rem\n')
        f.write(' method = hf\n')
        if uhf:
            f.write(' unrestricted = true\n')
        f.write(' basis = cc-pvdz\n')
        f.write(' print_orbitals = true\n')
        f.write(' sym_ignore = true\n')
        if sph:
            f.write(' purecart = 1111\n')
        else:
            f.write(' purecart = 2222\n')
        f.write(' scf_guess_print = 2\n')
        f.write(' scf_guess = read\n')
        f.write(' scf_convergence = 0\n')
        f.write(' thresh = 12\n')
        f.write('$end\n\n')
        f.write('@@@\n\n')
        f.write('$molecule\n')
        f.write('read\n')
        f.write('$end\n\n')'''
        f.write('$rem\n')
        #f.write(' method = hf\n')
        f.write(' correlation = pp\n')
        f.write(' gvb_local = 0\n')
        f.write(' gvb_n_pairs = 2\n')
        f.write(' gvb_print = 1\n')
        if uhf:
            f.write(' unrestricted = true\n')
        f.write(' basis = cc-pvdz\n')
        f.write(' print_orbitals = true\n')
        f.write(' sym_ignore = true\n')
        if sph:
            f.write(' purecart = 1111\n')
        else:
            f.write(' purecart = 2222\n')
        f.write(' scf_guess_print = 2\n')
        f.write(' scf_guess = read\n')
        f.write(' thresh = 12\n')
        f.write('$end\n\n')


def qchem2py(basename):
    """Read alpha/beta MO coefficient matrices back from a Q-Chem ``53.0`` file.

    Returns
    -------
    tuple of two (nmo, nmo) ndarrays ``(moa, mob)``.
    """
    # BUGFIX: the scratch file is raw binary; open with 'rb', not 'r'.
    with open('/tmp/qchem/' + basename + '/53.0', 'rb') as f:
        data = np.fromfile(f)
    print(data.shape)
    n = data.shape[0]
    #x = sympy.Symbol('x')
    #nmo = sympy.solve(2*x*(x+1) -n, x)
    # Layout is [moa (nmo^2), mob (nmo^2), enea (nmo), eneb (nmo)],
    # i.e. n = 2*nmo*(nmo+1); solve the quadratic for nmo.
    nmo = int(np.sqrt(n/2.0 + 0.25) - 0.5)
    moa = data[:nmo*nmo].reshape(nmo, nmo).T
    mob = data[nmo*nmo:2*nmo*nmo].reshape(nmo, nmo).T
    mo = (moa, mob)
    return mo
4,482
1,872
from justgood import imjustgood media = imjustgood("YOUR_APIKEY_HERE") query = "gojek" # example query data = media.playstore(query) # Get attributes number = 0 result = "Playstore :" for a in data["result"]: number += 1 result += "\n\n{}. {}".format(number, a["title"]) result += "\nDeveloper : {}".format(a["developer"]) result += "\nThumbnail : {}".format(a["thumbnail"]) result += "\nURL : {}".format(a["pageUrl"]) print(result) # Get JSON results print(data)
487
167
import numpy as np # disjoint-set forests using union-by-rank and path compression (sort of). class universe: def __init__(self, n_elements): self.num = n_elements self.elts = np.empty(shape=(n_elements, 3), dtype=int) for i in range(n_elements): self.elts[i, 0] = 0 # rank self.elts[i, 1] = 1 # size self.elts[i, 2] = i # p def size(self, x): return self.elts[x, 1] def num_sets(self): return self.num def find(self, x): y = int(x) while y != self.elts[y, 2]: y = self.elts[y, 2] self.elts[x, 2] = y return y def join(self, x, y): # x = int(x) # y = int(y) if self.elts[x, 0] > self.elts[y, 0]: self.elts[y, 2] = x self.elts[x, 1] += self.elts[y, 1] else: self.elts[x, 2] = y self.elts[y, 1] += self.elts[x, 1] if self.elts[x, 0] == self.elts[y, 0]: self.elts[y, 0] += 1 self.num -= 1
1,089
459
num1 = 1 num2 = 20 num3 = 168 # dev first commit num1 = 1 # resolve conflict num2 = 88888888 # Test next commit num3 = 99
131
71
from __future__ import unicode_literals
import frappe
from datetime import datetime
from frappe.model.document import Document

# Maps a participant's reference doctype to the (email column, display-name
# column, table) needed to look up its contact details.
_PARTICIPANT_SOURCES = {
    "Employee": ("prefered_email", "employee_name", "tabEmployee"),
    "Lead": ("email_id", "lead_name", "tabLead"),
}


def _send_invitation(doc, recipient, display_name):
    """Send the event-invitation mail for Event `doc` to one recipient."""
    content = (
        "<h4>Dear,</h4><p>" + display_name + "</p><br><br><p>Event : "
        + str(doc.subject) + "</p><p>Start : " + str(doc.starts_on)
        + "</p><p>End : " + str(doc.ends_on) + "</p><p>Event Category : "
        + str(doc.event_category) + "</p><br><p><b>Description : </b><br>"
        + str(doc.description) + "</p>"
    )
    frappe.sendmail(
        recipients=recipient,
        sender="notification@latteysindustries.com",
        subject="Invitation For Event",
        content=content,
    )


@frappe.whitelist(allow_guest=True)
def sendMail(doc, method):
    """doc_events hook: mail every Employee/Lead participant of an Event.

    Runs only when `doc.send_email_on_event_creation` is set; `method` is the
    hook name supplied by frappe and is unused.  The previous version carried
    two copy-pasted branches for Employee and Lead; they are unified here.
    """
    if not doc.send_email_on_event_creation:
        return
    for d in doc.event_participants:
        source = _PARTICIPANT_SOURCES.get(d.reference_doctype)
        if source is None:
            # Other participant doctypes are intentionally ignored.
            continue
        email_col, name_col, table = source
        # Parameterized query; reference_docname is the row's primary key.
        email = frappe.db.sql(
            """select {0},{1} from `{2}` where name = %s;""".format(email_col, name_col, table),
            (d.reference_docname),
        )
        if email:
            _send_invitation(doc, email[0][0], email[0][1])
1,441
573
#!/usr/bin/env python
# Dell RAC (Remote Access Controller) client: status/error code tables.
# NOTE: Python 2 module (uses httplib); kept as-is.
import os
import sys
import hashlib
import httplib
import base64
import socket
from xml.dom.minidom import *

# RAC return codes -> human-readable messages.
RAC_CODE = {
    'x' : 'Unknown error',
    '0x0' : 'Success',
    '0x4' : 'Number of arguments does not match',
    '0xc' : 'Syntax error in xml2cli command',
    '0x408' : 'Session Timeout',
    '0x43' : 'No such subfunction',
    '0x62' : 'Command not supported on this platform for this firmware',
    '0xb0002' : 'Invalid handle',
    '0x140000' : 'Too many sessions',
    '0x140002' : 'Logout',
    '0x140004' : 'Invalid password',
    '0x140005' : 'Invalid username',
    '0x150008' : 'Too many requests',
    '0x15000a' : 'No such event',
    '0x15000c' : 'No such function',
    '0x15000d' : 'Unimplemented',
    '0x170003' : 'Missing content in POST ?',
    '0x170007' : 'Dont know yet',
    '0x1a0004' : 'Invalid sensorname',
    '0x10150006' : 'Unknown sensor error',
    '0x10150009' : 'Too many sensors in sensorlist',
    '0x20308' : 'Console not available',
    '0x30003' : 'Console not active',
    '0x3000a' : 'Console is in text mode',
    '0x3000b' : 'Console is in VGA graphic mode',
    '0x30011' : [ 'Console is in Linux mode (no ctrl+alt+del)',
                  'Console is in Windows or Netware mode' ],
    '0xe0003' : 'Unknown serveraction',
    '0xf0001' : 'Offset exceeds number of entries in eventlog',
    '0xf0003' : 'Request exceeds number of entries in eventlog',
    '0xf0004' : 'Invalid number of events requested'
}

# Sensor severity codes -> labels.
SEVERITY = {
    'x' : 'Unknown severity. ',
    '' : '-',
    '0x1' : 'Unknown',
    '0x2' : 'OK',
    '0x3' : 'Information',
    '0x4' : 'Recoverable',
    '0x5' : 'Non-Critical',
    '0x6' : 'Critical',
    '0x7' : 'Non-Recoverable',
}

# Sensor IDs known to report bogus data, per server model.
BOGUS_IDS_1650 = [
    '0x1010018', '0x1020010', '0x1020018', '0x1020062', '0x1030010',
    '0x1030018', '0x1030062', '0x1040010', '0x1040018', '0x1050018',
    '0x1060010', '0x1060018', '0x1060062', '0x1070018', '0x1070062',
    '0x1080010', '0x1080062', '0x1090010', '0x10a0010', '0x10f0062',
    '0x1100010', '0x1110010', '0x1120010', '0x1120062', '0x1130010',
    '0x1140010', '0x1150010', '0x13b0010', '0x13c0010', '0x13f0010',
    '0x14b0010', '0x14d0010', '0x20e0062', '0x2110062', '0x2160061',
    '0x2160062', '0x2170061', '0x2170062', '0x2180061', '0x2180062',
    '0x2190061', '0x2190062', '0x21a0061', '0x21a0062', '0x21b0061',
    '0x21b0062', '0x21e0010', '0x21e0061', '0x21e0062', '0x21f0061',
    '0x21f0062', '0x2210010', '0x2220010', '0x2230010', '0x2240010',
    '0x2250010', '0x2260010', '0x2270010', '0x2280010', '0x2290010',
    '0x22a0010', '0x22b0010', '0x22c0010', '0x22d0010', '0x22e0010',
    '0x22f0010', '0x2300010', '0x2310010', '0x2320010', '0x2330010',
    '0x2340010', '0x2350010', '0x2360010', '0x2370010', '0x2380010',
    '0x2390010', '0x23a0010', '0x23e0010', '0x2410010', '0x2420010',
    '0x2430010', '0x2440010', '0x2450010', '0x2460010', '0x2470010',
    '0x2480010', '0x2530010',
]

BOGUS_IDS_2650 = [
    '0x1350010', '0x1360010', '0x2160061', '0x2170061', '0x2180061',
    '0x2190061', '0x21a0061', '0x21b0061', '0x21c0061', '0x21d0061',
    '0x21e0060', '0x21e0061', '0x21f0060', '0x21f0061', '0x2d00010',
]

BOGUS_IDS_1750 = [
    '0x1060062', '0x1070062', '0x1080062', '0x10f0062', '0x1120062',
    '0x1030062', '0x1020062', '0x20e0062', '0x2110062', '0x2160062',
    '0x2170062', '0x2180062', '0x2190062', '0x21a0062', '0x21b0062',
    '0x21f0062', '0x21e0062', '0x2160061', '0x2170061', '0x2180061',
    '0x2190061', '0x21a0061', '0x21b0061', '0x21f0061', '0x21e0061',
    '0x1010010', '0x1020010', '0x1030010', '0x1040010', '0x1080010',
    '0x1090010', '0x10a0010', '0x1100010', '0x1110010', '0x1120010',
    '0x1130010', '0x1140010', '0x1150010', '0x21e0010', '0x2210010',
    '0x2220010', '0x2230010', '0x2240010', '0x2250010', '0x2260010',
    '0x2290010', '0x22a0010', '0x22b0010', '0x22c0010', '0x22d0010',
    '0x22e0010', '0x22f0010', '0x2300010', '0x2310010', '0x2320010',
    '0x2330010', '0x2340010', '0x2350010', '0x2360010', '0x2370010',
    '0x2380010', '0x2390010', '0x23a0010', '0x13b0010', '0x13c0010',
    '0x13f0010', '0x2440010', '0x2450010', '0x2460010', '0x2470010',
    '0x2480010', '0x14a0010', '0x14d0010', '0x14e0010', '0x1500010',
    '0x1510010', '0x2000010', '0x2570010', '0x10f0060', '0x1120060',
    '0x1020060', '0x1010018', '0x1020018', '0x1030018', '0x1040018',
    '0x1050018', '0x1060018', '0x1070018',
]

# Property names reported for each sensor.
PROPNAMES = [
    'NAME',
    'SEVERITY',
    'LOW_CRITICAL',
    'LOW_NON_CRITICAL',
    'VAL',
    'UNITS',
    'UPPER_NON_CRITICAL',
    'UPPER_CRITICAL',
    'SENSOR_TYPE',
]

# Per-sensor-type status-code tables.
DRIVE_SLOT_CODES = {
    '0' : 'Good',
    '1' : 'No Error',
    '2' : 'Faulty Drive',
    '4' : 'Drive Rebuilding',
    '8' : 'Drive In Failed Array',
    '16' : 'Drive In Critical Array',
    '32' : 'Parity Check Error',
    '64' : 'Predicted Error',
    '128' : 'No Drive',
}

POWER_UNIT_CODES = {
    '0' : 'AC Power Unit',
    '1' : 'DC Power Unit',
}

BUTTON_CODES = {
    '0' : 'Power Button Disabled',
    '1' : 'Power Button Enabled'
}

FAN_CONTROL_CODES = {
    '0' : 'Normal Operation',
    '1' : 'Unknown',
}

INTRUSION_CODES = {
    '0' : 'No Intrusion',
    '1' : 'Cover Intrusion Detected',
    '2' : 'Bezel Intrusion Detected',
}

POWER_SUPPLY_CODES = {
    '1' : 'Good',
    '2' : 'Failure Detected',
    '4' : 'Failure Predicted',
    '8' : 'Power Lost',
    '16' : 'Not Present',
}

PROCESSOR_CODES = {
    '1' : 'Good',
    '2' : 'Failure Detected',
    '4' : 'Failure Predicted',
    '8' : 'Power Lost',
    '16' : 'Not Present',
}

# Dispatch from sensor-type name to its code table.
# BUGFIX: 'intrusion' previously referenced the misspelled name
# INSTRUSION_CODES, which is never defined and made this module raise
# NameError at import time.
CODES = {
    'button' : BUTTON_CODES,
    'drive slot' : DRIVE_SLOT_CODES,
    'fan control' : FAN_CONTROL_CODES,
    'intrusion' : INTRUSION_CODES,
    'power supply' : POWER_SUPPLY_CODES,
    'power unit' : POWER_UNIT_CODES,
    'processor' : PROCESSOR_CODES,
}
8,022
3,786
#!/usr/bin/env python from __future__ import print_function # Copyright 2019 Juliane Mai - juliane.mai(at)uwaterloo.ca # # License # This file is part of the EEE code library for "Computationally inexpensive identification # of noninformative model parameters by sequential screening: Efficient Elementary Effects (EEE)". # # The EEE code library is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # The MVA code library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with The EEE code library. # If not, see <https://github.com/julemai/EEE/blob/master/LICENSE>. # # If you use this method in a publication please cite: # # M Cuntz & J Mai et al. (2015). # Computationally inexpensive identification of noninformative model parameters by sequential screening. # Water Resources Research, 51, 6417-6441. # https://doi.org/10.1002/2015WR016907. # # # # python 3_derive_elementary_effects.py \ # -i example_ishigami-homma/model_output.pkl \ # -d example_ishigami-homma/parameters.dat \ # -m example_ishigami-homma/parameter_sets_1_para3_M.dat \ # -v example_ishigami-homma/parameter_sets_1_para3_v.dat \ # -o example_ishigami-homma/eee_results.dat """ Derives the Elementary Effects based on model outputs stored as dictionary in a pickle file (option -i) using specified model parameters (option -d). The model parameters were sampled beforehand as Morris trajectories. The Morris trajectory information is stored in two files (option -m and option -v). The Elementary Effects are stored in a file (option -o). 
History ------- Written, JM, Mar 2019 """ # ------------------------------------------------------------------------- # Command line arguments # modeloutputs = 'example_ishigami-homma/model_output.pkl' modeloutputkey = 'All' maskfile = 'example_ishigami-homma/parameters.dat' morris_M = 'example_ishigami-homma/parameter_sets_1_para3_M.dat' morris_v = 'example_ishigami-homma/parameter_sets_1_para3_v.dat' outfile = 'example_ishigami-homma/eee_results.dat' skip = None # number of lines to skip in Morris files import optparse parser = optparse.OptionParser(usage='%prog [options]', description="Derives the Elementary Effects based on model outputs stored as dictionary in a pickle file (option -i) using specified model parameters (option -d). The model parameters were sampled beforehand as Morris trajectories. The Morris trajectory information is stored in two files (option -m and option -v). The Elementary Effects are stored in a file (option -o).") parser.add_option('-i', '--modeloutputs', action='store', default=modeloutputs, dest='modeloutputs', metavar='modeloutputs', help="Name of file used to save (scalar) model outputs in a pickle file (default: 'model_output.pkl').") parser.add_option('-k', '--modeloutputkey', action='store', default=modeloutputkey, dest='modeloutputkey', metavar='modeloutputkey', help="Key of model output dictionary stored in pickle output file. If 'All', all model outputs are taken into account and multi-objective EEE is applied. (default: 'All').") parser.add_option('-d', '--maskfile', action='store', dest='maskfile', type='string', default=maskfile, metavar='File', help='Name of file where all model parameters are specified including their distribution, distribution parameters, default value and if included in analysis or not. 
(default: maskfile=parameters.dat).') parser.add_option('-m', '--morris_M', action='store', dest='morris_M', type='string', default=morris_M, metavar='morris_M', help="Morris trajectory information: The UNSCALED parameter sets. (default: 'parameter_sets_1_para3_M.dat').") parser.add_option('-v', '--morris_v', action='store', dest='morris_v', type='string', default=morris_v, metavar='morris_v', help="Morris trajectory information: The indicator which parameter changed between subsequent sets in a trajectory. (default: 'parameter_sets_1_para3_v.dat').") parser.add_option('-s', '--skip', action='store', default=skip, dest='skip', metavar='skip', help="Number of lines to skip in Morris output files (default: None).") parser.add_option('-o', '--outfile', action='store', dest='outfile', type='string', default=outfile, metavar='File', help='File containing Elementary Effect estimates of all model parameters listed in parameter information file. (default: eee_results.dat).') (opts, args) = parser.parse_args() modeloutputs = opts.modeloutputs modeloutputkey = opts.modeloutputkey maskfile = opts.maskfile morris_M = opts.morris_M morris_v = opts.morris_v outfile = opts.outfile skip = opts.skip del parser, opts, args # ----------------------- # add subolder scripts/lib to search path # ----------------------- import sys import os dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(dir_path+'/lib') import numpy as np import pickle from fsread import fsread # in lib/ from autostring import astr # in lib/ # ------------------------- # read parameter info file # ------------------------- # parameter info file has following header: # # para dist lower upper default informative(0)_or_noninformative(1) # # mean stddev nc,snc = fsread(maskfile, comment="#",cskip=1,snc=[0,1],nc=[2,3,4,5]) snc = np.array(snc) para_name = snc[:,0] para_dist = snc[:,1] lower_bound = nc[:,0] upper_bound = nc[:,1] initial = nc[:,2] # if informative(0) -> maskpara=False # if 
noninformative(1) -> maskpara=True mask_para = np.where((nc[:,3].flatten())==1.,True,False) dims_all = np.shape(mask_para)[0] idx_para = np.arange(dims_all)[mask_para] # indexes of parameters which will be changed [0,npara-1] dims = np.sum(mask_para) # pick only non-masked bounds lower_bound_mask = lower_bound[np.where(mask_para)] upper_bound_mask = upper_bound[np.where(mask_para)] para_dist_mask = para_dist[np.where(mask_para)] para_name_mask = para_name[np.where(mask_para)] # ------------------------- # read model outputs # ------------------------- model_output = pickle.load( open( modeloutputs, "rb" ) ) if modeloutputkey == 'All': keys = list(model_output.keys()) else: keys = [ modeloutputkey ] model_output = [ np.array(model_output[ikey]) for ikey in keys ] nkeys = len(model_output) # ------------------------- # read Morris M # ------------------------- ff = open(morris_M, "r") parasets = ff.readlines() ff.close() if skip is None: skip = np.int(parasets[0].strip().split(':')[1]) else: skip = np.int(skip) parasets = parasets[skip:] for iparaset,paraset in enumerate(parasets): parasets[iparaset] = list(map(float,paraset.strip().split())) parasets = np.array(parasets) # ------------------------- # read Morris v # ------------------------- ff = open(morris_v, "r") parachanged = ff.readlines() ff.close() if skip is None: skip = np.int(parachanged[0].strip().split(':')[1]) else: skip = np.int(skip) parachanged = parachanged[skip:] for iparachanged,parachan in enumerate(parachanged): parachanged[iparachanged] = np.int(parachan.strip()) parachanged = np.array(parachanged) # ------------------------- # calculate Elementary Effects # ------------------------- ee = np.zeros([dims_all,nkeys],dtype=float) ee_counter = np.zeros([dims_all,nkeys],dtype=int) ntraj = np.int( np.shape(parasets)[0] / (dims+1) ) nsets = np.shape(parasets)[0] for ikey in range(nkeys): for iset in range(nsets): ipara_changed = parachanged[iset] if ipara_changed != -1: ee_counter[ipara_changed,ikey] 
+= 1 if ( len(np.shape(model_output[ikey])) == 1): # scalar model output ee[ipara_changed,ikey] += np.abs(model_output[ikey][iset]-model_output[ikey][iset+1]) / np.abs(parasets[iset,ipara_changed] - parasets[iset+1,ipara_changed]) elif ( len(np.shape(model_output[ikey])) == 2): # 1D model output ee[ipara_changed,ikey] += np.mean(np.abs(model_output[ikey][iset,:]-model_output[ikey][iset+1,:]) / np.abs(parasets[iset,ipara_changed] - parasets[iset+1,ipara_changed])) else: raise ValueError('Only scalar and 1D model outputs are supported!') for ikey in range(nkeys): for ipara in range(dims_all): if ee_counter[ipara,ikey] > 0: ee[ipara,ikey] /= ee_counter[ipara,ikey] # ------------------------- # write final file # ------------------------- # format: # # model output #1: 'out1' # # model output #2: 'out2' # # ii para_name elemeffect(ii),ii=1:3,jj=1:1 counter(ii),ii=1:3,jj=1:1 # 1 'x_1' 0.53458196335158181 5 # 2 'x_2' 7.0822368906630215 5 # 3 'x_3' 3.5460086652980554 5 f = open(outfile, 'w') for ikey in range(nkeys): f.write('# model output #'+str(ikey+1)+': '+keys[ikey]+'\n') f.write('# ii para_name elemeffect(ii),ii=1:'+str(dims_all)+',jj=1:'+str(nkeys)+' counter(ii),ii=1:'+str(dims_all)+',jj=1:'+str(nkeys)+' \n') for ipara in range(dims_all): f.write(str(ipara)+' '+para_name[ipara]+' '+' '.join(astr(ee[ipara,:],prec=8))+' '+' '.join(astr(ee_counter[ipara,:]))+'\n') f.close() print("wrote: '"+outfile+"'")
10,272
3,365
""" Leetcode #113 """ from typing import List class TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right = right class Solution: def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]: if not root: return None res = [] def helper(node, curr, target): curr.append(node.val) target = target - node.val if not node.left and not not.right and target == 0: res.append(curr[:]) if node.left: helper(node.left, curr, target) if node.right: helper(node.right, curr, target) tmp = curr.pop() target += tmp helper(root, [], sum) return res if __name__ == "__main__": root = TreeNode(5) root.left = TreeNode(4) root.left.left = TreeNode(11) root.left.left.left = TreeNode(7) root.left.left.right = TreeNode(2) root.right = TreeNode(8) root.right.left = TreeNode(13) root.right.right = TreeNode(4) root.right.right.left = TreeNode(5) root.right.right.right = TreeNode(1) """ Expected Tree 5 / \ 4 8 / / \ 11 13 4 / \ / \ 7 2 5 1 """ print(Solution().pathSum(root, 22))
1,377
466
from .alertsparser import AlertsParser from .subwaytripsparser import SubwayTripsParser from .stationscsvparser import StationsCsvParser
137
38
import os import pygame from input import Input from stages.stage import Stage from stages.stage_example import StageExample from stages.stage1 import Stage1 from stages.stage2 import Stage2 from stages.stage3 import Stage3 from stages.stage4 import Stage4 from stages.stage5 import Stage5 from stages.stage6 import Stage6 from stages.stage7 import Stage7 from stages.stage8 import Stage8 from stages.stage9 import Stage9 from stages.stage10 import Stage10 from stages.stage11 import Stage11 from stages.stage12 import Stage12 from stages.stage13 import Stage13 from stages.stage14 import Stage14 from stages.stage15 import Stage15 from stages.stage16 import Stage16 from stages.stage17 import Stage17 from stages.stage18 import Stage18 from stages.stage19 import Stage19 from stages.stage20 import Stage20 from stages.stage21 import Stage21 from stages.stage22 import Stage22 from stages.stage23 import Stage23 from stages.stage24 import Stage24 from stages.stage25 import Stage25 from stages.stage26 import Stage26 from stages.stage27 import Stage27 from stages.stage28 import Stage28 from stages.stage29 import Stage29 from stages.stage30 import Stage30 from stages.stage31 import Stage31 from stages.stage32 import Stage32 from stages.stage_start import Stage_start from stages.stage_end import Stage_end from stages.stage_transition import Stage_transition #os.environ['SDL_VIDEO_WINDOW_POS'] = "1, 0" os.environ['SDL_VIDEO_WINDOW_POS'] = "100, 10" resolution = [800, 600] pygame.init() pygame.mouse.set_visible(False) pygame.display.set_caption("32 bits of delivery") screen = pygame.display.set_mode(resolution) clock = pygame.time.Clock() GameIsRunning = True input = Input() stages = [ # StageExample(resolution), # Stage1(resolution), Stage_start(resolution), Stage2(resolution), # have you tried turning it on and off again? 
Stage29(resolution), # Button mash to transmit Stage27(resolution), # Stop Spamming Stage26(resolution), # Share love by petting Stage8(resolution), # Two auth factor Stage7(resolution), # USB connection Stage16(resolution), # Poop Stage18(resolution), # Upgrade PC Stage9(resolution), # Dancing Stage22(resolution), # Psychic transmission Stage21(resolution), # Fix TV Stage20(resolution), # Tune TV signal Stage17(resolution), # Buy coffee Stage25(resolution), # Share regrets Stage23(resolution), # Send SMS Stage13(resolution), # Love transmission! Stage3(resolution), # chrome game Stage15(resolution), # Clap to transmit noise Stage19(resolution), # Sell trash Stage14(resolution), # Find the strongest transmission Stage28(resolution), # Game and Watch Stage24(resolution), # Send Like Stage6(resolution), # energize with coffee Stage5(resolution), # crowd surfing game Stage32(resolution), # transmit knowledge Stage30(resolution), # transmit toothpaste Stage31(resolution), # transmit toothpaste to teeth Stage12(resolution), # Charge! Stage11(resolution), # Space Defender Stage4(resolution), # punching game Stage10(resolution), # Ninja Turtle Van Stage_end(resolution), ] # add transtitions updated_stages = [] for stage in stages: updated_stages.append(stage) updated_stages.append(Stage_transition(resolution)) stages = updated_stages currentStage = 0 #currentStage = -2 while GameIsRunning: pygame.display.flip() tick = clock.tick(60) screen.fill([0, 0, 0]) for event in pygame.event.get(): if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: GameIsRunning = False if event.type == pygame.QUIT: GameIsRunning = False if not GameIsRunning: pygame.quit() break input.update() complete = stages[currentStage].update(input, tick) if complete: currentStage = (currentStage + 1) % len(stages) stages[currentStage].__init__(resolution) stages[currentStage].draw(screen)
4,254
1,300
# Copyright (c) 2013 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. import os import subprocess from threading import Thread from Queue import Queue import tempfile import sys import traceback from .logger import get_logger logger = get_logger(__name__) class ReadThread(Thread): """ Thread that reads a pipe. """ def __init__(self, p_out, target_queue): """ Constructor. :param p_out: Pipe to read. :param target_queue: Queue that will accumulate the pipe output. """ Thread.__init__(self) self.pipe = p_out self.target_queue = target_queue def run(self): """ Reads the contents of the pipe and adds it to the queue until the pipe is closed. """ while True: line = self.pipe.readline() # blocking read if line == '': break self.target_queue.put(line) class Command(object): @staticmethod def _create_temp_file(): """ :returns: Returns the path to a temporary file. """ handle, path = tempfile.mkstemp(prefix="desktop_server") os.close(handle) return path @staticmethod def call_cmd(args): """ Runs a command in a separate process. :param args: Command line tokens. :returns: A tuple containing (exit code, stdout, stderr). """ # The commands that are being run are probably being launched from Desktop, which would # have a TANK_CURRENT_PC environment variable set to the site configuration. Since we # preserve that value for subprocesses (which is usually the behavior we want), the DCCs # being launched would try to run in the project environment and would get an error due # to the conflict. # # Clean up the environment to prevent that from happening. 
env = os.environ.copy() vars_to_remove = ["TANK_CURRENT_PC"] for var in vars_to_remove: if var in env: del env[var] # Launch the child process # Due to discrepencies on how child file descriptors and shell=True are # handled on Windows and Unix, we'll provide two implementations. See the Windows # implementation for more details. if sys.platform == "win32": ret, stdout_lines, stderr_lines = Command._call_cmd_win32(args, env) else: ret, stdout_lines, stderr_lines = Command._call_cmd_unix(args, env) out = ''.join(stdout_lines) err = ''.join(stderr_lines) return ret, out, err @staticmethod def _call_cmd_unix(args, env): """ Runs a command in a separate process. Implementation for Unix based OSes. :param args: Command line tokens. :param env: Environment variables to set for the subprocess. :returns: A tuple containing (exit code, stdout, stderr). """ # Note: Tie stdin to a PIPE as well to avoid this python bug on windows # http://bugs.python.org/issue3905 # Queue code taken from: http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python stdout_lines = [] stderr_lines = [] try: process = subprocess.Popen( args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env ) process.stdin.close() stdout_q = Queue() stderr_q = Queue() stdout_t = ReadThread(process.stdout, stdout_q) stdout_t.setDaemon(True) stdout_t.start() stderr_t = ReadThread(process.stderr, stderr_q) stderr_t.setDaemon(True) stderr_t.start() # Popen.communicate() doesn't play nicely if the stdin pipe is closed # as it tries to flush it causing an 'I/O error on closed file' error # when run from a terminal # # to avoid this, lets just poll the output from the process until # it's finished process.wait() try: process.stdout.flush() process.stderr.flush() except IOError: # This fails on OSX 10.7, but it looks like there's no ill side effect # from failing on that platform so we can ignore it. 
logger.exception("Error while flushing file descriptor:") stdout_t.join() stderr_t.join() while not stdout_q.empty(): stdout_lines.append(stdout_q.get()) while not stderr_q.empty(): stderr_lines.append(stderr_q.get()) ret = process.returncode except StandardError: # Do not log the command line, it might contain sensitive information! logger.exception("Error running subprocess:") ret = 1 stderr_lines = traceback.format_exc().split() stderr_lines.append("%s" % args) return ret, stdout_lines, stderr_lines @staticmethod def _call_cmd_win32(args, env): """ Runs a command in a separate process. Implementation for Windows. :param args: Command line tokens. :param env: Environment variables to set for the subprocess. :returns: A tuple containing (exit code, stdout, stderr). """ stdout_lines = [] stderr_lines = [] try: stdout_path = Command._create_temp_file() stderr_path = Command._create_temp_file() # On Windows, file descriptors like sockets can be inherited by child # process and are only closed when the main process and all child # processes are closed. This is bad because it means that the port # the websocket server uses will never be released as long as any DCCs # or tank commands are running. Therefore, closing the Desktop and # restarting it for example wouldn't free the port and would give the # "port 9000 already in use" error we've seen before. # To avoid this, close_fds needs to be specified when launching a child # process. However, there's a catch. On Windows, specifying close_fds # also means that you can't share stdout, stdin and stderr with the child # process, which is required here because we want to capture the output # of the process. # Therefore on Windows we'll invoke the code in a shell environment. The # output will be redirected to two temporary files which will be read # when the child process is over. # Ideally, we'd be using this implementation on Unix as well. After all, # the syntax of the command line is the same. 
However, specifying shell=True # on Unix means that the following ["ls", "-al"] would be invoked like this: # ["/bin/sh", "-c", "ls", "-al"]. This means that only ls is sent to the # shell and -al is considered to be an argument of the shell and not part # of what needs to be launched. The naive solution would be to quote the # argument list and pass ["\"ls -al \""] to Popen, but that would ignore # the fact that there could already be quotes on that command line and # they would need to be escaped as well. Python 2's only utility to # escape strings for the command line is pipes.quote, which is deprecated. # Because of these reasons, we'll keep both implementations for now. args = args + ["1>", stdout_path, "2>", stderr_path] # Prevents the cmd.exe dialog from appearing on Windows. startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW process = subprocess.Popen( args, close_fds=True, startupinfo=startupinfo, env=env, shell=True ) process.wait() # Read back the output from the two. with open(stdout_path) as stdout_file: stdout_lines = [l for l in stdout_file] with open(stderr_path) as stderr_file: stderr_lines = [l for l in stderr_file] # Track the result code. ret = process.returncode except StandardError: logger.exception("Error running subprocess:") ret = 1 stderr_lines = [traceback.format_exc().split()] stderr_lines.append("%s" % args) # Don't lose any sleep over temporary files that can't be deleted. try: os.remove(stdout_path) except: pass try: os.remove(stderr_path) except: pass return ret, stdout_lines, stderr_lines
9,439
2,465
import os _ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data(path): return os.path.join(_ROOT, 'wordlist', path)
127
50
""" Usage: briltag insertdata [options] Options: -h --help Show this screen. -c CONNECT Service name [default: onlinew] -p AUTHPATH Authentication file --name TAGNAME Name of the data tag --comments COMMENTS Comments on the tag """ from docopt import docopt from schema import Schema from brilws.cli import clicommonargs def validate(optdict): myvalidables = ['-c','-p','--name','--comments',str] argdict = dict((k,v) for k,v in clicommonargs.argvalidators.items() if k in myvalidables) s = Schema(argdict) result = s.validate(optdict) return result if __name__ == '__main__': print (docopt(__doc__,options_first=True))
745
238
from rendering.manager import *
from rendering.scenes import *
from rendering.training import *
import random
import glm
import os
import numpy as np
import math

# Directory of the volume-reconstruction (VR) compute shaders; sources are
# compiled (to the .spv files loaded below) once at import time.
__VOLUME_RECONSTRUCTION_SHADERS__ = os.path.dirname(__file__)+"/shaders/VR"

compile_shader_sources(__VOLUME_RECONSTRUCTION_SHADERS__)


class RayGenerator(RendererModule):
    """Generates one camera ray per output pixel on the GPU for a batch of
    camera poses. Each ray is packed as 6 floats.

    NOTE(review): `torch` is not imported in this module directly —
    presumably re-exported by one of the `rendering` star imports; verify.
    """

    def __init__(self, device, output_dim: (int, int), mode: int, *args, **kwargs):
        # output_dim: (height, width) of the generated ray grid — assumed from
        # the (dim[1], dim[0]) usage below; TODO confirm.
        # mode: ray-generation variant understood by the raygen shader.
        self.output_dim = output_dim
        self.mode = mode
        self.camera_buffer = None
        super().__init__(device, *args, **kwargs)

    def setup(self):
        # Uniform holding the inverse view-projection matrix used by the
        # shader to unproject pixel coordinates into world-space rays.
        self.camera_buffer = self.device.create_uniform_buffer(
            ProjToWorld=glm.mat4
        )
        pipeline = self.device.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+"/raygen.comp.spv")
        # Bindings are lambdas so buffers are resolved lazily at dispatch time.
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.pipeline.rays)
        pipeline.bind_uniform(1, ShaderStage.COMPUTE, lambda: self.camera_buffer)
        pipeline.bind_constants(
            0, ShaderStage.COMPUTE,
            dim=glm.ivec2,
            mode=int,
            seed=int
        )
        pipeline.close()
        self.pipeline = pipeline

    def forward_render(self, inputs):
        # inputs: (origins, targets); reshaped so each row is one camera pose.
        origins, targets = inputs
        origins = origins.reshape(-1, 3)
        targets = targets.reshape(-1, 3)
        # One full-resolution block of rays per camera, concatenated.
        full_rays = torch.zeros(len(origins) * self.output_dim[0] * self.output_dim[1], 6, device=origins.device)
        for i, (o, t) in enumerate(zip(origins, targets)):
            # Fresh zeroed ray buffer for this camera.
            self.pipeline.rays = self.wrap_tensor(torch.zeros(self.output_dim[0] * self.output_dim[1], 6, device=origins.device), False)
            # Setup camera: 45-degree perspective, look-at from o towards t.
            proj = glm.perspective(45, self.output_dim[1] / self.output_dim[0], 0.01, 1000)
            view = glm.lookAt(glm.vec3(*o), glm.vec3(*t), glm.vec3(0, 1, 0))
            proj_to_model = glm.inverse(proj * view)
            self.camera_buffer.ProjToWorld = proj_to_model
            with self.device.get_compute() as man:
                man.set_pipeline(self.pipeline)
                man.update_sets(0)
                man.update_constants(
                    ShaderStage.COMPUTE,
                    dim=glm.ivec2(self.output_dim[1], self.output_dim[0]),
                    mode=self.mode,
                    seed=np.random.randint(0, 10000000)
                )
                man.dispatch_threads_2D(self.output_dim[1], self.output_dim[0])
            # Copy this camera's rays into its slice of the batched output.
            # NOTE(review): 't' here shadows the loop's target vector (no
            # longer needed at this point).
            t = self.get_tensor(self.pipeline.rays)
            full_rays[i*self.output_dim[0]*self.output_dim[1]:(i+1)*self.output_dim[0]*self.output_dim[1]] = t
        return [full_rays]


class TransmittanceRenderer(RendererModule):
    """Differentiable transmittance through a density grid: the forward pass
    computes a 3-float transmittance per ray; the backward pass accumulates
    gradients with respect to the grid."""

    def __init__(self, device, *args, **kwargs):
        super().__init__(device, *args, **kwargs)

    def setup(self):
        # Homogeneous-medium parameters shared by both passes.
        self.medium_buffer = self.device.create_uniform_buffer(
            scatteringAlbedo=glm.vec3,
            density=float,
            phase_g=float
        )
        # Forward pass: (grid, rays) -> transmittances.
        pipeline = self.device.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/forward.comp.spv')
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.forward_pipeline.grid)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.forward_pipeline.rays)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.forward_pipeline.transmittances)
        pipeline.bind_uniform(3, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
            grid_dim=glm.ivec3,
            number_of_rays=int
        )
        pipeline.close()
        self.forward_pipeline = pipeline
        # Backward pass: upstream transmittance gradients -> grid gradients.
        pipeline = self.device.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/backward.comp.spv')
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.backward_pipeline.grid_gradients)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.backward_pipeline.rays)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.backward_pipeline.transmittances)
        pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.backward_pipeline.transmittance_gradients)
        pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
            grid_dim=glm.ivec3,
            number_of_rays=int
        )
        pipeline.close()
        self.backward_pipeline = pipeline

    def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
        # Update the homogeneous-medium parameters used by both passes.
        self.medium_buffer.scatteringAlbedo = scattering_albedo
        self.medium_buffer.density = density
        self.medium_buffer.phase_g = phase_g

    def forward_render(self, inputs):
        # inputs: (rays, grid). Each ray is 6 floats, hence numel // 6.
        rays, grid = inputs
        grid_dim = grid.shape
        ray_count = torch.numel(rays) // 6
        self.forward_pipeline.rays = self.wrap_tensor(rays)
        self.forward_pipeline.grid = self.wrap_tensor(grid)
        self.forward_pipeline.transmittances = self.wrap_tensor(torch.zeros(ray_count, 3, device=rays.device), False)
        with self.device.get_compute() as man:
            man.set_pipeline(self.forward_pipeline)
            man.update_sets(0)
            # Grid dims passed to the shader as (x, y, z) = (dim[2], dim[1], dim[0]).
            man.update_constants(ShaderStage.COMPUTE,
                grid_dim=glm.ivec3(grid_dim[2], grid_dim[1], grid_dim[0]),
                number_of_rays=ray_count
            )
            man.dispatch_threads_1D(ray_count)
        return [self.get_tensor(self.forward_pipeline.transmittances)]

    def backward_render(self, inputs, outputs, output_gradients):
        # Returns gradients aligned with `inputs`: None for rays, one tensor
        # for the grid.
        rays, grid = inputs
        transmittances, = outputs
        transmittance_gradients, = output_gradients
        grid_dim = grid.shape
        ray_count = torch.numel(rays) // 6
        self.backward_pipeline.rays = self.wrap_tensor(rays)
        self.backward_pipeline.transmittances = self.wrap_tensor(transmittances)
        self.backward_pipeline.transmittance_gradients = self.wrap_tensor(transmittance_gradients)
        self.backward_pipeline.grid_gradients = self.wrap_tensor(torch.zeros_like(grid))
        with self.device.get_compute() as man:
            man.set_pipeline(self.backward_pipeline)
            man.update_sets(0)
            man.update_constants(ShaderStage.COMPUTE,
                grid_dim=glm.ivec3(grid_dim[2], grid_dim[1], grid_dim[0]),
                number_of_rays=ray_count
            )
            man.dispatch_threads_1D(ray_count)
        return [None, self.get_tensor(self.backward_pipeline.grid_gradients)]


class ResampleGrid(RendererModule):
    """Resamples a 3D grid to a fixed output resolution on the GPU."""

    def __init__(self, device: DeviceManager, output_dim: (int, int, int), *args, **kwargs):
        self.output_dim = output_dim
        super().__init__(device, *args, **kwargs)

    def setup(self):
        pipeline = self.device.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + "/resampling.comp.spv")
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.pipeline.dst_grid)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.pipeline.src_grid)
        # rem0/rem1 look like std140 padding floats between the two ivec3
        # push constants — they are never set in forward_render.
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
            dst_grid_dim=glm.ivec3,
            rem0=float,
            src_grid_dim=glm.ivec3,
            rem1=float
        )
        pipeline.close()
        self.pipeline = pipeline

    def forward_render(self, inputs: List[torch.Tensor]):
        src_grid, = inputs
        self.pipeline.src_grid = self.wrap_tensor(src_grid)
        self.pipeline.dst_grid = self.wrap_tensor(torch.zeros(self.output_dim, device=src_grid.device))
        src_grid_dim = src_grid.shape
        dst_grid_dim = self.output_dim
        with self.device.get_compute() as man:
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            man.update_constants(ShaderStage.COMPUTE,
                dst_grid_dim=glm.ivec3(dst_grid_dim[2], dst_grid_dim[1], dst_grid_dim[0]),
                src_grid_dim=glm.ivec3(src_grid_dim[2], src_grid_dim[1], src_grid_dim[0])
            )
            # NOTE(review): thread count multiplies dst_grid_dim[0] twice —
            # dst_grid_dim[2] was probably intended for the last factor
            # (dispatches the wrong thread count for non-cubic grids); confirm.
            man.dispatch_threads_1D(dst_grid_dim[0] * dst_grid_dim[1] * dst_grid_dim[0])
        return [self.get_tensor(self.pipeline.dst_grid)]


class TransmittanceGenerator(Technique):
    """Renders the transmittance of a density grid as seen from a camera into
    an output image (one thread per pixel)."""

    def __init__(self, grid, output_image):
        super().__init__()
        self.grid = grid
        self.output_image = output_image
        self.width, self.height = output_image.width, output_image.height

    def __setup__(self):
        # rays: 6 floats (4 bytes each) per pixel.
        self.rays = self.create_buffer(6 * 4 * self.width * self.height,
                                       BufferUsage.STORAGE | BufferUsage.TRANSFER_SRC | BufferUsage.TRANSFER_DST,
                                       MemoryProperty.GPU)
        # Transmittance: 3 floats per pixel.
        self.transmittances = self.create_buffer(3 * 4 * self.width * self.height,
                                                 BufferUsage.STORAGE | BufferUsage.TRANSFER_SRC | BufferUsage.TRANSFER_DST,
                                                 MemoryProperty.GPU)
        # camera buffer
        self.camera_buffer = self.create_uniform_buffer(
            ProjToWorld=glm.mat4
        )
        # medium properties
        self.medium_buffer = self.create_uniform_buffer(
            scatteringAlbedo=glm.vec3,
            density=float,
            phase_g=float
        )
        pipeline = self.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+'/generator.comp.spv')
        pipeline.bind_storage_image(0, ShaderStage.COMPUTE, lambda: self.output_image)
        pipeline.bind_storage_image(1, ShaderStage.COMPUTE, lambda: self.grid)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.rays)
        pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.transmittances)
        pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.camera_buffer)
        pipeline.bind_uniform(5, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.close()
        self.pipeline = pipeline
        # Default camera and medium; callers may override before dispatch.
        self.set_camera(glm.vec3(0,0,-3), glm.vec3(0,0,0))
        self.set_medium(glm.vec3(1,1,1), 10, 0.875)

    def set_camera(self, look_from: glm.vec3, look_to: glm.vec3):
        # Setup camera: 45-degree perspective, y-up look-at.
        proj = glm.perspective(45, self.width / self.height, 0.01, 1000)
        view = glm.lookAt(look_from, look_to, glm.vec3(0, 1, 0))
        proj_to_model = glm.inverse(proj * view)
        self.camera_buffer.ProjToWorld = proj_to_model

    def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
        self.medium_buffer.scatteringAlbedo = scattering_albedo
        self.medium_buffer.density = density
        self.medium_buffer.phase_g = phase_g

    def __dispatch__(self):
        # One thread per output pixel.
        with self.get_compute() as man:
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            man.dispatch_threads_2D(self.width, self.height)


class TransmittanceForward(Technique):
    """Forward transmittance pass driven by resolver callables, so the ray,
    grid and output buffers can live elsewhere and be swapped per dispatch."""

    def __init__(self, rays_resolver, grid_dim: (int, int, int), grid_resolver, transmittance_resolver):
        super().__init__()
        self.rays_resolver = rays_resolver  # input
        self.grid_resolver = grid_resolver  # params
        self.transmittance_resolver = transmittance_resolver  # output
        self.grid_dim = glm.ivec3(grid_dim)

    def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
        self.medium_buffer.scatteringAlbedo = scattering_albedo
        self.medium_buffer.density = density
        self.medium_buffer.phase_g = phase_g

    def __setup__(self):
        # medium properties
        self.medium_buffer = self.create_uniform_buffer(
            scatteringAlbedo=glm.vec3,
            density=float,
            phase_g=float
        )
        pipeline = self.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/forward.comp.spv')
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, self.grid_resolver)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, self.rays_resolver)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, self.transmittance_resolver)
        pipeline.bind_uniform(3, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
            grid_dim = glm.ivec3,
            number_of_rays = int
        )
        pipeline.close()
        self.pipeline = pipeline
        self.set_medium(glm.vec3(1, 1, 1), 10, 0.875)

    def __dispatch__(self):
        rays = self.rays_resolver()
        with self.get_compute() as man:
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            # rays.size is presumably in bytes: 2 vec3 x 3 floats x 4 bytes
            # per ray — TODO confirm against the buffer API.
            ray_count = rays.size // (4*3*2)
            man.update_constants(ShaderStage.COMPUTE,
                grid_dim=self.grid_dim,
                number_of_rays=ray_count
            )
            man.dispatch_threads_1D(ray_count)


class TransmittanceBackward(Technique):
    """Backward transmittance pass: scatters gradients from per-ray
    transmittance gradients back into the density-grid gradient buffer."""

    def __init__(self, rays, grid_dim, gradient_densities, transmittances, gradient_transmittances):
        super().__init__()
        self.grid_dim = grid_dim
        self.rays = rays  # buffer with rays configurations (origin, direction)
        self.gradient_densities = gradient_densities  # Flatten grid 512x512x512 used as parameters
        self.transmittances = transmittances  # Float with transmittance for each ray
        self.gradient_transmittances = gradient_transmittances
        self.pipeline = None

    def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
        self.medium_buffer.scatteringAlbedo = scattering_albedo
        self.medium_buffer.density = density
        self.medium_buffer.phase_g = phase_g

    def __setup__(self):
        # medium properties
        self.medium_buffer = self.create_uniform_buffer(
            scatteringAlbedo=glm.vec3,
            density=float,
            phase_g=float
        )
        pipeline = self.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/backward.comp.spv')
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.gradient_densities)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.rays)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.transmittances)
        pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.gradient_transmittances)
        pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
            grid_dim=glm.ivec3,
            number_of_rays=int
        )
        pipeline.close()
        self.pipeline = pipeline
        self.set_medium(glm.vec3(1, 1, 1), 10, 0.875)

    def __dispatch__(self):
        with self.get_compute() as man:
            man.clear_buffer(self.gradient_densities)  # Zero grad
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            # Same bytes-per-ray convention as TransmittanceForward.
            ray_count = self.rays.size // (4 * 3 * 2)
            man.update_constants(ShaderStage.COMPUTE,
                grid_dim=self.grid_dim,
                number_of_rays=ray_count
            )
            man.dispatch_threads_1D(ray_count)


class UpSampleGrid(Technique):
    """Copies/upsamples a source grid into a destination grid of a different
    resolution, then reads the result back to host memory.

    NOTE(review): __init__ does not call super().__init__(), unlike the other
    Technique subclasses here — confirm that is intentional.
    """

    def __init__(self):
        self.src_grid = None
        self.dst_grid = None
        self.src_grid_dim = glm.ivec3(0,0,0)
        self.dst_grid_dim = glm.ivec3(0,0,0)

    def set_src_grid(self, grid_dim, grid):
        self.src_grid = grid
        self.src_grid_dim = grid_dim

    def set_dst_grid(self, grid_dim, grid):
        self.dst_grid = grid
        self.dst_grid_dim = grid_dim

    def __setup__(self):
        pipeline = self.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+"/initialize.comp.spv")
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.dst_grid)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.src_grid)
        # rem0/rem1: padding floats, never set before dispatch.
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
            dst_grid_dim=glm.ivec3,
            rem0=float,
            src_grid_dim=glm.ivec3,
            rem1=float
        )
        pipeline.close()
        self.pipeline = pipeline

    def __dispatch__(self):
        # One thread per destination voxel.
        with self.get_compute() as man:
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            man.update_constants(ShaderStage.COMPUTE,
                dst_grid_dim=self.dst_grid_dim,
                src_grid_dim=self.src_grid_dim
            )
            man.dispatch_threads_1D(self.dst_grid_dim.x * self.dst_grid_dim.y * self.dst_grid_dim.z)
            # Bring the upsampled grid back to host memory.
            man.gpu_to_cpu(self.dst_grid)
17,152
5,523
def _merge_sort(arr:'list'): if len(arr) <= 1: return arr begin = 0 end = len(arr)-1 middle = (begin+end)//2 first = _merge_sort(arr[begin:middle+1]) second = _merge_sort(arr[middle+1:end+1]) # merge ptr1 = begin ptr2 = middle+1 ptr = 0 while(ptr1<middle+1 and ptr2<end+1): if first[ptr1] < second[ptr2-middle-1]: arr[ptr] = first[ptr1] ptr1 += 1 else: arr[ptr] = second[ptr2-middle-1] ptr2 += 1 ptr += 1 # print(ptr1, ptr2) while(ptr1 < middle+1): arr[ptr] = first[ptr1] ptr1 += 1 ptr += 1 while(ptr2 < end+1): arr[ptr] = second[ptr2-middle-1] ptr2 += 1 ptr += 1 return arr # test if __name__ == '__main__': print(_merge_sort([1, 3, 2]))
868
358
# - *- coding: utf- 8 - *-
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options

# Run Chrome without a visible window.
options = Options()
options.headless = True
path = 'path/to/chromedriver.exe'  # You need to change this


def parser():
    """Prompt for a search term, search amazon.co.uk with headless Chrome,
    and print the name and price of the first result.

    Two alternative XPath layouts are tried, because Amazon serves result
    pages with differing DOM structures; if the first set of selectors does
    not match, the second is used.

    NOTE(review): find_element_by_xpath and the chrome_options/positional
    driver-path arguments are removed in Selenium 4 — this code requires
    Selenium 3; confirm the pinned version.
    """
    text = input("Hi! I will help you find information about the item on the Amazon website. Enter the text: \n\n")
    # NOTE(review): input() always returns str in Python 3, so this check
    # can never fail — the else branch below is effectively dead.
    if type(text) == str:
        print("\nI have received your request. I'm starting to search...")
        try:
            driver = webdriver.Chrome(path, chrome_options=options)
            driver.get('https://www.amazon.co.uk/')
            # Type the query into the search box and submit it.
            search = driver.find_element_by_xpath('//*[@id="twotabsearchtextbox"]')
            search.send_keys(text)
            time.sleep(2)  # crude wait for the page to settle before submitting
            search.send_keys(Keys.ENTER)
            try:
                # First DOM layout: result card nested under span[3]/div[2]/div[1].
                title = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[1]/div/span/div/div/div/div/div[2]/div[2]/div/div/div[1]/h2/a/span')
                price_full = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[1]/div/span/div/div/div/div/div[2]/div[2]/div/div/div[3]/div[1]/div/div[1]/div/a/span/span[2]/span[2]')
                price_part = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[1]/div/span/div/div/div/div/div[2]/div[2]/div/div/div[3]/div[1]/div/div[1]/div/a/span/span[2]/span[3]')
                print("ᅠ")
                print(f"<<The first item on the site for your request>>\n\nName: {title.text}\nPrice: {price_full.text}.{price_part.text} £\n\n")
            except:
                # Fallback DOM layout: result card under span[3]/div[2]/div[2].
                # NOTE(review): bare except hides all errors, not just
                # NoSuchElementException — consider narrowing.
                title = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[2]/div/span/div/div/div/div/div[2]/div[1]/h2/a/span')
                price_full = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[2]/div/span/div/div/div/div/div[2]/div[3]/div/a/span[1]/span[2]/span[2]')
                price_part = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[2]/div/span/div/div/div/div/div[2]/div[3]/div/a/span[1]/span[2]/span[3]')
                print("ᅠ")
                print(f"<<The first item on the site for your request>>\n\nName: {title.text}\nPrice: {price_full.text}.{price_part.text} £\n\n")
        except Exception as e:
            # Any driver/navigation failure is reported as "nothing found".
            print("Error! Nothing was found.")
    else:
        print("Error! The input value must be of the string type.")


# Runs immediately on import/execution (blocks on input()).
parser()
2,605
980
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit


def cinematica(t, s0, v0, a):
    """Uniformly accelerated motion: s(t) = s0 + v0*t + a*t^2/2."""
    return s0 + v0 * t + (a * t * t / 2.0)


# Synthetic measurements: the true trajectory plus Gaussian noise.
t = np.linspace(0, 5, 500)
true_params = (0.5, 2.0, 1.5)  # s0, v0, a
sdata = cinematica(t, *true_params) + 0.5 * np.random.normal(size=t.size)

# Least-squares fit of the kinematic model to the noisy data.
coefs, pcov = curve_fit(cinematica, t, sdata)

# Plot measurements against the fitted curve.
plt.plot(t, sdata, 'b-', label='Deslocamento')
plt.plot(t, cinematica(t, *coefs), 'r-', label='Função ajustada')
plt.xlabel('Tempo')
plt.ylabel('Deslocamento')
plt.title('Ajuste de curva')
plt.legend()
plt.show()

print("Espaço inicial= %f" % coefs[0])
print("Velocidade inicial= %f" % coefs[1])
print("Aceleração= %f" % coefs[2])
679
339
import pandas as pd
import numpy as np
import optuna
import xgboost

# Load the first 3M rows of the Riiid train set with compact dtypes to keep
# memory usage down.
# NOTE(review): nrows=3e6 is a float; pandas documents nrows as an int —
# confirm this parses as intended on the pandas version in use.
train = pd.read_csv("~/kaggledatasets/riiid-test-answer-prediction/train.csv",
                    nrows=3e6,
                    dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32',
                           'content_id': 'int16', 'content_type_id': 'int8',
                           'task_container_id': 'int16', 'user_answer': 'int8',
                           'answered_correctly': 'int8',
                           'prior_question_elapsed_time': 'float64',
                           'prior_question_had_explanation': 'boolean'},
                    )


class DataPipeline:
    """Skeleton of a fit/transform preprocessing pipeline (not implemented)."""

    def __init__(self):
        # Tracks whether fit() has been called.
        self.is_fitted = False

    def fit(self, X, y=None):
        # NOTE(review): the flag is set *before* raising, so is_fitted becomes
        # True even though fitting never completes — confirm this is intended.
        self.is_fitted = True
        raise NotImplementedError

    def transform(self, X, y=None):
        # Returns 1 once fitted; implicitly returns None otherwise.
        if self.is_fitted == True:
            return 1
        else:
            pass

    def func(self):
        # Placeholder; no behavior yet.
        pass
1,059
326
from typing import Optional, Dict
from pathlib import Path
from copy import deepcopy
from tqdm import tqdm

import torch as pt
from torch import Tensor, nn
from torch.optim import Adam


def train(
    train_data: Dict[str, Tensor],
    valid_data: Dict[str, Tensor],
    model: nn.Module,
    optimizer: Adam,
    model_path: Path,
    n_epochs: int,
    test_size: Optional[int] = None,
    log_step: int = 10,
    patience: int = 10,
) -> None:
    """Full-batch training loop with periodic validation and early stopping.

    The model is expected to return a (loss, predictions) pair when called
    with the unpacked data dict (plus test_size for the training call).
    Validation runs every `log_step` epochs; after `patience` consecutive
    non-improving validations, training stops early.

    NOTE(review): the state dict saved at the end is the model as of the
    *last* epoch, not the best-validation checkpoint — confirm intended.
    """
    prog_bar = tqdm(total=n_epochs, unit='epoch')
    best_valid = float('inf')
    stop_counter = patience
    for epoch in range(n_epochs):
        prog_bar.update()
        model = model.train()
        # One optimization step over the full training set.
        loss_train, _ = model(**train_data, test_size=test_size)
        optimizer.zero_grad()
        loss_train.backward()
        optimizer.step()
        postfix = {'train_loss': loss_train.item()}
        if (epoch+1) % log_step == 0:
            if valid_data is not None:
                model = model.eval()
                with pt.no_grad():
                    loss_valid, _ = model(**valid_data)
                loss_valid = loss_valid.item()
                postfix['valid_loss'] = loss_valid
                if loss_valid < best_valid:
                    # Improvement: remember it and reset the patience budget.
                    best_valid = loss_valid
                    stop_counter = patience
                else:
                    stop_counter -= 1
                    if stop_counter == 0:
                        break  # early stop
        prog_bar.set_postfix(**postfix)
    prog_bar.close()
    pt.save(model.state_dict(), model_path)


def inference(
    data: Dict[str, Tensor],
    model: nn.Module,
    model_path: Path,
):
    """Load weights from *model_path* and return non-negative predictions."""
    model.load_state_dict(pt.load(model_path))
    model = model.eval()
    with pt.no_grad():
        _, pr = model(**data, test_size=0)
        # In-place clamp: negative predictions are floored at zero.
        pr = pr.clamp_min_(0.0)
    return pr
1,828
595
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save


class Profile(models.Model):
    """Public profile data attached one-to-one to an auth user."""

    username = models.CharField(max_length=255, primary_key=True)
    bio = models.TextField(null=True, blank=True)
    image = models.URLField(null=True, blank=True)
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.username


def user_was_created(sender, instance, created, **kwargs):
    """Listen for when a user is created and create a matching profile."""
    if created:
        Profile.objects.create(user=instance, username=instance.username)


post_save.connect(user_was_created, sender=settings.AUTH_USER_MODEL)
898
281
import numpy as np


# Compute normalized mutual information between two parcellations z1 and z2
def NMI(z1, z2):
    """Normalized mutual information between two integer-labeled partitions.

    Parameters
    ----------
    z1, z2 : 1-D arrays of non-negative integer labels, same length.

    Returns
    -------
    float in [0, 1]: 0 for independent partitions, 1 for identical ones
    (up to label permutation).
    """
    N = len(z1)
    assert N == len(z2)
    # Marginal label distributions and entropies. Zero-probability bins are
    # replaced by 1 so p*log(p) contributes 0 instead of NaN.
    p1 = np.bincount(z1)/N
    p1[p1 == 0] = 1
    H1 = (-p1*np.log(p1)).sum()
    p2 = np.bincount(z2)/N
    p2[p2 == 0] = 1
    H2 = (-p2*np.log(p2)).sum()
    # Joint distribution over label pairs. All bins are unit-width integer
    # bins, so density=True yields the joint probability mass directly.
    # (Fix: the old `normed=True` keyword was removed from NumPy; `density`
    # is the supported equivalent and identical here.)
    joint = np.histogram2d(z1, z2, [range(0, z1.max()+2), range(0, z2.max()+2)],
                           density=True)
    joint_p = joint[0]
    # MI = sum p(x,y) * log( p(x,y) / (p(x)p(y)) ), with empty cells
    # contributing 0 (their ratio is forced to 1 before the log).
    pdiv = joint_p/np.outer(p1, p2)
    pdiv[joint_p == 0] = 1
    MI = (joint_p*np.log(pdiv)).sum()
    if MI == 0:
        NMI = 0
    else:
        NMI = MI/np.sqrt(H1*H2)
    return NMI


# (Approximately) return whether an array is symmetric
def CheckSymApprox(D):
    """Probabilistic symmetry check: compares D[i, j] with D[j, i] for 1000
    randomly sampled index pairs instead of the full matrix, so it can give
    a false positive with (vanishingly) small probability."""
    # Random indices to check for symmetry
    sym_sub = np.random.randint(D.shape[0], size=(1000, 2))
    a = np.ravel_multi_index((sym_sub[:, 0], sym_sub[:, 1]), dims=np.shape(D))
    b = np.ravel_multi_index((sym_sub[:, 1], sym_sub[:, 0]), dims=np.shape(D))
    sym = np.all(D.flat[a] == D.flat[b])
    return sym
975
490
import pytest from xigt import XigtCorpus, Igt, Tier, Item, Metadata, Meta, MetaChild from xigt.errors import XigtError, XigtStructureError class TestMetadata(): md1 = Metadata() m1 = Meta(id='meta1', text='meta') md2 = Metadata( id='md2', type='basic', attributes={'attr':'val'}, metas=[m1] ) def test_init(self): with pytest.raises(ValueError): Metadata(id='1') # invalid id def test_id(self): assert self.md1.id is None assert self.md2.id is 'md2' def test_type(self): assert self.md1.type is None assert self.md2.type == 'basic' def test_metas(self): assert self.md1.metas == [] assert len(self.md2.metas) == 1 assert self.md2[0].text == 'meta' def test_attributes(self): assert self.md1.attributes == dict() assert self.md2.attributes == {'attr':'val'} def test_eq(self): assert self.md1 == self.md1 assert self.md2 == self.md2 assert self.md1 != self.md2 def test_getitem(self): assert self.md2[0] == self.m1 assert self.md2['meta1'] == self.m1 assert self.md2['0'] == self.m1 with pytest.raises(IndexError): self.md2[1] with pytest.raises(IndexError): self.md2['1'] with pytest.raises(KeyError): self.md2['m2'] def test_setitem(self): md = Metadata(metas=[Meta(id='meta1'), Meta(id='meta2')]) md[0] = Meta(id='meta3') assert len(md) == 2 assert md[0].id == 'meta3' with pytest.raises(KeyError): md['meta1'] with pytest.raises(ValueError): md['meta2'] = Meta(id='meta2') def test_delitem(self): md = Metadata(metas=[Meta(id='meta1'), Meta(id='meta2')]) assert len(md) == 2 del md[0] assert len(md) == 1 assert md[0].id == 'meta2' with pytest.raises(KeyError): md['meta1'] del md['meta2'] assert len(md) == 0 with pytest.raises(KeyError): md['meta2'] def test_get(self): assert self.md1.get(0) is None assert self.md1.get('meta1') is None assert self.md1.get('meta1', default=1) == 1 assert self.md2.get(0).id == 'meta1' assert self.md2.get(1) is None assert self.md2.get('meta1').id == 'meta1' assert self.md2.get('meta1', default=Meta(id='meta2')).id == 'meta1' def test_append(self): md = 
Metadata() with pytest.raises(XigtStructureError): md.append(Item()) with pytest.raises(XigtStructureError): md.append(Tier()) with pytest.raises(XigtStructureError): md.append(Igt()) with pytest.raises(XigtStructureError): md.append(XigtCorpus()) with pytest.raises(XigtStructureError): md.append(Metadata()) assert len(md) == 0 md.append(Meta(id='meta1')) assert len(md) == 1 with pytest.raises(XigtError): md.append(Meta(id='meta1')) md.append(Meta(id='meta2')) assert len(md) == 2 assert md[0].id == 'meta1' assert md[1].id == 'meta2' def test_insert(self): md = Metadata() assert len(md) == 0 md.insert(0, Meta(id='meta1')) assert len(md) == 1 with pytest.raises(XigtError): md.insert(0, Meta(id='meta1')) md.insert(0, Meta(id='meta2')) md.insert(100, Meta(id='meta3')) assert len(md) == 3 assert md[0].id == 'meta2' assert md[1].id == 'meta1' assert md[2].id == 'meta3' def test_extend(self): md = Metadata() assert len(md) == 0 md.extend([Meta(id='meta1')]) assert len(md) == 1 md.extend([]) assert len(md) == 1 md.extend([Meta(id='meta2'), Meta(id='meta3')]) assert len(md) == 3 assert md[0].id == 'meta1' assert md[1].id == 'meta2' assert md[2].id == 'meta3' def test_remove(self): md = Metadata(metas=[Meta(id='m1'), Meta(id='m2')]) assert len(md) == 2 md.remove(md[0]) assert len(md) == 1 assert md[0].id == 'm2' with pytest.raises(KeyError): md['m1'] def test_clear(self): md = Metadata() md.extend([Meta(id='meta1'), Meta(id='meta2'), Meta(id='meta3')]) assert len(md) == 3 md.clear() assert len(md) == 0 assert md.get(0) is None assert md.get('meta1') is None def test_get_attribute(self): md = Metadata( attributes={'one': 1, 'two': 2, '{http://namespace.org}three': 4}, nsmap={'pre': 'http://namespace.org'} ) igt = Igt(metadata=[md], attributes={'three': 3}) assert md.get_attribute('one') == 1 assert md.get_attribute('two') == 2 assert md.get_attribute('three') is None assert md.get_attribute('three', namespace='http://namespace.org') == 4 assert md.get_attribute('three', 
namespace='pre') == 4 assert md.get_attribute('three', inherit=True) == 3 assert md.get_attribute('three', namespace='pre', inherit=True) == 4 assert md.get_attribute('three', default=5) == 5 class TestMeta(): m1 = Meta() m2 = Meta( id='meta1', type='metatype', attributes={'one': 1}, text='metatext', children=[MetaChild('child1'), MetaChild('child2')] ) def test_init(self): with pytest.raises(ValueError): Meta(id='1') # invalid id def test_id(self): assert self.m1.id is None assert self.m2.id == 'meta1' def test_type(self): assert self.m1.type is None assert self.m2.type == 'metatype' def test_attributes(self): assert self.m1.attributes == dict() assert self.m2.attributes == {'one': 1} def test_get_attribute(self): assert self.m1.get_attribute('attr') is None assert self.m1.get_attribute('attr', 1) == 1 assert self.m2.get_attribute('one') == 1 assert self.m2.get_attribute('two') is None m = Meta(attributes={'one': 1}) md = Metadata( attributes={'two': 2}, metas=[m] ) assert m.get_attribute('two', inherit=True) == 2 def test_eq(self): assert self.m1 == self.m1 assert self.m2 == self.m2 assert self.m1 != self.m2 def test_text(self): assert self.m1.text is None assert self.m2.text == 'metatext' def test_children(self): assert self.m1.children == [] assert len(self.m2.children) == 2 assert self.m2.children[0].name == 'child1' assert self.m2.children[1].name == 'child2' class TestMetaChild(): mc1 = MetaChild('childname') mc2 = MetaChild( 'childname', attributes={'id': 'mc2', 'type': 'childtype', 'one': 1}, text='childtext', children=[MetaChild('grandchild1'), MetaChild('grandchild2')] ) def test_init(self): # name (i.e. 
tag in XML) is mandatory with pytest.raises(TypeError): MetaChild() # invalid names with pytest.raises(ValueError): MetaChild('1') with pytest.raises(ValueError): MetaChild('a:1') # id and type not allowed as parameters (they can be attributes) with pytest.raises(TypeError): MetaChild('mc0', id='mc1') with pytest.raises(TypeError): MetaChild('mc0', type='childtype') def test_name(self): assert self.mc1.name == 'childname' assert self.mc2.name == 'childname' def test_attributes(self): assert self.mc1.attributes == dict() assert self.mc2.attributes == {'id': 'mc2', 'type': 'childtype', 'one': 1} def test_get_attribute(self): assert self.mc1.get_attribute('id') is None assert self.mc1.get_attribute('attr') is None assert self.mc1.get_attribute('attr', 1) == 1 assert self.mc2.get_attribute('id') == 'mc2' assert self.mc2.get_attribute('type') == 'childtype' assert self.mc2.get_attribute('one') == 1 assert self.mc2.get_attribute('two') is None mc = MetaChild('childname', attributes={'one': 1}) m = Meta(children=[mc]) md = Metadata( attributes={'two': 2}, metas=[m] ) assert mc.get_attribute('two', inherit=True) == 2 def test_eq(self): assert self.mc1 == self.mc1 assert self.mc2 == self.mc2 assert self.mc1 != self.mc2 def test_text(self): assert self.mc1.text is None assert self.mc2.text == 'childtext' def test_children(self): assert self.mc1.children == [] assert len(self.mc2.children) == 2 assert self.mc2.children[0].name == 'grandchild1' assert self.mc2.children[1].name == 'grandchild2' class TestItem(): # empty i1 = Item() # basic info i2 = Item( id='i2', type='basic', attributes={'attr':'val'}, text='text' ) # alignment and content refs i_ac = Item( id='i_ac', alignment='i2', content='i2[0:2]' ) # segmentation ref i_s = Item( id='i_s', segmentation='i2[2:4]' ) # override content ref with text i_t = Item( id='i_t', content='i2', text='something else' ) # contextual structure t_a = Tier(id='t_a', items=[i2]) t_b = Tier(id='t_b', items=[i_ac, i_t], alignment='t_a', 
content='t_a') t_c = Tier(id='t_c', items=[i_s], segmentation='t_a') igt = Igt(tiers=[t_a, t_b, t_c]) xc = XigtCorpus(igts=[igt]) def test_init(self): with pytest.raises(ValueError): Item(id='1') # invalid id def test_id(self): assert self.i1.id is None assert self.i2.id == 'i2' assert self.i_ac.id == 'i_ac' assert self.i_s.id == 'i_s' assert self.i_t.id == 'i_t' def test_type(self): assert self.i1.type is None assert self.i2.type == 'basic' assert self.i_ac.type is None assert self.i_s.type is None assert self.i_t.type is None def test_parents(self): assert self.i1.tier is None assert self.i1.igt is None assert self.i1.corpus is None assert self.i2.tier is self.t_a assert self.i2.igt is self.igt assert self.i2.corpus is self.xc assert self.i_ac.tier == self.t_b assert self.i_ac.igt == self.igt assert self.i_ac.corpus == self.xc assert self.i_s.tier == self.t_c assert self.i_s.igt == self.igt assert self.i_s.corpus == self.xc assert self.i_t.tier == self.t_b assert self.i_t.igt == self.igt assert self.i_t.corpus == self.xc def test_eq(self): assert self.i1 == self.i1 assert self.i2 == self.i2 assert self.i1 != self.i2 def test_attributes(self): assert self.i1.attributes == dict() assert self.i2.attributes == {'attr':'val'} assert self.i_ac.attributes == {'alignment': 'i2', 'content': 'i2[0:2]'} assert self.i_s.attributes == {'segmentation': 'i2[2:4]'} assert self.i_t.attributes == {'content': 'i2'} def test_reference_attributes(self): # segmentation cannot co-occur with alignment or content with pytest.raises(XigtError): Item(alignment='a1', segmentation='b1') with pytest.raises(XigtError): Item(content='a1', segmentation='b1') assert self.i1.alignment is None assert self.i1.content is None assert self.i1.segmentation is None assert self.i2.alignment is None assert self.i2.content is None assert self.i2.segmentation is None assert self.i_ac.alignment == 'i2' assert self.i_ac.content == 'i2[0:2]' assert self.i_ac.segmentation is None assert self.i_s.alignment is 
None assert self.i_s.content is None assert self.i_s.segmentation == 'i2[2:4]' assert self.i_t.alignment == None assert self.i_t.content == 'i2' assert self.i_t.segmentation == None def test_text(self): assert self.i1.text is None assert self.i2.text == 'text' assert self.i_ac.text is None assert self.i_s.text is None assert self.i_t.text == 'something else' def test_value(self): assert self.i1.value() is None assert self.i2.value() == 'text' assert self.i_ac.value() == 'te' assert self.i_s.value() == 'xt' assert self.i_t.value() == 'something else' def test_resolve_ref(self): # item has no reference attribute b1 = Item(id='b1') with pytest.raises(KeyError): b1.resolve_ref('alignment') # has a reference attribute, but is not contained by a tier b1.alignment = 'a1' with pytest.raises(XigtStructureError): b1.resolve_ref('alignment') # item in tier, but tier has no reference attribute t_b = Tier(id='b', items=[b1]) with pytest.raises(KeyError): b1.resolve_ref('alignment') # tier has reference attribute, but is not contained by an Igt t_b.alignment = 'a' with pytest.raises(XigtStructureError): b1.resolve_ref('alignment') # item in IGT, but referred tier doesn't exist igt = Igt(tiers=[t_b]) with pytest.raises(XigtStructureError): b1.resolve_ref('alignment') # referred tier exists, but has no item referred by item's alignment t_a = Tier(id='a') igt.append(t_a) with pytest.raises(XigtStructureError): b1.resolve_ref('alignment') # referred item exists, but has no value (which resolves to '') a1 = Item(id='a1') t_a.append(a1) assert b1.resolve_ref('alignment') == '' # referred item has a value a1.text = 'text' assert b1.resolve_ref('alignment') == 'text' # stored item tests with pytest.raises(KeyError): self.i1.resolve_ref('alignment') with pytest.raises(KeyError): self.i2.resolve_ref('alignment') assert self.i_ac.resolve_ref('alignment') == 'text' assert self.i_ac.resolve_ref('content') == 'te' assert self.i_s.resolve_ref('segmentation') == 'xt' assert 
self.i_t.resolve_ref('content') == 'text' def test_span(self): # sub-spans of null content is also null content assert self.i1.span(0,1) is None assert self.i2.span(0,1) == 't' assert self.i_ac.span(1,2) == 'e' assert self.i_s.span(1,2) == 't' assert self.i_t.span(1,2) == 'o' def test_get_attribute(self): i = Item(id='i1') assert i.get_attribute('attr') == None assert i.get_attribute('attr', 1) == 1 i.attributes['attr'] = 'val' assert i.get_attribute('attr', 1) == 'val' assert i.get_attribute('abc', inherit=True) == None t = Tier(id='t', items=[i], attributes={'abc': 'def'}) assert i.get_attribute('abc', inherit=True) == 'def' assert self.i1.get_attribute('attr') == None assert self.i1.get_attribute('attr', 1) == 1 assert self.i2.get_attribute('attr') == 'val' assert self.i2.get_attribute('attr', 1) == 'val' assert self.i_ac.get_attribute('alignment') == 'i2' class TestTier(): t1 = Tier() i1 = Item(id='t1') i2 = Item(id='t2') t2 = Tier( id='t', type='basic', attributes={'attr':'val'}, metadata=[Metadata(type='meta', metas=[Meta(text='meta')])], items=[i1, i2] ) def test_init(self): with pytest.raises(ValueError): Tier(id='1') # invalid id # don't allow multiple items with the same ID with pytest.raises(XigtError): Tier(items=[Item(id='i1'), Item(id='i1')]) def test_id(self): assert self.t1.id is None assert self.t2.id == 't' def test_type(self): assert self.t1.type is None assert self.t2.type == 'basic' def test_items(self): assert len(self.t1.items) == 0 assert self.t1.items == [] assert len(self.t2.items) == 2 # contained Items should now have their tier specified for i in self.t2.items: assert i.tier is self.t2 def test_parents(self): assert self.t1.igt is None assert self.t1.corpus is None assert self.t2.igt is None assert self.t2.corpus is None def test_metadata(self): assert len(self.t1.metadata) == 0 assert self.t2.metadata[0].type == 'meta' assert len(self.t2.metadata[0].metas) == 1 assert self.t2.metadata[0][0].text == 'meta' def test_attributes(self): 
assert self.t1.attributes == dict() assert self.t2.attributes == {'attr':'val'} def test_reference_attributes(self): # segmentation cannot co-occur with alignment or content with pytest.raises(XigtError): Tier(alignment='a1', segmentation='b1') with pytest.raises(XigtError): Tier(content='a1', segmentation='b1') assert self.t1.alignment is None assert self.t1.content is None assert self.t1.segmentation is None assert self.t2.alignment is None assert self.t2.content is None assert self.t2.segmentation is None def test_eq(self): assert self.t1 == self.t1 assert self.t2 == self.t2 assert self.t1 != self.t2 def test_getitem(self): assert self.t2[0] == self.i1 assert self.t2['t1'] == self.i1 assert self.t2['0'] == self.i1 assert self.t2[1] == self.i2 with pytest.raises(IndexError): self.t2[2] with pytest.raises(IndexError): self.t2['2'] with pytest.raises(KeyError): self.t2['t3'] def test_setitem(self): t = Tier(items=[Item(id='a1'), Item(id='a2')]) t[0] = Item(id='a3') assert len(t) == 2 assert t[0].id == 'a3' with pytest.raises(KeyError): t['a1'] with pytest.raises(ValueError): t['a2'] = Item(id='a3') def test_delitem(self): t = Tier(items=[Item(id='a1'), Item(id='a2')]) assert len(t) == 2 del t[0] assert len(t) == 1 assert t[0].id == 'a2' with pytest.raises(KeyError): t['a1'] del t['a2'] assert len(t) == 0 with pytest.raises(KeyError): t['a2'] def test_get(self): assert self.t1.get(0) is None assert self.t1.get('t') is None assert self.t1.get('t', default=1) == 1 assert self.t2.get(0).id == 't1' assert self.t2.get(2) is None assert self.t2.get('t1').id == 't1' assert self.t2.get('t1', default=Item(id='x')).id == 't1' def test_append(self): t = Tier() with pytest.raises(XigtStructureError): t.append(Tier()) with pytest.raises(XigtStructureError): t.append(Igt()) with pytest.raises(XigtStructureError): t.append(XigtCorpus()) with pytest.raises(XigtStructureError): t.append(Metadata()) with pytest.raises(XigtStructureError): t.append(Meta()) assert len(t) == 0 
t.append(Item(id='t1')) assert len(t) == 1 with pytest.raises(XigtError): t.append(Item(id='t1')) t.append(Item(id='t2')) assert len(t) == 2 assert t[0].id == 't1' assert t[1].id == 't2' def test_insert(self): t = Tier() assert len(t) == 0 t.insert(0, Item(id='t1')) assert len(t) == 1 with pytest.raises(XigtError): t.insert(0, Item(id='t1')) t.insert(0, Item(id='t2')) t.insert(100, Item(id='t3')) assert len(t) == 3 assert t[0].id == 't2' assert t[1].id == 't1' assert t[2].id == 't3' def test_extend(self): t = Tier() assert len(t) == 0 t.extend([Item(id='t1')]) assert len(t) == 1 t.extend([]) assert len(t) == 1 t.extend([Item(id='t2'), Item(id='t3')]) assert len(t) == 3 assert t[0].id == 't1' assert t[1].id == 't2' assert t[2].id == 't3' def test_remove(self): t = Tier(items=[Item(id='i1'), Item(id='i2')]) assert len(t) == 2 t.remove(t[0]) assert len(t) == 1 assert t[0].id == 'i2' with pytest.raises(KeyError): t['i1'] def test_clear(self): t = Tier() t.extend([Item(id='t1'), Item(id='t2'), Item(id='t3')]) assert len(t) == 3 t.clear() assert len(t) == 0 assert t.get(0) is None assert t.get('t1') is None def test_get_attribute(self): t = Tier(id='t', attributes={'one': 1, 'two': 2}) igt = Igt(tiers=[t], attributes={'three': 3}) assert t.get_attribute('one') == 1 assert t.get_attribute('two') == 2 assert t.get_attribute('three') is None assert t.get_attribute('three', inherit=True) == 3 assert t.get_attribute('three', default=4) == 4 class TestIgt(): i1 = Igt() t1 = Tier(id='a', items=[Item(id='a1'), Item(id='a2')]) t2 = Tier(id='b', items=[Item(id='b1'), Item(id='b2')]) i2 = Igt( id='i1', type='basic', attributes={'attr':'val'}, metadata=[Metadata(type='meta', metas=[Meta(text='meta')])], tiers=[t1, t2] ) def test_init(self): with pytest.raises(ValueError): Igt(id='1') # invalid id # don't allow multiple tiers with the same ID with pytest.raises(XigtError): Igt(tiers=[Tier(id='a'), Tier(id='a')]) def test_id(self): assert self.i1.id is None assert self.i2.id == 'i1' 
def test_type(self): assert self.i1.type is None assert self.i2.type == 'basic' def test_tiers(self): assert len(self.i1.tiers) == 0 assert len(self.i2.tiers) == 2 # contained Tiers should now have their igt specified for t in self.i2.tiers: assert t.igt is self.i2 def test_parents(self): assert self.i1.corpus is None assert self.i2.corpus is None def test_metadata(self): assert len(self.i1.metadata) == 0 assert self.i2.metadata[0].type == 'meta' assert len(self.i2.metadata[0].metas) == 1 assert self.i2.metadata[0][0].text == 'meta' def test_attributes(self): assert self.i1.attributes == dict() assert self.i2.attributes == {'attr':'val'} def test_eq(self): assert self.i1 == self.i1 assert self.i2 == self.i2 assert self.i1 != self.i2 def test_getitem(self): assert self.i2[0] == self.t1 assert self.i2['a'] == self.t1 assert self.i2['0'] == self.t1 assert self.i2[1] == self.t2 with pytest.raises(IndexError): self.i2[2] with pytest.raises(IndexError): self.i2['2'] with pytest.raises(KeyError): self.i2['c'] def test_setitem(self): igt = Igt(tiers=[Tier(id='a'), Tier(id='b')]) igt[0] = Tier(id='c') assert len(igt) == 2 assert igt[0].id == 'c' with pytest.raises(KeyError): igt['a'] with pytest.raises(ValueError): igt['b'] = Tier(id='d') def test_delitem(self): igt = Igt(tiers=[Tier(id='a'), Tier(id='b')]) assert len(igt) == 2 del igt[0] assert len(igt) == 1 assert igt[0].id == 'b' with pytest.raises(KeyError): igt['a'] del igt['b'] assert len(igt) == 0 with pytest.raises(KeyError): igt['b'] def test_get(self): assert self.i1.get(0) is None assert self.i1.get('t') is None assert self.i1.get('t', default=1) == 1 assert self.i2.get(0).id == 'a' assert self.i2.get(3) is None assert self.i2.get('a').id == 'a' assert self.i2.get('a', default=Tier(id='x')).id == 'a' def test_get_item(self): assert self.i1.get_item('a') is None assert self.i1.get_item('a1') is None assert self.i2.get_item('a') is None assert self.i2.get_item('a1').id == 'a1' assert self.i2.get_item('b2').id == 
'b2' def test_get_any(self): assert self.i1.get_any('a') is None assert self.i1.get_any('a1') is None assert self.i2.get_any('a').id is 'a' assert self.i2.get_any('a1').id == 'a1' assert self.i2.get_any('b2').id == 'b2' def test_append(self): igt = Igt() with pytest.raises(XigtStructureError): igt.append(Item()) with pytest.raises(XigtStructureError): igt.append(Igt()) with pytest.raises(XigtStructureError): igt.append(XigtCorpus()) with pytest.raises(XigtStructureError): igt.append(Metadata()) with pytest.raises(XigtStructureError): igt.append(Meta()) assert len(igt) == 0 igt.append(Tier(id='t')) assert len(igt) == 1 with pytest.raises(XigtError): igt.append(Tier(id='t')) igt.append(Tier(id='x')) assert len(igt) == 2 assert igt[0].id == 't' assert igt[1].id == 'x' def test_insert(self): igt = Igt() assert len(igt) == 0 igt.insert(0, Tier(id='t')) assert len(igt) == 1 with pytest.raises(XigtError): igt.insert(0, Tier(id='t')) igt.insert(0, Tier(id='x')) igt.insert(100, Tier(id='y')) assert len(igt) == 3 assert igt[0].id == 'x' assert igt[1].id == 't' assert igt[2].id == 'y' def test_extend(self): igt = Igt() assert len(igt) == 0 igt.extend([Tier(id='t')]) assert len(igt) == 1 igt.extend([]) assert len(igt) == 1 igt.extend([Tier(id='x'), Tier(id='y')]) assert len(igt) == 3 assert igt[0].id == 't' assert igt[1].id == 'x' assert igt[2].id == 'y' def test_remove(self): igt = Igt(tiers=[Tier(id='a'), Tier(id='b')]) assert len(igt) == 2 igt.remove(igt[0]) assert len(igt) == 1 assert igt[0].id == 'b' with pytest.raises(KeyError): igt['a'] def test_clear(self): igt = Igt() igt.extend([Tier(id='t'), Tier(id='x'), Tier(id='y')]) assert len(igt) == 3 igt.clear() assert len(igt) == 0 assert igt.get(0) is None assert igt.get('t') is None def test_get_attribute(self): igt = Igt(id='i1', attributes={'one': 1, 'two': 2}) xc = XigtCorpus(igts=[igt], attributes={'three': 3}) assert igt.get_attribute('one') == 1 assert igt.get_attribute('two') == 2 assert igt.get_attribute('three') 
is None assert igt.get_attribute('three', inherit=True) == 3 assert igt.get_attribute('three', default=4) == 4 class TestXigtCorpus(): c1 = XigtCorpus() i1 = Igt(id='i1') i2 = Igt(id='i2') c2 = XigtCorpus( id='xc1', type='basic', attributes={'attr':'val'}, metadata=[Metadata(type='meta', metas=[Meta(text='meta')])], igts=[i1, i2] ) def test_init(self): with pytest.raises(ValueError): XigtCorpus(id='1') # invalid id # don't allow multiple igts with the same ID with pytest.raises(XigtError): XigtCorpus(igts=[Igt(id='i1'), Igt(id='i1')]) def test_id(self): assert self.c1.id is None assert self.c2.id == 'xc1' def test_type(self): assert self.c1.type is None assert self.c2.type is 'basic' def test_igts(self): assert len(self.c1.igts) == 0 assert len(self.c2.igts) == 2 # contained Igts should now have their corpus specified for i in self.c2.igts: assert i.corpus is self.c2 def test_attributes(self): assert self.c1.attributes == dict() assert self.c2.attributes == {'attr':'val'} def test_metadata(self): assert len(self.c1.metadata) == 0 assert self.c2.metadata[0].type == 'meta' assert len(self.c2.metadata[0].metas) == 1 assert self.c2.metadata[0][0].text == 'meta' def test_eq(self): assert self.c1 == self.c1 assert self.c2 == self.c2 assert self.c1 != self.c2 def test_getitem(self): assert self.c2[0] == self.i1 assert self.c2['i1'] == self.i1 assert self.c2['0'] == self.i1 assert self.c2[1] == self.i2 with pytest.raises(IndexError): self.c2[2] with pytest.raises(IndexError): self.c2['2'] with pytest.raises(KeyError): self.c2['i3'] def test_setitem(self): xc = XigtCorpus(igts=[Igt(id='i1'), Igt(id='i2')]) xc[0] = Igt(id='i3') assert len(xc) == 2 assert xc[0].id == 'i3' with pytest.raises(KeyError): xc['i1'] with pytest.raises(ValueError): xc['i2'] = Igt(id='i3') def test_delitem(self): xc = XigtCorpus(igts=[Igt(id='i1'), Igt(id='i2')]) assert len(xc) == 2 del xc[0] assert len(xc) == 1 assert xc[0].id == 'i2' with pytest.raises(KeyError): xc['i1'] del xc['i2'] assert 
len(xc) == 0 with pytest.raises(KeyError): xc['i2'] def test_get(self): assert self.c1.get(0) is None assert self.c1.get('i1') is None assert self.c1.get('i1', default=1) == 1 assert self.c2.get(0).id == 'i1' assert self.c2.get(3) is None assert self.c2.get('i1').id == 'i1' assert self.c2.get('i1', default=Igt(id='i3')).id == 'i1' def test_append(self): xc = XigtCorpus() with pytest.raises(XigtStructureError): xc.append(Item()) with pytest.raises(XigtStructureError): xc.append(Tier()) with pytest.raises(XigtStructureError): xc.append(XigtCorpus()) with pytest.raises(XigtStructureError): xc.append(Metadata()) with pytest.raises(XigtStructureError): xc.append(Meta()) assert len(xc) == 0 xc.append(Igt(id='i1')) assert len(xc) == 1 with pytest.raises(XigtError): xc.append(Igt(id='i1')) xc.append(Igt(id='i2')) assert len(xc) == 2 assert xc[0].id == 'i1' assert xc[1].id == 'i2' def test_insert(self): xc = XigtCorpus() assert len(xc) == 0 xc.insert(0, Igt(id='i1')) assert len(xc) == 1 with pytest.raises(XigtError): xc.insert(0, Igt(id='i1')) xc.insert(0, Igt(id='i2')) xc.insert(100, Igt(id='i3')) assert len(xc) == 3 assert xc[0].id == 'i2' assert xc[1].id == 'i1' assert xc[2].id == 'i3' def test_extend(self): xc = XigtCorpus() assert len(xc) == 0 xc.extend([Igt(id='i1')]) assert len(xc) == 1 xc.extend([]) assert len(xc) == 1 xc.extend([Igt(id='i2'), Igt(id='i3')]) assert len(xc) == 3 assert xc[0].id == 'i1' assert xc[1].id == 'i2' assert xc[2].id == 'i3' def test_remove(self): xc = XigtCorpus(igts=[Igt(id='i1'), Igt(id='i2')]) assert len(xc) == 2 xc.remove(xc[0]) assert len(xc) == 1 assert xc[0].id == 'i2' with pytest.raises(KeyError): xc['i1'] def test_clear(self): xc = XigtCorpus() xc.extend([Igt(id='i1'), Igt(id='i2'), Igt(id='i3')]) assert len(xc) == 3 xc.clear() assert len(xc) == 0 assert xc.get(0) is None assert xc.get('i1') is None def test_get_attribute(self): xc = XigtCorpus(attributes={'one': 1, 'two': 2}) assert xc.get_attribute('one') == 1 assert 
xc.get_attribute('two') == 2 assert xc.get_attribute('three') is None assert xc.get_attribute('three', inherit=True) == None
31,939
11,428
from django.http import JsonResponse def names(request): return JsonResponse({'names': ['William', 'Rod', 'Grant']})
121
38
from sensor import Sensor from stepper import Stepper sensor = Sensor() stepper = Stepper(100) #stepper.start() while True: print(sensor.measure())
156
56
# Generated by Django 2.2.4 on 2019-08-18 16:16 from django.db import migrations class Migration(migrations.Migration): dependencies = [("peering", "0050_auto_20190806_2159")] operations = [ migrations.RenameField( model_name="autonomoussystem", old_name="comment", new_name="comments" ), migrations.RenameField( model_name="bgpgroup", old_name="comment", new_name="comments" ), migrations.RenameField( model_name="community", old_name="comment", new_name="comments" ), migrations.RenameField( model_name="directpeeringsession", old_name="comment", new_name="comments" ), migrations.RenameField( model_name="internetexchange", old_name="comment", new_name="comments" ), migrations.RenameField( model_name="internetexchangepeeringsession", old_name="comment", new_name="comments", ), migrations.RenameField( model_name="router", old_name="comment", new_name="comments" ), migrations.RenameField( model_name="routingpolicy", old_name="comment", new_name="comments" ), migrations.RenameField( model_name="template", old_name="comment", new_name="comments" ), ]
1,351
405
# # PySNMP MIB module HP-ICF-IPV6-RA-GUARD-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-IPV6-RA-GUARD-MIB # Produced by pysmi-0.3.4 at Wed May 1 13:34:21 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion") hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch") ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex") ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance") MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter32, Gauge32, Counter64, IpAddress, TimeTicks, Integer32, iso, Bits, ObjectIdentity, Unsigned32, ModuleIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter32", "Gauge32", "Counter64", "IpAddress", "TimeTicks", "Integer32", "iso", "Bits", "ObjectIdentity", "Unsigned32", "ModuleIdentity", "MibIdentifier") DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TruthValue") hpicfIpv6RAGuard = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87)) hpicfIpv6RAGuard.setRevisions(('2011-03-16 05:24',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: hpicfIpv6RAGuard.setRevisionsDescriptions(('Initial revision.',)) if 
mibBuilder.loadTexts: hpicfIpv6RAGuard.setLastUpdated('201103160524Z') if mibBuilder.loadTexts: hpicfIpv6RAGuard.setOrganization('Hewlett-Packard Company HP Networking') if mibBuilder.loadTexts: hpicfIpv6RAGuard.setContactInfo('Hewlett-Packard Company 8000 Foothills Blvd. Roseville, CA 95747') if mibBuilder.loadTexts: hpicfIpv6RAGuard.setDescription('This MIB module contains HP proprietary objects for managing RA Guard.') hpicfIpv6RAGuardObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1)) hpicfIpv6RAGuardConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1)) hpicfRAGuardPortTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1), ) if mibBuilder.loadTexts: hpicfRAGuardPortTable.setStatus('current') if mibBuilder.loadTexts: hpicfRAGuardPortTable.setDescription('Per-interface configuration for RA Guard. Ra Guard is used to block IPv6 router advertisements and ICMPv6 router redirects. The log option is to enable debug logging for troubleshooting. It uses a lot of CPU and should be used only for short periods of time. 
To display debug logging, use debug security ra-guard command.') hpicfRAGuardPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex")) if mibBuilder.loadTexts: hpicfRAGuardPortEntry.setStatus('current') if mibBuilder.loadTexts: hpicfRAGuardPortEntry.setDescription('RA Guard configuration information for a single port.') hpicfRAGuardPortBlocked = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 1), TruthValue()).setMaxAccess("readwrite") if mibBuilder.loadTexts: hpicfRAGuardPortBlocked.setStatus('current') if mibBuilder.loadTexts: hpicfRAGuardPortBlocked.setDescription('This object indicates whether this port is blocked for Router Advertisements and Redirects.') hpicfRAGuardPortBlockedRAs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 2), Counter64()).setMaxAccess("readonly") if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRAs.setStatus('current') if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRAs.setDescription('This number of Router Advertisements blocked for the port.') hpicfRAGuardPortBlockedRedirs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 3), Counter64()).setMaxAccess("readonly") if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRedirs.setStatus('current') if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRedirs.setDescription('This number of Router Redirects blocked for the port.') hpicfRAGuardPortLog = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 4), TruthValue()).setMaxAccess("readwrite") if mibBuilder.loadTexts: hpicfRAGuardPortLog.setStatus('current') if mibBuilder.loadTexts: hpicfRAGuardPortLog.setDescription('Whether to log RAs and Redirects for the port. The log option is to enable debug logging for troubleshooting. 
It uses a lot of CPU and should be used only for short periods of time.') hpicfRAGuardLastErrorCode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noError", 1), ("insufficientHardwareResources", 2), ("genericError", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: hpicfRAGuardLastErrorCode.setStatus('current') if mibBuilder.loadTexts: hpicfRAGuardLastErrorCode.setDescription('Error code of the last error that occurred. A non-zero value indicates that the last operation performed by this instance did not succeed.') hpicfIpv6RAGuardConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2)) hpicfIpv6RAGuardCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 1)) hpicfIpv6RAGuardGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 2)) hpicfIpv6RAGuardGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 2, 1)).setObjects(("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlocked"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlockedRAs"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlockedRedirs"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortLog"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardLastErrorCode")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): hpicfIpv6RAGuardGroup = hpicfIpv6RAGuardGroup.setStatus('current') if mibBuilder.loadTexts: hpicfIpv6RAGuardGroup.setDescription('A collection of objects providing configuration for Ipv6 RA Guard.') hpicfIpv6RAGuardCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 1, 1)).setObjects(("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfIpv6RAGuardGroup")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): hpicfIpv6RAGuardCompliance = hpicfIpv6RAGuardCompliance.setStatus('current') if mibBuilder.loadTexts: hpicfIpv6RAGuardCompliance.setDescription('The compliance statement for 
devices support of HP-ICF-IPV6-RA-GUARD-MIB.') mibBuilder.exportSymbols("HP-ICF-IPV6-RA-GUARD-MIB", hpicfIpv6RAGuardConfig=hpicfIpv6RAGuardConfig, hpicfRAGuardPortLog=hpicfRAGuardPortLog, hpicfIpv6RAGuardCompliances=hpicfIpv6RAGuardCompliances, hpicfIpv6RAGuardGroup=hpicfIpv6RAGuardGroup, hpicfIpv6RAGuardCompliance=hpicfIpv6RAGuardCompliance, hpicfRAGuardPortEntry=hpicfRAGuardPortEntry, hpicfIpv6RAGuardObjects=hpicfIpv6RAGuardObjects, PYSNMP_MODULE_ID=hpicfIpv6RAGuard, hpicfRAGuardPortBlocked=hpicfRAGuardPortBlocked, hpicfRAGuardPortTable=hpicfRAGuardPortTable, hpicfRAGuardPortBlockedRAs=hpicfRAGuardPortBlockedRAs, hpicfRAGuardPortBlockedRedirs=hpicfRAGuardPortBlockedRedirs, hpicfRAGuardLastErrorCode=hpicfRAGuardLastErrorCode, hpicfIpv6RAGuardConformance=hpicfIpv6RAGuardConformance, hpicfIpv6RAGuardGroups=hpicfIpv6RAGuardGroups, hpicfIpv6RAGuard=hpicfIpv6RAGuard)
7,784
3,395
from transformers import RobertaTokenizer tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
102
29
import numpy as np from .fasthist import hist2d stdquant = np.ndarray(13) stdquant[0] = (0.0000316712418331200) #-4 sdev stdquant[1] = (0.0013498980316301000) #-3 sdev stdquant[2] = (0.0227501319481792000) #-2 sdev stdquant[3] = (0.05) stdquant[4] = (0.1586552539314570000) #-1 sdev or lsdev stdquant[5] = (0.25) #first quartile stdquant[6] = (0.50) #median stdquant[7] = (0.75) #third quartile stdquant[8] = (0.8413447460685430000) #+1 sdev or usdev stdquant[9] = (0.95) stdquant[10] = (0.9772498680518210000) #+2 sdev stdquant[11] = (0.9986501019683700000) #+3 sdev stdquant[12] = (0.9999683287581670000) #+4 sdev def get_standard_quantiles(arr, bins=64, step=None, quantiles=None): hist, starts, stepsize = hist2d(arr, bins, step, plot=False) cumhist = np.cumsum(hist) if quantiles is None: quantiles = stdquant else: quantiles = np.array(quantiles) n = len(quantiles) npix = np.multiply.reduce(arr.shape) quantiles *= npix thresh = [0] * n #TO DO: speed up by using interpolation function of numpy for ind in range(n): thresh[ind] = starts[(cumhist < quantiles[ind]).sum()] return thresh def get_sigma_range(arr, sigma=1, bins=64, step=None): if sigma == 1: return get_standard_quantiles(arr, bins, step, (stdquant[4], stdquant[8])) elif sigma == 2: return get_standard_quantiles(arr, bins, step, (stdquant[2], stdquant[10])) elif sigma == 3: return get_standard_quantiles(arr, bins, step, (stdquant[1], stdquant[11])) elif sigma == 4: return get_standard_quantiles(arr, bins, step, (stdquant[0], stdquant[12])) def get_sigma_range_for_hist(starts, hist, sigma): cumhist = np.cumsum(hist) if sigma==1: quantiles = np.array((stdquant[4], stdquant[8])) elif sigma==2: quantiles = np.array((stdquant[2], stdquant[10])) elif sigma==3: quantiles = np.array((stdquant[1], stdquant[11])) elif sigma==4: quantiles = np.array((stdquant[0], stdquant[12])) n = len(quantiles) npix = cumhist[-1] quantiles *= npix thresh = [0] * n #TO DO: speed up by using interpolation function of numpy for ind in 
range(n): thresh[ind] = starts[(cumhist < quantiles[ind]).sum()] return thresh
2,531
1,040
# coding: utf-8 """ Copyright 2016 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems import re class ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'attribute_exists': 'bool', 'case_sensitive': 'bool', 'field': 'str', 'operator': 'str', 'type': 'str', 'value': 'str', 'whole_word': 'bool' } self.attribute_map = { 'attribute_exists': 'attribute_exists', 'case_sensitive': 'case_sensitive', 'field': 'field', 'operator': 'operator', 'type': 'type', 'value': 'value', 'whole_word': 'whole_word' } self._attribute_exists = None self._case_sensitive = None self._field = None self._operator = None self._type = None self._value = None self._whole_word = None @property def attribute_exists(self): """ Gets the attribute_exists of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. For \"custom_attribute\" type criteria. The file will match as long as the attribute named by \"field\" exists. 
Default is true. :return: The attribute_exists of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :rtype: bool """ return self._attribute_exists @attribute_exists.setter def attribute_exists(self, attribute_exists): """ Sets the attribute_exists of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. For \"custom_attribute\" type criteria. The file will match as long as the attribute named by \"field\" exists. Default is true. :param attribute_exists: The attribute_exists of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :type: bool """ self._attribute_exists = attribute_exists @property def case_sensitive(self): """ Gets the case_sensitive of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. If true, the value comparison will be case sensitive. Default is true. :return: The case_sensitive of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :rtype: bool """ return self._case_sensitive @case_sensitive.setter def case_sensitive(self, case_sensitive): """ Sets the case_sensitive of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. If true, the value comparison will be case sensitive. Default is true. :param case_sensitive: The case_sensitive of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :type: bool """ self._case_sensitive = case_sensitive @property def field(self): """ Gets the field of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string \"\". :return: The field of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :rtype: str """ return self._field @field.setter def field(self, field): """ Sets the field of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. 
The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string \"\". :param field: The field of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :type: str """ self._field = field @property def operator(self): """ Gets the operator of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. How to compare the specified attribute of each file to the specified value. :return: The operator of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :rtype: str """ return self._operator @operator.setter def operator(self, operator): """ Sets the operator of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. How to compare the specified attribute of each file to the specified value. :param operator: The operator of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :type: str """ allowed_values = ["==", "!=", ">", ">=", "<", "<=", "!"] if operator is not None and operator not in allowed_values: raise ValueError( "Invalid value for `operator`, must be one of {0}" .format(allowed_values) ) self._operator = operator @property def type(self): """ Gets the type of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. The type of this criterion, that is, which file attribute to match on. :return: The type of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :rtype: str """ return self._type @type.setter def type(self, type): """ Sets the type of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. The type of this criterion, that is, which file attribute to match on. :param type: The type of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. 
:type: str """ allowed_values = ["name", "path", "accessed_time", "accessed_before", "accessed_after", "birth_time", "birth_before", "birth_after", "changed_time", "changed_before", "changed_after", "size", "file_type", "posix_regex_name", "user_name", "user_id", "group_name", "group_id", "no_user", "no_group"] if type not in allowed_values: raise ValueError( "Invalid value for `type`, must be one of {0}" .format(allowed_values) ) self._type = type @property def value(self): """ Gets the value of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. The value to compare the specified attribute of each file to. :return: The value of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :rtype: str """ return self._value @value.setter def value(self, value): """ Sets the value of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. The value to compare the specified attribute of each file to. :param value: The value of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :type: str """ self._value = value @property def whole_word(self): """ Gets the whole_word of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. If true, the attribute must match the entire word. Default is true. :return: The whole_word of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. :rtype: bool """ return self._whole_word @whole_word.setter def whole_word(self, whole_word): """ Sets the whole_word of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. If true, the attribute must match the entire word. Default is true. :param whole_word: The whole_word of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. 
:type: bool """ self._whole_word = whole_word def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
10,584
2,793
import sys

# Echo each integer read from standard input, one per line,
# stopping (without echoing) as soon as the sentinel 42 appears.
for raw_line in sys.stdin:
    value = int(raw_line)
    if value == 42:
        break
    print(value)
89
43
# -*- coding: utf-8 -*-
"""Demonstrates positional vs. keyword argument passing styles."""


def remainder(number, divisor):
    """Return ``number`` modulo ``divisor``."""
    return number % divisor


def flow_rate(weight, time, period=1):
    """Return the weight transferred per ``period`` units of time."""
    return (weight / time) * period


if __name__ == "__main__":
    # The same remainder call, expressed four equivalent ways.
    for result in (
        remainder(20, 7),
        remainder(20, divisor=7),
        remainder(number=20, divisor=7),
        remainder(divisor=7, number=20),
    ):
        print(result)
    print(flow_rate(0.5, 3))
    print(flow_rate(6, 3, 100))
367
165
##############################
## cread purified h5ad file ##
##############################
# input: annotation table and the whole expression profile
# output: purified h5ad file

import os
import argparse
import gc

import pandas as pd
import numpy as np
import anndata

parser = argparse.ArgumentParser(description='cread purified h5ad file for DAISM-XMBD')
parser.add_argument("-anno", type=str, help="annotation table (contains 'sample.name' and 'cell.type' two columns)", default=None)
parser.add_argument("-exp", type=str, help="the whole expression profile (sample.name in column and gene symbol in row)", default=None)
parser.add_argument("-outdir", type=str, help="the directory to store h5ad file", default="example/")
parser.add_argument("-prefix", type=str, help="the prefix of h5ad file", default="purified")


def main():
    """Build one AnnData per cell type from the expression profile and write
    the concatenated result to ``<outdir>/<prefix>.h5ad``.

    Reads the annotation CSV and tab-separated expression matrix named on
    the command line; output is written to disk (nothing is returned).
    """
    inputArgs = parser.parse_args()

    # makedirs(exist_ok=True) also handles nested output paths;
    # the original `os.mkdir` would fail on "a/b/" style directories.
    os.makedirs(inputArgs.outdir, exist_ok=True)

    anno_table = pd.read_csv(inputArgs.anno)
    cell_list = list(anno_table['cell.type'].unique())
    exp = pd.read_csv(inputArgs.exp, sep="\t", index_col=0)

    adata = []
    for cell in cell_list:
        tmp = anno_table[anno_table['cell.type'] == cell]
        sample_list = tmp['sample.name']
        # Only keep annotated samples that actually appear in the matrix.
        sample_list_inter = list(set(sample_list).intersection(list(exp.columns)))
        exp_select = exp[sample_list_inter]
        anno = pd.DataFrame(np.repeat(cell, exp_select.shape[1]), columns=['cell.type'])
        adata.append(anndata.AnnData(X=exp_select.T.values,
                                     obs=anno,
                                     var=pd.DataFrame(columns=[], index=list(exp_select.index))))

    # Fold everything into adata[0], releasing each merged chunk to keep
    # peak memory low. A while-loop over the live length is safer than the
    # original `range(1, len(adata))` computed over a list we then shrink.
    step = 1
    while len(adata) > 1:
        print("Concatenating " + str(step))
        adata[0] = adata[0].concatenate(adata[1])
        del adata[1]
        gc.collect()
        step += 1

    print(len(adata))
    adata = adata[0]
    adata.write(inputArgs.outdir + '/' + inputArgs.prefix + '.h5ad')


if __name__ == "__main__":
    main()
1,981
646
# -*- coding: utf-8 -*- """ Created on Thu Nov 29 13:56:44 2018 @author: RomanGutin """ import pandas as pd import numpy as np #Frequency Tuning Loop amino_letter = ['A','R','D','N','C','E','Q','G','H','I','L','K','M','F','P','S','T','W','Y','V'] length_scores =[4,8,6,6,5,7,7,4,7,5,6,8,7,8,5,5,5,9,8,5] FM_df = pd.DataFrame(0, index= just_let.index, columns= range(0,81)) FM_score_dict = dict(zip(amino_letter,length_scores)) #splitting amino letter into new independent variables based on its length score# fm_letter_dict ={} for letter in amino_letter: new_vars =[] for i in range(FM_score_dict[letter]): new_vars.append(letter+str(i+1)) fm_letter_dict[letter]=new_vars #generate new FM_tuned dataframe for seq in FM_df.index: letter_list= list(seq) for letter in letter_list: for var in fm_letter_dict[letter]: row= FM_df.loc[seq,:] spot= row[row==0].index[0] FM_df.loc[seq,spot]= var FM_df= pd.read_csv('Frequency Tuned Dataset') #data after frequency tuning wit FM_df.set_index('sequence', inplace= True) FM_df_arr = np.array(FM_df.values, dtype=[('O', np.float)]).astype(np.float) #New letter to weight holding the new FM tuned variables ltw_fm_MLE={} for amino in amino_letter: for var in fm_letter_dict[amino]: ltw_fm_MLE[var]= ltw_AM_n[amino] ltw_fm_MLE = np.load('ltw_fm_MLE.npy').item()
1,424
581
from kafka import KafkaConsumer


class Consumer:
    """Thin Kafka wrapper that buffers received message values in order."""

    def __init__(self, config):
        bootstrap_server = config.get(
            "bootstrap_server") + ":" + config.get("port")
        self.consumer = KafkaConsumer(
            config.get("subscription_id_2"),
            bootstrap_servers=bootstrap_server,
            api_version=(0, 10),
            auto_offset_reset='earliest',
            enable_auto_commit=True,
            group_id="test",
        )
        # FIFO buffer of message values filled by listen().
        self.messages = []

    def get_message(self):
        """Pop and return the oldest buffered message; None when empty."""
        if self.messages:
            return self.messages.pop(0)

    def listen(self):
        """Block on the consumer forever, buffering each message's value."""
        for incoming in self.consumer:
            self.messages.append(incoming.value)
692
202
# utilities import os from re import sub import uuid import subprocess # Image To Pdf import img2pdf # PDF To Images from pdf2image import convert_from_path # PDF To Word from pdf2docx import parse _BASE_DIR = os.getcwd() _BASE_DIR_FILE = os.path.join(_BASE_DIR, "files") def process_image_to_pdf(files, pdf_name): img = [] with open(f"{_BASE_DIR_FILE}/{pdf_name}.pdf","wb") as fil: for fname in files: path = os.path.join(_BASE_DIR_FILE, fname) img.append(path) fil.write(img2pdf.convert(img)) return pdf_name def process_word_to_pdf(file): file_address = os.path.join(_BASE_DIR_FILE, file) command = ['lowriter' ,'--convert-to','pdf' , file_address , "--outdir", _BASE_DIR_FILE] command_run = subprocess.run(command) file_name = -1 if command_run.returncode == 0: file_name = ".".join(file.split(".")[:-1]) + ".pdf" return file_name def process_pdf_to_images(file): file_address = os.path.join(_BASE_DIR_FILE, file) folder_name = str(uuid.uuid1()) folder_address = os.path.join(_BASE_DIR_FILE, folder_name) os.mkdir(folder_address) try: convert_from_path(file_address, output_folder=folder_address, fmt="jpeg", thread_count=10, jpegopt="quality") return folder_address except: import shutil shutil.rmtree(folder_address) return -1 def process_pdf_to_word(file): file_address = os.path.join(_BASE_DIR_FILE, file) word_file = str(uuid.uuid1()) + ".docx" word_file_address = os.path.join(_BASE_DIR_FILE, word_file) try: parse(file_address, word_file_address, multi_processing=True) return word_file_address except: return -1 def del_user_files(list): for file in list: file_address = os.path.join(_BASE_DIR_FILE, file) try: os.remove(file_address) except: pass def del_one_file(file): try: os.remove(file) except: try: file_address = os.path.join(_BASE_DIR_FILE, file) os.remove(file_address) except: pass pass return 1
2,159
744
from kivy.uix.screenmanager import Screen
from kivy.properties import StringProperty, ObjectProperty, NumericProperty, ListProperty, BooleanProperty
from kivy.app import App
from kivy.logger import Logger

from library_widgets import TrackingScreenMixin
from utils import import_kv

import_kv(__file__)


class TasksScreen(TrackingScreenMixin, Screen):
    """Screen hosting the task flow; also tracks play-session statistics."""

    # Currently selected task family; None when nothing is selected.
    family = StringProperty(None, allownone=True)
    # How many times the task flow has been played.
    played_times = NumericProperty()
    tasks = ListProperty()
    # Lazily-created inner screen manager (see main_manager property).
    _main_manager = ObjectProperty()
    loading = ObjectProperty()
    # When True, skip the task-set picker and jump straight into the test.
    quick_test = BooleanProperty(False)

    def on_quick_test(self, *args):
        # Kivy property observer: refresh routing when quick_test flips,
        # but only once the inner manager already exists.
        if self._main_manager:
            self.update_content()

    @property
    def main_manager(self):
        # Import deferred to avoid a circular import at module load time.
        if not self._main_manager:
            from .content import TaskScreenManager
            self._main_manager = TaskScreenManager()
        return self._main_manager

    def update_content(self, *args, **kwargs):
        """Route to the test or task-set screen and record session metrics.

        Side effects: increments the persisted 'started' session counter,
        sends a tracker event, bumps played_times, and may unlock the
        "addicted" achievement and hide the loading widget.
        """
        if self.quick_test:
            self.main_manager.start_test(self.family, self.tasks)
            self.main_manager.current = 'test'
        else:
            self.main_manager.task_sets_screen.fill()
            self.main_manager.current = 'task_sets'
        app = App.get_running_app()
        sessions_starts = app.storage['sessions']['started']
        app.tracker.send_event('tasks', 'sessions', label='started',
                               value=sessions_starts + 1)
        # Rewrite the whole 'sessions' entry so the storage backend persists it.
        app.storage['sessions'] = {"started": sessions_starts + 1,
                                   "finished": app.storage['sessions']['finished']}
        self.played_times += 1
        Logger.info("Tasks: playing %s times" % self.played_times)
        if self.played_times == 10:
            # Achievement unlocked exactly on the 10th play.
            App.get_running_app().google_client.unlock_achievement("addicted")
        if self.main_manager.parent != self:
            self.loading.hide(self._main_manager)

    def on_enter(self, *args):
        super(TasksScreen, self).on_enter(*args)
        app = App.get_running_app()
        # Billing initialization invokes update_content as its callback.
        app.initialize_billing(self.update_content)
2,026
596
"""Train logistic regression model on hdf5 features for classification Modified from: https://gurus.pyimagesearch.com/topic/transfer-learning-example-dogs-and-cats/ """ import pickle from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report def train_model(h5py_db, model_output='model.pickle', percent_train=1.0): """Train logistic regression classifier :param h5py_db: path to HDF5 database containing 'features', 'labels', & 'label_names' :param model_output: path to save trained model to using pickle :param percent_train: percent of images to be used for training (instead of testing) :return: None; output is written to `model_output` """ i = int(h5py_db['labels'].shape[0] * percent_train) # C decided with sklearn.model_selection.GridSearchCV model = LogisticRegression(C=0.1) model.fit(h5py_db['features'][:i], h5py_db['labels'][:i]) if percent_train < 1.0: preds = model.predict(h5py_db['features'][i:]) print(classification_report(h5py_db['labels'][i:], preds, target_names=h5py_db['label_names'])) with open(model_output, 'wb') as f: f.write(pickle.dumps(model))
1,240
403
"""Intersect the (query_id, reference_id) prediction pairs of three models
and write the concatenated matching rows to R-baseline-CC-234-50k.csv."""
import pandas as pd


def _load_swapped(path):
    """Read a predictions CSV and swap its query_id/reference_id values.

    The columns keep their names and positions; only the values trade places
    (the originals were written with the two id columns reversed).
    """
    df = pd.read_csv(path)
    df[['query_id', 'reference_id']] = df[['reference_id', 'query_id']].values
    return df


# One frame per model; the duplicated swap code of the original is now a helper.
frames = [_load_swapped(p) for p in (
    '50/predictions_dev_queries_50k_normalized_exp.csv',
    'ibn/predictions_dev_queries_50k_normalized_exp.csv',
    '152/predictions_dev_queries_50k_normalized_exp.csv',
)]

# (query_id, reference_id) pairs present in all three prediction sets.
# zip replaces the original O(n) index loops used to build the pair lists.
pair_sets = [set(zip(df['query_id'], df['reference_id'])) for df in frames]
common_pairs = pair_sets[0] & pair_sets[1] & pair_sets[2]

common_df = pd.DataFrame(list(common_pairs),
                         columns=['query_id', 'reference_id'])

# Pull each model's full rows for the shared pairs, then stack them.
merged = [pd.merge(common_df, df, on=['query_id', 'reference_id'], how='inner')
          for df in frames]
pd.concat(merged).to_csv('R-baseline-CC-234-50k.csv', index=False)
1,760
831
from src.utils.cache import cache


def cheats_key(chat_id: int, user_id: int) -> str:
    """Build the cache key for cheat accounting of one user in one chat."""
    return f'i_stat:cheats:{chat_id}:{user_id}'


def cheats_found(chat_id: int, user_id: int, sum_count: int) -> bool:
    """Add sum_count to the user's running total; True once it exceeds 50.

    The counter is kept in the cache for 10 minutes and is only refreshed
    while the threshold has not yet been crossed.
    """
    key = cheats_key(chat_id, user_id)
    total = cache.get(key, 0) + sum_count
    if total > 50:
        return True
    cache.set(key, total, time=10 * 60)  # 10m
    return False
399
165
from threading import Thread
from flask_mail import Mail, Message
from resources.errors import InternalServerError

# Module-level singletons, rebound by initialize_mail_service() at startup.
mail = Mail(app=None)
app = None


def initialize_mail_service(appiclation):
    """Bind the Flask application to the module-level Mail instance.

    NOTE(review): the parameter name 'appiclation' is a typo, but it is part
    of the public keyword signature -- flagged rather than renamed.
    """
    global mail
    global app
    mail = Mail(app=appiclation)
    app = appiclation


def send_async_email(app, msg, mail):
    # Runs on a worker thread; flask_mail needs an app context to send.
    with app.app_context():
        try:
            mail.send(msg)
        except ConnectionRefusedError:
            # NOTE(review): this raise happens inside the worker thread, so
            # the caller of send_email never observes it -- confirm intended.
            raise InternalServerError("[MAIL SERVER] not working")


def send_email(subject, sender, recipients, text_body, html_body):
    """Fire-and-forget email send on a background thread."""
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    Thread(target=send_async_email, args=(app, msg, mail)).start()
744
225
import base64 import json from OpenSSL.SSL import ( VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_NONE, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD) from twisted.web.http_headers import Headers from twisted.internet.defer import inlineCallbacks, fail, succeed from vxsandbox.resources.http import ( HttpClientContextFactory, HttpClientPolicyForHTTPS, make_context_factory, HttpClientResource) from vxsandbox.resources.tests.utils import ResourceTestCaseBase class DummyResponse(object): def __init__(self): self.headers = Headers({}) class DummyHTTPClient(object): def __init__(self): self._next_http_request_result = None self.http_requests = [] def set_agent(self, agent): self.agent = agent def get_context_factory(self): # We need to dig around inside our Agent to find the context factory. # Since this involves private attributes that have changed a few times # recently, we need to try various options. if hasattr(self.agent, "_contextFactory"): # For Twisted 13.x return self.agent._contextFactory elif hasattr(self.agent, "_policyForHTTPS"): # For Twisted 14.x return self.agent._policyForHTTPS elif hasattr(self.agent, "_endpointFactory"): # For Twisted 15.0.0 (and possibly newer) return self.agent._endpointFactory._policyForHTTPS else: raise NotImplementedError( "I can't find the context factory on this Agent. 
This seems" " to change every few versions of Twisted.") def fail_next(self, error): self._next_http_request_result = fail(error) def succeed_next(self, body, code=200, headers={}): default_headers = { 'Content-Length': str(len(body)), } default_headers.update(headers) response = DummyResponse() response.code = code for header, value in default_headers.items(): response.headers.addRawHeader(header, value) response.content = lambda: succeed(body) self._next_http_request_result = succeed(response) def request(self, *args, **kw): self.http_requests.append((args, kw)) return self._next_http_request_result class TestHttpClientResource(ResourceTestCaseBase): resource_cls = HttpClientResource @inlineCallbacks def setUp(self): super(TestHttpClientResource, self).setUp() yield self.create_resource({}) self.dummy_client = DummyHTTPClient() self.patch(self.resource_cls, 'http_client_class', self.get_dummy_client) def get_dummy_client(self, agent): self.dummy_client.set_agent(agent) return self.dummy_client def http_request_fail(self, error): self.dummy_client.fail_next(error) def http_request_succeed(self, body, code=200, headers={}): self.dummy_client.succeed_next(body, code, headers) def assert_not_unicode(self, arg): self.assertFalse(isinstance(arg, unicode)) def get_context_factory(self): return self.dummy_client.get_context_factory() def get_context(self, context_factory=None): if context_factory is None: context_factory = self.get_context_factory() if hasattr(context_factory, 'creatorForNetloc'): # This context_factory is a new-style IPolicyForHTTPS # implementation, so we need to get a context from through its # client connection creator. The creator could either be a wrapper # around a ClientContextFactory (in which case we treat it like # one) or a ClientTLSOptions object (which means we have to grab # the context from a private attribute). 
creator = context_factory.creatorForNetloc('example.com', 80) if hasattr(creator, 'getContext'): return creator.getContext() else: return creator._ctx else: # This context_factory is an old-style WebClientContextFactory and # will build us a context object if we ask nicely. return context_factory.getContext('example.com', 80) def assert_http_request(self, url, method='GET', headers=None, data=None, timeout=None, files=None): timeout = (timeout if timeout is not None else self.resource.timeout) args = (method, url,) kw = dict(headers=headers, data=data, timeout=timeout, files=files) [(actual_args, actual_kw)] = self.dummy_client.http_requests # NOTE: Files are handed over to treq as file pointer-ish things # which in our case are `StringIO` instances. actual_kw_files = actual_kw.get('files') if actual_kw_files is not None: actual_kw_files = actual_kw.pop('files', None) kw_files = kw.pop('files', {}) for name, file_data in actual_kw_files.items(): kw_file_data = kw_files[name] file_name, content_type, sio = file_data self.assertEqual( (file_name, content_type, sio.getvalue()), kw_file_data) self.assertEqual((actual_args, actual_kw), (args, kw)) self.assert_not_unicode(actual_args[0]) self.assert_not_unicode(actual_kw.get('data')) headers = actual_kw.get('headers') if headers is not None: for key, values in headers.items(): self.assert_not_unicode(key) for value in values: self.assert_not_unicode(value) def test_make_context_factory_no_method_verify_none(self): context_factory = make_context_factory(verify_options=VERIFY_NONE) self.assertIsInstance(context_factory, HttpClientContextFactory) self.assertEqual(context_factory.verify_options, VERIFY_NONE) self.assertEqual(context_factory.ssl_method, None) self.assertEqual( self.get_context(context_factory).get_verify_mode(), VERIFY_NONE) def test_make_context_factory_no_method_verify_peer(self): # This test's behaviour depends on the version of Twisted being used. 
context_factory = make_context_factory(verify_options=VERIFY_PEER) context = self.get_context(context_factory) self.assertEqual(context_factory.ssl_method, None) self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE) if HttpClientPolicyForHTTPS is None: # We have Twisted<14.0.0 self.assertIsInstance(context_factory, HttpClientContextFactory) self.assertEqual(context_factory.verify_options, VERIFY_PEER) self.assertEqual(context.get_verify_mode(), VERIFY_PEER) else: self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS) def test_make_context_factory_no_method_verify_peer_or_fail(self): # This test's behaviour depends on the version of Twisted being used. context_factory = make_context_factory( verify_options=(VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)) context = self.get_context(context_factory) self.assertEqual(context_factory.ssl_method, None) self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE) if HttpClientPolicyForHTTPS is None: # We have Twisted<14.0.0 self.assertIsInstance(context_factory, HttpClientContextFactory) self.assertEqual( context_factory.verify_options, VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT) self.assertEqual( context.get_verify_mode(), VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT) else: self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS) def test_make_context_factory_no_method_no_verify(self): # This test's behaviour depends on the version of Twisted being used. context_factory = make_context_factory() self.assertEqual(context_factory.ssl_method, None) if HttpClientPolicyForHTTPS is None: # We have Twisted<14.0.0 self.assertIsInstance(context_factory, HttpClientContextFactory) self.assertEqual(context_factory.verify_options, None) else: self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS) def test_make_context_factory_sslv3_no_verify(self): # This test's behaviour depends on the version of Twisted being used. 
context_factory = make_context_factory(ssl_method=SSLv3_METHOD) self.assertEqual(context_factory.ssl_method, SSLv3_METHOD) if HttpClientPolicyForHTTPS is None: # We have Twisted<14.0.0 self.assertIsInstance(context_factory, HttpClientContextFactory) self.assertEqual(context_factory.verify_options, None) else: self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS) @inlineCallbacks def test_handle_get(self): self.http_request_succeed("foo") reply = yield self.dispatch_command('get', url='http://www.example.com') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('http://www.example.com', method='GET') @inlineCallbacks def test_handle_post(self): self.http_request_succeed("foo") reply = yield self.dispatch_command('post', url='http://www.example.com') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('http://www.example.com', method='POST') @inlineCallbacks def test_handle_patch(self): self.http_request_succeed("foo") reply = yield self.dispatch_command('patch', url='http://www.example.com') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('http://www.example.com', method='PATCH') @inlineCallbacks def test_handle_head(self): self.http_request_succeed("foo") reply = yield self.dispatch_command('head', url='http://www.example.com') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('http://www.example.com', method='HEAD') @inlineCallbacks def test_handle_delete(self): self.http_request_succeed("foo") reply = yield self.dispatch_command('delete', url='http://www.example.com') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('http://www.example.com', method='DELETE') @inlineCallbacks def test_handle_put(self): self.http_request_succeed("foo") reply = yield self.dispatch_command('put', url='http://www.example.com') 
self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('http://www.example.com', method='PUT') @inlineCallbacks def test_failed_get(self): self.http_request_fail(ValueError("HTTP request failed")) reply = yield self.dispatch_command('get', url='http://www.example.com') self.assertFalse(reply['success']) self.assertEqual(reply['reason'], "HTTP request failed") self.assert_http_request('http://www.example.com', method='GET') @inlineCallbacks def test_null_url(self): reply = yield self.dispatch_command('get') self.assertFalse(reply['success']) self.assertEqual(reply['reason'], "No URL given") @inlineCallbacks def test_https_request(self): # This test's behaviour depends on the version of Twisted being used. self.http_request_succeed("foo") reply = yield self.dispatch_command('get', url='https://www.example.com') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('https://www.example.com', method='GET') context_factory = self.get_context_factory() self.assertEqual(context_factory.ssl_method, None) if HttpClientPolicyForHTTPS is None: self.assertIsInstance(context_factory, HttpClientContextFactory) self.assertEqual(context_factory.verify_options, None) else: self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS) @inlineCallbacks def test_https_request_verify_none(self): self.http_request_succeed("foo") reply = yield self.dispatch_command( 'get', url='https://www.example.com', verify_options=['VERIFY_NONE']) self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('https://www.example.com', method='GET') context = self.get_context() self.assertEqual(context.get_verify_mode(), VERIFY_NONE) @inlineCallbacks def test_https_request_verify_peer_or_fail(self): # This test's behaviour depends on the version of Twisted being used. 
self.http_request_succeed("foo") reply = yield self.dispatch_command( 'get', url='https://www.example.com', verify_options=['VERIFY_PEER', 'VERIFY_FAIL_IF_NO_PEER_CERT']) self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('https://www.example.com', method='GET') context = self.get_context() # We don't control verify mode in newer Twisted. self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE) if HttpClientPolicyForHTTPS is None: self.assertEqual( context.get_verify_mode(), VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT) @inlineCallbacks def test_handle_post_files(self): self.http_request_succeed('') reply = yield self.dispatch_command( 'post', url='https://www.example.com', files={ 'foo': { 'file_name': 'foo.json', 'content_type': 'application/json', 'data': base64.b64encode(json.dumps({'foo': 'bar'})), } }) self.assertTrue(reply['success']) self.assert_http_request( 'https://www.example.com', method='POST', files={ 'foo': ('foo.json', 'application/json', json.dumps({'foo': 'bar'})), }) @inlineCallbacks def test_data_limit_exceeded_using_head_method(self): self.http_request_succeed('', headers={ 'Content-Length': str(self.resource.DEFAULT_DATA_LIMIT + 1), }) reply = yield self.dispatch_command( 'head', url='https://www.example.com',) self.assertTrue(reply['success']) self.assertEqual(reply['body'], "") self.assert_http_request('https://www.example.com', method='HEAD') @inlineCallbacks def test_data_limit_exceeded_using_header(self): self.http_request_succeed('', headers={ 'Content-Length': str(self.resource.DEFAULT_DATA_LIMIT + 1), }) reply = yield self.dispatch_command( 'get', url='https://www.example.com',) self.assertFalse(reply['success']) self.assertEqual( reply['reason'], 'Received %d bytes, maximum of %s bytes allowed.' 
% ( self.resource.DEFAULT_DATA_LIMIT + 1, self.resource.DEFAULT_DATA_LIMIT,)) @inlineCallbacks def test_data_limit_exceeded_inferred_from_body(self): self.http_request_succeed('1' * (self.resource.DEFAULT_DATA_LIMIT + 1)) reply = yield self.dispatch_command( 'get', url='https://www.example.com',) self.assertFalse(reply['success']) self.assertEqual( reply['reason'], 'Received %d bytes, maximum of %s bytes allowed.' % ( self.resource.DEFAULT_DATA_LIMIT + 1, self.resource.DEFAULT_DATA_LIMIT,)) @inlineCallbacks def test_https_request_method_default(self): self.http_request_succeed("foo") reply = yield self.dispatch_command( 'get', url='https://www.example.com') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('https://www.example.com', method='GET') context_factory = self.get_context_factory() self.assertEqual(context_factory.ssl_method, None) @inlineCallbacks def test_https_request_method_SSLv3(self): self.http_request_succeed("foo") reply = yield self.dispatch_command( 'get', url='https://www.example.com', ssl_method='SSLv3') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('https://www.example.com', method='GET') context_factory = self.get_context_factory() self.assertEqual(context_factory.ssl_method, SSLv3_METHOD) @inlineCallbacks def test_https_request_method_SSLv23(self): self.http_request_succeed("foo") reply = yield self.dispatch_command( 'get', url='https://www.example.com', ssl_method='SSLv23') self.assertTrue(reply['success']) self.assertEqual(reply['body'], "foo") self.assert_http_request('https://www.example.com', method='GET') context_factory = self.get_context_factory() self.assertEqual(context_factory.ssl_method, SSLv23_METHOD) @inlineCallbacks def test_https_request_method_TLSv1(self): self.http_request_succeed("foo") reply = yield self.dispatch_command( 'get', url='https://www.example.com', ssl_method='TLSv1') self.assertTrue(reply['success']) 
self.assertEqual(reply['body'], "foo") self.assert_http_request('https://www.example.com', method='GET') context_factory = self.get_context_factory() self.assertEqual(context_factory.ssl_method, TLSv1_METHOD)
18,541
5,388
# Demo: ask lightutils for an OS-assigned free TCP port,
# then show its value and (presumably int) type.
from lightutils import get_free_tcp_port

port = get_free_tcp_port()
print(port)
print(type(port))
99
37
from .accessor import Accessor
from . import parsers

import inspect


def populateAccessors():
    """
    Find all filetype-specific Accessor subclasses in the parsers file
    (i.e. NVSPL, SRCID, etc.) and instantiate them. This way, one instance
    of each Accessor is added to the soundDB namespace under the name of
    the Endpoint it uses.

    NOTE(review): despite the wording above, the mapping currently holds the
    subclasses themselves, not instances -- confirm which is intended.
    """
    # Match only strict subclasses of Accessor defined/visible in `parsers`.
    predicate = lambda obj: inspect.isclass(obj) and issubclass(obj, Accessor) and obj is not Accessor
    specificAccessorSubclasses = inspect.getmembers(parsers, predicate)
    accessors = {cls.endpointName: cls for name, cls in specificAccessorSubclasses}
    return accessors


# Expose each accessor under its endpoint name at module level.
globals().update(populateAccessors())

# Clean the helper names out of the module namespace.
# BUG FIX: the original also deleted `accessor`, a name never bound here
# (only the class `Accessor` is imported), which raised NameError on import.
del inspect, parsers, populateAccessors
722
202
from datetime import datetime
from sqlalchemy import (
    Column,
    Integer,
    Text,
    DateTime,
    SmallInteger,
    BigInteger,
    String,
    Date,
    ForeignKey,
    UniqueConstraint
    )
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import (
    scoped_session,
    sessionmaker,
    relationship
    )
from ..tools import as_timezone
from ..models import Base, CommonModel, DefaultModel, DBSession


class Propinsi(Base, DefaultModel):
    """Province reference table."""
    __tablename__ = 'propinsis'
    __table_args__ = {'extend_existing': True}
    kode = Column(String(2), unique=True, nullable=False)   # province code
    nama = Column(String(30), unique=True, nullable=False)  # province name

    @classmethod
    def get_deferred(cls):
        # (id, nama) pairs ordered by code -- e.g. for select-box options.
        return DBSession.query(cls.id, cls.nama).order_by(cls.kode).all()


class Dati2(Base, DefaultModel):
    """Second-level region (kabupaten/kota), unique per province + code."""
    __tablename__ = 'dati2s'
    __table_args__ = (UniqueConstraint('propinsi_id', 'kode', name="dati2_kode_key"),
                      {'extend_existing': True})
    kode = Column(String(2), nullable=False)                # region code within the province
    nama = Column(String(30), unique=True, nullable=False)  # region name
    propinsi_id = Column(Integer, ForeignKey('propinsis.id'))
    propinsi = relationship("Propinsi", backref="dati2")

    @classmethod
    def get_by_kode(cls, propinsi_id, kode):
        # First region matching the (province, code) pair, or None.
        return cls.query().filter_by(propinsi_id=propinsi_id, kode=kode).first()

    @classmethod
    def get_deferred(cls):
        # (id, nama) pairs ordered by code -- e.g. for select-box options.
        return DBSession.query(cls.id, cls.nama).order_by(cls.kode).all()


class Registers(Base, DefaultModel):
    """A registered local-government tenant, with billing and RPC settings."""
    __tablename__ = 'registers'
    __table_args__ = {'extend_existing': True}
    kode = Column(String(5), unique=True, nullable=False)
    nama = Column(String(30), unique=True, nullable=False)
    alamat_pemda = Column(String(128), unique=True, nullable=False)  # government office address
    nama_pic = Column(String(30), nullable=False)                    # contact person name
    nip_pic = Column(String(18), unique=True, nullable=False)        # contact person employee id
    no_telpon = Column(String(18), unique=True, nullable=False)
    no_hp = Column(String(18), unique=True, nullable=False)
    tgl_register = Column(Date, nullable=False)  # registration date
    tgl_update = Column(Date, nullable=True)     # last update date
    tgl_valid = Column(Date, nullable=False)     # valid-until date
    status = Column(SmallInteger, nullable=False, default=0)
    e_mail = Column(String(32), unique=True, nullable=False)
    jns_bayar = Column(SmallInteger, nullable=False)  # Transfer/Kartu Kredit
    tagih_nama = Column(String(30), nullable=False)    # billing name
    tagih_alamat = Column(String(128), nullable=False) # billing address
    password = Column(String(128), nullable=False)
    periode_bayar = Column(SmallInteger, nullable=False)  # billing period
    rpc_url = Column(String(128), nullable=False)
    rpc_userid = Column(String(128), nullable=False)
    rpc_password = Column(String(128), unique=True, nullable=False)
    propinsi_id = Column(Integer, ForeignKey('propinsis.id'))
    propinsi = relationship("Propinsi", backref="register")
    dati2_id = Column(Integer, ForeignKey('dati2s.id'))
    dati2 = relationship("Dati2", backref="register")


class Invoices(Base, DefaultModel):
    """An invoice issued to a registered tenant."""
    __tablename__ = 'invoices'
    __table_args__ = {'extend_existing': True}
    kode = Column(String(5), unique=True, nullable=False)
    nama = Column(String(30), unique=True, nullable=False)
    alamat = Column(String(128), unique=True, nullable=False)
    register_id = Column(Integer, ForeignKey("registers.id"), nullable=False)
    jumlah = Column(BigInteger, nullable=False)  # invoiced amount
    tgl_invoice = Column(Date, nullable=False)   # invoice date


class Payments(Base, DefaultModel):
    """A payment applied against an invoice."""
    __tablename__ = 'payments'
    __table_args__ = {'extend_existing': True}
    invoice_id = Column(Integer, ForeignKey("invoices.id"), nullable=False)
    jumlah = Column(BigInteger, nullable=False)      # paid amount
    tgl_bayar = Column(Date, nullable=False)         # payment date
    jns_bayar = Column(SmallInteger, nullable=False) # payment method
    posted = Column(SmallInteger, nullable=False, default=0)  # 0 = not yet posted
3,953
1,375
import nose
import os

from ogcserver.WMS import BaseWMSFactory


def test_wms_capabilities():
    """Load the encoding-test mapfile and verify exactly one layer and style."""
    directory, _filename = os.path.split(__file__)
    mapfile = os.path.join(directory, 'mapfile_encoding.xml')

    factory = BaseWMSFactory()
    with open(mapfile) as fh:
        xml_settings = fh.read()
    factory.loadXML(xmlstring=xml_settings, basepath=directory)
    factory.finalize()

    layer_count = len(factory.layers)
    if layer_count != 1:
        raise Exception('Incorrect number of layers')
    style_count = len(factory.styles)
    if style_count != 1:
        raise Exception('Incorrect number of styles')
    return True
556
188
from django.db import models

from .base import Base


class Transaction(Base):
    """A single financial movement: a credit, a debit, or a transfer
    between two accounts."""

    date = models.DateField()
    description = models.CharField(max_length=400)
    # NOTE(review): "ammount" is misspelled, but renaming the field would
    # require a schema migration and break existing callers — kept as-is.
    ammount = models.DecimalField(max_digits=10, decimal_places=2)
    category = models.ForeignKey('Category', on_delete=models.CASCADE)
    from_account = models.ForeignKey('Account', on_delete=models.CASCADE,
                                     related_name='from_transactions')
    # Only set for transfers; plain credits/debits leave it null.
    to_account = models.ForeignKey('Account', on_delete=models.CASCADE,
                                   null=True, blank=True,
                                   related_name='to_transactions')
    notes = models.TextField(null=True, blank=True)
    consolidated = models.BooleanField(default=False)

    @property
    def is_transfer(self):
        # Truthy when both ends are set. Note this evaluates to the
        # account object (or None), not a strict bool.
        return self.from_account and self.to_account

    @property
    def type(self):
        # One of: 'transfer', 'credit' (positive amount), 'debit'.
        if self.is_transfer:
            return 'transfer'
        return 'credit' if self.ammount > 0 else 'debit'

    def __str__(self):
        return self.description
977
299
import setuptools

# Read the long description from the README when present; fall back to an
# empty string so the package can still build from a bare sdist/checkout.
try:
    with open('README.md', 'r') as fh:
        long_description = fh.read()
except OSError:
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only file-access failures are expected here.
    long_description = ''

setuptools.setup(
    name='blackout',
    version='1.0.4',
    author='Mike Malinowski',
    author_email='mike@twisted.space',
    description='A python package making it easy to drop a multi-module package from sys.modules',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/mikemalinowski/blackout',
    packages=setuptools.find_packages(),
    entry_points="""
    [console_scripts]
    blackout = blackout:blackout
    """,
    py_modules=["blackout"],
    classifiers=[
        'Programming Language :: Python',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
)
864
272
class Element:
    """A minimal HTML element builder.

    Attributes:
        name: Tag name, e.g. "div".
        single: True for void elements (e.g. "<br>") rendered without a
            closing tag or content.
        attrs: Mapping of attribute name -> value.
        content: Inner HTML/text, rendered only for non-single elements.
    """

    def __init__(self, name, single):
        self.name = name
        self.single = single
        self.attrs = {}
        self.content = ""

    def set_attr(self, k, v):
        """Set attribute `k` to value `v`."""
        self.attrs[k] = v

    def get_attr(self, v):
        """Return the value of attribute `v`.

        Raises:
            KeyError: if the attribute was never set.
        """
        # Bug fix: previously returned self.attrs[k], referencing an
        # undefined name `k` and raising NameError on every call.
        return self.attrs[v]

    def tohtml(self):
        """Render the element (and its content, unless single) as HTML."""
        attrs = (" " + " ".join([
            "%s=\"%s\"" % (k, v) for k, v in self.attrs.items()
        ]) if len(self.attrs) > 0 else "")
        if self.single:
            s = "<%s%s>" % (self.name, attrs)
            return s
        else:
            s = "<%s%s>" % (self.name, attrs)
            s += self.content
            s += "</%s>" % self.name
            return s

    __str__ = tohtml
    __repr__ = tohtml
710
246
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""CLI entry point for synchronizing VK music into a local directory.

NOTE(review): this script is Python 2 only — it calls ``.decode('utf-8')``
on a ``str`` and deletes keys from ``args`` while iterating
``args.items()`` (safe in Py2 where items() returns a list, a
RuntimeError in Py3). Confirm before running under Python 3.
"""
from __future__ import print_function
import os
import argparse
from subprocess import call
from .vk_music import VkMusic
from .exceptions import AlreadyRunningError
from .defaults import SafeFsStorage


def main():
    # Build the CLI: target dir, VK identifiers, token handling, range limits.
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', type=str, nargs='?', help="Directory for synchronization")
    parser.add_argument("-uid", type=int, default=60411837, help="Vk user id")  # Default is my VK id :-)
    parser.add_argument("-client_id", type=int, default=2970439, help="Application id")  # Application ID from VK
    parser.add_argument("--threads", "-t", type=int, default=2, help="Number of threads to use")
    parser.add_argument("-token", type=str, help="access token to use")
    parser.add_argument("-token_dir", type=str, help="Directory where script will save token and temp data")
    parser.add_argument("-f", dest='force', default=False, action='store_true', help="Ignore already running error")
    parser.add_argument("-from", type=int, default=0, help="Start downloading from position")
    parser.add_argument("-to", type=int, help="End downloading on position")
    parser.add_argument("-redirect_url", type=str, help="Redirect url after getting token")
    args = vars(parser.parse_args())

    # Don't let not passed arguments to be
    # (drop None values so VkMusic only sees explicitly provided options)
    for k, v in args.items():
        if v is None:
            del args[k]

    workdir = args.get('dir', '').decode('utf-8') or os.getcwd() + '/Music'
    try:
        # Try to create directory if not exists
        if not os.path.isdir(workdir):
            os.makedirs(workdir)
        # Need write access to that dir
        os.chmod(workdir, 0o755)
        if not os.access(workdir, os.W_OK):
            raise Exception('Permission denied for dir %s' % workdir)
    except Exception as e:
        exit("Problem with directory '%s': %s" % (workdir, e))

    storage = SafeFsStorage(workdir)
    try:
        with VkMusic(storage, **args) as manager:
            # Start working
            result = manager.synchronize()
            try:
                # Best-effort desktop notification; silently ignored when
                # notify-send is unavailable.
                call(['notify-send', 'Vk Music',
                      'Saved: %(saved)s\n'
                      'Skipped: %(skipped)s\n'
                      'Removed: %(removed)s\n'
                      'Not removed: %(not_removed)s' % result])
            except Exception:
                pass
    except AlreadyRunningError:
        # If is running - terminate
        print('Other sync process is running. Please wait')


if __name__ == '__main__':
    main()
2,590
782
__author__ = "Jens Honer"
__copyright__ = "Copyright 2018, Jens Honer Tracking Toolbox"
__email__ = "-"
__license__ = "mit"
__version__ = "1.0"
__status__ = "Prototype"

import numpy as np

# Unit sign pattern walking the box boundary: 4 corners interleaved with
# the 4 edge midpoints.
_bbox_sign_factors = np.asarray(
    [
        [1.0, 1.0],
        [0.0, 1.0],
        [-1.0, 1.0],
        [-1.0, 0.0],
        [-1.0, -1.0],
        [0.0, -1.0],
        [1.0, -1.0],
        [1.0, 0.0],
    ], dtype='f4')


def convert_rectangle_to_eight_point(bboxes):
    """Expand each oriented rectangle into its 8 boundary points
    (corners plus edge midpoints), shape (len(bboxes), 8, 2)."""
    pt_set = np.zeros((len(bboxes), 8, 2))
    # Start every point at the box centre, then add rotated offsets.
    pt_set[:] = bboxes['center_xy'][:, None, :]

    for idx, box in enumerate(bboxes):
        sin_o = np.sin(box['orientation'])
        cos_o = np.cos(box['orientation'])
        rotation = np.array([[cos_o, -sin_o],
                             [sin_o, cos_o]])
        half_extent = 0.5 * box['dimension']
        pt_set[idx] += np.dot(_bbox_sign_factors * half_extent, rotation.T)

    return pt_set
950
405
from .base import *  # noqa

DEBUG = True
SECURE_SSL_REDIRECT = False

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "CHANGEME!!!"

# Enable FE component library
PATTERN_LIBRARY_ENABLED = True

INTERNAL_IPS = ("127.0.0.1", "10.0.2.2")

BASE_URL = "http://localhost:8000"

EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"

AUTH_PASSWORD_VALIDATORS = []

# Enable Wagtail's style guide in Wagtail's settings menu.
# http://docs.wagtail.io/en/stable/contributing/styleguide.html
INSTALLED_APPS += ["wagtail.contrib.styleguide"]  # noqa

# Set URL for the preview iframe. Should point at Gatsby.
# Fix: PREVIEW_URL was previously assigned twice ("http://localhost:8001/preview"
# then this value); the earlier assignment was dead code and has been removed.
PREVIEW_URL = "http://localhost:8003/preview/"

MEDIA_PREFIX = BASE_URL

try:
    from .local import *  # noqa
except ImportError:
    pass
866
340
# Smoke test for the SWIG-generated example bindings.
import swig_example

# Each call dispatches into the wrapped native code; the second exercises
# the symbol linked in from liba, verifying the link step worked.
swig_example.swig_example_hello()
swig_example.link_liba_hello()
84
32
from torch.utils.data import Dataset
from torchvision.transforms import transforms
from sklearn.model_selection import train_test_split
import os
import glob
import torch
import numpy as np
from PIL import Image
import pdb


class LocalDataset(Dataset):
    """Binary image dataset rooted at ``root/<class_idx>/<image>``.

    Images are discovered on disk via glob; directory name 0 is treated as
    the "normal" class and any non-zero directory as an outlier. Targets
    returned by __getitem__ are collapsed to 0/1 accordingly.
    """

    def __init__(
        self,
        root: str,
        dataset_name: str,  # NOTE(review): currently unused (see commented dataset_path below)
        target_transform,
        train=True,
        random_state=None,
        split=True,
        random_effect=True,
    ):
        # NOTE(review): super(Dataset, self) skips LocalDataset in the MRO;
        # the conventional form is super(LocalDataset, self) — confirm intent.
        super(Dataset, self).__init__()
        self.target_transform = target_transform
        self.classes = [0, 1]
        self.root = root
        self.train = train  # training set or test set

        # self.dataset_path = os.path.join(self.root, self.dataset_name)
        # class_idx/image
        # Collect jpg/jpeg/png paths; the class label is the parent dir name.
        X = np.array(glob.glob(os.path.join(self.root, "*/*.[jp][pn][g]")))
        y = [int(i.split("/")[-2]) for i in X]
        y = np.array(y)

        if split:
            idx_norm = y == 0
            idx_out = y != 0
            # 80% data for training and 20% for testing; keep outlier ratio
            # NOTE(review): comment says 80/20 but test_size=0.1 is a 90/10
            # split — confirm which is intended.
            # pdb.set_trace()
            X_train_norm, X_test_norm, y_train_norm, y_test_norm = train_test_split(
                X[idx_norm], y[idx_norm], test_size=0.1,
                random_state=random_state, stratify=y[idx_norm]
            )
            X_train_out, X_test_out, y_train_out, y_test_out = train_test_split(
                X[idx_out], y[idx_out], test_size=0.1,
                random_state=random_state, stratify=y[idx_out]
            )
            X_train = np.concatenate((X_train_norm, X_train_out))
            X_test = np.concatenate((X_test_norm, X_test_out))
            y_train = np.concatenate((y_train_norm, y_train_out))
            y_test = np.concatenate((y_test_norm, y_test_out))

            if self.train:
                self.data = X_train
                self.targets = torch.tensor(y_train, dtype=torch.int64)
            else:
                self.data = X_test
                self.targets = torch.tensor(y_test, dtype=torch.int64)
        else:
            self.data = X
            self.targets = torch.tensor(y, dtype=torch.int64)

        # Semi-supervised labels default to 0 (unlabelled) for every sample.
        self.semi_targets = torch.zeros_like(self.targets)

        # for training we will add brightness variance
        if random_effect:
            self.transform = transforms.Compose(
                [
                    # transforms.ColorJitter(
                    #     brightness=0.5 + int(np.random.rand(1)), contrast=0.5 + int(np.random.rand(1))
                    # ),
                    # saturation=0.5 + int(np.random.rand(1)),
                    # hue=0.5 + int(np.random.rand(1))),
                    transforms.Resize((224, 224)),
                    transforms.ToTensor(),
                ]
            )
        # for testing
        else:
            self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (sample, target, semi_target, index) where target is
            collapsed to 0 (normal) or 1 (outlier).
        """
        data = Image.open(self.data[index])
        data = self.transform(data)
        sample, target, semi_target = data, 0 if self.targets[index] == 0 else 1, int(self.semi_targets[index])
        return sample, target, semi_target, index

    def __len__(self):
        # Number of discovered image paths in this split.
        return len(self.data)
3,356
1,087
# Generated by Django 3.1.7 on 2021-04-15 22:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('envdaq', '0008_interface'), ] operations = [ migrations.AlterField( model_name='interface', name='config', field=models.JSONField(default=dict, verbose_name='Configuration'), ), ]
408
133
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('documents', '0001_initial'), ] operations = [ migrations.CreateModel( name='Client', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('address', models.TextField()), ('phone', models.CharField(max_length=50)), ('city', models.CharField(max_length=50)), ('country', models.CharField(max_length=50)), ('cuit', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='Company', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('logo', models.ImageField(upload_to=b'companies_logos')), ('address', models.TextField(null=True, blank=True)), ('phone', models.CharField(max_length=100)), ('email', models.EmailField(max_length=254)), ('web_url', models.URLField(null=True, blank=True)), ], ), migrations.CreateModel( name='Work', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('description', models.TextField(null=True, blank=True)), ('hours', models.FloatField(null=True, blank=True)), ('observations', models.TextField(null=True, blank=True)), ('proform', models.ForeignKey(related_name='works', to='documents.Proform')), ], ), ]
2,020
553
"""Advent-of-Code day scaffold: all helpers come from the local tutils module."""
from tutils import pdb
from tutils import subprocess
from tutils import Counter
from tutils import partial
from tutils import reduce
from tutils import wraps
from tutils import count
from tutils import groupby
from tutils import product
from tutils import prod
from tutils import itemgetter
from tutils import Path
from tutils import ascii_lowercase
from tutils import ascii_digits
from tutils import Any
from tutils import Callable
from tutils import List
from tutils import Iterable
from tutils import IterableS
from tutils import Optional
from tutils import Sequence
from tutils import OInt
from tutils import ODict
from tutils import UListStr
from tutils import Tuple
from tutils import Union
from tutils import hexc
from tutils import compose_left
from tutils import concat
from tutils import curry
from tutils import do
from tutils import excepts
from tutils import iterate
from tutils import keyfilter
from tutils import pluck
from tutils import pipe
from tutils import sliding_window
from tutils import toolz_pick
from tutils import toolz_omit
from tutils import omit
from tutils import pick
from tutils import add_debug
from tutils import add_debug_list
from tutils import run_process
from tutils import until_stable
from tutils import oxford
from tutils import excepts_wrap
from tutils import nextwhere
from tutils import noncontinuous
from tutils import lnoncontinuous
from tutils import lfilter
from tutils import lcompact
from tutils import lmap
from tutils import lpluck
from tutils import lstrip
from tutils import splitstrip
from tutils import splitstriplines
from tutils import seq_to_dict
from tutils import split_to_dict
from tutils import c_map
from tutils import c_lmap
from tutils import is_char_az
from tutils import is_char_hex
from tutils import is_char_az09
from tutils import filter_str
from tutils import filter_az
from tutils import filter_az09
from tutils import filter_hex
from tutils import add_pprint
from tutils import add_pprinting
from tutils import make_incrementer
from tutils import adjacent_transforms
from tutils import load_input
from tutils import process_input
from tutils import tests
from tutils import load_and_process_input
from tutils import run_tests

""" END HELPER FUNCTIONS """

# Day number and the derived puzzle/test input filenames.
DAY = "00"
INPUT, TEST = f"input-{DAY}.txt", f"test-input-{DAY}.txt"
# TA1/TA2: expected answers on the TEST input; ANSWER1/ANSWER2: the
# confirmed real answers once a part is solved.
TA1 = None
TA2 = None
ANSWER1 = None
ANSWER2 = None


def process_one(data: Any) -> Any:
    # Placeholder for part one; drops into the debugger until implemented.
    pdb.set_trace()
    return


def process_two(data: Any) -> Any:
    # Placeholder for part two; drops into the debugger until implemented.
    pdb.set_trace()
    return


def cli_main() -> None:
    """Load input, run sample tests, then compute and check both parts."""
    input_funcs = [splitstriplines]
    data = load_and_process_input(INPUT, input_funcs)
    run_tests(TEST, TA1, TA2, ANSWER1, input_funcs, process_one, process_two)
    answer_one = process_one(data)
    if ANSWER1 is not None:
        if answer_one != ANSWER1:
            pdb.set_trace()
        assert answer_one == ANSWER1
    print("Answer one:", answer_one)
    # NOTE(review): part two is gated on ANSWER1 (not ANSWER2) — presumably
    # deliberate ("only attempt part two once part one is confirmed"), but
    # worth confirming it isn't a copy-paste slip.
    if ANSWER1 is not None:
        answer_two = process_two(data)
        if ANSWER2 is not None:
            if answer_two != ANSWER2:
                pdb.set_trace()
            assert answer_two == ANSWER2
        print("Answer two:", answer_two)


if __name__ == "__main__":
    cli_main()
3,181
983
from setuptools import setup

# Packaging metadata for the `weather` CLI. Installs a `weather` console
# script that dispatches to weather.__main__:main.
setup(
    name='weather',
    version='0.1',
    description='CLI frontend for querying weather',
    packages=['weather'],
    entry_points={
        'console_scripts': ['weather = weather.__main__:main']
    },
    author='Aleksi Kauppila',
    author_email='aleksi.kauppila@gmail.com'
)
323
109
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin

from .models import *

# Expose the app's models in the Django admin with the default ModelAdmin.
admin.site.register(CharacterEvent)
admin.site.register(Event)
admin.site.register(CharacterOwner)
admin.site.register(Character)
# The custom User model reuses Django's stock UserAdmin configuration.
admin.site.register(User, UserAdmin)
271
77
#coding=utf-8
# Tkinter GUI for crawling, analyzing and forecasting flight-ticket data.
# Three tabs: crawl, analyze, predict. Long-running work is pushed onto
# daemon-less threads; results are stored as attributes on the worker
# functions themselves (e.g. spider_flight.flight) as poor-man's globals.
#
# NOTE(review): `fname is not ''` compares identity with a literal — works
# by accident on CPython, emits SyntaxWarning on 3.8+; should be `!= ''`.
# NOTE(review): FigureCanvasTkAgg.show() is deprecated in modern
# matplotlib (use draw()) — confirm the pinned matplotlib version.
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import messagebox as mBox
from tkinter import filedialog
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import datetime
import threading
import flight
import outlier
import analytics

# Main window
win = tk.Tk()
win.title("机票数据爬取分析预测")
win.resizable(0, 0)

# Three tabs: crawl / analyze / predict
tabControl = ttk.Notebook(win)
tab1 = ttk.Frame(tabControl)
tabControl.add(tab1, text='爬取')
tab2 = ttk.Frame(tabControl)
tabControl.add(tab2, text='分析')
tab3 = ttk.Frame(tabControl)
tabControl.add(tab3, text='预测')
tabControl.pack(expand=1, fill="both")

# Parameter frame (crawl tab)
monty = ttk.LabelFrame(tab1, text='')
monty.grid(column=0, row=0, padx=8, pady=4)
labelsFrame = ttk.LabelFrame(monty, text=' 参数 ')
labelsFrame.grid(column=0, row=0)

# City label
ttk.Label(labelsFrame, text="城市:").grid(column=0, row=0, sticky='W')
# City input box (comma-separated IATA-style city codes)
city = tk.Text(labelsFrame, width=20, height=10)
city.insert(tk.END, "'SHA', 'SIA', 'BJS', 'CAN', 'SZX', 'CTU', 'HGH', 'WUH', 'CKG', 'TAO', 'CSX', 'NKG', 'XMN', 'KMG', 'DLC', 'TSN', 'CGO', 'SYX', 'TNA', 'FOC'")
city.grid(column=1, row=0, sticky='W')

# Start-date label
ttk.Label(labelsFrame, text="起始日期:").grid(column=0, row=1, sticky='W')
# Start-date input box (defaults to tomorrow)
date1 = tk.StringVar()
da_days = datetime.datetime.now() + datetime.timedelta(days=1)
date1.set(da_days.strftime('%Y-%m-%d'))
date1Entered = ttk.Entry(labelsFrame, textvariable=date1)
date1Entered.grid(column=1, row=1, sticky='W')

# End-date label
ttk.Label(labelsFrame, text="截止日期:").grid(column=0, row=2, sticky='W')
# End-date input box (defaults to tomorrow)
date2 = tk.StringVar()
da_days2 = datetime.datetime.now() + datetime.timedelta(days=1)
date2.set(da_days2.strftime('%Y-%m-%d'))
date2Entered = ttk.Entry(labelsFrame, textvariable=date2)
date2Entered.grid(column=1, row=2, sticky='W')

# Log box
scrolW = 91
scrolH = 37
scr = scrolledtext.ScrolledText(monty, width=scrolW, height=scrolH, wrap=tk.WORD)
scr.grid(column=3, row=0, sticky='WE', rowspan=5)


# Crawl the data (runs on a worker thread; result kept on the function)
def spider_flight():
    spider_flight.flight = flight.spider(city.get("0.0", "end"), date1.get(), date2.get(), scr)
spider_flight.flight = None


def run_spider_flight():
    # Log the request, then crawl in the background to keep the UI responsive.
    scr.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n爬取数据:\n城市:' + str(city.get("0.0", "end")) + '\n日期:' + str(date1.get()) + ' 至 ' + str(date2.get()) + '\n\n')
    t = threading.Thread(target=spider_flight)
    t.start()

# Crawl button
spider = ttk.Button(labelsFrame, text="爬取", width=10, command=run_spider_flight)
spider.grid(column=0, row=4, sticky='W')


# Save the crawled data to a JSON file chosen by the user
def save_file():
    if spider_flight.flight is not None:
        fname = tk.filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        if fname is not '':
            spider_flight.flight.save(fname)
            scr.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n数据保存到 ' + fname + '\n\n')
    else:
        mBox.showwarning('Python Message Warning Box', '请先爬取数据!')

# Save button
save = ttk.Button(labelsFrame, text="保存", width=10, command=save_file)
save.grid(column=1, row=4, sticky='E')

for child in labelsFrame.winfo_children():
    child.grid_configure(padx=8, pady=4)
for child in monty.winfo_children():
    child.grid_configure(padx=3, pady=1)

# Parameter frame (analyze tab)
monty2 = ttk.LabelFrame(tab2, text='')
monty2.grid(column=0, row=0, padx=8, pady=4)
labelsFrame2 = ttk.LabelFrame(monty2, text=' 参数 ')
labelsFrame2.grid(column=0, row=0)

# Log box
scrolW = 34
scrolH = 25
scr2 = scrolledtext.ScrolledText(monty2, width=scrolW, height=scrolH, wrap=tk.WORD)
scr2.grid(column=0, row=3, sticky='WE')

# Data label
ttk.Label(labelsFrame2, text="数据:").grid(column=0, row=0, sticky='W')


# Open a previously saved JSON data file for analysis
def data_file():
    fname = tk.filedialog.askopenfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
    if fname is not '':
        data_file.outlier = outlier.Outlier(fname)
        scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n打开文件 ' + fname + '\n\n')
data_file.outlier = None

# Open-file button
data = ttk.Button(labelsFrame2, text="打开文件", width=10, command=data_file)
data.grid(column=1, row=0, sticky='E')

# Outlier-count label
ttk.Label(labelsFrame2, text="异常数:").grid(column=0, row=1, sticky='W')
# Outlier-count input box
diff = tk.IntVar()
diff.set(5)
diffEntered = ttk.Entry(labelsFrame2, textvariable=diff)
diffEntered.grid(column=1, row=1, sticky='W')


# Plot: run outlier analysis and draw onto the embedded figure
def drawdiff():
    try:
        num_diff = int(diffEntered.get())
    except:
        # Invalid entry: reset to the default of 5.
        num_diff = 5
        diffEntered.delete(0, tk.END)
        diffEntered.insert(0, 5)
    drawdiff.f.clf()
    drawdiff.out = data_file.outlier.extreme(drawdiff.f, scr2, num_diff)
    drawdiff.canvas.show()
drawdiff.out = None
drawdiff.f = plt.figure()
drawdiff.canvas = FigureCanvasTkAgg(drawdiff.f, master=monty2)
drawdiff.canvas.show()
drawdiff.canvas.get_tk_widget().grid(column=1, row=0, rowspan=4)


def run_drawdiff():
    # Only analyze after a file has been opened; work runs on a thread.
    if data_file.outlier is not None:
        scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n分析数据(设定 ' + str(diffEntered.get()) + ' 个异常值)...\n\n异常值:\n')
        t = threading.Thread(target=drawdiff)
        t.start()
    else:
        mBox.showwarning('Python Message Warning Box', '请先打开文件!')

# Analyze button
da = ttk.Button(labelsFrame2, text="分析", width=10, command=run_drawdiff)
da.grid(column=0, row=2, sticky='W')


# Save the detected outliers to a JSON file
def save_file2():
    if drawdiff.out is not None:
        fname = tk.filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        if fname is not '':
            with open(fname, 'w') as f1:
                f1.write(str(drawdiff.out))
            scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n异常值保存到 ' + fname + '\n\n')
    else:
        mBox.showwarning('Python Message Warning Box', '请先分析数据!')

# Save button
save2 = ttk.Button(labelsFrame2, text="保存", width=10, command=save_file2)
save2.grid(column=1, row=2, sticky='E')

for child in labelsFrame2.winfo_children():
    child.grid_configure(padx=8, pady=4)
for child in monty2.winfo_children():
    child.grid_configure(padx=8, pady=4)

# Parameter frame (predict tab)
monty3 = ttk.LabelFrame(tab3, text='')
monty3.grid(column=0, row=0, padx=8, pady=4)
labelsFrame3 = ttk.LabelFrame(monty3, text=' 参数 ')
labelsFrame3.grid(column=0, row=0)

# Log box
scrolW = 34
scrolH = 25
scr3 = scrolledtext.ScrolledText(monty3, width=scrolW, height=scrolH, wrap=tk.WORD)
scr3.grid(column=0, row=3, sticky='WE')

# Data label
ttk.Label(labelsFrame3, text="数据:").grid(column=0, row=0, sticky='W')


# Open a previously saved JSON data file for prediction
def data_file2():
    fname = tk.filedialog.askopenfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
    if fname is not '':
        data_file2.analytics = analytics.Analytics(fname)
        scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n打开文件 ' + fname + '\n\n')
data_file2.analytics = None

# Open-file button
data2 = ttk.Button(labelsFrame3, text="打开文件", width=10, command=data_file2)
data2.grid(column=1, row=0, sticky='E')

# Prediction-horizon label
ttk.Label(labelsFrame3, text="预测天数:").grid(column=0, row=1, sticky='W')
# Prediction-horizon input box (days)
days = tk.IntVar()
days.set(30)
daysEntered = ttk.Entry(labelsFrame3, textvariable=days)
daysEntered.grid(column=1, row=1, sticky='W')


# Plot: run the prediction and draw onto the embedded figure
def drawpredict():
    try:
        num_day = int(daysEntered.get())
    except:
        # Invalid entry: reset to the default of 30.
        num_day = 30
        daysEntered.delete(0, tk.END)
        daysEntered.insert(0, 30)
    # Clear the figure so successive plots don't overlap.
    drawpredict.f.clf()
    drawpredict.out = data_file2.analytics.predict(num_day, scr3)
    drawpredict.canvas.show()
drawpredict.out = None
drawpredict.f = plt.figure()
drawpredict.canvas = FigureCanvasTkAgg(drawpredict.f, master=monty3)
drawpredict.canvas.show()
drawpredict.canvas.get_tk_widget().grid(column=1, row=0, rowspan=4)


def run_drawpredict():
    # Only predict after a file has been opened; training runs on a thread.
    if data_file2.analytics is not None:
        scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n分析数据(设定预测 ' + str(daysEntered.get()) + ' 天)...\n\n训练过程:\n轮次/总轮次 [损失]\n')
        t = threading.Thread(target=drawpredict)
        t.start()
    else:
        mBox.showwarning('Python Message Warning Box', '请先打开文件!')

# Predict button
pr = ttk.Button(labelsFrame3, text="预测", width=10, command=run_drawpredict)
pr.grid(column=0, row=2, sticky='W')


# Save the training log and predictions to a JSON file
def save_file3():
    if drawpredict.out is not None:
        fname = tk.filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        with open(fname, 'w') as f1:
            # Open the file and dump the prediction output.
            f1.write(str(drawpredict.out))
        scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n训练过程和预测结果保存到 ' + fname + '\n\n')
    else:
        mBox.showwarning('Python Message Warning Box', '请先预测数据!')

# Save button
save = ttk.Button(labelsFrame3, text="保存", width=10, command=save_file3)
save.grid(column=1, row=2, sticky='E')

for child in labelsFrame3.winfo_children():
    child.grid_configure(padx=8, pady=4)
for child in monty3.winfo_children():
    child.grid_configure(padx=8, pady=4)

if __name__ == "__main__":
    win.mainloop()
9,142
4,060
"""Example of assigning a variable."""

# Prompt on stdin and keep the raw response (input() always returns a str).
user_name = input("What is your name? ")
80
23
from .common import layers, grid, plotter, DEFAULT_COLORS, set_axes_equal from .atoms import plot_atoms, plot_points from .SiteNetworkPlotter import SiteNetworkPlotter from .SiteTrajectoryPlotter import SiteTrajectoryPlotter
228
69
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017-2022 Anderson Bravalheri, Univertity of Bristol
#                     High Performance Networks Group
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class UndefinedAdapter(RuntimeWarning):
    """Warns that a grid is about to be committed without an adapter."""

    DEFAULT_MESSAGE = "Please use a valid adapter when defining the grid."

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        # Default message is injected so bare `UndefinedAdapter()` is useful.
        super(UndefinedAdapter, self).__init__(message, *args, **kwargs)


class FrozenObject(AttributeError):
    """Raised when attempting to set attributes on a frozen object."""


class ReadonlyAttribute(AttributeError):
    """Raised on write access to a read-only attribute."""


class UnsupportedResolution(ValueError):
    """Raised when a grid violates the vendor grid specification."""


class OverlappedChannels(ValueError):
    """Raised when grid channels are not disjoint."""


class OutOfRange(ValueError):
    """Raised when a value falls outside the vendor's specified range."""
1,525
420
"""
Python 2/3 Compatibility
========================

Not sure we need to support anything but Python 2.7 at this point,
but this module was copied over from flask-peewee for the time being.
"""
import sys

# True when running under any Python 2.x interpreter.
PY2 = sys.version_info[0] == 2

if PY2:
    # `unicode`/`unichr`/builtin `reduce` only exist on Python 2; the
    # re-assignments pin them as module attributes for uniform import.
    text_type = unicode
    string_types = (str, unicode)
    unichr = unichr
    reduce = reduce
else:
    text_type = str
    string_types = (str, )
    unichr = chr
    from functools import reduce
449
151
class Comic:
    """Plain data holder for a single comic entry; text fields start empty
    and are filled in later."""

    # All string-valued attributes initialised to "".
    _TEXT_FIELDS = (
        "title",
        "title_text",
        "transcript",
        "explanation",
        "img_url",
        "og_title",
        "og_ttext",
    )

    def __init__(self, comic_num):
        # Numeric identifier of the comic.
        self.id = comic_num
        for field in self._TEXT_FIELDS:
            setattr(self, field, "")
216
97
# from discord.ext.commands import Cog # from discord_slash import SlashContext, cog_ext # from discord_slash.utils.manage_commands import create_option # # # class TicTacToeAI(Cog): # def __init__(self, client): # self.client = client # # @cog_ext.cog_subcommand( # base="tictactoe", # base_description="You know and love it - TicTacToe", # name="computer", # description="Try to beat me in a tic tac toe game", # options=[ # create_option( # name="easy_mode", # description="Set this to true if you are too weak for the normal mode", # option_type=5, # required=False, # ), # ], # ) # async def _tictactoe_ai(self, ctx: SlashContext, easy_mode: bool = False): # pass # # # def setup(client): # TicTacToeAI(client)
888
283
"""This module defines the karma_test rule."""

load("@infra-sk_npm//@bazel/typescript:index.bzl", "ts_library")
load("@infra-sk_npm//@bazel/rollup:index.bzl", "rollup_bundle")
load("@infra-sk_npm//karma:index.bzl", _generated_karma_test = "karma_test")

def karma_test(name, srcs, deps, entry_point = None):
    """Runs unit tests in a browser with Karma and the Mocha test runner.

    When executed with `bazel test`, a headless Chrome browser will be used. This supports testing
    multiple karma_test targets in parallel, and works on RBE.

    When executed with `bazel run`, it prints out a URL to stdout that can be opened in the browser,
    e.g. to debug the tests using the browser's developer tools. Source maps are generated.

    When executed with `ibazel test`, the test runner never exits, and tests will be rerun every time
    a source file is changed.

    When executed with `ibazel run`, it will act the same way as `bazel run`, but the tests will be
    rebuilt automatically when a source file changes. Reload the browser page to see the changes.

    Args:
      name: The name of the target.
      srcs: The *.ts test files.
      deps: The ts_library dependencies for the source files.
      entry_point: File in srcs to be used as the entry point to generate the JS bundle executed by
        the test runner. Optional if srcs contains only one file.
    """

    # Validate entry_point up front so misuse fails at load time, not build time.
    if len(srcs) > 1 and not entry_point:
        fail("An entry_point must be specified when srcs contains more than one file.")
    if entry_point and entry_point not in srcs:
        fail("The entry_point must be included in srcs.")
    if len(srcs) == 1:
        entry_point = srcs[0]

    # Compile the TypeScript test sources.
    ts_library(
        name = name + "_lib",
        srcs = srcs,
        deps = deps + [
            # Add common test dependencies for convenience.
            "@infra-sk_npm//@types/mocha",
            "@infra-sk_npm//@types/chai",
            "@infra-sk_npm//@types/sinon",
        ],
    )

    # Bundle the compiled tests into one UMD file for the Karma runner.
    rollup_bundle(
        name = name + "_bundle",
        entry_point = entry_point,
        deps = [
            name + "_lib",
            "@infra-sk_npm//@rollup/plugin-node-resolve",
            "@infra-sk_npm//@rollup/plugin-commonjs",
            "@infra-sk_npm//rollup-plugin-sourcemaps",
        ],
        format = "umd",
        config_file = "//infra-sk:rollup.config.js",
    )

    # This rule is automatically generated by rules_nodejs from Karma's package.json file.
    _generated_karma_test(
        name = name,
        size = "large",
        data = [
            name + "_bundle",
            "//infra-sk/karma_test:karma.conf.js",
            "@infra-sk_npm//karma-chrome-launcher",
            "@infra-sk_npm//karma-sinon",
            "@infra-sk_npm//karma-mocha",
            "@infra-sk_npm//karma-chai",
            "@infra-sk_npm//karma-chai-dom",
            "@infra-sk_npm//karma-spec-reporter",
            "@infra-sk_npm//mocha",
        ],
        templated_args = [
            "start",
            "$(execpath //infra-sk/karma_test:karma.conf.js)",
            "$$(rlocation $(location %s_bundle))" % name,
        ],
        tags = [
            # Necessary for it to work with ibazel.
            "ibazel_notify_changes",
        ],
    )
3,254
1,028
from fileutils.fileutils import save_output_to_file, select_option_from_menu


class ClassicAnalyzerStats:
    """Interactive stats menu over a sample of FPL classic-league managers.

    Every line printed to the console is mirrored into an internal output
    buffer so the whole session can be appended to a report file afterwards.
    """

    def __init__(self, data, current_event, output_file_name):
        # data: iterable of manager objects (gw_points(), gw_hits, captain
        # names, chips, team_value, ... — see usages below).
        self.__data = data
        self.__current_event = current_event
        self.__output_file_name = output_file_name
        self.__output = []
        self.__options = self.__init_options()
        self.__append_options_to_output()

    def save_stats_output_to_file(self):
        """Append the buffered session output to the report file."""
        save_output_to_file(self.__output_file_name, "a+", self.__output)

    def stats_menu(self):
        """Run the interactive menu loop until the user chooses 10) Exit."""
        while True:
            exception_msg = "\n[!] Please enter an integer from 1 to 10."
            option = select_option_from_menu(self.__options, exception_msg)
            self.__output.append("Selected option: {}".format(option))
            # -1 signals invalid input from the menu helper; re-prompt.
            if option == -1:
                continue
            if option == 1:
                self.__calculate_average_points()
            elif option == 2:
                self.__print_captains((list(map(lambda x: x.captain_name, self.__data))))
            elif option == 3:
                self.__print_captains((list(map(lambda x: x.vice_captain_name, self.__data))))
            elif option == 4:
                self.__print_chip_usage_whole_season()
            elif option == 5:
                self.__print_chip_usage_current_event()
            elif option == 6:
                self.__count_managers_made_transfer()
            elif option == 7:
                self.__count_managers_took_hit()
            elif option == 8:
                self.__print_team_value(max)
            elif option == 9:
                self.__print_team_value(min)
            elif option == 10:
                self.__output.append("")
                break
            else:
                print("\n[!] Invalid option. Try again!")

    @staticmethod
    def init_a_dict(key, dictionary):
        # Increment-or-initialise a counter entry.
        if key not in dictionary:
            dictionary[key] = 1
        else:
            dictionary[key] += 1

    def print_chips(self, chips):
        """Print `name(count)` for each chip and mirror it to the buffer."""
        for chip in chips:
            string = "{}({})".format(chip, chips[chip])
            print(string, end=" ")
            self.__output.append(string)
        print()
        self.__output.append("")

    def __init_options(self):
        # Menu text shown by select_option_from_menu.
        options = ["\n* Please choose an option from 1 to 10:",
                   "1) Sample's average score",
                   "2) Most captained players",
                   "3) Most vice-captained players",
                   "4) Chips usage during the whole season",
                   "5) Chips usage during GW{}".format(self.__current_event),
                   "6) Count of managers made at least one transfer",
                   "7) Count of managers took at least one hit",
                   "8) Richest manager(s)",
                   "9) Poorest manager(s)",
                   "10) Exit"]
        return options

    def __calculate_average_points(self):
        # Average net gameweek score across the sample (hits subtracted).
        managers_count = len(self.__data)
        total_points = 0
        for manager in self.__data:
            total_points += manager.gw_points()
            total_points -= manager.gw_hits
        average_points = total_points / managers_count
        result = "{:.2f} points".format(average_points)
        print(result)
        self.__output.append(result)
        self.__output.append("")

    def __print_captains(self, list_of_captains):
        # Tally captain picks and print them in descending popularity.
        captains = {}
        for captain in list_of_captains:
            self.init_a_dict(captain, captains)
        captains_sorted = [(captain, captains[captain])
                           for captain in sorted(captains, key=captains.get, reverse=True)]
        for key, value in captains_sorted:
            captain = "{}({})".format(key, value)
            print(captain, end=" ")
            self.__output.append(captain)
        print()
        self.__output.append("")

    def __print_chip_usage_whole_season(self):
        chips = {}
        for manager in self.__data:
            for chip in manager.used_chips_by_gw:
                self.init_a_dict(chip, chips)
        self.print_chips(chips)

    def __print_chip_usage_current_event(self):
        active_chips = {}
        for manager in self.__data:
            active_chip = manager.active_chip
            # active_chip is the string "None" (not the None object) when no
            # chip is active — presumably set upstream; TODO confirm.
            if active_chip != "None":
                self.init_a_dict(active_chip, active_chips)
        if len(active_chips) < 1:
            result = "No manager has used any chip in GW{}".format(self.__current_event)
            self.__log_string(result)
        else:
            self.print_chips(active_chips)

    def __count_managers_made_transfer(self):
        result = len(list(filter(lambda x: x.gw_transfers > 0, self.__data)))
        # Singular/plural phrasing for the count.
        if result == 1:
            managers_count = "1 manager"
        else:
            managers_count = "{} managers".format(result)
        self.__log_string(managers_count)

    def __count_managers_took_hit(self):
        result = len(list(filter(lambda x: x.gw_hits > 0, self.__data)))
        managers_count = "{} managers".format(result)
        self.__log_string(managers_count)

    def __print_team_value(self, extremum):
        # Shared by options 8 and 9: extremum is `max` (richest) or `min`
        # (poorest); the local names say "richest" but apply to both cases.
        team_values = list(map(lambda x: x.team_value, self.__data))
        max_value = extremum(team_values)
        richest_managers = list(filter(lambda x: x.team_value == max_value, self.__data))
        richest_managers_names = (list(map(lambda x: x.manager_name, richest_managers)))
        result = ", ".join(richest_managers_names)
        result_string = "{} ({}M)".format(result, format(max_value, ".1f"))
        self.__log_string(result_string)

    def __append_options_to_output(self):
        # Record the menu itself in the report, framed by blank lines.
        self.__output.append("")
        [self.__output.append(option) for option in self.__options]
        self.__output.append("")

    def __log_string(self, string):
        # Print and mirror a single line, followed by a blank separator.
        print(string)
        self.__output.append(string)
        self.__output.append("")
5,894
1,760
import xallennlp.training.mlflow_callback import xallennlp.training.mlflow_checkpointer
88
28
# -*- coding: utf-8 -*-
"""colabUtil.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1KX9x-rqyj0XfUkLtfOVh8t8T_kW0hs0u

#Colab Util

This is a collection of utility functions that simplifies data science
researching using colab. I wrote this while working through *Deep Learning
with Python* by Francisco Chollet.

Most of createPyDrive is from
https://gist.github.com/rdinse/159f5d77f13d03e0183cb8f7154b170a

##Usage

###Pull in py files into colab. The content will be in colabUtil folder.

```python
!pip install -U -q PyDrive
!git clone https://github.com/cmcheungMOOC/colabUtil.git
```

###Add colab directory to module path

```python
import sys
sys.path.insert(0, '/content/colabUtil')
```

###Share and enjoy!

```python
import colabutil as cu
cu.setupGlove()
cu.setupAclImdb()
cu.setupKaggleCatsAndDogs()
cu.restore('CNN_Results')
cu.save('CNN_Results')
```

##Assumptions

I have made the following assumptions to allow me to simplify my code. This
code is not meant for general usage.

* Colab VMs are reliable
* Colab VMs will be recycled

These assumptions simply means that you can count on the VM to do work
correctly while it is still assigned to you, but the VM will be yanked from
under you. So, it is necessary to backup intermediate state information to
persistent storage such as a Google drive.

The transient nature of your Colab work space means that there is little
reason for complicated directory hierarchies. After all, anything you built
up will vanish overnight. This means that a simple directory hierarchy
supporting the tasks at hand is all you need.

##Directory Hierarchy

Colab workspace is rooted at /content. This is our default directory. In
addition, we use /content/dataset to store downloaded datasets. Intermediate
states of a ML algorithm is written onto /content. All top level content
/content can be zipped up and saved. The content can be restored when needed.
Note that only the latest state persists in the Google drive.

Unfortunately, I know of no easy way to get the title of a Jupyter notebook.
So, a user defined name need to be chosen for the backup zip file.

## Utility Functions
"""

#@title Download Dataset
import requests, os

def download(url, overwrite=False):
    """Download ``url`` into the current working directory.

    Returns the local file path. An existing file is kept unless
    ``overwrite`` is True.
    """
    baseName = os.path.basename(url)
    path = os.path.join(os.getcwd(), baseName)
    print('Downloading', url, 'to', path)
    if os.path.isfile(path):
        if not overwrite:
            print(path, 'already exists')
            return path
    r = requests.get(url, allow_redirects=True)
    open(path, 'wb').write(r.content)
    return path

#@title Test Download { run: "auto", vertical-output: true }
url = "" #@param ["", "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", "http://nlp.stanford.edu/data/glove.6B.zip"]
overwrite = False #@param {type:"boolean"}

if url != "":
    download(url, overwrite)
os.listdir()

"""###Untar Dataset into Current Working Directory

Currently, untar only support *.tar.gz. This will be extended only if there
is a real use case.
"""

import tarfile, os, shutil

def untar(gzName, dstDir='', skipIfDstDirExists=False):
    """Extract a .tar.gz archive into ``dstDir``; return the destination dir.

    When ``dstDir`` is empty, the archive's own directory (or the current
    working directory) is used.
    """
    if dstDir == '':
        dstDir = os.path.dirname(gzName)
        if dstDir == '':
            dstDir = os.getcwd()
    if skipIfDstDirExists and os.path.isdir(dstDir):
        print(dstDir, 'exists')
        return dstDir
    print('Extracting', gzName, 'to', dstDir)
    t = tarfile.open(name=gzName, mode='r:gz')
    #topLevelDirInTar = os.path.commonprefix(t.getnames())
    #print('topLevelDirInTar', topLevelDirInTar)
    t.extractall(dstDir)
    return dstDir

#@title Test Untar { run: "auto", vertical-output: true }
gzName = "" #@param ["", "aclImdb_v1.tar.gz"]
dstDir = "" #@param ["", ".", "/content/dataset"]

if gzName != "":
    d = untar(gzName, dstDir)
    print(d)
    print(os.listdir(d))

#@title Zip Up Content of a Specified Directory
import zipfile, os

def zip(srcDir='.', mode='w'):
    """Zip the top-level files of ``srcDir`` into ``srcDir + '.zip'``.

    Sub-directories and existing archives (.zip/.gz) are skipped.
    Returns the archive name, or None when ``srcDir`` is not a directory.
    """
    print('zip', srcDir, mode)
    if not os.path.isdir(srcDir):
        print(srcDir, 'is not a dir')
        return None
    if srcDir == '.':
        srcDir = os.getcwd()
    zipName = srcDir + '.zip'
    print('Creating', zipName, 'from', srcDir)
    with zipfile.ZipFile(zipName, mode=mode) as zf:
        compression = zipfile.ZIP_DEFLATED
        for fname in os.listdir(srcDir):
            # BUG FIX: the directory check previously tested the bare name
            # against the *current* directory; join with srcDir so nested
            # directories are detected when srcDir is not the cwd.
            if os.path.isdir(os.path.join(srcDir, fname)):
                print('Skipping', fname)
                continue
            _, ext = os.path.splitext(fname)
            if ext.lower() in ['.zip', '.gz']:
                print('Skipping', fname)
                continue
            path = os.path.join(srcDir, fname)
            zf.write(path, compress_type=compression)
            print(path, 'is added to', zipName)
    return zipName

#@title Test Zip { run: "auto" }
srcDir = "" #@param ["", ".", "/content", "/content/datalab"]

if srcDir != '':
    if not os.path.isdir(srcDir):
        os.mkdir(srcDir)
    print(zip(srcDir))

#@title Unzip Content
import os, zipfile, shutil

def unzip(zipName, dstDir='', skipIfDstDirExists=False):
    """Extract a zip archive into ``dstDir``; return the destination dir."""
    if dstDir == '':
        dstDir = os.path.dirname(zipName)
    if skipIfDstDirExists and os.path.isdir(dstDir):
        print(dstDir, 'exists')
        return dstDir
    print('Extracting', zipName, 'to', dstDir)
    z = zipfile.ZipFile(zipName, 'r')
    z.extractall(dstDir)
    return dstDir

#@title Test Unzip { run: "auto", vertical-output: true }
zipName = "" #@param ["", "glove.6B.zip", "/content/datalab.zip"]
dstDir = "" #@param ["", ".", "/content/dataset/glove.6B", "/content/dataset", "datalab", "a/b", "dataset/tmp"]

if zipName != "":
    d = unzip(zipName, dstDir)
    print(d)
    print(os.listdir(d))
os.listdir(d)

#@title Setup GLOVE
def setupGlove():
    """Download and extract the GloVe 6B embeddings into /content/dataset."""
    zipFile = download('http://nlp.stanford.edu/data/glove.6B.zip')
    unzip(zipFile, dstDir='/content/dataset/glove.6B', skipIfDstDirExists=True)

#@title Test GLOVE Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
    setupGlove()

#@title Setup ACLIMDB
def setupAclImdb():
    """Download and extract the ACL IMDB sentiment dataset."""
    gzFile = download('http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz')
    untar(gzFile, dstDir='/content/dataset/aclImdb_v1', skipIfDstDirExists=True)

#@title Test ACLIMDB Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
    setupAclImdb()

#@title Setup Kaggle Cats and Dogs
def setupKaggleCatsAndDogs():
    """Download and extract the Kaggle cats-and-dogs dataset."""
    zipFile = download('https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip')
    unzip(zipFile, dstDir='/content/dataset/kagglecatsanddogs_3367a', skipIfDstDirExists=True)

#@title Test Kaggle Cats and Dogs Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
    setupKaggleCatsAndDogs()

"""##Pydrive Utilities

https://gsuitedevs.github.io/PyDrive/docs/build/html/index.html

Content of a specified directory is saved to or restored from a Google drive.
Most of createPyDrive is from
https://gist.github.com/rdinse/159f5d77f13d03e0183cb8f7154b170a
"""

#@title Authenticate and Create the PyDrive Client
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

def createPyDrive():
    """Authenticate (reusing cached credentials) and return a GoogleDrive."""
    print('createPyDrive')
    mycreds_file = 'mycreds_file.json'
    gauth = GoogleAuth()
    # https://stackoverflow.com/a/24542604/5096199
    # Try to load saved client credentials
    gauth.LoadCredentialsFile(mycreds_file)
    if gauth.credentials is None:
        # Authenticate if they're not there
        auth.authenticate_user()
        gauth.credentials = GoogleCredentials.get_application_default()
        print(gauth.credentials)
    elif gauth.access_token_expired:
        # Refresh them if expired
        gauth.Refresh()
    else:
        # Initialize the saved creds
        gauth.Authorize()
    # Save the current credentials to a file
    gauth.SaveCredentialsFile(mycreds_file)
    return GoogleDrive(gauth)

#@title Test CreatePyDrive { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
    drive = createPyDrive()
os.listdir()

#@title Create & Upload a File
def uploadFile(drive, fname):
    """Upload local file ``fname`` to the root of the Google drive."""
    print('uploadFile', fname)
    uploaded = drive.CreateFile({'title': fname})
    uploaded.SetContentFile(fname)
    uploaded.Upload()
    print('Uploaded {} with ID {}'.format(fname, uploaded.get('id')))

#@title Test UploadFile to Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
    if not os.path.exists(fname):
        print('Creating', fname)
        with open(fname, 'w') as fp:
            fp.write('abc')
    uploadFile(drive, fname)

#@title Find a File by Name in the Google Drive
def findFile(drive, fname):
    """Return the first root-level, non-trashed drive file titled ``fname``."""
    file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
    for file1 in file_list:
        if file1['title'] == fname:
            print('title: %s, id: %s' % (file1['title'], file1['id']))
            return file1

#@title Test Find File in Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
    findFile(drive, fname)

#@title Download a File and Optionally Trash it
def downloadFile(drive, fname, trashIt=False):
    """Download ``fname`` from the drive; optionally move it to trash.

    Returns the file's title, or None when it is not found.
    """
    print('downloadFile', fname)
    file1 = findFile(drive, fname)
    if not file1:
        print(fname, 'not found')
        return None
    downloaded = drive.CreateFile({'id': file1['id']})
    downloaded.GetContentFile(fname)
    if trashIt:
        downloaded.Trash()
        print(fname, 'is moved to trash')
    return file1['title']

#@title Test Download from Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
trashIt = False #@param {type:"boolean"}
if fname != '':
    print(downloadFile(drive, fname, trashIt))

#@title Google Drive Class
class GDrive:
    """Thin object wrapper around the PyDrive helpers above."""

    def __init__(self):
        self.drive = createPyDrive()

    def upload(self, fname):
        uploadFile(self.drive, fname)

    def download(self, fname, trashIt=True):
        return downloadFile(self.drive, fname, trashIt)

#@title Test Google Drive Class { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
    if not os.path.exists(fname):
        with open(fname, 'w') as fp:
            fp.write('abc')
    gd = GDrive()
    gd.upload(fname)
    gd.download(fname)

"""###Save and Restore the Content of a Directory"""

#@title Save Directory to Google Drive
def save(srcDirName):
    """Zip ``srcDirName`` (a bare directory name) and upload it to the drive."""
    if '/' in srcDirName:
        print('Use only the name of the dir, not the path to it')
        return
    zipName = zip(srcDirName)
    gd = GDrive()
    gd.upload(zipName)

#@title Test Directory Save { run: "auto", vertical-output: true }
srcDirName = "" #@param ["", "datalab", "/content/datalab"]

if srcDirName != '':
    if not os.path.isdir(srcDirName):
        os.mkdir(srcDirName)
    path = os.path.join(srcDirName, 'abc.txt')
    if not os.path.exists(path):
        with open(path, 'w') as fp:
            fp.write('abc')
    save(srcDirName)

#@title Restore Directory from Google Drive
import os

def restore(dstDirName):
    """Download ``dstDirName + '.zip'`` from the drive and extract it.

    Returns the extraction directory, or None when no backup exists (an
    empty directory is then created instead).
    """
    # BUG FIX: this guard previously tested `srcDirName`, a name that does
    # not exist in this function, raising NameError on every call.
    if '/' in dstDirName:
        print('Use only the name of the dir, not the path to it')
        return
    if os.path.isdir(dstDirName):
        print(dstDirName, 'already exists')
        return dstDirName
    zipName = dstDirName + '.zip'
    gd = GDrive()
    zf = gd.download(zipName)
    print('zf is', zf)
    if zf is None:  # was `== None`; identity check is the correct idiom
        os.mkdir(dstDirName)
        return None
    return unzip(zf, '.')

#@title Test Restore Directory { run: "auto", vertical-output: true }
dstDirName = "" #@param ["", "datalab", "CNN_Results"]

import shutil
if dstDirName != '':
    if os.path.isdir(dstDirName):
        print('rmtree', dstDirName)
        shutil.rmtree(dstDirName)
    print(restore(dstDirName))
11,599
4,169
""" Base common features for product readers """ __classification__ = "UNCLASSIFIED" __author__ = "Thomas McCullough" from typing import Sequence, List, Tuple, Union from sarpy.io.general.base import AbstractReader from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1 from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2 from sarpy.io.complex.sicd_elements.SICD import SICDType class SIDDTypeReader(AbstractReader): def __init__(self, sidd_meta, sicd_meta): """ Parameters ---------- sidd_meta : None|SIDDType1|SIDDType2|Sequence[SIDDType1]|Sequence[SIDDType2] The SIDD metadata object(s), if provided sicd_meta : None|SICDType|Sequence[SICDType] the SICD metadata object(s), if provided """ if sidd_meta is None: self._sidd_meta = None elif isinstance(sidd_meta, (SIDDType1, SIDDType2)): self._sidd_meta = sidd_meta else: temp_list = [] # type: List[Union[SIDDType1]] for el in sidd_meta: if not isinstance(el, (SIDDType1, SIDDType2)): raise TypeError( 'Got a collection for sidd_meta, and all elements are required ' 'to be instances of SIDDType.') temp_list.append(el) self._sidd_meta = tuple(temp_list) if sicd_meta is None: self._sicd_meta = None elif isinstance(sicd_meta, SICDType): self._sicd_meta = (sicd_meta, ) else: temp_list = [] # type: List[SICDType] for el in sicd_meta: if not isinstance(el, SICDType): raise TypeError( 'Got a collection for sicd_meta, and all elements are required ' 'to be instances of SICDType.') temp_list.append(el) self._sicd_meta = tuple(temp_list) @property def sidd_meta(self): # type: () -> Union[None, SIDDType1, SIDDType2, Tuple[SIDDType1], Tuple[SIDDType2]] """ None|SIDDType1|SIDDType2|Tuple[SIDDType1]|Tuple[SIDDType2]: the sidd meta_data collection. """ return self._sidd_meta @property def sicd_meta(self): # type: () -> Union[None, Tuple[SICDType]] """ None|Tuple[SICDType]: the sicd meta_data collection. 
""" return self._sicd_meta def get_sidds_as_tuple(self): """ Get the sidd collection as a tuple - for simplicity and consistency of use. Returns ------- Tuple[SIDDType1]|Tuple[SIDDType2] """ if self._sidd_meta is None: return None elif isinstance(self._sidd_meta, tuple): return self._sidd_meta else: return (self._sidd_meta, )
2,893
945
class GladierBaseTool(object):
    """Gladier Defaults defines a common method of tying together
    flows, funcx-functions, and default inputs for starting a flow."""

    # Class-level defaults, intended to be overridden by concrete tools.
    flow_definition = None
    flow_input = dict()
    required_input = []
    alias_exempt = ['funcx_endpoint_compute', 'funcx_endpoint_non_compute']
    funcx_endpoints = dict()
    funcx_functions = []

    def __init__(self, alias=None, alias_class=None):
        self.alias = alias
        if alias and not alias_class:
            raise ValueError(
                f'{self.__class__.__name__} given alias "{alias}" but not "alias_class". '
                'ex: alias_class=gladier.utils.tool_alias.StateSuffixVariablePrefix'
            )
        if alias_class:
            self.alias_renamer = alias_class(alias)

    def get_required_input(self):
        """Return the required input names, aliased where applicable."""
        if not self.alias:
            return self.required_input
        rename = self.alias_renamer.rename_variable
        return [
            name if name in self.alias_exempt else rename(name, self)
            for name in self.required_input
        ]

    def get_flow_input(self):
        """Return the default flow input, with aliased keys where applicable."""
        if not self.alias:
            return self.flow_input
        rename = self.alias_renamer.rename_variable
        return {
            (key if key in self.alias_exempt else rename(key, self)): value
            for key, value in self.flow_input.items()
        }

    def get_original_inputs(self):
        """Return all non-exempt input names (required plus defaults), un-aliased."""
        candidates = set(self.required_input) | set(self.flow_input.keys())
        return [name for name in candidates if name not in self.alias_exempt]

    def rename_state(self, state_name, state_data):
        """Return the (name, data) pair for a flow state after alias renaming."""
        renamer = self.alias_renamer
        new_name = renamer.rename_state(state_name, self)
        new_data = renamer.rename_input_variables(
            state_data, self.get_original_inputs(), self)
        return new_name, new_data
2,144
608
from django.conf import settings
from redis import StrictRedis
from rest_framework.response import Response
from rest_framework.views import APIView

from PersonManage.role.models import Role
from PersonManage.role.serializer import OneRole, ManyRole
from PersonManage.jurisdiction.models import Jurisdiction


class RoleView(APIView):
    """CRUD endpoint for Role objects.

    All responses use a uniform envelope: {'code': ..., 'msg': ..., 'data': ...}.
    """

    def get(self, request, id=None):
        """Return one role (when ``id`` is given) or the full role list."""
        if id:
            if role := Role.objects.filter(pk=id).first():
                data = OneRole(instance=role, many=False).data
                return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})
            return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
        else:
            roles = Role.objects.all()
            data = ManyRole(instance=roles, many=True).data
            return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})

    def post(self, request):
        """Create a role from 'name' and 'describe' in the request body."""
        try:
            role = Role(name=request.data['name'],
                        describe=request.data['describe'])
            role.save()
            return Response({'code': 200, 'msg': 'Create successful!', 'data': None})
        except Exception as ex:
            # A UNIQUE-constraint failure means the role name already exists.
            if 'UNIQUE' in str(ex):
                return Response({'code': 400, 'msg': 'Data duplication!', 'data': None})
            return Response({'code': 500, 'msg': str(ex), 'data': None})

    def put(self, request, id=None):
        """Partially update a role; optionally replace its jurisdictions."""
        if role := Role.objects.filter(pk=id).first():
            data = request.data
            if name := data.get('name'):
                role.name = name
            if describe := data.get('describe'):
                role.describe = describe
            if 'jurisdictions' in data:
                # Jurisdictions changed: flush the redis cache so stale
                # permission data is not served.
                # NOTE(review): flushdb clears the whole DB, not just this
                # role's keys — confirm that is intended.
                redis = StrictRedis(host=settings.DATABASES['redis']['HOST'],
                                    port=settings.DATABASES['redis']['PORT'],
                                    db=settings.DATABASES['redis']['NAME_2'],
                                    password=settings.DATABASES['redis']['PASS'])
                redis.flushdb()
                # Rebuild the many-to-many set from the submitted id list.
                role.jurisdictions.clear()
                for i in data['jurisdictions']:
                    jur = Jurisdiction.objects.filter(pk=i).first()
                    role.jurisdictions.add(jur)
            role.save()
            return Response({'code': 200, 'msg': 'Update successful!', 'data': None})
        return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})

    def delete(self, request, id=None):
        """Delete the role with the given primary key."""
        if role := Role.objects.filter(pk=id).first():
            role.delete()
            return Response({'code': 200, 'msg': 'Delete successful!'})
        return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
2,717
778
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See LICENSE file distributed along with the datalad_osf package for the
#   copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import json
from os import environ
from datalad.downloaders.credentials import (
    Token,
    UserPassword,
)
from datalad import ui


# Note: This should ultimately go into osfclient
def create_node(osf_session, title, category="data",
                tags=None, public=False, parent=None,
                description=None):
    """ Create a node on OSF

    Parameters
    ----------
    title: str
        Title of the node
    category: str
        categorization changes how the node is displayed on OSF, but doesn't
        appear to have a "real" function
    tags: list of str
    public: bool
        whether to make the new node public
    parent: str, optional
        ID of an OSF parent node to create a child node for
    description: str, optional
        free-text description attached to the node

    Returns
    -------
    tuple(str, str)
        ID of the created node and its human-facing (html) URL
    """

    if parent:
        # we have a parent, use its URL to create children
        url = osf_session.build_url('nodes', parent, 'children')
    else:
        url = osf_session.build_url('nodes')

    post_data = {"data": {"type": "nodes",
                          "attributes": {"title": title,
                                         "category": category,
                                         "public": public,
                                         }
                          }
                 }
    # Optional attributes are only sent when provided.
    if tags:
        post_data["data"]["attributes"]["tags"] = tags
    if description:
        post_data["data"]["attributes"]["description"] = description

    response = osf_session.post(url, data=json.dumps(post_data))
    # TODO: figure what errors to better deal with /
    # create a better message from
    response.raise_for_status()

    # TODO: This should eventually return an `node` instance (see osfclient).
    # Response contains all properties of the created node.
    node_id = response.json()['data']['id']
    # Note: Going for "html" URL here for reporting back to the user, since this
    # what they would need to go to in order to proceed manually.
    # There's also the flavor "self" instead, which is the node's
    # API endpoint.
    proj_url = response.json()["data"]["links"]["html"]
    return node_id, proj_url


def delete_node(osf_session, id_):
    """ Delete a node on OSF

    Parameters
    ----------
    id_: str
        to be deleted node ID
    """
    url = osf_session.build_url('nodes', id_)
    response = osf_session.delete(url)
    response.raise_for_status()


def initialize_osf_remote(remote, node,
                          encryption="none", autoenable="true"):
    """Initialize special remote with a given node

    convenience wrapper for git-annex-initremote w/o datalad

    Parameters
    ----------
    remote: str
        name for the special remote
    node: str
        ID of the node/component to use
    encryption: str
        see git-annex-initremote; mandatory option;
    autoenable: str
        'true' or 'false'; tells git-annex to automatically enable the
        special remote on git-annex-init (particularly after a fresh
        git-clone
    """
    init_opts = ["type=external", "externaltype=osf",
                 "encryption={}".format(encryption),
                 "autoenable={}".format(autoenable),
                 "node={}".format(node)]

    import subprocess
    subprocess.run(["git", "annex", "initremote", remote] + init_opts)


def get_credentials(allow_interactive=True):
    """Resolve OSF credentials.

    Precedence: process environment (OSF_TOKEN, or OSF_USERNAME+OSF_PASSWORD),
    then the DataLad credential store; a token takes priority over a
    username/password pair. Returns a dict with keys 'token', 'username',
    'password' (unused entries are None).
    """
    # prefer the environment
    if 'OSF_TOKEN' in environ or all(
            k in environ for k in ('OSF_USERNAME', 'OSF_PASSWORD')):
        return dict(
            token=environ.get('OSF_TOKEN', None),
            username=environ.get('OSF_USERNAME', None),
            password=environ.get('OSF_PASSWORD', None),
        )

    # fall back on DataLad credential manager
    token_auth = Token(
        name='https://osf.io',
        url='https://osf.io/settings/tokens',
    )
    up_auth = UserPassword(
        name='https://osf.io',
        url='https://osf.io/settings/account',
    )

    # Only prompt when we are allowed to AND a UI is attached.
    do_interactive = allow_interactive and ui.is_interactive()

    # get auth token, from environment, or from datalad credential store
    # if known-- we do not support first-time entry during a test run
    token = environ.get(
        'OSF_TOKEN',
        token_auth().get('token', None)
        if do_interactive or token_auth.is_known
        else None)
    username = None
    password = None
    if not token:
        # now same for user/password if there was no token
        username = environ.get(
            'OSF_USERNAME',
            up_auth().get('user', None)
            if do_interactive or up_auth.is_known
            else None)
        password = environ.get(
            'OSF_PASSWORD',
            up_auth().get('password', None)
            if do_interactive or up_auth.is_known
            else None)
    return dict(token=token, username=username, password=password)
5,288
1,522
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Module that contains Qt observer pattern related functions and classes """ from __future__ import print_function, division, absolute_import from uuid import uuid4 from functools import partial from Qt.QtCore import Signal, QObject class ObservableProxy(QObject): """ Observer class that allows us to invoke callbacks in UI threads from non UI threads. """ observerSignal = Signal(str, object) def __init__(self): super(ObservableProxy, self).__init__() self._mapping = dict() self.observerSignal.connect(self._on_call) # ================================================================================================================= # BASE # ================================================================================================================= def add_mapping(self, callback): callback_uuid = str(uuid4()) proxy_callback = partial(self.observerSignal.emit, callback_uuid) self._mapping[callback_uuid] = callback return proxy_callback # ================================================================================================================= # CALLBACKS # ================================================================================================================= def _on_call(self, uuid, *args, **kwargs): if uuid in self._mapping: self._mapping[uuid](args, kwargs)
1,487
357
"""Advent of Code 2019 Day 12.""" from functools import lru_cache import re def main(file_input='input.txt'): lines = [line.strip() for line in get_file_contents(file_input)] moons = parse_moons(lines) after_steps = simulate_steps(moons, 1000) total_energy = find_total_energy(after_steps) print(f'Total energy after 1000 steps: {total_energy}') cycles = simulate_steps(moons) *two_cycles, last_cycle = cycles.values() steps_to_repeat = int(lcm(lcm(*two_cycles), last_cycle)) print(f'Steps to reach first repeating state: {steps_to_repeat}') def simulate_steps(moons, steps=None): """Simulate number steps of moons. Returns moons after number of steps. If steps is None returns cycles of moons.""" cycles = {} initial_moons = moons step = 0 while not steps or step < steps: step += 1 moons = moon_motion(moons) if steps: continue for axis in range(3): if axis in cycles: continue if is_cycle(moons, initial_moons, axis): cycles[axis] = step if len(cycles) == 3: return cycles return moons def is_cycle(moons, initial, axis): """Check if moons cycled at the axis to the initial values.""" for moon, initial in zip(moons, initial): if (moon['position'][axis] != initial['position'][axis] or moon['velocity'][axis] != initial['velocity'][axis]): return False return True def moon_motion(initial_moons): """Move moons by one step.""" moons = [] for moon in initial_moons: cur_velocity = moon['velocity'] for other_moon in initial_moons: if moon == other_moon: continue velocity_change = join_with_function( gravity_effect, moon['position'], other_moon['position']) cur_velocity = join_with_function( int.__add__, cur_velocity, velocity_change) new_position = join_with_function( int.__add__, moon['position'], cur_velocity) moons.append({ 'position': new_position, 'velocity': cur_velocity, }) return moons def join_with_function(func, values1, values2): """Join values using func function.""" return [ func(value1, value2) for value1, value2 in zip(values1, values2) ] def gravity_effect(position, 
other_position): """Return effect other_position has on position.""" if position == other_position: return 0 elif position > other_position: return -1 return 1 def find_total_energy(moons): """Get total energy from moons.""" return sum(get_energy(moon['position']) * get_energy(moon['velocity']) for moon in moons) def get_energy(values): """Get energy from values.""" return sum(abs(value) for value in values) def parse_moons(lines): """Parse lines to dictionary with positions and velocity.""" moons = [] regex = r'([-\d]+)' for line in lines: position = [int(num) for num in re.findall(regex, line)] moons.append({ 'position': position, 'velocity': [0, 0, 0] }) return moons @lru_cache() def lcm(a, b): """Least common multiple.""" return abs(a * b) / gcd(a, b) @lru_cache() def gcd(a, b): """Greatest common divisor.""" if b == 0: return a return gcd(b, a % b) def get_file_contents(file): """Read all lines from file.""" with open(file) as f: return f.readlines() if __name__ == '__main__': main()
3,645
1,151
from django.utils.encoding import force_text
from django.utils.text import slugify

try:
    from rest_framework.serializers import ManyRelatedField
except ImportError:
    ManyRelatedField = type(None)

try:
    from rest_framework.serializers import ListSerializer
except ImportError:
    ListSerializer = type(None)


def get_related_field(field):
    """Unwrap a to-many wrapper field, returning the underlying relation."""
    for wrapper_type, child_attr in (
            (ManyRelatedField, 'child_relation'),
            (ListSerializer, 'child')):
        if isinstance(field, wrapper_type):
            return getattr(field, child_attr)
    return field


def is_related_many(field):
    """Return whether the field represents a to-many relationship."""
    if hasattr(field, "many"):
        return field.many
    return isinstance(field, (ManyRelatedField, ListSerializer))


def model_from_obj(obj):
    """Best-effort extraction of a model class from a view/serializer-like object."""
    model = getattr(obj, "model", None)
    if model is not None:
        return model
    queryset = getattr(obj, "queryset", None)
    return None if queryset is None else queryset.model


def model_to_resource_type(model):
    '''Return the verbose plural form of a model name, with underscores

    Examples:
        Person -> "people"
        ProfileImage -> "profile_image"
    '''
    return "data" if model is None else force_text(model._meta.verbose_name_plural)


#
# String conversion
#

def camelcase(string):
    '''Return a string in lowerCamelCase

    Examples:
        "people" -> "people"
        "profile images" -> "profileImages"
    '''
    titled = slug(string).replace('-', ' ').title()
    squashed = titled.replace(' ', '')
    return squashed[0].lower() + squashed[1:]


def slug(string):
    '''Return a string where words are connected with hyphens'''
    return slugify(force_text(string))


def snakecase(string):
    '''Return a string where words are connected with underscores

    Examples:
        "people" -> "people"
        "profile images" -> "profile_images"
    '''
    return slug(string).replace('-', '_')
1,909
581
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'Ui_ZhkuMainWindow.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_ZhkuMainWindow(object): def setupUi(self, ZhkuMainWindow): ZhkuMainWindow.setObjectName("ZhkuMainWindow") ZhkuMainWindow.resize(1007, 543) ZhkuMainWindow.setStyleSheet("QMainWindow#ZhkuMainWindow{\n" "border-image:url(:/img/img/ece414499b12f26fc1cdc8ccd7e019ea.jpg)}") self.centralwidget = QtWidgets.QWidget(ZhkuMainWindow) self.centralwidget.setStyleSheet("") self.centralwidget.setObjectName("centralwidget") self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget) self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_2.setSpacing(0) self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.widget = QtWidgets.QWidget(self.centralwidget) self.widget.setObjectName("widget") self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget) self.horizontalLayout.setContentsMargins(0, 0, 0, 0) self.horizontalLayout.setSpacing(0) self.horizontalLayout.setObjectName("horizontalLayout") self.horizontalLayout_2.addWidget(self.widget) self.widget_2 = QtWidgets.QWidget(self.centralwidget) self.widget_2.setObjectName("widget_2") self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget_2) self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_3.setSpacing(0) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.horizontalLayout_2.addWidget(self.widget_2) ZhkuMainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(ZhkuMainWindow) QtCore.QMetaObject.connectSlotsByName(ZhkuMainWindow) def retranslateUi(self, ZhkuMainWindow): _translate = QtCore.QCoreApplication.translate ZhkuMainWindow.setWindowTitle(_translate("ZhkuMainWindow", "MainWindow")) import qr_img_rc
2,246
744
from django.apps import AppConfig


class CalheatmapConfig(AppConfig):
    """Django application configuration for the calendar-heatmap app."""

    # Dotted module path Django uses to register this application.
    name = 'calheatmap'
95
32
import numpy as np
from pyNTCIREVAL import Labeler
from pyNTCIREVAL.metrics import MSnDCG
from collections import defaultdict

from ntcir15_tools.data import en_query_ids, ja_query_ids, en_labels, ja_labels

# Relevance-label string -> numeric relevance level.
_REL_LEVELS = {"L0": 0, "L1": 1, "L2": 2}


def get_rel_level(text):
    """Return the numeric relevance level for a label string.

    Unknown labels fall back to 0 (not relevant), matching the behaviour of
    the original if/elif chain.
    """
    return _REL_LEVELS.get(text, 0)


def get_qrels(query_id):
    """Build a {document_id: relevance_level} mapping for one query.

    The language is encoded in the second dash-separated field of the query
    id ('E' selects the English label table, anything else the Japanese one).
    """
    lang = query_id.split("-")[1]
    # NOTE(review): assert kept for interface compatibility; callers relying
    # on AssertionError would break if this became a ValueError.
    assert query_id in en_query_ids or query_id in ja_query_ids, "not valid query_id"
    labels = en_labels if lang == "E" else ja_labels
    # Select this query's rows and drop the query-id column, leaving
    # (document_id, label_text) pairs.
    rows = labels[labels[:, 0] == query_id][:, 1:]
    return {col_id: get_rel_level(text) for col_id, text in rows}
767
276
from .contrastive import SupConLoss, NoiseConLoss
50
18
# Read a temperature in Celsius and print its Fahrenheit equivalent.
celsius = int(input("Insira um valor: "))
fahrenheit = celsius * 9 / 5 + 32
print(fahrenheit)
71
35
""" filename: test.py author: Supriya Sudarshan version: 19.04.2021 description: Takes in the images and predicts (Covid or Non-Covid/Normal) using the *.h5 models """ import numpy as np import matplotlib.pyplot as plt import os from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.vgg19 import preprocess_input import random def evaluate(img_path, model): """ Given the image path and model, preprocess the input image and get predictions """ img = image.load_img(img_path, target_size=(224,224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) image_data = preprocess_input(x) y_pred = model.predict(image_data) probability = y_pred[0] if probability[0] > 0.5: prediction = str('%.2f' % (probability[0]*100) + '% COVID') else: prediction = str('%.2f' % ((1-probability[0])*100) + '% Normal') plt.title(prediction) plt.imshow(img) plt.show() if __name__ == "__main__": # Load appropriate models ct_model = load_model('../saved_models/chest_ct_vggmodel.h5') xray_model = load_model('../saved_models/chest_xray_vggmodel.h5') ultrasound_model = load_model('../saved_models/ultrasound_vggmodel.h5') ##### Predictions CT path = '../images_for_testing/CT' img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))]) print('\nPreparing to predict for a CT image: {}'.format(img)) evaluate(path + '/'+ img, ct_model) ##### Predictions Xray path = '../images_for_testing/Xray' img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))]) print('\nPreparing to predict for a Xray image: {}'.format(img)) evaluate(path + '/'+ img, xray_model) ##### Predictions Ultrasound path = '../images_for_testing/Ultrasound' img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))]) print('\nPreparing to predict for a ultrasound image: {}'.format(img)) evaluate(path + '/'+ img, ultrasound_model)
2,145
769
"""Problem 41 of https://projecteuler.net""" from itertools import permutations from projecteuler.inspectors import is_prime def problem_41(): """Solution to problem 41.""" # All 8 and 9 digit pandigitals are divisible by 3. perms = [int(''.join(x)) for x in permutations('1234567')] return max(x for x in perms if is_prime(x))
348
128
import re
import numpy as np
from tqdm import tqdm
from ..decorators import print_step
from multiprocessing import Pool

# Compiling for optimization: patterns are hoisted to module scope so the
# hot loops / multiprocessing workers don't rebuild them per message.
re_sub_1 = re.compile(r"(:(?=\s))|((?<=\s):)")  # ':' directly before/after whitespace
re_sub_2 = re.compile(r"(\d+\.)+\d+")  # dotted numerics (versions, IPs, floats)
re_sub_3 = re.compile(r"\d{2}:\d{2}:\d{2}")  # hh:mm:ss timestamps
re_sub_4 = re.compile(r"Mar|Apr|Dec|Jan|Feb|Nov|Oct|May|Jun|Jul|Aug|Sep")  # month abbreviations
re_sub_5 = re.compile(r":?(\w+:)+")  # 'key:' / 'a:b:' style prefixes
re_sub_6 = re.compile(r"\.|\(|\)|\<|\>|\/|\-|\=|\[|\]")  # punctuation -> space
# Any character outside A-Za-z.  NOTE(review): '(' and ')' are literal
# members of this character class, so tokens containing parens are also
# kept — confirm that is intended.
p = re.compile(r"[^(A-Za-z)]")


def remove_parameters(msg):
    """Strip variable parts (numbers, timestamps, month names, key prefixes,
    punctuation) from one log message, keeping only alphabetic tokens.

    Uses the precompiled module-level patterns; see remove_parameters_slower
    for the equivalent un-hoisted version.
    """
    # Removing parameters with Regex
    msg = re.sub(re_sub_1, "", msg)
    msg = re.sub(re_sub_2, "", msg)
    msg = re.sub(re_sub_3, "", msg)
    msg = re.sub(re_sub_4, "", msg)
    msg = re.sub(re_sub_5, "", msg)
    msg = re.sub(re_sub_6, " ", msg)
    L = msg.split()
    # Filtering strings that have non-letter tokens
    new_msg = [k for k in L if not p.search(k)]
    msg = " ".join(new_msg)
    return msg


def remove_parameters_slower(msg):
    """Same transformation as remove_parameters, but compiling every pattern
    on each call — kept for benchmarking/comparison."""
    # Removing parameters with Regex
    msg = re.sub(r"(:(?=\s))|((?<=\s):)", "", msg)
    msg = re.sub(r"(\d+\.)+\d+", "", msg)
    msg = re.sub(r"\d{2}:\d{2}:\d{2}", "", msg)
    msg = re.sub(r"Mar|Apr|Dec|Jan|Feb|Nov|Oct|May|Jun|Jul|Aug|Sep", "", msg)
    msg = re.sub(r":?(\w+:)+", "", msg)
    msg = re.sub(r"\.|\(|\)|\<|\>|\/|\-|\=|\[|\]", " ", msg)
    L = msg.split()
    p = re.compile("[^(A-Za-z)]")
    # Filtering strings that have non-letter tokens
    new_msg = [k for k in L if not p.search(k)]
    msg = " ".join(new_msg)
    return msg


@print_step
def process_logs(input_source, output, process_line=None):
    """Apply *process_line* to every line of *input_source* in parallel and
    write the transformed lines to *output*.

    process_line must be a picklable top-level callable (it is shipped to a
    multiprocessing Pool); Pool.imap preserves input order.
    """
    with open(output, "w", encoding='latin-1') as f:
        # counting first to show progress with tqdm
        with open(input_source, 'r', encoding='latin-1') as IN:
            line_count = sum(1 for line in IN)
        with open(input_source, 'r', encoding='latin-1') as IN:
            with Pool() as pool:
                results = pool.imap(process_line, IN, chunksize=10000)
                f.writelines(tqdm(results, total=line_count))


@print_step
def load_logs(params, ignore_unlabeled=False):
    """Load a labelled log file into (x_data, y_data, target_names).

    Each line is '<label> <token> <token> ...'.  The healthy/unlabeled label
    (params['healthy_label']) is mapped to -1.0; every other label gets an
    integer id in order of first appearance.

    NOTE(review): the -1.0 entry also counts toward len(label_dict), so if
    the unlabeled label is seen first, the first real label gets id 1, not
    0 — confirm downstream code expects that.
    """
    log_path = params['logs']
    unlabel_label = params['healthy_label']
    x_data = []
    y_data = []
    label_dict = {}
    target_names = []
    # First pass only counts lines so tqdm can show a real progress bar.
    with open(log_path, 'r', encoding='latin-1') as IN:
        line_count = sum(1 for line in IN)
    with open(log_path, 'r', encoding='latin-1') as IN:
        for line in tqdm(IN, total=line_count):
            L = line.strip().split()
            label = L[0]
            if label not in label_dict:
                if ignore_unlabeled and label == unlabel_label:
                    continue
                if label == unlabel_label:
                    label_dict[label] = -1.0
                elif label not in label_dict:  # NOTE(review): redundant — the outer guard already guarantees this
                    label_dict[label] = len(label_dict)
                    target_names.append(label)
            x_data.append(" ".join(L[1:]))
            y_data.append(label_dict[label])
    x_data = np.array(x_data)
    y_data = np.array(y_data)
    return x_data, y_data, target_names
3,064
1,172
import os
from dataset.data_config import DataConfig

# Root directory holding all COCO-format image datasets; each config below
# resolves its annotation JSON relative to ../../../data/.
images_data_base_dir = os.path.abspath('../../../data/datasets_coco/')

# Default decay config: train on web-collected images, evaluate on the
# legacy set.
data_conf = {
    DataConfig.IMAGE_BASEDIR: images_data_base_dir,
    DataConfig.TRAIN: [
        {
            DataConfig.NICKNAME: 'decay_train',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/web_decay_600-5.json')
        }
    ],
    DataConfig.EVAL: [
        {
            DataConfig.NICKNAME: 'decay_eval',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/legacy_decay-3.json')
        }
    ]
}

# Tooth-class-only annotations (the '-tooth' JSON variants).
data_conf_tooth_only = {
    DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
    DataConfig.TRAIN: [
        {
            DataConfig.NICKNAME: 'decay_train',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/web_decay_600-6-tooth.json')
        }
    ],
    DataConfig.EVAL: [
        {
            DataConfig.NICKNAME: 'decay_eval',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/legacy_decay-7-tooth.json')
        }
    ]
}

# Train and eval share the same legacy-tooth annotation file — '_of'
# presumably means "overfit" sanity check; TODO confirm with the authors.
data_conf_tooth_legacy_of = {
    DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
    DataConfig.TRAIN: [
        {
            DataConfig.NICKNAME: 'decay_train',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/legacy_decay-7-tooth.json')
        }
    ],
    DataConfig.EVAL: [
        {
            DataConfig.NICKNAME: 'decay_eval',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/legacy_decay-7-tooth.json')
        }
    ]
}

# Same overfit-style setup on the web-tooth annotations.
data_conf_tooth_web_of = {
    DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
    DataConfig.TRAIN: [
        {
            DataConfig.NICKNAME: 'decay_train',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/web_decay_600-6-tooth.json')
        }
    ],
    DataConfig.EVAL: [
        {
            DataConfig.NICKNAME: 'decay_eval',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/web_decay_600-6-tooth.json')
        }
    ]
}

# Lesion-class-only annotations.
data_conf_lesion_only = {
    DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
    DataConfig.TRAIN: [
        {
            DataConfig.NICKNAME: 'decay_train',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/web_decay_600-9-lesion.json')
        }
    ],
    DataConfig.EVAL: [
        {
            DataConfig.NICKNAME: 'decay_eval',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/legacy_decay-8-lesion.json')
        }
    ]
}

# Gingivitis-class-only annotations (nicknames reuse 'decay_*' — presumably
# kept for pipeline compatibility; verify before renaming).
data_conf_gingivitis_only = {
    DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
    DataConfig.TRAIN: [
        {
            DataConfig.NICKNAME: 'decay_train',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/gingivitis_web_490-13-ging.json')
        }
    ],
    DataConfig.EVAL: [
        {
            DataConfig.NICKNAME: 'decay_eval',
            DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
                                              'coco_stack_out/legacy_decay-14-ging.json')
        }
    ]
}
4,494
1,444
from django.urls import reverse
from projectroles.tests.test_permissions import TestProjectPermissionBase
from beaconsite.tests.factories import ConsortiumFactory, SiteFactory


class UsersMixin:
    """Shared fixture: creates a consortium and site and partitions the
    base class's users into those allowed access (200) and those expected
    to be redirected away (302)."""

    def setUp(self):
        super().setUp()
        self.consortium = ConsortiumFactory()
        self.site = SiteFactory()
        # Only superusers may access any beaconsite view.
        self.good_users = [
            self.superuser,
        ]
        # All other roles — including project owners and delegates — are
        # redirected.
        self.bad_users = [
            self.anonymous,
            self.user_no_roles,
            self.owner_as_cat.user,
            self.owner_as.user,
            self.delegate_as.user,
            self.contributor_as.user,
            self.guest_as.user,
        ]


class TestIndexView(UsersMixin, TestProjectPermissionBase):
    """Permission checks for the beaconsite index view."""

    def test_index(self):
        url = reverse("beaconsite:index")
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)


class TestConsortiumViews(UsersMixin, TestProjectPermissionBase):
    """Permission checks for consortium list/detail/update/delete views."""

    def test_list(self):
        url = reverse("beaconsite:consortium-list")
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)

    def test_detail(self):
        url = reverse(
            "beaconsite:consortium-detail",
            kwargs={"consortium": str(self.consortium.sodar_uuid)}
        )
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)

    def test_update(self):
        url = reverse(
            "beaconsite:consortium-update",
            kwargs={"consortium": str(self.consortium.sodar_uuid)}
        )
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)

    def test_delete(self):
        url = reverse(
            "beaconsite:consortium-delete",
            kwargs={"consortium": str(self.consortium.sodar_uuid)}
        )
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)


class TestSiteViews(UsersMixin, TestProjectPermissionBase):
    """Permission checks for site list/detail/update/delete views."""

    def test_list(self):
        url = reverse("beaconsite:site-list")
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)

    def test_detail(self):
        url = reverse("beaconsite:site-detail", kwargs={"site": str(self.site.sodar_uuid)})
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)

    def test_update(self):
        url = reverse("beaconsite:site-update", kwargs={"site": str(self.site.sodar_uuid)})
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)

    def test_delete(self):
        url = reverse("beaconsite:site-delete", kwargs={"site": str(self.site.sodar_uuid)})
        self.assert_response(url, self.good_users, 200)
        self.assert_response(url, self.bad_users, 302)
2,905
983
from django.forms import ModelForm
from backend.models import Image, Image2
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms

# Widget attrs shared by every styled field (Bootstrap form control).
_BOOTSTRAP_ATTRS = {'class': 'form-control', }


class CreateUserForm(UserCreationForm):
    """User registration form with Bootstrap-styled input widgets."""

    email = forms.EmailField(
        widget=forms.TextInput(attrs=dict(_BOOTSTRAP_ATTRS)),
    )
    username = forms.CharField(
        widget=forms.TextInput(attrs=dict(_BOOTSTRAP_ATTRS)),
    )
    password1 = forms.CharField(
        widget=forms.PasswordInput(attrs=dict(_BOOTSTRAP_ATTRS)),
    )
    password2 = forms.CharField(
        widget=forms.PasswordInput(attrs=dict(_BOOTSTRAP_ATTRS)),
    )

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']


class ImageForm(ModelForm):
    """ModelForm exposing every field of Image."""

    class Meta:
        model = Image
        fields = "__all__"


class ImageForm2(ModelForm):
    """ModelForm exposing every field of Image2."""

    class Meta:
        model = Image2
        fields = "__all__"
965
286
"""
Created on Mar 7 2018

@author: MCC
"""
from ctypes import (CDLL, CFUNCTYPE, Structure, c_uint, c_int, c_longlong,
                    POINTER, c_double, c_char, py_object, c_ulonglong, cast,
                    c_char_p, c_byte)
from enum import IntEnum
from .ul_structs import DaqDeviceDescriptor, AiQueueElement, TransferStatus
from .ul_structs import DaqInChanDescriptor, MemDescriptor, DaqOutChanDescriptor, EventCallbackArgs
from .ul_enums import DaqEventType
from sys import platform

# Load the native uldaq shared library (.dylib on macOS, .so elsewhere).
if platform.startswith('darwin'):
    lib = CDLL('libuldaq.dylib')
else:
    lib = CDLL('libuldaq.so')


#
# Structures
#

class EventParams(Structure):
    """ctypes struct threaded through the C event callback so the Python
    user callback and its user data survive the round trip through C."""
    _fields_ = [("user_data", py_object),  # the user data
                ("user_callback", py_object),  # Python callable invoked when the event fires
                ]


#
# Enums
#
# NOTE: the trailing comma on each member makes its value a 1-tuple, which
# Enum unpacks as arguments to int() — the numeric values are unaffected.

class UlInfoItem (IntEnum):
    """UL version information."""
    VER_STR = 2000,  #: UL version number
    IP_ADDR_STR = 2001,  #: Returns the IP address of the Ethernet DAQ device
    NET_IFC_STR = 2002,  #: Returns the name of the network interface which is used to connect to the Ethernet DAQ device


class DevItemInfo (IntEnum):
    """Device information types"""
    HAS_AI_DEV = 1,  #: The DAQ device has an analog input subsystem.
    HAS_AO_DEV = 2,  #: The DAQ device has an analog output subsystem.
    HAS_DIO_DEV = 3,  #: The DAQ device has a Digital I/O subsystem.
    HAS_CTR_DEV = 4,  #: The DAQ device has a counter input subsystem.
    HAS_TMR_DEV = 5,  #: The DAQ device has a timer output subsystem.
    HAS_DAQI_DEV = 6,  #: The DAQ device has a DAQ input subsystem.
    HAS_DAQO_DEV = 7,  #: The DAQ device has an DAQ output subsystem.
    DAQ_EVENT_TYPES = 8,  #: Event types supported by the DAQ device
    MEM_REGIONS = 9,  #: Memory regions supported by the DAQ device


class DevConfigItem (IntEnum):
    """Device Configuration Items"""
    HAS_EXP = 1,  #: The DAQ device has an expansion board attached.
    CONNECTION_CODE = 2,  #: Connection code of the Ethernet DAQ device.
    MEM_UNLOCK_CODE = 3,  #: Memory unlock code.
    RESET = 4,  #: Resets the DAQ device.
class AiInfoItem (IntEnum): """Use with ulAIGetInfo() to obtain AI subsystem information.""" RESOLUTION = 1, #: The A/D resolution in number of bits. NUM_CHANS = 2, #: The number of A/D channels on the specified device. NUM_CHANS_BY_MODE = 3, #: The number of A/D channels for the specified channel mode. NUM_CHANS_BY_TYPE = 4, #: The number of A/D channels for the specified channel type. CHAN_TYPES = 5, #: A bitmask of supported :func:'~ul_daq.AiChanType' values. SCAN_OPTIONS = 6, #: A bitmask of supported :func:'~ul_daq.ScanOption' values. HAS_PACER = 7, #: Paced operations are supported. NUM_DIFF_RANGES = 8, #: A number of supported :func:'~ul_daq.Range' values for differential mode operations. NUM_SE_RANGES = 9, #: A number of supported :func:'~ul_daq.Range' values for single-ended mode operations. DIFF_RANGE = 10, #: The :func:'~ul_daq.Range' for the specified differential range index. SE_RANGE = 11, #: The :func:'~ul_daq.Range' for the specified single-ended range index. TRIG_TYPES = 12, #: A bitmask of supported :func:'~ul_daq.TriggerType' values. MAX_QUEUE_LENGTH_BY_MODE = 13, #: The maximum length of the queue for the specified channel mode. QUEUE_TYPES = 14, #: A bitmask of supported :func:'~ul_daq.AiQueueType' values supported for the specified device. QUEUE_LIMITS = 15, #: A bitmask of supported :func:'~ul_daq.AiChanQueueLimitation' values. FIFO_SIZE = 16, #: FIFO size in bytes. IEPE_SUPPORTED = 17, #: Returns a zero or non-zero value to the infoValue argument. If non-zero, IEPE mode is supported. class AiInfoItemDbl (IntEnum): """Use with ulAIGetInfoDbl() to obtain AI subsystem information.""" MIN_SCAN_RATE = 1000, #: The minimum scan rate of the specified device. MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device. MAX_THROUGHPUT = 1002, #: The maximum throughput in samples per second of the specified device. MAX_BURST_RATE = 1003, #: The maximum scan rate in samples per second when using :func:'~ul_daq.ScanOption.SO_BURSTIO' mode. 
MAX_BURST_THROUGHPUT = 1004, #: The maximum throughput in samples per second when using :func:'~ul_daq.ScanOption.SO_BURSTIO' mode. class AiConfigItem (IntEnum): """Use with ulSetConfig() and ulGetConfig() to perform configuration operations on the AI subsystem.""" CHAN_TYPE = 1, #: The channel type of the specified channel. Set with :func:'~ul_daq.AiChanType'. CHAN_TC_TYPE = 2, #: The thermocouple type of the specified channel. Set with :func:'~ul_daq.TcType'. CHAN_TEMP_UNIT = 3, #: The temperature unit of the specified channel. Set with :func:'~ul_daq.TempUnit'. TEMP_UNIT = 4, #: The temperature unit for the specified device. Set with :func:'~ul_daq.AiChanType'. ADC_TIMING_MODE = 5, #: The timing mode. Set with :func:'~ul_daq.AdcTimingMode'. AUTO_ZERO_MODE = 6, #: The auto zero mode. Set with :func:'~ul_daq.AutoZeroMode'. CAL_DATE = 7, #: The date when the device was calibrated last. #: The IEPE current excitation mode for the specified channel. Set with :func:'~ul_daq.IepeMode'. CHAN_IEPE_MODE = 8, CHAN_COUPLING_MODE = 9, #: The coupling mode for the specified device. Set with :func:'~ul_daq.CouplingMode'. CHAN_SENSOR_CONNECTION_TYPE = 10, #: The connection type of the sensor connected to the specified channel. CHAN_OTD_MODE = 11, #: The open thermocouple detection mode for the specified channel. Set with :func:'~ul_daq.OtdMode'. OTD_MODE = 12, #: The open thermocouple detection mode. CAL_TABLE_TYPE = 13, #: The calibration table type. REJECT_FREQ_TYPE = 14, #: The rejection frequency type. #: The date when the expansion board was calibrated last in UNIX Epoch time. #: Set index to 0 for the factory calibration date, or 1 for the field #: calibration date. If the value read is not a valid date or the index is #: invalid, 0 (Unix Epoch) is returned. EXP_CAL_DATE = 15, class AiConfigItemDbl (IntEnum): """Use with ulSetConfigDbl() and ulGetConfigDbl() to perform configuration operations on the AI subsystem. 
""" CHAN_SLOPE = 1000, #: The custom slope of the specified channel. CHAN_OFFSET = 1001, #: The custom offset of the specified channel. CHAN_SENSOR_SENSIVITY = 1002, #: The sensitivity of the sensor connected to the specified channel. CHAN_DATA_RATE = 1003, #: The data rate of the specified channel. class AiConfigItemStr(IntEnum): #: Calibration date CAL_DATE = 2000, #: The channel coefficients used for the configured sensor. CHAN_COEFS = 2001, #: Returns the calibration date of expansion board. Set index to 0 for the #: factory calibration date, or 1 for the field calibration date. #: If the value read is not a valid date or the index is invalid, #: Unix Epoch is returned. EXP_CAL_DATE_STR = 2002, class DioInfoItem (IntEnum): """Use with ulDIOGetInfo() to obtain information about the DIO subsystem.""" NUM_PORTS = 1, #: The number of ports on the specified device. PORT_TYPE = 2, #: The port type for the specified port index. PORT_IO_TYPE = 3, #: The #DigitalPortIoType for the specified port index. NUM_BITS = 4, #: The number of bits on the port specified by the port index. HAS_PACER = 5, #: Paced operations are supported for the specified digital direction. SCAN_OPTIONS = 6, #: A bit mask of supported :func:'~ul_daq.ScanOption' values for the specified digital direction. TRIG_TYPES = 7, #: A bitmask of supported :func:'~ul_daq.TriggerType' values for the specified digital direction. FIFO_SIZE = 8, #: FIFO size in bytes for the specified digital direction. class DioInfoItemDbl (IntEnum): """Use with ulDIOGetInfoDbl() to obtain information about the DIO subsystem.""" MIN_SCAN_RATE = 1000, #: The minimum scan rate of the specified device. MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device. MAX_THROUGHPUT = 1002, #: The maximum scanning throughput of the specified device. class DioConfigItem (IntEnum): """ Use with ulDIOGetConfig() to obtain information about the DIO subsystem configuration. """ #: The port direction. 
Set with :func:'~ul_daq.DigitalDirection'. PORT_DIRECTION_MASK = 1, #: Writes a value to the specified port number. This allows writing a value when the port is in #: input mode so that when the port is switched to output mode, the state of the bits is known. PORT_INITIAL_OUTPUT_VAL = 2, #: Returns or writes the low-pass filter setting. A 0 indicates that the filter is disabled for the #: corresponding bit. PORT_ISO_FILTER_MASK = 3, #: Returns the port logic. A 0 indicates non-invert mode, and a non-zero value indicates output inverted. PORT_LOGIC = 4, class DaqIInfoItem (IntEnum): """Use with ulDaqIGetInfo() to obtain DAQ input subsystem information.""" CHAN_TYPES = 1, #: A bitmask of supported :func:'~ul_daq.DaqInChanType' values. SCAN_OPTIONS = 2, #: A bit mask of supported :func:'~ul_daq.ScanOption' values. TRIG_TYPES = 3, #: A bitmask of supported :func:'~ul_daq.TriggerType' values. FIFO_SIZE = 4, #: FIFO size in bytes. class DaqIInfoItemDbl (IntEnum): """Use with ulDaqIGetInfoDbl() to obtain information about the counter subsystem.""" MIN_SCAN_RATE = 1000, #: The minimum scan rate in samples per second. MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device. MAX_THROUGHPUT = 1002, #: The maximum throughput of the specified device. class AoConfigItem(IntEnum): SYNC_MODE = 1, #: The sync mode. Set with AOutSyncMode. CHAN_SENSE_MODE = 2, #: The channel sense mode. Set with AOutSenseMode. class AoInfoItem (IntEnum): """Use with ulAOGetInfo() to obtain information about the analog output subsystem.""" RESOLUTION = 1, #: The D/A resolution. NUM_CHANS = 2, #: The number of D/A channels on the specified device. SCAN_OPTIONS = 3, #: A bit mask of supported :func:'~ul_daq.ScanOption; values. HAS_PACER = 4, #: Paced operations are supported. NUM_RANGES = 5, #: The number of supported :func:'~ul_daq.Range' values for D/A operations. RANGE = 6, #: The :func:'~ul_daq.Range' for the specified range index. 
TRIG_TYPES = 7, #: A bitmask of supported :func:'~ul_daq.TriggerType' values. FIFO_SIZE = 8, #: FIFO size in bytes. class AoInfoItemDbl (IntEnum): """Use with ulAOGetInfoDbl() to obtain information about the Analog output subsystem.""" MIN_SCAN_RATE = 1000, #: The minimum scan rate of the specified device. MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device. MAX_THROUGHPUT = 1002, #: The maximum scanning throughput of the specified device. class DaqoInfoItem (IntEnum): """Use with ulDaqOGetInfo() to obtain information about the DAQ output subsystem.""" CHAN_TYPES = 1, #: A bit mask of supported :class:`DaqOutChanType` values. SCAN_OPTIONS = 2, #: A bit mask of supported :class:`ScanOption` values. TRIG_TYPES = 3, #: A bit mask of supported :class:`TriggerType` values. FIFO_SIZE = 4, #: FIFO size in bytes. class DaqoInfoItemDbl (IntEnum): """Use with ulDaqOGetInfoDbl() to obtain information about the DAQ output subsystem.""" MIN_SCAN_RATE = 1000, #: The minimum scan rate in samples per second. MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device. MAX_THROUGHPUT = 1002, #: The maximum throughput of the specified device. class CtrInfoItem (IntEnum): """Use with ulCtrGetInfo() to obtain information about the counter subsystem.""" NUM_CTRS = 1, #: The number of counter channels on the specified device. MEASUREMENT_TYPES = 2, #: A bit mask of supported :class:`CounterMeasurementType` values. MEASUREMENT_MODES = 3, #: A bit mask of supported :class:`CounterMeasurementType` values. REGISTER_TYPES = 4, #: A bit mask of supported :class:`CounterRegisterType` values. RESOLUTION = 5, #: The resolution of the specified counter channel. HAS_PACER = 6, #: Paced operations are supported. SCAN_OPTIONS = 7, #: A bit mask of supported :class:`ScanOption` values. TRIG_TYPES = 8, #: A bit mask of supported :class:`TriggerType` values. FIFO_SIZE = 9, #: FIFO size in bytes. 
class CtrInfoItemDbl (IntEnum): """Use with ulCtrGetInfoDbl() to obtain information about the counter subsystem.""" MIN_SCAN_RATE = 1000, #: The minimum scan rate in samples per second. MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device. MAX_THROUGHPUT = 1002, #: The maximum throughput of the specified device. class CtrConfigItem (IntEnum): """Use with ulCtrSetConfig() and ulCtrGetConfig() to configure the Ctr subsystem.""" REG = 1, #: The counter(s) configuration register. class TmrInfoItem (IntEnum): """Use with ulTmrGetInfo() to obtain information about the timer subsystem.""" NUM_TMRS = 1, #: The :class:`TimerType` of the specified timer index. TYPE = 2, #: The number of bits on the port specified by the port index. class TmrInfoItemDbl (IntEnum): """Use with ulTmrGetInfoDbl() to obtain information about the timer subsystem.""" MIN_FREQ = 1000, #: The minimum frequency of the specified device. MAX_FREQ = 1001, #: The maximum frequency of the specified device. # Prototypes for callbacks InterfaceCallbackProcType = CFUNCTYPE(None, c_longlong, c_uint, c_ulonglong, POINTER(EventParams)) def interface_event_callback_function(handle, event_type, event_data, event_params): # type: (int, DaqEventType, py_object, py_object) -> None """Internal function used for handling event callbacks.""" event_parameters = cast(event_params, POINTER(EventParams)).contents user_data = event_parameters.user_data cb = event_parameters.user_callback cb(EventCallbackArgs(event_type, event_data, user_data)) return # Prototypes for DAQ Device lib.ulDevGetConfigStr.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_char), POINTER(c_uint)) lib.ulDevGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulGetDaqDeviceDescriptor.argtypes = (c_longlong, POINTER(DaqDeviceDescriptor)) lib.ulDevGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulGetDaqDeviceInventory.argtypes = (c_uint, POINTER(DaqDeviceDescriptor), POINTER(c_uint)) 
lib.ulConnectDaqDevice.argtypes = (c_longlong,) lib.ulEnableEvent.argtypes = (c_longlong, c_uint, c_ulonglong, InterfaceCallbackProcType, POINTER(EventParams)) lib.ulDisableEvent.argtypes = (c_longlong, c_uint) lib.ulMemRead.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_byte), c_uint) lib.ulMemWrite.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_byte), c_uint) lib.ulCreateDaqDevice.argtypes = (DaqDeviceDescriptor,) lib.ulReleaseDaqDevice.argtypes = (c_longlong,) lib.ulIsDaqDeviceConnected.argtypes = (c_longlong, POINTER(c_int)) lib.ulDisconnectDaqDevice.argtypes = (c_longlong,) lib.ulFlashLed.argtypes = (c_longlong, c_int) lib.ulGetInfoStr.argtypes = (c_uint, c_uint, POINTER(c_char), POINTER(c_uint)) lib.ulSetConfig.argtypes = (c_uint, c_uint, c_longlong) lib.ulGetConfig.argtypes = (c_uint, c_uint, POINTER(c_longlong)) lib.ulGetNetDaqDeviceDescriptor.argtypes = (c_char_p, c_uint, c_char_p, POINTER(DaqDeviceDescriptor), c_double) lib.ulDaqDeviceConnectionCode.argtypes = (c_uint, c_longlong) # Prototypes for the analog input subsystem lib.ulAIn.argtypes = (c_longlong, c_int, c_uint, c_uint, c_uint, POINTER(c_double)) lib.ulAInScan.argtypes = (c_longlong, c_int, c_int, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_double)) lib.ulAInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double) lib.ulAInLoadQueue.argtypes = (c_longlong, POINTER(AiQueueElement), c_uint) lib.ulAInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint) lib.ulAInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus)) lib.ulAISetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong) lib.ulAIGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulAISetConfigDbl.argtypes = (c_longlong, c_uint, c_uint, c_double) lib.ulAIGetConfigDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double)) lib.ulAIGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulAIGetInfoDbl.argtypes = 
(c_longlong, c_uint, c_uint, POINTER(c_double)) lib.ulAInScanStop.argtypes = (c_longlong,) lib.ulAIGetConfigStr.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_char), POINTER(c_uint)) lib.ulTIn.argtypes = (c_longlong, c_int, c_uint, c_uint, POINTER(c_double)) lib.ulTInArray.argtypes = (c_longlong, c_int, c_int, c_uint, c_uint, POINTER(c_double)) # Prototypes for the analog output subsystem lib.ulAOut.argtypes = (c_longlong, c_int, c_uint, c_uint, c_double) lib.ulAOutScan.argtypes = (c_longlong, c_int, c_int, c_uint, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_double)) lib.ulAOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double) lib.ulAOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus)) lib.ulAOutScanStop.argtypes = (c_longlong,) lib.ulAOutSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint) lib.ulAOGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulAOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double)) lib.ulAOutArray.argtypes = (c_longlong, c_int, c_int, POINTER(c_uint), c_uint, POINTER(c_double)) # Prototypes for the DAQ input subsystem lib.ulDaqInSetTrigger.argtypes = (c_longlong, c_uint, DaqInChanDescriptor, c_double, c_double, c_uint) lib.ulDaqInScan.argtypes = (c_longlong, POINTER(DaqInChanDescriptor), c_int, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_double)) lib.ulDaqInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus)) lib.ulDaqInScanStop.argtypes = (c_longlong,) lib.ulDaqInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double) lib.ulDaqIGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulDaqIGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double)) # Prototypes for DIO subsystem lib.ulDIn.argtypes = (c_longlong, c_uint, POINTER(c_ulonglong)) lib.ulDOut.argtypes = (c_longlong, c_uint, c_ulonglong) lib.ulDBitIn.argtypes = (c_longlong, c_uint, c_int, POINTER(c_uint)) 
lib.ulDBitOut.argtypes = (c_longlong, c_uint, c_int, c_uint) lib.ulDInScan.argtypes = (c_longlong, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong)) lib.ulDOutScan.argtypes = (c_longlong, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong)) lib.ulDInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus)) lib.ulDOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus)) lib.ulDOutScanStop.argtypes = (c_longlong,) lib.ulDInScanStop.argtypes = (c_longlong,) lib.ulDInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double) lib.ulDOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double) lib.ulDInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint) lib.ulDOutSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint) lib.ulDConfigPort.argtypes = (c_longlong, c_uint, c_uint) lib.ulDConfigBit.argtypes = (c_longlong, c_uint, c_int, c_uint) lib.ulDIOGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulDIOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double)) lib.ulDIOGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulDIOSetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong) lib.ulDInArray.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_ulonglong)) lib.ulDOutArray.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_ulonglong)) # prototypes for DAQ output subsystem lib.ulDaqOutScan.argtypes = (c_longlong, POINTER(DaqOutChanDescriptor), c_int, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_double)) lib.ulDaqOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double) lib.ulDaqOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus)) lib.ulDaqOutScanStop.argtypes = (c_longlong,) lib.ulDaqOutSetTrigger.argtypes = (c_longlong, c_uint, DaqInChanDescriptor, c_double, c_double, c_uint) lib.ulDaqOGetInfo.argtypes = (c_longlong, 
c_uint, c_uint, POINTER(c_longlong)) lib.ulDaqOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double)) # prototypes for counter subsystem lib.ulCIn.argtypes = (c_longlong, c_int, POINTER(c_ulonglong)) lib.ulCRead.argtypes = (c_longlong, c_int, c_uint, POINTER(c_ulonglong)) lib.ulCLoad.argtypes = (c_longlong, c_int, c_uint, c_ulonglong) lib.ulCClear.argtypes = (c_longlong, c_int) lib.ulCConfigScan.argtypes = (c_longlong, c_int, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint) lib.ulCInScan.argtypes = (c_longlong, c_int, c_int, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong)) lib.ulCInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint) lib.ulCInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus)) lib.ulCInScanStop.argtypes = (c_longlong,) lib.ulCInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double) lib.ulCtrGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulCtrGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double)) lib.ulCtrSetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong) lib.ulCtrGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) # Prototypes for the timer subsystem lib.ulTmrPulseOutStart.argtypes = (c_longlong, c_int, POINTER(c_double), POINTER(c_double), c_ulonglong, POINTER(c_double), c_uint, c_uint) lib.ulTmrPulseOutStop.argtypes = (c_longlong, c_int) lib.ulTmrPulseOutStatus.argtypes = (c_longlong, c_int, POINTER(c_uint)) lib.ulTmrSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint) lib.ulTmrGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulTmrGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double)) # Other Prototypes lib.ulGetErrMsg.argtypes = (c_uint, POINTER(c_char)) lib.ulDevGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong)) lib.ulMemGetInfo.argtypes = (c_longlong, c_uint, POINTER(MemDescriptor))
22,851
8,589
"""
Given a Singly Linked List which has data members sorted in ascending order.
Construct a Balanced Binary Search Tree which has same data members as the
given Linked List.
"""
from typing import Optional

from binary_tree_node import Node  # type: ignore
from tree_traversal import inorder  # type: ignore


class LLNode:
    """Singly linked list node holding an int."""

    def __init__(self, data: int):
        self.data = data
        self.next: Optional[LLNode] = None


def ll_size(head: Optional[LLNode]) -> int:
    """Return the number of nodes in the list starting at *head*."""
    count = 0
    node = head
    while node is not None:
        node = node.next
        count += 1
    return count


def sorted_ll_to_bst(head: Optional[LLNode]) -> Optional[Node]:
    """Build a height-balanced BST from an ascending-sorted linked list.

    Uses the bottom-up O(n) scheme: the shared *head* cursor advances in
    list order while the recursion carves out subtree sizes, so each list
    node is visited exactly once.
    """

    def build(n: int) -> Optional[Node]:
        # Consume *n* nodes from the shared cursor; the left subtree is
        # built first so the cursor lands exactly on this subtree's root.
        nonlocal head
        if n == 0 or head is None:
            return None
        left_subtree = build(n // 2)
        root = Node(head.data)
        head = head.next
        root.left = left_subtree
        root.right = build(n - n // 2 - 1)
        return root

    return build(ll_size(head))


if __name__ == "__main__":
    # Demo: 3-node and 7-node sorted lists, printed via inorder traversal
    # (which must reproduce the sorted input order).
    for values in ([1, 2, 3], [1, 2, 3, 4, 5, 6, 7]):
        head = LLNode(values[0])
        tail = head
        for v in values[1:]:
            tail.next = LLNode(v)
            tail = tail.next
        inorder(sorted_ll_to_bst(head))
        print()
1,478
493
import numpy as np
import matplotlib.pyplot as plt
from modules.conversions import enu2uvw

# East/north antenna positions, stored as rows 0 and 1 of the array file.
data = np.load("uv-array.npy")
e = data[0, :].transpose()
n = data[1, :].transpose()

# Sample the (u, v) track over 120 hour-angle steps (step/30 hours each)
# at 1.690 m wavelength, declination 0, phase centre at declination -30.
uvarray = []
for step in range(120):
    u, v = enu2uvw(
        wavelength=1.690,
        hour_angle=step / 30,
        declination=0,
        ref_declination=-30,
        ref_hour_angle=0,
        e=e,
        n=n,
    )
    uvarray.append((u, v))

np.save("uv-coverage.npy", uvarray)
556
215
# Author: Bishal Sarang
import json
import os
import pickle
import time

import bs4
import colorama
import requests
from colorama import Back, Fore
from ebooklib import epub
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from utils import *

import epub_writer

# Initialize Colorama
colorama.init(autoreset=True)

options = Options()
options.headless = True
# Disable Warning, Error and Info logs
# Show only fatal errors
options.add_argument("--log-level=3")
driver = webdriver.Chrome(options=options)

# Get upto which problem it is already scraped from track.conf file
completed_upto = read_tracker("track.conf")

# Load chapters list that stores chapter info from previous runs
# (resume support: new chapters get appended to this list).
with open('chapters.pickle', 'rb') as f:
    chapters = pickle.load(f)


def download(problem_num, url, title, solution_slug):
    """Scrape one problem page and persist it.

    Appends the rendered problem statement to ``out.html``, creates an
    epub chapter for it, checkpoints the chapter list to
    ``chapters.pickle`` and records progress in ``track.conf``.

    Args:
        problem_num: index of the problem in the sorted link list.
        url: full problem URL on leetcode.com.
        title: display title ("<id>. <name>").
        solution_slug: article slug (currently unused here).
    """
    print(
        Fore.BLACK + Back.CYAN + f"Fetching problem num " + Back.YELLOW + f" {problem_num} " + Back.CYAN + " with url " + Back.YELLOW + f" {url} ")
    n = len(title)
    try:
        driver.get(url)
        # Wait 20 secs or until div with id initial-loading disappears
        # (page content is rendered client-side).
        WebDriverWait(driver, 20).until(
            EC.invisibility_of_element_located((By.ID, "initial-loading"))
        )
        # Get current tab page source
        html = driver.page_source
        soup = bs4.BeautifulSoup(html, "html.parser")
        # Construct HTML: title banner framed by '*' lines, then the
        # problem-statement div extracted from the rendered page.
        title_decorator = '*' * n
        problem_title_html = title_decorator + f'<div id="title">{title}</div>' + '\n' + title_decorator
        problem_html = problem_title_html + str(
            soup.find("div", {"class": "content__u3I1 question-content__JfgR"})) + '<br><br><hr><br>'
        # Append Contents to a HTML file
        with open("out.html", "ab") as f:
            f.write(problem_html.encode(encoding="utf-8"))
        # create and append chapters to construct an epub
        c = epub.EpubHtml(title=title, file_name=f'chap_{problem_num}.xhtml', lang='hr')
        c.content = problem_html
        chapters.append(c)
        # Write List of chapters to pickle file
        dump_chapters_to_file(chapters)
        # Update upto which the problem is downloaded
        update_tracker('track.conf', problem_num)
        print(
            Fore.BLACK + Back.GREEN + f"Writing problem num " + Back.YELLOW + f" {problem_num} " + Back.GREEN + " with url " + Back.YELLOW + f" {url} ")
        print(Fore.BLACK + Back.GREEN + " successfull ")
    except Exception as e:
        # BUG FIX: the original also called driver.quit() here, which
        # killed the shared browser session after a single failed problem
        # and made every remaining download in main()'s loop fail.
        # Cleanup is owned by main()'s finally block.
        print(Back.RED + f" Failed Writing!! {e} ")


def main():
    """Download all (not yet scraped) free algorithm problems into an epub."""
    MAXIMUM_NUMBER_OF_PROBLEMS_PER_INSTANCE = int(os.environ.get("MAXIMUM_NUMBER_OF_PROBLEMS", 400))
    SLEEP_TIME_PER_PROBLEM_IN_SECOND = int(os.environ.get("SLEEP_TIME_PER_PROBLEM_IN_SECOND", 5))
    # Leetcode API URL to get json of problems on algorithms categories
    ALGORITHMS_ENDPOINT_URL = "https://leetcode.com/api/problems/algorithms/"
    # Problem URL is of format ALGORITHMS_BASE_URL + question__title_slug
    # If question__title_slug = "two-sum" then URL is https://leetcode.com/problems/two-sum
    ALGORITHMS_BASE_URL = "https://leetcode.com/problems/"

    # Load JSON from API
    algorithms_problems_json = requests.get(ALGORITHMS_ENDPOINT_URL).content
    algorithms_problems_json = json.loads(algorithms_problems_json)

    # List to store question_title_slug
    links = []
    for child in algorithms_problems_json["stat_status_pairs"]:
        # Only process free problems
        if not child["paid_only"]:
            question__title_slug = child["stat"]["question__title_slug"]
            question__article__slug = child["stat"]["question__article__slug"]
            question__title = child["stat"]["question__title"]
            frontend_question_id = child["stat"]["frontend_question_id"]
            difficulty = child["difficulty"]["level"]
            links.append(
                (question__title_slug, difficulty, frontend_question_id, question__title, question__article__slug))

    has_new_problems = (completed_upto != len(links) - 1)
    if has_new_problems:
        # BUG FIX: the original CSS was missing the ';' between
        # "border-radius:3px" and "margin-top:0", breaking both rules.
        styles_str = "<style>pre{white-space:pre-wrap;background:#f7f9fa;padding:10px 15px;color:#263238;line-height:1.6;font-size:13px;border-radius:3px;margin-top:0;margin-bottom:1em;overflow:auto}b,strong{font-weight:bolder}#title{font-size:16px;color:#212121;font-weight:600;margin-bottom:10px}hr{height:10px;border:0;box-shadow:0 10px 10px -10px #8c8b8b inset}</style>"
        with open("out.html", "ab") as f:
            f.write(styles_str.encode(encoding="utf-8"))

    # Sort by difficulty follwed by problem id in ascending order
    links = sorted(links, key=lambda x: (x[1], x[2]))
    downloaded_now = 0
    try:
        for i in range(completed_upto + 1, len(links)):
            question__title_slug, _, frontend_question_id, question__title, question__article__slug = links[i]
            url = ALGORITHMS_BASE_URL + question__title_slug
            title = f"{frontend_question_id}. {question__title}"
            # Download each file as html and write chapter to chapters.pickle
            download(i, url, title, question__article__slug)
            downloaded_now += 1
            if downloaded_now == MAXIMUM_NUMBER_OF_PROBLEMS_PER_INSTANCE:
                break
            # Sleep for 5 secs for each problem and 2 mins after every 30 problems
            if i % 30 == 0:
                print(f"Sleeping 120 secs\n")
                time.sleep(120)
            else:
                print(f"Sleeping {SLEEP_TIME_PER_PROBLEM_IN_SECOND} secs\n")
                time.sleep(SLEEP_TIME_PER_PROBLEM_IN_SECOND)
    finally:
        # Close the browser after download
        driver.quit()

    try:
        if has_new_problems:
            epub_writer.write("Leetcode Questions.epub", "Leetcode Questions", "Anonymous", chapters)
            print(Back.GREEN + "All operations successful")
        else:
            print(Back.GREEN + "No new problems found. Exiting")
    except Exception as e:
        print(Back.RED + f"Error making epub {e}")


if __name__ == "__main__":
    main()
6,540
2,223
# Adapted from Magenta console commands
import os

from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model
from magenta.models.image_stylization import image_utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim


class Magenta_Model():
    """Arbitrary image stylization network wrapper (TF1 graph mode).

    Builds the stylization graph once in __init__, restores weights from a
    checkpoint directory, and keeps the session plus the placeholder and
    output tensors needed by process_data()/run().
    """

    def __init__(self, checkpoint, content_square_crop=False,
                 style_square_crop=False, style_image_size=256,
                 content_image_size=256):
        """Build the graph and restore the latest checkpoint.

        Args:
            checkpoint: directory containing TF checkpoints; the most
                recent one is restored.
            content_square_crop: center-crop content images to a square
                before resizing instead of a plain resize.
            style_square_crop: same, for style images.
            style_image_size: resize target for style images.
            content_image_size: resize target for content images.
        """
        tf.disable_v2_behavior()
        tf.Graph().as_default()
        sess = tf.Session()

        # Defines place holder for the style image.
        self.style_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
        if style_square_crop:
            # BUG FIX: the original referenced the bare name `style_img_ph`
            # here, raising NameError whenever style_square_crop=True.
            style_img_preprocessed = image_utils.center_crop_resize_image(
                self.style_img_ph, style_image_size)
        else:
            style_img_preprocessed = image_utils.resize_image(
                self.style_img_ph, style_image_size)

        # Defines place holder for the content image.
        content_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
        if content_square_crop:
            content_img_preprocessed = image_utils.center_crop_resize_image(
                content_img_ph, content_image_size)
        else:
            content_img_preprocessed = image_utils.resize_image(
                content_img_ph, content_image_size)

        # Defines the model.
        stylized_images, _, _, bottleneck_feat = build_model.build_model(
            content_img_preprocessed,
            style_img_preprocessed,
            trainable=False,
            is_training=False,
            inception_end_point='Mixed_6e',
            style_prediction_bottleneck=100,
            adds_losses=False)

        checkpoint = tf.train.latest_checkpoint(checkpoint)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint, slim.get_variables_to_restore())
        sess.run([tf.local_variables_initializer()])
        init_fn(sess)

        self.sess = sess
        self.stylized_images = stylized_images
        self.content_img_preprocessed = content_img_preprocessed
        self.style_img_preprocessed = style_img_preprocessed
        self.content_img_ph = content_img_ph
        self.bottleneck_feat = bottleneck_feat

    def process_data(self, style_images_paths, content_images_paths):
        """Load images matched by the globs and precompute identity params.

        NOTE(review): only the content/style pair from the LAST loop
        iteration survives on `self` (each iteration overwrites the
        previous one), so a following run() stylizes only that last pair.
        Confirm this is the intended single-pair workflow.

        Args:
            style_images_paths: glob pattern for style images.
            content_images_paths: glob pattern for content images.
        """
        # Gets the list of the input images.
        style_img_list = tf.gfile.Glob(style_images_paths)
        content_img_list = tf.gfile.Glob(content_images_paths)

        for content_i, content_img_path in enumerate(content_img_list):
            # Keep only the RGB channels; strip the 4-char extension for
            # the display/file name.
            content_img_np = image_utils.load_np_image_uint8(content_img_path)[:, :, :3]
            content_img_name = os.path.basename(content_img_path)[:-4]

            # NOTE(review): this preprocessed image is computed but never
            # used or saved (the original comment claimed it was saved).
            inp_img_croped_resized_np = self.sess.run(
                self.content_img_preprocessed,
                feed_dict={self.content_img_ph: content_img_np})

            # Computes bottleneck features of the style prediction network
            # for the identity transform (content image used as style).
            identity_params = self.sess.run(
                self.bottleneck_feat,
                feed_dict={self.style_img_ph: content_img_np})

            for style_i, style_img_path in enumerate(style_img_list):
                style_img_name = os.path.basename(style_img_path)[:-4]
                style_image_np = image_utils.load_np_image_uint8(style_img_path)[:, :, :3]

                self.content_img_np = content_img_np
                self.style_image_np = style_image_np
                self.identity_params = identity_params
                self.style_img_name = style_img_name
                self.content_img_name = content_img_name

    def run(self, output_dir, interpolation_weights):
        """Stylize the stored content image and write one JPEG per weight.

        Linearly interpolates the style-prediction bottleneck between the
        identity parameters (weight 0) and the style parameters (weight 1).

        Args:
            output_dir: directory the stylized JPEGs are written to.
            interpolation_weights: iterable of floats in [0, 1].
        """
        style_params = self.sess.run(
            self.bottleneck_feat,
            feed_dict={self.style_img_ph: self.style_image_np})

        for interp_i, wi in enumerate(interpolation_weights):
            stylized_image_res = self.sess.run(
                self.stylized_images,
                feed_dict={
                    self.bottleneck_feat:
                        self.identity_params * (1 - wi) + style_params * wi,
                    self.content_img_ph: self.content_img_np
                })

            # Saves stylized image.
            image_utils.save_np_image(
                stylized_image_res,
                os.path.join(output_dir, '%s_stylized_%s_%d.jpg' % \
                             (self.content_img_name, self.style_img_name, interp_i)))


magenta_model = Magenta_Model("/mnt/disks/ssd_disk/final/models/",
                              content_square_crop=False,
                              style_square_crop=False,
                              style_image_size=256,
                              content_image_size=256)
# NOTE(review): style_images_paths points at the content_images directory —
# presumably it should reference the style images; confirm before use.
magenta_model.process_data(style_images_paths="/mnt/disks/ssd_disk/final/data/content_images/*",
                           content_images_paths="/mnt/disks/ssd_disk/final/data/content_images/*")
magenta_model.run("/mnt/disks/ssd_disk/final/tmp/", [0., 1.])
4,985
1,606