index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
994,800 | c088123e9e001371e3b9094c9bf701dcccfd8cec | import re, sys
import pandas as pd
import numpy as np
class gaf:
    """A simple class for reading or generating GAF (Gene Association File) 2.x files."""

    # Annotation table (one row per association). Class-level default;
    # methods rebind it on the instance.
    annotations = pd.DataFrame()

    def read_gaf(self, infile):
        """Read a GAF 2.x file into self.annotations, columns in spec order.

        Exits the process if the file does not carry a gaf-version 2.x header.
        """
        # Context manager so the handle is closed even on error
        # (the original leaked the file descriptor).
        with open(infile, "r") as gaf_file:
            lines = gaf_file.readlines()
        if self.check_gaf_version(lines):
            self.annotations = self.get_annotations_from_gaf(lines)
            # reindex_axis() was removed from pandas; reindex(columns=...)
            # is the supported equivalent.
            self.annotations = self.annotations.reindex(columns=self.gaf_2_x_fields)
        else:
            sys.exit("The input is not a gaf file.\nPlease check the input file for errors")

    def check_gaf_version(self, lines):
        """Return True if a '!gaf-version: 2.x' header appears in the first 20 lines.

        Fixes two defects of the original: it tested len() of the mapped
        list (which counted comment lines, not regex matches, so any
        commented file passed) and it sliced lines[1:20], skipping line 0
        where the version header conventionally lives.
        """
        comment_lines = [s for s in lines[:20] if "!" in s]
        pattern = re.compile(r'!gaf-version\s*:\s*2\..')
        return any(pattern.search(s) for s in comment_lines)

    def get_annotations_from_gaf(self, lines):
        """Parse non-comment, tab-separated annotation rows into a DataFrame."""
        tmp_annotations = []
        col_ids = self.gaf_2_x_fields
        for index, line in enumerate(lines):
            # Header/comment lines start with '!'. The original skipped any
            # line *containing* '!', which could drop valid data rows.
            if line.startswith("!"):
                continue
            col_data = line.strip("\n").split("\t")
            tmp_annotations.append(dict(zip(col_ids, col_data)))
            if index % 100 == 0:
                print("Processed %s lines" % index)
        return pd.DataFrame.from_dict(tmp_annotations, "columns")

    def write_gaf(self, outfile="test.out", col_name="with", th=0):
        """Write rows where annotations[col_name] > th to outfile as GAF 2.0.

        Returns True on success, or None (after reporting the missing
        columns on stderr) when the table lacks the 17 GAF 2.0 columns.
        """
        if self.annotations.shape[1] < 17:
            sys.stderr.write("Number of columns do not match gaf 2.0 format\n")
            need_cols = set(self.gaf_2_x_fields).difference(self.annotations.columns)
            sys.stderr.write("%s\n" % ", ".join(need_cols))
            return None
        with open(outfile, "w+") as gaf_out:
            gaf_out.write("!gaf-version:2.0\n")
            gaf_out.write("!%s\n" % "\t".join(self.gaf_2_x_fields))
        # Append the filtered rows below the header just written.
        self.annotations[self.annotations[col_name] > th].to_csv(
            outfile, sep="\t", header=False, index=False, mode="a")
        return True

    def write_gaf_head(self, outfile="test.out"):
        """Write only the GAF 2.0 header lines to outfile."""
        with open(outfile, "w") as gaf_out:
            gaf_out.write("!gaf-version:2.0\n")
            gaf_out.write("!%s\n" % "\t".join(self.gaf_2_x_fields))

    def clear_annotations(self):
        """Reset the annotation table to an empty DataFrame."""
        self.annotations = pd.DataFrame()

    def add_annotation(self, in_gaf_2_x):
        """Append one annotation (dict mapping field name -> value)."""
        in_gaf_2_x_df = pd.DataFrame.from_dict([in_gaf_2_x], "columns")
        # DataFrame.append() was removed in pandas 2.0; concat is the
        # supported equivalent.
        self.annotations = pd.concat([self.annotations, in_gaf_2_x_df])

    def add_col_all(self, col_name, value):
        """Fill column col_name with `value` repeated for every existing row."""
        num_annot = len(self.annotations)
        if num_annot == 0:
            sys.stderr.write("No annotations, please add some valid data before filling in other columns\n")
            # Level 2 marks the fields required to seed a GAF (see
            # gaf_2_x_default). Avoids the original's shadowing of `value`.
            valid_cols = ", ".join(
                tag for tag, level in self.gaf_2_x_default.items() if level == 2)
            sys.stderr.write("%s\n" % valid_cols)
        else:
            col_series = pd.Series(np.repeat(value, num_annot), name=col_name)
            self.add_col(col_series)

    def add_col(self, col):
        """Add or replace a column given a named pandas Series."""
        if len(self.annotations) == 0:
            self.annotations = pd.DataFrame(col)
        elif col.name in self.annotations:
            self.annotations[col.name] = col
        else:
            self.annotations = pd.concat([self.annotations, col], axis=1)

    def init_annotations(self, cols):
        """Seed the table from {'gene': ..., 'go': ...} column data."""
        if len(self.annotations) > 0:
            sys.stderr.write("This GAF has already been initialized\n")
        else:
            db_object_id = pd.Series(cols["gene"], name="db_object_id")
            db_object_symbol = pd.Series(cols["gene"], name="db_object_symbol")
            term_accession = pd.Series(cols["go"], name="term_accession")
            self.annotations = pd.concat(
                [db_object_id, db_object_symbol, term_accession], axis=1)

    def add_aspect(self, obo):
        """Add the GO 'aspect' column (P/F/C, or 'N' when unknown) from an OBO object.

        `obo.terms` is assumed to be an iterable of dicts with 'id' and
        'namespace' keys — TODO confirm against the obo reader used.
        """
        if len(self.annotations) == 0:
            sys.stderr.write("This GAF has not been initialized\n")
        else:
            go_list = self.annotations["term_accession"]
            aspects = {"biological_process": "P", "molecular_function": "F", "cellular_component": "C"}
            go_aspect = {term['id']: aspects[term['namespace']] for term in obo.terms}
            aspect = [go_aspect.get(go, "N") for go in go_list]
            self.add_col(pd.Series(aspect, name="aspect"))

    def drop_col(self, col_name):
        """Drop a column by name."""
        self.annotations = self.annotations.drop(col_name, axis=1)

    def reorder_cols(self):
        """Reorder columns into canonical GAF 2.x field order."""
        self.annotations = self.annotations.reindex(columns=self.gaf_2_x_fields)

    # Canonical GAF 2.x column order.
    gaf_2_x_fields = ["db", "db_object_id", "db_object_symbol", "qualifier", "term_accession", "db_reference", "evidence_code", "with", "aspect", "db_object_name", "db_object_synonym", "db_object_type", "taxon", "date", "assigned_by", "annotation_extension", "gene_product_form_id"]
    # Field requirement level: 2 = required to seed a GAF, 1 = required,
    # 0 = optional.
    gaf_2_x_default = {
        "db": 1,
        "db_object_id": 2,
        "db_object_symbol": 2,
        "qualifier": 0,
        "term_accession": 2,
        "db_reference": 1,
        "evidence_code": 1,
        "with": 0,
        "aspect": 2,
        "db_object_name": 0,
        "db_object_synonym": 0,
        "db_object_type": 1,
        "taxon": 1,
        "date": 1,
        "assigned_by": 1,
        "annotation_extension": 0,
        "gene_product_form_id": 0
    }
|
994,801 | f99bdb6f74033c804cab7f14c2851d0332ce2289 | if __name__ == '__main__':
    # Entry point: import the bot lazily and hand control to its run loop.
    from kaa import bot
    bot.run()
|
994,802 | 924122a604ce0aa26e735dd7019a23d1129b5bfa | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Pubmed dataset."""
from inspect import EndOfBlock
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """
PubMed articles.
There are three features:
- src: source dialog.
- src_amr: source AMR graph.
- tgt: summary.
- src_amr: target AMR graph.
"""
_SRC = "src"
_TGT = "tgt"
class AMRData(datasets.GeneratorBasedBuilder):
    """DualSumm Dataset.

    Dataset builder that yields (src, tgt) string pairs from JSON-lines
    files supplied via `config.data_files`.
    """

    # Version 1.2.0 expands coverage, includes ids, and removes web contents.
    VERSION = datasets.Version("1.2.0")

    def _info(self):
        """Declare the dataset schema: two plain-string features, src and tgt."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _SRC: datasets.Value("string"),
                    _TGT: datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Split file paths come from self.config.data_files; `dl_manager` is
        unused because nothing is downloaded.
        """
        train_path = self.config.data_files["train"]
        dev_path = self.config.data_files["validation"]
        test_path = self.config.data_files["test"]
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dev_path}
            ),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
        ]

    def _generate_examples(self, filepath):
        """Yields examples.

        Each input line is a JSON object with "src" and "tgt" keys; the
        line index serves as the example id.
        """
        logger.info("generating examples from = %s", filepath)
        with open(filepath, "r", encoding="utf-8") as f:
            lines = f.readlines()
            for idx, line in enumerate(lines):
                json_dict = json.loads(line)
                src = json_dict["src"]
                tgt = json_dict["tgt"]
                yield idx, {_SRC: src, _TGT: tgt}
994,803 | a7c0ec840fce2a37e2e950ca316b04cefae02c99 | #!/usr/bin/env python3
"""Script to create PLDM FW update package"""
import argparse
import binascii
import enum
import json
import math
import os
import struct
import sys
from datetime import datetime
from bitarray import bitarray
from bitarray.util import ba2int
string_types = dict(
[
("Unknown", 0),
("ASCII", 1),
("UTF8", 2),
("UTF16", 3),
("UTF16LE", 4),
("UTF16BE", 5),
]
)
initial_descriptor_type_name_length = {
0x0000: ["PCI Vendor ID", 2],
0x0001: ["IANA Enterprise ID", 4],
0x0002: ["UUID", 16],
0x0003: ["PnP Vendor ID", 3],
0x0004: ["ACPI Vendor ID", 4],
}
descriptor_type_name_length = {
0x0000: ["PCI Vendor ID", 2],
0x0001: ["IANA Enterprise ID", 4],
0x0002: ["UUID", 16],
0x0003: ["PnP Vendor ID", 3],
0x0004: ["ACPI Vendor ID", 4],
0x0100: ["PCI Device ID", 2],
0x0101: ["PCI Subsystem Vendor ID", 2],
0x0102: ["PCI Subsystem ID", 2],
0x0103: ["PCI Revision ID", 1],
0x0104: ["PnP Product Identifier", 4],
0x0105: ["ACPI Product Identifier", 4],
}
class ComponentOptions(enum.IntEnum):
    """
    Enum to represent ComponentOptions

    Values are bit positions within the 16-bit ComponentOptions field of a
    component image entry (see write_component_image_info_area).
    """

    ForceUpdate = 0
    # When set, get_component_comparison_stamp() reads ComponentComparisonStamp
    # from the metadata instead of defaulting to 0xFFFFFFFF.
    UseComponentCompStamp = 1
def check_string_length(string):
    """Abort the program when *string* exceeds the 255-character limit.

    Returns None when the string is acceptable.
    """
    max_length = 255
    if len(string) <= max_length:
        return
    sys.exit("ERROR: Max permitted string length is 255")
def write_pkg_release_date_time(pldm_fw_up_pkg, release_date_time):
    """Write the 13-byte PackageReleaseDateTime stamp (DSP0240 format).

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package (writable binary stream)
        release_date_time: datetime to encode
    """
    t = release_date_time.time()
    d = release_date_time.date()
    # Microseconds occupy three little-endian bytes of the stamp.
    us = t.microsecond.to_bytes(3, byteorder="little")
    fields = (
        0,              # UTC offset field, always zero here
        us[0], us[1], us[2],
        t.second, t.minute, t.hour,
        d.day, d.month, d.year,
        0,              # trailing reserved byte
    )
    pldm_fw_up_pkg.write(struct.pack("<hBBBBBBBBHB", *fields))
def write_package_version_string(pldm_fw_up_pkg, metadata):
    """Write PackageVersionStringType, length and PackageVersionString.

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package
        metadata: metadata about PLDM FW update package
    """
    version = metadata["PackageHeaderInformation"]["PackageVersionString"]
    check_string_length(version)
    # String type is hardcoded to ASCII.
    payload = struct.pack(
        "<BB%ds" % len(version),
        string_types["ASCII"],
        len(version),
        version.encode("ascii"),
    )
    pldm_fw_up_pkg.write(payload)
def write_component_bitmap_bit_length(pldm_fw_up_pkg, metadata):
    """Write ComponentBitmapBitLength and return it.

    The value is the component count rounded up to a multiple of 8, so the
    ApplicableComponents bitmap has a bit per component in whole bytes.
    At most 32 components are supported.

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package
        metadata: metadata about PLDM FW update package

    Returns:
        the bitmap length in bits
    """
    max_components = 32
    bits_per_byte = 8
    num_components = len(metadata["ComponentImageInformationArea"])
    if num_components > max_components:
        sys.exit("ERROR: only upto 32 components supported now")
    # Ceiling division, then scale back up to whole bytes' worth of bits.
    bit_length = -(-num_components // bits_per_byte) * bits_per_byte
    pldm_fw_up_pkg.write(struct.pack("<H", int(bit_length)))
    return bit_length
def write_pkg_header_info(pldm_fw_up_pkg, metadata):
    """
    Write the fixed portion of the package header: PackageHeaderIdentifier
    (UUID), PackageHeaderFormatRevision, a zero PackageHeaderSize
    placeholder (patched later by update_pkg_header_size),
    PackageReleaseDateTime, ComponentBitmapBitLength and
    PackageVersionString.  (The original docstring here was a copy-paste
    of write_component_bitmap_bit_length's.)

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package
        metadata: metadata about PLDM FW update package

    Returns:
        ComponentBitmapBitLength: number of bits that will be used to
        represent the bitmap in the ApplicableComponents field for a
        matching device
    """
    uuid = metadata["PackageHeaderInformation"]["PackageHeaderIdentifier"]
    package_header_identifier = bytearray.fromhex(uuid)
    pldm_fw_up_pkg.write(package_header_identifier)
    package_header_format_revision = metadata["PackageHeaderInformation"][
        "PackageHeaderFormatVersion"
    ]
    # Size will be computed and updated subsequently
    package_header_size = 0
    pldm_fw_up_pkg.write(
        struct.pack("<BH", package_header_format_revision, package_header_size)
    )
    # Fall back to "now" when the metadata omits PackageReleaseDateTime.
    try:
        release_date_time = datetime.strptime(
            metadata["PackageHeaderInformation"]["PackageReleaseDateTime"],
            "%d/%m/%Y %H:%M:%S",
        )
        write_pkg_release_date_time(pldm_fw_up_pkg, release_date_time)
    except KeyError:
        write_pkg_release_date_time(pldm_fw_up_pkg, datetime.now())
    component_bitmap_bit_length = write_component_bitmap_bit_length(
        pldm_fw_up_pkg, metadata
    )
    write_package_version_string(pldm_fw_up_pkg, metadata)
    return component_bitmap_bit_length
def get_applicable_components(device, components, component_bitmap_bit_length):
    """Build the ApplicableComponents little-endian bitfield for a device.

    Sets one bit per index listed in the device's ApplicableComponents;
    exits the program on an out-of-range index.

    Parameters:
        device: device information
        components: list of components in the package
        component_bitmap_bit_length: length of the ComponentBitmapBitLength

    Returns:
        The ApplicableComponents bitfield
    """
    bitmap = bitarray(component_bitmap_bit_length, endian="little")
    bitmap.setall(0)
    for component_index in device["ApplicableComponents"]:
        if not 0 <= component_index < len(components):
            sys.exit("ERROR: Applicable Component index not found.")
        bitmap[component_index] = 1
    return bitmap
def prepare_record_descriptors(descriptors):
    """
    This function processes the Descriptors and prepares the RecordDescriptors
    section of the the firmware device ID record.

    Parameters:
        descriptors: Descriptors entry

    Returns:
        RecordDescriptors, DescriptorCount
    """
    record_descriptors = bytearray()
    # 0xFFFF marks a vendor-defined descriptor.
    vendor_defined_desc_type = 65535
    # Byte sizes of the title-string type and title-string length fields
    # that precede a vendor-defined descriptor's title string.
    vendor_desc_title_str_type_len = 1
    vendor_desc_title_str_len_len = 1
    descriptor_count = 0
    for descriptor in descriptors:
        descriptor_type = descriptor["DescriptorType"]
        # The first descriptor must be one of the initial descriptor types;
        # later ones may be any known type or vendor-defined.
        if descriptor_count == 0:
            if (
                initial_descriptor_type_name_length.get(descriptor_type)
                is None
            ):
                sys.exit("ERROR: Initial descriptor type not supported")
        else:
            if (
                descriptor_type_name_length.get(descriptor_type) is None
                and descriptor_type != vendor_defined_desc_type
            ):
                sys.exit("ERROR: Descriptor type not supported")
        if descriptor_type == vendor_defined_desc_type:
            # Vendor-defined descriptor layout: type, total length, title
            # string type, title string length, title string, vendor data.
            vendor_desc_title_str = descriptor[
                "VendorDefinedDescriptorTitleString"
            ]
            vendor_desc_data = descriptor["VendorDefinedDescriptorData"]
            check_string_length(vendor_desc_title_str)
            vendor_desc_title_str_type = string_types["ASCII"]
            descriptor_length = (
                vendor_desc_title_str_type_len
                + vendor_desc_title_str_len_len
                + len(vendor_desc_title_str)
                + len(bytearray.fromhex(vendor_desc_data))
            )
            format_string = "<HHBB" + str(len(vendor_desc_title_str)) + "s"
            record_descriptors.extend(
                struct.pack(
                    format_string,
                    descriptor_type,
                    descriptor_length,
                    vendor_desc_title_str_type,
                    len(vendor_desc_title_str),
                    vendor_desc_title_str.encode("ascii"),
                )
            )
            record_descriptors.extend(bytearray.fromhex(vendor_desc_data))
            descriptor_count += 1
        else:
            # Standard descriptor: fixed-size data whose length must match
            # the table entry for its type.
            descriptor_type = descriptor["DescriptorType"]
            descriptor_data = descriptor["DescriptorData"]
            descriptor_length = len(bytearray.fromhex(descriptor_data))
            if (
                descriptor_length
                != descriptor_type_name_length.get(descriptor_type)[1]
            ):
                err_string = (
                    "ERROR: Descriptor type - "
                    + descriptor_type_name_length.get(descriptor_type)[0]
                    + " length is incorrect"
                )
                sys.exit(err_string)
            format_string = "<HH"
            record_descriptors.extend(
                struct.pack(format_string, descriptor_type, descriptor_length)
            )
            record_descriptors.extend(bytearray.fromhex(descriptor_data))
            descriptor_count += 1
    return record_descriptors, descriptor_count
def write_fw_device_identification_area(
    pldm_fw_up_pkg, metadata, component_bitmap_bit_length
):
    """
    Write firmware device ID records into the PLDM package header

    This function writes the DeviceIDRecordCount and the
    FirmwareDeviceIDRecords into the firmware update package by processing the
    metadata JSON. Currently there is no support for optional
    FirmwareDevicePackageData.

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package
        metadata: metadata about PLDM FW update package
        component_bitmap_bit_length: length of the ComponentBitmapBitLength
    """
    # The spec limits the number of firmware device ID records to 255
    max_device_id_record_count = 255
    devices = metadata["FirmwareDeviceIdentificationArea"]
    device_id_record_count = len(devices)
    if device_id_record_count > max_device_id_record_count:
        sys.exit(
            "ERROR: there can be only upto 255 entries in the "
            " FirmwareDeviceIdentificationArea section"
        )
    # DeviceIDRecordCount
    pldm_fw_up_pkg.write(struct.pack("<B", device_id_record_count))
    for device in devices:
        # record_length accumulates the byte size of every field in this
        # record as the fields are prepared; it is written first.
        # RecordLength size
        record_length = 2
        # DescriptorCount
        record_length += 1
        # DeviceUpdateOptionFlags
        device_update_option_flags = bitarray(32, endian="little")
        device_update_option_flags.setall(0)
        # Continue component updates after failure
        supported_device_update_option_flags = [0]
        for option in device["DeviceUpdateOptionFlags"]:
            if option not in supported_device_update_option_flags:
                sys.exit("ERROR: unsupported DeviceUpdateOptionFlag entry")
            device_update_option_flags[option] = 1
        record_length += 4
        # ComponentImageSetVersionStringType supports only ASCII for now
        component_image_set_version_string_type = string_types["ASCII"]
        record_length += 1
        # ComponentImageSetVersionStringLength
        component_image_set_version_string = device[
            "ComponentImageSetVersionString"
        ]
        check_string_length(component_image_set_version_string)
        record_length += len(component_image_set_version_string)
        record_length += 1
        # Optional FirmwareDevicePackageData not supported now,
        # FirmwareDevicePackageDataLength is set to 0x0000
        fw_device_pkg_data_length = 0
        record_length += 2
        # ApplicableComponents
        components = metadata["ComponentImageInformationArea"]
        applicable_components = get_applicable_components(
            device, components, component_bitmap_bit_length
        )
        # bit length is always a multiple of 8 here, so this is exact.
        applicable_components_bitfield_length = round(
            len(applicable_components) / 8
        )
        record_length += applicable_components_bitfield_length
        # RecordDescriptors
        descriptors = device["Descriptors"]
        record_descriptors, descriptor_count = prepare_record_descriptors(
            descriptors
        )
        record_length += len(record_descriptors)
        format_string = (
            "<HBIBBH"
            + str(applicable_components_bitfield_length)
            + "s"
            + str(len(component_image_set_version_string))
            + "s"
        )
        pldm_fw_up_pkg.write(
            struct.pack(
                format_string,
                record_length,
                descriptor_count,
                ba2int(device_update_option_flags),
                component_image_set_version_string_type,
                len(component_image_set_version_string),
                fw_device_pkg_data_length,
                applicable_components.tobytes(),
                component_image_set_version_string.encode("ascii"),
            )
        )
        pldm_fw_up_pkg.write(record_descriptors)
def get_component_comparison_stamp(component):
    """Return the ComponentComparisonStamp for a component image entry.

    When ComponentOptions contains UseComponentCompStamp (1), the
    hex-string ComponentComparisonStamp from the metadata is required and
    must decode to a value in [0x01, 0xFFFFFFFE]; otherwise the default
    0xFFFFFFFF is returned.

    Parameters:
        component: Component image info

    Returns:
        component_comparison_stamp: Component Comparison stamp
    """
    default_stamp = 0xFFFFFFFF
    options = component["ComponentOptions"]
    if int(ComponentOptions.UseComponentCompStamp) not in options:
        return default_stamp
    # FD-vendor-selected value must be present in the metadata file.
    if "ComponentComparisonStamp" not in component:
        sys.exit(
            "ERROR: ComponentComparisonStamp is required"
            " when value '1' is specified in ComponentOptions field"
        )
    try:
        stamp = int(component["ComponentComparisonStamp"], 16)
    except ValueError:  # invalid hex format
        sys.exit("ERROR: Invalid hex for ComponentComparisonStamp")
    if not 0 < stamp < 0xFFFFFFFF:
        sys.exit(
            "ERROR: Value for ComponentComparisonStamp "
            " should be [0x01 - 0xFFFFFFFE] when "
            "ComponentOptions bit is set to"
            "'1'(UseComponentComparisonStamp)"
        )
    return stamp
def write_component_image_info_area(pldm_fw_up_pkg, metadata, image_files):
    """
    Write component image information area into the PLDM package header

    This function writes the ComponentImageCount and the
    ComponentImageInformation into the firmware update package by processing
    the metadata JSON.

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package
        metadata: metadata about PLDM FW update package
        image_files: component images (must be in the same order as the
            ComponentImageInformationArea entries)
    """
    components = metadata["ComponentImageInformationArea"]
    # ComponentImageCount
    pldm_fw_up_pkg.write(struct.pack("<H", len(components)))
    component_location_offsets = []
    # ComponentLocationOffset position in individual component image
    # information
    component_location_offset_pos = 12
    for component in components:
        # Record the location of the ComponentLocationOffset to be updated
        # after appending images to the firmware update package
        component_location_offsets.append(
            pldm_fw_up_pkg.tell() + component_location_offset_pos
        )
        # ComponentClassification
        component_classification = component["ComponentClassification"]
        if component_classification < 0 or component_classification > 0xFFFF:
            sys.exit(
                "ERROR: ComponentClassification should be [0x0000 - 0xFFFF]"
            )
        # ComponentIdentifier
        component_identifier = component["ComponentIdentifier"]
        if component_identifier < 0 or component_identifier > 0xFFFF:
            sys.exit("ERROR: ComponentIdentifier should be [0x0000 - 0xFFFF]")
        # ComponentComparisonStamp
        component_comparison_stamp = get_component_comparison_stamp(component)
        # ComponentOptions
        component_options = bitarray(16, endian="little")
        component_options.setall(0)
        supported_component_options = [0, 1, 2]
        for option in component["ComponentOptions"]:
            if option not in supported_component_options:
                sys.exit(
                    "ERROR: unsupported ComponentOption in "
                    " ComponentImageInformationArea section"
                )
            component_options[option] = 1
        # RequestedComponentActivationMethod
        requested_component_activation_method = bitarray(16, endian="little")
        requested_component_activation_method.setall(0)
        supported_requested_component_activation_method = [0, 1, 2, 3, 4, 5]
        for option in component["RequestedComponentActivationMethod"]:
            if option not in supported_requested_component_activation_method:
                sys.exit(
                    "ERROR: unsupported RequestedComponent "
                    " ActivationMethod entry"
                )
            requested_component_activation_method[option] = 1
        # ComponentLocationOffset and ComponentSize are written as zero
        # placeholders here and patched below once image sizes are known.
        # ComponentLocationOffset
        component_location_offset = 0
        # ComponentSize
        component_size = 0
        # ComponentVersionStringType
        component_version_string_type = string_types["ASCII"]
        # ComponentVersionStringlength
        # ComponentVersionString
        component_version_string = component["ComponentVersionString"]
        check_string_length(component_version_string)
        format_string = "<HHIHHIIBB" + str(len(component_version_string)) + "s"
        pldm_fw_up_pkg.write(
            struct.pack(
                format_string,
                component_classification,
                component_identifier,
                component_comparison_stamp,
                ba2int(component_options),
                ba2int(requested_component_activation_method),
                component_location_offset,
                component_size,
                component_version_string_type,
                len(component_version_string),
                component_version_string.encode("ascii"),
            )
        )
    index = 0
    pkg_header_checksum_size = 4
    # Images start right after the header checksum that follows this area.
    start_offset = pldm_fw_up_pkg.tell() + pkg_header_checksum_size
    # Update ComponentLocationOffset and ComponentSize for all the components
    for offset in component_location_offsets:
        file_size = os.stat(image_files[index]).st_size
        pldm_fw_up_pkg.seek(offset)
        pldm_fw_up_pkg.write(struct.pack("<II", start_offset, file_size))
        start_offset += file_size
        index += 1
    pldm_fw_up_pkg.seek(0, os.SEEK_END)
def write_pkg_header_checksum(pldm_fw_up_pkg):
    """Append the CRC32 PackageHeaderChecksum over everything written so far.

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package
    """
    # Checksum covers the entire header from byte 0 to the current end.
    pldm_fw_up_pkg.seek(0)
    crc = binascii.crc32(pldm_fw_up_pkg.read())
    pldm_fw_up_pkg.seek(0, os.SEEK_END)
    pldm_fw_up_pkg.write(struct.pack("<I", crc))
def update_pkg_header_size(pldm_fw_up_pkg):
    """Patch PackageHeaderSize once the header contents are complete.

    The size is the full byte count of the header structure, including the
    4-byte checksum that has not been written yet.

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package
    """
    checksum_size = 4
    # Field sits past PackageHeaderIdentifier (16) + FormatRevision (1).
    size_field_offset = 17
    total_size = pldm_fw_up_pkg.tell() + checksum_size
    pldm_fw_up_pkg.seek(size_field_offset)
    pldm_fw_up_pkg.write(struct.pack("<H", total_size))
    pldm_fw_up_pkg.seek(0, os.SEEK_END)
def append_component_images(pldm_fw_up_pkg, image_files):
    """
    Append the component images to the firmware update package.

    Parameters:
        pldm_fw_up_pkg: PLDM FW update package
        image_files: component images

    The images are binary, so they are copied in fixed-size chunks. The
    original iterated the file by "lines", which splits on 0x0A bytes and
    degenerates on binary data (one huge read when no newline exists, or
    thousands of tiny writes when many do).
    """
    chunk_size = 64 * 1024
    for image in image_files:
        with open(image, "rb") as file:
            for chunk in iter(lambda: file.read(chunk_size), b""):
                pldm_fw_up_pkg.write(chunk)
def main():
    """Create PLDM FW update (DSP0267) package based on a JSON metadata file"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "pldmfwuppkgname", help="Name of the PLDM FW update package"
    )
    parser.add_argument("metadatafile", help="Path of metadata JSON file")
    parser.add_argument(
        "images",
        nargs="+",
        help=(
            "One or more firmware image paths, in the same order as "
            " ComponentImageInformationArea entries"
        ),
    )
    args = parser.parse_args()
    image_files = args.images
    with open(args.metadatafile) as file:
        try:
            metadata = json.load(file)
        except ValueError:
            sys.exit("ERROR: Invalid metadata JSON file")
    # Validate the number of component images
    if len(image_files) != len(metadata["ComponentImageInformationArea"]):
        sys.exit(
            "ERROR: number of images passed != number of entries "
            " in ComponentImageInformationArea"
        )
    try:
        with open(args.pldmfwuppkgname, "w+b") as pldm_fw_up_pkg:
            component_bitmap_bit_length = write_pkg_header_info(
                pldm_fw_up_pkg, metadata
            )
            write_fw_device_identification_area(
                pldm_fw_up_pkg, metadata, component_bitmap_bit_length
            )
            write_component_image_info_area(
                pldm_fw_up_pkg, metadata, image_files
            )
            update_pkg_header_size(pldm_fw_up_pkg)
            write_pkg_header_checksum(pldm_fw_up_pkg)
            append_component_images(pldm_fw_up_pkg, image_files)
            # NOTE(review): redundant -- the `with` block already closes
            # the file on exit.
            pldm_fw_up_pkg.close()
    except BaseException:
        # Best-effort cleanup: remove the partially-written package.
        # NOTE(review): if the open() above is what failed,
        # pldm_fw_up_pkg is unbound here and this raises NameError --
        # confirm and guard.
        pldm_fw_up_pkg.close()
        os.remove(args.pldmfwuppkgname)
        raise
if __name__ == "__main__":
main()
|
994,804 | 4b95c493a12bd665694f05b17d6e70ce141fc968 | import os,inspect,sys
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
import pandas as pd
import pathlib
import matplotlib.image as im
import numpy as np
import tensorflow as tf
import config
from sklearn.metrics import confusion_matrix
from collections import defaultdict
def get_data_path():
    """Return the 'datasets' directory next to the running script's folder.

    Uses os.path instead of hand-joining with backslashes, so the result is
    correct on every platform (the original only worked with Windows-style
    paths and contained the invalid escape '\\d' in '\\datasets').
    """
    return os.path.join(os.path.dirname(sys.path[0]), 'datasets')
def png_data_read(data_path):
    """Return the paths (as strings) of every *.png file directly in data_path."""
    data_dir = pathlib.Path(data_path)
    return [str(png_path) for png_path in data_dir.glob('*.png')]
def mix_up(ds_one, ds_two, alpha=0.2):
    """Apply mixup augmentation to a pair of (images, labels) batches.

    Draws a per-sample mixing weight l ~ Beta(alpha, alpha), sampled as a
    ratio of two Gamma draws, and returns convex combinations of both the
    images and the labels. Assumes 4-D image batches and 2-D label batches
    (see the (batch, 1, 1, 1) and (batch, 1) reshapes) -- TODO confirm
    against callers.
    """
    images_one, labels_one = ds_one
    images_two, labels_two = ds_two
    batch_size = tf.shape(images_one)[0]

    def sample_beta_distribution(size, c0=alpha, c1=alpha):
        # Beta(c0, c1) via the Gamma ratio X / (X + Y).
        g1_sample = tf.random.gamma(shape=[size], alpha=c1)
        g2_sample = tf.random.gamma(shape=[size], alpha=c0)
        return g1_sample / (g1_sample + g2_sample)

    l = sample_beta_distribution(batch_size, alpha, alpha)
    # Broadcast the per-sample weight over image and label dimensions.
    x_l = tf.reshape(l, (batch_size, 1, 1, 1))
    y_l = tf.reshape(l, (batch_size, 1))
    images = images_one * x_l + images_two * (1 - x_l)
    labels = labels_one * y_l + labels_two * (1 - y_l)
    return (images, labels)
def train_data_gen(normal_image_path, abnormal_image_path, normal_train, abnormal_train):
    """Return a generator factory over the first N paths of each class.

    Yields (image, label) pairs: images reshaped to
    (config.IMG_SHAPE, config.IMG_SHAPE, 1) float32; labels are one-hot
    float32 with [0, 1] = normal and [1, 0] = abnormal.
    """
    def gen():
        for i in normal_image_path[:normal_train]:
            out = im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1)
            out = out.astype(np.float32)
            label = np.array([0., 1.]).astype(np.float32)  # normal data
            yield (out, label)
        for i in abnormal_image_path[:abnormal_train]:
            out = im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1)
            out = out.astype(np.float32)
            label = np.array([1., 0.]).astype(np.float32)  # abnormal data
            yield (out, label)
    return gen
def multi_train_data_gen(normal_image_path, pneu_image_path, covid_image_path, x_image_path, y_image_path, split_nbr=config.SPLIT):
    """Return a generator factory over the training split of five path lists.

    Yields (image, label) pairs: images reshaped to
    (config.IMG_SHAPE, config.IMG_SHAPE, 1) float32; labels are one-hot
    float32 pairs.

    Bug fix: the pneumonia loop iterated the undefined name
    `abnormal_image_path` (a guaranteed NameError once the generator ran);
    it now iterates `pneu_image_path`, matching its own slice bound.

    NOTE(review): covid images receive the [0., 1.] ("normal") label while
    x/y receive [1., 0.] -- this mirrors the original labels exactly;
    confirm that is intended.
    """
    def _train_slice(paths):
        # Training portion: the leading split_nbr fraction of the list.
        return paths[:int(len(paths) * split_nbr)]

    def _load(path):
        out = im.imread(path).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1)
        return out.astype(np.float32)

    def gen():
        for path in _train_slice(normal_image_path):
            yield (_load(path), np.array([0., 1.], dtype=np.float32))  # normal data
        for path in _train_slice(pneu_image_path):  # was: abnormal_image_path (NameError)
            yield (_load(path), np.array([1., 0.], dtype=np.float32))
        for path in _train_slice(covid_image_path):
            yield (_load(path), np.array([0., 1.], dtype=np.float32))  # normal data
        for path in _train_slice(x_image_path):
            yield (_load(path), np.array([1., 0.], dtype=np.float32))
        for path in _train_slice(y_image_path):
            yield (_load(path), np.array([1., 0.], dtype=np.float32))
    return gen
def val_data_gen(normal_image_path, abnormal_image_path, normal_train, abnormal_train):
    """Return a generator factory over the held-out tail of each class list.

    Complements train_data_gen: yields everything from index normal_train /
    abnormal_train onward, with the same one-hot float32 labels.
    """
    def gen():
        for i in normal_image_path[normal_train:]:
            out = im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1)
            out = out.astype(np.float32)
            label = np.array([0., 1.]).astype(np.float32)  # normal data
            yield (out, label)
        for i in abnormal_image_path[abnormal_train:]:
            out = im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1)
            out = out.astype(np.float32)
            label = np.array([1., 0.]).astype(np.float32)  # abnormal data
            yield (out, label)
    return gen
def total_data_gen(normal_image_path, abnormal_image_path):
    """Return a generator factory over ALL normal then abnormal image paths.

    Same (image, one-hot label) format as train_data_gen, with no
    train/validation split.
    """
    def gen():
        for i in normal_image_path:
            out = im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1)
            out = out.astype(np.float32)
            label = np.array([.0, 1.]).astype(np.float32)  # normal data
            yield (out, label)
        for i in abnormal_image_path:
            out = im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1)
            out = out.astype(np.float32)
            label = np.array([1., 0.]).astype(np.float32)  # abnormal data
            yield (out, label)
    return gen
def mg_train_gen(normal_image_path, abnormal_image_path, normal_train, abnormal_train):
    """Return a generator factory yielding groups of 4 images as 4-channel tensors.

    Consumes paths in consecutive groups of four, so normal_train /
    abnormal_train count *groups*, not individual files.
    """
    def gen():
        for i in range(normal_train):
            # The comprehension variable shadows the outer `i`, but the
            # slice bound (4*i) is evaluated in the enclosing scope before
            # iteration starts, so this is correct (if confusing) in Py3.
            temp = [im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1) for i in normal_image_path[4*i: 4*i+4]]
            out = np.concatenate(temp, axis=-1)
            out = out.astype(np.float32)
            label = np.array([0, 1]).astype(np.float32)  # normal group
            yield (out, label)
        for i in range(abnormal_train):
            temp = [im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1) for i in abnormal_image_path[4*i: 4*i+4]]
            out = np.concatenate(temp, axis=-1)
            out = out.astype(np.float32)
            label = np.array([1, 0]).astype(np.float32)  # abnormal group
            yield (out, label)
    return gen
def mg_val_gen(normal_image_path, abnormal_image_path, normal_train, abnormal_train):
    """Return a generator factory over the validation remainder, grouped by 4.

    The first 4*normal_train / 4*abnormal_train paths belong to the
    training portion (see mg_train_gen); the remaining paths are consumed
    four at a time as 4-channel tensors.
    """
    def gen():
        # Number of complete 4-image groups left after the training cut.
        val_normal_train = (len(normal_image_path) - 4 * normal_train) // 4
        for i in range(val_normal_train):
            temp = [im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1) for i in normal_image_path[4*normal_train+4*i: 4*normal_train+4*i+4]]
            out = np.concatenate(temp, axis=-1)
            out = out.astype(np.float32)
            label = np.array([0, 1]).astype(np.float32)  # normal group
            yield (out, label)
        val_abnormal_train = (len(abnormal_image_path) - 4 * abnormal_train) // 4
        for i in range(val_abnormal_train):
            temp = [im.imread(i).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1) for i in abnormal_image_path[4*abnormal_train+4*i: 4*abnormal_train+4*i+4]]
            out = np.concatenate(temp, axis=-1)
            out = out.astype(np.float32)
            label = np.array([1, 0]).astype(np.float32)  # abnormal group
            yield (out, label)
    return gen
def mg_total_gen(normal_image_path, abnormal_image_path):
    """Return a generator factory over ALL data, grouped 4 images per tensor.

    Bug fix: the original looped `len(paths)` times while consuming four
    paths per iteration, so later iterations sliced past the end of the
    list and np.concatenate received an empty list (ValueError). It now
    iterates `len(paths) // 4` complete groups, matching the grouping in
    mg_train_gen / mg_val_gen.
    """
    def gen():
        for group_idx in range(len(normal_image_path) // 4):
            group = normal_image_path[4 * group_idx: 4 * group_idx + 4]
            temp = [im.imread(p).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1) for p in group]
            out = np.concatenate(temp, axis=-1).astype(np.float32)
            label = np.array([0, 1]).astype(np.float32)  # normal group
            yield (out, label)
        for group_idx in range(len(abnormal_image_path) // 4):
            group = abnormal_image_path[4 * group_idx: 4 * group_idx + 4]
            temp = [im.imread(p).reshape(config.IMG_SHAPE, config.IMG_SHAPE, 1) for p in group]
            out = np.concatenate(temp, axis=-1).astype(np.float32)
            label = np.array([1, 0]).astype(np.float32)  # abnormal group
            yield (out, label)
    return gen
def train_with_mixup(model, train_data, val_data, batch_size=8, epochs=1, weight=None, callbacks=None):
    """Train `model` on mixup-augmented pairs drawn from `train_data`.

    Two independently shuffled streams of the same generator are zipped
    and blended by `mix_up` with alpha=0.2.
    NOTE(review): `weight` is accepted but unused here, and because the
    pipeline batches with batch(1), the `batch_size` passed to fit has no
    effect on a tf.data input — confirm intent.
    """
    # AUTOTUNE lets tf.data choose prefetch/parallelism budgets at runtime.
    AUTO=tf.data.AUTOTUNE
    # Two copies of the same generator, shuffled independently, so that
    # zipped elements pair *different* samples for mixup.
    train_ds_one=tf.data.Dataset.from_generator(train_data, (tf.float32, tf.float32), ((config.IMG_SHAPE, config.IMG_SHAPE, 1), (2))).cache().shuffle(100).batch(1).prefetch(AUTO)
    train_ds_two=tf.data.Dataset.from_generator(train_data, (tf.float32, tf.float32), ((config.IMG_SHAPE, config.IMG_SHAPE, 1), (2))).cache().shuffle(100).batch(1).prefetch(AUTO)
    train_ds=tf.data.Dataset.zip((train_ds_one, train_ds_two))
    alpha=0.2
    # Blend each zipped pair of (image, label) batches via mixup.
    train_ds_mu=train_ds.map(lambda ds_one, ds_two: mix_up(ds_one, ds_two, alpha), num_parallel_calls=AUTO)
    model.fit(train_ds_mu, validation_data=val_data, epochs=epochs, batch_size=batch_size, verbose=2, callbacks=callbacks)
def train_with_datagen(model, datagen, train_data, val_data, batch_size=16, epochs=1, class_weight=None, weight=None, callbacks=None):
    """Materialise `train_data` into arrays and fit via a Keras datagen flow."""
    samples = []
    targets = []
    for image, onehot in train_data():
        targets.append(onehot)
        samples.append(image)
    features = np.array(samples)
    labels = np.array(targets)
    # Stream augmented batches from the ImageDataGenerator during fitting.
    model.fit(datagen.flow(features, labels, batch_size=batch_size), class_weight=class_weight, validation_data=val_data, verbose=2, epochs=epochs, callbacks=callbacks)
def calc_confusion_matrix(model, val_data):
    """Run `model` over `val_data` and return the confusion matrix.

    Labels are decoded from one-hot: [0, 1] -> 0 (normal),
    [1, 0] -> 1 (abnormal); predictions are argmax over class scores.
    """
    images = []
    truth = []
    for img, onehot in val_data():
        images.append(img)
        decoded = onehot
        if list(onehot) == [0., 1.]:
            decoded = 0  # normal
        elif list(onehot) == [1., 0.]:
            decoded = 1  # abnormal
        truth.append(decoded)
    scores = model.predict(np.array(images))
    predicted = np.argmax(scores, axis=1)
    return confusion_matrix(predicted, np.array(truth))
def create_csv_file(model, img_path, weight_nbr, weight_path, save_path):
    """Ensemble-predict every image and write an ID -> result CSV.

    For each image the model is evaluated once per checkpoint listed in
    `weight_nbr` (files ``<weight_path><n>.h5``); the softmaxed outputs
    are averaged and the argmax class is written to `save_path`.

    Bug fix: `predictions` was created once outside the image loop, so
    the running mean for image *i* also included the outputs of images
    0..i-1.  It is now reset for each image.
    """
    data_to_csv = defaultdict(list)
    images = [im.imread(i).reshape(1, config.IMG_SHAPE, config.IMG_SHAPE, 1) for i in img_path]
    for i in range(len(images)):
        predictions = []  # reset the ensemble accumulator per image
        for j in weight_nbr:
            img = images[i]
            w = weight_path + str(j) + '.h5'
            model.load_weights(w)
            prediction = model.predict(img)
            prediction = tf.nn.softmax(prediction)
            predictions.append(prediction)
        mean_pred = tf.math.reduce_mean(predictions, axis=0)
        result = np.argmax(mean_pred)
        # Keep only the basename as the row ID (Windows-style separator).
        img_name = img_path[i].split('\\')[-1]
        data_to_csv['ID'].append(img_name)  # should be designated!
        data_to_csv['result'].append(result)
    final_csv = pd.DataFrame(data_to_csv)
    final_csv.set_index('ID', inplace=True)
    final_csv.to_csv(save_path)
|
994,805 | ccc9a0640f2069ffeab85ac1a5c162e3737480ab | from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, SparsePCA
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.metrics import roc_auc_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import matplotlib.pyplot as plt
import numpy as np
# Carrego llibreria que conte les dades de features i labels.
data = load_breast_cancer()
features = data['data']
labels = data['target']
# A target, 0 es maligne i 1 es benigne. Normalitzem les features.
features_norm = (features - features.mean(0))/ features.std(0)
# M de maligne i B de benigne.
M = data['target'] == 0
B = data['target'] == 1
# Faig PCA de dos components per tal de visualitzar els dos grups benignes i malignes.
pca = PCA(n_components = 2).fit(features_norm)
features_pca = pca.transform(features_norm)
print (features_pca.shape)
fig = plt.figure()
plt.hist(features_pca[M, 0])
plt.hist(features_pca[B, 0])
plt.legend(['Malign', 'Benign'])
plt.xlabel("Component 1")
fig.tight_layout()
fig.savefig("HistogramPlotPCA1.png")
plt.show()
fig = plt.figure()
plt.hist(features_pca[M, 1])
plt.hist(features_pca[B, 1])
plt.xlabel("Component 2")
plt.legend(['Malign', 'Benign'])
fig.tight_layout()
fig.savefig("HistogramPlotPCA2.png")
plt.show()
lda = LinearDiscriminantAnalysis(n_components = 2).fit(features_norm, labels)
features_lda = lda.transform(features_norm)
fig = plt.figure()
plt.hist(features_lda[M, 0])
plt.hist(features_lda[B, 0])
plt.legend(['Malign', 'Benign'])
plt.xlabel("Component 1")
fig.tight_layout()
fig.savefig("HistogramPlotLDA1.png")
plt.show()
fig = plt.figure()
plt.scatter(features_pca[M, 0], features_pca[M, 1])
plt.scatter(features_pca[B, 0], features_pca[B, 1])
plt.xlabel("Component 1")
plt.ylabel("Component 2")
plt.legend(['Malign', 'Benign'])
fig.tight_layout()
fig.savefig("PCA.png")
plt.show()
# Faig train_test_split i les prediccions del y_test.
x_train, x_test, y_train, y_test = train_test_split(features_norm, labels, test_size = 0.25, random_state = 10)
model_logistic = LogisticRegression().fit(x_train, y_train)
model_forest = RandomForestClassifier().fit(x_train, y_train)
model_linear = LinearRegression().fit(x_train, y_train)
# Miro la importancia de cada feature.
forest = ExtraTreesClassifier(n_estimators = 100)
forest.fit(x_train,y_train)
importancia = forest.feature_importances_
plt.bar(np.arange(len(importancia)), importancia, tick_label = data['feature_names'])
plt.xticks(rotation = 'vertical')
# plt.margins(0.2)
plt.subplots_adjust(bottom=0.3)
plt.show()
print(importancia)
# Score of each model and predictions for y_train and y_test.
# Bug fix: each pair of prints showed the *test* score twice; the first
# print of each pair now reports the training score (train, then test).
train_yhat_linear = model_linear.predict(x_train)
test_yhat_linear = model_linear.predict(x_test)
print(model_linear.score(x_train, y_train))
print(model_linear.score(x_test, y_test))
train_yhat_logistic = model_logistic.predict(x_train)
test_yhat_logistic = model_logistic.predict(x_test)
print(model_logistic.score(x_train, y_train))
print(model_logistic.score(x_test, y_test))
train_yhat_forest = model_forest.predict(x_train)
test_yhat_forest = model_forest.predict(x_test)
print(model_forest.score(x_train, y_train))
print(model_forest.score(x_test, y_test))
# Faig funcio de confusion_matrix.
def confusion_matrix(predicted, real, gamma):
    """Threshold `predicted` at `gamma` and count outcomes against `real`.

    Scores below gamma are classified 0, otherwise 1.  Returns the counts
    as (tp, fp, tn, fn).
    """
    decision = (np.asarray(predicted) >= gamma).astype(int)
    actual = np.asarray(real)
    positive = actual == 1
    negative = actual == 0
    tp = np.sum(positive & (decision == 1))
    fp = np.sum(negative & (decision == 1))
    tn = np.sum(negative & (decision == 0))
    fn = np.sum(positive & (decision == 0))
    return tp, fp, tn, fn
# Calculo en una llista la tpr i fpr de cada un.
tpr_train = []
fpr_train = []
fpr_test = []
tpr_test = []
gammas = np.arange(0, 1, 0.01)
for gamma in gammas:
tp_train, fp_train, tn_train, fn_train = confusion_matrix(train_yhat_linear, y_train, gamma)
tpr_train.append(tp_train/(tp_train + fn_train))
fpr_train.append(fp_train/(tn_train + fp_train))
tp_test, fp_test, tn_test, fn_test = confusion_matrix(test_yhat_linear, y_test, gamma)
tpr_test.append(tp_test/(tp_test + fn_test))
fpr_test.append(fp_test/(tn_test + fp_test))
tp_train, fp_train, tn_train, fn_train = confusion_matrix(train_yhat_logistic, y_train, gamma)
tpr_train.append(tp_train/(tp_train + fn_train))
fpr_train.append(fp_train/(tn_train + fp_train))
tp_test, fp_test, tn_test, fn_test = confusion_matrix(test_yhat_logistic, y_test, gamma)
tpr_test.append(tp_test/(tp_test + fn_test))
fpr_test.append(fp_test/(tn_test + fp_test))
tp_train, fp_train, tn_train, fn_train = confusion_matrix(train_yhat_forest, y_train, gamma)
tpr_train.append(tp_train/(tp_train + fn_train))
fpr_train.append(fp_train/(tn_train + fp_train))
tp_test, fp_test, tn_test, fn_test = confusion_matrix(test_yhat_forest, y_test, gamma)
tpr_test.append(tp_test/(tp_test + fn_test))
fpr_test.append(fp_test/(tn_test + fp_test))
# Faig un plot scatter de fpr i tpr per veure la ROC curve.
fig = plt.figure()
plt.scatter(fpr_train, tpr_train, s = 2)
plt.scatter(fpr_test, tpr_test, s = 2)
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.legend(['Train', 'Test'])
fig.tight_layout()
fig.savefig("fpr_tpr.png")
plt.show()
# Miro el score de la ROC curve (mesurada amb l'area del grafic)
print (roc_auc_score(y_train, train_yhat_linear))
print (roc_auc_score(y_train, train_yhat_logistic))
print (roc_auc_score(y_train, train_yhat_forest))
|
994,806 | 9b3f461a50f8a57683e5bf5613751a27ea721af9 | """
For this code we need to import two famous third party
library into our code. numpy, opencv.
Numpy: for working with arrays (efficiently and fast)
Opencv: Open Source Computer Vision Library
the goal of this code is to use opencv to do
OCR (Optical character reader) for numbers.
after installing opencv and numpy
using pip, you can go to:
http://opencv.org/opencv-3-2.html
and download the source. it contains so many
useful packages and data for face recognition
and object tracking and so on...also there's an
image called "digits.png" which we need here.
"""
import numpy as np
import cv2
img = cv2.imread('numbers.png') # this is how you read an image.
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # turning it into gray (one channle instead of three RGB)
cv2.imshow("test",gray)
cv2.waitKey(0)
# Now we split the image to 5000 cells, each 20x20 size with a number in each cell:
# cells = [np.hsplit(row,1) for row in np.vsplit(gray,3)]
cells = np.split(gray,3)
# Make it into a Numpy array. It size will be (50,100,20,20):
x = np.array(cells)
# Now we prepare train_data and test_data.
train = x[:,:].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k,500)[:,np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train the data, then test it with test data for k=1
knn = cv2.ml.KNearest_create() # KNearest is a classification algorithem. look at the end of code.
knn.train(train, cv2.ml.ROW_SAMPLE, train_labels) # training our machine to learn the trend of data.
# ok...now we have done the training part.
# let's try to see if it can actually read numbers:
'''
1) open paint and draw a number in a 20*20 pixel canvas.
2) Invert it's color so the box be black and your
number be in white.
3) save as something.png and close it.
'''
# Read the hand-drawn digit, grey-scale it, flatten to a 1x400 sample,
# and classify it with the trained kNN model.
number_image = cv2.imread('something.png')  # renamed: was misspelled "number_iamge"
gray = cv2.cvtColor(number_image, cv2.COLOR_BGR2GRAY)
x = np.array(gray).reshape(-1, 400).astype(np.float32)
ret, result, neighbours, dist = knn.findNearest(x, k=1)
print(result)
# for more info on k-Nearest Neighbour:
# http://docs.opencv.org/trunk/d5/d26/tutorial_py_knn_understanding.html
# for more info on Machine Learning:
# http://docs.opencv.org/trunk/d6/de2/tutorial_py_table_of_contents_ml.html
# also:
# http://www.nptel.ac.in/courses/106108057/1#
# this code is modification of this source code:
# http://docs.opencv.org/trunk/d8/d4b/tutorial_py_knn_opencv.html
# tip: you can open the training image that i mention in line 24 with paint
# and invert its color therefore you don't have to invert the
# color of numbers you write in paint afterwards in something.png!
'''it goes without saying: this code doesn't work on sololearn'''
|
import pexpect
import time
import os  # bug fix: os.system is used below but os was never imported

username = ''
password = ''
email = ''

# Log in to PTT over ssh and send an alert e-mail if the password is
# rejected (the board echoes '密碼不對' -- "wrong password").
child = pexpect.spawn('ssh bbsu@ptt.cc')
child.expect('new'.encode('big5'))
time.sleep(5)
print('sending username...')
child.sendline(str(username + '\r\n').encode('big5'))
child.expect(':')
time.sleep(5)
print('sending password...')
child.sendline(str(password + '\r\n').encode('big5'))
time.sleep(5)
print('checking password...')
log = child.read(128).decode('utf-8', errors='ignore')
if '密碼不對' in log:
    os.system("echo 'PTT login failed!' | mail -s 'Ptt login warning!' " + email)
    print('Login failed!!')
else:
    print('Login succeed!!')
child.close()
print('done')
994,808 | 78287b4fc4a2f6007445461bb373a2ee9eb6e398 | from typing import Literal
# Tailwind-CSS width scale: every legal value accepted by the ``w-{value}``
# utility — the numeric spacing scale, fractions, and keyword sizes.
Width = Literal[
    '0',
    'px',
    '0.5',
    '1',
    '1.5',
    '2',
    '2.5',
    '3',
    '3.5',
    '4',
    '5',
    '6',
    '7',
    '8',
    '9',
    '10',
    '11',
    '12',
    '14',
    '16',
    '20',
    '24',
    '28',
    '32',
    '36',
    '40',
    '44',
    '48',
    '52',
    '56',
    '60',
    '64',
    '72',
    '80',
    '96',
    'auto',
    '1/2',
    '1/3',
    '2/3',
    '1/4',
    '2/4',
    '3/4',
    '1/5',
    '2/5',
    '3/5',
    '4/5',
    '1/6',
    '2/6',
    '3/6',
    '4/6',
    '5/6',
    '1/12',
    '2/12',
    '3/12',
    '4/12',
    '5/12',
    '6/12',
    '7/12',
    '8/12',
    '9/12',
    '10/12',
    '11/12',
    'full',
    'screen',
    'min',
    'max',
    'fit',
]
|
994,809 | ac9a4f1e04d7735c0626430ee78e724a1b2836fe | # unittests/__init__.py
from datetime import datetime
import logging
import re
import time
from search import Query, InvalidQueryError
from .testobject import (
TestObject,
PropertyTestObject,
NestedTestObject
)
logger = logging.getLogger(__name__)
REGISTERED_UNITTESTS = []
#NB: This should all be converted to pytest
def unittest(func):
    """Register *func* in REGISTERED_UNITTESTS and return it unchanged."""
    REGISTERED_UNITTESTS.append(func)
    return func


test = unittest  # alias so tests can be decorated with @test as well
def run(test_filter, list_tests=False):
    '''Run tests based on the test filter, as a string, provided by the caller.

    Parameters:
        test_filter - a regular expression used to filter tests
        list_tests  - when True, only list the matching tests (logged SKIP)

    Returns - an int representing the number of failing test cases.
    '''
    start_time = time.time()
    filtered_tests = [t for t in REGISTERED_UNITTESTS
                      if re.search(test_filter, t.__name__)]
    total_count = len(filtered_tests)
    failed_tests = []
    if filtered_tests:
        padding = max(len(t.__name__) for t in filtered_tests)
        for test in filtered_tests:
            # Pretty-print e.g. test_testobject_eq -> "Test Testobject Eq".
            name = ' '.join(
                [p.title() for p in test.__name__.replace('_', ' ').split()])
            # Bug fix: .title() yields "Testobject", so the original
            # replace('testobject', ...) could never match.
            name = name.replace('Testobject', 'TestObject')
            try:
                if not list_tests:
                    test()
                logger.info(f'{name:<{padding}} '
                            f'{"SKIP" if list_tests else "PASS"}')
            except AssertionError as e:
                logger.info(f'  {e}')
                logger.info(f'{name:<{padding}} FAIL')
                failed_tests.append(name)
    _print_results(total_count, failed_tests, start_time)
    return len(failed_tests)
def _print_results(total_count, failed_tests, start_time):
    """Log a summary: failing test names, pass/fail counts, elapsed time."""
    # Print failing test cases, if any (header emitted before the first one)
    for idx, failed_test in enumerate(failed_tests):
        if idx == 0:
            logger.info('')
            logger.info(f'Failed Tests:')
        logger.info(f'  {idx+1}) {failed_test}')
    # Right-align the counts to the width of the largest possible number.
    padding = len(str(total_count))
    logger.info('')
    logger.info(f'Pass  {total_count - len(failed_tests):>{padding}}')
    logger.info(f'Fail  {len(failed_tests):>{padding}}')
    logger.info(f"-------{'-' * padding}")
    logger.info(f'Total {total_count:>{padding}}')
    logger.info('')
    logger.info(f'Total time: {time.time() - start_time:,.2f} secs')
|
994,810 | a4a484b5cda5c58ecb0d4978ac6149dffc86d028 | import time
import serial
import RPi.GPIO as GPIO
# Set up gpio for button push
PIN_NUM = 7
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PIN_NUM, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
class HardwareInterface:
    """Thin wrapper around the Pi's push button (GPIO) and serial printer."""

    def __init__(self):
        # Set up printer: open the primary UART at 19200 baud.
        self.ser = serial.Serial('/dev/serial0', 19200)
        # To write to serial port do:
        # ser.write(b'Your text here\n\n')

    def isButtonPressed(self) -> bool:
        # The pin is pulled down, so HIGH means the button is being pressed.
        return GPIO.input(PIN_NUM) == GPIO.HIGH

    def writeToPrinter(self, st: str):
        # Encode text to bytes before writing to the serial printer.
        self.ser.write(st.encode())
if __name__ == '__main__':
print("hardware_interface.py")
|
994,811 | d5f8a201b262f37af12ebe5321835720b5ba7be0 | import sqlite3
from sqlite3 import Error
def sqlite_connect(db_file = '/Users/vs/Documents/workspace@roy/text-editor/textEditor/database/writerBox.db'):
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
raise e
def create_new_table(statement):
try:
conn = sqlite_connect()
cursor = conn.cursor()
cursor.execute(statement)
except Error as e:
raise e
finally:
conn.close()
def select_query(sql):
    """Execute a SELECT statement and return all fetched rows."""
    try:
        conn = sqlite_connect()
        result_cursor = conn.cursor().execute(sql)
        return result_cursor.fetchall()
    except Error as e:
        raise e
    finally:
        # NOTE: mirrors the original — if sqlite_connect() itself fails,
        # `conn` is unbound when this runs.
        conn.close()
def insert_query(query, variable):
try:
conn = sqlite_connect()
cur = conn.cursor()
cur.execute(query, variable)
conn.commit()
except Error as e:
raise e
finally:
conn.close()
def update_query(query, variable):
try:
conn = sqlite_connect()
cur = conn.cursor()
cur.execute(query, variable)
conn.commit()
except Error as e:
raise e
finally:
conn.close()
def delete_task(query, variable):
    """
    Delete a task using a parameterised DELETE statement.
    :param query: SQL DELETE statement with placeholders
    :param variable: parameter tuple bound to the placeholders
    :return: None
    """
    try:
        conn = sqlite_connect()
        cur = conn.cursor()
        cur.execute(query, variable)
        conn.commit()
    except Error as e:
        raise e
    finally:
        conn.close()
if __name__ == '__main__':
    select = 'Select * from document;'
    # Bug fix: select_query takes a single SQL argument; the extra
    # 'document' argument raised TypeError at call time.
    select_query(select)
    # sqlite_connect("../../database/writerBox.db")
def minimumBribes(q):
    """Estimate bribes in a 'New Year Chaos' queue `q` of 1-based values.

    Returns (bribe, q).  NOTE(review): this compares each value against
    the position of its *first* occurrence via q.index(), and the
    'Too chaotic' guard is commented out — the result does not follow the
    standard minimum-bribes algorithm for many inputs; confirm before
    relying on it.
    """
    bribe = 0
    for position in range(len(q)):
        # q.index(q[position]) equals `position` when values are distinct.
        print('index: {}'.format(q.index(q[position])+1))
        print('q: {}'.format(q[position]))
        if q[position] <= q.index(q[position])+2:
            # Intended rule: a rider can move at most two places forward.
            bribe += q.index(q[position])+1 - q[position]
            print('Posicao Correta')
        # else:
        #     return print('Too chaotic')
        # print(q.index(q[position])+1)
    return bribe, q
n = 5
q = [2, 1, 5, 3, 4]
# n = 5
# q = [2, 5, 1, 3, 4]
print(minimumBribes(q)) |
994,813 | 1c531888c113592a86928d6f3197e0f9d2933e1e | list1 = []
print(list1)
list2 = [1,]
print(list2)
list3 = [1, 2]
print(list3)
tuple1 = (1, 2, 3)
print(tuple1)
print(tuple1[0])
dic1 = {'book1':'C语言','book2':'R语言', 'book3':'Python语言'}
print(dic1)
dic1['book4'] = '深度学习与keras'
print(dic1)
print(dic1.keys())
print(dic1.values())
# 字符串
"""
座右铭:好好学习,天天向上
"""
str1 = '好好学习'
print(str1)
str2 = "天天向上"
print(str2) |
994,814 | 3ecc1bb6db05589e315b790a422d9971d05d0b1c | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
'''
对于从1到N的连续整集合,能划分成两个子集合,且保证每个集合的数字和是相等的。
举个例子,如果N=3,对于{1,2,3}能划分成两个子集合,他们每个的所有数字和是相等的:
{3} and {1,2}
这是唯一一种分发(交换集合位置被认为是同一种划分方案,因此不会增加划分方案总数)
如果N=7,有四种方法能划分集合{1,2,3,4,5,6,7},每一种分发的子集合各数字和是相等的:
{1,6,7} and {2,3,4,5} {注 1+6+7=2+3+4+5}
{2,5,7} and {1,3,4,6}
{3,4,7} and {1,2,5,6}
{1,2,4,7} and {3,5,6}
给出N,你的程序应该输出划分方案总数,如果不存在这样的划分方案,则输出0。程序不能预存结果直接输出。
'''
dyn = {}
ccc = 0
def get_subset(*args):
    """Count subsets of {1..n} summing to `remain` (memoised, Python 2).

    args is the tuple (n, remain): n is the largest value still available,
    remain the sum left to reach.  Results are cached in the module-level
    `dyn`; `ccc` counts loop iterations for instrumentation.
    """
    global ccc
    global dyn
    n, remain = args
    # check cached result
    if args in dyn:
        return dyn[args]
    if remain == 0:
        cnt = 1
    else:
        cnt = 0
    for i in xrange(min(n, remain), 0, -1):
        # Prune: even taking all of 1..i the reachable sum is i*(i+1)/2;
        # if that is below `remain` no solution exists down this branch.
        # Bug fix: the original shifted LEFT (i*(i+1)*2 < remain), which
        # almost never pruned; the triangular number needs a right shift.
        if ((i * (i + 1)) >> 1) < remain:
            break
        cnt += get_subset(i - 1, remain - i)
        ccc += 1
    # put result in cache
    dyn[args] = cnt
    return cnt
def subset_sum(n):
global ccc
ccc = 0
# 求和
_sum = (n * (n + 1)) / 2
# 和是奇数,没有解
if _sum & 1:
return 0
# 求有多少种组合
return get_subset(n - 1, (_sum >> 1) - n), ccc
def main():
for i in sys.argv[1:]:
try:
i = int(i)
except:
print i, 0
else:
print i, subset_sum(i)
if __name__ == "__main__":
main()
|
994,815 | 780d848c6828270ff597a54816bb0d77ee622988 | # 1306. Jump Game III
class Solution:
def canReach(self, arr: List[int], start: int) -> bool:
stack, visited, n = [start], set(), len(arr)
while stack:
i = stack.pop()
if arr[i] == 0:
return True
visited.add(i)
i1, i2 = i + arr[i], i - arr[i]
if i1 < n and i1 not in visited:
stack.append(i1)
if i2 >=0 and i2 not in visited:
stack.append(i2)
return False
|
994,816 | 62bf352ae5560cc9d02e76b22bd870576ee576e7 | # Local imports
import logging
import bb_auditlogger
import config_services
import bb_dbconnector_factory
from messaging import bb_sms_handler
import bb_notification_handler as notify
# Set up the logger
logger = logging.getLogger(__name__)
# Method is run when a SET command to determine which particular setting the user is updating.
def setting(sms_to, sms_from, sms_list):
    """Dispatch a SET command to the matching settings handler.

    sms_list holds the tokenised message: SET, the setting name (EMAIL or
    CONTACT), then its value(s).  Unknown settings are ignored; messages
    with fewer than three tokens trigger a help reply to the sender.
    """
    # Record an analytics data point for every SET command received.
    instance_name, a = config_services.identify_service(sms_to)
    bb_dbconnector_factory.DBConnectorInterfaceFactory().create().add_analytics_record("Count", "Command-SETTING", instance_name)
    # Three tokens are required - 1. SET, 2. SETTING (e.g. EMAIL),
    # 3. VALUE (e.g. ON).  Bail out with a help message otherwise.
    if len(sms_list) < 3:
        bb_sms_handler.send_sms_notification(sms_to, sms_from,
                                             "Please ensure you provide all the necessary parameters to change settings. ")
        return
    target = sms_list[1].upper()
    if target == "EMAIL":
        logger.debug("User changing email settings")
        email_settings(sms_to, sms_from, sms_list)
    elif target == "CONTACT":
        logger.debug("User changing contact settings")
        contact_settings(sms_to, sms_from, sms_list)
    return
# Method to process a setting request for EMAIL
def email_settings(sms_to, sms_from, sms_list):
if sms_list[2].upper() == "OFF":
logger.debug("User requested that email notifications are disabled")
result = bb_dbconnector_factory.DBConnectorInterfaceFactory().create().disable_email_notifications(sms_from)
bb_sms_handler.send_sms_notification(sms_to, sms_from, result)
return
elif sms_list[2].upper() == "ON":
logger.debug("User requested that email notifications are enabled")
result = bb_dbconnector_factory.DBConnectorInterfaceFactory().create().enable_email_notifications(sms_from)
bb_sms_handler.send_sms_notification(sms_to, sms_from, result)
notify.send_notifications(sms_from, "Test Notification", "Email notifications have now been turned on. "
"You should add blockbuster.notify@gmail.com "
"to your contacts.")
return
else:
email_address = sms_list[2]
logger.debug("Updating with email address " + email_address)
result = bb_dbconnector_factory.DBConnectorInterfaceFactory().create().update_email_address(sms_from, email_address)
bb_sms_handler.send_sms_notification(sms_to, sms_from, result)
notify.send_notifications(sms_from, "Test Notification", "Notifications are now enabled for this email address.")
return
# Method to process setting changes for CONTACT
def contact_settings(bb_service_number, user_mobile, sms_list):
    """Handle SET CONTACT ...: mobile sharing, clearing, or free-text info.

    Supported forms:
      SET CONTACT MOBILE OFF  - disable sharing of the user's number
      SET CONTACT MOBILE ...  - (any other value) enable sharing
      SET CONTACT CLEAR       - wipe alternative text, enable sharing
      SET CONTACT <free text> - store alternative contact text
    Every branch replies to the user by SMS with the outcome.
    """
    # If the user sends a command of "SET CONTACT MOBILE OFF", mobile number sharing for them will be disabled.
    # NOTE(review): `result` is declared global but only ever used locally
    # here — the global looks unnecessary; confirm nothing else reads it.
    global result
    if len(sms_list) > 3 and sms_list[2].upper() == "MOBILE" and sms_list[3].upper() == "OFF":
        # Log that the user has chosen to disable sharing of their mobile number
        logger.debug("Updating user setting Share_Mobile to OFF")
        # Attempt to disable mobile number sharing for this user - will return a success code of 0 (success) or 1 (fail)
        success_code = bb_dbconnector_factory.DBConnectorInterfaceFactory()\
            .create().disable_mobile_number_sharing(user_mobile)
        if success_code == 0:
            result = "Share Mobile Number is now OFF."
            logger.info("User Setting Updated: Share Mobile OFF")
        else:
            result = "There was an issue enabling this setting - please contact BlockBuster support."
            logger.error("Error disabling Share Mobile setting for user.")
        bb_sms_handler.send_sms_notification(bb_service_number, user_mobile, result)
        return
    # If the user sends any other command beginning with "SET CONTACT MOBILE"
    # then mobile number sharing will be enabled for them.
    elif sms_list[2].upper() == "MOBILE":
        # Log that the user has chosen to enable sharing of their mobile number
        logger.debug("Updating user setting Share_Mobile to ON")
        # Attempt to enable mobile number sharing for this user - will return a success code of 0 (success) or 1 (fail)
        success_code = bb_dbconnector_factory.DBConnectorInterfaceFactory()\
            .create().enable_mobile_number_sharing(user_mobile)
        if success_code == 0:
            result = "Share Mobile Number is now ON."
            logger.info("User Setting Updated: Share Mobile ON")
        else:
            result = "There was an issue enabling this setting - please contact BlockBuster support."
            logger.error("Error enabling Share Mobile setting for user.")
        bb_sms_handler.send_sms_notification(bb_service_number, user_mobile, result)
        return
    # If the user sends a "SET CONTACT CLEAR" command
    # erase any alternative contact text that they have set and enable mobile sharing
    elif sms_list[2].upper() == "CLEAR":
        # Log that the user has chosen to enable sharing of their mobile number
        logger.debug("Clearing alternative contact text and enabling mobile sharing")
        bb_auditlogger.BBAuditLoggerFactory().create().logAudit('app',
                                                               'SETTING-CONTACT-CLEAR',
                                                               "Mobile:" + user_mobile)
        # Attempt to enable mobile number sharing for this user - will return a success code of 0 (success) or 1 (fail)
        success_clear = bb_dbconnector_factory.DBConnectorInterfaceFactory()\
            .create().remove_alternative_contact_text(user_mobile)
        success_code = bb_dbconnector_factory.DBConnectorInterfaceFactory()\
            .create().enable_mobile_number_sharing(user_mobile)
        if success_code == 0 and success_clear == 0:
            result = "Your additional contact information has been cleared and mobile number sharing is enabled."
            logger.info("User Setting Updated: Share Mobile ON and Alternative Contact Info CLEARED.")
        else:
            result = "There was an issue clearing your contact information - please report this issue."
            # TODO: Create a new logError method on the BBAuditLogger and then convert the below
            # BBAuditLogger.BBAuditLoggerFactory().create().logException('app',
            #                                                            'SETTING-CONTACT-CLEAR',
            #                                                            "Mobile:" + user_mobile)
        bb_sms_handler.send_sms_notification(bb_service_number, user_mobile, result)
        return
    else:
        # Assign the alternative text provided by the user to a variable
        # (re-join every token from index 2 onwards with single spaces).
        alt_text_last_index = (len(sms_list))
        alternative_text = sms_list[2]
        i = 3
        while i < alt_text_last_index:
            alternative_text = alternative_text + " " + sms_list[i]
            i += 1
        # Log that the user has chosen to update their alternative contact information
        logger.info("Updating user setting with alternative contact information.")
        logger.debug("New contact information: " + alternative_text)
        # Call the method in the DAL to update the alternative contact information for that user.
        # Assign the result to a variable.
        success_code = bb_dbconnector_factory.DBConnectorInterfaceFactory()\
            .create()\
            .update_alternative_contact_text(user_mobile, alternative_text)
        if success_code == 0:
            result = "Alternative contact info has been set to:\n\n \"" + alternative_text + "\""
        else:
            result = "There was an issue setting the alternative contact info - please contact BlockBuster support."
        # Send an SMS to the user confirming that their details have been updated.
        bb_sms_handler.send_sms_notification(bb_service_number, user_mobile, result)
        return
|
# Fraction of records in each `isurban` category.
# Bug fix: Python's boolean literal is capitalised — `true` is a NameError.
cnt = df.isurban.value_counts(normalize=True)
994,818 | 909fa6a56de4f9d966519b67b937557a25501a28 | import sys
import math
script, Dvc, ep, delta = sys.argv
Dvc = int(Dvc)
ep = float(ep)
delta = float(delta)
print("VC Dimension : {}".format(Dvc))
print("Epsilon : {}".format(ep))
print("Delta : {}".format(delta))
def testN(N):
    """Check the VC generalisation bound for sample size N.

    Returns True when N >= (8 / ep^2) * ln(4 * ((2N)^Dvc + 1) / delta),
    using the module-level Dvc, ep and delta parsed from sys.argv.
    """
    growth = (2 * N) ** Dvc + 1
    bound = (8 / ep ** 2) * math.log(4 * growth / delta)
    return N >= bound
N = 2
base = N
top = N * 2
lastN = -1
i = 1
#use binary search
while lastN != N:
i += 1
lastN = N
if not testN(N):
base = N
top = N * 2
else:
top = N
N = math.ceil((base + top) / 2)
print("Minimal number of point is {}. Found after {} steps.".format(N, i))
|
994,819 | ba5a378d3180f96545056a8b87a87846fe26fef9 | # -*- coding=utf8 -*-
__author__ = 'admin'
#@Time :2019/1/9 16:43
import time
#装饰器的架子
def timmer(func):
    """Decorator that times each call and prints the elapsed seconds."""
    def wrapper(*args, **kwargs):
        began = time.time()
        outcome = func(*args, **kwargs)
        elapsed = time.time() - began
        # Runtime message kept verbatim ("the running time is %s").
        print('运行时间是%s' % (elapsed,))
        return outcome
    return wrapper
@timmer #就相当于test=timmer(test) 语法糖
def test(name,age):
time.sleep(1)
print('test函数运行完毕,名字是【%s】年龄是【%s】'%(name,age))
return '这是test的返回值'
res=test('linhaifeng',18) #就是在运行wrapper
# print(res)
@timmer
def test1(name,age,gender):
time.sleep(1)
print('test1函数运行完毕,名字是【%s】年龄是【%s】性别是【%s】'%(name,age,gender))
return '这是test1的返回值'
test1('alex',18,'male')
#a,b,c={1,2,3} 解压序列 一一对应的关系
#l = [1,2,3,4,5,6,7,8]
#a,*_,d = l
#京东后端就是一个个的功能
#都装上一个验证功能
def index():
pass
def home():
pass
def shooping_car():
pass
def order():
pass
|
994,820 | 0ceb29a3b3d91771dc78b6c1bb68ba547a5a555b | #!/usr/bin/python3
"""
11-square: class Square from Rectangle
"""
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
    """A square: a Rectangle whose two sides are equal.

    Attributes:
        size (int): length of each side (validated as a positive integer)

    Methods:
        __init__ - validates and stores the side length
        area     - returns size squared
        __str__  - '[Square] size/size'
    """

    def __init__(self, size):
        """Validate `size` with the inherited checker, then store it."""
        self.integer_validator("size", size)
        self.__size = size

    def area(self):
        """Return the area of the square (side * side)."""
        return self.__size * self.__size

    def __str__(self):
        """Return '[<class name>] <size>/<size>'."""
        return "[{}] {}/{}".format(type(self).__name__,
                                   self.__size, self.__size)
994,821 | f0449bc7e3ea671e05e98252fb5c5c2620fb829d | import pytest
from unittest import TestCase
from moto import mock_dynamodb2
from ..dynamo_connector import DynamoConnector
class TestDynamo(TestCase):
    """
    Tests for DynamoConnector against a mocked (moto) DynamoDB backend.
    """

    @mock_dynamodb2
    def setUp(self):
        # Fresh connector per test so state never leaks between cases.
        self.connector = DynamoConnector()

    @mock_dynamodb2
    def tearDown(self):
        del self.connector

    @pytest.mark.asyncio
    @mock_dynamodb2
    def test_create_table(self):
        response = self.connector.create_table('test')
        self.assertIsNotNone(response)

    @pytest.mark.asyncio
    @mock_dynamodb2
    def test_put_item(self):
        # Bug fix: this method was also named test_update_item, so the
        # later definition shadowed it and this test never ran.  Renamed
        # to match the put_item operation it actually exercises.
        self.connector.put_item('test', {
            'username': 'janedoe',
            'first_name': 'Jane',
            'last_name': 'Doe',
            'age': 25,
            'account_type': 'standard_user',
        })

    @pytest.mark.asyncio
    @mock_dynamodb2
    def test_get_item(self):
        self.connector.get_item('test', {
            'username': 'janedoe',
            'last_name': 'Doe'
        })

    @pytest.mark.asyncio
    @mock_dynamodb2
    def test_update_item(self):
        self.connector.update_item('test', {
            'username': 'janedoe',
            'last_name': 'Doe'
        },
            'SET age = :val1',
            {
                ':val1': 26
            })
994,822 | cf13bfe920e97d0ee3df4cd77c88b72e0848ddfe | expected_output = {
"vlan": {
"vlan1": {
"vlan_name": "default",
"vlan_status": "active",
"vlan_port": [
"Gi1/0/1",
"Gi1/0/2",
"Gi1/0/4",
"Gi1/0/5",
"Gi1/0/6",
"Gi1/0/8",
"Gi1/0/9",
"Gi1/0/10",
"Gi1/0/11",
"Gi1/0/12",
"Gi1/0/14",
"Gi1/0/15",
"Gi1/0/16",
"Gi1/0/17",
"Gi1/0/19",
"Gi1/0/20",
"Gi1/0/21",
"Gi1/0/22",
"Gi1/0/23",
"Gi1/0/24",
"Te1/1/1",
"Te1/1/2",
"Te1/1/3",
"Te1/1/4",
"Te1/1/8",
"Ap1/0/1",
"Gi3/0/1",
"Gi3/0/2",
"Gi3/0/3",
"Gi3/0/4",
"Gi3/0/5",
"Gi3/0/6",
"Gi3/0/7",
"Gi3/0/8",
"Gi3/0/9",
"Gi3/0/10",
"Gi3/0/11",
"Gi3/0/12",
"Gi3/0/13",
"Gi3/0/14",
"Gi3/0/15",
"Gi3/0/16",
"Gi3/0/17",
"Gi3/0/18",
"Gi3/0/19",
"Gi3/0/20",
"Gi3/0/21",
"Gi3/0/22",
"Gi3/0/23",
"Gi3/0/24",
"Gi3/0/26",
"Gi3/0/27",
"Gi3/0/28",
"Gi3/0/29",
"Gi3/0/30",
"Gi3/0/31",
"Gi3/0/32",
"Gi3/0/34",
"Gi3/0/35",
"Gi3/0/36",
"Gi3/0/37",
"Gi3/0/38",
"Gi3/0/39",
"Gi3/0/40",
"Gi3/0/41",
"Gi3/0/42",
"Gi3/0/43",
"Gi3/0/44",
"Gi3/0/45",
"Gi3/0/46",
"Gi3/0/47",
"Gi3/0/48",
"Fo3/1/1",
"Fo3/1/2",
"Ap3/0/1",
"Gi4/0/3",
"Gi4/0/4",
"Gi4/0/5",
"Gi4/0/6",
"Gi4/0/7",
"Gi4/0/8",
"Gi4/0/9",
"Gi4/0/10",
"Gi4/0/11",
"Gi4/0/12",
"Gi4/0/14",
"Gi4/0/15",
"Gi4/0/16",
"Gi4/0/17",
"Gi4/0/18",
"Gi4/0/19",
"Gi4/0/20",
"Gi4/0/21",
"Gi4/0/22",
"Gi4/0/23",
"Gi4/0/24",
"Gi4/0/26",
"Gi4/0/27",
"Gi4/0/28",
"Gi4/0/30",
"Gi4/0/32",
"Gi4/0/33",
"Gi4/0/34",
"Gi4/0/35",
"Gi4/0/36",
"Gi4/0/38",
"Gi4/0/39",
"Gi4/0/40",
"Gi4/0/42",
"Gi4/0/43",
"Gi4/0/44",
"Gi4/0/45",
"Gi4/0/46",
"Gi4/0/47",
"Gi4/0/48",
"Ap4/0/1",
],
},
"vlan50": {"vlan_name": "VLAN0050", "vlan_status": "active"},
"vlan666": {
"vlan_name": "VLAN0666",
"vlan_status": "active",
"vlan_port": ["Te1/1/6", "Te1/1/7"],
},
"vlan777": {
"vlan_name": "VLAN0777",
"vlan_status": "active",
"vlan_port": ["Te1/1/5"],
},
"vlan1005": {
"vlan_name": "trnet-default",
"vlan_status": "act/unsup"
},
}
} |
994,823 | 33d4e555aea4235ec02c235bb21525260cdf8c69 | from os import getcwd
from scrapy.commands.parse import Command
from scrapy.http.headers import Headers
from scrapy.http.request import Request
from scrapy.http.response.html import HtmlResponse
from unittest import TestCase, main
from Doc.quotes.quotes.spiders.quotes_spider_2 import QuotesSpider2
START_PAGE = '/tag/humor/'
QUOTES_FOLDER_NAME = 'quotes'
TESTS_FOLDER_NAME = 'tests'
TEST_DATA_FOLDER_NAME = 'test_data'
SPIDERS_FOLDER_NAME = 'spiders'
START_HTML_FILE = '/quotes.mhtml'
SPIDER_NAME = 'quotes_test_spider'
FILE_SYSTEM_PREFIX = 'file://'
SLASHE = "/"
EXPECTED_NUMBER_OF_QUOTES_PAGE_01 = 10
EXPECTED_NUMBER_OF_QUOTES_PAGE_02 = 2
class QuotesSpiderTest(TestCase):
    """Runs QuotesSpider2 against local fixture files (``file://`` URLs)
    instead of the live quotes.toscrape.com site."""

    @classmethod
    def get_path_to_test_data(cls):
        """Return a file:// URL for the .../quotes/tests/test_data directory.

        Each build_path() call appends a segment only when it is not already
        present, so the result is stable regardless of the directory the
        tests are started from.
        """
        result = getcwd()
        result = QuotesSpiderTest \
            .build_path(result,
                        QUOTES_FOLDER_NAME,
                        SLASHE + QUOTES_FOLDER_NAME +
                        SLASHE + QUOTES_FOLDER_NAME)
        result = QuotesSpiderTest \
            .build_path(result,
                        QUOTES_FOLDER_NAME + '/' + QUOTES_FOLDER_NAME,
                        SLASHE + QUOTES_FOLDER_NAME)
        result = QuotesSpiderTest \
            .build_path(result,
                        TESTS_FOLDER_NAME,
                        SLASHE + TESTS_FOLDER_NAME)
        result = QuotesSpiderTest \
            .build_path(result,
                        TEST_DATA_FOLDER_NAME,
                        SLASHE + TEST_DATA_FOLDER_NAME)
        result = FILE_SYSTEM_PREFIX + result
        return result

    @classmethod
    def get_path_to_spider(cls, spider_name):
        """Return a file:// URL for the given spider module under .../spiders/."""
        result = getcwd()
        result = QuotesSpiderTest \
            .build_path(result,
                        QUOTES_FOLDER_NAME,
                        SLASHE + QUOTES_FOLDER_NAME +
                        SLASHE + QUOTES_FOLDER_NAME)
        result = QuotesSpiderTest \
            .build_path(result,
                        QUOTES_FOLDER_NAME + SLASHE + QUOTES_FOLDER_NAME,
                        SLASHE + QUOTES_FOLDER_NAME)
        result = QuotesSpiderTest \
            .build_path(result,
                        SPIDERS_FOLDER_NAME,
                        SLASHE + SPIDERS_FOLDER_NAME + SLASHE)
        result = FILE_SYSTEM_PREFIX + result + spider_name + '.py'
        return result

    def setUp(self) -> None:
        """Point the spider at the local fixture and stub its parse callback."""
        QuotesSpider2.URL = self.get_path_to_test_data()
        start_urls = [self.get_path_to_test_data() + START_HTML_FILE]
        self.spider = QuotesSpider2(name=SPIDER_NAME)
        self.spider.start_urls = start_urls
        #self.spider.get_request = self.get_request
        self.spider._follow_links = True
        # The spider's parse is replaced with this test's parse (below).
        self.spider.parse = self.parse
        #self.spider.parse_extract = self.parse_extract
        self.response = self.get_response_object(
            self.get_path_to_test_data() + START_HTML_FILE)
    '''
    def test_quotes_on_page_01(self):
        # When
        actual_result = list(self.spider._parse_response(
            self.response,
            callback=self.spider.parse,
            cb_kwargs={},
            follow=True))
        # Then
        assert EXPECTED_NUMBER_OF_QUOTES_PAGE_01 + 1 == len(actual_result)
        for res in actual_result:
            print(res)
    '''
    def test_quotes_on_page_02(self):
        """Parse the first fixture page; the last yielded item is the
        follow-up request to the next page."""
        # Given
        '''
        first_page_result = list(self.spider._parse_response(
            self.response,
            callback=self.spider.parse,
            cb_kwargs={},
            follow=True))
        '''
        first_page_result = list(self.spider.parse(self.response))
        response = first_page_result[-1:].pop(0)
        # print(response)
        for res in first_page_result:
            print(res)
        '''
        # When
        actual_result = list(self.spider.parse(response)),
        # Then
        #assert EXPECTED_NUMBER_OF_QUOTES_PAGE_02 == len(actual_result)
        print(len(actual_result))
        for res in actual_result:
            print(res)
        '''
    def get_request(self, response, url):
        """Test double for the spider's request helper: serve the URL from disk."""
        response = self.get_response_object(url)
        print("===========get_request TEST================")
        print("url: %s" % response.url)
        return response

    def get_response_object(self, url):
        """Build a scrapy HtmlResponse from a local file given as a file:// URL."""
        path_to_file = url.replace(FILE_SYSTEM_PREFIX, '')
        if path_to_file[-1:] == SLASHE:
            path_to_file = path_to_file[:-1]
        if url[-1:] == SLASHE:
            url = url[:-1]
        f = open(path_to_file, 'rb')
        bytess = f.read()
        f.close()
        return HtmlResponse(url, 200, self.generate_response_headers(),
                            bytess, None, Request(url), encoding='utf-8')

    def generate_response_headers(self):
        """Response headers copied from a real quotes.toscrape.com reply."""
        headers = Headers()
        headers.appendlist('Connection', 'keep-alive')
        headers.appendlist('Content-Encoding', 'gzip')
        headers.appendlist('Content-Type', 'text/html; charset=utf-8')
        headers.appendlist('Date', 'Thu, 20 Feb 2020 00:59:44 GMT')
        headers.appendlist('Server', 'nginx/1.14.0 (Ubuntu)')
        headers.appendlist('Transfer-Encoding', 'chunked')
        headers.appendlist('X-Upstream', 'toscrape-pstrial-2019-12-16_web')
        return headers

    @classmethod
    # NOTE(review): first parameter is named 'self' but this is a classmethod,
    # so it actually receives the class.
    def build_path(self, path, condition, addition):
        """Append `addition` to `path` unless `condition` already occurs in it."""
        if condition not in path:
            return path + addition
        return path

    def parse(self, response):
        """Replacement for the spider's parse(); delegates to parse_extract()."""
        return self.parse_extract(response)

    def parse_extract(self, response):
        """Local re-implementation of the spider's extraction logic."""
        print("========================TEST parse_extract======================")
        print("response: %s" % response)
        print("url: %s" % response.url)
        response = self.get_request(response, response.url)
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').get(),
                'author': quote.xpath('//span/small/text()').get(),
            }
        next_page = response.css('li.next a::attr("href")').get()
        if next_page is not None:
            yield response.follow(self.get_path_to_test_data() + next_page, self.parse_extract)
        # yield self.get_response(response, self.URL + nex#_page)
        #url = self.URL + next_page
        # yield self.get_request(response, url)
        # yield QuotesSpider2.get_request(response=response, url=url)
        # for resp in response:
        #     yield QuotesSpider2.get_request(response=resp, url=url)
# Allow running this test module directly.
if __name__ == '__main__':
    main()
|
994,824 | f82a8950bd3072a73839241220f03c9ddb53b738 | from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from random import choice, sample
import time
import unicodedata
import sys
import logging
_LOG = logging.getLogger(__name__)
from django.db import connection
from django.http import HttpResponseNotFound, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.db.models import Q, Count, Avg, Max, Min
from django.contrib.auth import get_user_model
import pygal
from pygal.maps.world import World as Worldmap
from pygal.style import LightGreenStyle, Style
from nano.countries.models import Country
from cals.forms import *
from cals.tools import compare_features
from cals.modeltools import LANGTYPES, language_alphabetic_letters, \
language_first_letters
from cals.modeltools import languages_ranked_by_averageness, get_langs, \
compare_value_sets, compare_languages, feature_usage, \
language_most_average_internal, set_averageness_for_langs, \
get_averageness_for_lang
from cals.feature.models import FeatureValue, Feature
from cals.language.models import Language
from cals.people.models import Profile
from cals.languagefeature.models import LanguageFeature
from translations.models import Translation
def country_most_common():
    """Countries with at least one profile, most populous first."""
    with_counts = Country.objects.annotate(count=Count('profile'))
    return with_counts.filter(count__gt=0).order_by('-count')
def conlanger_map():
    """Render an SVG world map shaded by number of conlangers per country."""
    per_country = {c.iso.lower(): c.count for c in country_most_common()}
    map_style = Style(
        background='#fff',
        plot_background='#fff',
        foreground='#ffffff',
        foreground_light='#ffffff',
        foreground_dark='#ffffff',
        opacity='.6',
        opacity_hover='.9',
        transition='400ms ease-in',
        colors=('#527C3A', '#E8537A', '#E95355', '#E87653', '#E89B53')
    )
    worldmap = Worldmap(style=map_style)
    worldmap.no_prefix = True
    worldmap.disable_xml_declaration = True
    worldmap.show_legend = False
    worldmap.add('Conlangers', per_country)
    return worldmap.render()
def unused_featurevalues():
    """Returns feature values not used by conlangs.

    That is, values only used by natlangs are also included.  The result is
    a queryset when natlangs have no unique values, otherwise a list of
    FeatureValue objects sorted by id.
    """
    fvs = FeatureValue.objects.filter(feature__active=True)
    unused_fvs = fvs.filter(languages__isnull=True)
    # Values attached to at least one natlang and to no conlang.
    natlang_only_fvs = fvs.filter(languages__language__natlang=True).exclude(languages__language__natlang=False)
    if not natlang_only_fvs:
        # Natlangs had no unique features so return early
        return unused_fvs
    # Replaces the old decorate-sort-undecorate idiom: ids are unique, so
    # sorting by id directly yields the same order.
    return sorted(set(unused_fvs) | set(natlang_only_fvs), key=lambda fv: fv.id)
def timeline():
    "Statistics over which days/weeks/months are most visited/update etc."
    # NOTE(review): this computes four date series but only logs one and
    # returns nothing -- looks unfinished; confirm intent before relying on it.
    User = get_user_model()
    joined = User.objects.dates('date_joined', 'day')
    login = User.objects.dates('last_login', 'day')
    created = Language.objects.dates('created', 'day')
    last_modified = Language.objects.dates('last_modified', 'day')
    _LOG.info('Joined: %s' % joined[:5])
def median(datapoints, n=0):
    """Median of `datapoints`.

    For an even count the two middle values are averaged with integer floor
    division.  `n` optionally overrides the element count used to locate the
    middle (defaults to ``len(datapoints)``).
    """
    ordered = sorted(datapoints)
    count = n or len(ordered)
    mid = count // 2
    if count % 2:
        # odd count: single middle element
        return ordered[mid]
    # even count: floor-averaged middle pair
    return (ordered[mid - 1] + ordered[mid]) // 2
def stddev(datapoints):
    """Population standard deviation of `datapoints`."""
    from math import sqrt
    count = len(datapoints)
    mean = sum(datapoints) / count
    squared_deviations = ((float(value) - mean) ** 2 for value in datapoints)
    return sqrt(sum(squared_deviations) / count)
def vocab_chart(rows):
    """Line chart of the given vocabulary sizes, rendered as an SVG string."""
    line = pygal.Line(style=LightGreenStyle)
    line.disable_xml_declaration = True
    line.show_y_guides = False
    line.show_dots = False
    line.human_readable = True
    line.show_legend = False
    line.add('', rows)
    return line.render()
def vocab_size():
    """Generate statistics on the vocabulary_size-field."""
    # Upper bound: larger vocabularies are reported separately as outliers.
    MAXSIZE = 10000
    # NOTE(review): language id=80 is excluded -- presumably a known bad
    # data point; confirm.
    ls = Language.objects.exclude(id=80).filter(vocabulary_size__gt=0, vocabulary_size__lte=MAXSIZE).conlangs()
    outliers = Language.objects.filter(vocabulary_size__gt=MAXSIZE).order_by('vocabulary_size')
    # Assumes unimodal distribution
    modes = [(mode['count'], mode['vocabulary_size'])
             for mode in ls.values('vocabulary_size').annotate(count=Count('vocabulary_size')).order_by('-count', '-vocabulary_size')
             if mode['count'] > 5]
    # Most frequent vocabulary size (modes is sorted by count, descending).
    mode = modes[0][1]
    avg_maximum_minimum = ls.aggregate(avg=Avg('vocabulary_size'), maximum=Max('vocabulary_size'), minimum=Min('vocabulary_size'))
    avg = avg_maximum_minimum['avg']
    maximum = avg_maximum_minimum['maximum']
    minimum = avg_maximum_minimum['minimum']
    curve = ls.order_by('-vocabulary_size')
    rows = [v.vocabulary_size for v in curve]
    chart_svg = vocab_chart(rows)
    # median
    med = median(rows)
    return {'average': avg,
            'min': minimum,
            'max': maximum,
            'median': med,
            'chart_svg': chart_svg,
            'mode': mode,
            'common': modes,
            'stddev': stddev(rows),
            'outliers': outliers,
            'upper_bound': MAXSIZE}
def get_all_lurkers():
    """Active, visible users with no recorded activity of any kind."""
    inactivity = {
        'edits__isnull': True,
        'manages__isnull': True,
        'translations__isnull': True,
        'languages__isnull': True,
        'translation_exercises__isnull': True,
        'languages_modified__isnull': True,
    }
    users = get_user_model().objects.filter(is_active=True, profile__is_visible=True)
    # Single filter() call: all conditions must hold on the same user row.
    return users.filter(**inactivity)
def country():
    """Conlangers per country: raw counts, a world map and a bar chart."""
    barchart = pygal.Bar(style=LightGreenStyle)
    barchart.add('', [entry.count for entry in country_most_common()])
    barchart.x_labels = [entry.name for entry in country_most_common()]
    barchart.x_label_rotation = 90
    return {
        'most_common': country_most_common(),
        'map': conlanger_map(),
        'chart': barchart.render(),
    }
def generate_feature_stats():
    """Feature-usage statistics for the statistics page."""
    conlangs = Language.objects.conlangs()
    # Languages that exist but have no features filled in yet.
    skeleton_langs = conlangs.filter(num_features=0)
    features = Feature.objects.active()
    num_features = features.count()
    features_mu = feature_usage(langtype=LANGTYPES.CONLANG, limit=20)
    natlang_features_mu = feature_usage(langtype=LANGTYPES.NATLANG, limit=20)
    not_used = unused_featurevalues()
    data = {
        'number': num_features,
        # 142 is presumably the WALS feature count (key name suggests so --
        # confirm).  True division via the __future__ import at file top.
        'percentage_wals': str(142 / num_features * 100),
        'most_used': features_mu,
        'natlang_most_used': natlang_features_mu,
        #'least_used': tuple(reversed(features_mu))[:20],
        'not_used': not_used,
        'num_not_used': len(not_used),
        'skeleton_langs': skeleton_langs,
    }
    return data
def generate_people_stats():
    """Per-country statistics about the site's users."""
    return {'country': country()}
def generate_vocabulary_stats():
    """Vocabulary-size statistics (thin wrapper around vocab_size())."""
    return vocab_size()
def generate_langname_stats():
    """Letter statistics for language names, split by conlang/natlang."""
    stats = {}
    for key, fetch in (('first_letters', language_first_letters),
                       ('alpha_letters', language_alphabetic_letters)):
        stats[key] = {
            'conlangs': fetch(),
            'natlangs': fetch(langtype=LANGTYPES.NATLANG),
        }
    return stats
def generate_averageness_stats():
    """Most and least 'average' languages by stored average_score."""
    conlangs = Language.objects.conlangs()
    natlangs = Language.objects.natlangs()
    # Conlangs with no features set are meaningless here, so exclude them.
    most_average = conlangs.exclude(num_features=0).order_by('-average_score', '-num_features')
    most_average_natlangs = natlangs.order_by('-average_score', '-num_features')[:20]
    lma = most_average.count()
    # Bottom 10 of the same ordering = least average.
    least_average = tuple(most_average)[-10:]
    most_average = most_average[:20]
    data = {
        'most_average': most_average,
        'most_average_natlangs': most_average_natlangs,
        'least_average': least_average,
        # Rank at which the least-average list starts.
        'lma': lma - 10,
    }
    return data
def generate_milestone_stats():
    """Users and conlangs at registration milestones (hard-coded primary keys)."""
    conlangs = Language.objects.conlangs()
    User = get_user_model()
    user_ids = (('user100', 139), ('user150', 200), ('user200', 284),
                ('user250', 336), ('user500', 586))
    lang_ids = (('lang100', 154), ('lang150', 271), ('lang200', 466),
                ('lang250', 526), ('lang500', 776))
    data = {key: User.objects.get(id=pk) for key, pk in user_ids}
    data.update((key, conlangs.get(id=pk)) for key, pk in lang_ids)
    return data
def generate_global_stats():
    "Used by the statistics-view"
    features = Feature.objects.active()
    fvs = FeatureValue.objects.filter(feature__active=True) #value_counts()
    # NOTE(review): 'langs' and 'fvs' are computed but never used below.
    langs = Language.objects.all()
    conlangs = Language.objects.conlangs()
    natlangs = Language.objects.natlangs()
    users = Profile.objects.filter(user__is_active=True, is_visible=True)
    lfs = LanguageFeature.objects.all()
    # Headline counts.  All divisions below are true division thanks to the
    # __future__ import at file top; results are stringified for templates.
    num_features = features.count()
    num_conlangs = conlangs.count()
    num_natlangs = natlangs.count()
    num_langs = num_conlangs + num_natlangs
    num_users = users.count()
    num_lfs = lfs.count()
    num_greetings = Language.objects.conlangs().exclude(greeting__isnull=True).count()
    num_backgrounds = Language.objects.conlangs().exclude(background__isnull=True).exclude(background='').count()
    num_translations = Translation.objects.count()
    num_countries = users.filter(country__isnull=False).count()
    num_lurkers = len(get_all_lurkers())
    data = {}
    data['langs'] = {
        'number': num_conlangs,
        'number_natlangs': num_natlangs,
        'features_per_lang': str(num_lfs / num_langs),
        'percentage_greetings': str(num_greetings / num_langs * 100),
        'percentage_backgrounds': str(num_backgrounds / num_langs * 100),
        'num_translations': num_translations,
    }
    data['features'] = {
        'number': num_features,
        # 142 is presumably the WALS feature count -- confirm.
        'percentage_wals': str(142 / num_features * 100),
    }
    data['users'] = {
        'number': num_users,
        'langs_per_user': str(num_langs / num_users),
        'percentage_countries': str(num_countries / num_users * 100),
        'percentage_lurkers': str(num_lurkers / num_users * 100)
    }
    return data
|
994,825 | 50833190036c025a8659e08a6f49ba093a6d2c93 | from celery import Celery
import time
import logging
from app import config
celery_app = Celery()
celery_app.config_from_object(config)
TASK_PRIORITY_HIGH = 9
TASK_PRIORITY_MEDIUM = 4
TASK_PRIORITY_LOW = 0
@celery_app.task(name="app.one.low", priority=TASK_PRIORITY_LOW)
def one_low():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
@celery_app.task(name="app.one.medium", priority=TASK_PRIORITY_MEDIUM)
def one_medium():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
@celery_app.task(name="app.one.high", priority=TASK_PRIORITY_HIGH)
def one_high():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
@celery_app.task(name="app.two.low", priority=TASK_PRIORITY_LOW)
def two_low():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
@celery_app.task(name="app.two.medium", priority=TASK_PRIORITY_MEDIUM)
def two_medium():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
@celery_app.task(name="app.two.high", priority=TASK_PRIORITY_HIGH)
def two_high():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
@celery_app.task(name="app.three.low", priority=TASK_PRIORITY_LOW)
def three_low():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
@celery_app.task(name="app.three.medium", priority=TASK_PRIORITY_MEDIUM)
def three_medium():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
@celery_app.task(name="app.three.high", priority=TASK_PRIORITY_HIGH)
def three_high():
    # Simulate work by sleeping for the configured duration.
    time.sleep(config.task_sleep_time)
|
994,826 | 3c197da02ec10310d788193a5311291dd0c7f432 | from contextlib import contextmanager
from thenewboston_node.core.clients.node import NodeClient
@contextmanager
def force_node_client(node_client: NodeClient):
    """Temporarily pin NodeClient's instance cache to `node_client`.

    The cache is cleared on exit even if the body raises, so later callers
    get a freshly resolved client instead of the forced one.
    """
    try:
        NodeClient.set_instance_cache(node_client)
        yield
    finally:
        NodeClient.clear_instance_cache()
|
994,827 | 97618039e190da47544cddd8a82b6ae6c42bbb15 | import torch
import torchvision
from ptpt.log import error
from pathlib import Path
def _image_transforms(resize=None):
    """ToTensor + per-channel [-1, 1] normalization, optionally after a resize."""
    steps = []
    if resize is not None:
        steps.append(torchvision.transforms.Resize(resize))
    steps.append(torchvision.transforms.ToTensor())
    steps.append(torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]))
    return torchvision.transforms.Compose(steps)


def _ffhq_split(root, transforms, train_size=60_000):
    """Load an ImageFolder and split into the first `train_size` images and the rest."""
    dataset = torchvision.datasets.ImageFolder(root, transform=transforms)
    train_idx = torch.arange(0, train_size)
    test_idx = torch.arange(train_size, len(dataset))
    return (torch.utils.data.Subset(dataset, train_idx),
            torch.utils.data.Subset(dataset, test_idx))


def get_dataset(task: str):
    """Return ``(train_dataset, test_dataset)`` for the named task.

    Raises:
        ValueError: (after logging via ptpt) for unrecognised task names.
    """
    # The ffhq* branches were previously triplicated verbatim; they share
    # the loading/splitting logic via _ffhq_split and _image_transforms.
    if task in ['ffhq1024', 'ffhq1024-large']:
        train_dataset, test_dataset = _ffhq_split('data/ffhq1024', _image_transforms())
    elif task == 'ffhq256':
        # Reads the 1024px images and downsamples to 256 on the fly.
        train_dataset, test_dataset = _ffhq_split('data/ffhq1024', _image_transforms(resize=256))
    elif task == 'ffhq128':
        train_dataset, test_dataset = _ffhq_split('data/ffhq128', _image_transforms())
    elif task == 'cifar10':
        transforms = _image_transforms()
        train_dataset = torchvision.datasets.CIFAR10('data', train=True, transform=transforms, download=True)
        test_dataset = torchvision.datasets.CIFAR10('data', train=False, transform=transforms, download=True)
    elif task in ('mnist', 'kmnist'):
        # Greyscale digit datasets: ToTensor only, no normalization.
        dataset_cls = (torchvision.datasets.MNIST if task == 'mnist'
                       else torchvision.datasets.KMNIST)
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])
        train_dataset = dataset_cls('data', train=True, transform=transforms, download=True)
        test_dataset = dataset_cls('data', train=False, transform=transforms, download=True)
    else:
        msg = f"unrecognised dataset '{task}'!"
        error(msg)
        raise ValueError(msg)
    return train_dataset, test_dataset
|
994,828 | b8c3acf85983b367f5cfb05abed687caa4297512 | """
클래스 속성 오버라이드
-
"""
|
994,829 | 7d3ae571ef7f6058e8af52bc334ca6d49f097c6f | import xml.etree.ElementTree as ET
from dataclasses import dataclass
from typing import List, Dict, Iterator, Optional
from urllib.parse import urlparse
import requests
from panasonic_camera.discover import discover_panasonic_camera_devices
class RejectError(Exception):
    """Raised when the camera answers a request with 'err_reject'."""
    pass
class BusyError(Exception):
    """Raised when the camera answers a request with 'err_busy'."""
    pass
class CriticalError(Exception):
    """Raised when the camera answers a request with 'err_critical'."""
    pass
class UnsuitableApp(Exception):
    """Raised when the camera answers a request with 'err_unsuitable_app'."""

    def __init__(self, *args) -> None:
        # Canned explanation of the most common cause: DC-FZ80-like cameras
        # require an accctrl registration first.
        self.message = ("Camera replied 'unsuitable_app'. If this camera is"
                        " DC-FZ80 or similar, you probably need to specify the "
                        "identifyAs parameter so that an accctl request will be"
                        " sent.")
        super().__init__(self.message)
@dataclass
class State:
    # Fields mirror the children of the <state> element in a cam.cgi
    # 'getstate' reply (see PanasonicCamera.get_state).
    batt: str          # <batt>
    cammode: str       # <cammode>
    sdcardstatus: str  # <sdcardstatus>
    sd_memory: str     # <sd_memory>
    sd_access: str     # <sd_access>
    version: str       # <version>
@dataclass
class ProductInfo:
    model_name: str  # <productinfo>/<modelname> from the capability reply
@dataclass
class Setting:
    current_value: str  # <curvalue>
    values: List[str]   # <valuelist>, split on commas
@dataclass
class Capability:
    # Parsed form of the 'getinfo type=capability' reply
    # (see PanasonicCamera.get_info_capability).
    comm_proto_ver: str
    product_info: ProductInfo
    commands: List[str]             # <camcmdlist>
    controls: List[str]             # <camctrllist>
    settings: Dict[str, Setting]    # <settinglist>, keyed by element tag
    states: List[str]               # <getstatelist>
    specifications: List[str]       # <camspeclist>
def find_text(element: Optional[ET.Element],
              path: str,
              default: str = '') -> str:
    """Text of the first sub-element matching *path*.

    Returns *default* when *element* is None or no sub-element matches.
    A matching element with no text yields the string 'None'.
    """
    if element is None:
        return default
    match = element.find(path)
    if match is None:
        return default
    return str(match.text)
def find_all_text(element: ET.Element, path: str) -> List[str]:
    """Text of every sub-element matching *path*, in document order."""
    return [str(node.text) for node in element.iterfind(path)]
def find_elements(element: ET.Element, path: str) -> Iterator[ET.Element]:
    """Yield the direct children of the first sub-element matching *path*.

    Yields nothing when there is no match.
    """
    container = element.find(path)
    if container is None:
        return
    for child in list(container):
        if child is not None:
            yield child
class PanasonicCamera:
    """Thin HTTP client for the cam.cgi remote-control endpoint exposed by
    Panasonic (Lumix) cameras."""

    def __init__(self, hostname: str) -> None:
        # Every API call goes through this single CGI endpoint.
        self.cam_cgi_url = 'http://{}/cam.cgi'.format(hostname)

    @staticmethod
    def _validate_camrply(camrply: ET.Element) -> None:
        """Map error result codes in a <camrply> document to typed exceptions."""
        assert camrply.tag == 'camrply'
        result = find_text(camrply, 'result')
        if result == 'err_reject':
            raise RejectError(result)
        elif result == 'err_busy':
            raise BusyError(result)
        elif result == 'err_critical':
            raise CriticalError(result)
        elif result == 'err_unsuitable_app':
            raise UnsuitableApp(result)
        assert result == 'ok', 'unknown result "{}"'.format(result)

    def _request_xml(self, *args, **kwargs) -> ET.Element:
        """GET cam.cgi, parse the XML reply and validate its result code."""
        kwargs.setdefault('timeout', 2)
        response = requests.get(self.cam_cgi_url, *args, **kwargs)
        camrply: ET.Element = ET.fromstring(response.text)
        self._validate_camrply(camrply)
        return camrply

    def _request_csv(self, *args, **kwargs) -> List[str]:
        """GET cam.cgi and split the comma-separated reply (not validated)."""
        kwargs.setdefault('timeout', 2)
        response = requests.get(self.cam_cgi_url, *args, **kwargs)
        camrply: List[str] = response.text.split(',')
        return camrply

    def get_state(self) -> State:
        """Fetch the camera's current state (battery, mode, SD card, version)."""
        camrply: ET.Element = self._request_xml(params={'mode': 'getstate'})
        state = camrply.find('state')
        return State(
            batt=find_text(state, 'batt'),
            cammode=find_text(state, 'cammode'),
            sdcardstatus=find_text(state, 'sdcardstatus'),
            sd_memory=find_text(state, 'sd_memory'),
            sd_access=find_text(state, 'sd_access'),
            version=find_text(state, 'version'))

    def _camcmd(self, value: str) -> None:
        """Send a 'camcmd' request with the given command value."""
        self._request_xml(params={'mode': 'camcmd', 'value': value})

    # The following methods are thin wrappers over individual camcmd values.
    def recmode(self) -> None:
        self._camcmd('recmode')

    def playmode(self) -> None:
        self._camcmd('playmode')

    def video_recstart(self) -> None:
        self._camcmd('video_recstart')

    def video_recstop(self) -> None:
        self._camcmd('video_recstop')

    def zoom_stop(self) -> None:
        self._camcmd('zoomstop')

    def zoom_in_slow(self) -> None:
        self._camcmd('tele-normal')

    def zoom_in_fast(self) -> None:
        self._camcmd('tele-fast')

    def zoom_out_slow(self) -> None:
        self._camcmd('wide-normal')

    def zoom_out_fast(self) -> None:
        self._camcmd('wide-fast')

    def _get_info(self, info_type: str) -> ET.Element:
        """Issue a 'getinfo' request for the given info type."""
        return self._request_xml(params={'mode': 'getinfo', 'type': info_type})

    def get_info_capability(self) -> Capability:
        """Fetch and parse the camera's capability description."""
        camrply: ET.Element = self._get_info('capability')
        # print(ET.tostring(camrply, encoding='utf8', method='xml').decode())
        # print(generate_class('Capability', camrply))
        settings: Dict[str, Setting] = {
            e.tag: Setting(current_value=find_text(e, 'curvalue'),
                           values=find_text(e, 'valuelist').split(','))
            for e in find_elements(camrply, 'settinglist')
        }
        return Capability(
            comm_proto_ver=find_text(camrply, 'comm_proto_ver'),
            product_info=ProductInfo(
                model_name=find_text(camrply, 'productinfo/modelname')),
            commands=find_all_text(camrply, 'camcmdlist/camcmd'),
            controls=find_all_text(camrply, 'camctrllist/camctrl'),
            settings=settings,
            states=find_all_text(camrply, 'getstatelist/getstate'),
            specifications=find_all_text(camrply, 'camspeclist/camspec'))

    def register_with_camera(self, identify_as: str):
        # Cameras like the DC-FZ80 keep a list of devices that remote
        # control them. This request adds the current device to the list with
        # a name specified by device_name.
        return self._request_csv(
            params={
                'mode': 'accctrl',
                'type': 'req_acc',
                'value': '0',
                'value2': identify_as
            })

    def start_stream(self, port=49199):
        """Ask the camera to start streaming to the given port."""
        return self._request_xml(
            params={'mode': 'startstream', 'value': port})

    def stop_stream(self):
        """Stop a previously started stream."""
        return self._request_xml(
            params={'mode': 'stopstream'})
def generate_class(name, xml_element: ET.Element):
    """Generate Python source for a class whose constructor mirrors the
    children of *xml_element*.

    Dev helper only -- see the commented-out call in get_info_capability().
    """
    parameter = ['self']
    constructor_body = []
    child: ET.Element
    for child in xml_element:
        parameter_name: str = child.tag
        parameter_type = None
        if not list(child):
            # Leaf element: a plain string parameter.
            parameter_type = 'str'
        elif parameter_name.endswith('list'):
            # '<foo...list>' container: assume a class named after the prefix.
            parameter_type = parameter_name[0].upper() + parameter_name[1:-4]
        if parameter_type:
            parameter.append('{}: {}'.format(parameter_name, parameter_type))
        else:
            parameter.append(parameter_name)
        constructor_body.append('self.{0} = {0}'.format(parameter_name))
    if len(parameter) == 1:
        # No children at all: emit an empty constructor.
        constructor_body_str = 'pass'
    else:
        constructor_body_str = '\n        '.join(constructor_body)
    return """\
class {name}:
    def __init__(
            {constructor_parameter_list}):
        {constructor_body}
""".format(
        name=name,
        constructor_parameter_list=',\n            '.join(parameter),
        constructor_body=constructor_body_str)
# Ad-hoc smoke test: discover cameras on the LAN and dump their state
# and capabilities.
if __name__ == '__main__':
    from pprint import pprint
    devices = discover_panasonic_camera_devices()
    for device in devices:
        print(device)
        camera = PanasonicCamera(urlparse(device.location).hostname)
        pprint(camera.get_state())
        pprint(camera.get_info_capability().__dict__)
|
994,830 | fa8ea38f7b212434db36e70099da20b9dab10685 | from unityagents import UnityEnvironment
from maddpg import MADDPG
from ddpg import ReplayBuffer
import numpy as np
import torch
import matplotlib.pyplot as plt
from collections import deque
# Watch two trained MADDPG agents play one episode of the Unity Tennis
# environment using saved actor weights.
env = UnityEnvironment(file_name = "Tennis.app")
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# Rebuild the agent and load both actors' weights, mapping to CPU.
agent = MADDPG(discount_factor = 0.99, tau = 0.02, batch_size = 256)
agent.maddpg_agent[0].actor.load_state_dict(torch.load('bin/actor0_finished.pth', map_location = lambda storage, loc: storage))
agent.maddpg_agent[1].actor.load_state_dict(torch.load('bin/actor1_finished.pth', map_location = lambda storage, loc: storage))
# train_mode=False: run at real-time speed for viewing.
env_info = env.reset(train_mode = False)[brain_name]
state = env_info.vector_observations
state = torch.from_numpy(np.array(state)).float().unsqueeze(0)
score = np.zeros(2)  # one score per agent
while True:
    # Second argument 0: no exploration noise.
    actions = agent.act(state, 0)
    actions_array = torch.stack(actions).detach().numpy()
    env_info = env.step(actions_array)[brain_name]
    next_state = env_info.vector_observations
    next_state = torch.from_numpy(np.array(next_state)).float().unsqueeze(0)
    reward = np.array(env_info.rewards).reshape(1, -1)
    dones = np.array(env_info.local_done).reshape(1, -1)
    actions_array = actions_array.reshape(1, -1)
    score += reward[0]
    state = next_state
    if np.any(dones):
        break
# Episode score is the better of the two agents' totals.
print(np.max(score))
env.close()
994,831 | 5c8a557a4fdd43e15765e9f9eb56cd688e005b19 | from django.shortcuts import render, redirect, get_object_or_404, HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils import timezone
from django.contrib import messages
from suds.client import Client
from ecommerce.local_settings import ZARINPAL_MERCHANT_KEY, ZARINPAL_WEB_GATE_URL, \
ZARINPAL_DESCRIPTION, ZARINPAL_CALLBACK_URL, ZARINPAL_START_PAY_URL
from .models import Order, OrderItem, Coupon
from .forms import CouponForm
from cart.cart import Cart
@login_required
def create_order_view(request):
    """Create an Order (one OrderItem per cart line) and show its detail page."""
    cart = Cart(request)
    order = Order.objects.create(user=request.user)
    for item in cart:
        OrderItem.objects.create(order=order, product=item['product'], price=item['price'], quantity=item['quantity'])
    return redirect('orders:order_detail', order.id)
@login_required
def order_detail_view(request, order_id):
    """Render an order's detail page with an empty coupon form."""
    context = {
        'order': get_object_or_404(Order, id=order_id),
        'form': CouponForm(),
    }
    return render(request, 'orders/order_detail.html', context)
@require_POST
def apply_coupon_view(request, order_id):
    """Apply a currently-valid coupon code to an order, then redirect back.

    Always redirects to the order detail page, whether the coupon was
    applied, unknown, or the submitted form was invalid.
    """
    now = timezone.now()
    form = CouponForm(request.POST)
    if form.is_valid():
        code = form.cleaned_data['code']
        try:
            # A coupon must be active and within its validity window.
            coupon = Coupon.objects.get(code__exact=code, valid_from__lte=now,
                                        valid_to__gte=now, is_active=True)
        except Coupon.DoesNotExist:
            # Fixed typo in the user-facing message ('dose' -> 'does').
            messages.error(request, 'This coupon does not exist', 'danger')
            return redirect('orders:order_detail', order_id)
        order = Order.objects.get(id=order_id)
        order.discount = coupon.discount
        order.save()
        messages.success(request, 'coupon is applied successfully', 'success')
    # BUG FIX: an invalid form previously fell through and returned None,
    # which made Django raise a 500; now it redirects back to the order.
    return redirect('orders:order_detail', order_id)
client = Client(ZARINPAL_WEB_GATE_URL)
@login_required
def payment(request, order_id):
    """Start a Zarinpal payment for the order and redirect to the gateway."""
    order = get_object_or_404(Order, id=order_id)
    amount = order.get_total_price()
    phone = request.user.phone
    email = request.user.email
    if email:
        result = client.service.PaymentRequest(
            ZARINPAL_MERCHANT_KEY, amount, ZARINPAL_DESCRIPTION, email, phone, ZARINPAL_CALLBACK_URL + str(order_id)
        )
    else:
        # NOTE(review): this call passes one argument fewer, so `phone` lands
        # in the position `email` occupies above -- confirm against the
        # Zarinpal SOAP signature that this is intentional.
        result = client.service.PaymentRequest(
            ZARINPAL_MERCHANT_KEY, amount, ZARINPAL_DESCRIPTION, phone, ZARINPAL_CALLBACK_URL + str(order_id)
        )
    if result.Status == 100:
        # Status 100: request accepted; send the user to the pay page.
        return redirect(ZARINPAL_START_PAY_URL + str(result.Authority))
    else:
        return HttpResponse('Error code: ' + str(result.Status))
@login_required
def verify(request, order_id):
    """Zarinpal callback: verify the transaction and mark the order paid."""
    if request.GET.get('Status') == 'OK':
        order = Order.objects.get(id=order_id)
        amount = order.get_total_price()
        result = client.service.PaymentVerification(ZARINPAL_MERCHANT_KEY, request.GET['Authority'], amount)
        if result.Status == 100:
            # Verified: empty the cart and flag the order as paid.
            cart = Cart(request)
            cart.clear()
            order.paid = True
            order.save()
            messages.success(request, 'Transaction was successful', 'success')
            return redirect('shop:home')
        elif result.Status == 101:
            # Status 101: presumably 'already verified' -- confirm with
            # the Zarinpal status-code documentation.
            return HttpResponse('Transaction submitted')
        else:
            return HttpResponse('Transaction failed.')
    else:
        return HttpResponse('Transaction failed or canceled by user')
|
994,832 | a24e3caafe7ee44d38a93f27de6edaa47a2a9ae7 | # Classes
# -> Making an object from a class is called instantiation, and you work with instances of a class.
# In this chapter you’ll write classes and create instances of those classes.
# You’ll specify the kind of information that can be stored in instances, and you’ll define actions that can be taken with these instances.
# You’ll also write classes that extend the functionality of existing classes, so similar classes can share code efficiently.
# You’ll store your classes in modules and import classes written by other programmers into your own program files.
# Creating the Dog Class
# -> Each instance created from the Dog class will store a name and an age, and we’ll give each dog the ability to sit() and roll_over()
class Dog:
    """A pet dog that knows its name and age and can do two tricks."""

    def __init__(self, name, age):
        """Remember the dog's name and age."""
        self.name = name
        self.age = age

    def sit(self):
        """Announce that the dog is sitting."""
        print("{} is now sitting.".format(self.name))

    def roll_over(self):
        """Announce that the dog rolled over."""
        print("{} rolled over!".format(self.name))
# -> At 13 we define a class called Dog.
# By convention, capitalized names refer to classes in Python.
# There are no parentheses in the class definition because we’re creating this class from scratch.
# The __init__() Method
# -> A function that’s part of a class is a method.
# Everything you learned about functions applies to methods as well; the only practical difference for now is the way we’ll call methods.
# The __init__() method at 14 is a special method that Python runs automatically whenever we create a new instance based on the Dog class.
# This method has two leading underscores and two trailing underscores, a convention that helps prevent Python’s default method names from conflicting with your method names.
# Make sure to use two underscores on each side of __init__().
# If you use just one on each side, the method won’t be called automatically when you use your class, which can result in errors that are difficult to identify.
# -> We define the __init__() method to have three parameters: self, name, and age. The self parameter is required in the method definition, and it must come first before the other parameters.
# It must be included in the definition because when Python calls this method later (to create an instance of Dog), the method call will automatically pass the self argument.
# Every method call associated with an instance automatically passes self, which is a reference to the instance itself; it gives the individual instance access to the attributes and methods in the class.
# When we make an instance of Dog, Python will call the __init__() method from the Dog class. We’ll pass Dog() a name and an age as arguments; self is passed automatically, so we don’t need to pass it.
# Whenever we want to make an instance from the Dog class, we’ll provide values for only the last two parameters, name and age.
# -> The two variables defined at 15 each have the prefix self.
# Any variable prefixed with self is available to every method in the class, and we’ll also be able to access these variables through any instance created from the class.
# The line self.name = name takes the value associated with the parameter name and assigns it to the variable name, which is then attached to the instance being created.
# The same process happens with self.age = age. Variables that are accessible through instances like this are called attributes.
# -> The Dog class has two other methods defined: sit() and roll_over() 19.
# Because these methods don’t need additional information to run, we just define them to have one parameter, self.
# The instances we create later will have access to these methods.
# In other words, they’ll be able to sit and roll over.
# For now, sit() and roll_over() don’t do much. They simply print a message saying the dog is sitting or rolling over.
# But the concept can be extended to realistic situations: if this class were part of an actual computer game, these methods would contain code to make an animated dog sit and roll over.
# If this class was written to control a robot, these methods would direct movements that cause a robotic dog to sit and roll over.
# Making an Instance from a Class
# -> Think of a class as a set of instructions for how to make an instance.
# The class Dog is a set of instructions that tells Python how to make individual instances representing specific dogs.
my_dog = Dog('Willie', 6)
print(f"My dog's name is {my_dog.name}.")
print(f"My dog is {my_dog.age} years old.")
# -> At 58 we tell Python to create a dog whose name is 'Willie' and whose age is 6.
# When Python reads this line, it calls the __init__() method in Dog with the arguments 'Willie' and 6.
# The __init__() method creates an instance representing this particular dog and sets the name and age attributes using the values we provided.
# Python then returns an instance representing this dog. We assign that instance to the variable my_dog.
# Accessing Attributes
# -> To access the attributes of an instance, you use dot notation. At ➋ we access the value of my_dog’s attribute name by writing:
# my_dog.name
# -> Dot notation is used often in Python.
# This syntax demonstrates how Python finds an attribute’s value.
# Here Python looks at the instance my_dog and then finds the attribute name associated with my_dog.
# This is the same attribute referred to as self.name in the class Dog. At 61 we use the same approach to work with the attribute age.
# Calling Methods
# -> After we create an instance from the class Dog, we can use dot notation to call any method defined in Dog. Let’s make our dog sit and roll over.
my_dog.sit()
my_dog.roll_over()
# -> To call a method, give the name of the instance (in this case, my_dog) and the method you want to call, separated by a dot.
# When Python reads my_dog.sit(), it looks for the method sit() in the class Dog and runs that code.
# Python interprets the line my_dog.roll_over() in the same way.
# Creating Multiple Instances
# -> You can create as many instances from a class as you need. Let’s create a second dog called your_dog.
my_dog = Dog('Willie', 6)
your_dog = Dog('Lucy', 3)
print(f"\nMy dog's name is {my_dog.name}.")
print(f"My dog is {my_dog.age} years old.")
my_dog.sit()
print(f"\nYour dog's name is {your_dog.name}.")
print(f"Your dog is {your_dog.age} years old.")
your_dog.sit()
# -> In this example we create a dog named Willie and a dog named Lucy.
# Each dog is a separate instance with its own set of attributes, capable of the same set of actions.
# -> Even if we used the same name and age for the second dog, Python would still create a separate instance from the Dog class.
# You can make as many instances from one class as you need, as long as you give each instance a unique variable name or it occupies a unique spot in a list or dictionary.
# Working with Classes and Instances
# -> You can use classes to represent many real-world situations.
# Once you write a class, you’ll spend most of your time working with instances created from that class.
# One of the first tasks you’ll want to do is modify the attributes associated with a particular instance.
# You can modify the attributes of an instance directly or write methods that update attributes in specific ways.
# -> Let’s write a new class representing a car.
# Our class will store information about the kind of car we’re working with, and it will have a method that summarizes this information.
class Car:
    """Represent a car by its make, model, and model year."""

    def __init__(self, make, model, year):
        """Initialize attributes to describe a car."""
        self.make = make
        self.model = model
        self.year = year

    def get_descriptive_name(self):
        """Return a title-cased, newline-prefixed summary of the car."""
        parts = (self.year, self.make, self.model)
        long_name = "\n" + " ".join(str(part) for part in parts)
        return long_name.title()
my_new_car = Car('audi', 'a4', 2019)
print(my_new_car.get_descriptive_name())
# -> At 122 in the Car class, we define the __init__() method with the self parameter first, just like we did before with our Dog class.
# We also give it three other parameters: make, model, and year. The __init__() method takes in these parameters and assigns them to the attributes that will be associated with instances made from this class.
# When we make a new Car instance, we’ll need to specify a make, model, and year for our instance.
# At 127 we define a method called get_descriptive_name() that puts a car’s year, make, and model into one string neatly describing the car.
# This will spare us from having to print each attribute’s value individually.
# To work with the attribute values in this method, we use self.make, self.model, and self.year.
# At 130 we make an instance from the Car class and assign it to the variable my_new_car.
# Then we call get_descriptive_name() to show what kind of car we have.
# Setting a Default Value for an Attribute
# -> When an instance is created, attributes can be defined without being passed in as parameters.
# These attributes can be defined in the __init__() method, where they are assigned a default value.
# -> Let’s add an attribute called odometer_reading that always starts with a value of 0. We’ll also add a method read_odometer() that helps us read each car’s odometer.
class Car:
    """Represent a car; every new car starts with a zeroed odometer."""

    def __init__(self, make, model, year):
        """Initialize attributes to describe a car."""
        self.make = make
        self.model = model
        self.year = year
        # Default attribute: new cars have no miles on them yet.
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a title-cased, newline-prefixed summary of the car."""
        summary = f"\n{self.year} {self.make} {self.model}"
        return summary.title()

    def read_odometer(self):
        """Print the car's current mileage."""
        message = f"This car has {self.odometer_reading} miles on it."
        print(message)
my_new_car = Car('audi', 'a4', 2019)
print(my_new_car.get_descriptive_name())
my_new_car.read_odometer()
# -> This time when Python calls the __init__() method to create a new instance, it stores the make, model, and year values as attributes like it did in the previous example.
# Then Python creates a new attribute called odometer_reading and sets its initial value to 0.
# We also have a new method called read_odometer() that makes it easy to read a car’s mileage.
# Modifying Attribute Values
# -> You can change an attribute’s value in three ways: you can change the value directly through an instance, set the value through a method, or increment the value (add a certain amount to it) through a method. Let’s look at each of these approaches.
# Modifying an Attribute’s Value Directly
# -> The simplest way to modify the value of an attribute is to access the attribute directly through an instance. Here we set the odometer reading to 23 directly.
class Car:
    """Represent a car whose odometer attribute can be set directly."""

    def __init__(self, make, model, year):
        """Initialize attributes to describe a car."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a title-cased, newline-prefixed summary of the car."""
        return f"\n{self.year} {self.make} {self.model}".title()

    def read_odometer(self):
        """Print the car's current mileage."""
        print(f"This car has {self.odometer_reading} miles on it.")
my_new_car = Car('audi', 'a4', 2019)
print(my_new_car.get_descriptive_name())
my_new_car.odometer_reading = 23
my_new_car.read_odometer()
# -> At 192 we use dot notation to access the car’s odometer_reading attribute and set its value directly.
# This line tells Python to take the instance my_new_car, find the attribute odometer_reading associated with it, and set the value of that attribute to 23.
# Modifying an Attribute’s Value Through a Method
# -> It can be helpful to have methods that update certain attributes for you.
# Instead of accessing the attribute directly, you pass the new value to a method that handles the updating internally.
class Car:
    """Represent a car whose odometer can be set through a method."""

    def __init__(self, make, model, year):
        """Initialize attributes to describe a car."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a title-cased, newline-prefixed summary of the car."""
        return f"\n{self.year} {self.make} {self.model}".title()

    def read_odometer(self):
        """Print the car's current mileage."""
        print(f"This car has {self.odometer_reading} miles on it.")

    def updated_odometer(self, milage):
        """Set the odometer reading to the given value.

        NOTE: the book names this update_odometer and spells the parameter
        'mileage'; the names are kept as-is so the demo calls keep working.
        """
        self.odometer_reading = milage
my_new_car = Car('audi', 'a4', 2019)
print(my_new_car.get_descriptive_name())
my_new_car.updated_odometer(23)
my_new_car.read_odometer()
# -> The only modification to Car is the addition of update_odometer() at 215.
# This method takes in a mileage value and assigns it to self.odometer_reading. At 219 we call update_odometer() and give it 23 as an argument (corresponding to the mileage parameter in the method definition).
# It sets the odometer reading to 23, and read_odometer() prints the reading.
# -> We can extend the method update_odometer() to do additional work every time the odometer reading is modified.
# Let’s add a little logic to make sure no one tries to roll back the odometer reading.
class Car:
    """Represent a car whose odometer can only move forward."""

    def __init__(self, make, model, year):
        """Initialize attributes to describe a car."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a title-cased, newline-prefixed summary of the car."""
        return f"\n{self.year} {self.make} {self.model}".title()

    def read_odometer(self):
        """Print the car's current mileage."""
        print(f"This car has {self.odometer_reading} miles on it.")

    def updated_odometer(self, milage):
        """Set the odometer, refusing any attempt to roll it back."""
        if milage < self.odometer_reading:
            print("You cant roll back an odometer!")
        else:
            self.odometer_reading = milage
my_new_car = Car('audi', 'a4', 2019)
print(my_new_car.get_descriptive_name())
my_new_car.updated_odometer(23)
my_new_car.read_odometer()
# -> Now update_odometer() checks that the new reading makes sense before modifying the attribute. If the new mileage, mileage, is greater than or equal to the existing mileage, self.odometer_reading, you can update the odometer reading to the new mileage 242.
# If the new mileage is less than the existing mileage, you’ll get a warning that you can’t roll back an odometer 245.
# Incrementing an Attribute’s Value Through a Method
class Car:
    """Represent a car whose odometer can be set (never rolled back) or
    incremented by a number of miles."""

    def __init__(self, make, model, year):
        """Initialize attributes to describe a car."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a title-cased, newline-prefixed summary of the car."""
        return f"\n{self.year} {self.make} {self.model}".title()

    def read_odometer(self):
        """Print the car's current mileage."""
        print(f"This car has {self.odometer_reading} miles on it.")

    def updated_odometer(self, milage):
        """Set the odometer, refusing any attempt to roll it back."""
        if milage < self.odometer_reading:
            print("You cant roll back an odometer!")
        else:
            self.odometer_reading = milage

    def increment_odometer(self, miles):
        """Add the given number of miles to the odometer reading."""
        self.odometer_reading += miles
my_new_car = Car('subaru', 'outback', 2015)
print(my_new_car.get_descriptive_name())
my_new_car.updated_odometer(23500)
my_new_car.read_odometer()
my_new_car.increment_odometer(100)
my_new_car.read_odometer()
# -> The new method increment_odometer() at 273 takes in a number of miles, and adds this value to self.odometer_reading.
# At 275 we create a used car, my_used_car.
# We set its odometer to 23,500 by calling update_odometer() and passing it 23_500 at 277.
# At 279 we call increment_odometer() and pass it 100 to add the 100 miles that we drove between buying the car and registering it. |
994,833 | 2b294832a25c98e0d21bcc45ec7cbe1143061b3f | import socket
import sys
import select
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('localhost',5285))
r,w,e = select.select([s],[],[])
msg = r[0].recv(64)
if not msg:
print 'server connection dropped'
|
994,834 | bed6e9964d4d2994925788be9b76958d5a84e968 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 2 16:56:55 2017
@author: shivam
"""
import numpy as np
import os
from skimage.feature import hog
from skimage import color, transform
from skimage.io import imread,imshow
from sklearn import svm
from sklearn.externals import joblib
def getkey(n):
    """Map a 1-based class index to its character label.

    1-10  -> '0'-'9'
    11-36 -> 'A'-'Z'
    37-62 -> 'a'-'z'
    """
    if n <= 10:
        offset = 47   # ord('0') - 1
    elif n <= 36:
        offset = 54   # ord('A') - 11
    else:
        offset = 60   # ord('a') - 37
    return chr(n + offset)
# Load the trained SVM and classify one image via HOG features.
classifier = joblib.load(os.getcwd() + "/savedSVMs/svmfnt-30-10-17.pkl")
# NOTE(review): `x` is never defined in this script -- a line such as
# `x = imread(<image path>)` appears to be missing (imread is imported but
# unused). As written, the next line raises NameError.
img = transform.resize(x,(128,128), mode='reflect')
# NOTE(review): `visualise` was renamed `visualize` in newer scikit-image
# releases -- confirm against the installed version.
feature, hog_img = hog(img, orientations=9, block_norm='L2-Hys', pixels_per_cell=(12, 12),
cells_per_block=(2, 2), visualise=True)
fd = feature.reshape(1,-1)
# NOTE(review): fd1 is computed but never used.
fd1 = feature.reshape(-1,1)
getkey(classifier.predict(fd))
994,835 | 8cc2205c9a20450571b5332d6988b0df319f17fd | import sys
import math
import os
import osfiles
import argparse
import molio
from org.nmrfx.structure.chemistry.energy import RNARotamer
from org.nmrfx.structure.chemistry.energy import Dihedral
from org.nmrfx.structure.rna import RNAAnalysis
def dumpDis(mol, fileName='distances.txt', delta=0.5, atomPat='*.H*',maxDis=4.5,prob=1.1,fixLower=0.0):
    """ Writes a file of distance constraints derived from the actual
    distances between selected atom pairs of the molecule.

    # Parameters:

    * mol (Molecule); molecule whose atom pairs are measured
    * fileName (string); name of the output dump file
    * delta (float); half-width used to build upper/lower bounds
    * atomPat (string); atom selection pattern (default: all H atoms)
    * maxDis (float); maximum distance for a pair to be reported
    * prob (float); keep each pair with this probability (>=1.0 keeps all)
    * fixLower (float); if > 1.0, use this fixed lower bound instead of
      distance - delta
    """
    import random  # FIX: 'random' was used below but never imported (NameError when prob < 1.0)

    mol.selectAtoms(atomPat)
    pairs = mol.getDistancePairs(maxDis,False)
    with open(fileName,'w') as fOut:
        for pair in pairs:
            # Optional random subsampling; the default prob of 1.1 keeps
            # every pair.
            if prob < 1.0:
                if random.random() > prob:
                    continue
            (atom1,atom2,distance) = pair.toString().split()
            # Strip the two-character entity prefix and rebuild the names
            # as residue.atomName.
            (res1,aname1) = atom1[2:].split('.')
            (res2,aname2) = atom2[2:].split('.')
            atom1 = res1+'.'+aname1
            atom2 = res2+'.'+aname2
            distance = float(distance)
            # Only inter-residue distances are written out.
            if res1 != res2:
                upper = distance + delta
                if fixLower > 1.0:
                    lower = fixLower
                else:
                    lower = distance - delta
                outStr = "%s %s %.1f %.1f\n" % (atom1,atom2,lower,upper)
                fOut.write(outStr)
def genResidueList(resString):
    """Expand a residue specification like "3,5-9,-4--2" into a set of
    residue-number strings; return None for an empty specification.

    Each comma-separated element is either a single residue number
    (possibly negative) or a range.  Ranges whose upper bound is negative
    use '--' as the separator (e.g. "-5--2").
    """
    if resString == '':
        return None
    residues = []
    for elem in resString.split(','):
        elem = elem.strip()
        sign = ''
        if elem.startswith('-'):
            # Leading '-' is the sign of the first number, not a separator.
            sign = '-'
            elem = elem[1:]
        if '-' not in elem:
            residues.append(sign + elem)
            continue
        if '--' in elem:
            lowStr, highStr = elem.split('--')
            low = int(sign + lowStr)
            high = int('-' + highStr)
        else:
            lowStr, highStr = elem.split('-')
            low = int(sign + lowStr)
            high = int(highStr)
        for r in range(low, high + 1):
            residues.append(str(r))
    return set(residues)
def rnaDotBracket(mol, fileName='stdout'):
    """Return (viennaString, dict) where the dict maps each RNA residue's
    string id to its dot-bracket (Vienna) character.

    NOTE(review): fileName is accepted but never used here.
    """
    rnaResidues = [residue for polymer in mol.getPolymers() if polymer.isRNA() for residue in polymer.getResidues()]
    vienna = RNAAnalysis.getViennaSequence(mol)
    dotBracketDict = {}
    # Residues and Vienna characters are parallel sequences.
    for rnaResidue,dotBracket in zip(rnaResidues, vienna):
        dotBracketDict[rnaResidue.toString()]=str(dotBracket)
    # Collapse the per-residue characters into a single string.
    vienna = ''.join(vienna)
    return vienna,dotBracketDict
def sequence(mol, fileName='stdout'):
    """Print each polymer's name and type, then its residues one per line.

    NOTE(review): fileName is accepted but never used; output always goes
    to stdout.
    """
    for polymer in mol.getPolymers():
        type = polymer.getPolymerType()
        print polymer.getName(), type
        for residue in polymer.getResidues():
            print residue
def getRNAPairs(mol):
    """Return a dict mapping each paired residue's string id to a tuple
    (partnerId, pairType, pairedNames) describing its base pair.

    Both residues of every pair receive an entry, so the mapping can be
    queried from either side.
    """
    pairDict = {}
    for pair in RNAAnalysis.getPairList(mol, -1):
        resA, resB = pair.getResA(), pair.getResB()
        pairType = pair.getType()
        idA, idB = resA.toString(), resB.toString()
        nameA, nameB = resA.getName(), resB.getName()
        pairDict[idA] = (idB, pairType, nameA + nameB)
        pairDict[idB] = (idA, pairType, nameB + nameA)
    return pairDict
def rnaSuite(mol, includeResidues, fileName='stdout'):
    """Write a per-residue RNA suite report: residue id, dot-bracket
    character, base-pair info, rotamer score, pseudorotation angle and chi.

    includeResidues may be a set of residue-number strings to restrict the
    report, or None for all residues.  Output goes to stdout or to
    fileName.
    """
    if fileName == 'stdout':
        fOut = sys.stdout
    else:
        fOut = open(fileName,'w')
    rnaDotBracketDict = None
    for polymer in mol.getPolymers():
        if polymer.isRNA():
            # Compute the molecule-wide dictionaries lazily, once.
            if rnaDotBracketDict == None:
                vienna, rnaDotBracketDict = rnaDotBracket(mol)
                rnaPairDict = getRNAPairs(mol)
            # NOTE(review): polyName is computed but never used.
            polyName = polymer.getName()
            for residue in polymer.getResidues():
                if includeResidues != None:
                    if residue.getNumber() not in includeResidues:
                        continue
                # Chi torsion, converted from radians to degrees.
                chi = residue.calcChi()
                if chi != None:
                    chi = chi * 180.0 / math.pi
                    chi = "%6.1f" % (chi)
                else:
                    chi = "na"
                # Sugar pucker pseudorotation from the nu2/nu3 torsions,
                # normalized into (-180, 180].
                nu2 = residue.calcNu2()
                nu3 = residue.calcNu3()
                if nu2 != None and nu3 != None:
                    pseudo = Dihedral.calcPseudoAngle(nu2, nu3)
                    pseudoAngle = pseudo[0]*180.0/math.pi
                    if pseudoAngle > 180.0:
                        pseudoAngle = pseudoAngle - 360.0
                    # NOTE(review): pucker is computed but never used.
                    pucker = pseudo[1]*180.0/math.pi
                    pseudoAngle = "%6.1f" % (pseudoAngle)
                else:
                    pseudoAngle = "na"
                resID = residue.toString()
                dotBracket = rnaDotBracketDict[resID]
                # Defaults for residues that are not base-paired.
                resPair = '_'
                type = 0
                pair = '_'
                if resID in rnaPairDict:
                    resPair,type,pair = rnaPairDict[resID]
                rotamerScore = RNARotamer.scoreResidue(residue)
                if rotamerScore != None:
                    # NOTE(review): the %102s field width looks unusually
                    # wide -- possibly a typo for %10s; confirm intent.
                    outStr = "%8s %2s %2s %2d %8s %102s %6s %6s\n" %(resID,dotBracket,pair,type,resPair, rotamerScore.report(), pseudoAngle, chi)
                    fOut.write(outStr)
                else:
                    outStr = "%8s %2s %2s %2d %8s\n" %(resID,dotBracket,pair, type, resPair)
                    fOut.write(outStr)
    if fileName != 'stdout':
        fOut.close()
def loadStructure(fileName, xMode=False, iStruct=0):
    """Load a molecule from a .pdb, .cif, .sdf, or .mol file and return it.

    xMode applies only to .pdb input: when True the file is read with the
    X reader and the coordinates are loaded in a separate pass.
    Exits the program on an unrecognized file extension.
    """
    if fileName.endswith('.pdb'):
        if (xMode):
            mol = molio.readPDBX(fileName)
            molio.readPDBXCoords(fileName, -1, True, False)
        else:
            mol = molio.readPDB(fileName, iStruct=iStruct)
    elif fileName.endswith('.cif'):
        mol = molio.readMMCIF(fileName)
    elif fileName.endswith('.sdf'):
        cmpd = molio.readSDF(fileName)
        mol = cmpd.molecule
    elif fileName.endswith('.mol'):
        # .mol files share the SDF reader.
        cmpd = molio.readSDF(fileName)
        mol = cmpd.molecule
    else:
        print 'Invalid file type'
        exit(1)
    return mol
def parseArgs():
    """Parse command-line options and run every requested report for each
    input structure file (despite the name, this also does the work).
    """
    parser = argparse.ArgumentParser(description="predictor options")
    parser.add_argument("-x", dest="xMode", default=False, action="store_true", help="Whether to read without library(False")
    parser.add_argument("-dis", dest="disMode", default=False, action="store_true", help="Whether to output distances(False")
    parser.add_argument("-suite", dest="suiteMode", default=False, action="store_true", help="Whether to output RNA suites(False")
    parser.add_argument("-dot", dest="dotMode", default=False, action="store_true", help="Whether to output RNA dot-bracket (Vienna) sequence(False")
    parser.add_argument("-seq", dest="seqMode", default=False, action="store_true", help="Whether to output sequence(False")
    parser.add_argument("-residues", dest="includeResidues", default='', help="Limit residues to these (use all)")
    parser.add_argument("fileNames",nargs="*")
    args = parser.parse_args()
    # Expand the residue specification once; None means "all residues".
    includeResidues = genResidueList(args.includeResidues)
    for fileName in args.fileNames:
        mol = loadStructure(fileName, args.xMode)
        if args.disMode:
            dumpDis(mol)
        if args.suiteMode:
            rnaSuite(mol, includeResidues)
            # NOTE(review): this getRNAPairs() result is discarded --
            # possibly leftover debugging.
            getRNAPairs(mol)
        if args.dotMode:
            vienna, rnaDotBracketDict = rnaDotBracket(mol)
            print vienna
        if args.seqMode:
            sequence(mol)
|
994,836 | 9cc769de93bb4407dfe4d6752bb36e377bd32b1f | import cv2
img = cv2.imread('RAA_dc.png')
# print(type(img))
cv2.imshow('', img)
cv2.waitKey(0) #& 0xFF
# if key==27:
cv2.destroyAllWindows() |
994,837 | 6a58edac3691138c2f915bb7d9c189e4d8facca3 | def maxim(arr):
return max(arr)
print(maxim([4,1,-5,0,4,8,2,1])) |
994,838 | 312a513f3c368fc69f8af017c60cb52edd6e730c | import pokesql.builder
import pokeapi.provider
provider = pokeapi.provider.PokeapiProvider()
type_max = 17
print("Conecting to database...")
builder = pokesql.builder.PokeSQLBuilder("localhost", "root", "yourpassword", "pokemon")
print("Database connected !")
print("Start building database...")
builder.build_data_base()
print("Database built !")
print("Populating type table and references...")
types = provider.get_types()
builder.populate_type(types)
print("Populating ability table...")
abilities = provider.get_abilities()
builder.populate_ability(abilities)
print("Populating egg_group table...")
egg_groups = provider.get_egg_groups()
builder.populate_egg_groups(egg_groups)
print("Populating stats table...")
stats = provider.get_stats()
builder.populate_stats(stats)
print("Populating pokemon table...")
pokemons = provider.get_pokemon()
builder.populate_pokemon(pokemons)
builder.populate_pokemon_type(pokemons, types)
builder.populate_pokemon_stat(pokemons, stats) |
994,839 | ab18c38536301feaf0139e375103e5fa89f2b23f |
from xai.brain.wordbase.nouns._instalment import _INSTALMENT
# class header
class _INSTALMENTS(_INSTALMENT, ):
    """Noun entry for "instalments", the plural of "instalment"."""
    def __init__(self,):
        _INSTALMENT.__init__(self)
        # Word metadata consumed by the surrounding word database.
        self.name = "INSTALMENTS"
        self.specie = 'nouns'
        self.basic = "instalment"
        self.jsondata = {}
|
994,840 | de5861761f9fde47fde6cf7ba89cbdf249256709 | """
Given an integer n, return the number of trailing zeroes in n!.
Note: Your solution should be in logarithmic time complexity.
"""
class Solution:
    # @param {integer} n
    # @return {integer}
    def trailingZeroes(self, n):
        """Count trailing zeroes of n! in O(log n) time.

        Trailing zeroes come from factors of 10 = 2 * 5; factors of 5 are
        the scarcer of the two, so count multiples of 5, 25, 125, ...
        """
        x = 5
        ans = 0
        while n >= x:
            # n // x counts how many of 1..n contribute a factor of x.
            # FIX: use floor division so the result stays an int on
            # Python 3 (true division there would yield a float).
            ans += n // x
            x *= 5
        return ans
|
994,841 | 8d25e7c621e97d3b9c784d80d7a55ffb0ecec9b7 | #!/usr/bin/python3
import os
import sys
data_dir = "../data/wikipedia/"
languages = ["fr", "ko"]
models = ["lstm"]
def read_wordlist(path):
    """Read a wordlist file and return the set of first space-separated
    tokens, one per line.

    On failure to open the file, print a diagnostic and return an empty
    set.  (FIX: the original fell through and implicitly returned None,
    which made callers crash later on membership tests; it also never
    closed the file handle -- a context manager handles that now.)
    """
    try:
        with open(path, "r") as infile:
            return {line.split(" ")[0] for line in infile}
    except OSError:
        print("Failed to open for wordlist creation", path)
        return set()
def create_examples(language,model):
    """Split a model's generated wordlist into three files by provenance:
    words present in the training input, words only in the huge reference
    wordlist, and genuinely new words.  Output is sorted by generated
    frequency, descending."""
    generated_path = data_dir + language + "/" + model + "/generated_wordlist.txt"
    huge_path = data_dir + language + "/huge_wordlist.txt"
    input_path = data_dir + language + "/input_wordlist.txt"
    huge_words = read_wordlist(huge_path)
    input_words = read_wordlist(input_path)
    generated_file = open(generated_path, "r")
    huge_file = open(data_dir + language + "/" + model + "/generated_hugewords.txt", "w")
    input_file = open(data_dir + language + "/" + model + "/generated_inputwords.txt", "w")
    new_file = open(data_dir + language + "/" + model + "/generated_newwords.txt", "w")
    # Each generated line is "<word> <frequency>".
    gendict = {}
    for line in generated_file:
        word = line.split(" ")[0]
        frequency = int(line.split(" ")[1])
        gendict[word] = frequency
    generated_file.close()
    # Route each word to exactly one output file; input membership takes
    # precedence over the huge reference list.
    for word,freq in sorted(gendict.items(), key=lambda x: x[1], reverse=True):
        if word in input_words:
            input_file.write(word + " " + str(freq) + "\n")
        elif word in huge_words:
            huge_file.write(word + " " + str(freq) + "\n")
        else:
            new_file.write(word + " " + str(freq) + "\n")
    huge_file.close()
    input_file.close()
    new_file.close()
for language in languages:
for model in models:
create_examples(language,model)
|
994,842 | dd68d8e5b8bf3f41411813979579d490caa24618 | import pandas as pd
from sklearn import metrics, preprocessing
from sklearn.model_selection import train_test_split, GridSearchCV
import numpy as np
from numpy import genfromtxt
from classifiers import *
import matplotlib.pyplot as plt
import time
def main():
    '''
    Loads the dataset and fits several candidate models, choosing the best
    model and hyperparameters with grid search and cross validation, then
    reports accuracy of the selected model on the held-out test set.
    '''
    np.random.seed(123456)
    digits, label = get_data(False)
    X_train, Y_train, X_test, Y_test = split(digits, label)
    Y_train = Y_train.reshape(-1,1)
    Y_test = Y_test.reshape(-1,1)
    X_train = preprocessing.normalize(X_train)
    X_test = preprocessing.normalize(X_test)

    # Train and compare the candidate classifiers.
    classifiers = [MLP_classifier(), randomforest(), support_vector()]
    best_clf = None
    best_score = 0
    start = time.time()
    for clf in classifiers:
        print('fits classifier...', type(clf).__name__)
        clf.fit(X_train, Y_train)
        score = clf.get_grid_search().best_score_
        print('score:', score)
        if score > best_score:
            best_clf = clf
            # FIX: best_score was never updated, so "best" effectively
            # meant "last classifier with a positive score".
            best_score = score
        print(f'best params{clf.get_grid_search().best_params_}')
    totaltime = time.time() - start
    print('fitting finished in ', totaltime, 'seconds')
    # NOTE(review): this plots the LAST fitted classifier (loop variable
    # clf), not best_clf -- presumably intentional as a progress check,
    # but confirm.
    model_stats(clf.get_grid_search(), X_test, Y_test)
    print('best classifier was ', best_clf)

    # Evaluate the selected classifier on the test set.
    start = time.time()
    pred_test = best_clf.get_grid_search().predict(X_test)
    pred_score = get_score(pred_test, Y_test)
    totaltime = time.time() - start
    print('predicted test score: ', pred_score)
    print('prediction took ', totaltime, 'seconds')
def get_data(full_dataset):
    '''Read dataset files and return (digits, labels).

    full_dataset=True reads the complete dataset; False reads the reduced
    files produced by create_smaller_file() for quicker experiments.
    '''
    if full_dataset:
        digits = get_feature('../data/handwritten_digits_images.csv')
        label = get_label('../data/handwritten_digits_labels.csv')
        #create_smaller_file(digits, label)
    else:
        digits = get_feature('../data/digit_smaller.csv')
        # FIX: labels were read with get_feature(); use get_label() so the
        # newline-delimited label file is read the same way in both
        # branches (it is written with delimiter='\n').
        label = get_label('../data/label_smaller.csv')
    return digits, label
def create_smaller_file(X, y):
    '''Save a random 20% subset of the data for faster test runs.

    train_test_split is used only as a convenient shuffled splitter; the
    80% "throw" portion is discarded.
    '''
    seed = 33
    X_keep, X_throw, y_keep, y_throw = train_test_split(X, y,
                                                        test_size=0.80,
                                                        shuffle=True,
                                                        random_state=seed)
    np.savetxt('../data/digit_smaller.csv',X_keep , delimiter=',', fmt='%f')
    np.savetxt('../data/label_smaller.csv',y_keep , delimiter='\n', fmt='%f')
def split(digits, label):
    '''Shuffle and split the dataset 70/30 into train and test sets.

    Note: despite the original docstring, no validation split is made here;
    validation happens inside GridSearchCV's cross validation.
    '''
    seed = 33
    X_train, X_test, Y_train, Y_test = train_test_split(digits, label,
                                                        test_size=0.3,
                                                        shuffle=True,
                                                        random_state=seed)
    return X_train, Y_train, X_test, Y_test
def get_score(predict, actual):
    '''Return the fraction of positions where predict matches actual.'''
    matches = sum(1 for index, expected in enumerate(actual) if predict[index] == expected)
    return matches / len(actual)
def plot(digits, label, predict):
    '''Plot a single image and print its label to the console.

    NOTE(review): `predict` is used as a row index into the dataset here,
    not as a prediction value -- confirm the intended calling convention.
    '''
    # Reshape flat pixel rows into 28x28 images.
    img = digits.reshape(digits.shape[0], 28, 28)
    plt.imshow(img[predict], cmap="Greys")
    print(label[predict])
    plt.show()
def model_stats(clf, X_test, Y_test):
    '''Plot (and print) a row-normalized confusion matrix for clf on the
    test set.'''
    disp = metrics.plot_confusion_matrix(clf, X_test, Y_test, normalize='true')
    disp.figure_.suptitle(f"Confusion Matrix for classifier:{clf}")
    print("Confusion matrix:\n%s" % disp.confusion_matrix)
    plt.show()
def get_feature(fileName):
    '''Load a comma-separated numeric matrix from fileName.'''
    return genfromtxt(fileName, delimiter=',')

def get_label(fileName):
    '''Load a newline-separated numeric vector from fileName.'''
    return genfromtxt(fileName, delimiter='\n')
if __name__ == "__main__":
main() |
994,843 | 6eef084eb2b3417c6f59571bc50204088147822c | """
Intuition
Return the sum of all positive differences for count of alphabets from String t in String s.
"""
class Solution:
    def solve(self, s, t):
        """Return how many characters must be added to s so that it
        contains every character of t with at least t's multiplicity
        (the sum of all positive count differences t - s).
        """
        from collections import Counter  # FIX: Counter was used but never imported
        have = Counter(s)
        need = Counter(t)
        missing = 0
        for ch, count in need.items():
            missing += max(count - have.get(ch, 0), 0)
        return missing
994,844 | 3e46baeee6176b2bafa4e3833ec976ec7ede21dc | from django.db import models
from django.contrib.auth.models import User
LEVEL_CHOICES = (
('1', 'Easy'),
('2', 'Medium'),
('3', 'Hard'),
)
class Set(models.Model):
    """A flashcard set: a titled, difficulty-rated collection of cards."""
    title = models.CharField(max_length=200)
    level = models.CharField(max_length=20, choices=LEVEL_CHOICES)
    creator = models.ForeignKey(User, related_name="%(app_label)s_%(class)s_related")
    create_dt = models.DateTimeField(auto_now=False, auto_now_add=True)
    # FIX: auto_now and auto_now_add are mutually exclusive in Django;
    # an "updated" timestamp only needs auto_now.
    update_dt = models.DateTimeField(auto_now=True)

    def get_cards(self):
        """Return all cards belonging to this set.

        NOTE(review): Card's FK to Set has no related_name, so the default
        reverse accessor should be card_set -- confirm set_cardset exists.
        """
        cards = self.set_cardset.all()
        return cards

    @models.permalink
    def get_absolute_url(self):
        # FIX: 'set_id' was an undefined name (NameError at runtime);
        # the URL argument is this set's own primary key.
        return ('conf.urls.set_detail', [self.id])

    def __unicode__(self):
        return self.title
class Card(models.Model):
    """A single question/answer flashcard belonging to a Set."""
    question = models.TextField(max_length=500)
    answer = models.TextField(max_length=500)
    # Whether the owner has marked this card as memorized.
    known = models.BooleanField(default=False)
    set = models.ForeignKey(Set)
    creator = models.ForeignKey(User, related_name="%(app_label)s_%(class)s_related")
    create_dt = models.DateTimeField(auto_now=False, auto_now_add=True, )
    # FIX: auto_now and auto_now_add are mutually exclusive in Django;
    # an "updated" timestamp only needs auto_now.
    update_dt = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.question

    @models.permalink
    def get_absolute_url(self):
        # FIX: 'set_id' and 'card_id' were undefined names (NameError at
        # runtime); use the owning set's pk and this card's pk.
        return ('conf.urls.card_detail', [self.set.id, self.id])
994,845 | 408b6a8fef4cf67be56adbd2f140f8b0a6e4dd2e | import json
from pathlib import Path
from configuration.config import data_dir
def remove_duplication():
    """For each record in submission_object.json, drop mentions that share
    a start or end offset with a longer mention (keeping the longest),
    then drop NIL-linked mentions.

    NOTE(review): the result is currently discarded -- the writer lines
    are commented out, so this function only prints what it would remove.
    """
    # res_path = (Path(data_dir)/'result.json').open('w')
    for l in (Path(data_dir)/'submission_object.json').open():
        l = json.loads(l)
        mention_data = l['mention_data']
        # Precompute each mention's exclusive end offset.
        for m in mention_data:
            m.update({'end_idx':int(m['offset'])+len(m['mention'])})
        new_mention_data = mention_data.copy()
        # O(n^2) pairwise scan: remove the shorter of two mentions that
        # start (first test) or end (second test) at the same position.
        for m1 in mention_data:
            for m2 in mention_data:
                if m1['offset'] == m2['offset'] and m1['end_idx'] != m2['end_idx'] and len(m1['mention']) > len(m2['mention']) and m2 in new_mention_data:
                    print(f'**{m2} ---- {m1}')
                    new_mention_data.remove(m2)
                if m1['offset'] != m2['offset'] and m1['end_idx'] == m2['end_idx'] and len(m1['mention']) > len(m2['mention']) and m2 in new_mention_data:
                    print(f'**{m2} ---- {m1}')
                    new_mention_data.remove(m2)
        # Strip the helper key before (would-be) serialization.
        for m in new_mention_data:
            del m['end_idx']
        l['mention_data'] = [m for m in new_mention_data if m['kb_id']!='NIL']
        # res_path.write(json.dumps(l, ensure_ascii=False) + '\n')
def remove_nil():
    """Rewrite result.json as result_.json, dropping mentions whose kb_id
    is 'NIL' (mentions that could not be linked to the knowledge base)."""
    src = Path(data_dir) / 'result.json'
    dst = Path(data_dir) / 'result_.json'
    # FIX: use context managers so both files are closed (and the output
    # flushed) even if a line fails to parse; the original never closed
    # either handle.
    with src.open() as fin, dst.open('w') as fout:
        for line in fin:
            record = json.loads(line)
            record['mention_data'] = [m for m in record['mention_data'] if m['kb_id'] != 'NIL']
            fout.write(json.dumps(record, ensure_ascii=False) + '\n')
if __name__ == '__main__':
remove_duplication()
|
994,846 | 6bfdf4338426c036445b8953c8b0b9ac1866aca1 | # Time Limit Exceeded
# probably have bug
class Solution:
    def numDistinct(self, s: str, t: str) -> int:
        """Count distinct subsequences of s equal to t by brute-force
        enumeration of every subsequence (exponential time)."""
        self.result = 0
        self.arrange_combine(s, t, "")
        return self.result

    def arrange_combine(self, s, t, curr):
        """Recursively extend curr with characters of s, counting each
        time it equals t."""
        if curr == t:
            self.result += 1
        if not s:
            return
        for i, c in enumerate(s):
            self.arrange_combine(s[i + 1:], t, curr + c)
# Runtime: 144 ms, faster than 64.79% of Python3 online submissions for Distinct Subsequences.
# Memory Usage: 17.9 MB, less than 57.14% of Python3 online submissions for Distinct Subsequences.
class Solution:
    def numDistinct(self, s: str, t: str) -> int:
        """Count distinct subsequences of s equal to t with a full 2-D DP
        table: dp[i][j] = ways to form t[:i] from s[:j]."""
        rows, cols = len(t) + 1, len(s) + 1
        dp = [[0] * cols for _ in range(rows)]
        # An empty target can always be formed in exactly one way.
        dp[0] = [1] * cols
        for i in range(1, rows):
            for j in range(1, cols):
                # Either skip s[j-1], or (if it matches t[i-1]) use it.
                ways = dp[i][j - 1]
                if t[i - 1] == s[j - 1]:
                    ways += dp[i - 1][j - 1]
                dp[i][j] = ways
        return dp[-1][-1]
# Runtime: 112 ms, faster than 82.50% of Python3 online submissions for Distinct Subsequences.
# Memory Usage: 14.1 MB, less than 57.14% of Python3 online submissions for Distinct
class Solution:
    """Distinct Subsequences, 1-D rolling-array DP (O(|s|) memory)."""

    def numDistinct(self, s: str, t: str) -> int:
        """Count distinct subsequences of *s* equal to *t*."""
        # Row for the empty target prefix: always exactly one way.
        prev = [1] * (len(s) + 1)
        for tc in t:
            curr = [0] * (len(s) + 1)
            for j, sc in enumerate(s, start=1):
                # Either skip s[j-1], or consume it when it matches tc.
                curr[j] = curr[j - 1] + (prev[j - 1] if sc == tc else 0)
            prev = curr
        return prev[-1]
|
994,847 | 5e4858b3e56ab054c282e23ec9fa2c8ce73e952a | import csv
import sys
import logfile
from os.path import exists
import datetime
from dateutil import parser
from pprint import pprint
# Indices into a per-key aggregation row (see summarize_log_data):
IT_REQUESTS = 0   # number of requests seen (later: requests/second)
IT_TIME = 1       # accumulated response time from the 'msec' field (later: average)
IT_SIZE = 2       # accumulated response size (later: average)
IT_TIME_MAX = 3   # slowest single response time
IT_SIZE_MAX = 4   # largest single response size
def check_last_state(log_file, config, options):
    """Seek *log_file* to the position saved by the previous run.

    The state file named by the 'state' config entry stores
    "<position> <iso-timestamp>" from the last invocation.  On the first
    run the current end-of-file is recorded and the process exits;
    likewise when the file did not grow.  Returns the timestamp of the
    previous run.

    NOTE(review): if 'state' is not configured, `last_time` is never
    assigned and the final return raises UnboundLocalError -- confirm
    callers always configure 'state'.  (Python 2 module.)
    """
    state = config.get('state')
    if state:
        # Jump to EOF to learn the file's current size.
        log_file.seek(0, 2)
        file_size = log_file.tell()
        last_position_fname = state[0]
        if not exists(last_position_fname):
            # First run: persist current position/time, nothing to parse yet.
            with open(last_position_fname, 'wt') as last_run_file:
                current_time = datetime.datetime.now()
                last_run_file.write("%s %s\n" % (str(file_size), current_time.isoformat()))
            if not options.quiet:
                print "Created initial file position"
            sys.exit(0)
        else:
            with open(last_position_fname, 'rt') as last_run_file:
                last_postion, last_time = last_run_file.readline().split()
                last_postion = int(last_postion)
                last_time = parser.parse(last_time)
            if file_size == last_postion:
                if not options.quiet:
                    print "File size did not change, nothing to parse."
                sys.exit(0)
            if file_size < last_postion:  # File was truncated, restart from beginning
                if not options.quiet:
                    print "File was truncated, restarting from beginning of file"
                log_file.seek(0)
            else:
                log_file.seek(last_postion)  # Go to last position
    return last_time
def summarize_log_data(log_file, config, options, skip_last_state):
    """Aggregate the remainder of *log_file* grouped by the 'group_by' fields.

    Returns a sorted list of (aggregation_key_tuple, row) pairs where row is
    [requests, time, size, time_max, size_max] (see the IT_* indices).
    With options.results set, time/size become averages and requests becomes
    requests-per-second since the last run.  (Python 2 module.)
    """
    # Returns a list of elements with the following structure:
    # ( hour, [vhost_name, [requests, time, size, time_max, size_max]] )
    state = config.get('state')
    current_time = last_time = datetime.datetime.now()
    if not skip_last_state:
        # Also seeks log_file to where the previous run stopped.
        last_time = check_last_state(log_file, config, options)
    elapsed_seconds = (current_time - last_time).total_seconds()
    parsed_lines = 0
    aggregated_data = {}
    # Setup format logging
    format_str = config.get('format', mandatory=True)[0]
    logfile.set_log_fmt(format_str)
    # Setup replacements
    replacements = config.regex_map('replacements')
    while True:
        line = log_file.readline()
        if not line:  # EOF
            break
        line = line.strip('\n')
        line_dict = logfile.logline2dict(line)
        if not line_dict:
            if not options.quiet:
                print "=== Invalid line:\n" + line
            continue
        line_dict = replacements.apply_to(line_dict)
        if not line_dict:
            # Skipping line excluded by replacement rules
            if not options.quiet:
                print "*** Excluded line:\n" + line
            continue
        parsed_lines += 1
        aggregation_key = tuple([line_dict[group_name] for group_name in config.get('group_by')])
        aggregated_row = aggregated_data.get(aggregation_key)
        # Create new key if needed
        if not aggregated_row:
            aggregated_data[aggregation_key] = aggregated_row = [0] * 5
        aggregated_row[IT_REQUESTS] += 1
        time_taken = line_dict['msec']
        response_size = line_dict['size']
        aggregated_row[IT_TIME] += time_taken
        aggregated_row[IT_SIZE] += response_size
        aggregated_row[IT_TIME_MAX] = max(time_taken, aggregated_row[IT_TIME_MAX])
        aggregated_row[IT_SIZE_MAX] = max(response_size, aggregated_row[IT_SIZE_MAX])
    # Calculate results
    if options.results:
        # NOTE(review): resuls_aggregated_data is assigned but never used;
        # the rows are mutated in place instead.
        resuls_aggregated_data = {}
        for key, row in aggregated_data.iteritems():
            row[IT_TIME] = row[IT_TIME] / row[IT_REQUESTS]
            row[IT_SIZE] = row[IT_SIZE] / row[IT_REQUESTS]
            # Requests/second, truncated to 2 decimal places.
            row[IT_REQUESTS] = int(row[IT_REQUESTS] / elapsed_seconds * 100) / 100.0
    # Sort data by aggregation key
    aggregated_data = sorted(aggregated_data.iteritems())
    if not skip_last_state and state:
        # Persist the new file position/time for the next run.
        last_position_fname = state[0]
        file_size = log_file.tell()
        with open(last_position_fname, 'wt') as last_run_file:
            current_time = datetime.datetime.now()
            last_run_file.write("%s %s\n" % (str(file_size), current_time.isoformat()))
    if not options.quiet:
        print "Processed", parsed_lines
    return aggregated_data
def print_results(aggregated_data, options):
    """Write the aggregated rows to stdout as CSV.

    *aggregated_data* is a non-empty list of (key_tuple, row_list) pairs as
    produced by summarize_log_data.  options.results selects the
    per-second header variant; options.quiet suppresses the header row.
    """
    writer = csv.writer(sys.stdout, delimiter=',',
                        quotechar='"', quoting=csv.QUOTE_MINIMAL)
    key_count = len(aggregated_data[0][0])
    key_headers = ['key_%d' % i for i in range(key_count)]
    if options.results:
        value_headers = ['rps', 'avg (ms)', 'size', 'taken_max', 'size_max']
    else:
        value_headers = ['requests', 'taken (ms)', 'size', 'taken_max', 'size_max']
    if not options.quiet:
        writer.writerow(key_headers + value_headers)
    for keys, row in aggregated_data:
        writer.writerow(list(keys) + row)
|
994,848 | a9616f07c1e6c7fb1ef9f7dd0f5491f93bc974d8 | """
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
import pathlib
import sys
import os
from typing import List, Tuple, Union, Optional
from collections import OrderedDict
import csv
import pandas as pd
import numpy as np
from fuse.utils.ndict import NDict
from fuse.utils.file_io.file_io import create_dir
from fuse.eval.evaluator import EvaluatorDefault
from fuse.eval.metrics.classification.metrics_classification_common import (
MetricAUCROC,
MetricROCCurve,
)
from fuse.eval.metrics.metrics_common import CI
from functools import partial
## Constants
# Constants that define the expected format of the prediction and target files
# and list the classes for task 1 and task 2
EXPECTED_TASK1_PRED_KEYS = {"case_id", "NoAT-score", "CanAT-score"}
EXPECTED_TASK2_PRED_KEYS = {
    "case_id",
    "B-score",
    "LR-score",
    "IR-score",
    "HR-score",
    "VHR-score",
}
EXPECTED_TARGET_KEYS = {"case_id", "Task1-target", "Task2-target"}
# Column holding the per-case identifier in the prediction / target CSVs.
PRED_CASE_ID_NAME = "case_id"
TARGET_CASE_ID_NAME = "case_id"
TASK1_CLASS_NAMES = ("NoAT", "CanAT")  # must be aligned with task1 targets
TASK2_CLASS_NAMES = ("B", "LR", "IR", "HR", "VHR")  # must be aligned with task2 targets
def post_processing(sample_dict: NDict, task1: bool = True, task2: bool = True) -> dict:
    """
    post caching processing. Will group together to an array the per class scores and verify it sums up to 1.0
    :param sample_dict: a dictionary that contains all the extracted values of a single sample
    :param task1: if true will evaluate task1
    :param task2: if true will evaluate task2
    :return: a modified/alternative dictionary
    """
    # verify sample
    expected_keys = [f"target.{key}" for key in EXPECTED_TARGET_KEYS]
    if task1:
        expected_keys += [f"task1_pred.{key}" for key in EXPECTED_TASK1_PRED_KEYS]
    if task2:
        expected_keys += [f"task2_pred.{key}" for key in EXPECTED_TASK2_PRED_KEYS]
    # BUGFIX: the issubset() result was previously discarded, so missing keys
    # went undetected. Assert it, matching the input checks in eval().
    assert set(expected_keys).issubset(
        set(sample_dict.keypaths())
    ), f"Expecting sample to include the keys: {set(expected_keys) - set(sample_dict.keypaths())}"
    # convert scores to numpy array
    # task 1
    if task1:
        task1_pred = []
        for cls_name in TASK1_CLASS_NAMES:
            task1_pred.append(sample_dict[f"task1_pred.{cls_name}-score"])
        task1_pred_array = np.array(task1_pred)
        # Scores should form a probability vector (sum ~ 1.0, 5% tolerance).
        if not np.isclose(task1_pred_array.sum(), 1.0, rtol=0.05):
            print(
                f"Warning: expecting task 1 prediction for case {sample_dict['descriptor']} to sum up to almost 1.0, got {task1_pred_array}"
            )
        sample_dict["task1_pred.array"] = task1_pred_array
    # task 2
    if task2:
        task2_pred = []
        for cls_name in TASK2_CLASS_NAMES:
            task2_pred.append(sample_dict[f"task2_pred.{cls_name}-score"])
        task2_pred_array = np.array(task2_pred)
        if not np.isclose(task2_pred_array.sum(), 1.0, rtol=0.05):
            print(
                f"Error: expecting task 2 prediction for case {sample_dict['descriptor']} to sum up to almost 1.0, got {task2_pred_array}"
            )
        sample_dict["task2_pred.array"] = task2_pred_array
    return sample_dict
def decode_results(
    results: NDict, output_dir: str, task1: bool, task2: bool
) -> Tuple[OrderedDict, str]:
    """
    Gets the results computed by the metrics and summarizes them in a markdown text and dictionary.
    The dictionary will be saved in <output_dir>/results.csv and the markdown text in <output_dir>/results.md
    :param results: the results computed by the metrics
    :param output_dir: path to an output directory
    :param task1: if true will evaluate task1
    :param task2: if true will evaluate task2
    :return: ordered dict summarizing the results and markdown text
    """
    # Drop the top-level "metrics" prefix so keys read e.g. 'task1_auc.macro_avg.org'.
    results = NDict(results["metrics"])
    results_table = OrderedDict()
    # Table
    ## task1
    if task1:
        results_table["Task1-AUC"] = f"{results['task1_auc.macro_avg.org']:.3f}"
        results_table[
            "Task1-AUC-CI"
        ] = f"[{results['task1_auc.macro_avg.conf_lower']:.3f}-{results['task1_auc.macro_avg.conf_upper']:.3f}]"
    ## task2
    if task2:
        results_table["Task2-AUC"] = f"{results['task2_auc.macro_avg.org']:.3f}"
        results_table[
            "Task2-AUC-CI"
        ] = f"[{results['task2_auc.macro_avg.conf_lower']:.3f}-{results['task2_auc.macro_avg.conf_upper']:.3f}]"
        # Per-class one-vs-rest AUC plus its confidence interval.
        for cls_name in TASK2_CLASS_NAMES:
            results_table[
                f"Task2-AUC-{cls_name}VsRest"
            ] = f"{results[f'task2_auc.{cls_name}.org']:.3f}"
            results_table[
                f"Task2-AUC-{cls_name}VsRest-CI"
            ] = f"[{results[f'task2_auc.{cls_name}.conf_lower']:.3f}-{results[f'task2_auc.{cls_name}.conf_upper']:.3f}]"
    # mark down text
    results_text = ""
    ## task 1
    if task1:
        results_text += "# Task 1 - adjuvant treatment candidacy classification\n"
        results_text += (
            f"AUC: {results_table['Task1-AUC']} {results_table['Task1-AUC-CI']}\n"
        )
        results_text += "## ROC Curve\n"
        results_text += (
            '<br/>\n<img src="task1_roc.png" alt="drawing" width="40%"/>\n<br/>\n'
        )
    ## task 2
    if task2:
        results_text += "\n# Task 2 - risk categories classification\n"
        results_text += (
            f"AUC: {results_table['Task2-AUC']} {results_table['Task2-AUC-CI']}\n"
        )
        results_text += "## Multi-Class AUC\n"
        # Markdown table: header row, separator row, then one value row.
        table_columns = ["AUC"] + [
            f"AUC-{cls_name}VsRest" for cls_name in TASK2_CLASS_NAMES
        ]
        results_text += "\n|"
        results_text += "".join([f" {column} |" for column in table_columns])
        results_text += "\n|"
        results_text += "".join([" ------ |" for column in table_columns])
        results_text += "\n|"
        results_text += "".join(
            [
                f" {results_table[f'Task2-{column}']} {results_table[f'Task2-{column}-CI']} |"
                for column in table_columns
            ]
        )
        results_text += "\n## ROC Curve\n"
        results_text += (
            '<br/>\n<img src="task2_roc.png" alt="drawing" width="40%"/>\n<br/>\n'
        )
    # save files
    with open(os.path.join(output_dir, "results.md"), "w") as output_file:
        output_file.write(results_text)
    with open(os.path.join(output_dir, "results.csv"), "w") as output_file:
        w = csv.writer(output_file)
        w.writerow(results_table.keys())
        w.writerow(results_table.values())
    return results_table, results_text
def eval(
    task1_prediction_filename: str,
    task2_prediction_filename: str,
    target_filename: str,
    output_dir: str,
    case_ids_source: Optional[Union[str, List[str]]] = "target",
) -> Tuple[OrderedDict, str]:
    """
    Load the prediction and target files, evaluate the predictions and save the results into files.
    :param task1_prediction_filename: path to a prediction csv file for task1. Expecting the columns listed in EXPECTED_TASK1_PRED_KEYS to exist (including the header).
                                      if set to "" - the script will not evaluate task1
    :param task2_prediction_filename: path to a prediction csv file for task2. Expecting the columns listed in EXPECTED_TASK2_PRED_KEYS to exist (including the header)
                                      if set to "" - the script will not evaluate task2
    :param target_filename: path to a prediction csv file. Expecting the columns listed in TARGET_CASE_ID_NAME to exist
    :param output_dir: path to directory to save the output files
    :param case_ids_source: will run the evaluation on the specified list of case ids. Supported values:
                            * "task1_pred" to evaluate all the samples/cases specified in task1 prediction file.
                            * "task2_pred" to evaluate all the samples/cases specified in task2 prediction file.
                            * "target" to evaluate all the samples/cases specified in targets file.
                            * list to define the samples explicitly
    :return: ordered dict summarizing the results and markdown text
    """
    create_dir(output_dir)
    # eval task1, task2 or both
    task1 = task1_prediction_filename is not None and task1_prediction_filename != ""
    task2 = task2_prediction_filename is not None and task2_prediction_filename != ""
    # NOTE(review): this branch is reachable only when the caller explicitly
    # passes case_ids_source=None (the default is "target").
    if case_ids_source is None:
        if task1:
            case_ids_source = "task1_pred"
        else:
            case_ids_source = "task2_pred"
    dataframes_dict = {}
    metrics = {}
    # Bake the task flags into the per-sample post-processing hook.
    post_proc = partial(post_processing, task1=task1, task2=task2)
    # task 1
    if task1:
        # metrics to evaluate: macro-average AUC with confidence intervals,
        # plus a ROC curve image saved under output_dir.
        metrics.update(
            {
                "task1_auc": CI(
                    MetricAUCROC(
                        pred="task1_pred.array",
                        target="target.Task1-target",
                        class_names=TASK1_CLASS_NAMES,
                        pre_collect_process_func=post_proc,
                    ),
                    stratum="target.Task1-target",
                ),
                "task1_roc_curve": MetricROCCurve(
                    pred="task1_pred.array",
                    target="target.Task1-target",
                    class_names=[None, ""],
                    pre_collect_process_func=post_proc,
                    output_filename=os.path.join(output_dir, "task1_roc.png"),
                ),
            }
        )
        # read files (case ids read as strings, not numbers)
        task1_pred_df = pd.read_csv(
            task1_prediction_filename, dtype={PRED_CASE_ID_NAME: object}
        )
        # verify input
        assert set(task1_pred_df.keys()).issubset(
            EXPECTED_TASK1_PRED_KEYS
        ), f"Expecting task1 prediction file {os.path.abspath(task1_prediction_filename)} to include also the following keys: {EXPECTED_TASK1_PRED_KEYS - set(task1_pred_df.keys())}"
        task1_pred_df["id"] = task1_pred_df[PRED_CASE_ID_NAME]
        dataframes_dict["task1_pred"] = task1_pred_df
    # task 2
    if task2:
        # metrics to evaluate
        metrics.update(
            {
                "task2_auc": CI(
                    MetricAUCROC(
                        pred="task2_pred.array",
                        target="target.Task2-target",
                        class_names=TASK2_CLASS_NAMES,
                        pre_collect_process_func=post_proc,
                    ),
                    stratum="target.Task2-target",
                ),
                "task2_roc_curve": MetricROCCurve(
                    pred="task2_pred.array",
                    target="target.Task2-target",
                    class_names=TASK2_CLASS_NAMES,
                    output_filename=os.path.join(output_dir, "task2_roc.png"),
                    pre_collect_process_func=post_proc,
                ),
            }
        )
        # read files
        task2_pred_df = pd.read_csv(
            task2_prediction_filename, dtype={PRED_CASE_ID_NAME: object}
        )
        # verify input
        assert set(task2_pred_df.keys()).issubset(
            EXPECTED_TASK2_PRED_KEYS
        ), f"Expecting task2 prediction file {os.path.abspath(task2_prediction_filename)} to include also the following keys: {EXPECTED_TASK2_PRED_KEYS - set(task2_pred_df.keys())}"
        task2_pred_df["id"] = task2_pred_df[PRED_CASE_ID_NAME]
        dataframes_dict["task2_pred"] = task2_pred_df
    # read files
    target_df = pd.read_csv(target_filename, dtype={TARGET_CASE_ID_NAME: object})
    # verify input
    assert set(target_df.keys()).issubset(
        EXPECTED_TARGET_KEYS
    ), f"Expecting target file {os.path.abspath(target_filename)} to include also the following keys: {EXPECTED_TARGET_KEYS - set(target_df.keys())}"
    target_df["id"] = target_df[TARGET_CASE_ID_NAME]
    dataframes_dict["target"] = target_df
    # analyze: join the dataframes on "id" and evaluate all requested metrics
    evaluator = EvaluatorDefault()
    results = evaluator.eval(
        ids=list(dataframes_dict[case_ids_source]["id"]),
        data=dataframes_dict,
        metrics=metrics,
        output_dir=None,
    )
    # output
    return decode_results(results, output_dir=output_dir, task1=task1, task2=task2)
if __name__ == "__main__":
"""
Run evaluation:
Usage: python eval.py <target_filename> <task1 prediction_filename> <task2 prediction_filename> <output dir>
See details in function eval()
Run dummy example (set the working dir to fuse_examples/imaging/classification/knight/eval): python eval.py example/example_targets.csv example/example_task1_predictions.csv example/example_task2_predictions.csv example/results
"""
if len(sys.argv) == 1:
dir_path = pathlib.Path(__file__).parent.resolve()
# no arguments - set arguments inline - see details in function eval()
target_filename = os.path.join(dir_path, "example/example_targets.csv")
task1_prediction_filename = os.path.join(
dir_path, "example/example_task1_predictions.csv"
)
task2_prediction_filename = os.path.join(
dir_path, "example/example_task2_predictions.csv"
)
output_dir = "example/result"
else:
# get arguments from sys.argv
assert (
len(sys.argv) == 5
), f"Error: expecting 4 input arguments, but got {len(sys.argv)-1}. Usage: python eval.py <target_filename> <task1_prediction_filename> <task2_prediction_filename> <output_dir>. See details in function eval()"
target_filename = sys.argv[1]
task1_prediction_filename = sys.argv[2]
task2_prediction_filename = sys.argv[3]
output_dir = sys.argv[4]
eval(
target_filename=target_filename,
task1_prediction_filename=task1_prediction_filename,
task2_prediction_filename=task2_prediction_filename,
output_dir=output_dir,
)
|
994,849 | 62d7fc36559b65d707677d5fc47849ce676eb0f0 | def main():
print ("PYTHON GUESSING GAME")
main()
answer = "dog"
guess = ""
while not guess == answer:
print("I'm thinking of an animal")
guess = input("What animal am I thinking about?")
guess = guess.lower()
if guess == answer:
choice = input("Do you like the animal? Y for yes and N for no.")
if choice == "Y":
print ("I love them too.")
if choice == "N":
print("I don't like them either.")
break
elif guess[0] == "q":
break
else:
print ("Wrong! Try Again")
|
994,850 | d4606ab34391223ba8c46acac9fe969532b44b99 | #!/usr/bin/python3
"""
Makes sure bluetooth service is not running while suspended.
"""
import sys
import subprocess as sp
from pathlib import Path
import logging
# Module-level logger; this script logs at INFO level.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Where the pre-suspend rfkill soft-block state is persisted between runs.
BT_STATE_PATH = Path("/var/tmp/bluetooth-rfkill-before-suspend")
def block_bluetooth():
    """Soft-block the bluetooth radio via rfkill (raises on rfkill failure)."""
    logger.info("Blocking bluetooth")
    sp.run(['/usr/sbin/rfkill', 'block', 'bluetooth'], check=True)
def unblock_bluetooth():
    """Remove the rfkill soft block from the bluetooth radio."""
    logger.info("Unblocking bluetooth")
    sp.run(['/usr/sbin/rfkill', 'unblock', 'bluetooth'], check=True)
def get_bluetooth_state():
    """Return the current rfkill SOFT state of the first bluetooth device.

    Parses `rfkill --noheadings --output SOFT list bluetooth`; the first
    output line is the state string (e.g. "blocked"/"unblocked").
    """
    logger.info("Fetch bluetooth rfkill soft state")
    result = sp.run(['/usr/sbin/rfkill', '--noheadings',
                     '--output', 'SOFT',
                     'list', 'bluetooth'], check=True, stdout=sp.PIPE)
    bluetooth_state = result.stdout.decode().splitlines()[0]
    return bluetooth_state
def get_saved_bluetooth_state():
    """Return the rfkill soft state recorded before suspend.

    Falls back to "blocked" when no state file has been written yet.
    """
    if not BT_STATE_PATH.exists():
        return "blocked"
    with BT_STATE_PATH.open() as state_fd:
        return state_fd.read().strip()
def save_bluetooth_state():
    """Persist the current rfkill soft state so it can be restored after resume."""
    with BT_STATE_PATH.open('w') as state_fd:
        state_fd.write(get_bluetooth_state())
def maybe_restore_bluetooth_state():
    """Unblock bluetooth again, but only if it was unblocked before suspend."""
    if get_saved_bluetooth_state() == "unblocked":
        unblock_bluetooth()
if __name__ == '__main__':
    # systemd sleep-hook style entry point: argv[1] is "pre" (about to
    # suspend), "post" (just resumed) or "debug".
    if len(sys.argv) < 2:
        logger.error("Needs more arguments!!")
        sys.exit(1)
    action = sys.argv[1]
    if action == "pre":
        # Remember the current state, then force bluetooth off for suspend.
        save_bluetooth_state()
        block_bluetooth()
    elif action == "post":
        # Re-enable bluetooth only if it was enabled before suspending.
        maybe_restore_bluetooth_state()
    elif action == "debug":
        logger.info("Bluetooth state: %s", get_bluetooth_state())
        save_bluetooth_state()
        logger.info("Bluetooth saved state: %s", get_saved_bluetooth_state())
    else:
        sys.exit(1)
    sys.exit(0)
994,851 | e35063d76b71630145927f653c2637bcae1ff786 | print("Welcome to faulty calculator:This is develop by Kirtika\n")
print(" + for Addition ")
print(" - for Substration")
print(" * for Multiply ")
print(" / for Division")
print(" ** for Power")
print(" % for modulo\n")
num1=int(input("Enter first Number:\n"))
num2=int(input("Enter second Number:\n"))
operation=input("Enter Your Operator:\n")
if num1 == 45 and num2 == 3 and operation == "*":
print(f"{num1}+{num2} = 555")
elif num1 == 56 and num2 == 9 and operation == "+":
print("f{num1}+{num2} = 77")
elif num1 == 56 and num2 == 6 and operation== "/":
print(f"{num1}+{num2} =4")
elif operation == "%":
num4=num1%num2
print(f"{num1}+{num2} = {num4}")
elif operation == "/":
num4=num1/num2
print(f"{num1}+{num2} = {num4}")
elif operation == "*":
num4=num1*num2
print(f"{num1}+{num2} = {num4}")
elif operation == "**":
num4=num1**num2
print(f"{num1}+{num2} = {num4}")
elif operation == "+":
num4=num1+num2
print(f"{num1}+{num2} = {num4}")
elif operation == "-":
num4=num1-num2
print(f"{num1}+{num2} = {num4}")
else:
print("Unexpeted Symbol") |
994,852 | 53eb1025d0e90bc57ff78f6b5b5f063140bc403a | from ast import literal_eval
import numpy as np
import pickle
def filter_data(tokens_list):
    """Return *tokens_list* with bare single-space tokens removed."""
    return list(filter(lambda tok: tok != ' ', tokens_list))
def reverse_vocab(word_dictionary):
    """Invert an id mapping: {word: id} -> {id: word}."""
    return dict((idx, word) for word, idx in word_dictionary.items())
def word_level(sentence, word_dictionary, mode, checker):
    """Tokenize *sentence* word-by-word, optionally mapping through a vocab.

    With checker == 'decoding', each token is first parsed as an int id.
    For mode == 'output' the full token list is returned; for any other
    mode a separator token is emitted after every word and only the final
    element (that separator) is returned.  NOTE(review): returning a
    single element in the non-'output' case looks suspicious but is
    preserved as-is.
    """
    def lookup(item):
        # Identity mapping when no vocabulary is supplied.
        return item if word_dictionary is None else word_dictionary[item]

    tokens = []
    for raw in sentence.split(" "):
        piece = int(raw) if checker == 'decoding' else raw
        tokens.append(lookup(piece))
        if mode != 'output':
            tokens.append(lookup(' '))
    if mode == 'output':
        return tokens
    return tokens[-1]
def char_level(sentence, word_dictionary):
    """Tokenize *sentence* character-by-character, optionally mapping through a vocab.

    BUGFIX: the original comprehension iterated ``for word in sentence for
    char in sentence``, i.e. it walked the whole sentence once per character,
    returning every token duplicated len(sentence) times.  A single pass over
    the characters is the intended behavior.
    """
    return [char if word_dictionary is None else word_dictionary[char] for char in sentence]
def build_vocab(sentences):
    """Assign consecutive integer ids (starting with ' ' -> 1) to every
    distinct word, in first-seen order across *sentences*.

    Improvement: the original rebuilt ``list(dictionary.keys())`` for every
    word and tested membership on that list (O(n) per word, O(n^2) total);
    dict membership is O(1) and yields identical ids.
    """
    dictionary = {' ': 1}
    for sentence in sentences:
        for word in sentence.split(" "):
            if word not in dictionary:
                dictionary[word] = len(dictionary) + 1
    return dictionary
def save_config(tokenizer):
    """Pickle *tokenizer* to 'tokenizer_output.pickle' in the working directory."""
    with open('tokenizer_output.pickle', 'wb') as file:
        pickle.dump(tokenizer, file)
class Oper(object):
    """A single tokenize/encode operation over one sentence.

    Exposes three views of the sentence: raw ``tokens``, vocabulary-mapped
    ``ids`` and fixed-length ``padded_tokens``.
    """

    def __init__(self,
                 sentence,
                 mode,
                 word_dictionary,
                 max_length,
                 checker):
        self.sentence = sentence
        self.word_dictionary = word_dictionary
        self.mode = mode                # 'char_level', 'output', or word-level
        self.max_length = max_length    # target length for padded_tokens
        self.checker = checker          # 'decoding' => tokens are int ids

    def do(self, word_dictionary, checker=None):
        """Run the tokenization selected by ``self.mode`` and return the tokens."""
        if self.mode == 'char_level':
            tokenized = char_level(self.sentence, word_dictionary)
        else:
            # BUGFIX: word_level() previously ran unconditionally, clobbering
            # the char_level result computed just above.
            tokenized = word_level(self.sentence, word_dictionary, self.mode, self.checker)
        if self.mode == 'output':
            # Strip separator tokens from the final output.
            return filter_data(tokenized)
        return tokenized

    @property
    def tokens(self):
        # Raw tokens: no vocabulary lookup applied.
        word_dictionary = None
        return self.do(word_dictionary)

    @property
    def ids(self):
        # Tokens mapped through the vocabulary.
        word_dictionary = self.word_dictionary
        return self.do(word_dictionary)

    @property
    def padded_tokens(self):
        # Vocabulary ids right-padded with the '<pad>' id up to max_length.
        word_dictionary = self.word_dictionary
        tokens = self.do(word_dictionary)
        return np.pad(tokens, (0, self.max_length - len(tokens)), 'constant',
                      constant_values=(self.word_dictionary['<pad>']))
class TokenizerBase(object):
    """Word-level tokenizer trained from a text file of sentences.

    The training file must contain a Python-literal list of sentences
    (parsed with ast.literal_eval); *special_tokens* are appended to that
    list before vocabulary construction.
    """

    def __init__(self,
                 files,
                 max_length,
                 special_tokens,
                 mode='word_level',
                 checker='output',
                 tokenizer=None):
        self.mode = mode
        self.checker = checker
        self.max_length = max_length  # padding length used by Oper.padded_tokens
        self.vocab = {}
        with open(files, 'r') as file:
            sentences = file.read()
        sentences = literal_eval(sentences)
        sentences = sentences + special_tokens
        self.sentences = sentences

    @property
    def get_vocab(self):
        # NOTE(review): returns the live dict, not a copy.
        return self.vocab

    def train(self):
        """Build the vocabulary and pickle this tokenizer to disk."""
        self.vocab = build_vocab(self.sentences)
        self.reverse_vocab = reverse_vocab(self.vocab)
        save_config(self)

    def encode(self, sentence):
        """Wrap *sentence* with <start>/<end> markers and return an Oper over it.

        NOTE(review): Oper is constructed with mode=self.checker rather than
        self.mode — looks like the two arguments may be swapped; confirm
        before changing.
        """
        word_dictionary = self.vocab
        sentence = '<start> ' + sentence + ' <end>'
        return Oper(sentence=sentence, mode=self.checker, word_dictionary=word_dictionary,
                    max_length=self.max_length, checker=None)

    def decode(self, sentence):
        """Map a sequence of integer ids back to a space-joined word string."""
        word_dictionary = self.reverse_vocab
        sentence = [str(item) for item in sentence]
        sentence = " ".join(sentence)
        return " ".join(Oper(sentence=sentence, mode=self.checker, word_dictionary=word_dictionary,
                             max_length=self.max_length, checker='decoding').ids)
class Tokenizer(object):
    """Public facade over TokenizerBase.

    Either builds a fresh TokenizerBase from *files*, or — when *tokenizer*
    is a path — restores a previously pickled one.

    SECURITY NOTE(review): pickle.load executes arbitrary code from the
    file; only load tokenizer files from trusted sources.
    """

    def __init__(self,
                 files,
                 max_length,
                 special_tokens,
                 mode='word_level',
                 checker='output',
                 tokenizer=None):
        if tokenizer is None:
            self.tokenizer = TokenizerBase(files, max_length, special_tokens,
                                           mode, checker, tokenizer)
        else:
            # Restore a trained tokenizer from a pickle file path.
            with open(tokenizer, 'rb') as file:
                tokenizer = pickle.load(file)
            self.tokenizer = tokenizer

    @property
    def get_vocab(self):
        return self.tokenizer.vocab

    @property
    def get_reverse_vocab(self):
        # Only available after train() (or on a restored tokenizer).
        return self.tokenizer.reverse_vocab

    def train(self):
        self.tokenizer.train()

    def encode(self, sentence):
        return self.tokenizer.encode(sentence)

    def decode(self, tokens):
        return self.tokenizer.decode(tokens)
|
994,853 | c2bba8b1e747f7797a3cce7704e3e33c9470eba6 | #!/usr/bin/python3
if __name__ == "__main__":
from calculator_1 import add, sub, mul, div
from sys import argv
leng = len(argv[1:])
if leng != 3:
print("Usage: ./100-my_calculator.py <a> <operator> <b>")
exit(1)
for i, arg in enumerate(argv[1:]):
if i == 0:
arg1 = int(arg)
elif i == 1:
arg2 = arg
elif i == 2:
arg3 = int(arg)
if arg2 == '+':
calc = add(arg1, arg3)
elif arg2 == '-':
calc = sub(arg1, arg3)
elif arg2 == '*':
calc = mul(arg1, arg3)
elif arg2 == '/':
calc = div(arg1, arg3)
if arg2 == '+' or arg2 == '-' or arg2 == '/' or arg2 == '*':
print("{:d} {} {:d} = {:d}".format(arg1, arg2, arg3, calc))
exit(0)
else:
print("Unknown operator. Available operators: +, -, * and /")
exit(1)
|
994,854 | 186377b94c8a5207762139591eaf13b9043afc80 | import random
def crearLista(desde, hasta):
    """Build a list of random ints in [desde, hasta]; its length is asked interactively."""
    n = int(input("Ingrese la cantidad de numeros que desea que se agreguen a la lista: "))
    return [random.randint(desde, hasta) for _ in range(n)]
def sumaLista(lista):
    """Return the sum of all numbers in *lista* (0 for an empty list).

    The builtin sum() replaces the original manual index loop.
    """
    return sum(lista)
# Demo: build a random list of values in [1, 10] and print their sum.
lista1 = crearLista(1,10)
sumar = sumaLista(lista1)
print(sumar)
994,855 | a9f093117e548b62bb7a6e21c1c22cb7630642bc | from bs4 import BeautifulSoup
import urllib2
import numpy as np
from numpy.linalg import inv
import pandas as pd
import sympy as sp
from scipy import linalg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
def Scrape():
    """Scrape the Iris dataset table from Wikipedia into Data.csv.

    Finds the first "wikitable sortable" table on the page and writes one
    CSV row per 5-cell table row, preceded by a header line.
    (Python 2 module: urllib2, bytes written after .encode.)
    """
    site= "https://en.wikipedia.org/wiki/Iris_flower_data_set"
    hdr = {'User-Agent': 'Mozilla/5.0'}
    req = urllib2.Request(site, headers=hdr)
    page = urllib2.urlopen(req)
    soup = BeautifulSoup(page, "lxml")
    SepalLen = 0
    SepalWidth = 0
    PetalLen = 0
    PetalWidth = 0
    Species = ""
    table = soup.find("table", { "class" : "wikitable sortable" })
    fl = open('Data.csv', 'w')
    write_to_file = "SepalLength" + "," + "SepalWidth" + "," + "PetalLength" + "," + "PetalWidth" + "," + "Species" + "\n"
    fl.write(write_to_file)
    for row in table.findAll("tr"):
        cells = row.findAll("td")
        #For each "tr", assign each "td" to a variable.
        if len(cells) == 5:
            SepalLen = cells[0].find(text=True)
            SepalWidth = cells[1].find(text=True)
            PetalLen = cells[2].find(text=True)
            PetalWidth = cells[3].find(text=True)
            Species = cells[4].find(text=True)
            # Species[3:] strips a leading prefix from the species cell text
            # -- NOTE(review): confirm against the live page markup.
            write_to_file = SepalLen + "," + SepalWidth + "," + PetalLen + "," + PetalWidth + "," + Species[3:] + "\n"
            write_unic = write_to_file.encode("utf8")
            fl.write(write_unic)
    fl.close()
def MatrixDef():
    """Train/evaluate a 3-class ridge-regression classifier on Data.csv.

    For each training fraction (10/30/50 percent) it samples per-class
    training rows, fits theta via regularized least squares
    (X^T X + lambda*I)^-1 X^T Y with one-hot targets, classifies by argmax
    and prints train/test misclassification rates.  (Python 2 module.)
    """
    no_of_datasets = 150
    dataset_per_class = no_of_datasets/3
    for percent in (10, 30, 50):
        df = pd.read_csv("Data.csv")
        Xi = df.as_matrix()  #Intermediate X to convert data into Matrix
        Xc = Xi[:,:-1]
        Xc = np.c_[Xc, np.ones(no_of_datasets)]  #Adding column of 1's for unit matrix
        # The csv is ordered by class: 50 rows per species.
        A = Xc[0:50]
        B = Xc[50:100]
        C = Xc[100:150]
        #Concatenated Matrices
        cA = np.c_[A, np.ones(dataset_per_class)]   #Adding column of 1's
        cA = np.c_[cA, np.zeros(dataset_per_class)] #for unit matrix and
        cA = np.c_[cA, np.zeros(dataset_per_class)] #column of 0's for others
        cB = np.c_[B, np.zeros(dataset_per_class)]
        cB = np.c_[cB, np.ones(dataset_per_class)]
        cB = np.c_[cB, np.zeros(dataset_per_class)]
        cC = np.c_[C, np.zeros(dataset_per_class)]
        cC = np.c_[cC, np.zeros(dataset_per_class)]
        cC = np.c_[cC, np.ones(dataset_per_class)]
        samples_per_class = dataset_per_class * percent /100
        # Random per-class train/test split (sampling without replacement).
        matrownosA = np.random.choice(dataset_per_class, samples_per_class, replace=False)  #random number gen
        matrownosA = matrownosA.tolist()
        rA = cA[matrownosA]
        rTestA = np.delete(cA,matrownosA,0)
        matrownosB = np.random.choice(dataset_per_class, samples_per_class, replace=False)
        matrownosB = matrownosB.tolist()
        rB = cB[matrownosB]
        rTestB = np.delete(cB,matrownosB,0)
        matrownosC = np.random.choice(dataset_per_class, samples_per_class, replace=False)
        matrownosC = matrownosC.tolist()
        rC = cC[matrownosC]
        rTestC = np.delete(cC,matrownosC,0)
        randomMatrix = np.append(rA, rB, axis=0)
        randomMatrix = np.append(randomMatrix, rC, axis=0)
        randomTestMatrix = np.append(rTestA, rTestB, axis=0)
        randomTestMatrix = np.append(randomTestMatrix, rTestC, axis=0)
        X = randomMatrix[:,0:5]  #X training Matrix
        Xtest = randomTestMatrix[:,0:5]
        Y = randomMatrix[:,5:]   # one-hot class indicators
        Ytest = randomTestMatrix[:,5:]
        Xt = X.transpose()
        lmbd = 0.5  # ridge regularization strength
        posD = (np.dot(Xt,X))
        posDef = sp.Matrix(posD)
        idenScalar = (lmbd*np.identity(5))
        posDef = posDef + idenScalar
        posInv = posDef.inv()
        # Closed-form ridge solution: theta = (X^T X + lambda I)^-1 X^T Y
        thetaHat = posInv * Xt * Y
        thetaHatnp = np.array(thetaHat.tolist()).astype(np.float64)  #convert to Numpy from Sympy
        Yhat = np.dot(X,thetaHatnp)
        rsltTrain = np.argmax(Yhat, axis= 1)
        countError = 0
        YhatRow = Yhat.shape[0]
        # Rows are grouped per class, so the expected label follows the index.
        for i in range(YhatRow):
            if ((i<samples_per_class and rsltTrain[i]!=0)
                    or ( i>=samples_per_class and i<(samples_per_class*2) and rsltTrain[i]!=1)
                    or ( i>=(samples_per_class*2) and i<(samples_per_class*3) and rsltTrain[i]!=2)):
                countError += 1
        misclassErrorTrain = countError/(YhatRow + 0.0)
        print "Nos of Training errors for "+str(percent) + "% data:" + str(countError)
        print "Misclassification error for "+str(percent)+ "% Training data:" + str(misclassErrorTrain)
        YhatTest = np.dot(Xtest,thetaHatnp)
        rsltTest = np.argmax(YhatTest, axis= 1)
        countErrorTest = 0
        YhatTestRow = no_of_datasets - YhatRow
        TestDataPerClass = dataset_per_class - samples_per_class
        for i in range(YhatTestRow):
            if ((i<TestDataPerClass and rsltTest[i]!=0) or ( i>=TestDataPerClass and i<(TestDataPerClass*2) and rsltTest[i]!=1) or ( i>=(TestDataPerClass*2) and i<(TestDataPerClass*3) and rsltTest[i]!=2)):
                countErrorTest += 1
        misclassErrorTest = countErrorTest/(YhatTestRow + 0.0)
        print "Nos of Test errors for "+str(percent)+"% data:" + str(countErrorTest)
        print "Misclassification error for "+str(percent)+"% Test data:" + str(misclassErrorTest)
# Script entry: scrape the dataset into Data.csv, then train and evaluate.
Scrape()
MatrixDef()
|
994,856 | 106aecc650f6563c32cc1435ca32474a2d38b3f8 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import sqrt
import unittest
from numpy import array, abs, tile
from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec
from pyspark.testing.mllibutils import MLlibTestCase
class FeatureTest(MLlibTestCase):
    """Tests for pyspark.mllib TF-IDF feature transformers."""

    def test_idf_model(self):
        # Four 11-term documents: the fitted model must expose one idf
        # weight per term.
        data = [
            Vectors.dense([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]),
            Vectors.dense([1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1]),
            Vectors.dense([1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0]),
            Vectors.dense([2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9]),
        ]
        model = IDF().fit(self.sc.parallelize(data, 2))
        idf = model.idf()
        self.assertEqual(len(idf), 11)
class Word2VecTests(MLlibTestCase):
    """Tests for pyspark.mllib Word2Vec configuration and training."""

    def test_word2vec_setters(self):
        # Fluent setters must store each configured value on the estimator.
        model = (
            Word2Vec()
            .setVectorSize(2)
            .setLearningRate(0.01)
            .setNumPartitions(2)
            .setNumIterations(10)
            .setSeed(1024)
            .setMinCount(3)
            .setWindowSize(6)
        )
        self.assertEqual(model.vectorSize, 2)
        self.assertTrue(model.learningRate < 0.02)
        self.assertEqual(model.numPartitions, 2)
        self.assertEqual(model.numIterations, 10)
        self.assertEqual(model.seed, 1024)
        self.assertEqual(model.minCount, 3)
        self.assertEqual(model.windowSize, 6)

    def test_word2vec_get_vectors(self):
        # With the default minCount of 5, only "a", "b" and "c" occur often
        # enough to receive a vector.
        data = [
            ["a", "b", "c", "d", "e", "f", "g"],
            ["a", "b", "c", "d", "e", "f"],
            ["a", "b", "c", "d", "e"],
            ["a", "b", "c", "d"],
            ["a", "b", "c"],
            ["a", "b"],
            ["a"],
        ]
        model = Word2Vec().fit(self.sc.parallelize(data))
        self.assertEqual(len(model.getVectors()), 3)
class StandardScalerTests(MLlibTestCase):
    """Tests for pyspark.mllib StandardScaler model setters and transform."""

    def test_model_setters(self):
        # Each column of this data has sample std 1 and means [2, 3, 4], so
        # enabling mean subtraction maps [1, 2, 3] to [-1, -1, -1].
        data = [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]
        model = StandardScaler().fit(self.sc.parallelize(data))
        self.assertIsNotNone(model.setWithMean(True))
        self.assertIsNotNone(model.setWithStd(True))
        self.assertEqual(model.transform([1.0, 2.0, 3.0]), DenseVector([-1.0, -1.0, -1.0]))

    def test_model_transform(self):
        # Default settings leave the mean alone, and each column's sample std
        # is exactly 1 here, so the transform is a no-op.
        data = [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]
        model = StandardScaler().fit(self.sc.parallelize(data))
        self.assertEqual(model.transform([1.0, 2.0, 3.0]), DenseVector([1.0, 2.0, 3.0]))
class ElementwiseProductTests(MLlibTestCase):
    """Tests for pyspark.mllib.feature.ElementwiseProduct."""

    def test_model_transform(self):
        # Hadamard (element-wise) product against both dense and sparse input;
        # sparsity must be preserved.
        weight = Vectors.dense([3, 2, 1])
        densevec = Vectors.dense([4, 5, 6])
        sparsevec = Vectors.sparse(3, [0], [1])
        eprod = ElementwiseProduct(weight)
        self.assertEqual(eprod.transform(densevec), DenseVector([12, 10, 6]))
        self.assertEqual(eprod.transform(sparsevec), SparseVector(3, [0], [3]))
class HashingTFTest(MLlibTestCase):
    """Tests for pyspark.mllib.feature.HashingTF."""

    def test_binary_term_freqs(self):
        # In binary mode repeated terms count once, so every present term
        # hashes to frequency 1.0 regardless of how often it occurs.
        hashingTF = HashingTF(100).setBinary(True)
        doc = "a a b c c c".split(" ")
        n = hashingTF.numFeatures
        output = hashingTF.transform(doc).toArray()
        expected = Vectors.sparse(
            n,
            {hashingTF.indexOf("a"): 1.0, hashingTF.indexOf("b"): 1.0, hashingTF.indexOf("c"): 1.0},
        ).toArray()
        for i in range(0, n):
            self.assertAlmostEqual(
                output[i],
                expected[i],
                14,
                "Error at " + str(i) + ": expected " + str(expected[i]) + ", got " + str(output[i]),
            )
class DimensionalityReductionTests(MLlibTestCase):
    """Tests for RowMatrix.computeSVD and computePrincipalComponents."""

    # Same 4x3 matrix in dense and sparse form, so both code paths are exercised.
    denseData = [
        Vectors.dense([0.0, 1.0, 2.0]),
        Vectors.dense([3.0, 4.0, 5.0]),
        Vectors.dense([6.0, 7.0, 8.0]),
        Vectors.dense([9.0, 0.0, 1.0]),
    ]
    sparseData = [
        Vectors.sparse(3, [(1, 1.0), (2, 2.0)]),
        Vectors.sparse(3, [(0, 3.0), (1, 4.0), (2, 5.0)]),
        Vectors.sparse(3, [(0, 6.0), (1, 7.0), (2, 8.0)]),
        Vectors.sparse(3, [(0, 9.0), (2, 1.0)]),
    ]

    def assertEqualUpToSign(self, vecA, vecB):
        # Singular/principal vectors are only determined up to sign, so
        # accept either vecA ~= vecB or vecA ~= -vecB.
        eq1 = vecA - vecB
        eq2 = vecA + vecB
        self.assertTrue(sum(abs(eq1)) < 1e-6 or sum(abs(eq2)) < 1e-6)

    def test_svd(self):
        denseMat = RowMatrix(self.sc.parallelize(self.denseData))
        sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))
        m = 4
        n = 3
        for mat in [denseMat, sparseMat]:
            for k in range(1, 4):
                rm = mat.computeSVD(k, computeU=True)
                self.assertEqual(rm.s.size, k)
                self.assertEqual(rm.U.numRows(), m)
                self.assertEqual(rm.U.numCols(), k)
                self.assertEqual(rm.V.numRows, n)
                self.assertEqual(rm.V.numCols, k)
        # Test that U returned is None if computeU is set to False.
        self.assertEqual(mat.computeSVD(1).U, None)
        # Test that low rank matrices cannot have number of singular values
        # greater than a limit.
        rm = RowMatrix(self.sc.parallelize(tile([1, 2, 3], (3, 1))))
        self.assertEqual(rm.computeSVD(3, False, 1e-6).s.size, 1)

    def test_pca(self):
        # Hand-computed principal components for the fixture matrix above.
        expected_pcs = array(
            [
                [0.0, 1.0, 0.0],
                [sqrt(2.0) / 2.0, 0.0, sqrt(2.0) / 2.0],
                [sqrt(2.0) / 2.0, 0.0, -sqrt(2.0) / 2.0],
            ]
        )
        n = 3
        denseMat = RowMatrix(self.sc.parallelize(self.denseData))
        sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))
        for mat in [denseMat, sparseMat]:
            for k in range(1, 4):
                pcs = mat.computePrincipalComponents(k)
                self.assertEqual(pcs.numRows, n)
                self.assertEqual(pcs.numCols, k)
                # We can just test the updated principal component for equality.
                self.assertEqualUpToSign(pcs.toArray()[:, k - 1], expected_pcs[:, k - 1])
if __name__ == "__main__":
    from pyspark.mllib.tests.test_feature import *  # noqa: F401

    try:
        # Prefer xmlrunner when available so CI gets JUnit-style XML reports;
        # fall back to the default text runner otherwise.
        import xmlrunner

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
994,857 | 370f1ee50fe85f35aef18f1c19b0be60c0549e58 | from . import lstm
# Registry mapping model-type names to their implementation modules.
all_models = {
    'lstm': lstm
}
|
994,858 | 18c9133cdce4bbf90300f88c67a18721ddf8ff2a | """Module to handling Transantiago-data headers"""
import os
import zipfile
import rarfile
import gzip
#import TransantiagoConstants
#For Jupyter.
from Utils import TransantiagoConstants
defaultEtapasHeaders = TransantiagoConstants.defaultEtapasHeaders
defaultViajesHeaders = TransantiagoConstants.defaultViajesHeaders
defaultPerfilesHeaders = TransantiagoConstants.defaultPerfilesHeaders
defaultTRXPPUHeaders = TransantiagoConstants.defaultTRXPPUHeaders
currentSSHDates = TransantiagoConstants.currentSSHDates
SSHDir = TransantiagoConstants.SSHDir
def getHeaders(*args):
    """Return or print the column headers for a Transantiago data file.

    args[0]=fileType ('etapas', 'viajes', 'perfiles' or 'TRXPPU'),
    args[1]=date (optional).  With one argument the in-memory default
    header list is returned; with two, the header line is read from the
    matching compressed file under SSHDir and printed instead.
    """
    try:
        if len(args) == 1:
            if args[0] == 'etapas':
                return defaultEtapasHeaders
            elif args[0] == 'viajes':
                return defaultViajesHeaders
            elif args[0] == 'perfiles':
                return defaultPerfilesHeaders
            elif args[0] == 'TRXPPU':
                return defaultTRXPPUHeaders
            else:
                print('Wrong fileType')
        elif len(args) == 2:
            try:
                if args[0] == 'TRXPPU':
                    # TRXPPU file locations are not tracked, so ask interactively.
                    print('Warning: complete path should be specified. Information about available dates is currently not maintained')
                    TRXPPUPath = input('Enter the path to the specific file: ')
                    readAndPrintZipHeader(TRXPPUPath)
                elif args[0] == 'etapas' and args[1] in currentSSHDates:
                    etapasFile = args[1] + '.etapas.gz'
                    etapasPath = os.path.join(SSHDir, etapasFile)
                    readAndPrintZipHeader(etapasPath)
                elif args[0] == 'viajes' and args[1] in currentSSHDates:
                    viajesFile = args[1] + '.viajes.gz'
                    viajesPath = os.path.join(SSHDir, viajesFile)
                    readAndPrintZipHeader(viajesPath)
                elif args[0] == 'perfiles' and args[1] in currentSSHDates:
                    perfilesFile = args[1] + '.perfiles.gz'
                    perfilesPath = os.path.join(SSHDir, perfilesFile)
                    readAndPrintZipHeader(perfilesPath)
                else:
                    raise ValueError('Wrong SSH date')
            except ValueError as dateErr:
                print (dateErr.args)
        else:
            raise ValueError('Wrong number of arguments')
    except ValueError as argsErr:
        print(argsErr.args)
def getCurrentSSHDates():
    """Return the module-level list of SSH dates loaded from TransantiagoConstants."""
    return currentSSHDates
def readAndPrintHeader(filePath):
    """Print the first line (header) of a plain-text file.

    The ``with`` statement already closes the file on exit, so the explicit
    ``file.close()`` the previous version called inside the block was
    redundant and has been removed.
    """
    with open(filePath, "r") as file:
        first_line = file.readline()
    print(first_line)
def readAndPrintZipHeader(filePath):
    """Print the first line of a compressed data file (.zip, .rar or .gz).

    Bug fix: ``ZipFile.open`` and ``RarFile.open`` only accept binary mode
    ('r'), so the previous mode string 'rt' raised ValueError.  The member
    stream is now wrapped in ``io.TextIOWrapper`` to read text; gzip.open
    genuinely supports 'rt' and is unchanged.  The explicit close() calls
    were redundant inside ``with`` blocks and have been dropped.
    """
    import io

    if filePath[-4:] == '.zip':
        with zipfile.ZipFile(filePath) as archive:
            members = archive.infolist()
            with archive.open(members[0], 'r') as member:
                first_line = io.TextIOWrapper(member).readline()
                print(first_line)
    elif filePath[-4:] == '.rar':
        with rarfile.RarFile(filePath) as archive:
            members = archive.infolist()
            with archive.open(members[0]) as member:
                first_line = io.TextIOWrapper(member).readline()
                print(first_line)
    elif filePath[-3:] == '.gz':
        with gzip.open(filePath, 'rt') as gz_file:
            first_line = gz_file.readline()
            print(first_line)
def getIndexOfAttribute(fileType, headerName):
    """Return headerName's position in the default header list for fileType.

    Only referenced to defaultHeaders. TODO: Should be updated when read a
    new file.  Returns None when the name is absent; an unknown fileType
    prints the error args (ValueError is caught internally).
    """
    headers_by_type = {
        'etapas': defaultEtapasHeaders,
        'viajes': defaultViajesHeaders,
        'perfiles': defaultPerfilesHeaders,
        'TRXPPU': defaultTRXPPUHeaders,
    }
    try:
        if fileType not in headers_by_type:
            raise ValueError('Wrong fileType or headerName')
        for position, current_name in enumerate(headers_by_type[fileType]):
            if current_name == headerName:
                return position
    except ValueError as err:
        print(err.args)
|
994,859 | 066c35f2f6f544f1c87ffab3845e3037d217b618 | # Copyright (c) 2014 - 2016 Qualcomm Technologies International, Ltd.
# All Rights Reserved.
# Qualcomm Technologies International, Ltd. Confidential and Proprietary.
# Part of BlueLab-7.1-Release
# Part of the Python bindings for the kalaccess library.
from ctypes import c_uint, c_void_p, POINTER
from ka_ctypes import ka_err
class KaDspState(object):
    """Decodes the raw Kalimba DSP status word into individual state queries.

    The KAL_STATUS_* bit masks are class attributes installed at runtime
    (by KaOther) from the underlying C library's constants.
    """

    def __init__(self, raw_state):
        self.raw_state = raw_state

    def __repr__(self):
        # Table-driven rendering: header line plus one entry per set flag.
        parts = ["DSP state: %#x" % self.raw_state]
        for predicate, label in (
            (self.is_running, "Running"),
            (self.is_on_pm_break, "PM breakpoint"),
            (self.is_on_dm_break, "DM breakpoint"),
            (self.is_on_instruction_break, "Instruction break"),
            (self.is_on_exception_break, "Exception break"),
            (self.is_on_external_break, "External break"),
        ):
            if predicate():
                parts.append(label)
        return "\n\t".join(parts)

    def _flag_set(self, mask):
        # True when the given status bit is set in the raw state word.
        return bool(self.raw_state & mask)

    def is_running(self):
        return self._flag_set(KaDspState.KAL_STATUS_RUNNING)

    def is_on_pm_break(self):
        return self._flag_set(KaDspState.KAL_STATUS_PM_BREAK)

    def is_on_dm_break(self):
        return self._flag_set(KaDspState.KAL_STATUS_DM_BREAK)

    def is_on_instruction_break(self):
        return self._flag_set(KaDspState.KAL_STATUS_INSTR_BREAK)

    def is_on_exception_break(self):
        return self._flag_set(KaDspState.KAL_STATUS_EXCEPTION)

    def is_on_external_break(self):
        return self._flag_set(KaDspState.KAL_STATUS_EXTERNAL_BREAK)
class KaOther:
    """Miscellaneous kalaccess operations: reset, DSP state query, PC profiling."""

    def __init__(self, core):
        self._core = core
        self._cfuncs = {}
        self._core._add_cfunc(self._cfuncs, 'ka_reset', POINTER(ka_err), [c_void_p])
        self._core._add_cfunc(self._cfuncs, 'ka_hal_get_dsp_state', POINTER(ka_err), [c_void_p, POINTER(c_uint)])
        self._core._add_cfunc(self._cfuncs, 'ka_hal_pcprofile', POINTER(ka_err), [c_void_p, POINTER(c_uint), c_uint])
        # Publish the status bit masks on KaDspState so its query methods work.
        KaDspState.KAL_STATUS_PM_BREAK = self._core._extract_c_integer_constant("KAL_STATUS_PM_BREAK")
        KaDspState.KAL_STATUS_DM_BREAK = self._core._extract_c_integer_constant("KAL_STATUS_DM_BREAK")
        KaDspState.KAL_STATUS_RUNNING = self._core._extract_c_integer_constant("KAL_STATUS_RUNNING")
        KaDspState.KAL_STATUS_INSTR_BREAK = self._core._extract_c_integer_constant("KAL_STATUS_INSTR_BREAK")
        KaDspState.KAL_STATUS_EXCEPTION = self._core._extract_c_integer_constant("KAL_STATUS_EXCEPTION")
        KaDspState.KAL_STATUS_EXTERNAL_BREAK = self._core._extract_c_integer_constant("KAL_STATUS_EXTERNAL_BREAK")

    def reset(self):
        """Performs a reset by disabling the Kalimba, and then enabling it again."""
        err = self._cfuncs['ka_reset'](self._core._get_ka())
        self._core._handle_error(err)

    def get_dsp_state(self):
        """Reads the state of the Kalimba core. Returns a KaDspState object, which can be queried for various possible
        states."""
        result = c_uint()
        err = self._cfuncs['ka_hal_get_dsp_state'](self._core._get_ka(), result)
        self._core._handle_error(err)
        return KaDspState(result.value)

    def pcprofile(self, num_samples):
        """Reads Kalimba's program counter repeatedly, as fast as possible, until num_samples have
        been read. Returns the PC values as a list."""
        pc_samples = (c_uint * num_samples)()
        err = self._cfuncs['ka_hal_pcprofile'](self._core._get_ka(), pc_samples, num_samples)
        self._core._handle_error(err)
        # Bug fix: under Python 3 the previous ``map(lambda x: x, ...)``
        # returned a lazy, single-use iterator, not the documented array of
        # values.  Materialise a plain list instead.
        return list(pc_samples)
994,860 | a970d068974da2473c4dea77b286eab540e25e38 | from ._main import __version__
__all__ = ['__version__']
|
994,861 | 9eeba07938408d8f892bc990211f39280b15a621 | '''
For a 6x6 array, calculate the maximum hourglass sum where an hourglass is:
a b c
d
e f g
'''
def hourglassSum(arr):
    """Return the maximum hourglass sum in a 2-D grid.

    An hourglass is the seven-cell pattern
        a b c
          d
        e f g

    Fixes over the previous version: the running maximum was initialised to
    0, which is wrong for grids whose hourglass sums are all negative, and
    the ``int(len(arr)/2) + 1`` loop bounds were only coincidentally correct
    for 6x6 input.  This version works for any grid of at least 3x3.
    """
    best = None
    for x in range(len(arr) - 2):
        for y in range(len(arr[x]) - 2):
            total = sum(arr[x][y:y + 3]) + arr[x + 1][y + 1] + sum(arr[x + 2][y:y + 3])
            if best is None or total > best:
                best = total
    return best
# Sanity check: the best hourglass in this grid is at (row 1, col 2),
# summing 10+0+0 + 0 + 2+4+4 = 20.
array = [[1, 1, 1, 0, 0, 0],
         [0, 1, 10, 0, 0, 0],
         [1, 1, 1, 0, 0, 0],
         [0, 0, 2, 4, 4, 0],
         [0, 0, 0, 2, 0, 0],
         [0, 0, 1, 2, 4, 0]]
assert hourglassSum(array) == 20
|
994,862 | be5336924b80a93e239e778314a6c4abbc32f8dd | ###############################################################################
''''''
###############################################################################
from ._base import _Vanishable, _Colourable, _Fillable, _Fadable
from .edges import Edges
from .grid import Grid
from .ticks import Ticks
from .legend import Legend
from ._element import _MplText, _MplGeometry, _MplVanishable
class _PropsController(_MplVanishable, _Colourable, _Fillable, _Fadable):
    """Base for axes-level property controllers; the mpl element is the axes itself."""

    def __init__(self, mplax, **kwargs):
        self.mplax = mplax
        super().__init__(**kwargs)

    def _get_mplelement(self):
        # The controlled matplotlib element is the axes object itself.
        return self.mplax
class AxTitle(_MplText):
    """Text controller wrapping a matplotlib axes' title artist."""

    def __init__(self, mplax, **kwargs):
        self.mplax = mplax
        super().__init__(**kwargs)

    def _get_mplelement(self):
        return self.mplax.title
class AxPatch(_MplGeometry):
    """Geometry controller wrapping a matplotlib axes' background patch."""

    def __init__(self, mplax, **kwargs):
        self.mplax = mplax
        super().__init__(**kwargs)

    def _get_mplelement(self):
        return self.mplax.patch

    def _set_colour(self, value):
        # 'colour' maps to the patch edge colour (fill is handled elsewhere).
        self.mplelement.set_edgecolor(value)
class Props(_PropsController):
    """Aggregate controller for an axes' visual properties.

    Wires up sub-controllers for the title, grid, ticks, edges, legend and
    background patch, and forwards updates to an outer callback.
    """

    def __init__(self,
            mplax,
            outerupdate,
            dims = ('x', 'y'),
            alpha = 1.,
            colour = 'black',
            fill = 'white',
            visible = True,
            title = '',
            ):
        # Callback invoked after every update so the owning figure can react.
        self._outerupdate = outerupdate
        super().__init__(
            mplax,
            alpha = alpha,
            colour = colour,
            visible = visible,
            fill = fill,
            )
        self._add_sub(AxTitle(
            mplax,
            text = title,
            ), 'title')
        self._add_sub(Grid(
            mplax,
            dims = dims,
            colour = 'grey',
            alpha = 0.5,
            ), 'grid')
        self._add_sub(Ticks(
            mplax,
            dims = dims,
            ), 'ticks')
        self._add_sub(Edges(
            mplax,
            dims = dims,
            ), 'edges')
        self._add_sub(Legend(
            mplax,
            visible = False,
            ), 'legend')
        self._add_sub(AxPatch(
            mplax,
            alpha = 0.,
            ), 'patch')
        # Attach each dimension's ticks under its edge controllers so tick
        # state follows edge state.
        for dim in dims:
            self['edges'][dim]._add_sub(self['ticks'][dim], 'ticks')
            self['edges'][dim]['primary']._add_sub(self['ticks'][dim], 'ticks')

    def update(self):
        super().update()
        self._outerupdate()
###############################################################################
''''''
###############################################################################
|
994,863 | 1816c1cc03bdd006665cee1bb29836d22b7ce048 | # -*- coding: utf-8 -*-
# Russian month names in the genitive case ("of January", ...), keyed by
# month number — suitable for date phrases like "5 Января".
MONTHS = {1: 'Января', 2: 'Февраля', 3: 'Марта', 4: 'Апреля', 5: 'Мая', 6: 'Июня', 7: 'Июля', 8: 'Августа',
          9: 'Сентября', 10: 'Октября', 11: 'Ноября', 12: 'Декабря'}
|
994,864 | 8a4d08f06ef857756d908817c671af642adc6496 | names = ['Michael', 'Bob', 'Tracy']
# Tutorial script: loops, string comparison and dict membership/get().
for name in names:
    if name == "Michael":
        print("指定名字:%s" %(name))
    else:
        print(name)
name1 = "liuxinchi"
name2 = "liuxinchi"
# Equal string contents compare equal with ==.
if name1 == name2:
    print(True)
else:
    print(False);
d = {};
d['Jack'] = 90
# Membership test with `in` ...
if 'Jacka' in d:
    print(d['Jacka'])
else:
    print("Jacka不存在!")
# ... versus dict.get() with a falsy default.  NOTE(review): this branch
# prints d['Jack'], not d['Jacka'] — presumably intentional for the demo.
if d.get("Jacka",False):
    print(d['Jack'])
else:
    print("Jacka不存在!")
994,865 | 00a4ee01195920cd48b1dddcfa242cedc1b7f2e0 | from datetime import datetime
from django.db import models
# Create your models here.
class Owner(models.Model):
    """An account owner (person or entity) identified by a short name."""

    name = models.CharField(max_length=50)
    short_name = models.CharField(max_length=20)

    def __unicode__(self):
        return self.short_name

    def __str__(self):
        # Delegate to __unicode__ for Python 2/3 compatibility.
        return self.__unicode__()
# start with Assets, Expense = Liability, Equity, Income
class AccountType(models.Model):
    """Accounting category (Assets, Expense, Liability, Equity, Income).

    sign_modifier flips the displayed sign for credit-normal account types.
    """

    name = models.CharField(max_length=40)
    sign_modifier = models.IntegerField(default=1)

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.__unicode__()
class Account(models.Model):
    """A ledger account belonging to an Owner with a given AccountType.

    NOTE(review): ForeignKey without on_delete is pre-Django-2.0 style;
    confirm the project's Django version before modernising.
    """

    owner = models.ForeignKey(Owner)
    name = models.CharField(max_length=80)
    short_name = models.CharField(max_length=20)
    type = models.ForeignKey(AccountType)

    def __unicode__(self):
        return self.short_name

    def __str__(self):
        return self.__unicode__()
class Transaction(models.Model):
    """A double-entry transaction moving `value` from `credit` to `debit`."""

    description = models.CharField(max_length=80)
    date = models.DateTimeField(default=datetime.now)
    value = models.DecimalField(max_digits=14, decimal_places=2)
    debit = models.ForeignKey(Account, related_name="+")
    credit = models.ForeignKey(Account, related_name="+")

    def __unicode__(self):
        # Rendered as "d/m/y: value debit / credit - description".
        format_string = "%s/%s/%s: %.2f %s / %s - %s"
        return format_string % (self.date.day, self.date.month,
                                self.date.year,
                                self.value,
                                self.debit, self.credit,
                                self.description[:80], )

    def __str__(self):
        return self.__unicode__()
|
994,866 | 20d471b2dfd9ae115b862560c85f37657410e8b0 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Print each contact-info entry on its own line via argument unpacking.
List = ["Name = Morali Shah",
        "email = shahmorali@gmail.com",
        "slackname = @morali",
        "biostack = Drug Development"]
print (*List, sep = "\n")
# In[ ]:
|
994,867 | 44ac278abe1062d5f2d60392e85a2e41da30734c | products_info = input().split()
# Input alternates name/quantity pairs: names at even indices, counts at odd.
products = products_info[0::2]
quantities = [int(quantity) for quantity in products_info[1::2]]
my_dict = dict(zip(products, quantities))
print(my_dict)
|
994,868 | 391a769fbc6e2c53c895195845914b08a462b34e | import pandas as pd
def write_client_database(df_clients_path, clients_table):
    """Dump a Qt table widget's (client, address, RUC) rows to a CSV file.

    Parameters
    ----------
    df_clients_path : str
        Destination CSV path.
    clients_table : QTableWidget-like
        Object exposing rowCount() and item(row, column); cells whose
        item() is None are written as empty strings.
    """
    rows = []
    for row in range(clients_table.rowCount()):
        client = clients_table.item(row, 0)
        address = clients_table.item(row, 1)
        ruc = clients_table.item(row, 2)
        rows.append({
            'Cliente': client.text() if client else "",
            'Dirección': address.text() if address else "",
            'RUC': ruc.text() if ruc else "",
        })
    # Build the frame in one shot: DataFrame.append was deprecated in
    # pandas 1.4 and removed in 2.0, and appending row-by-row is O(n^2).
    new_df = pd.DataFrame(rows, columns=['Cliente', 'Dirección', 'RUC'])
    new_df.to_csv(df_clients_path, index=False)
994,869 | f99695e711677b78587d72c7b202c68f418c58ea | # expression = input()
expression = '{([]){}()}'
# expression = ']['
brackets = [('(', ')'),
('[', ']'),
('{', '}')]
counts = all([expression.count(a) == expression.count(b) for a, b in brackets]) # do all bracket pairs counts match?
order = all([expression.find(a) <= expression.find(b) for a, b in brackets]) # is the order right?
print("true" if order and counts else "false")
|
994,870 | 442978f04badc7c542d31370ae696ae3e766692f | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.manager import resources
from c7n import query
@resources.register("appstream-fleet")
class AppStreamFleet(query.QueryResourceManager):
    """Cloud Custodian resource manager for AWS AppStream fleets."""

    class resource_type(query.TypeInfo):
        service = "appstream"
        enum_spec = ('describe_fleets', 'Fleets', None)
        arn_type = "fleet"
        arn = "Arn"
        id = "Name"
        name = "Name"
        cfn_type = "AWS::AppStream::Fleet"
        universal_taggable = object()

    source_mapping = {
        "describe": query.DescribeWithResourceTags,
    }
@resources.register("appstream-stack")
class AppStreamStack(query.QueryResourceManager):
    """Cloud Custodian resource manager for AWS AppStream stacks."""

    class resource_type(query.TypeInfo):
        service = "appstream"
        enum_spec = ('describe_stacks', 'Stacks', None)
        arn_type = "stack"
        arn = "Arn"
        id = "Name"
        name = "Name"
        cfn_type = "AWS::AppStream::Stack"
        universal_taggable = object()

    source_mapping = {
        "describe": query.DescribeWithResourceTags,
    }
|
# Toy Multinomial Naive Bayes demo: six binary feature vectors with class
# labels +1 / -1, then classify one unseen sample.
data = [
    [1, 1, 0],
    [1, 1, 0],
    [1, 1, 0],
    [1, 1, 1],
    [0, 1, 1],
    [0, 1, 1],
]
targets = [1, 1, 1, -1, -1, -1]
test_data = [1, 1, 1]
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB()
model.fit(data, targets)
# predict() requires a 2-D array of shape (n_samples, n_features); passing
# the bare 1-D sample raises ValueError in any recent scikit-learn.
result = model.predict([test_data])
print(result)
994,872 | 07fa8baeb072cf6a3b21aa8fc88b49ec5c948ba4 |
def num_ways(msg):
if (len(msg) == 0):
return 1
if (len(msg) != 1):
str2 = int(msg[0:2])
msg2 = msg[2:len(msg)]
msg1 = msg[1:len(msg)]
if (str2 < 27):
return 1 + num_ways(msg1) + num_ways(msg2)
else:
return 1 + num_ways(msg1)
ev = "123"
i = num_ways(ev)
print (i) |
994,873 | be93779f45f4522e5fac0d03c80e61ec6dd0b6d5 | #! /usr/bin/python3
import unittest
from collections import defaultdict
def mostfrequenteven(l):
    """Return the most frequent even number in l, or -1 if none exists.

    Ties are broken by returning the smallest of the most frequent values.
    """
    frequency = defaultdict(lambda: 0)
    for number in l:
        if number % 2 == 0:
            frequency[number] += 1
    if not frequency:
        return -1
    top_count = max(frequency.values())
    return min(value for value, count in frequency.items() if count == top_count)
class TestMostfrequenteven(unittest.TestCase):
    """Unit tests covering the found, not-found and tie-breaking cases."""

    def test_ex1(self):
        self.assertEqual(mostfrequenteven([1, 1, 2, 6, 2]), 2, 'example 1')

    def test_ex2(self):
        # No even numbers at all -> sentinel -1.
        self.assertEqual(mostfrequenteven([1, 3, 5, 7]), -1, 'example 2')

    def test_ex3(self):
        # 4 and 6 tie on frequency; the smaller value wins.
        self.assertEqual(mostfrequenteven([6, 4, 4, 6, 1]), 4, 'example 3')
unittest.main()
|
994,874 | ff39f349bf570a99c809f3c110b057d5a98318e2 | import operator
class Polynomial:
def __init__(self, coefss):
if not coefss:
self.coeffs = [0]
self.degree = 0
return
for i, coeff in enumerate(coefss):
if not isinstance(coeff, (int, float)):
raise TypeError("polynomial coeffs should have int or float types!")
self.coeffs = []
self.coeffs[0:len(coefss)] = coefss[:]
i = 0
while ((i < len(self.coeffs)) and (self.coeffs[i] == 0)):
self.coeffs.pop(0)
if not self.coeffs:
self.coeffs.insert(0, 0)
self.degree = len(self.coeffs) - 1
def __add__(self, other):
if isinstance(other, Polynomial):
if (self.degree == other.degree):
result = list(map(operator.add, self.coeffs, other.coeffs))
return Polynomial(result)
elif (self.degree > other.degree):
rhPolynomialCoeffs = [0] * (self.degree - other.degree) + other.coeffs
result = list(map(operator.add, self.coeffs, rhPolynomialCoeffs))
return Polynomial(result)
else:
lhPolynomialCoeffs = [0] * (other.degree - self.degree) + self.coeffs
result = list(map(operator.add, lhPolynomialCoeffs, other.coeffs))
return Polynomial(result)
else:
if not isinstance(other, (int, float)):
raise TypeError("incorrect type of value!")
result = Polynomial(self.coeffs)
result.coeffs[len(result.coeffs) - 1] += other
return result
def __mul__(self, other):
if isinstance(other, Polynomial):
result = [0] * (self.degree + other.degree + 1)
for i, l in enumerate(self.coeffs):
for j, r in enumerate(other.coeffs):
result[i + j] += l * r
return Polynomial(result)
else:
if not isinstance(other, (int, float)):
raise TypeError("incorrect type of value!")
result = Polynomial([coef * other for coef in self.coeffs])
return result
def __eq__(self, other):
if not isinstance(other, Polynomial):
raise TypeError("incorrect type of value!")
return self.coeffs == other.coeffs
def __str__(self):
result = ""
count = len(self.coeffs)
if (count == 1):
result += str(self.coeffs[0])
return result
for i, coeff in enumerate(self.coeffs):
if ((count - 1 - i) == self.degree):
if (self.degree == 1):
dgr = ''
else:
dgr = str((self.degree))
if (coeff == -1):
symb = '-'
elif (coeff == 1):
symb = ''
else:
symb = str(coeff)
result = result + symb + 'x' + dgr
elif ((count - 1 - i) == 0):
if (coeff > 0):
result += '+' + str(coeff)
elif (coeff < 0):
result += str(coeff)
return result
else:
if (coeff != 0):
if (self.degree - i == 1):
dgr = ''
else:
dgr = str((self.degree - i))
if (coeff == -1):
symb = '-'
elif (coeff == 1):
symb = ''
else:
symb = str(coeff)
if (coeff > 0):
result = result + '+' + symb + 'x' + dgr
else:
result = result + symb + 'x' + dgr |
994,875 | a0ed3f1511adf47f83d3821158fccb90a907bd55 | from lilaclib import *
import os
repo_depends = ["simgear", "flightgear-data"]
build_prefix = 'extra-x86_64'
def pre_build():
    """Lilac pre-build hook: rewrite the AUR PKGBUILD's dependency lines.

    Pins depends/makedepends to known-good package sets (openscenegraph34,
    Qt5 components, subversion) before the build runs.
    """
    aur_pre_build()
    for line in edit_file('PKGBUILD'):
        if line.startswith('depends='):
            print("depends=('libxmu' 'openscenegraph34' 'openal' 'qt5-svg')")
            continue
        if line.startswith('makedepends='):
            print("makedepends=('boost' 'cmake' 'mesa' 'sharutils' 'simgear' 'qt5-base' 'qt5-declarative' 'plib' 'glu' 'libxrandr' 'subversion')")
            continue
        else:
            # Pass every other PKGBUILD line through unchanged.
            print(line)
# if __name__ == '__main__':
# single_main('extra-x86_64')
|
994,876 | 5c69b1e64a32b18abdb3c12af378fcb587084479 | firstName = "Alex "
lastName = "Perrotta"
print "Hello, my name is " + (firstName) + (lastName)
first = raw_input("first name: ")
last = raw_input("last name: ")
|
994,877 | c1c96f50ca6721c36ef15dede27dce9ae4940a21 | from .. import MMC_MASS
# Neural Net feats
HH_FEATURES_NN = [
"ditau_tau0_pt",
"ditau_tau0_eta",
"ditau_tau0_phi",
"ditau_tau0_m",
"ditau_tau1_pt",
"ditau_tau1_eta",
"ditau_tau1_phi",
"ditau_tau1_m",
"met_et",
"met_etx",
"met_ety",
]
# # reco level training; goal is to predict Higgs mass.
HH_FEATURES= ['ditau_dr',
'met_et',
'ditau_scal_sum_pt',
'ditau_vect_sum_pt',
'ditau_mt_lep0_met',
'ditau_mt_lep1_met',
'ditau_vis_mass',
'ditau_dphi',
'ditau_tau0_pt',
'ditau_tau1_pt',
'ditau_met_min_dphi',
'ditau_met_lep0_cos_dphi',
'ditau_met_lep1_cos_dphi',
'ditau_dpt',
# # # low correlation features
# "ditau_tau0_eta",
# "ditau_tau0_phi",
# "ditau_tau0_m",
# "ditau_tau1_eta",
# "ditau_tau1_phi",
# "ditau_tau1_m",
# "met_etx",
# "met_ety",
# 'met_sumet',
# 'ditau_coll_approx_m',
# # 'ditau_coll_approx_x0',
# # 'ditau_coll_approx_x1',
# "ditau_tau0_n_tracks",
# "ditau_tau1_n_tracks",
# "ditau_tau0_jet_bdt_score",
# # NOT IN NTUPS
#'ditau_ptx',
#'ditau_pty',
#"dijet_vis_mass",
#"dijet_deta",
# "ditau_tau0_jet_bdt_score_trans",
# 'ditau_pt_diff',
# 'ditau_pt_ratio',
# 'ditau_mt',
# 'ditau_cos_theta',
# "ditau_mmc_mlm_m",
]
HH_CUT_BRANCHES = [
"ditau_tau0_jet_bdt_medium",
"ditau_tau1_jet_bdt_medium",
"ditau_tau0_matched_isHadTau",
"ditau_tau1_matched_isHadTau",
"jet_1_pt",
"jet_0_pt",
]
TARGET = ["parent_m"]
HH_MASS_PREDICTORS = [
"ditau_mmc_mlm_m",
#'ditau_mmc_mlnu3p_m',
#'ditau_mosaic_mH_m6',
]
HH_BRACNHES = list(
set(HH_FEATURES + HH_CUT_BRANCHES + TARGET + HH_MASS_PREDICTORS)
)
##------------------------------------------------------------------------
## lephad channel features
LH_FEATURES= ['lephad_dr',
'met_reco_et',
'lephad_scal_sum_pt',
'lephad_vect_sum_pt',
'lephad_mt_lep0_met',
'lephad_mt_lep1_met',
'lephad_vis_mass',
'lephad_dphi',
'lep_0_pt',
'tau_0_pt',
'lephad_met_min_dphi',
'lephad_met_lep0_cos_dphi',
'lephad_met_lep1_cos_dphi',
'lephad_dpt',
]
LH_MASS_PREDICTORS = ["lephad_mmc_mlm_m"]
LH_CUT_BRACNHES = [
"is_oneselectedlep",
"is_oneselectedtau",
"met_reco_et",
"lep_0_q",
"tau_0_q",
"tau_0_jet_bdt_score",
"tau_0_jet_bdt_medium",
"tau_0_eta",
"n_bjets",
"lephad_mt_lep0_met",
"ditau_matched",
"is_boosted_mva",
"is_vbf_mva",
]
LH_BRANCHES = list(
set(LH_FEATURES + LH_CUT_BRACNHES + TARGET + LH_MASS_PREDICTORS)
)
##------------------------------------------------------------------------
## TRUTH MC15 feats
HH_FEATURES_TRUTH= ['true_ditau_vis_dr',
'true_met_et',
'true_ditau_vis_scal_sum_pt',
'true_ditau_vis_vect_sum_pt',
'true_ditau_vis_dpt',
'true_ditau_vis_mass',
'true_ditau_vis_dphi',
'true_tau_0_pt_vis',
'true_tau_1_pt_vis',
# 'true_ditau_vis_mt_lep0_met',
# 'true_ditau_vis_mt_lep1_met',
# 'true_ditau_vis_met_min_dphi',
# 'true_ditau_vis_met_lep0_cos_dphi',
# 'true_ditau_vis_met_lep1_cos_dphi',
'parent_m'
]
HH_FEATURES_DITAU_TRUTH_MATCHED = [
'ditau_matched_vis_dr',
'true_met_et',
'ditau_matched_vis_scal_sum_pt',
'ditau_matched_vis_vect_sum_pt',
# 'ditau_matched_vis_mt_lep0_met',
# 'ditau_matched_vis_mt_lep1_met',
'ditau_matched_vis_mass',
'ditau_ditau_matched_vect_sum_pt_dphi',
'ditau_matched_tau0_pt',
'ditau_matched_tau1_pt',
'ditau_met_min_dphi',
'ditau_met_lep0_cos_dphi', #<! not availbale
'ditau_met_lep1_cos_dphi', #<! not available
'ditau_matched_dpt',
# low correlation features
# 'met_sumet',
# 'ditau_mt',
# 'ditau_pt_diff',
# 'ditau_cos_theta',
# 'ditau_coll_approx_m',
# 'ditau_coll_approx_x0',
# 'ditau_coll_approx_x1',
#'ditau_pt_ratio',
]
# # RECO TRUTH feats:
RECO_TRUTH_FEATURES = {
'ditau_dr':
["ditau_dr", "ditau_matched_vis_dr"],
'met_et':
['met_et', 'true_met_et'],
'ditau_scal_sum_pt':
['ditau_scal_sum_pt', 'ditau_matched_vis_scal_sum_pt'],
'ditau_vect_sum_pt':
['ditau_vect_sum_pt', 'ditau_matched_vis_vect_sum_pt'],
# 'ditau_mt_lep0_met':
# ['ditau_mt_lep0_met', 'ditau_mt_lep0_met']
# 'ditau_mt_lep1_met':
# ['ditau_mt_lep1_met', 'ditau_mt_lep1_met'],
'ditau_vis_mass':
['ditau_vis_mass', 'ditau_matched_vis_mass'],
'ditau_dphi':
['ditau_dphi', 'ditau_matched_vis_dphi'],
'ditau_tau0_pt':
['ditau_tau0_pt', 'ditau_tau0_matched_pt'],
'ditau_tau1_pt':
['ditau_tau1_pt', 'ditau_tau1_matched_pt'],
# 'ditau_met_min_dphi':
# ['ditau_met_min_dphi', 'ditau_met_min_dphi'],
# 'ditau_met_lep0_cos_dphi':
# ['ditau_met_lep0_cos_dphi', 'ditau_met_lep0_cos_dphi'],
# 'ditau_met_lep1_cos_dphi':
# ['ditau_met_lep1_cos_dphi', 'ditau_met_lep1_cos_dphi'],
# 'ditau_dpt':
# ['ditau_dpt', 'ditau_matched_vis_dpt'],
}
##------------------------------------------------------------------------
## MC12 features
FEATS_HH_TRUTH = (
'dR_tau1_tau2',
'MET_et',
'sum_pt_tau1_tau2_met',
'transverse_mass_tau1_met',
'transverse_mass_tau2_met',
'pt_diff_tau1_tau2',
'mass_vis_tau1_tau2',
'sum_pt_tau1_tau2',
'dPhi_tau1_tau2',
'transverse_mass_tau1_tau2',
'tau1_pt',
'mass_collinear_tau1_tau2',
'cos_theta_tau1_tau2',
'tau2_pt',
'tau1_eta',
'tau2_eta',
'dPhi_tau1_MET',
'dPhi_tau2_MET',
'dPhi_tau1_tau2_MET',
'vector_sum_pt_tau1_tau2',
'vector_sum_pt_tau1_tau2_met')
TARGET_HH_TRUTH = ['resonance_m', 'resonance_et']
|
994,878 | be1a52b2d31a41b6142df7fbae5a64a93fa3e70f | # Python doesn't provide it's own representation of LinkList, further we required a special type of \
# Link list, where the values were inserted in a sorted order
# Import Node class, see Node.py for details
from Node import Node
class SortedLinkList:
    """Singly/doubly linked list that keeps values in ascending order.

    A sentinel start node (value None) anchors the list; real elements
    follow it.  Python doesn't provide its own linked list, and this
    structure additionally keeps insertions sorted.
    """

    def __init__(self):
        # Dummy head node; its value is never a real element.
        self.start_node = Node(None, prev=None)

    def print_list(self):
        """Print the values comma-separated on one line."""
        node = self.start_node.next
        while node:
            if(node.next):
                print(node.value, end=", ")
            else:
                print(node.value)
            node = node.next

    def count(self):
        """Return the number of real (non-sentinel) nodes."""
        n = 0
        node = self.start_node
        while node.next:
            node = node.next
            n+=1
        return n

    def insert(self, value):
        """Insert value at its sorted position and return the new node.

        The returned node lets callers chain additional pointers (e.g. a
        'down' pointer in a skip-list-style structure).
        """
        # Initial (Empty list) Case
        if self.count() == 0:
            self.start_node.next = Node(value, prev=self.start_node)
            return self.start_node.next
        # Start with the current node
        node = self.start_node
        # Check the next node (A minute optimization)
        node_next = node.next
        # If the next Node is not None, then insert in sorted way
        while node_next.value < value:
            node = node_next
            node_next = node_next.next
            if node_next == None: break
        # Means insertion is not at the end of the list
        if node_next:
            new_node = Node(value, node_next, prev=node)
            node.next = new_node
            return new_node
        # Insertion to the end of the list
        else:
            node.next = Node(value, prev=node)
            return node.next
        # Return new node, so down pointer can be assigned

    def linear_search(self, value):
        """Search for value; return (found_flag, hops).

        found_flag is 1/0 and hops counts visited nodes; the sorted order
        lets the scan stop early once the next value exceeds the target.
        """
        hops = 0
        node = self.start_node
        while node.next:
            hops+=1
            if node.value == value:
                return 1, hops
            elif node.next.value > value:
                return 0, hops
            node = node.next
        return 1 if node.value == value else 0, hops
994,879 | 08169439bdf4ae6da065b30974bc1c2d2db05ce1 | def formula(n:int)->int:
return (3*pow(n,4)+2*pow(n,3)-3*pow(n,2)-2*n)//12
print(formula(100))
|
994,880 | 7a4a9685fa84731221afbda1d450081575f8da0f | # OCSP command-line test tool?
openssl ocsp -whatever
|
994,881 | 51d538d5d5107a1939195d6cf5c83ff13a12427f | import math
def cos_exp(x):
    """Return cos(x) * e**x."""
    product = math.cos(x) * math.exp(x)
    return product
994,882 | 85665cdbb51d101ef7f61a4c3e3a467a6ed3091d | def template_text(x, y, z):
return f'{x}時の{y}は{z}'
def test_template_text():
assert template_text(12, '気温', 22.4) == '12時の気温は22.4'
print(template_text(12, '気温', 22.4))
|
class TreeNode:
    """Minimal binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
def getAllElements(root1, root2):
    """Merge the inorder traversals of two BSTs into one sorted list.

    Each tree's inorder sequence is already sorted, so a single linear
    merge pass suffices; on ties, values from root1 come first.
    """
    first_vals = inorder(root1, []) if root1 else []
    second_vals = inorder(root2, []) if root2 else []
    merged = []
    i = j = 0
    while i < len(first_vals) and j < len(second_vals):
        if first_vals[i] <= second_vals[j]:
            merged.append(first_vals[i])
            i += 1
        else:
            merged.append(second_vals[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(first_vals[i:])
    merged.extend(second_vals[j:])
    return merged
def inorder(root, arr):
    """Append the subtree's values to arr in left-node-right order.

    Returns arr for a non-empty root; returns None when root is falsy
    (matching the original early-return behaviour).
    """
    if not root:
        return
    inorder(root.left, arr)
    arr.append(root.val)
    inorder(root.right, arr)
    return arr
# Demo: merge the sorted elements of two small BSTs.
# Expected output: [0, 1, 1, 2, 3, 4]
root1 = TreeNode(2)
root1.left = TreeNode(1)
root1.right = TreeNode(4)
root2 = TreeNode(1)
root2.left = TreeNode(0)
root2.right = TreeNode(3)
print(getAllElements(root1,root2))
994,884 | c09034c1d14c0b27f18a9316c232c8e758750625 | from PyQt5 import QtCore, QtGui, QtWidgets
import random
import sys
class Ui_MainWindow(QtWidgets.QMainWindow):
    def __init__(self, bo, rounter, app, MainWindow, c_pl, pl_n):
        """Build the game window UI and wire up drag handling on the title label.

        bo: board-state mapping; rounter/app: Qt plumbing; c_pl: current
        player label; pl_n: player name.  (Parameter semantics inferred
        from setupUi — confirm against the caller.)
        """
        super(Ui_MainWindow, self).__init__()
        self.navhov = False
        self.setMouseTracking(True)
        # Possible game states; the game starts in the first one.
        self.gS_l = ["Started", "Won", "Drawn"]
        self.c_gS = self.gS_l[0]
        # Drag origin ("old mouse position"); None while not dragging.
        self.omp = None
        self.setupUi(bo, rounter, app, MainWindow, c_pl, pl_n)
        # Route the title label's mouse events to this window so the
        # frameless window can be dragged by its title bar.
        self.label.mousePressEvent = self.mousePressEvent
        self.label.mouseMoveEvent = self.mouseMoveEvent
        self.label.mouseReleaseEvent = self.mouseReleaseEvent
    def mousePressEvent(self, e: QtGui.QMouseEvent):
        """Record the press position as the drag origin for window moving."""
        if e.buttons() == QtCore.Qt.LeftButton:
            self.omp = e.pos()
    def mouseMoveEvent(self, e: QtGui.QMouseEvent):
        """Drag the window by the offset from the stored press position.

        NOTE(review): this references the module-level MainWindow, not an
        instance attribute — relies on the script creating that global.
        """
        if e.buttons() == QtCore.Qt.LeftButton:
            MainWindow.move(e.globalPos() - self.omp)
def mouseReleaseEvent(self, e: QtGui.QMouseEvent):
if e.buttons() == QtCore.Qt.LeftButton:
self.omp = None
def setupUi(self, bo, rounter, app, MainWindow, c_pl, pl_n):
self.cc_p = c_pl
self.cpl_n = pl_n
self._tr = app.translate
_translate = self._tr
MainWindow.setObjectName("MainWindow")
MainWindow.resize(710, 619)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.Frame = QtWidgets.QFrame(self.centralwidget)
self.Frame.setGeometry(QtCore.QRect(0, 0, 710, 621))
self.Frame.setStyleSheet("QFrame {\n" "background-color:rgb(0, 0, 47);\n" "}")
self.Frame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.Frame.setLineWidth(1)
self.Frame.setObjectName("Frame")
self.frame = QtWidgets.QFrame(self.Frame)
self.frame.setGeometry(QtCore.QRect(0, 0, 710, 32))
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setStyleSheet("background-color: rgb(20, 20, 47);")
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.label = QtWidgets.QLabel(self.frame)
self.label.setGeometry(QtCore.QRect(0, 0, 710, 32))
font = QtGui.QFont()
font.setFamily("Oxygen")
font.setPointSize(16)
self.label.setFont(font)
self.label.setStyleSheet(
"color: white;\n" "background-color: rgb(20, 20, 47);\n"
)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setIndent(0)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(self.Frame)
self.pushButton.setGeometry(QtCore.QRect(630, 10, 10, 10))
self.pushButton.setStyleSheet(
"background-color: rgb(0, 255, 0);\n" "border-radius: 5px;\n"
)
self.pushButton.setText("")
self.pushButton.setObjectName("pushButton")
if "" == "".join(bo.values()):
self.pushButton.clicked.connect(self.reset)
self.label.setText(_translate("MainWindow", "Player: " + c_pl))
else:
self.pushButton.clicked.connect(self.reconf_pop)
self.pushButton.installEventFilter(self)
self.pushButton_2 = QtWidgets.QPushButton(self.Frame)
self.pushButton_2.setGeometry(QtCore.QRect(660, 10, 10, 10))
self.pushButton_2.setStyleSheet(
"background-color: rgb(255, 255, 0);\n" "border-radius: 5px\n;"
)
self.pushButton_2.setText("")
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(MainWindow.showMinimized)
self.pushButton_2.installEventFilter(self)
self.pushButton_3 = QtWidgets.QPushButton(self.Frame)
self.pushButton_3.setGeometry(QtCore.QRect(690, 10, 10, 10))
self.pushButton_3.setStyleSheet(
"background-color: rgb(255, 0, 0);\n" "border-radius: 5px\n;"
)
self.pushButton_3.setText("")
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(sys.exit)
self.pushButton_3.installEventFilter(self)
font = QtGui.QFont()
font.setFamily("Oxygen")
font.setPointSize(16)
self.line = QtWidgets.QFrame(self.Frame)
self.line.setGeometry(QtCore.QRect(70, 240, 574, 4))
self.line.setStyleSheet(
"background-color: white;\n" "color: white;\n" "border-color: white;"
)
self.line.setLineWidth(0)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.line_2 = QtWidgets.QFrame(self.Frame)
self.line_2.setGeometry(QtCore.QRect(70, 384, 574, 4))
self.line_2.setStyleSheet(
"background-color: white;\n" "color: white;\n" "border-color: white;"
)
self.line_2.setLineWidth(0)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.line_3 = QtWidgets.QFrame(self.Frame)
self.line_3.setGeometry(QtCore.QRect(258, 100, 4, 428))
self.line_3.setStyleSheet(
"background-color: white;\n" "color: white;\n" "border-color: white;"
)
self.line_3.setLineWidth(0)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.line_4 = QtWidgets.QFrame(self.Frame)
self.line_4.setGeometry(QtCore.QRect(454, 100, 4, 428))
self.line_4.setStyleSheet(
"background-color: white;\n" "color: white;\n" "border-color: white;"
)
self.line_4.setLineWidth(0)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.push_button = QtWidgets.QPushButton(self.Frame)
self.push_button.setGeometry(QtCore.QRect(70, 100, 188, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button.setFont(font)
self.push_button.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button.setObjectName("push_button")
self.push_button_2 = QtWidgets.QPushButton(self.Frame)
self.push_button_2.setGeometry(QtCore.QRect(262, 100, 192, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button_2.setFont(font)
self.push_button_2.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button_2.setObjectName("push_button_2")
self.push_button_3 = QtWidgets.QPushButton(self.Frame)
self.push_button_3.setGeometry(QtCore.QRect(458, 100, 186, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button_3.setFont(font)
self.push_button_3.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button_3.setObjectName("push_button_3")
self.push_button_4 = QtWidgets.QPushButton(self.Frame)
self.push_button_4.setGeometry(QtCore.QRect(70, 244, 188, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button_4.setFont(font)
self.push_button_4.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button_4.setObjectName("push_button_4")
self.push_button_5 = QtWidgets.QPushButton(self.Frame)
self.push_button_5.setGeometry(QtCore.QRect(262, 244, 192, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button_5.setFont(font)
self.push_button_5.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button_5.setObjectName("push_button_5")
self.push_button_6 = QtWidgets.QPushButton(self.Frame)
self.push_button_6.setGeometry(QtCore.QRect(458, 244, 186, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button_6.setFont(font)
self.push_button_6.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button_6.setObjectName("push_button_6")
self.push_button_7 = QtWidgets.QPushButton(self.Frame)
self.push_button_7.setGeometry(QtCore.QRect(70, 388, 188, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button_7.setFont(font)
self.push_button_7.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button_7.setObjectName("push_button_7")
self.push_button_8 = QtWidgets.QPushButton(self.Frame)
self.push_button_8.setGeometry(QtCore.QRect(262, 388, 192, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button_8.setFont(font)
self.push_button_8.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button_8.setObjectName("push_button_8")
self.push_button_9 = QtWidgets.QPushButton(self.Frame)
self.push_button_9.setGeometry(QtCore.QRect(458, 388, 186, 140))
font = QtGui.QFont()
font.setFamily("Montserrat")
font.setPointSize(72)
self.push_button_9.setFont(font)
self.push_button_9.setStyleSheet(
"color: white;\n"
"text-align: center;\n"
"background-color: rgb(0, 0, 47);\n"
"border: none;"
)
self.push_button_9.setObjectName("push_button_9")
self.frame.raise_()
self.pushButton.raise_()
self.pushButton_2.raise_()
self.pushButton_3.raise_()
self.line.raise_()
self.line_2.raise_()
self.line_3.raise_()
self.line_4.raise_()
ti_l = []
self.push_button.raise_()
ti_l.append(self.push_button)
self.push_button_2.raise_()
ti_l.append(self.push_button_2)
self.push_button_3.raise_()
ti_l.append(self.push_button_3)
self.push_button_4.raise_()
ti_l.append(self.push_button_4)
self.push_button_5.raise_()
ti_l.append(self.push_button_5)
self.push_button_6.raise_()
ti_l.append(self.push_button_6)
self.push_button_7.raise_()
ti_l.append(self.push_button_7)
self.push_button_8.raise_()
ti_l.append(self.push_button_8)
self.push_button_9.raise_()
ti_l.append(self.push_button_9)
MainWindow.setCentralWidget(self.centralwidget)
MainWindow.setWindowFlag(QtCore.Qt.FramelessWindowHint)
self.retranslateUi(app, rounter, MainWindow, c_pl, pl_n, ti_l, bo)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def eventFilter(self, obj, event):
_translate = self._tr
nbs = [self.pushButton, self.pushButton_2, self.pushButton_3]
if obj in nbs and event.type() == QtCore.QEvent.Enter:
m = int(nbs.index(obj) * 30)
self.navhov = True
obj.setGeometry(QtCore.QRect(627 + m, 7, 16, 16))
if obj == nbs[0]:
self.label.setText(_translate("MainWindow", "Restart Game"))
obj.setStyleSheet(
"background-color: rgb(0, 255, 0);\n"
"border-radius: 8px;\n"
"border-width: 2px;\n"
"border-color: white;\n"
"border-style: solid;\n"
)
elif obj == nbs[1]:
self.label.setText(_translate("MainWindow", "Minimize"))
obj.setStyleSheet(
"background-color: rgb(255, 255, 0);\n"
"border-radius: 8px;\n"
"border-width: 2px;\n"
"border-color: white;\n"
"border-style: solid;\n"
)
else:
self.label.setText(_translate("MainWindow", "Close"))
obj.setStyleSheet(
"background-color: rgb(255, 0, 0);\n"
"border-radius: 8px;\n"
"border-width: 2px;\n"
"border-color: white;\n"
"border-style: solid;\n"
)
return True
elif obj in nbs and event.type() == QtCore.QEvent.Leave:
m = int(nbs.index(obj) * 30)
self.navhov = False
obj.setGeometry(QtCore.QRect(630 + m, 10, 10, 10))
if self.c_gS == self.gS_l[0]:
self.label.setText(_translate("MainWindow", "Player: " + self.cc_p))
elif self.c_gS == self.gS_l[1]:
self.label.setText(
_translate(
"MainWindow", "Game Over! Player " + self.cc_p + " Wins!"
)
)
elif self.c_gS == self.gS_l[2]:
self.label.setText(
_translate(
"MainWindow",
"Draw! Both Players are Equally Good (or Equally Bad)!",
)
)
if obj == nbs[0]:
obj.setStyleSheet(
"background-color: rgb(0, 255, 0);\n" "border-radius: 5px;\n"
)
elif obj == nbs[1]:
obj.setStyleSheet(
"background-color: rgb(255, 255, 0);\n" "border-radius: 5px;\n"
)
else:
obj.setStyleSheet(
"background-color: rgb(255, 0, 0);\n" "border-radius: 5px;\n"
)
return False
else:
return False
# displaying the board and all the moves
def retranslateUi(self, app, rounter, MainWindow, c_pl, pl_n, ti_l, bo, w_ts=None):
_translate = self._tr
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
if rounter == "Game Over!":
if type(w_ts) == str:
self.label.setText(_translate("MainWindow", w_ts))
elif type(w_ts) == list:
self.label.setText(
_translate("MainWindow", "Game Over! Player " + c_pl + " Wins!")
)
for tin in range(len(ti_l)):
if int(tin + 1) in w_ts:
ti_l[tin].setStyleSheet(
"background-color: white;\n"
"color: rgb(0, 0, 47);\n"
"text-align: center;\n"
"border: none\n"
)
ti_l[tin].setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
ti_l[tin].setDisabled(True)
elif type(rounter) == int and rounter <= 9:
self.label.setText(_translate("MainWindow", "Player: " + c_pl))
# checking if the current tile is clicked or not
def is_ti_clicked(ti, bo, rounter, c_pl, pl_n, ti_l, app, MainWindow):
# getting the current state of the current tile
bk = int(ti_l.index(ti) + 1)
# setting the new state of the current tile
ti.setText(_translate("MainWindow", bo[bk]))
if bo[bk] == "":
ti.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
# updating the current state of the tile if clicked
def upd():
if bo[bk] == "":
bo.update({bk: c_pl})
ti.setText(_translate("MainWindow", c_pl))
ti.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.ch_end(bo, rounter, c_pl, pl_n, ti_l, app, MainWindow)
ti.clicked.connect(upd)
for ti in ti_l:
is_ti_clicked(ti, bo, rounter, c_pl, pl_n, ti_l, app, MainWindow)
# confirm resetting the board/game
    def reconf_pop(self):
        """Show a styled Yes/No confirmation dialog before clearing a game in progress."""
        pop = QtWidgets.QMessageBox()
        pop.setWindowTitle("Confirm Reset")
        pop.setText("Clear the Board and Restart the Game?")
        pop.setStyleSheet("background-color: rgb(0, 0, 47);\n" "color: white;")
        font = QtGui.QFont()
        font.setFamily("Montserrat")
        font.setPointSize(12)
        pop.setFont(font)
        pop.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
        for b in pop.buttons():
            b.setStyleSheet(
                "background-color: white;\n"
                "color: rgb(0, 0, 47);\n"
                "border: none;\n"
                "padding: 2px 10px 2px 10px;\n"
            )
            b.setFont(font)
            # Qt exposes button labels with their mnemonic ampersand ("&No").
            if b.text() == "&No":
                pop.setDefaultButton(b)
        pop.buttonClicked.connect(self.pop_res)
        popex = pop.exec()
    def pop_res(self, res):
        """Handle the reset dialog's answer; only an explicit Yes clears the game."""
        # Button labels carry the mnemonic ampersand, hence "&Yes".
        if res.text() == "&Yes":
            self.reset()
# clear and restart the board/game and switch player
    def reset(self):
        """Start a fresh game with an empty board and the other player going first.

        NOTE(review): `rounter`, `app` and `MainWindow` are read from module
        globals assigned under __main__ -- this only works when the file is
        run as a script; confirm before importing this class elsewhere.
        """
        self.c_gS = self.gS_l[0]
        pl_n = self.cpl_n
        bo = {}
        for n in range(9):
            bo[n + 1] = ""
        main(bo, rounter, int(1 - pl_n), app, MainWindow)
# checking for endgame (winner/stalemate)
def ch_end(self, bo, rounter, c_pl, pl_n, ti_l, app, MainWindow):
vl = list(bo.keys())
def cols_match():
a_cols = [k for k in vl[::3]]
b_cols = [k for k in vl[1::3]]
c_cols = [k for k in vl[2::3]]
all_cols = [a_cols, b_cols, c_cols]
p_matches = [
(
all(bo[a_cols[0]] == bo[f_col] for f_col in a_cols)
and bo[a_cols[0]] != ""
),
(
all(bo[b_cols[0]] == bo[f_col] for f_col in b_cols)
and bo[b_cols[0]] != ""
),
(
all(bo[c_cols[0]] == bo[f_col] for f_col in c_cols)
and bo[c_cols[0]] != ""
),
]
col_res = {
"matches": (True in p_matches),
"m_tis": None,
}
for e in range(len(p_matches)):
if p_matches[e] == True:
col_res["m_tis"] = [c for c in all_cols[e]]
return col_res
def rows_match():
f_rows = [row_vs for row_vs in vl[:3]]
s_rows = [row_vs for row_vs in vl[3:6]]
t_rows = [row_vs for row_vs in vl[6:9]]
all_rows = [f_rows, s_rows, t_rows]
p_matches = [
(
all(bo[f_rows[0]] == bo[ff_row] for ff_row in f_rows)
and bo[f_rows[0]] != ""
),
(
all(bo[s_rows[0]] == bo[fs_row] for fs_row in s_rows)
and bo[s_rows[0]] != ""
),
(
all(bo[t_rows[0]] == bo[ft_row] for ft_row in t_rows)
and bo[t_rows[0]] != ""
),
]
row_res = {
"matches": (True in p_matches),
"m_tis": None,
}
for e in range(len(p_matches)):
if p_matches[e] == True:
row_res["m_tis"] = [r for r in all_rows[e]]
return row_res
def diags_match():
ps_diags = [k for k in vl[2:7:2]]
ns_diags = [k for k in vl[::4]]
all_diags = [ps_diags, ns_diags]
p_matches = [
(
all(bo[ps_diags[0]] == bo[pdv] for pdv in ps_diags)
and bo[ps_diags[0]] != ""
),
(
all(bo[ns_diags[0]] == bo[ndv] for ndv in ns_diags)
and bo[ns_diags[0]] != ""
),
]
diag_res = {
"matches": (True in p_matches),
"m_tis": None,
}
for e in range(len(p_matches)):
if p_matches[e] == True:
diag_res["m_tis"] = [r for r in all_diags[e]]
return diag_res
c = dict(cols_match())
r = dict(rows_match())
d = dict(diags_match())
if c["matches"] or r["matches"] or d["matches"] or rounter >= 9:
if c["matches"] or r["matches"] or d["matches"]:
self.c_gS = self.gS_l[1]
if c["matches"]:
w_ts = c["m_tis"]
elif r["matches"]:
w_ts = r["m_tis"]
elif d["matches"]:
w_ts = d["m_tis"]
elif rounter == 9:
self.c_gS = self.gS_l[2]
w_ts = "Draw! Both Players are Equally Good (or Equally Bad)!"
self.retranslateUi(
app, "Game Over!", MainWindow, c_pl, pl_n, ti_l, bo, w_ts=w_ts
)
else:
main(bo, rounter, int(1 - pl_n), app, MainWindow)
def main(bo, rounter, pl_n, app, MainWindow):
    """Build the UI for the next turn: pick the player's symbol and bump the round."""
    pls = ["X", "O"]
    c_pl = pls[pl_n]
    rounter += 1
    ui = Ui_MainWindow(bo, rounter, app, MainWindow, c_pl, pl_n)
    MainWindow.show()
if __name__ == "__main__":
    # initializing the board
    board = {}
    for n in range(9):
        board[n + 1] = ""
    # initializing the round number
    rounter = 0
    # initializing random player (X/O)
    rng = random.randint(0, 1)
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    main(board, rounter, rng, app, MainWindow)
    # hand control to the Qt event loop; its exit code propagates to the shell
    sys.exit(app.exec_())
994,885 | 1d97998f2946c068f8d5b5a41f707b6f4fae207d | import csv
import argparse
import pathlib
import logging
import os
import traceback
import datetime
import redis
from dotenv import load_dotenv
from multiprocessing import Pool
from services import Postgres
from psycopg2.extensions import AsIs
from constants import county_csv_headers, date_keys
from common import replace_bom, yes_no, pk_string, find_by_name_and_address, find_by_registration_number
"""
This script is idempotent when run on a directory corresponding to a given day.
We can ingest 10-08, then add a new CSV to 10-08 and then run it again without a problem.
However, we should never run 10-07 after running 10-08 even for "new" 10-07 data.
We must distinguish between three dates here:
- date of information: the date the county pulled the data
- received date: the date we received the data
- ingest date: the date we ingested the data
There is not really a right answer to how we organize this, however if CountyCSV_A was
pulled before CountyCSV_B and we ingest A after B we will ruin the integrity of the data.
However, we organize the data it should be with this in mind.
"""
def find_and_compare(cursor, row, stem, day):
    """Locate an existing voter row matching `row` and diff it field-by-field.

    Returns (has_changed, psql_id, logs, reject_date, cure_date) where
    has_changed is None for a brand-new row (caller must INSERT), False for
    an unchanged existing row, and True when differences were found/logged.
    """
    existing_row = None
    # try to find by registration number
    if row.get('registration_number'):
        existing_row = find_by_registration_number(cursor, row.get('registration_number'))
    # fallback on name and address
    if not existing_row:
        existing_row = find_by_name_and_address(cursor, row)
    reject_date = None
    if existing_row:
        logs = existing_row['logs']
        reject_date = existing_row['reject_date']
        cure_date = existing_row['cure_date']
        # remove our columns prior to comparison
        del existing_row['created_at']
        del existing_row['updated_at']
        del existing_row['logs']
        del existing_row['log']
        del existing_row['reject_date']
        del existing_row['cure_date']
        del existing_row['county_data']
        # convert dates back to strings for comparison with CSV data
        # NOTE(review): '%-m'/'%-d' are glibc-only strftime extensions and
        # fail on Windows -- confirm the deployment platform.
        for key in date_keys:
            if existing_row.get(key):
                existing_row[key] = existing_row[key].strftime('%-m/%-d/%Y')
            if row.get(key):
                row[key] = datetime.datetime.strptime(row[key], '%m/%d/%Y').strftime('%-m/%-d/%Y')
        has_changed = False
        # iterate over key-values and log any differences
        for k, v in row.items():
            if existing_row[k] != v:
                has_changed = True
                logging.info(' | '.join(['UPDATE', pk_string(row), k, f'{existing_row[k]} => {row[k]}']))
                logs.append(' | '.join([f'{stem}.csv', f'{day}', 'UPDATE', k, f'{existing_row[k]} => {row[k]}']))
                # status newly set (null -> non-null): stamp the reject date
                if k == 'ballot_status' and existing_row[k] is None:
                    reject_date = f'2020-{day}'
                    # if a ballot status goes from null => rejected => null => rejected
                    # we want to remove the cure date since a cure date cannot coexist with a non-null status
                    cure_date = None
                # status cleared (non-null -> null): record the cure date
                elif k == 'ballot_status' and row[k] is None:
                    cure_date = f'2020-{day}'
        return has_changed, existing_row['id'], logs, reject_date, cure_date
    # a ballot can appear for the first time and be marked as rejected on the same CSV
    if row['ballot_status']:
        reject_date = f'2020-{day}'
    # initial log entry
    logging.info(' | '.join(['INSERT', pk_string(row)]))
    logs = [' | '.join([f'{stem}.csv', f'{day}', 'INSERT'])]
    return None, None, logs, reject_date, None
def insert_row(cursor, row, stem, day):
    """Normalize one cleaned CSV row to SQL columns and upsert it into `voters`.

    Raises on unknown CSV headers so schema drift is caught immediately.
    """
    # check the CSV headers
    csv_key_set = set(county_csv_headers.keys())
    for key in row.keys():
        if key not in csv_key_set:
            raise Exception(f'Unexpected CSV header: {stem} | {day} | {key}')
    row_dict = {}
    for csv_key, sql_key in county_csv_headers.items():
        # TODO: add more rigorous vetting for CSV data once enums are properly defined
        if csv_key == 'COUNTY':
            # if a county does not appear in the data set use the name of the file instead
            # some county data has other counties listed, which is why we do not simply use the path stem
            row_dict[sql_key] = row.get(csv_key) or stem
        elif csv_key == 'PARTY':
            # consolidate all non Dems and Reps under the banner of Other
            if row.get(csv_key) != 'DEM' and row.get(csv_key) != 'REP':
                row_dict[sql_key] = 'OTH'
            else:
                row_dict[sql_key] = row.get(csv_key)
        elif row.get(csv_key) is not None and csv_key == 'IS_VOID':
            # int => bool
            row_dict[sql_key] = row.get(csv_key) == '1'
        elif row.get(csv_key) is not None and csv_key == 'REGN_NUM':
            # consider any non-integer string an error and replace with None
            try:
                row_dict[sql_key] = int(row.get(csv_key))
            except ValueError:
                row_dict[sql_key] = None
        elif not row.get(csv_key) or not row.get(csv_key).strip():
            # replace falsy values for text fields with null
            row_dict[sql_key] = None
        else:
            row_dict[sql_key] = row.get(csv_key)
    # has_changed: None => new, need to insert; False => existing, no changes; True => existing, need to update
    has_changed, psql_id, logs, reject_date, cure_date = find_and_compare(cursor, row_dict, stem, day)
    # assign our data
    row_dict['logs'] = logs
    row_dict['log'] = '\n'.join(logs)
    row_dict['reject_date'] = reject_date
    row_dict['cure_date'] = cure_date
    row_dict['county_data'] = True
    # upsert row
    columns = row_dict.keys()
    values = tuple(row_dict.values())
    if has_changed:
        # we're only ok with a full overwrite here because of we're logging all changes
        # we can accept that any recent data is more reliable and should replace existing values
        # while at the same time tracking all changes in case we need to see what happened
        query = (
            'UPDATE voters '
            'SET (%s) = %s '
            'WHERE id = %s'
        )
        cursor.execute(query, (AsIs(','.join(columns)), values, psql_id))
    elif has_changed is None:
        query = (
            'INSERT INTO voters (%s) '
            'VALUES %s'
        )
        cursor.execute(query, (AsIs(','.join(columns)), values))
def clean_row(row):
    """Normalize a CSV row: strip keys and collapse runs of whitespace in string values."""
    def squash(value):
        # Non-strings (e.g. None) pass through untouched.
        return " ".join(value.split()) if type(value) is str else value
    return {key.strip(): squash(value) for key, value in row.items()}
def ingest_csv(args_tuple):
    """Worker entry point: ingest one county CSV for the given day.

    Per-row failures are logged and skipped so one bad line cannot abort
    the whole file.
    """
    # "postgres_args_" to not shadow the name under __main__ and because multiprocessing cannot share global vars
    path, day, postgres_args_, is_prod = args_tuple
    log_dir = 'logs' if is_prod else 'dev_logs'
    logging.basicConfig(filename=f'{log_dir}/{path.stem}-{day}.log', format='%(asctime)s | %(message)s', level=logging.INFO)
    # remove the leading BOM present in many Excel documents and CSVs exported from Excel
    replace_bom(path)
    with Postgres(**postgres_args_) as cursor:
        print(f'Processing {path.name}...')
        with open(path) as f:
            for row in csv.DictReader(f):
                try:
                    insert_row(cursor, clean_row(row), path.stem, day)
                except Exception as e:
                    tb = traceback.TracebackException.from_exception(e)
                    logging.error(f'ERROR | {"".join(tb.format())} | {str(row)}')
                # NOTE(review): redis_client is created under __main__, so
                # workers see it only with fork-style multiprocessing --
                # confirm this never runs with the 'spawn' start method.
                if redis_client.get('kill_ingest'):
                    print('Kill switch detected...')
                    break
    print(f'Done with {path.name}...')
def main():
    """Fan the day's CSVs out to a worker pool (reads args/postgres_args from __main__ globals)."""
    with Pool(args.workers) as pool:
        pool.map(ingest_csv, [(path, args.day, postgres_args, is_prod) for path in pathlib.Path('csvs').joinpath(args.day).glob('*.csv')])
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # -d: day directory under csvs/ to ingest; -w: number of worker processes
    parser.add_argument('-d', dest='day', required=True)
    parser.add_argument('-w', dest='workers', type=int, default=1)
    args = parser.parse_args()
    # ensure log dirs
    pathlib.Path('logs/').mkdir(exist_ok=True)
    pathlib.Path('dev_logs/').mkdir(exist_ok=True)
    load_dotenv()
    # interactive guard: production credentials only on explicit confirmation
    is_prod = yes_no('Target production?')
    if is_prod:
        postgres_args = {
            'host': os.getenv('POSTGRES_HOST'),
            'port': int(os.getenv('POSTGRES_PORT')),
            'user': os.getenv('POSTGRES_USER'),
            'password': os.getenv('POSTGRES_PASSWORD'),
            'dbname': os.getenv('POSTGRES_DB'),
        }
    else:
        postgres_args = {
            'host': os.getenv('DEV_POSTGRES_HOST'),
            'port': int(os.getenv('DEV_POSTGRES_PORT')),
            'user': os.getenv('DEV_POSTGRES_USER'),
            'password': os.getenv('DEV_POSTGRES_PASSWORD'),
            'dbname': os.getenv('DEV_POSTGRES_DB'),
        }
    # kill-switch flag polled by workers during ingest
    redis_client = redis.StrictRedis(host='localhost', decode_responses=True)
    main()
|
994,886 | 660deeef18163545053ea6fc8e69234e3707f125 | from functools import reduce
import pydash as _
from toolz import pipe
from progressbar import progressbar
import utils as u
from data_cleaners import clean_page
def is_valid_page(page):
    '''Invalid pages are images or disambiguation pages that were not
    flagged at the parser level. Also check that the page has more than 5
    characters. A page missing the 'plaintext' key is treated as invalid
    (previously a KeyError).'''
    flags = ['.jpg', '.svg', '.png', '.gif', '.jpeg', '.bmp', '.tiff', '(disambiguation)']
    if not page or 'title' not in page:
        return False
    # .get guards pages that lack 'plaintext' entirely
    if len(page.get('plaintext', '').strip()) <= 5:
        return False
    title = page['title'].lower()
    # native substring test replaces the pydash helper; generator avoids a temp list
    return not any(flag in title for flag in flags)
def is_valid_link(link):
    '''Invalid links are to images. This works for both implicit and regular
    style links. A link must have a 'page' target; anchor text is optional
    (a None 'text' value previously raised AttributeError on .lower()).'''
    flags = ['.jpg', '.svg', '.png', '.gif', '.jpeg', '.bmp', '.tiff']
    if not link or 'page' not in link:
        return False
    target = link['page'].lower()
    if any(flag in target for flag in flags):
        return False
    # anchor text is optional (elsewhere the file uses link.get('text') or link['page'])
    anchor = link.get('text')
    if anchor is not None and any(flag in anchor.lower() for flag in flags):
        return False
    return True
def get_outlinks(processed_pages):
    '''set of page names that these pages link to'''
    referenced = set()
    for entry in processed_pages:
        referenced.update(entry['link_contexts'].keys())
    return referenced
def _process_pages(redirects_lookup, pages, is_seed_page=False, limit=10000):
    '''Run process_page over at most `limit` pages, skipping invalid ones.'''
    results = []
    for position, candidate in enumerate(pages):
        if position >= limit:
            break
        if not is_valid_page(candidate):
            continue
        results.append(process_page(redirects_lookup,
                                    candidate,
                                    is_seed_page=is_seed_page))
    return results
def _fetch_pages(pages_db, page_titles):
return [pages_db.find_one({'_id': title}) for title in page_titles]
def process_seed_pages(pages_db, redirects_lookup, seed_pages, depth=1, limit=10000):
    '''Get the mentions in each of the seed pages as well as the pages
    they link to. Set `depth` > 1 to also process the pages that those
    pages link to'''
    processed_pages = _process_pages(redirects_lookup, seed_pages, is_seed_page=True, limit=limit)
    # only the most recently processed layer drives the next layer's outlinks
    latest_processed_pages = processed_pages
    visited_page_titles = set([processed_page['document_info']['title'] for processed_page in processed_pages])
    for layer in range(depth):
        print("Getting referenced pages")
        pages_referenced = get_outlinks(latest_processed_pages)
        page_titles_to_fetch = pages_referenced - visited_page_titles
        batch_size = 1000
        print("Fetching and processing", len(page_titles_to_fetch), "pages in", batch_size, "batches")
        for batch_num, titles_batch in progressbar(enumerate(u.create_batches(list(page_titles_to_fetch),
                                                          batch_size=batch_size)),
                                                   max_value=int(len(page_titles_to_fetch)/batch_size)):
            batch_pages_to_process = _fetch_pages(pages_db, titles_batch)
            latest_processed_pages = _process_pages(redirects_lookup, batch_pages_to_process)
            processed_pages += latest_processed_pages
        # mark everything referenced this layer as visited, even invalid pages
        visited_page_titles = visited_page_titles.union(pages_referenced)
    return processed_pages
def get_mention_offset(page_text, sentence_text, mention):
    '''Offset of `mention` within `page_text`, located via its containing sentence.

    Raises ValueError when the sentence is not in the page or the mention
    is not in the sentence.
    '''
    if sentence_text not in page_text:
        raise ValueError('Sentence not found in page')
    if mention not in sentence_text:
        raise ValueError('Mention not found in sentence')
    return page_text.index(sentence_text) + sentence_text.index(mention)
def _get_entity(redirects_lookup, link):
    """Resolve a link target through the redirects table, capitalizing the first letter."""
    link_destination = link['page']
    followed_redirect = redirects_lookup.get(link_destination)
    # fall back to the raw destination when no redirect entry exists
    return _.upper_first(followed_redirect or link_destination)
def _sentence_to_link_contexts(redirects_lookup, page, sentence):
    """Map entity -> list of mention contexts for every valid link in `sentence`.

    Links whose text cannot be located in the page or sentence are skipped
    (get_mention_offset raises ValueError for those).
    """
    page_title = page['title']
    contexts = {}
    if 'links' in sentence:
        for link in sentence['links']:
            if is_valid_link(link):
                # implicit links have no anchor text; fall back to the target name
                link_text = link.get('text') or link['page']
                try:
                    mention_offset = get_mention_offset(page['plaintext'], sentence['text'], link_text)
                    entity = _get_entity(redirects_lookup, link)
                    context = {'text': link_text,
                               'sentence': sentence['text'],
                               'offset': mention_offset,
                               'page_title': page_title,
                               'preredirect': _.upper_first(link['page'])}
                    if entity in contexts:
                        contexts[entity].append(context)
                    else:
                        contexts[entity] = [context]
                except ValueError:
                    continue
    return contexts
def _sentence_to_link_contexts_reducer(redirects_lookup, page, contexts_acc, sentence):
    """Reducer: fold one sentence's link contexts into the running accumulator."""
    contexts = _sentence_to_link_contexts(redirects_lookup, page, sentence)
    if not _.is_empty(contexts):
        # concatenate mention lists when an entity key already exists
        concat = lambda dest, src: dest + src if dest else src
        _.merge_with(contexts_acc, contexts, iteratee=concat)
    return contexts_acc
def get_link_contexts(redirects_lookup, page):
    '''link contexts is a dictionary from entity to mention details'''
    sections = page['sections']
    sentences = sum([section['sentences'] for section in sections if 'sentences' in section], [])
    # NOTE(review): `section['tables'][0]` only scans the first table group of
    # each section -- confirm whether the remaining groups should be included.
    sentences_from_tables = sum([[table['data'] for table in section['tables'][0] if table.get('data')] for section in sections if section.get('tables')],
                                [])
    all_sentences = sentences + sentences_from_tables
    return reduce(_.curry(_sentence_to_link_contexts_reducer)(redirects_lookup, page),
                  all_sentences,
                  {})
def _mention_overlaps(mentions, mention_to_check):
'''does a mention overlap a mention in the list.'''
mention_spans = [[mention['offset'],
mention['offset'] + len(mention['text'])] for mention in mentions]
start = mention_to_check['offset']
end = mention_to_check['offset'] + len(mention_to_check['text'])
starts_inside_a_mention = any([start >= span[0] and start <= span[1] for span in mention_spans])
ends_inside_a_mention = any([end >= span[0] and end <= span[1] for span in mention_spans])
contains_a_mention = any([start <= span[0] and end >= span[1] for span in mention_spans])
return starts_inside_a_mention or ends_inside_a_mention or contains_a_mention
def _apply_match_heuristic(page, link_contexts, to_match, entity):
    '''helper for defining heuristics for finding mentions of an entity'''
    # every plain-text occurrence of `to_match` becomes a candidate mention
    matches = u.match_all(to_match, page['plaintext'])
    mentions = sum(link_contexts.values(), [])
    link_context = {entity: [{'text': to_match,
                              'offset': match_index,
                              'page_title': page['title'],
                              'preredirect': _.upper_first(entity)} for match_index in matches]}
    # drop candidates that collide with mentions we already have
    filtered_link_context = {entity: [mention for mention in link_context[entity] if not _mention_overlaps(mentions, mention)]}
    # de-duplicate by offset when merging into an existing entity entry
    concat = lambda dest, src: _.uniq_by(dest + src, 'offset') if dest else src
    if not _.is_empty(filtered_link_context[entity]):
        return _.merge_with(link_contexts, filtered_link_context, iteratee=concat)
    else:
        return link_contexts
def _apply_exact_match_heuristic(page, link_contexts, entity_to_match):
    # the entity name itself is the string searched for in the page text
    return _apply_match_heuristic(page, link_contexts, entity_to_match, entity_to_match)
def _page_title_exact_match_heuristic(page, link_contexts):
    '''look for an occurrence of the page title'''
    return _apply_exact_match_heuristic(page, link_contexts, page['title'])
def _link_title_exact_match_heuristic(page, link_contexts):
    '''look for an occurrence of the link anchor text'''
    # keys of link_contexts are the entities already linked from this page
    link_titles = list(link_contexts.keys())
    return reduce(_.curry(_apply_exact_match_heuristic)(page),
                  link_titles,
                  link_contexts)
def _entity_for_each_page(page, link_contexts):
    '''make sure that each page has an entry in the dict'''
    # the empty list only fills a missing key; existing entries win the merge
    return _.assign({page['title']: []}, link_contexts)
def _drop_overlapping_mentions_reducer(acc, pair):
    """Keep a mention only if it does not overlap any mention accepted so far."""
    mentions_so_far, link_contexts = acc
    entity, mention = pair
    if not _mention_overlaps(mentions_so_far, mention):
        mentions_so_far.append(mention)
        u.append_at_key(link_contexts, entity, mention)
    return (mentions_so_far, link_contexts)
def _drop_overlapping_mentions(link_contexts):
    """Flatten to (entity, mention) pairs, then greedily keep non-overlapping mentions."""
    entity_mention_pairs = sum(_.map_values(link_contexts,
                                            lambda mentions, entity: [[entity, mention] for mention in mentions]).values(),
                               [])
    __, reduced_link_contexts = reduce(_drop_overlapping_mentions_reducer,
                                       entity_mention_pairs,
                                       ([], {}))
    return reduced_link_contexts
def get_link_contexts_using_heuristics(redirects_lookup, page):
    """Explicit link contexts plus heuristic matches, de-overlapped, one entry per page."""
    return pipe(get_link_contexts(redirects_lookup, page),
                _.partial(_page_title_exact_match_heuristic, page),
                _.partial(_link_title_exact_match_heuristic, page),
                _drop_overlapping_mentions,
                _.partial(_entity_for_each_page, page))
def process_page(redirects_lookup, page, is_seed_page=False):
    """Clean a raw wiki page and return its document info, link contexts and
    per-entity mention counts as a dict with keys 'document_info',
    'link_contexts' and 'entity_counts'."""
    cleaned_page = clean_page(page)
    document_info = {'source_id': cleaned_page['pageID'],
                     'title': cleaned_page['title'],
                     'text': cleaned_page['plaintext'],
                     'categories': cleaned_page['categories'],
                     'is_disambiguation_page': cleaned_page['isDisambiguation'],
                     'is_seed_page': is_seed_page}
    link_contexts = get_link_contexts_using_heuristics(redirects_lookup, cleaned_page)
    # entity -> number of mentions found for it on this page
    entity_counts = _.map_values(link_contexts, len)
    return {'document_info': document_info,
            'link_contexts': link_contexts,
            'entity_counts': entity_counts}
def merge_mentions(processed_pages):
    '''merge the link contexts from a list of pages'''
    # Works for both lists (concatenation) and ints (addition); the falsy-dest
    # shortcut is safe because 0 + src == src and [] + src == src.
    concat = lambda dest, src: dest + src if dest else src
    link_contexts = reduce(lambda acc, val: _.merge_with(acc, val, iteratee=concat),
                           [processed_page['link_contexts'] for processed_page in processed_pages],
                           {})
    entity_counts = reduce(lambda acc, val: _.merge_with(acc, val, iteratee=concat),
                           [processed_page['entity_counts'] for processed_page in processed_pages],
                           {})
    # Pair each entity's merged contexts with its merged count.
    return _.map_values(link_contexts,
                        lambda val, key: {'link_contexts': val,
                                          'entity_counts': entity_counts[key]})
|
994,887 | 97b0938118ac418024deaeecb4338a80a82657bb | # -*- coding: utf-8 -*-
import os
import sys
def entrypoint_exists(entry_point):
    """Return True if *entry_point* is installed next to the running Python
    executable (e.g. a console script in the same bin/Scripts directory).

    On Windows the ``.exe`` suffix is appended before checking.
    """
    suffix = ".exe" if sys.platform == "win32" else ""
    candidate = os.path.join(os.path.dirname(sys.executable), entry_point + suffix)
    return os.path.exists(candidate)
994,888 | 06593b574344ab888faad19841950b7dda541857 | import final_project as fp
# Example string input. Use it to test your code.
# (One long string; the trailing backslashes continue the literal without
# inserting newlines.)
example_input="John is connected to Bryant, Debra, Walter.\
John likes to play The Movie: The Game, The Legend of Corgi, Dinosaur Diner.\
Bryant is connected to Olive, Ollie, Freda, Mercedes.\
Bryant likes to play City Comptroller: The Fiscal Dilemma, Super Mushroom Man.\
Mercedes is connected to Walter, Robin, Bryant.\
Mercedes likes to play The Legend of Corgi, Pirates in Java Island, Seahorse Adventures.\
Olive is connected to John, Ollie.\
Olive likes to play The Legend of Corgi, Starfleet Commander.\
Debra is connected to Walter, Levi, Jennie, Robin.\
Debra likes to play Seven Schemers, Pirates in Java Island, Dwarves and Swords.\
Walter is connected to John, Levi, Bryant.\
Walter likes to play Seahorse Adventures, Ninja Hamsters, Super Mushroom Man.\
Levi is connected to Ollie, John, Walter.\
Levi likes to play The Legend of Corgi, Seven Schemers, City Comptroller: The Fiscal Dilemma.\
Ollie is connected to Mercedes, Freda, Bryant.\
Ollie likes to play Call of Arms, Dwarves and Swords, The Movie: The Game.\
Jennie is connected to Levi, John, Freda, Robin.\
Jennie likes to play Super Mushroom Man, Dinosaur Diner, Call of Arms.\
Robin is connected to Ollie.\
Robin likes to play Call of Arms, Dwarves and Swords.\
Freda is connected to Olive, John, Debra.\
Freda likes to play Starfleet Commander, Ninja Hamsters, Seahorse Adventures."
# --- Smoke tests against the example network (Python 2 script) ---
net = fp.create_data_structure(example_input)
#print net
print net
print fp.get_connections(net, "Debra")
print fp.get_connections(net, "Mercedes")
print fp.get_games_liked(net, "John")
print fp.add_connection(net, "John", "Freda")
print fp.add_new_user(net, "Debra", [])
print fp.add_new_user(net, "Nick", ["Seven Schemers", "The Movie: The Game"]) # True
print fp.get_secondary_connections(net, "Mercedes")
print fp.connections_in_common(net, "Mercedes", "John")
print fp.path_to_friend(net, "John", "Ollie")
print "*******"
print "*******"
# --- Path-finding on a tiny hand-built network ---
network = fp.create_data_structure('')
network = fp.add_new_user(network,'Alice',[])
network = fp.add_new_user(network,'Bob',[])
network = fp.add_new_user(network,'Carol',[])
network = fp.add_connection(network,'Alice','Bob')
network = fp.add_connection(network,'Bob','Carol')
network = fp.add_connection(network,'Carol','Bob')
print network
print fp.path_to_friend(network,'Bob','Alice')
print "*******"
print "*******"
# --- Same network again; query with an unknown start user ---
network = fp.create_data_structure('')
network = fp.add_new_user(network,'Alice',[])
network = fp.add_new_user(network,'Bob',[])
network = fp.add_new_user(network,'Carol',[])
network = fp.add_connection(network,'Alice','Bob')
network = fp.add_connection(network,'Bob','Carol')
network = fp.add_connection(network,'Carol','Bob')
print network
fp.path_to_friend(network,'David','Carol')
|
994,889 | 33ec20f9abbd052635cd0619920d7d43984fdafa | import tensorflow as tf
import awesome_gans.modules as t
tf.set_random_seed(777) # reproducibility
class CycleGAN:
    """CycleGAN with WGAN-GP discriminators (TensorFlow 1.x graph-mode)."""

    def __init__(
        self,
        s,
        batch_size=8,
        height=128,
        width=128,
        channel=3,
        sample_num=1 * 1,
        sample_size=1,
        df_dim=64,
        gf_dim=32,
        fd_unit=512,
        g_lr=2e-4,
        d_lr=2e-4,
        epsilon=1e-9,
    ):
        """
        # General Settings
        :param s: TF Session
        :param batch_size: training batch size, default 8
        :param height: input image height, default 128
        :param width: input image width, default 128
        :param channel: input image channel, default 3 (RGB)
        - in case of Celeb-A, image size is 128x128x3(HWC).
        # Output Settings
        :param sample_num: the number of output images, default 4
        :param sample_size: sample image size, default 2
        # For CNN model
        :param df_dim: discriminator filter, default 64
        :param gf_dim: generator filter, default 32
        :param fd_unit: fully connected units, default 512
        # Training Option
        :param g_lr: generator learning rate, default 2e-4
        :param d_lr: discriminator learning rate, default 2e-4
        :param epsilon: epsilon, default 1e-9
        """
        self.s = s
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.channel = channel
        self.image_shape = [self.batch_size, self.height, self.width, self.channel]
        self.sample_num = sample_num
        self.sample_size = sample_size
        self.df_dim = df_dim
        self.gf_dim = gf_dim
        self.fd_unit = fd_unit
        # Adam momentum terms
        self.beta1 = 0.5
        self.beta2 = 0.999
        self.d_lr = d_lr
        self.g_lr = g_lr
        # WGAN-GP gradient-penalty weight and cycle-consistency weight
        self.lambda_ = 10.0
        self.lambda_cycle = 10.0
        self.n_train_critic = 10
        self.eps = epsilon
        # pre-defined (populated by build_cyclegan)
        self.g_a2b = None
        self.g_b2a = None
        self.g_a2b2a = None
        self.g_b2a2b = None
        self.w_a = None
        self.w_b = None
        self.w = None
        self.gp_a = None
        self.gp_b = None
        self.gp = None
        self.g_loss = 0.0
        self.g_a_loss = 0.0
        self.g_b_loss = 0.0
        self.d_loss = 0.0
        self.cycle_loss = 0.0
        self.d_op = None
        self.g_op = None
        self.merged = None
        self.writer = None
        self.saver = None
        # placeholders
        self.a = tf.placeholder(tf.float32, [None, self.height, self.width, self.channel], name='image-a')
        self.b = tf.placeholder(tf.float32, [None, self.height, self.width, self.channel], name='image-b')
        self.lr_decay = tf.placeholder(tf.float32, None, name='learning_rate-decay')
        self.build_cyclegan()  # build CycleGAN

    def discriminator(self, x, reuse=None, name=""):
        """
        :param x: 128x128x3 images
        :param reuse: re-usability
        :param name: name
        :return: logits, prob
        """
        with tf.variable_scope('discriminator-%s' % name, reuse=reuse):
            def residual_block(x, f, name=''):
                # NOTE(review): despite the name this is a plain
                # conv->instance-norm->lrelu downsampling block, not a
                # residual connection.
                x = t.conv2d(x, f=f, k=4, s=2, name='disc-conv2d-%s' % name)
                x = t.instance_norm(x, name='disc-ins_norm-%s' % name)
                x = tf.nn.leaky_relu(x, alpha=0.2)
                return x

            x = t.conv2d(x, f=self.df_dim, name='disc-conv2d-0')
            x = tf.nn.leaky_relu(x, alpha=0.2)
            x = residual_block(x, f=self.df_dim * 2, name='1')
            x = residual_block(x, f=self.df_dim * 4, name='2')
            x = residual_block(x, f=self.df_dim * 8, name='3')
            # for 256x256x3 images
            # x = residual_block(x, f=self.df_dim * 8, name='4')
            # x = residual_block(x, f=self.df_dim * 8, name='5')
            logits = t.conv2d(x, f=1, name='disc-con2d-last')
            # prob = tf.nn.sigmoid(logits)
            return logits

    def generator(self, x, reuse=None, name=""):
        """The form of Auto-Encoder
        :param x: 128x128x3 images
        :param reuse: re-usability
        :param name: name
        :return: logits, prob
        """
        with tf.variable_scope('generator-%s' % name, reuse=reuse):
            def d(x, f, name=''):
                # downsampling block
                x = t.conv2d(x, f=f, k=3, s=2, name='gen-d-conv2d-%s' % name)
                x = t.instance_norm(x, name='gen-d-ins_norm-%s' % name)
                x = tf.nn.relu(x)
                return x

            def R(x, f, name=''):
                # transformation block (two convs; no skip connection here)
                x = t.conv2d(x, f=f, k=3, s=1, name='gen-R-conv2d-%s-0' % name)
                x = t.conv2d(x, f=f, k=3, s=1, name='gen-R-conv2d-%s-1' % name)
                x = t.instance_norm(x, name='gen-R-ins_norm-%s' % name)
                x = tf.nn.relu(x)
                return x

            def u(x, f, name=''):
                # upsampling block
                x = t.deconv2d(x, f=f, k=3, s=2, name='gen-u-deconv2d-%s' % name)
                x = t.instance_norm(x, name='gen-u-ins_norm-%s' % name)
                x = tf.nn.relu(x)
                return x

            x = t.conv2d(x, f=self.gf_dim, k=7, s=1, name='gen-conv2d-0')
            x = d(x, self.gf_dim * 2, name='1')
            x = d(x, self.gf_dim * 4, name='2')
            for i in range(1, 7):
                x = R(x, self.gf_dim * 4, name=str(i))
            x = u(x, self.gf_dim * 4, name='1')
            x = u(x, self.gf_dim * 2, name='2')
            logits = t.conv2d(x, f=3, k=7, s=1, name='gen-conv2d-1')
            prob = tf.nn.tanh(logits)  # outputs in [-1, 1]
            return prob

    def build_cyclegan(self):
        """Assemble the full graph: both generators, both critics, WGAN-GP
        losses, cycle-consistency loss, summaries and Adam optimizers."""
        # Generator
        with tf.variable_scope("generator-a2b"):
            self.g_a2b = self.generator(self.a, name="a2b")  # a to b
        with tf.variable_scope("generator-b2a"):
            self.g_b2a = self.generator(self.b, name="b2a")  # b to a
        with tf.variable_scope("generator-b2a", reuse=True):
            self.g_a2b2a = self.generator(self.g_a2b, reuse=True, name="b2a")  # a to b to a
        with tf.variable_scope("generator-a2b", reuse=True):
            self.g_b2a2b = self.generator(self.g_b2a, reuse=True, name="a2b")  # b to a to b
        # Classifier
        with tf.variable_scope("discriminator-a"):
            # random interpolate between real and generated for the gradient penalty
            alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0.0, maxval=1.0)
            a_hat = alpha * self.a + (1.0 - alpha) * self.g_b2a
            d_a = self.discriminator(self.a)
            d_b2a = self.discriminator(self.g_b2a, reuse=True)
            d_a_hat = self.discriminator(a_hat, reuse=True)
        with tf.variable_scope("discriminator-b"):
            alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0.0, maxval=1.0)
            b_hat = alpha * self.b + (1.0 - alpha) * self.g_a2b
            d_b = self.discriminator(self.b)
            d_a2b = self.discriminator(self.g_a2b, reuse=True)
            d_b_hat = self.discriminator(b_hat, reuse=True)
        # Training Ops: Wasserstein estimates per domain
        self.w_a = tf.reduce_mean(d_a) - tf.reduce_mean(d_b2a)
        self.w_b = tf.reduce_mean(d_b) - tf.reduce_mean(d_a2b)
        self.w = self.w_a + self.w_b
        # gradient penalties toward unit gradient norm on the interpolates
        self.gp_a = tf.reduce_mean(
            (tf.sqrt(tf.reduce_sum(tf.gradients(d_a_hat, a_hat)[0] ** 2, reduction_indices=[1, 2, 3])) - 1.0) ** 2
        )
        self.gp_b = tf.reduce_mean(
            (tf.sqrt(tf.reduce_sum(tf.gradients(d_b_hat, b_hat)[0] ** 2, reduction_indices=[1, 2, 3])) - 1.0) ** 2
        )
        self.gp = self.gp_a + self.gp_b
        self.d_loss = self.lambda_ * self.gp - self.w
        # L1 cycle-consistency in both directions
        cycle_a_loss = tf.reduce_mean(tf.reduce_mean(tf.abs(self.a - self.g_a2b2a), reduction_indices=[1, 2, 3]))
        cycle_b_loss = tf.reduce_mean(tf.reduce_mean(tf.abs(self.b - self.g_b2a2b), reduction_indices=[1, 2, 3]))
        self.cycle_loss = cycle_a_loss + cycle_b_loss
        # using adv loss
        self.g_a_loss = -1.0 * tf.reduce_mean(d_b2a)
        self.g_b_loss = -1.0 * tf.reduce_mean(d_a2b)
        self.g_loss = self.g_a_loss + self.g_b_loss + self.lambda_cycle * self.cycle_loss
        # Summary
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/cycle_loss", self.cycle_loss)
        tf.summary.scalar("loss/cycle_a_loss", cycle_a_loss)
        tf.summary.scalar("loss/cycle_b_loss", cycle_b_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        tf.summary.scalar("loss/g_a_loss", self.g_a_loss)
        tf.summary.scalar("loss/g_b_loss", self.g_b_loss)
        tf.summary.scalar("misc/gradient_penalty", self.gp)
        tf.summary.scalar("misc/g_lr", self.g_lr)
        tf.summary.scalar("misc/d_lr", self.d_lr)
        # Optimizer: split variables by scope-name prefix ('d...' / 'g...')
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]
        self.d_op = tf.train.AdamOptimizer(
            learning_rate=self.d_lr * self.lr_decay, beta1=self.beta1, beta2=self.beta2
        ).minimize(self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(
            learning_rate=self.g_lr * self.lr_decay, beta1=self.beta1, beta2=self.beta2
        ).minimize(self.g_loss, var_list=g_params)
        # Merge summary
        self.merged = tf.summary.merge_all()
        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
|
994,890 | 40f634813d86c0b52491af676c89dece532efc20 |
import requests
from bs4 import BeautifulSoup
#pulling all homepage content to test out the web-scraper
url = 'http://mothertreewellness.com'
# Fetch the homepage with a 5-second timeout, parse it with the stdlib
# html.parser backend, and dump the parsed tree to stdout.
response = requests.get(url, timeout = 5)
content = BeautifulSoup( response.content, "html.parser")
print (content)
|
994,891 | 35e15d19521612ef418e84821edc2099a9f8c00b | # Generated by Django 3.1.7 on 2021-03-25 12:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.7): creates the Ads model for the United app.

    dependencies = [
        ('United', '0007_auto_20210304_1552'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ads',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('headline', models.CharField(max_length=60)),
                ('description', models.CharField(max_length=90)),
                ('callToAction', models.CharField(max_length=15)),
                # bid amount; units not specified in this migration
                ('bid', models.IntegerField()),
                # owner of the ad; deleting the user cascades to their ads
                ('advertiser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='advert', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
994,892 | 3648836158d0134512a2e697a293eee997d0a7e9 | """Base class of stress testing"""
import time
import logging
import sys
from logging.handlers import RotatingFileHandler
from selenium.webdriver import Firefox as ffx
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from xvfbwrapper import Xvfb
__author__ = "Jonny Elliott"
class WebPage(ffx):
    """Firefox WebDriver wrapper that times page load, login and logout for
    stress testing. A value of -99 in the *_time attributes means the step
    has not been measured yet."""

    def __init__(self, url, headless=False):
        if headless:
            # must start the virtual display before Firefox itself
            self.makeHeadLess()
        ffx.__init__(self)
        self.url = url
        self.minimum_timeout = 60  # seconds to wait for expected elements
        self.headless = headless
        self.timed_out = False
        self.load_time = -99
        self.login_time = -99
        self.logout_time = -99
        # Logging
        logfmt = '%(levelname)s [%(asctime)s]:\t %(message)s'
        datefmt= '%m/%d/%Y %I:%M:%S %p'
        formatter = logging.Formatter(fmt=logfmt,datefmt=datefmt)
        self.logger = logging.getLogger('__main__')
        logging.root.setLevel(logging.DEBUG)
        # NOTE(review): hard-coded absolute log path ties this to one machine.
        rfh = RotatingFileHandler(filename="/diska/home/jonny/sw/python/stress/stress/gp_timings.log",maxBytes=1048576,backupCount=3,mode='a')
        rfh.setFormatter(formatter)
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        # Replace any handlers installed by earlier instances to avoid
        # duplicated log lines.
        self.logger.handlers = []
        self.logger.addHandler(ch)
        self.logger.addHandler(rfh)

    def makeHeadLess(self):
        """Start a virtual X display (Xvfb) so Firefox can run without a screen."""
        self.xvfb = Xvfb(width=1280, height=720)
        self.xvfb.start()
        self.headless = True

    def pageLoad(self, wait=False):
        """Load self.url and record the elapsed time in self.load_time.

        If *wait* is an element id (truthy string), block until that element
        appears or minimum_timeout elapses; a timeout sets self.timed_out.
        """
        self.logger.info("Loading page.")
        start_time = time.time()
        self.get(self.url)
        if wait:
            try:
                element = WebDriverWait(self, self.minimum_timeout).until(EC.presence_of_element_located((By.ID, wait)))
            except TimeoutException:
                self.logger.warning("TimeoutException thrown. Continuing.")
                self.timed_out = True
            except:
                # NOTE(review): bare except; any other error terminates the process.
                self.logger.error("Unexpected behaviour. Terminating.")
                sys.exit()
        end_time = time.time()
        self.load_time = end_time - start_time
        self.logger.info("Page loaded successfully.")

    def findAndClick(self, value, by=By.ID):
        """Locate a single element (by id unless *by* says otherwise) and click it."""
        self.find_element(by=by, value=value).click()

    def pageLogin(self, username="test", password="test",
                  username_value="test", password_value="test", login_value="test",
                  wait=False):
        """Fill the login form and submit it, recording elapsed time in
        self.login_time.

        *username_value*/*password_value*/*login_value* are the element ids of
        the two inputs and the submit button; *wait* works as in pageLoad.
        """
        self.logger.info("Logging into page.")
        username_element = self.find_element(by=By.ID, value=username_value)
        password_element = self.find_element(by=By.ID, value=password_value)
        username_element.send_keys(username)
        password_element.send_keys(password)
        start_time = time.time()
        self.findAndClick(login_value)
        if wait:
            try:
                element = WebDriverWait(self, self.minimum_timeout).until(EC.presence_of_element_located((By.ID, wait)))
            except TimeoutException:
                # zeroing start_time makes login_time equal to the absolute
                # end timestamp -- flags the sample as a timeout
                start_time = 0
                self.logger.warning("TimeoutException thrown. Continuing.")
            except:
                self.logger.error("Unexpected behaviour. Terminating.")
                sys.exit()
        end_time = time.time()
        self.login_time = end_time - start_time
        self.logger.info("Page logged into sucessfully.")

    def pageLogout(self, logout_value="tool-exit", confirm_value="//button[text()=\"Yes\"]", wait=False):
        """Click the logout control and its confirmation button, recording the
        elapsed time in self.logout_time; *wait* works as in pageLoad."""
        self.logger.info("Logging out of page.")
        start_time = time.time()
        self.findAndClick(value=logout_value)
        self.findAndClick(value=confirm_value, by=By.XPATH)
        if wait:
            try:
                element = WebDriverWait(self, self.minimum_timeout).until(EC.presence_of_element_located((By.ID, wait)))
            except TimeoutException:
                start_time = 0
                self.logger.warning("TimeoutException thrown. Continuing.")
            except:
                self.logger.error("Unexpected behaviour. Terminating.")
                sys.exit()
        end_time = time.time()
        self.logout_time = end_time - start_time
        self.logger.info("Page logged out successfully.")

    def quit(self):
        """Shut down the browser and, if headless, the virtual display."""
        if self.headless:
            self.xvfb.stop()
        super(WebPage, self).quit()
|
994,893 | dd34c4979e6fc2d07f93017346284566a521ac7b | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# __author__:吕从雷
import pandas as pd
import re
# NOTE(review): ``filt`` is never used in this module -- candidate for removal.
filt = re.compile('\n')
# Module-level file handle consumed incrementally by load_data(); never closed.
f = open('../liepin/workyear_data/workyear.txt')
def load_data():
    '''
    Load the raw data exported from Hive, splitting each line on tabs.
    :return: work_list -- list of per-line field lists
    '''
    line = f.readline()
    work_list = []
    while line:
        tmp = line.strip().split('\t')
        work_list.append(tmp)
        line = f.readline()
    return work_list
def save_to_csv():
    """Convert the loaded rows to a two-column DataFrame and write them to CSV."""
    work_list = load_data()
    work_df = pd.DataFrame(work_list)
    # assumes every input line splits into exactly 2 fields -- TODO confirm
    work_df.columns = ['work','count']
    work_df.to_csv('../liepin/workyear_data/raw_work.csv',index=None)
    print('转存csv成功')
def run_main_liepin():
    """Pipeline entry point: delegates to save_to_csv()."""
    save_to_csv()
if __name__ == '__main__':
    # Consistency fix: go through the declared entry point instead of calling
    # save_to_csv() directly, so script and pipeline execution share one path.
    run_main_liepin()
994,894 | 7ce7c709a373019279571b81c63e34b2d5b8985c | import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import config
from parsl.executors.errors import UnsupportedFeatureError
from parsl.executors import WorkQueueExecutor
@python_app
def double(x, parsl_resource_specification={}):
    # NOTE(review): the mutable default is deliberate -- parsl inspects this
    # keyword to attach resource requirements; it is never mutated here.
    return x * 2
def test_resource(n=2):
    """Submit ``double`` with a resource specification.

    Executors that do not support resource specs must raise
    UnsupportedFeatureError; otherwise the active (non-data_manager) executor
    is expected to be a WorkQueueExecutor.
    """
    spec = {'cores': 2, 'memory': '1GiB'}
    fut = double(n, parsl_resource_specification=spec)
    try:
        fut.result()
    except Exception as e:
        assert isinstance(e, UnsupportedFeatureError)
    else:
        executors = parsl.dfk().executors
        executor = None
        for label in executors:
            if label != 'data_manager':
                executor = executors[label]
                break
        assert isinstance(executor, WorkQueueExecutor)
if __name__ == '__main__':
    # Run the check standalone with the local-threads test configuration.
    local_config = config
    parsl.load(local_config)
    x = test_resource(2)
|
994,895 | 9d1cc84eaa3b14881de9675bcbafe42cdff34522 | from unittest import TestCase
import numpy as np
from numpy.testing import assert_allclose
from graphdg.standardize import ArrayStandardizer
class TestNormalizers(TestCase):
    """Tests for ArrayStandardizer (per-column mean/std normalization)."""

    def setUp(self):
        # 2x3 matrix: statistics are computed per column, over the rows
        self.matrix = np.array([
            [0, 1, 2],
            [1, 2, 4],
        ])

    def test_standardize(self):
        """Fitted mean/std match the columns and the transform round-trips."""
        standardizer = ArrayStandardizer.from_array(self.matrix)
        assert_allclose(standardizer.mean, np.array([0.5, 1.5, 3]))
        self.assertEqual(standardizer.mean.shape, (3, ))
        assert_allclose(standardizer.std, np.array([0.5, 0.5, 1]))
        self.assertEqual(standardizer.std.shape, (3, ))
        # destandardize(standardize(x)) must reproduce the input
        assert_allclose(self.matrix, standardizer.destandardize(standardizer.standardize(self.matrix)))

    def test_standardizer_fail(self):
        """Arrays with a mismatched trailing dimension are rejected."""
        standardizer = ArrayStandardizer.from_array(self.matrix)
        t = np.array([[0, 1]])
        with self.assertRaises(ValueError):
            standardizer.standardize(t)
        with self.assertRaises(ValueError):
            standardizer.destandardize(t)
|
994,896 | b5a79cbbc5a5705478af6d71de398fbcab52b8ab | from django.conf.urls import url
from . import views
# URL namespace used for reversing (e.g. "TransportationManagement:index").
app_name = 'TransportationManagement'

urlpatterns = [
    # authentication / landing pages
    url(r'^$',views.need_login, name='login'),
    url(r'^index/$', views.index, name='index'),
    url(r'^hello/$', views.hello, name='hello'),
    url(r'^test/$', views.test, name='test'),
    # list views
    url(r'^accident/$', views.accident, name='accident'),
    url(r'^car/$', views.car, name='car'),
    url(r'^record/$', views.record, name='record'),
    url(r'^proposer/$', views.proposer, name='proposer'),
    url(r'^driver/$', views.driver, name='driver'),
    url(r'^register/$', views.register, name='register'),
    url(r'^login/$', views.mylogin, name='mylogin'),
    url(r'^logout/$', views.mylogout, name='mylogout'),
    # add/change/delete triplets, one per model
    url(r'^addcar/$', views.add_car, name='addcar'),
    url(r'^changecar/$', views.change_car, name='changecar'),
    url(r'^deletecar/$', views.delete_car, name='deletecar'),
    url(r'^adddriver/$', views.add_driver, name='adddriver'),
    url(r'^changedriver/$', views.change_driver, name='changedriver'),
    url(r'^deletedriver/$', views.delete_driver, name='deletedriver'),
    url(r'^addproposer/$', views.add_proposer, name='addproposer'),
    url(r'^changeproposer/$', views.change_proposer, name='changeproposer'),
    url(r'^deleteproposer/$', views.delete_proposer, name='deleteproposer'),
    url(r'^addrecord/$', views.add_record, name='addrecord'),
    url(r'^changerecord/$', views.change_record, name='changerecord'),
    url(r'^deleterecord/$', views.delete_record, name='deleterecord'),
    url(r'^addaccident/$', views.add_accident, name='addaccident'),
    url(r'^changeaccident/$', views.change_accident, name='changeaccident'),
    url(r'^deleteaccident/$', views.delete_accident, name='deleteaccident'),
    url(r'^ajax_get/$', views.ajax_get, name='ajax_get'),
]
994,897 | 38c0e5040e53822de7751dcde252f4e38c2f189a | #!/usr/bin/env python
import os
import numpy
import multiprocessing
# Enable multiprocessing in modules that import this flag.
__MULTIPROCESSING__ = True
# Emulate HW - for testing without hardware
__EMULATE_HW__ = False
# Simulate Events - for testing
__SIMULATE_EVENTS__ = True
|
994,898 | e210a3f9fda1c9fda34269f34f7d7db6a129c630 | #!/usr/bin/python
"""@package OSCServer
Documentation for this module
"""
import sys
sys.path.append("/usr/local/pltn/Modules")
from OSC import OSCServer
from CommonLED import CommonLED
from Effects import Effects
import time
from subprocess import call
import threading
from pltnGpio import pltnGpio
#import ledTwitter
#dict for GPIO pins
pg = pltnGpio.pltnGpio()
gpioPins = pg.getAllPins('asDict')
ef = Effects.Effects()
#Define OSC server port and traceback IP
OSCPort = 4567
OSCIP = "0.0.0.0"
#Instantiate server
oscSrv = OSCServer((OSCIP,OSCPort))
#instantiate time object
localtime = time.asctime( time.localtime(time.time()) )
#instantiate common LED object
cLED = CommonLED.CommonLED()
def led(path, tags, args, source):
    """Callback function to handle all LED functions.
    OSC Msg: /pltn/led <color+stripNum>|<LEDprogram> <action> (0|1)
    Examples: /pltn/led r1 1 solid
    /pltn/led b1 1 flash
    /pltn/led g1 1 flashFade
    """
    oscProg = args[0]
    print(oscProg)
    # First argument is either a pin name (e.g. "r1") or a program name.
    if oscProg in gpioPins:
        pinValue = args[1]
        action = args[2]
        # Direct dict lookup -- replaces the original linear scan over
        # gpioPins.iteritems() for a key we already know is present.
        gpioPin = gpioPins[oscProg]
        # set the pin color / run the requested effect
        if action == 'solid':
            cLED.setPinValue(gpioPin, pinValue)
        elif action == 'flashFade':
            t = threading.Thread(target=ef.ledFlashFade, args=(gpioPin, pinValue, 0.01))
            t.start()
            # BUG FIX: was ``t.join`` (bare attribute access, a no-op);
            # the worker thread is now actually joined as intended.
            t.join()
        elif action == "flash":
            ef.flash(gpioPin, 0.1)
        elif action == "contFlash":
            # NOTE(review): identical to "flash" -- presumably meant to loop.
            ef.flash(gpioPin, 0.1)
        else:
            # not a valid option
            pass
    # Turn all LEDs on (both strips, full white)
    elif oscProg == 'allOn':
        cLED.setColor(1, [1, 1, 1])
        cLED.setColor(2, [1, 1, 1])
    # Turn all LEDs off
    elif oscProg == 'allOff':
        cLED.allOff()
    else:
        pass
def rpi(path, tags, args, source):
    """
    Callback function to handle all RPi related functions
    OSC Msg: /pltn/rpi <function> <secretKey>
    """
    #get the RPi command to run
    # NOTE(review): hard-coded shared secret sent in cleartext over OSC --
    # anyone who can read this file or sniff traffic can shut the Pi down.
    authzKey = "3681"
    cmd = args[0]
    key = args[1]
    #Check for proper command and authorization Key
    if cmd == 'off' and key == authzKey:
        print "{0}: RPi received shutdown command" .format(localtime)
        call(["sudo", "shutdown", "-h", "now"])
    else:
        print "{0}: \"{1} {2} {3}\" Not an allowed function/key combo" .format(localtime,path,cmd,key)
def srvc(path, tags, args, source):
    """
    Callback function to handle all RPi related functions
    OSC Msg: /pltn/srvc <srvcName> start|stop
    """
    #list of allowed services and values. Security to prevent rogue
    #msgs being sent and started
    allowedSrvcs = ["pi-blaster","ssh","rangeSensor","pltnAgent"]
    allowedCmds = ["start","stop","status"]
    srvcName = args[0]
    value = args[1]
    #check if this is an allowed command (whitelist keeps the sudo call safe)
    if srvcName in allowedSrvcs and value in allowedCmds:
        call(["sudo", "service", srvcName, value])
    else:
        print "{0}: \"{1} {2} {3}\" Not allowed" .format(localtime,path,srvcName,value)
def heartbeat(path, tags, args, source):
    """
    Callback function to process heartbeats from RPi.
    Code here will send to OSC server on CC.pde
    """
    # Debug dump of the first four heartbeat payload fields.
    print "---------------"
    print path
    print args[0]
    print args[1]
    print args[2]
    print args[3]
#def tweet(path, tags, args, source):
#ledTwitter.fadeTweet()
#Message Handlers and Callback functions
oscSrv.addMsgHandler("/pltn/led",led)
oscSrv.addMsgHandler("/pltn/srvc",srvc)
oscSrv.addMsgHandler("/pltn/rpi",rpi)
oscSrv.addMsgHandler("/pltn/heartbeat",heartbeat)
#oscSrv.addMsgHandler("/led/tweet",tweet)
print "\n listening on port: %i" % OSCPort
# Serve forever; Ctrl-C turns the LEDs off and closes the server cleanly.
try:
    while True:
        oscSrv.handle_request()
except KeyboardInterrupt:
    cLED.allOff()
    print "Quit"
    oscSrv.close()
994,899 | 50e22fb1bb8918b9b1c5185b31d79398ef653a2e | # -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.util import Qt
import pyqtgraph as pg
from acq4.util.debug import Profiler
Ui_Form = Qt.importTemplate('.atlasCtrlTemplate')
class Atlas(Qt.QObject):
    """An Atlas is responsible for determining the position of images, cells, scan data, etc relative
    to a common coordinate system."""
    # FIX: the string above was originally placed *after* DBIdentity, where it
    # is a no-op expression rather than the class docstring; moving it first
    # makes it the real __doc__ with no other behavioral change.

    # Subclasses must override with a unique string identifying their DB tables.
    DBIdentity = None

    def __init__(self, state=None):
        Qt.QObject.__init__(self)
        if state is not None:
            self.restoreState(state)

    def ctrlWidget(self, host):
        """Return the control widget for this atlas (abstract)."""
        raise Exception("Must be reimplemented in subclass.")

    def mapToAtlas(self, obj):
        """Maps obj into atlas coordinates. Obj can be any object mappable by QMatrix4x4"""
        raise Exception("Must be reimplemented in subclass.")

    def getState(self):
        """Return a serializable state dict (abstract)."""
        raise Exception("Must be reimplemented in subclass.")

    def setState(self, state):
        """Apply a previously saved state (abstract)."""
        raise Exception("Must be reimplemented in subclass.")

    def restoreState(self, state):
        """Restore from a saved state at construction time (abstract)."""
        raise Exception("Must be reimplemented in subclass.")

    def name(self):
        """Returns the name of the atlas"""
        raise Exception("Must be reimplemented in subclass.")

    #def close(self):
        #pass
class AtlasCtrlWidget(Qt.QWidget):
    """Base control widget for an Atlas: manages the current slice directory
    and stores atlas-mapped cell/protocol positions into the database."""

    def __init__(self, atlas, host):
        Qt.QWidget.__init__(self)
        self.sliceDir = None
        #self.blockUpdate = 0 ## used in CNAtlas to block re-rendering
        self.atlas = atlas
        self.host = host
        self.canvas = host.getElement('Canvas')
        self.dataManager = host.dataManager()
        self.dataModel = self.dataManager.dataModel()
        self.loader = host.getElement('File Loader')
        self.loader.sigBaseChanged.connect(self.baseDirChanged)
        self.ctrl = Qt.QWidget()
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.ui.setSliceBtn.clicked.connect(self.setSliceClicked)
        self.ui.storeBtn.clicked.connect(self.storeBtnClicked)
        #self.baseDirChanged()
        ## set up two tables for storing atlas positions of cells and stimulation sites
        # NOTE(review): comparison should be ``is None`` rather than ``== None``.
        if atlas.DBIdentity == None:
            raise Exception("Atlas needs to have a DBIdentity specified." )
        tables = {
            atlas.DBIdentity+"_cell": "%s_Cell" %atlas.name(),
            atlas.DBIdentity+"_protocol": "%s_Protocol" %atlas.name(),
        }
        self.ui.dbWidget.setDataManager(self.dataManager)
        self.ui.dbWidget.setTables(tables)

    def loadState(self):
        raise Exception("Must be re-implemented in subclass.")

    def saveState(self):
        raise Exception("Must be re-implemented in subclass.")

    def generateDataArray(self, positions, dirType):
        """Return a tuple (data, fields). Data should be a record array with the column names/values to be stored.
        Fields should be an OrderedDict of column names : sql datatype."""
        raise Exception("Must be re-implemented in subclass")

    def baseDirChanged(self):
        ## file loader base dir changed; if it s a slice, set it now.
        try:
            self.setSliceDir(self.loader.baseDir())
        except:
            # deliberately best-effort: non-slice base dirs are simply ignored
            pass

    def setSliceClicked(self):
        """Use the single directory selected in the file tree as the slice."""
        dh = self.loader.selectedFiles()
        if len(dh) != 1:
            raise Exception('Select a slice directory from the file tree.')
        self.setSliceDir(dh[0])

    def setSliceDir(self, dh):
        """Set *dh* as the current slice directory; raises if it is not a
        Slice-type directory. Reloads saved atlas state when present."""
        if not dh.isDir() or not self.dataModel.dirType(dh) == 'Slice':
            #self.sliceRoi.setVisible(False)
            self.sliceDir = None
            self.ui.sliceLabel.setText('None')
            raise Exception('Selected file is not a slice directory')
        self.sliceDir = dh
        #self.sliceRoi.setVisible(True)
        # display the path relative to the loader's base dir when possible
        base = self.loader.baseDir()
        if dh is base:
            name = dh.shortName()
        else:
            name = dh.name(relativeTo=base)
        self.ui.sliceLabel.setText(name)
        if self.atlas.name() in dh.info().get('atlas', {}):
            self.loadState()
        #else:
        #    self.updateAtlas()

    def storeBtnClicked(self):
        """Run storeToDB() with busy/success/failure feedback on the button."""
        self.ui.storeBtn.processing("Storing...")
        try:
            self.storeToDB()
            self.ui.storeBtn.success("Stored!")
        except:
            self.ui.storeBtn.failure()
            raise

    def storeToDB(self):
        ## collect list of cells and scans under this slice,
        ## read all positions with userTransform corrections
        prof = Profiler("Atlas.storeToDB", disabled=True)
        loaded = self.host.getLoadedFiles()
        cells = []
        prots = []
        for f in loaded:
            # only directories that live under the current slice are relevant
            if not f.isDir() or not f.isGrandchildOf(self.sliceDir):
                continue
            if self.dataModel.dirType(f) == 'Cell':
                info = f.info()
                if 'userTransform' not in info:
                    continue
                cells.append((f, info['userTransform']['pos']))
            elif self.dataModel.dirType(f) == 'Protocol':
                info = f.info()
                scanInfo = info.get('Scanner', None)
                if scanInfo is None:
                    continue
                # apply the user's manual alignment to the scanner position
                tr = pg.SRTTransform(info.get('userTransform', None))
                pos = tr.map(*scanInfo['position'])
                prots.append((f, pos))
            elif self.dataModel.dirType(f) == 'ProtocolSequence':
                info = f.info()
                tr = pg.SRTTransform(info.get('userTransform', None))
                # the sequence's transform applies to every sub-protocol
                for subName in f.subDirs():
                    subf = f[subName]
                    scanInfo = subf.info().get('Scanner', None)
                    if scanInfo is None:
                        continue
                    pos = tr.map(*scanInfo['position'])
                    prots.append((subf, pos))
        prof.mark("made list of positions")
        for ident, dirType, positions in [('_cell', 'Cell', cells), ('_protocol', 'Protocol', prots)]:
            ## map positions, build data tables
            data, fields = self.generateDataArray(positions, dirType)
            prof.mark("got data arrays for %s" %dirType)
            #dirColumn = dirType + 'Dir'
            #data = np.empty(len(positions), dtype=[('SliceDir', object), (dirColumn, object), ('right', float), ('anterior', float), ('dorsal', float)])
            #for i in range(len(positions)):
                #dh, pos = positions[i]
                #mapped = self.atlas.mapToAtlas(pg.Point(pos))
                ##print dh, pos
                ##print "   right:", mapped.x()
                ##print "   anter:", mapped.y()
                ##print "   dorsl:", mapped.z()
                #data[i] = (self.sliceDir, dh, mapped.x(), mapped.y(), mapped.z())
            ## write to DB
            db = self.ui.dbWidget.getDb()
            prof.mark('got db')
            table = self.ui.dbWidget.getTableName(self.atlas.DBIdentity+ident)
            prof.mark('got table')
            #fields = collections.OrderedDict([
                #('SliceDir', 'directory:Slice'),
                #(dirColumn, 'directory:'+dirType),
                #('right', 'real'),
                #('anterior', 'real'),
                #('dorsal', 'real'),
            #])
            ## Make sure target table exists and has correct columns
            db.checkTable(table, owner=self.atlas.DBIdentity+ident, columns=fields, create=True)
            prof.mark('checked table')
            ## delete old -- This is the slow part!
            old = db.select(table, where={'SliceDir':self.sliceDir}, toArray=True)
            if old is not None:  ## only do deleting if there is already data stored for this slice -- try to speed things up
                for source in set(data[dirType+'Dir']):
                    if source in old[dirType+'Dir']:  ## check that source is in the old data before we delete it - try to speed things up
                        db.delete(table, where={dirType+'Dir': source})
            prof.mark('deleted old data')
            ## write new
            db.insert(table, data)
            prof.mark("added %s data to db" %dirType)
        prof.finish()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.