index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
984,200 | 6337656a9c8285bac898c1fb96a0b64cef68bb4e | # This file is generated by objective.metadata
#
# Last update: Thu Dec 15 21:00:16 2022
#
# flake8: noqa
import objc, sys
from typing import NewType
# sel32or64 picks its second argument on a 64-bit interpreter and its first
# on a 32-bit one; the choice is baked in once at import time.
if sys.maxsize <= 2**32:

    def sel32or64(a, b):
        """Return the 32-bit variant *a*."""
        return a

else:

    def sel32or64(a, b):
        """Return the 64-bit variant *b*."""
        return b
# selAorI picks the Apple-Silicon variant on arm64 and the Intel variant
# otherwise; objc.arch is inspected exactly once, at import time, just as the
# original pair of conditional definitions did.
_IS_ARM64 = objc.arch == "arm64"


def selAorI(a, b):
    """Return *a* on arm64 (Apple Silicon) machines, *b* on Intel ones."""
    return a if _IS_ARM64 else b
misc = {}
constants = """$SCStreamErrorDomain$SCStreamFrameInfoContentRect$SCStreamFrameInfoContentScale$SCStreamFrameInfoDirtyRects$SCStreamFrameInfoDisplayTime$SCStreamFrameInfoScaleFactor$SCStreamFrameInfoScreenRect$SCStreamFrameInfoStatus$"""
enums = """$SCFrameStatusBlank@2$SCFrameStatusComplete@0$SCFrameStatusIdle@1$SCFrameStatusStarted@4$SCFrameStatusStopped@5$SCFrameStatusSuspended@3$SCStreamErrorAttemptToConfigState@-3810$SCStreamErrorAttemptToStartStreamState@-3807$SCStreamErrorAttemptToStopStreamState@-3808$SCStreamErrorAttemptToUpdateFilterState@-3809$SCStreamErrorFailedApplicationConnectionInterrupted@-3805$SCStreamErrorFailedApplicationConnectionInvalid@-3804$SCStreamErrorFailedNoMatchingApplicationContext@-3806$SCStreamErrorFailedToStart@-3802$SCStreamErrorFailedToStartAudioCapture@-3818$SCStreamErrorFailedToStopAudioCapture@-3819$SCStreamErrorInternalError@-3811$SCStreamErrorInvalidParameter@-3812$SCStreamErrorMissingEntitlements@-3803$SCStreamErrorNoCaptureSource@-3815$SCStreamErrorNoDisplayList@-3814$SCStreamErrorNoWindowList@-3813$SCStreamErrorRemovingStream@-3816$SCStreamErrorUserDeclined@-3801$SCStreamErrorUserStopped@-3817$SCStreamOutputTypeAudio@1$SCStreamOutputTypeScreen@0$"""
misc.update(
{
"SCStreamErrorCode": NewType("SCStreamErrorCode", int),
"SCFrameStatus": NewType("SCFrameStatus", int),
"SCStreamOutputType": NewType("SCStreamOutputType", int),
}
)
misc.update({"SCStreamFrameInfo": NewType("SCStreamFrameInfo", str)})
misc.update({})
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(
b"NSObject",
b"stream:didOutputSampleBuffer:ofType:",
{
"required": False,
"retval": {"type": b"v"},
"arguments": {
2: {"type": b"@"},
3: {"type": b"^{opaqueCMSampleBuffer=}"},
4: {"type": b"q"},
},
},
)
r(
b"NSObject",
b"stream:didStopWithError:",
{
"required": False,
"retval": {"type": b"v"},
"arguments": {2: {"type": b"@"}, 3: {"type": b"@"}},
},
)
r(
b"SCShareableContent",
b"getShareableContentExcludingDesktopWindows:onScreenWindowsOnly:completionHandler:",
{
"arguments": {
2: {"type": b"Z"},
3: {"type": b"Z"},
4: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"@"},
},
}
},
}
},
)
r(
b"SCShareableContent",
b"getShareableContentExcludingDesktopWindows:onScreenWindowsOnlyAboveWindow:completionHandler:",
{
"arguments": {
2: {"type": b"Z"},
4: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"@"},
},
}
},
}
},
)
r(
b"SCShareableContent",
b"getShareableContentExcludingDesktopWindows:onScreenWindowsOnlyBelowWindow:completionHandler:",
{
"arguments": {
2: {"type": b"Z"},
4: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"@"},
},
}
},
}
},
)
r(
b"SCShareableContent",
b"getShareableContentWithCompletionHandler:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"@"},
},
}
}
}
},
)
r(
b"SCStream",
b"addStreamOutput:type:sampleHandlerQueue:error:",
{"retval": {"type": b"Z"}, "arguments": {5: {"type_modifier": b"o"}}},
)
r(
b"SCStream",
b"removeStreamOutput:type:error:",
{"retval": {"type": b"Z"}, "arguments": {4: {"type_modifier": b"o"}}},
)
r(
b"SCStream",
b"startCaptureWithCompletionHandler:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"@"}},
}
}
}
},
)
r(
b"SCStream",
b"stopCaptureWithCompletionHandler:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"@"}},
}
}
}
},
)
r(
b"SCStream",
b"updateConfiguration:completionHandler:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"@"}},
}
}
}
},
)
r(
b"SCStream",
b"updateContentFilter:completionHandler:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"@"}},
}
}
}
},
)
r(b"SCStreamConfiguration", b"capturesAudio", {"retval": {"type": b"Z"}})
r(
b"SCStreamConfiguration",
b"excludesCurrentProcessAudio",
{"retval": {"type": b"Z"}},
)
r(
b"SCStreamConfiguration",
b"minimumFrameInterval",
{"retval": {"type": b"{_CMTime=qiIq}"}},
)
r(b"SCStreamConfiguration", b"scalesToFit", {"retval": {"type": b"Z"}})
r(
b"SCStreamConfiguration",
b"setCapturesAudio:",
{"arguments": {2: {"type": b"Z"}}},
)
r(
b"SCStreamConfiguration",
b"setExcludesCurrentProcessAudio:",
{"arguments": {2: {"type": b"Z"}}},
)
r(
b"SCStreamConfiguration",
b"setMinimumFrameInterval:",
{"arguments": {2: {"type": b"{_CMTime=qiIq}"}}},
)
r(b"SCStreamConfiguration", b"setScalesToFit:", {"arguments": {2: {"type": b"Z"}}})
r(b"SCStreamConfiguration", b"setShowsCursor:", {"arguments": {2: {"type": b"Z"}}})
r(b"SCStreamConfiguration", b"showsCursor", {"retval": {"type": b"Z"}})
r(b"SCWindow", b"isActive", {"retval": {"type": b"Z"}})
r(b"SCWindow", b"isOnScreen", {"retval": {"type": b"Z"}})
finally:
objc._updatingMetadata(False)
expressions = {}
# END OF FILE
|
984,201 | 6ecd64aa3b66928a3bd75e13a0248f6512dcefd2 | """
2027. 대각선 출력하기
주어진 텍스트를 그대로 출력하세요.
[Test]
입력
출력
#++++
+#+++
++#++
+++#+
++++#
"""
# Print the '#' marching diagonally across a 5x5 field of '+'.
template = '#++++'
for shift in range(5):
    # Rotating the template right by `shift` puts the '#' in column `shift`.
    print(template[-shift:] + template[:-shift])
|
984,202 | b530cb321b42cefe19d71492094d54b65be42c93 | # mpi finds the sum of a list
from mpi4py import MPI
import random
comm = MPI.COMM_WORLD  # communicator spanning every process in this job
rank = comm.Get_rank()  # this process's id; rank 0 acts as the coordinator below
size = comm.Get_size()  # total number of MPI processes
def createList(size):
    """Build and return a list of `size` random ints drawn from [0, size].

    The list is printed before it is returned so the generated data is
    visible in the job output.
    """
    # A comprehension replaces the manual append loop; `_` replaces the
    # unused loop variable `element`.
    numList = [random.randint(0, size) for _ in range(size)]
    print(numList)
    return numList
def sumOfList(size):
    """Generate a fresh random list of `size` elements and return its sum.

    Note this draws a *new* random list via createList; it does not sum an
    existing one.
    """
    # sum() replaces the manual accumulator loop.
    return sum(createList(size))
# --- distribute slices of the list and sum them locally ----------------------
# Rank 0 builds the full list, keeps the first slice for itself and sends one
# contiguous slice to every other rank; every other rank just receives its
# slice.  (The original had two `else` branches on a single `if` — a
# SyntaxError — iterated over the tuple (1, size) instead of range(1, size),
# and read the undefined names `result` and `partialLen`; all fixed here.)
if rank == 0:
    listSize = 1000
    numList = createList(listSize)
    partialLen = listSize // size  # length of each rank's slice
    print(partialLen)
    for proc in range(1, size):
        # Slice boundaries for the destination rank.
        start = proc * partialLen
        end = start + partialLen
        comm.send(numList[start:end], dest=proc)
    # Rank 0 keeps the leading slice plus any remainder left at the tail
    # when listSize is not evenly divisible by the process count.
    partial_list = numList[:partialLen] + numList[partialLen * size:]
else:
    partial_list = comm.recv(source=0)
print(partial_list)
# Each rank sums its own slice.  (The original called sumOfList() with no
# argument — a TypeError — and would have summed a brand-new random list
# rather than the slice that was just distributed.)
sum_x = sum(partial_list)
#after (file.read) reading a file the file will be a string
|
984,203 | acd9220838b8ed84ddba91324ccad2136d3d901e | import volar, pprint, ConfigParser, unittest
class TestThoroughBroadcast(unittest.TestCase):
    """Checks that we can connect to the CMS via, hopefully, valid credentials."""

    def setUp(self):
        # Pull connection settings from sample.cfg.  That file exists only for
        # this script, but you can copy its contents (and this code) into your
        # own scripts.
        cfg = ConfigParser.ConfigParser()
        cfg.read('sample.cfg')
        self.v = volar.Volar(
            base_url=cfg.get('settings', 'base_url'),
            api_key=cfg.get('settings', 'api_key'),
            secret=cfg.get('settings', 'secret'),
        )
if __name__ == '__main__':
    # Bug fix: the suite was built from the undefined name
    # TestBroadcastThorough; the class defined in this file is
    # TestThoroughBroadcast, so running this script raised NameError before
    # any test could execute.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestThoroughBroadcast)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
984,204 | d35aef60d841d37795f5fa1d04b336d38ffda328 | def counter(lst,Dict):
if len(lst) == 0:
lst[str(lst)] = 1
return 0
for key in Dict:
if key.upper() == lst.upper():
Dict[key] += 1
return 0
Dict[str(lst)] = 1
def sorting(Dict):
    """Return a new dict whose items are ordered by descending count.

    Replaces the original hand-written O(n^2) bubble sort with the built-in
    stable sort; ties keep their original insertion order, exactly as the
    bubble sort (which only swapped on a strict '<') did.
    """
    ranked = sorted(Dict.items(), key=lambda item: item[1], reverse=True)
    return dict(ranked)
def printDict(dict1, n):
    """Print the first `n` key/value pairs of `dict1`, one per line.

    If `n` is negative or exceeds the number of entries, an error message is
    printed and 0 is returned; otherwise None is returned after printing.
    """
    if n > len(dict1) or n < 0:
        # Bug fix: reworded the garbled error message ("Your input great in
        # list length or small of 0") into intelligible English.
        print("n must be between 0 and the number of entries")
        return 0
    for k, v in dict1.items():
        if n == 0:
            break
        print(k, v)
        n -= 1
|
984,205 | fe11e948c480e5a4264d37e41a3a23c480111e2c | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'GoogleCloudChannelV1AssociationInfoArgs',
'GoogleCloudChannelV1CommitmentSettingsArgs',
'GoogleCloudChannelV1ConditionalOverrideArgs',
'GoogleCloudChannelV1ContactInfoArgs',
'GoogleCloudChannelV1ParameterArgs',
'GoogleCloudChannelV1PercentageAdjustmentArgs',
'GoogleCloudChannelV1PeriodArgs',
'GoogleCloudChannelV1RenewalSettingsArgs',
'GoogleCloudChannelV1RepricingAdjustmentArgs',
'GoogleCloudChannelV1RepricingConditionArgs',
'GoogleCloudChannelV1RepricingConfigChannelPartnerGranularityArgs',
'GoogleCloudChannelV1RepricingConfigEntitlementGranularityArgs',
'GoogleCloudChannelV1RepricingConfigArgs',
'GoogleCloudChannelV1SkuGroupConditionArgs',
'GoogleCloudChannelV1ValueArgs',
'GoogleTypeDateArgs',
'GoogleTypeDecimalArgs',
'GoogleTypePostalAddressArgs',
]
@pulumi.input_type
class GoogleCloudChannelV1AssociationInfoArgs:
    # NOTE(review): machine-generated by the Pulumi SDK generator — regenerate
    # rather than hand-editing; comments below are for readers only.
    def __init__(__self__, *,
                 base_entitlement: Optional[pulumi.Input[str]] = None):
        """
        Association links that an entitlement has to other entitlements.
        :param pulumi.Input[str] base_entitlement: The name of the base entitlement, for which this entitlement is an add-on.
        """
        # Only store the property when a value was supplied; None stays unset.
        if base_entitlement is not None:
            pulumi.set(__self__, "base_entitlement", base_entitlement)

    @property
    @pulumi.getter(name="baseEntitlement")
    def base_entitlement(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the base entitlement, for which this entitlement is an add-on.
        """
        return pulumi.get(self, "base_entitlement")

    @base_entitlement.setter
    def base_entitlement(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "base_entitlement", value)
@pulumi.input_type
class GoogleCloudChannelV1CommitmentSettingsArgs:
def __init__(__self__, *,
renewal_settings: Optional[pulumi.Input['GoogleCloudChannelV1RenewalSettingsArgs']] = None):
"""
Commitment settings for commitment-based offers.
:param pulumi.Input['GoogleCloudChannelV1RenewalSettingsArgs'] renewal_settings: Optional. Renewal settings applicable for a commitment-based Offer.
"""
if renewal_settings is not None:
pulumi.set(__self__, "renewal_settings", renewal_settings)
@property
@pulumi.getter(name="renewalSettings")
def renewal_settings(self) -> Optional[pulumi.Input['GoogleCloudChannelV1RenewalSettingsArgs']]:
"""
Optional. Renewal settings applicable for a commitment-based Offer.
"""
return pulumi.get(self, "renewal_settings")
@renewal_settings.setter
def renewal_settings(self, value: Optional[pulumi.Input['GoogleCloudChannelV1RenewalSettingsArgs']]):
pulumi.set(self, "renewal_settings", value)
@pulumi.input_type
class GoogleCloudChannelV1ConditionalOverrideArgs:
def __init__(__self__, *,
adjustment: pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs'],
rebilling_basis: pulumi.Input['GoogleCloudChannelV1ConditionalOverrideRebillingBasis'],
repricing_condition: pulumi.Input['GoogleCloudChannelV1RepricingConditionArgs']):
"""
Specifies the override to conditionally apply.
:param pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs'] adjustment: Information about the applied override's adjustment.
:param pulumi.Input['GoogleCloudChannelV1ConditionalOverrideRebillingBasis'] rebilling_basis: The RebillingBasis to use for the applied override. Shows the relative cost based on your repricing costs.
:param pulumi.Input['GoogleCloudChannelV1RepricingConditionArgs'] repricing_condition: Specifies the condition which, if met, will apply the override.
"""
pulumi.set(__self__, "adjustment", adjustment)
pulumi.set(__self__, "rebilling_basis", rebilling_basis)
pulumi.set(__self__, "repricing_condition", repricing_condition)
@property
@pulumi.getter
def adjustment(self) -> pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs']:
"""
Information about the applied override's adjustment.
"""
return pulumi.get(self, "adjustment")
@adjustment.setter
def adjustment(self, value: pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs']):
pulumi.set(self, "adjustment", value)
@property
@pulumi.getter(name="rebillingBasis")
def rebilling_basis(self) -> pulumi.Input['GoogleCloudChannelV1ConditionalOverrideRebillingBasis']:
"""
The RebillingBasis to use for the applied override. Shows the relative cost based on your repricing costs.
"""
return pulumi.get(self, "rebilling_basis")
@rebilling_basis.setter
def rebilling_basis(self, value: pulumi.Input['GoogleCloudChannelV1ConditionalOverrideRebillingBasis']):
pulumi.set(self, "rebilling_basis", value)
@property
@pulumi.getter(name="repricingCondition")
def repricing_condition(self) -> pulumi.Input['GoogleCloudChannelV1RepricingConditionArgs']:
"""
Specifies the condition which, if met, will apply the override.
"""
return pulumi.get(self, "repricing_condition")
@repricing_condition.setter
def repricing_condition(self, value: pulumi.Input['GoogleCloudChannelV1RepricingConditionArgs']):
pulumi.set(self, "repricing_condition", value)
@pulumi.input_type
class GoogleCloudChannelV1ContactInfoArgs:
def __init__(__self__, *,
email: Optional[pulumi.Input[str]] = None,
first_name: Optional[pulumi.Input[str]] = None,
last_name: Optional[pulumi.Input[str]] = None,
phone: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None):
"""
Contact information for a customer account.
:param pulumi.Input[str] email: The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers.
:param pulumi.Input[str] first_name: The customer account contact's first name. Optional for Team customers.
:param pulumi.Input[str] last_name: The customer account contact's last name. Optional for Team customers.
:param pulumi.Input[str] phone: The customer account's contact phone number.
:param pulumi.Input[str] title: Optional. The customer account contact's job title.
"""
if email is not None:
pulumi.set(__self__, "email", email)
if first_name is not None:
pulumi.set(__self__, "first_name", first_name)
if last_name is not None:
pulumi.set(__self__, "last_name", last_name)
if phone is not None:
pulumi.set(__self__, "phone", phone)
if title is not None:
pulumi.set(__self__, "title", title)
@property
@pulumi.getter
def email(self) -> Optional[pulumi.Input[str]]:
"""
The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers.
"""
return pulumi.get(self, "email")
@email.setter
def email(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email", value)
@property
@pulumi.getter(name="firstName")
def first_name(self) -> Optional[pulumi.Input[str]]:
"""
The customer account contact's first name. Optional for Team customers.
"""
return pulumi.get(self, "first_name")
@first_name.setter
def first_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "first_name", value)
@property
@pulumi.getter(name="lastName")
def last_name(self) -> Optional[pulumi.Input[str]]:
"""
The customer account contact's last name. Optional for Team customers.
"""
return pulumi.get(self, "last_name")
@last_name.setter
def last_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_name", value)
@property
@pulumi.getter
def phone(self) -> Optional[pulumi.Input[str]]:
"""
The customer account's contact phone number.
"""
return pulumi.get(self, "phone")
@phone.setter
def phone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phone", value)
@property
@pulumi.getter
def title(self) -> Optional[pulumi.Input[str]]:
"""
Optional. The customer account contact's job title.
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "title", value)
@pulumi.input_type
class GoogleCloudChannelV1ParameterArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input['GoogleCloudChannelV1ValueArgs']] = None):
"""
Definition for extended entitlement parameters.
:param pulumi.Input[str] name: Name of the parameter.
:param pulumi.Input['GoogleCloudChannelV1ValueArgs'] value: Value of the parameter.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the parameter.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input['GoogleCloudChannelV1ValueArgs']]:
"""
Value of the parameter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input['GoogleCloudChannelV1ValueArgs']]):
pulumi.set(self, "value", value)
@pulumi.input_type
class GoogleCloudChannelV1PercentageAdjustmentArgs:
def __init__(__self__, *,
percentage: Optional[pulumi.Input['GoogleTypeDecimalArgs']] = None):
"""
An adjustment that applies a flat markup or markdown to an entire bill.
:param pulumi.Input['GoogleTypeDecimalArgs'] percentage: The percentage of the bill to adjust. For example: Mark down by 1% => "-1.00" Mark up by 1% => "1.00" Pass-Through => "0.00"
"""
if percentage is not None:
pulumi.set(__self__, "percentage", percentage)
@property
@pulumi.getter
def percentage(self) -> Optional[pulumi.Input['GoogleTypeDecimalArgs']]:
"""
The percentage of the bill to adjust. For example: Mark down by 1% => "-1.00" Mark up by 1% => "1.00" Pass-Through => "0.00"
"""
return pulumi.get(self, "percentage")
@percentage.setter
def percentage(self, value: Optional[pulumi.Input['GoogleTypeDecimalArgs']]):
pulumi.set(self, "percentage", value)
@pulumi.input_type
class GoogleCloudChannelV1PeriodArgs:
def __init__(__self__, *,
duration: Optional[pulumi.Input[int]] = None,
period_type: Optional[pulumi.Input['GoogleCloudChannelV1PeriodPeriodType']] = None):
"""
Represents period in days/months/years.
:param pulumi.Input[int] duration: Total duration of Period Type defined.
:param pulumi.Input['GoogleCloudChannelV1PeriodPeriodType'] period_type: Period Type.
"""
if duration is not None:
pulumi.set(__self__, "duration", duration)
if period_type is not None:
pulumi.set(__self__, "period_type", period_type)
@property
@pulumi.getter
def duration(self) -> Optional[pulumi.Input[int]]:
"""
Total duration of Period Type defined.
"""
return pulumi.get(self, "duration")
@duration.setter
def duration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "duration", value)
@property
@pulumi.getter(name="periodType")
def period_type(self) -> Optional[pulumi.Input['GoogleCloudChannelV1PeriodPeriodType']]:
"""
Period Type.
"""
return pulumi.get(self, "period_type")
@period_type.setter
def period_type(self, value: Optional[pulumi.Input['GoogleCloudChannelV1PeriodPeriodType']]):
pulumi.set(self, "period_type", value)
@pulumi.input_type
class GoogleCloudChannelV1RenewalSettingsArgs:
def __init__(__self__, *,
enable_renewal: Optional[pulumi.Input[bool]] = None,
payment_cycle: Optional[pulumi.Input['GoogleCloudChannelV1PeriodArgs']] = None,
payment_plan: Optional[pulumi.Input['GoogleCloudChannelV1RenewalSettingsPaymentPlan']] = None,
resize_unit_count: Optional[pulumi.Input[bool]] = None):
"""
Renewal settings for renewable Offers.
:param pulumi.Input[bool] enable_renewal: If false, the plan will be completed at the end date.
:param pulumi.Input['GoogleCloudChannelV1PeriodArgs'] payment_cycle: Describes how frequently the reseller will be billed, such as once per month.
:param pulumi.Input['GoogleCloudChannelV1RenewalSettingsPaymentPlan'] payment_plan: Describes how a reseller will be billed.
:param pulumi.Input[bool] resize_unit_count: If true and enable_renewal = true, the unit (for example seats or licenses) will be set to the number of active units at renewal time.
"""
if enable_renewal is not None:
pulumi.set(__self__, "enable_renewal", enable_renewal)
if payment_cycle is not None:
pulumi.set(__self__, "payment_cycle", payment_cycle)
if payment_plan is not None:
pulumi.set(__self__, "payment_plan", payment_plan)
if resize_unit_count is not None:
pulumi.set(__self__, "resize_unit_count", resize_unit_count)
@property
@pulumi.getter(name="enableRenewal")
def enable_renewal(self) -> Optional[pulumi.Input[bool]]:
"""
If false, the plan will be completed at the end date.
"""
return pulumi.get(self, "enable_renewal")
@enable_renewal.setter
def enable_renewal(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_renewal", value)
@property
@pulumi.getter(name="paymentCycle")
def payment_cycle(self) -> Optional[pulumi.Input['GoogleCloudChannelV1PeriodArgs']]:
"""
Describes how frequently the reseller will be billed, such as once per month.
"""
return pulumi.get(self, "payment_cycle")
@payment_cycle.setter
def payment_cycle(self, value: Optional[pulumi.Input['GoogleCloudChannelV1PeriodArgs']]):
pulumi.set(self, "payment_cycle", value)
@property
@pulumi.getter(name="paymentPlan")
def payment_plan(self) -> Optional[pulumi.Input['GoogleCloudChannelV1RenewalSettingsPaymentPlan']]:
"""
Describes how a reseller will be billed.
"""
return pulumi.get(self, "payment_plan")
@payment_plan.setter
def payment_plan(self, value: Optional[pulumi.Input['GoogleCloudChannelV1RenewalSettingsPaymentPlan']]):
pulumi.set(self, "payment_plan", value)
@property
@pulumi.getter(name="resizeUnitCount")
def resize_unit_count(self) -> Optional[pulumi.Input[bool]]:
"""
If true and enable_renewal = true, the unit (for example seats or licenses) will be set to the number of active units at renewal time.
"""
return pulumi.get(self, "resize_unit_count")
@resize_unit_count.setter
def resize_unit_count(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "resize_unit_count", value)
@pulumi.input_type
class GoogleCloudChannelV1RepricingAdjustmentArgs:
def __init__(__self__, *,
percentage_adjustment: Optional[pulumi.Input['GoogleCloudChannelV1PercentageAdjustmentArgs']] = None):
"""
A type that represents the various adjustments you can apply to a bill.
:param pulumi.Input['GoogleCloudChannelV1PercentageAdjustmentArgs'] percentage_adjustment: Flat markup or markdown on an entire bill.
"""
if percentage_adjustment is not None:
pulumi.set(__self__, "percentage_adjustment", percentage_adjustment)
@property
@pulumi.getter(name="percentageAdjustment")
def percentage_adjustment(self) -> Optional[pulumi.Input['GoogleCloudChannelV1PercentageAdjustmentArgs']]:
"""
Flat markup or markdown on an entire bill.
"""
return pulumi.get(self, "percentage_adjustment")
@percentage_adjustment.setter
def percentage_adjustment(self, value: Optional[pulumi.Input['GoogleCloudChannelV1PercentageAdjustmentArgs']]):
pulumi.set(self, "percentage_adjustment", value)
@pulumi.input_type
class GoogleCloudChannelV1RepricingConditionArgs:
def __init__(__self__, *,
sku_group_condition: Optional[pulumi.Input['GoogleCloudChannelV1SkuGroupConditionArgs']] = None):
"""
Represents the various repricing conditions you can use for a conditional override.
:param pulumi.Input['GoogleCloudChannelV1SkuGroupConditionArgs'] sku_group_condition: SKU Group condition for override.
"""
if sku_group_condition is not None:
pulumi.set(__self__, "sku_group_condition", sku_group_condition)
@property
@pulumi.getter(name="skuGroupCondition")
def sku_group_condition(self) -> Optional[pulumi.Input['GoogleCloudChannelV1SkuGroupConditionArgs']]:
"""
SKU Group condition for override.
"""
return pulumi.get(self, "sku_group_condition")
@sku_group_condition.setter
def sku_group_condition(self, value: Optional[pulumi.Input['GoogleCloudChannelV1SkuGroupConditionArgs']]):
pulumi.set(self, "sku_group_condition", value)
@pulumi.input_type
class GoogleCloudChannelV1RepricingConfigChannelPartnerGranularityArgs:
    # Intentionally empty: the channel partner is derived from the resource
    # name, so this input type carries no fields of its own.
    def __init__(__self__):
        """
        Applies the repricing configuration at the channel partner level. The channel partner value is derived from the resource name. Takes an empty json object.
        """
        pass
@pulumi.input_type
class GoogleCloudChannelV1RepricingConfigEntitlementGranularityArgs:
def __init__(__self__, *,
entitlement: Optional[pulumi.Input[str]] = None):
"""
Applies the repricing configuration at the entitlement level.
:param pulumi.Input[str] entitlement: Resource name of the entitlement. Format: accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
"""
if entitlement is not None:
pulumi.set(__self__, "entitlement", entitlement)
@property
@pulumi.getter
def entitlement(self) -> Optional[pulumi.Input[str]]:
"""
Resource name of the entitlement. Format: accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
"""
return pulumi.get(self, "entitlement")
@entitlement.setter
def entitlement(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "entitlement", value)
@pulumi.input_type
class GoogleCloudChannelV1RepricingConfigArgs:
def __init__(__self__, *,
adjustment: pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs'],
effective_invoice_month: pulumi.Input['GoogleTypeDateArgs'],
rebilling_basis: pulumi.Input['GoogleCloudChannelV1RepricingConfigRebillingBasis'],
channel_partner_granularity: Optional[pulumi.Input['GoogleCloudChannelV1RepricingConfigChannelPartnerGranularityArgs']] = None,
conditional_overrides: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudChannelV1ConditionalOverrideArgs']]]] = None,
entitlement_granularity: Optional[pulumi.Input['GoogleCloudChannelV1RepricingConfigEntitlementGranularityArgs']] = None):
"""
Configuration for repricing a Google bill over a period of time.
:param pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs'] adjustment: Information about the adjustment.
:param pulumi.Input['GoogleTypeDateArgs'] effective_invoice_month: The YearMonth when these adjustments activate. The Day field needs to be "0" since we only accept YearMonth repricing boundaries.
:param pulumi.Input['GoogleCloudChannelV1RepricingConfigRebillingBasis'] rebilling_basis: The RebillingBasis to use for this bill. Specifies the relative cost based on repricing costs you will apply.
:param pulumi.Input['GoogleCloudChannelV1RepricingConfigChannelPartnerGranularityArgs'] channel_partner_granularity: Applies the repricing configuration at the channel partner level. This is the only supported value for ChannelPartnerRepricingConfig.
:param pulumi.Input[Sequence[pulumi.Input['GoogleCloudChannelV1ConditionalOverrideArgs']]] conditional_overrides: The conditional overrides to apply for this configuration. If you list multiple overrides, only the first valid override is used. If you don't list any overrides, the API uses the normal adjustment and rebilling basis.
:param pulumi.Input['GoogleCloudChannelV1RepricingConfigEntitlementGranularityArgs'] entitlement_granularity: Applies the repricing configuration at the entitlement level. This is the only supported value for CustomerRepricingConfig.
"""
pulumi.set(__self__, "adjustment", adjustment)
pulumi.set(__self__, "effective_invoice_month", effective_invoice_month)
pulumi.set(__self__, "rebilling_basis", rebilling_basis)
if channel_partner_granularity is not None:
pulumi.set(__self__, "channel_partner_granularity", channel_partner_granularity)
if conditional_overrides is not None:
pulumi.set(__self__, "conditional_overrides", conditional_overrides)
if entitlement_granularity is not None:
pulumi.set(__self__, "entitlement_granularity", entitlement_granularity)
@property
@pulumi.getter
def adjustment(self) -> pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs']:
"""
Information about the adjustment.
"""
return pulumi.get(self, "adjustment")
@adjustment.setter
def adjustment(self, value: pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs']):
pulumi.set(self, "adjustment", value)
@property
@pulumi.getter(name="effectiveInvoiceMonth")
def effective_invoice_month(self) -> pulumi.Input['GoogleTypeDateArgs']:
"""
The YearMonth when these adjustments activate. The Day field needs to be "0" since we only accept YearMonth repricing boundaries.
"""
return pulumi.get(self, "effective_invoice_month")
@effective_invoice_month.setter
def effective_invoice_month(self, value: pulumi.Input['GoogleTypeDateArgs']):
pulumi.set(self, "effective_invoice_month", value)
@property
@pulumi.getter(name="rebillingBasis")
def rebilling_basis(self) -> pulumi.Input['GoogleCloudChannelV1RepricingConfigRebillingBasis']:
"""
The RebillingBasis to use for this bill. Specifies the relative cost based on repricing costs you will apply.
"""
return pulumi.get(self, "rebilling_basis")
@rebilling_basis.setter
def rebilling_basis(self, value: pulumi.Input['GoogleCloudChannelV1RepricingConfigRebillingBasis']):
pulumi.set(self, "rebilling_basis", value)
@property
@pulumi.getter(name="channelPartnerGranularity")
def channel_partner_granularity(self) -> Optional[pulumi.Input['GoogleCloudChannelV1RepricingConfigChannelPartnerGranularityArgs']]:
"""
Applies the repricing configuration at the channel partner level. This is the only supported value for ChannelPartnerRepricingConfig.
"""
return pulumi.get(self, "channel_partner_granularity")
@channel_partner_granularity.setter
def channel_partner_granularity(self, value: Optional[pulumi.Input['GoogleCloudChannelV1RepricingConfigChannelPartnerGranularityArgs']]):
pulumi.set(self, "channel_partner_granularity", value)
@property
@pulumi.getter(name="conditionalOverrides")
def conditional_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudChannelV1ConditionalOverrideArgs']]]]:
"""
The conditional overrides to apply for this configuration. If you list multiple overrides, only the first valid override is used. If you don't list any overrides, the API uses the normal adjustment and rebilling basis.
"""
return pulumi.get(self, "conditional_overrides")
@conditional_overrides.setter
def conditional_overrides(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudChannelV1ConditionalOverrideArgs']]]]):
pulumi.set(self, "conditional_overrides", value)
@property
@pulumi.getter(name="entitlementGranularity")
def entitlement_granularity(self) -> Optional[pulumi.Input['GoogleCloudChannelV1RepricingConfigEntitlementGranularityArgs']]:
"""
Applies the repricing configuration at the entitlement level. This is the only supported value for CustomerRepricingConfig.
"""
return pulumi.get(self, "entitlement_granularity")
@entitlement_granularity.setter
def entitlement_granularity(self, value: Optional[pulumi.Input['GoogleCloudChannelV1RepricingConfigEntitlementGranularityArgs']]):
pulumi.set(self, "entitlement_granularity", value)
@pulumi.input_type
class GoogleCloudChannelV1SkuGroupConditionArgs:
    def __init__(__self__, *,
                 sku_group: Optional[pulumi.Input[str]] = None):
        """Condition that matches when a line item SKU belongs to the given SKU group.

        :param sku_group: Resource name of the SKU group
            (https://cloud.google.com/skus/sku-groups), in the form
            accounts/{account}/skuGroups/{sku_group}.
        """
        # Only record the field when the caller actually supplied it.
        if sku_group is not None:
            pulumi.set(__self__, "sku_group", sku_group)

    @property
    @pulumi.getter(name="skuGroup")
    def sku_group(self) -> Optional[pulumi.Input[str]]:
        """Resource name of the SKU group: accounts/{account}/skuGroups/{sku_group}."""
        return pulumi.get(self, "sku_group")

    @sku_group.setter
    def sku_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sku_group", value)
@pulumi.input_type
class GoogleCloudChannelV1ValueArgs:
    def __init__(__self__, *,
                 bool_value: Optional[pulumi.Input[bool]] = None,
                 double_value: Optional[pulumi.Input[float]] = None,
                 int64_value: Optional[pulumi.Input[str]] = None,
                 proto_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 string_value: Optional[pulumi.Input[str]] = None):
        """Holds the data type and value of a parameter.

        :param bool_value: Boolean payload.
        :param double_value: Double payload.
        :param int64_value: Int64 payload, transported as a string.
        :param proto_value: 'Any' proto payload.
        :param string_value: String payload.
        """
        # Forward only the fields the caller actually supplied.
        for name, val in (
                ("bool_value", bool_value),
                ("double_value", double_value),
                ("int64_value", int64_value),
                ("proto_value", proto_value),
                ("string_value", string_value)):
            if val is not None:
                pulumi.set(__self__, name, val)

    @property
    @pulumi.getter(name="boolValue")
    def bool_value(self) -> Optional[pulumi.Input[bool]]:
        """Boolean payload."""
        return pulumi.get(self, "bool_value")

    @bool_value.setter
    def bool_value(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bool_value", value)

    @property
    @pulumi.getter(name="doubleValue")
    def double_value(self) -> Optional[pulumi.Input[float]]:
        """Double payload."""
        return pulumi.get(self, "double_value")

    @double_value.setter
    def double_value(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "double_value", value)

    @property
    @pulumi.getter(name="int64Value")
    def int64_value(self) -> Optional[pulumi.Input[str]]:
        """Int64 payload, transported as a string."""
        return pulumi.get(self, "int64_value")

    @int64_value.setter
    def int64_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "int64_value", value)

    @property
    @pulumi.getter(name="protoValue")
    def proto_value(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """'Any' proto payload."""
        return pulumi.get(self, "proto_value")

    @proto_value.setter
    def proto_value(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "proto_value", value)

    @property
    @pulumi.getter(name="stringValue")
    def string_value(self) -> Optional[pulumi.Input[str]]:
        """String payload."""
        return pulumi.get(self, "string_value")

    @string_value.setter
    def string_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "string_value", value)
@pulumi.input_type
class GoogleTypeDateArgs:
    def __init__(__self__, *,
                 day: Optional[pulumi.Input[int]] = None,
                 month: Optional[pulumi.Input[int]] = None,
                 year: Optional[pulumi.Input[int]] = None):
        """Whole or partial Gregorian calendar date (google.type.Date).

        Any component may be 0 to mark it as unspecified, e.g. a year on
        its own (month=0, day=0) or a month-and-day anniversary (year=0).
        Time of day and time zone are out of scope for this type.

        :param day: Day of the month, 1-31 and valid for year/month, or 0.
        :param month: Month of the year, 1-12, or 0.
        :param year: Year, 1-9999, or 0.
        """
        for name, val in (("day", day), ("month", month), ("year", year)):
            if val is not None:
                pulumi.set(__self__, name, val)

    @property
    @pulumi.getter
    def day(self) -> Optional[pulumi.Input[int]]:
        """Day of the month, 1-31; 0 when the day is not significant."""
        return pulumi.get(self, "day")

    @day.setter
    def day(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "day", value)

    @property
    @pulumi.getter
    def month(self) -> Optional[pulumi.Input[int]]:
        """Month of the year, 1-12; 0 for a year without month and day."""
        return pulumi.get(self, "month")

    @month.setter
    def month(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "month", value)

    @property
    @pulumi.getter
    def year(self) -> Optional[pulumi.Input[int]]:
        """Year of the date, 1-9999; 0 for a date without a year."""
        return pulumi.get(self, "year")

    @year.setter
    def year(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "year", value)
@pulumi.input_type
class GoogleTypeDecimalArgs:
    def __init__(__self__, *,
                 value: Optional[pulumi.Input[str]] = None):
        """Decimal number carried as a string (google.type.Decimal).

        Comparable to Java's BigDecimal or Python's decimal.Decimal.

        :param value: Decimal literal: optional sign (+/-), digits, an
            optional fraction introduced by '.', and an optional
            exponent ('e'/'E' plus digits); at least one digit must
            appear in the integer or fraction, and an empty string
            means 0. Only '.' divides integer and fraction — no locale
            separators. Services normalize stored values (drop explicit
            '+', pad a leading 0, upper-case the exponent with explicit
            sign, drop zero exponents) per the EBNF grammar in the API
            docs, and may round or reject out-of-range input with
            400/INVALID_ARGUMENT.
        """
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """The decimal value as a string: sign, digits, optional '.' fraction, optional exponent."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class GoogleTypePostalAddressArgs:
    def __init__(__self__, *,
                 region_code: pulumi.Input[str],
                 address_lines: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 administrative_area: Optional[pulumi.Input[str]] = None,
                 language_code: Optional[pulumi.Input[str]] = None,
                 locality: Optional[pulumi.Input[str]] = None,
                 organization: Optional[pulumi.Input[str]] = None,
                 postal_code: Optional[pulumi.Input[str]] = None,
                 recipients: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 revision: Optional[pulumi.Input[int]] = None,
                 sorting_code: Optional[pulumi.Input[str]] = None,
                 sublocality: Optional[pulumi.Input[str]] = None):
        """Internationalized postal address (google.type.PostalAddress).

        Models delivery/payment addresses, not geographic locations. See
        https://support.google.com/business/answer/6397478 for guidance.

        :param region_code: Required CLDR region code, e.g. "CH"; never inferred.
        :param address_lines: Free-form lower-level address lines, in
            "envelope order" for the region; region_code plus
            address_lines is the minimal structured representation.
        :param administrative_area: Optional highest subdivision (state,
            province, oblast, prefecture); unused in some countries.
        :param language_code: Optional BCP-47 language code of the
            contents, e.g. "zh-Hant", "ja-Latn"; omit when unknown.
        :param locality: Optional city/town portion; leave empty and use
            address_lines where localities are ill-defined.
        :param organization: Optional organization name at the address.
        :param postal_code: Optional postal code; may trigger extra
            validation where used (e.g. US state/zip).
        :param recipients: Optional recipient lines, possibly multiline
            ("care of" information).
        :param revision: Schema revision; must be 0 (the latest).
        :param sorting_code: Optional country-specific sorting code,
            e.g. "CEDEX 7" or a sector/delivery-area number.
        :param sublocality: Optional neighborhood, borough or district.
        """
        # region_code is the only required field; everything else is
        # forwarded only when the caller supplied it.
        pulumi.set(__self__, "region_code", region_code)
        for name, val in (
                ("address_lines", address_lines),
                ("administrative_area", administrative_area),
                ("language_code", language_code),
                ("locality", locality),
                ("organization", organization),
                ("postal_code", postal_code),
                ("recipients", recipients),
                ("revision", revision),
                ("sorting_code", sorting_code),
                ("sublocality", sublocality)):
            if val is not None:
                pulumi.set(__self__, name, val)

    @property
    @pulumi.getter(name="regionCode")
    def region_code(self) -> pulumi.Input[str]:
        """Required CLDR region code of the address's country/region, e.g. "CH"."""
        return pulumi.get(self, "region_code")

    @region_code.setter
    def region_code(self, value: pulumi.Input[str]):
        pulumi.set(self, "region_code", value)

    @property
    @pulumi.getter(name="addressLines")
    def address_lines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Free-form lower-level address lines, in envelope order for the region."""
        return pulumi.get(self, "address_lines")

    @address_lines.setter
    def address_lines(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "address_lines", value)

    @property
    @pulumi.getter(name="administrativeArea")
    def administrative_area(self) -> Optional[pulumi.Input[str]]:
        """Optional. Highest administrative subdivision (state, province, oblast, prefecture)."""
        return pulumi.get(self, "administrative_area")

    @administrative_area.setter
    def administrative_area(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "administrative_area", value)

    @property
    @pulumi.getter(name="languageCode")
    def language_code(self) -> Optional[pulumi.Input[str]]:
        """Optional. BCP-47 language code of the address contents, e.g. "ja-Latn"."""
        return pulumi.get(self, "language_code")

    @language_code.setter
    def language_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "language_code", value)

    @property
    @pulumi.getter
    def locality(self) -> Optional[pulumi.Input[str]]:
        """Optional. City/town portion of the address."""
        return pulumi.get(self, "locality")

    @locality.setter
    def locality(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "locality", value)

    @property
    @pulumi.getter
    def organization(self) -> Optional[pulumi.Input[str]]:
        """Optional. Name of the organization at the address."""
        return pulumi.get(self, "organization")

    @organization.setter
    def organization(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "organization", value)

    @property
    @pulumi.getter(name="postalCode")
    def postal_code(self) -> Optional[pulumi.Input[str]]:
        """Optional. Postal code, where the country uses them."""
        return pulumi.get(self, "postal_code")

    @postal_code.setter
    def postal_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "postal_code", value)

    @property
    @pulumi.getter
    def recipients(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Optional. Recipients at the address; may contain "care of" lines."""
        return pulumi.get(self, "recipients")

    @recipients.setter
    def recipients(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "recipients", value)

    @property
    @pulumi.getter
    def revision(self) -> Optional[pulumi.Input[int]]:
        """Schema revision of the `PostalAddress`; must be 0."""
        return pulumi.get(self, "revision")

    @revision.setter
    def revision(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "revision", value)

    @property
    @pulumi.getter(name="sortingCode")
    def sorting_code(self) -> Optional[pulumi.Input[str]]:
        """Optional. Country-specific sorting code, e.g. "CEDEX 7" or a sector number."""
        return pulumi.get(self, "sorting_code")

    @sorting_code.setter
    def sorting_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sorting_code", value)

    @property
    @pulumi.getter
    def sublocality(self) -> Optional[pulumi.Input[str]]:
        """Optional. Sublocality such as a neighborhood, borough or district."""
        return pulumi.get(self, "sublocality")

    @sublocality.setter
    def sublocality(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sublocality", value)
|
984,206 | 8b0836a399f8c1866c5f6ee50edb1e5fda01d8de | import pygame
FLOOR = 0
BRICK = 1

# 13x15 maze grid: solid brick border, and a pillar wherever an even row
# crosses an even column; every other cell is walkable floor. Generating
# the grid from that rule produces exactly the original hand-written map.
tilemap = [
    [
        BRICK if r in (0, 12) or c in (0, 14) or (r % 2 == 0 and c % 2 == 0)
        else FLOOR
        for c in range(15)
    ]
    for r in range(13)
]
984,207 | 40f8638ea933540f7a897c4e5547ca491de4abe0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# openvas.py
#
# Copyright (c) 2021 Simon Krenz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__version__ = '0.1'
__author__ = 'Simon Krenz'
from .helper import Helper
from . import constant
from bs4 import BeautifulSoup
import logging
class OpenVAS:
def __init__(self,
             hostname=None,
             port=None,
             basic_auth_user=None,
             basic_auth_pass=None,
             username=None,
             password=None):
    """Create an OpenVAS web-UI client and authenticate immediately.

    Requires both HTTP basic-auth credentials and OpenVAS user
    credentials; raises ``Exception`` when either pair is missing.
    Construction performs network I/O: it logs in and caches the
    available report format ids.
    """
    self.logger = logging.getLogger('OpenVAS_API')
    if constant.DEBUG:
        self.logger.setLevel(logging.DEBUG)
    # Fail fast on incomplete credentials before any network traffic.
    if basic_auth_user is None or basic_auth_pass is None:
        raise Exception('[ERROR] Missing basic auth username or password.')
    if username is None or password is None:
        raise Exception('[ERROR] Missing username or password.')
    # NOTE(review): basename is the bare hostname (no scheme/port) and is
    # what the request helpers receive, while baseurl carries
    # https://host:port but is never used below — presumably Helper adds
    # the scheme itself; confirm against Helper._get_request/_post_request.
    self.basename = hostname
    self.baseurl = f"https://{hostname}:{port}"
    self.basic_auth = True
    self.basic_auth_user = basic_auth_user
    self.basic_auth_pass = basic_auth_pass
    self.username = username
    self.password = password
    self.helper = Helper()
    self.xml_reports = dict()  # report_id -> summary dict, filled by get_xml_reports()
    # Headers mimicking a browser session; values live in constant.py.
    self.headers = {
        'Accept': constant.HEADER_ACCEPT,
        'Accept-Encoding': constant.HEADER_ACCEPT_ENCODING,
        'Accept-Language': constant.HEADER_ACCEPT_LANGUAGE,
        'Cache-Control': constant.HEADER_CACHE_CONTROL,
        'Connection': constant.HEADER_CONNECTION,
        'Content-Type': constant.HEADER_CONTENT_TYPE,
        'User-Agent': constant.HEADER_USER_AGENT,
        'X-Requested-With': constant.HEADER_X_REQUESTED_WITH,
    }
    # Authenticate and discover report format ids up front.
    self.login()
    self._get_report_format_ids()
def _login_token(self):
    """Fetch an OpenVAS auth token plus session cookies.

    Posts the login form via ``Helper._post_request`` and, on HTTP 200,
    stores the parsed token in ``self.token`` and the response cookies
    in ``self.cookies``. Returns ``None``; raises ``Exception`` on any
    other status code.
    """
    data = {
        'cmd': 'login',
        'login': self.username,
        'password': self.password,
    }
    # NOTE(review): the first argument is the bare hostname, not the
    # https:// base URL built in __init__ — verify Helper adds the scheme.
    token = self.helper._post_request(
        self.basename,
        self.basic_auth,
        data,
        self.headers)
    if token.status_code == 200:
        # Response is XML; the 'lxml' parser lower-cases tag names.
        xml_response = BeautifulSoup(token.content, 'lxml')
        self.token = xml_response.find('token').get_text()
        self.cookies = token.cookies.get_dict()
    else:
        raise Exception('[FAIL] Could not login to OpenVAS')
def login(self):
"""Calls _login_token to set auth token and cookies
"""
r = self._login_token()
def _get_report_format_ids(self):
    """Discover the server's report formats and cache the ids we need.

    Sets ``self.xml_report_id`` and ``self.csv_report_id`` from the
    formats named 'XML' and 'CSV Results'. Raises ``Exception`` when
    the request fails.
    """
    self.logger.info('[INFO] Retrieving all available OpenVAS report formats...')
    params = {
        'cmd': 'get_report_formats',
        'token': self.token,
    }
    url = self.basename + "/gmp"
    r = self.helper._get_request(
        url,
        self.basic_auth,
        params,
        self.headers,
        self.cookies)
    if r.status_code != 200:
        raise Exception('[FAIL] Could not get report formats from OpenVAS')
    xml_response = BeautifulSoup(r.content, 'lxml')
    for report in xml_response.find_all('report_format'):
        # Hoisted: the original called findChild twice per format.
        name = report.findChild('name', recursive=False).text
        if name == 'XML':
            self.xml_report_id = report.get('id')
        elif name == 'CSV Results':
            self.csv_report_id = report.get('id')
    # Removed a leftover debug print of csv_report_id, which also raised
    # AttributeError whenever the 'CSV Results' format was absent.
def get_xml_reports(self):
    """Fetch summaries of the 10 newest XML reports into ``self.xml_reports``.

    Keys are report ids; each value records the report name, host and
    vulnerability counts, per-severity totals and overall severity (all
    as strings, as returned by GMP). Raises ``Exception`` when the
    request fails.
    """
    self.logger.info('[INFO] Retrieving all existing OpenVAS reports...')
    params = {
        'cmd': 'get_reports',
        'token': self.token,
        'details': 0,
        'filter': 'sort-reverse=date first=1 rows=10'
    }
    url = self.basename + "/gmp"
    r = self.helper._get_request(
        url,
        self.basic_auth,
        params,
        self.headers,
        self.cookies)
    if r.status_code != 200:
        raise Exception('[FAIL] Could not get reports from OpenVAS')
    xml_response = BeautifulSoup(r.content, 'lxml')
    reports_xml = xml_response.find_all('report', {
        'extension': 'xml',
        'format_id': self.xml_report_id})
    for report in reports_xml:
        # The original first assigned an empty dict that was immediately
        # overwritten; build the summary in one step.
        self.xml_reports[report.get('id')] = {
            'name': report.findChild('name', recursive=False).get_text(),
            'hosts': report.findChild('hosts').get_text(),
            'vulns': report.findChild('vulns').get_text(),
            'high': report.findChild('hole').findChild('full').get_text(),
            'medium': report.findChild('warning').findChild('full').get_text(),
            'low': report.findChild('info').findChild('full').get_text(),
            'log': report.findChild('log').findChild('full').get_text(),
            'severity': report.findChild('severity').findChild('full').get_text(),
        }
def get_report(self, report_id: str):
"""Retrieves a specific report by id
"""
self.logger.info(f'[INFO] Retrieving OpenVAS report {report_id}...')
params = {
'cmd': 'get_report',
'token': self.token,
'report_id': report_id,
'filter': 'apply_overrides=0 min_qod=70 autofp=0 levels=hml first=1 rows=0 sort-reverse=severity',
'ignore_pagination': 1,
'report_format_id': self.csv_report_id,
'submit': 'Download',
}
|
984,208 | 457cdc4f385bfce603ba23a8b3e8eaefab7e2e13 | '''
>>> b = Baralho()
>>> b[0]
<A de copas>
>>> b[:3]
[<A de copas>, <2 de copas>, <3 de copas>]
>>> b[-3:]
[<J de paus>, <Q de paus>, <K de paus>]
>>> for carta in b: # doctest:+ELLIPSIS
... print carta
<A de copas>
<2 de copas>
<3 de copas>
<4 de copas>
<5 de copas>
...
>>> for carta in reversed(b): # doctest:+ELLIPSIS
... print carta
<K de paus>
<Q de paus>
<J de paus>
<10 de paus>
...
>>> b = Baralho()
>>> iterb = iter(b)
>>> iterb.next()
<A de copas>
>>> iterb.next()
<2 de copas>
>>> bi = BaralhoInverso()
>>> iterbi = iter(bi)
>>> iterbi.next()
<K de paus>
>>> iterbi.next()
<Q de paus>
'''
from random import shuffle
class Carta(object):
    """A single playing card: a value (A, 2-10, J, Q, K) and a suit."""

    def __init__(self, valor, naipe):
        self.valor = valor
        self.naipe = naipe

    def __repr__(self):
        # Same '<valor de naipe>' text the doctests expect.
        return '<{0} de {1}>'.format(self.valor, self.naipe)
class Baralho(object):
    """A 52-card deck, ordered suit by suit (copas, ouros, espadas, paus)."""

    naipes = 'copas ouros espadas paus'.split()
    valores = 'A 2 3 4 5 6 7 8 9 10 J Q K'.split()

    def __init__(self):
        # Build the deck one suit at a time, A..K within each suit.
        self.cartas = []
        for naipe in self.naipes:
            for valor in self.valores:
                self.cartas.append(Carta(valor, naipe))

    def __getitem__(self, pos):
        # Supports indexing, slicing and (via the sequence protocol) iteration.
        return self.cartas[pos]

    def __len__(self):
        return len(self.cartas)
class BaralhoInverso(Baralho):
    """Deck iterated from the last card to the first.

    BUG FIX: the original stored ``reversed(range(...))`` — a one-shot
    iterator — so only the first ``for`` over the deck produced cards and
    every later iteration was silently empty.  The indices are now
    materialised into a list, making the deck re-iterable.
    """
    def __init__(self):
        Baralho.__init__(self)
        self.inverse_index = list(reversed(range(len(self.cartas))))

    def __iter__(self):
        for index in self.inverse_index:
            yield self.cartas[index]
class BaralhoMisturado(Baralho):
    """Deck iterated in a random order fixed at construction time.

    BUG FIX: ``shuffle`` was called on a bare ``range(...)``, which is an
    immutable sequence under Python 3 and raises TypeError; the indices
    are now materialised into a list first (also valid under Python 2).
    """
    def __init__(self):
        Baralho.__init__(self)
        self.shuffle_index = list(range(len(self.cartas)))
        shuffle(self.shuffle_index)

    def __iter__(self):
        for index in self.shuffle_index:
            yield self.cartas[index]
984,209 | 90114fc0f6587707e443917467f430f74b8d3755 | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 29 21:55:42 2019
@author: Administrator
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from matplotlib import gridspec
# Load every oscilloscope trace under ./TT; each entry of `alllines` is
# [filename, time array, voltage array].
alllines = []
path = "TT"
filelst = os.listdir(path)
for file in filelst:
    f = open(os.path.join(path + "/"+file),"r")
    lines = f.readlines()
    data = [i.replace("\n","") for i in lines]
    data = [i.split(",") for i in data]
    data_x = [i[0] for i in data]
    data_y = [i[1] for i in data]
    # *1000 converts to ms / mV — assumes files store seconds and volts;
    # TODO confirm units against the instrument export.
    data_x = np.array(data_x,dtype = "float64")*1000
    data_y = np.array(data_y,dtype = "float64")*1000
    data = [file]+ [data_x] + [data_y]
    alllines.append(data)
    f.close()
np.save('allines',alllines)
# =============================================================================
#
# =============================================================================
def linear_fit(x,m,b):
    """Straight-line model for curve_fit: slope `m`, intercept `b`."""
    return b + m * x
# =============================================================================
#
# =============================================================================
#alllines = np.load('allines.npy')
# Locate the dark-signal trace (d), the background trace (b), the zero
# concentration reference (c_0) and every concentration file (names
# starting with 'c') in `alllines`.
c_index = []
for i in range(len(alllines)):
    if alllines[i][0] == 'dark signal.txt':
        d = i
    elif alllines[i][0] == 'background.txt':
        b = i
    elif alllines[i][0][0] == 'c':
        if alllines[i][0][1] == '0':
            c_0 = i
        c_index.append(i)
# Broken-axis figure: dark signal on the small top panel, background on the
# large bottom panel, joined with diagonal "cut" markers.
fig,(ax,ax2) = plt.subplots(2, 1, gridspec_kw={"height_ratios":[1, 5]})
ax2.plot(alllines[b][1],alllines[b][2],lw = 0.5, label = "Background signal")
ax.plot(alllines[d][1],alllines[d][2],".g",markersize = 0.5,fillstyle = 'full',label ="Dark signal")
ax.set_xlim(min(alllines[d][1]),max(alllines[d][1]))
ax2.set_ylim(min(alllines[b][2])-5,-150+10)
ax.set_ylim(0.2, .65)
# Hide the facing spines so the two panels read as one broken axis.
ax.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
# NOTE(review): string 'off' for tick_params is a legacy form; newer
# matplotlib expects a boolean — confirm with the pinned version.
ax.tick_params(labeltop='off')
ax2.xaxis.tick_bottom()
ax2.set_xlabel("Time $t$/ms")
# Diagonal cut markers at the panel boundary (axes coordinates).
dd = .01
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-dd, +dd), (-dd*5, +dd*5),lw = 0.5, **kwargs)
ax.plot((1 - dd, 1 + dd), (-dd*5, +dd*5),lw = 0.5, **kwargs)
kwargs.update(transform=ax2.transAxes)
ax2.plot((-dd, +dd), (1 - dd, 1 + dd),lw = 0.5, **kwargs)
ax2.plot((1 - dd, 1 + dd), (1 - dd, 1 + dd), lw = 0.5, **kwargs)
plt.subplots_adjust(hspace = 0.06)
# Merge both panels' legend entries into a single legend.
hand, leg = ax.get_legend_handles_labels()
hand2, leg2 = ax2.get_legend_handles_labels()
plt.xlabel("Time $t$/ms")
plt.ylabel("Voltage $U$/mV")
plt.legend(hand+hand2, leg+leg2,loc = "best")
plt.savefig('Background',dpi = 300)
#plt.show()
# =============================================================================
#
# =============================================================================
#allines = np.load('allines')
# Dark/background levels and their standard errors (first 200 samples are
# taken as the pre-flash baseline for the background trace).
dark_signal = np.average(alllines[d][2])
dark_signal_err = np.std(alllines[d][2])/np.sqrt(len(alllines[d][2]))
background = np.average(alllines[b][2][0:200])
background_err = np.std(alllines[b][2][0:200])/np.sqrt(200)
data_m = []
for i in c_index:
    data_m.append(alllines[i])
# NOTE(review): sorts by filename; on equal names the ndarray elements
# would be compared and raise — relies on file names being unique.
data_m.sort()
data = []
plt.figure()
for i in data_m:
    # Label is the pressure encoded in the filename, e.g. "c<NNN> ... .txt".
    lab = i[0][1:-8]
    y = i[2] - dark_signal
    err = np.std(i[2][0:200])/np.sqrt(200)
    y_err = np.sqrt(err**2 + dark_signal_err**2)
    plt.plot(i[1],y,"-",lw = 0.5, label = lab + " mbar")
    data.append([lab,i[1],y,y_err])
plt.xlabel("Time $t$/ms")
plt.ylabel("Voltage $U$/mV")
plt.xlim(min(i[1]),max(i[1]))
plt.legend(ncol = 3,loc = 0)
plt.savefig('Measurements',dpi = 300)
#plt.show()
# =============================================================================
#
# =============================================================================
# Molar absorption coefficient of I2 from the zero-concentration reference.
d_cell = .1   # cell length — presumably metres; TODO confirm
P = 50.5      # pressure — presumably mbar; TODO confirm
c_I2 = P/8.314/(27.4+273.15)
c_I2_err = P/8.314/(27.4+273.15)**2*0.5
I_0 = np.average(alllines[c_0][2][0:100])-dark_signal
I_0_err = np.std(alllines[c_0][2][0:100])/np.sqrt(100)
print(I_0)
epsilon_I2 = -np.log(I_0/(background-dark_signal))/d_cell/c_I2
# NOTE(review): np.log(-I_0) is NaN for positive I_0 — the propagated-error
# expression below looks suspect; verify the derivation.
epsilon_I2_err = np.sqrt((np.log(I_0/(background-dark_signal))*c_I2_err/c_I2**2/d_cell/np.log(10))**2
                +(np.log(-I_0)/(background-dark_signal)/c_I2/d_cell/np.log(10))**2
                +(np.log(-background+dark_signal)/I_0/c_I2/d_cell/np.log(10))**2)
print(epsilon_I2)
def c_I2(x,x0):
    """Iodine concentration from a transmitted signal `x` relative to the
    reference level `x0` (Beer-Lambert inversion, factor 2 for I atoms).

    Uses the module-level ``epsilon_I2`` and ``d_cell``.  NOTE: rebinds the
    scalar ``c_I2`` computed above — the function shadows it from here on.
    """
    ratio = x/x0
    return 2/epsilon_I2/d_cell*np.log(ratio)
#plt1,ax1 = plt.subplots()
#plt2,ax2 = plt.subplots()
#plt3,ax3 = plt.subplots()
# Per-pressure linear fits of 1/[I] vs time; slope ~ total rate constant.
fit = []
for i in range(len(data)):
    fig,ax = plt.subplots()
    # Flash trigger sample — hard-coded time stamp of the flash onset.
    indices = [n for n, x in enumerate(data[i][1]) if x == 0.004999949]
    start = indices[0]
    color = ['k','b','c']
    # Pre-flash voltage used as the I2 reference level U_0.
    U_0 = np.average(data[i][2][:indices[0]-10])
    print(data[i][0])
    print(U_0)
#    for n in range(start+1,len(data[i][2])):
#        if data[i][2][n] > U_0-1:
#            end = n
#            break
    y = 1/c_I2(data[i][2][start:start+500],U_0)
    x = np.array(data[i][1][start:start+500])
#    if i%3 == 0:
#        fig = ax1
#    if i%3 == 1:
#        fig = ax2
#    if i%3 == 2:
#        fig = ax3
    plt.plot(x,y,".",ms = 1,fillstyle = 'full',mec = color[i//3],mfc =color[i//3])
    # Fit only the quasi-linear window of the recombination curve.
    popt,pcov = curve_fit(linear_fit,x[100:175],y[100:175])
    perr = np.sqrt(np.diag(pcov))
    plt.plot(np.arange(0,11), linear_fit(np.array(np.arange(0,11)),*popt) ,ls = '--', lw = 1,color = color[i//3])
    # Off-screen point just to get a combined marker+line legend entry.
    plt.plot(-50,-50,'.--',color = color[i//3], label = str(data[i][0]) + " mbar")
    fit.append([data[i][0],popt,perr])
    plt.xlabel("Time $t$/ms")
    plt.ylabel("Concentration 1/[I]/ m$^3$ mol$^{-1}$")
    plt.legend()
    plt.xlim(0,max(x)*1.1)
    plt.ylim(0,max(y)*1.1)
    ax.ticklabel_format(axis='y', style='sci',scilimits=(-3,3))
    plt.savefig(str(i),dpi = 300)
    plt.show()
#ax1.set_xlabel("Time $t$/ms")
#ax1.set_ylabel("Concentration 1/[I]/ m$^3$ mol$^{-1}$")
#ax1.ticklabel_format(axis='y', style='sci',scilimits=(-3,3))
#ax1.set_xlim(0,10)
##ax1.set_ylim(0,200)
#ax1.legend()
#ax2.set_xlabel("Time $t$/ms")
#ax2.set_ylabel("Concentration 1/[I]/ m$^3$ mol$^{-1}$")
#ax2.ticklabel_format(axis='y', style='sci',scilimits=(-3,3))
#ax2.set_xlim(0,10)
##ax2.set_ylim(0,200)
#ax2.legend()
#ax3.set_xlabel("Time $t$/ms")
#ax3.set_ylabel("Concentration 1/[I]/ m$^3$ mol$^{-1}$")
#ax3.ticklabel_format(axis='y', style='sci',scilimits=(-3,3))
#ax3.set_xlim(0,10)
#ax3.set_ylim(0,200)
#ax3.legend()
#plt1.savefig('1',dpi = 300)
#plt2.savefig('2',dpi = 300)
#plt3.savefig('3',dpi = 300)
#plt.show()
# =============================================================================
#
# =============================================================================
# Dump fit results and plot k_tot against the argon concentration.
f = open('fit.txt','w')
f.write('epsilon_I2\n')
f.write(str(epsilon_I2)+'\n')
f.write('dark\n')
f.write(str(dark_signal)+'\n')
f.write(str(dark_signal_err)+'\n')
f.write('background\n')
f.write(str(background)+'\n')
f.write(str(background_err)+'\n')
for i in fit:
    f.write(str(i)+'\n')
# Per-measurement cell temperatures in deg C — TODO confirm ordering
# matches the sorted `fit` list.
T = [32,33,33,34.5,34.5,35,35,35,33]
plt.figure()
x = [float(i[0]) for i in fit ]
x = np.array(x)
# Pressure (mbar) -> molar concentration via ideal-gas law.
x = x*100/8.314/(np.array(T)+273.15)
x_err = np.sqrt((1*100/8.314/(np.array(T)+273.15))**2+(x*100*0.5/8.314/(np.array(T)+273.15)**2)**2)
y = [float(i[1][0])*1000 for i in fit]
print(y)
y_err = [float(i[2][0])*1000 for i in fit]
plt.errorbar(x,y,yerr = y_err, fmt='x',ecolor = 'r',label = '$k_{tot}$')
# Last two points excluded from the fit — presumably outliers; confirm.
popt,pcov = curve_fit(linear_fit,x[:-2],y[:-2])
perr = np.sqrt(np.diag(pcov))
plt.plot(np.arange(0,21), linear_fit(np.array(np.arange(0,21)),*popt) ,ls = '-', lw = 1,label = 'Linear fit')
plt.xlim(0,20)
#plt.ylim(-100,5000)
plt.xlabel("Concentration [Ar]/mol m$^{-3}$")
plt.ylabel("Total rate constant $k_{tot}$/mol m$^{-3}$ s$^{-1}$")
plt.ticklabel_format(axis='y', style='sci' ,scilimits=(-3,3))
plt.legend()
plt.savefig('10',dpi = 300)
#plt.show()
f.write(str(x)+'\n')
f.write(str(x_err)+'\n')
f.write(str(popt)+'\n')
f.write(str(perr)+'\n')
f.close()
k_I2 = fit[0][1][0]/P*8.314*(27.4+273.15)*1e3
ratio = k_I2/popt[0]
# NOTE(review): k_I2_err has no outer sqrt — looks like a variance, not a
# standard deviation; verify before quoting uncertainties.
k_I2_err = ((fit[0][2][0]/P*8.314*(27.4+273.15))**2+
        (fit[0][1][0]*c_I2_err/fit[0][1][0]/P**2*8.314**2*(27.4+273.15)**2)**2)
ratio_err = np.sqrt((((k_I2_err/popt[0]*1e3))/popt[0]**2)+(k_I2*perr[0]/popt[0]**2)**2)
|
984,210 | 77f8188bd8fd21e94310d8d2e99a2d88500b0611 | import tensorflow as tf
class EuclideanDistanceMetric(tf.keras.metrics.Metric):
    """
    A custom Keras metric: running mean of the per-batch euclidean (L2)
    distance between y_true and y_pred.
    """
    def __init__(self, **kwargs):
        # Use the fixed display name only when constructed with
        # is_training=True; otherwise let Keras generate the name.
        if 'is_training' in kwargs:
            if kwargs['is_training']:
                super(EuclideanDistanceMetric, self).__init__(name='Euclidean Distance')
            else:
                super(EuclideanDistanceMetric, self).__init__()
        else:
            super(EuclideanDistanceMetric, self).__init__()
        # Running sum of batch L2 distances and count of batches seen.
        self.l2_norms = self.add_weight("norm2", initializer="zeros")
        self.count = self.add_weight("counter", initializer="zeros")
    # def __init__(self, **kwargs):
    #     super(EuclideanDistanceMetric, self).__init__( **kwargs)
    #     self.l2_norms = self.add_weight("norm2", initializer="zeros")
    #     self.count = self.add_weight("counter", initializer="zeros")
    def update_state(self, y_true, y_pred, sample_weight=None) -> None:
        # Adds the whole-batch L2 distance (not per-sample) to the sum;
        # sample_weight is accepted but ignored.
        self.l2_norms.assign_add(tf.norm(y_pred - y_true, ord='euclidean'))
        self.count.assign_add(1)
        # tf.print('update_state ', self.l2_norms, ' ', self.count)
    def result(self) -> tf.Tensor:
        # Mean batch distance since the last reset.
        val = self.l2_norms / self.count
        # tf.print('result ', self.l2_norms, ' ', self.count, ' ', val)
        return val
    def reset_states(self):
        # NOTE(review): `reset_states` is the legacy hook name; newer Keras
        # calls `reset_state` — confirm with the pinned TF version.
        # tf.print('reset_states ', self.l2_norms)
        self.l2_norms.assign(0)
        self.count.assign(0)
class AUCMetric(tf.keras.metrics.Metric):
    """
    Running mean of the per-batch euclidean distance between y_true and
    y_pred.

    BUG FIX: the original called ``super(EuclideanDistanceMetric, self)``
    — starting the MRO walk from the *other* metric class — which raises
    TypeError at construction because ``self`` is an AUCMetric.  The
    super() calls now target AUCMetric itself.

    NOTE(review): despite its name this metric computes a mean euclidean
    distance, not an area under the ROC curve; the body was copied
    verbatim from EuclideanDistanceMetric — confirm intent with the
    author.
    """
    def __init__(self, **kwargs):
        # Fixed display name only when constructed with is_training=True.
        if kwargs.get('is_training'):
            super(AUCMetric, self).__init__(name='AUC')
        else:
            super(AUCMetric, self).__init__()
        # Running sum of batch L2 distances and count of batches seen.
        self.l2_norms = self.add_weight("norm2", initializer="zeros")
        self.count = self.add_weight("counter", initializer="zeros")

    def update_state(self, y_true, y_pred, sample_weight=None) -> None:
        # Whole-batch L2 distance; sample_weight accepted but ignored.
        self.l2_norms.assign_add(tf.norm(y_pred - y_true, ord='euclidean'))
        self.count.assign_add(1)

    def result(self) -> tf.Tensor:
        # Mean batch distance since the last reset.
        return self.l2_norms / self.count

    def reset_states(self):
        self.l2_norms.assign(0)
        self.count.assign(0)
|
984,211 | 4963012affb8f671bb7c5362d29ad8b46fc3551e | import math
import re
import time
from tqdm import trange
from .utils import get_soup
comments_url_form = 'https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code={}&order=newest&page={}&onlySpoilerPointYn=N' # idx, type, page
def scrap_comments(idx, limit=-1, sleep=0.05, last_time=None, i_movie=-1, n_total_movies=-1):
    """Scrape every comment page for movie `idx` (newest first on the
    site) and return the comments oldest-first.

    Stops early as soon as a page reports a comment at or before
    `last_time`.  `limit` caps the number of pages; `i_movie` and
    `n_total_movies` only decorate the progress bar.  (`sleep` is accepted
    for API compatibility but unused, as in the original.)
    """
    max_page = num_of_comment_pages(idx)
    if limit > 0:
        max_page = min(limit, max_page)
    if max_page <= 0:
        return []
    progress_desc = (f'Scrap comments {idx}'
                     if n_total_movies < 0 or i_movie < 0
                     else f'Scrap comments {idx} ({i_movie}/{n_total_movies})')
    collected = []
    for p in trange(1, max_page + 1, desc=progress_desc):
        soup = get_soup(comments_url_form.format(idx, p))
        page_comments, stop = parse_a_page(soup, last_time)
        collected.extend(page_comments)
        if stop:
            print(f'\r movie {idx}. stop scrap comments. found existing comments {p} / {max_page}')
            break
    # Site lists newest first; reverse so callers get chronological order.
    return collected[::-1]
def parse_a_page(soup, last_time=None):
    """Parse one comment-list page into dicts.

    Returns (comments, stop) where `stop` becomes True as soon as a
    comment written at or before `last_time` is encountered (the page is
    newest-first, so everything after it is older).  Rows that fail to
    parse are skipped silently on purpose (best-effort scraping).

    NOTE(review): the `written_at <= last_time` comparison is done on the
    raw "Y.M.D H:M" strings — it is only correct if the site zero-pads
    the fields; confirm.
    """
    comments = []
    stop = False
    for row in soup.select('div[class=score_result] li'):
        try:
            score = int(row.select('div[class=star_score] em')[0].text.strip())
            text = row.select('div[class=score_reple] p')[0].text.strip()
            # detach '관람객' ("viewer") icon text prefix
            if text[:4] == '관람객\n':
                text = text[4:].strip()
            # detach '스포일러' ("spoiler") icon text prefix
            if text[:25] == '스포일러가 포함된 감상평입니다. 감상평 보기\n':
                text = text[25:].strip()
            # Comment id is the first argument of the onclick JS call.
            idx = row.select('a[onclick^=javascript]')[0].attrs.get('onclick', '').split('(')[1].split(',')[0]
            masked_user = row.select('div[class=score_reple] em')[0].text.strip()
            written_at = re.search(r"\d+\.\d+\.\d+ \d+:\d+", row.text).group()
            agree = int(row.select('strong[class^="sympathy"]')[0].text.strip())
            disagree = int(row.select('strong[class^="notSympathy"]')[0].text.strip())
            if (last_time is not None) and (written_at <= last_time):
                stop = True
                break
            comments.append(
                {'score': score,
                 'text': text,
                 'idx': idx,
                 'user': masked_user,
                 'written_at': written_at,
                 'agree': agree,
                 'disagree': disagree
                })
        except Exception as e:
            continue
    return comments, stop
def num_of_comment_pages(idx):
    """Number of comment pages (5 comments per page) for movie `idx`,
    or -1 when the total-count element cannot be found or parsed."""
    soup = get_soup(comments_url_form.format(idx, 1))
    try:
        raw_total = soup.select('div[class="score_total"] em')[-1].text
        return math.ceil(int(raw_total.replace(',', '')) / 5)
    except Exception:
        return -1
|
984,212 | 40fea4aa76cae49a4404be14458d5e611230e3f6 | from datetime import datetime as dt
"""
Queremos armazenar os seguintes dados:
* Nome
* Idade
* Altura
* Peso
* Tipo sanguíneo (se conhecido)
* Queixa no PS (se houver)
* Datas de consulta
Operações:
* Alterar idade
* Alterar peso
* Alterar queixa
* Adicionar uma data de consulta
"""
class Paciente:
    """A hospital patient record (name, age, height, weight, blood type,
    ER complaint and appointment dates).

    BUG FIX: the constructor stored the blood type under the misspelled
    attribute ``tipoSansuineo``; it is now also stored as the intended
    ``tipoSanguineo``.  The misspelled name is kept as a deprecated alias
    so existing readers keep working.
    """

    def __init__(self, nome, idade, altura, peso, queixa=None, tipoSanguineo=None):
        self.nome = nome                      # patient name
        self.idade = idade                    # age
        self.altura = altura                  # height
        self.peso = peso                      # weight
        self.queixa = queixa                  # ER complaint, if any
        self.tipoSanguineo = tipoSanguineo    # blood type, if known
        # Deprecated alias preserving the original (misspelled) attribute.
        self.tipoSansuineo = tipoSanguineo
        self.consultas = []                   # appointment dates

    def adiciona_consulta(self, dataConsulta):
        """Append an appointment date."""
        self.consultas.append(dataConsulta)

    def altera_idade(self, nova_idade):
        """Update the patient's age."""
        self.idade = nova_idade

    def altera_peso(self, novo_peso):
        """Update the patient's weight."""
        self.peso = novo_peso

    def altera_queixa(self, nova_queixa):
        """Update the ER complaint."""
        self.queixa = nova_queixa
|
984,213 | 051d96484bfba00ec747edcacc31c6f448103868 | # -*- coding: ISO-8859-1 -*-
from pathlib import Path
import csv
STRING_ENCODING = "utf_8_sig"
def get_columns_from_csv(file_path: Path) -> list:
    """Return the header column names of the CSV at `file_path`.

    Reads the first data row through DictReader (whose keys are the
    header fields) and returns those keys in file order.
    """
    with open(file_path, "r", ) as handle:
        first_row = next(csv.DictReader(handle))
    return list(first_row.keys())
def csv_to_json(file_path: Path) -> dict:
    """
    Convert a sequenced-PowerPlan CSV export into nested dicts:

        {plan: {<non-PHASE/DOT columns>, 'phases':
            {phase: {<PHASE* columns>, 'dots':
                {dot: {<DOT* columns>}}}}}}

    Rows with an empty DESCRIPTION / PHASE_DESCRIPTION / DOT_DESCRIPTION
    are skipped at the corresponding level; the first row seen for a
    plan/phase/dot wins.

    IMPROVED: the original opened and parsed the file twice (once via
    get_columns_from_csv just to recover the header, then again with an
    explicit ``fieldnames`` list and a manual header skip).  A bare
    DictReader already consumes the header row, so a single pass
    suffices; behavior is unchanged.
    """
    output = {}
    with open(file_path, "r", ) as f:
        reader = csv.DictReader(f)  # header row becomes the field names
        for row in reader:
            powerplan = row["DESCRIPTION"]
            if powerplan not in output and powerplan:
                # Plan level keeps every column except PHASE*/DOT* ones.
                output[powerplan] = {
                    k: v
                    for k, v in row.items()
                    if not k.startswith("PHASE") and not k.startswith("DOT")
                }
                output[powerplan]["phases"] = {}
            phase = row["PHASE_DESCRIPTION"]
            if phase not in output[powerplan]["phases"] and phase:
                output[powerplan]["phases"][phase] = {
                    k: v for k, v in row.items() if k.startswith("PHASE")
                }
                output[powerplan]["phases"][phase]["dots"] = {}
            dot = row["DOT_DESCRIPTION"]
            if phase:
                if dot not in output[powerplan]["phases"][phase]["dots"] and dot:
                    output[powerplan]["phases"][phase]["dots"][dot] = {
                        k: v for k, v in row.items() if k.startswith("DOT")
                    }
    return output
def compare_key_val(
    d1: dict, d2: dict, plan_name: str = None, phase_name: str = None
) -> list:
    """Diff the scalar entries of `d1` against `d2`.

    Nested dicts and keys ending in ``_ID`` are skipped.  A missing key in
    `d2` compares as None.  Returns one record per differing key, tagged
    with the supplied plan/phase names.
    """
    diffs = []
    for key, left in d1.items():
        if isinstance(left, dict) or key.endswith("_ID"):
            continue  # nested structures and *_ID keys are not compared
        right = d2.get(key)
        if left == right:
            continue
        diffs.append(
            {
                "plan": plan_name,
                "phase": phase_name,
                "key": key,
                "value1": left,
                "value2": right,
            }
        )
    return diffs
def main():
    """Compare the b0783 and p0783 pathway exports plan-by-plan (then
    phase-by-phase and DOT-by-DOT) and write all differences to
    output_pathway_compare.csv.

    BUG FIX: when a phase existed in b0783 but not in p0783 and that
    phase had DOTs, the DOT loop dereferenced ``p0783_phase_dict`` (None)
    with ``.get`` and raised AttributeError; the lookup now falls back to
    an empty dict, so every such DOT is reported as missing instead.
    """
    script_path = Path(__file__).parent
    b0783 = csv_to_json(Path(script_path, "data", "b0783_pathway.csv"))
    p0783 = csv_to_json(Path(script_path, "data", "p0783_pathway.csv"))
    output = []
    for plan_desc, plan_dict in b0783.items():
        b0783_plan_dict = plan_dict
        p0783_plan_dict = p0783.get(plan_desc)
        if b0783_plan_dict and p0783_plan_dict is not None:
            output.extend(
                compare_key_val(
                    d1=b0783_plan_dict,
                    d2=p0783_plan_dict,
                    plan_name=plan_desc,
                )
            )
        else:
            # PowerPlan doesn't exist in the other dictionary, so just skip
            continue
        # Phases exist
        if b0783_plan_dict.get("phases"):
            for phase_desc, phase_dict in b0783_plan_dict.get("phases").items():
                b0783_phase_dict = phase_dict
                p0783_phase_dict = p0783_plan_dict.get("phases").get(phase_desc)
                if b0783_phase_dict and p0783_phase_dict is not None:
                    output.extend(
                        compare_key_val(
                            d1=b0783_phase_dict,
                            d2=p0783_phase_dict,
                            plan_name=plan_desc,
                            phase_name=phase_desc,
                        )
                    )
                elif b0783_phase_dict and p0783_phase_dict is None:
                    output.append(
                        {
                            "plan": plan_desc,
                            "phase": phase_desc,
                            "key": f"Phase is missing: {phase_desc}",
                            "value1": "Exists",
                            "value2": "Does not exist",
                        }
                    )
                # DOTs exist (phases must exist)
                if b0783_phase_dict.get("dots"):
                    for dot_desc, dot_dict in b0783_phase_dict.get("dots", {}).items():
                        b0783_dot_dict = dot_dict
                        # Fall back to {} when the whole phase is absent in
                        # p0783 (fixes the AttributeError described above).
                        p0783_dot_dict = (p0783_phase_dict or {}).get("dots", {}).get(dot_desc)
                        if b0783_dot_dict and p0783_dot_dict is not None:
                            output.extend(
                                compare_key_val(
                                    d1=b0783_dot_dict,
                                    d2=p0783_dot_dict,
                                    plan_name=plan_desc,
                                    phase_name=phase_desc,
                                )
                            )
                        elif b0783_dot_dict and p0783_dot_dict is None:
                            output.append(
                                {
                                    "plan": plan_desc,
                                    "phase": phase_desc,
                                    "key": f"DOT is missing: {dot_desc}",
                                    "value1": "Exists",
                                    "value2": "Does not exist",
                                }
                            )
    with open("output_pathway_compare.csv", "w", encoding=STRING_ENCODING, newline="") as f:
        writer = csv.DictWriter(
            f, fieldnames=["plan", "phase", "key", "value1", "value2"]
        )
        writer.writeheader()
        for row in output:
            writer.writerow(row)
if __name__ == "__main__":
main() |
984,214 | c2d129bfbe73244a9c6e028fc20d93d8cc5f5ee3 | from concurrent.futures import ThreadPoolExecutor
from functools import partial
from itertools import islice
import pytest
from future_map import FutureMap, future_map
@pytest.fixture(name="executor")
def fixture_executor():
    # Fresh single-worker pool per test; each test shuts it down itself
    # via `with executor:`.
    return ThreadPoolExecutor(max_workers=1)
def double(value):
    """Return `value` multiplied by two (the worker used in these tests)."""
    result = value * 2
    return result
def test_future_map(executor):
    """FutureMap yields every mapped result (completion order is free)."""
    with executor:
        submit_double = partial(executor.submit, double)
        outcome = sorted(FutureMap(submit_double, range(4), 2))
    assert outcome == [0, 2, 4, 6]
def test_future_map_function(executor):
    """The future_map convenience function behaves like FutureMap."""
    with executor:
        submit_double = partial(executor.submit, double)
        outcome = sorted(future_map(submit_double, range(4), 2))
    assert outcome == [0, 2, 4, 6]
def test_future_with_infinite_iterable(executor):
    """islice over an endless input terminates and yields n items."""
    with executor:
        def endless():
            counter = 0
            while True:
                yield counter
                counter += 1
        submit_double = partial(executor.submit, double)
        outcome = list(islice(FutureMap(submit_double, endless(), 2), 4))
    assert len(outcome) == 4
def test_future_with_error(executor):
    """An exception raised in the worker propagates out of FutureMap."""
    with executor:
        def boom(_):
            raise Exception("for test")
        submit_boom = partial(executor.submit, boom)
        with pytest.raises(Exception) as excinfo:
            list(FutureMap(submit_boom, range(4), 2))
    assert str(excinfo.value) == "for test"
|
984,215 | 3cd2fb05a73da485ec4b5a236a8022cd310ed08d | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter
from operator import attrgetter
def compute_mean_error(list_boxes, attribute, component):
    """Mean and standard deviation (axis 0) of ``box.<attribute>.<component>``
    gathered over every box in `list_boxes`."""
    values = [getattr(getattr(box, attribute), component) for box in list_boxes]
    return np.mean(values, axis=0), np.std(values, axis=0)
def plot_mean_attribute(list_tracers, list_colors, labels,
                        attribute, component, ylabel, title=None, save = None):
    """Plot mean +/- std of `attribute.component` against r for each set
    of tracers (last bin dropped).

    BUG FIX: when `labels` was given, the error bars were sliced as
    ``std[:1]`` (one element) instead of ``std[:-1]``, so labelled curves
    got wrong/mismatched error bars.  Both cases now use ``std[:-1]``,
    matching ``mean[:-1]``.

    NOTE(review): `title` and `save` were accepted but unused in the
    original; they are kept for interface compatibility.
    """
    for i, tracer in enumerate(list_tracers):
        mean, std = compute_mean_error(tracer, attribute, component)
        # errorbar(label=None) is equivalent to omitting the label.
        label = labels[i] if labels is not None else None
        plt.errorbar(getattr(tracer[0], 'r')[:-1], mean[:-1], yerr=std[:-1],
                     color=list_colors[i], label=label)
    plt.xlabel('r [Mpc/h]')
    if labels is not None:
        plt.legend()
    plt.ylabel(ylabel)
def jointplot(x, y, jointpdf, log=False):
    '''
    Plots the joint PDF of two random variables together with its marginals.
    Args:
        x and y, random variables,
        jointpdf, their joint PDF
        log, if True contour log10 of the PDF instead of the PDF itself
    '''
    nullfmt = NullFormatter()         # no labels
    # definitions for the axes
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02
    rect_scatter = [left, bottom, width, height]
    rect_histx = [left, bottom_h, width, 0.2]
    rect_histy = [left_h, bottom, 0.2, height]
    # start with a rectangular Figure
    plt.figure(1, figsize=(8, 8))
    axScatter = plt.axes(rect_scatter)
    axHistx = plt.axes(rect_histx)
    axHisty = plt.axes(rect_histy)
    # no labels on the marginal panels' shared axes
    axHistx.xaxis.set_major_formatter(nullfmt)
    axHisty.yaxis.set_major_formatter(nullfmt)
    # the scatter plot:
    if(log == True):
        axScatter.contour(x, y, np.log10(jointpdf))
    else:
        axScatter.contour(x, y, jointpdf)
    # Marginals: sum over one axis times the bin width (assumes uniform
    # grid spacing — TODO confirm).
    axHistx.plot(y, abs(y[1] - y[0]) * np.sum(jointpdf, axis=0))
    axHisty.plot(abs(x[1] - x[0]) * np.sum(jointpdf, axis=-1), x)
    axScatter.set_xlabel(r'$v_r$ [Mpc/h]')
    axScatter.set_ylabel(r'$v_t$ [Mpc/h]')
    axHistx.set_ylabel('Marginal radial PDF')
    axHisty.set_xlabel('Marginal tangential PDF')
    plt.show()
def bestfit_jointplot(x, y, jointpdf, bestfitpdf, log=False, log_marginals=False, save=None):
    '''
    Plots a measured joint PDF (solid contours) against a best-fit model
    (dashed contours), together with both marginals.
    Args:
        x and y, random variables,
        jointpdf, the measured joint PDF
        bestfitpdf, the best-fit model PDF on the same grid
        log, contour log10 of both PDFs
        log_marginals, draw the marginal panels with a log scale
        save, optional path: save the figure instead of showing it
    '''
    nullfmt = NullFormatter()         # no labels
    # definitions for the axes
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02
    rect_scatter = [left, bottom, width, height]
    rect_histx = [left, bottom_h, width, 0.2]
    rect_histy = [left_h, bottom, 0.2, height]
    # start with a rectangular Figure
    plt.figure()
    plt.figure(1, figsize=(8, 8))
    axScatter = plt.axes(rect_scatter)
    axHistx = plt.axes(rect_histx)
    axHisty = plt.axes(rect_histy)
    # no labels on the marginal panels' shared axes
    axHistx.xaxis.set_major_formatter(nullfmt)
    axHisty.yaxis.set_major_formatter(nullfmt)
    # Common contour levels across both PDFs so the plots are comparable.
    vmin = np.min([jointpdf, bestfitpdf])
    vmax = np.max([jointpdf, bestfitpdf])
    levels = np.linspace(vmin, vmax, 10)
    # the scatter plot:
    if log:
        if(vmin != 0):
            vmin = np.log10(vmin)
        else:
            vmin = np.log10(np.min(bestfitpdf)) # this one should never be == 0.
        vmax = np.log10(vmax)
        levels = np.log10(np.logspace(vmin, vmax, 5))
        cont = axScatter.contour(x, y, np.log10(jointpdf),vmin=vmin, vmax=vmax, levels=levels)
        axScatter.contour(x, y, np.log10(bestfitpdf),linestyles='dashed', vmin=vmin, vmax=vmax, levels=levels )
    else:
        cont = axScatter.contour(x, y, jointpdf, vmin=vmin, vmax=vmax, levels=levels)
        axScatter.contour(x, y, bestfitpdf, linestyles='dashed',vmin=vmin, vmax=vmax, levels=levels)
    # Marginals: sum over one axis times the bin width (uniform grid assumed).
    if log_marginals:
        axHistx.semilogy(y, abs(y[1] - y[0]) * np.sum(jointpdf, axis=0), label='Measured')
        axHistx.semilogy(y, abs(y[1] - y[0]) * np.sum(bestfitpdf, axis=0), linestyle='--', color='purple', label='Best fit')
        axHisty.semilogx(abs(x[1] - x[0]) * np.sum(jointpdf, axis=-1), x, label='Measured' )
        axHisty.semilogx(abs(x[1] - x[0]) * np.sum(bestfitpdf, axis=-1), x, linestyle='--', color='purple', label='Best fit')
    else:
        axHistx.plot(y, abs(y[1] - y[0]) * np.sum(jointpdf, axis=0), label='Measured')
        axHistx.plot(y, abs(y[1] - y[0]) * np.sum(bestfitpdf, axis=0), linestyle='--', color='purple', label='Best fit')
        axHisty.plot(abs(x[1] - x[0]) * np.sum(jointpdf, axis=-1), x, label='Measured' )
        axHisty.plot(abs(x[1] - x[0]) * np.sum(bestfitpdf, axis=-1), x, linestyle='--', color='purple', label='Best fit')
    axScatter.set_xlabel(r'$v_r$ [Mpc/h]')
    axScatter.set_ylabel(r'$v_t$ [Mpc/h]')
    axHistx.set_ylabel('Radial ')
    axHisty.set_xlabel('Tangential ')
    if save:
        print('Saving plot...')
        plt.savefig(save)
        plt.close()
    else:
        plt.show()
def plot_attribute_residual(instance, models, attribute, ylabel, res_ylabel, title = None, save = None):
    """Plot s^2-weighted measured data with errorbars plus each model curve,
    with a lower panel of (model - measured)/sigma residuals.

    `instance` is expected to expose `measured.<attribute>.mean/.std`,
    `measured.s_c.mean`, `measured.color` and, per model name in `models`,
    `<model>.<attribute>`, `<model>.s_c` and `<model>.color`.
    """
    fig, (ax1, ax2) = plt.subplots(nrows = 2, ncols = 1, sharex = True, squeeze = True,
                                   gridspec_kw = {'height_ratios':[4,1]})
    if title is not None:
        plt.suptitle(title)
    get_attributes = attrgetter(f'measured.{attribute}.mean', f'measured.{attribute}.std')
    measured_mean, measured_std = get_attributes(instance)
    get_att = attrgetter('measured.s_c.mean')
    x_measured = get_att(instance)
    ax1.errorbar(x_measured, x_measured*x_measured*measured_mean,
                 yerr = x_measured*x_measured*measured_std,
                 label = 'measured', color = instance.measured.color,
                 linestyle = '', marker = 'o', markersize = 3)
    for model in models:
        get_attributes = attrgetter(f'{model}.{attribute}', f'{model}.color')
        model_value, model_color = get_attributes(instance)
        get_att = attrgetter(f'{model}.s_c')
        x = get_att(instance)
        ax1.plot(x, x*x*model_value, label = model, color = model_color, linewidth = 2)
        # Down-sample the model grid to the measured bins for the residual.
        step = int(len(x)/len(x_measured))
        ax2.plot(x_measured, (model_value[step-1::step] - measured_mean)/measured_std,
                 color = model_color, linewidth = 2)
    # +/- 1 sigma band (uses the last model's x grid).
    ax2.fill_between(x, -1., 1., facecolor = 'yellow', alpha = 0.5)
    ax1.legend(bbox_to_anchor = [1., 1.])
    ax2.axhline(y = 0., linestyle='dashed', color='gray')
    ax2.set_ylim(-5.,5.)
    ax2.set_xlabel('s [Mpc/h]')
    ax1.set_ylabel(ylabel)
    ax2.set_ylabel(res_ylabel)
    plt.subplots_adjust(wspace = 0, hspace = 0)
    if save is not None:
        # NOTE(review): `bbox_to_inches` looks like a typo for savefig's
        # `bbox_inches` — confirm against the pinned matplotlib version.
        plt.savefig(save, dpi = 200, bbox_to_inches = 'tight')
def plot_attribute_percent(instance, models, attribute, ylabel, res_ylabel, title = None, save = None):
    """Like plot_attribute_residual, but the lower panel shows the
    fractional difference (model - measured)/model with a +/- 1% band.

    `instance` is expected to expose `measured.<attribute>.mean/.std`,
    `measured.s_c.mean`, `measured.color` and, per model name in `models`,
    `<model>.<attribute>`, `<model>.s_c` and `<model>.color`.
    """
    fig, (ax1, ax2) = plt.subplots(nrows = 2, ncols = 1, sharex = True, squeeze = True,
                                   gridspec_kw = {'height_ratios':[4,1]})
    if title is not None:
        plt.suptitle(title)
    get_attributes = attrgetter(f'measured.{attribute}.mean', f'measured.{attribute}.std')
    measured_mean, measured_std = get_attributes(instance)
    get_att = attrgetter('measured.s_c.mean')
    x_measured = get_att(instance)
    ax1.errorbar(x_measured, x_measured*x_measured*measured_mean,
                 yerr = x_measured*x_measured*measured_std,
                 label = 'measured', color = instance.measured.color,
                 linestyle = '', marker = 'o', markersize = 3)
    for model in models:
        get_attributes = attrgetter(f'{model}.{attribute}', f'{model}.color')
        model_value, model_color = get_attributes(instance)
        get_att = attrgetter(f'{model}.s_c')
        x = get_att(instance)
        ax1.plot(x, x*x*model_value, label = model, color = model_color, linewidth = 2)
        # Down-sample the model grid to the measured bins for the ratio.
        step = int(len(x)/len(x_measured))
        ax2.plot(x_measured, (model_value[step-1::step] - measured_mean)/model_value[step-1::step],
                 color = model_color, linewidth = 2)
    # +/- 1% band (uses the last model's x grid).
    ax2.fill_between(x, -0.01, 0.01, facecolor = 'yellow', alpha = 0.5)
    ax1.legend()
    ax2.axhline(y = 0., linestyle='dashed', color='gray')
    ax2.set_ylim(-0.05,0.05)
    ax2.set_xlabel('s [Mpc/h]')
    ax1.set_ylabel(ylabel)
    ax2.set_ylabel(res_ylabel)
    plt.subplots_adjust(wspace = 0, hspace = 0)
    if save is not None:
        # NOTE(review): `bbox_to_inches` looks like a typo for savefig's
        # `bbox_inches` — confirm against the pinned matplotlib version.
        plt.savefig(save, dpi = 200, bbox_to_inches = 'tight')
|
984,216 | df572f83b75d28192d4cb2dd70ceb8ae7c745097 | from django.apps import AppConfig
class PersonalAccountingConfig(AppConfig):
    """Django application configuration for the personal_accounting app."""
    name = 'personal_accounting'
|
984,217 | 67766a97e4ab49c9a67705d0c0b32257946aa265 | import argparse
import os
import hack_parser as parser
import encode_binary as encoder
# Hack assembler driver: translate <file>.asm into <file>.hack next to it.
# take filename as an argument
ap = argparse.ArgumentParser()
ap.add_argument('filepath', metavar='fp', type=str, help='Path to the .asm file to process.')
args = ap.parse_args()

# Derive the output .hack path from the input file's folder and base name.
filename = os.path.splitext(os.path.basename(args.filepath))[0]
containing_folder = os.path.dirname(args.filepath)

# Predefined Hack symbols: virtual registers, R0-R15 and memory-mapped I/O.
symbols = {
    "SP": 0,
    "LCL": 1,
    "ARG": 2,
    "THIS": 3,
    "THAT": 4,
    "R0": 0,
    "R1": 1,
    "R2": 2,
    "R3": 3,
    "R4": 4,
    "R5": 5,
    "R6": 6,
    "R7": 7,
    "R8": 8,
    "R9": 9,
    "R10": 10,
    "R11": 11,
    "R12": 12,
    "R13": 13,
    "R14": 14,
    "R15": 15,
    "SCREEN": 16384,
    "KBD": 24576
}

# First pass: collect A/C commands and record each (LABEL)'s ROM address.
rom_address = 0
commands = []
with open(args.filepath, 'r') as infile:
    for line in infile:
        command = parser.format_line(line)
        # ignore line if there is no command
        if len(command) == 0:
            continue
        command_type = parser.get_command_type(command)
        if command_type == "A_COMMAND" or command_type == "C_COMMAND":
            commands.append(command)
            rom_address += 1
        elif command_type == "L_COMMAND":
            # (LABEL) refers to the address of the next real instruction.
            symbol = command[1:-1]
            symbols[symbol] = rom_address

# Second pass: resolve symbols and emit binary.  IMPROVED: the output file
# is now opened in a `with` block (the original never closed the handle,
# risking unflushed output) and only once parsing has succeeded.
next_available_address = 16  # first free RAM slot for new variables
with open(containing_folder + '/' + filename + '.hack', "w") as outfile:
    for command in commands:
        command_type = parser.get_command_type(command)
        if command_type == "A_COMMAND":
            value = command[1:]
            if parser.A_is_symbol(value):
                # New variables are allocated RAM addresses from 16 upward.
                if value not in symbols:
                    symbols[value] = next_available_address
                    next_available_address += 1
                address = symbols[value]
            else:
                address = int(value)
            encoded_command = encoder.encode_A(address)
            outfile.writelines(encoded_command + "\n")
        elif command_type == "C_COMMAND":
            dest, comp, jump = parser.split_C(command)
            encoded_command = encoder.encode_C(dest, comp, jump)
            outfile.writelines(encoded_command + "\n")
|
984,218 | 68649c77ccece11a2fb20c19b5a8f184c913ecc9 | # -*- coding: utf-8 -*-
# coding=utf-8
"""
create_author : zhangcl
create_time : 2018-11-05
program : *_* course exam question *_*
"""
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import xlwt
def writeExcelFile(filepath, sheet_datas):
    """
    Save the data of several sheets into one Excel workbook.
    :param filepath: full path of the target .xls file
    :param sheet_datas: mapping of sheet name -> 2-D list of cell values
    :return: None
    """
    workbook = xlwt.Workbook()
    for sheet_name in sheet_datas.keys():
        sheet = workbook.add_sheet(sheet_name)
        # Header cells: yellow background, bold font, centered text.
        header_style = xlwt.easyxf(
            'pattern: pattern solid, fore_colour yellow; '
            'font: bold on; '
            'align: horz centre, vert center; '
        )
        rows = sheet_datas.get(sheet_name)
        for row_idx in range(0, len(rows)):
            row_values = rows[row_idx]
            for col_idx in range(0, len(row_values)):
                cell = row_values[col_idx]
                if row_idx == 0:
                    # First row is the header and gets the styled cells.
                    sheet.write(row_idx, col_idx, cell, header_style)
                else:
                    sheet.write(row_idx, col_idx, cell)
    workbook.save(filepath)
if __name__ == '__main__':
    # Demo: write a small 2-D array (rows may have unequal lengths).
    datas = [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h']]
    file_path = u'./../../data/course-knowledge-machine/20181026-600plus/q-src-xls/test.xls'
    wb = xlwt.Workbook()
    sheet = wb.add_sheet('test')  # sheet named "test"
    # Header cell format
    style = 'pattern: pattern solid, fore_colour yellow; '  # yellow background
    style += 'font: bold on; '  # bold font
    style += 'align: horz centre, vert center; '  # centered
    header_style = xlwt.easyxf(style)
    row_count = len(datas)
    col_count = len(datas[0])
    for row in range(0, row_count):
        # Re-read the width per row since rows can differ in length.
        col_count = len(datas[row])
        for col in range(0, col_count):
            if row == 0:  # style the header row's cells
                sheet.write(row, col, datas[row][col], header_style)
            else:
                sheet.write(row, col, datas[row][col])
wb.save(file_path) |
984,219 | c9c369a9fe948aa361e14e2ea3fe8ff341614fce | # Generated by Django 2.2 on 2019-05-29 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the optional `klasa` CharField
    (max 5 chars, blank allowed) to the `sprawdzian` model."""

    dependencies = [
        ('sprawdziany', '0002_sprawdzian'),
    ]
    operations = [
        migrations.AddField(
            model_name='sprawdzian',
            name='klasa',
            field=models.CharField(blank=True, max_length=5),
        ),
    ]
|
984,220 | 34f971900d3365d6f8b6e6a6868f705ab8a3b125 | import os
import sys
import re
# Usage: python script.py <data.tsv> <rules.tsv>
# Each rule line is "<output-name>\t<regex>"; every data line whose third
# tab-separated field is 'S' and which matches the regex is written to
# "<output-name>.txt".
txtFile = sys.argv[1]
regFile = sys.argv[2]
with open(txtFile, 'r') as f1:
    txtlines = f1.readlines()
with open(regFile, 'r') as f2:
    regLines = f2.readlines()
for regLine in regLines:
    # BUG FIX: the pattern previously kept the rule line's trailing
    # newline, and the compiled regex was applied to a *list* of fields
    # (`p.search(txtchain)`), which raises TypeError.  The pattern is now
    # stripped and searched against the full text line.
    # NOTE(review): if the regex was meant to target one specific column
    # rather than the whole line, adjust the search target accordingly.
    tp_ls = regLine.rstrip('\n').split('\t')
    p = re.compile(tp_ls[1])
    tg_lines = list()
    for txtline in txtlines:
        txtchain = txtline.split('\t')
        if txtchain[2] == 'S' and p.search(txtline):
            tg_lines.append(txtline)
    with open(tp_ls[0]+'.txt', 'w') as w3:
        w3.writelines(tg_lines)
|
984,221 | cdd0f4b983c6dab7196f3c08156f9d7f177cee3c | import json
import numpy as np
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
# Root directory of the raw JSON dumps (train/test/validation splits).
data_path = "../data/json"
with open('%s/train.json' % (data_path)) as json_data:
    train = json.load(json_data)
with open('%s/test.json' % (data_path)) as json_data:
    test = json.load(json_data)
with open('%s/validation.json' % (data_path)) as json_data:
    validation = json.load(json_data)
# Train split: join image URLs with their annotations on imageId.
train_img_url = train['images']
train_img_url = pd.DataFrame(train_img_url)
train_ann = train['annotations']
train_ann = pd.DataFrame(train_ann)
train = pd.merge(train_img_url, train_ann, on='imageId', how='inner')
# test data (images only — no annotations available)
test = pd.DataFrame(test['images'])
# Validation Data: same join as train
val_img_url = validation['images']
val_img_url = pd.DataFrame(val_img_url)
val_ann = validation['annotations']
val_ann = pd.DataFrame(val_ann)
validation = pd.merge(val_img_url, val_ann, on='imageId', how='inner')
datas = {'Train': train, 'Test': test, 'Validation': validation}
for data in datas.values():
    # imageId arrives as a string; store it as a compact unsigned int
    data['imageId'] = data['imageId'].astype(np.uint32)
# Multi-hot encode the label lists; fit on train, reuse the mapping on validation.
mlb = MultiLabelBinarizer()
train_label = mlb.fit_transform(train['labelId'])
validation_label = mlb.transform(validation['labelId'])
dummy_label_col = list(mlb.classes_)
print(dummy_label_col)
for data in [validation_label, train_label, test]:
    print(data.shape)
# Persist the binarized label matrices next to the JSON inputs.
np.save(f'{data_path}/train', train_label)
np.save(f'{data_path}/val', validation_label)
|
984,222 | 3d66fc4059c5815c6199297b28deac3441a3ba07 | from django.conf.urls import include, url
from accounts import views
# URL routes for the accounts app; each route resolves to a view in
# accounts.views and is reversible by its `name`.
urlpatterns = [
    url(r'^crear_transaccion/$', views.create_transaction, name='createTransaction'),
    url(r'^crear_evento/$', views.new_event, name='newEvent'),
    url(r'^crear_items/$', views.addItem, name='addItem'),
    url(r'^crear_venta/$', views.new_sale, name='newVenta'),
    url(r'^balances/$', views.balances, name='balances'),
]
|
984,223 | 504d25bcae046e76ff0175cb0a649bd6fa143073 | import math
import numpy as np
import pylab as plt
k = 0.5  # drag coefficient used by the dynamics below


def f(t, x, u):
    """Planar dynamics with linear drag k: return dx/dt for state x.

    State x = (x1, x2, v1, v2, theta); input u = (thrust, turn rate).
    """
    x1, x2, v1, v2, theta = x
    thrust, turn_rate = u
    accel_1 = thrust * math.cos(theta) - k * v1
    accel_2 = thrust * math.sin(theta) - k * v2
    return np.array([v1, v2, accel_1, accel_2, turn_rate])
def h(t, x, u):
    """Measurement map: observe position (x1, x2) and heading theta only."""
    pos_1, pos_2, _vel_1, _vel_2, heading = x
    ctrl_a, ctrl_b = u
    return np.array([pos_1, pos_2, heading])
def u1(t):
    """Open-loop control law: no actuation at any time t."""
    return np.array((0, 0))
def sim(f, t, x, u, dt=1e-4):
    """Forward-Euler integration of dynamics f from state x up to time t.

    Controls come from the module-level law u1(); the initial `u` only
    seeds the first entry of the input history.
    Returns (times, states) as numpy arrays.
    """
    times, states, inputs = [0], [x], [u]
    step = 0
    while step * dt < t:
        now = step * dt
        times.append(now + dt)
        inputs.append(u1(now))
        # explicit Euler: x_{k+1} = x_k + dt * f(t_k, x_k, u_k)
        states.append(states[-1] + dt * f(now, states[-1], inputs[-1]))
        step += 1
    return np.array(times), np.array(states)
#x0 = [1,1,0,0,2]
#u0 = [0,0]
#print 'f(x0) = ', f(0.,x0,u0)
# Simulate 10 s from a fixed initial state and plot every state trajectory.
t1 = 10
xs = [2, 1, -1, 1, -1.57]  # initial [x1, x2, v1, v2, theta] (theta ~ -pi/2)
us = [0, 0]
t_, x_ = sim(f, t1, xs, us, dt=4e-2)
lineObjects = plt.plot(t_, x_, '-')
plt.xlabel('time (sec)')
plt.ylabel('state (init =[2,1,-1,1,-1.57])')
plt.legend(iter(lineObjects), ('x1', 'x2', 'v1', 'v2', 'theta'))
plt.show()
|
984,224 | f2703a8f7b7064f5bf3de720031f2528b35a4e01 |
class Solution:
    def findErrorNums(self, nums: list[int]) -> list[int]:
        """Return [duplicate, missing] for nums meant to contain 1..n.

        sum(nums) - sum(set(nums)) isolates the duplicated value; the
        missing one is the gap between the expected 1..n sum and the sum
        of distinct values.
        """
        # BUG FIX: annotations used typing.List which was never imported,
        # raising NameError at import time; use the builtin generic (PEP 585).
        distinct = set(nums)
        sum_set = sum(distinct)
        sum_all = sum(nums)
        duplicate = sum_all - sum_set
        expected = (1 + len(nums)) * len(nums) // 2
        return [duplicate, expected - sum_set]
|
984,225 | 07e2ce29c4a7841bf1a9dc0f62f71f09ff639e59 | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Herald Bluetooth Message Implementation
:author: Luc Libralesso
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.3
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from uuid import uuid4
DELIMITER = ':'  # separates the length prefix from the payload on the wire
HELLO_MESSAGE = b'[[[HELLO]]]'  # control frame; triggers MessageReader's hello callback
def to_string(msg):
    """Return *msg* unchanged if it is a str; convert bytes to str.

    Bytes are converted via their repr with the leading ``b'`` and trailing
    ``'`` stripped (useful for comparisons against str values).
    """
    if not isinstance(msg, bytes):
        return msg
    # repr of bytes looks like b'...'; drop the 2-char prefix and the quote
    text = str(msg)
    return text[2:-1]
def to_bluetooth_message(msg):
    """Frame *msg* for the wire as "<payload-length><DELIMITER><payload>"."""
    payload = to_string(msg)
    return '{}{}{}'.format(len(payload), DELIMITER, payload)
def gen_uuid():
    """Generate a random message id: a UUID4 in canonical string form."""
    return '{}'.format(uuid4())
class MessageReader:
    """Assembles Herald messages from fields emitted by the bluetooth automata.

    A complete SerialHeraldMessage is built once 8 consecutive fields have
    been buffered; HELLO control frames are intercepted (never buffered) and
    only fire the optional callback.
    """

    def __init__(self, automata, hello_callback=None):
        # automata: field source; must expose any_message()/get_message()
        self._automata = automata
        # fields accumulated for the message currently being assembled
        self._buffer = []
        # optional callable fired whenever a HELLO control frame arrives
        self._hello_received_callback = hello_callback

    def read(self):
        """
        reads herald message from bluetooth messages
        :return: SerialHeraldMessage if there are any, None elsewhere
        """
        if self._automata.any_message():
            msg = self._automata.get_message()
            # if there is a hello message
            # if len(self._buffer) == 0:
            # if we are not into reading a new herald message
            if to_string(msg) == to_string(HELLO_MESSAGE):
                # call the hello received callback
                if self._hello_received_callback:
                    self._hello_received_callback()
                # exiting before continuing in the
                # creation of an herald message
                return None
            self._buffer.append(msg)
            # 8 fields == one full message (see SerialHeraldMessage.__init__)
            if len(self._buffer) >= 8:
                res = SerialHeraldMessage(*self._buffer)
                self._buffer.clear()
                return res
        return None
class SerialHeraldMessage:
    """Herald message as carried over the bluetooth/serial link.

    All fields are normalised to str at construction time and a message uid
    is generated when none is supplied. to_automata_string() emits the 8
    fields, length-prefixed, in the order MessageReader expects them.
    """

    def __init__(self,
                 subject,
                 sender_uid,
                 original_sender,
                 final_destination,
                 content,
                 reply_to='',
                 message_uid='',
                 group=''):
        self._subject = to_string(subject)
        self._sender_uid = to_string(sender_uid)
        self._original_sender = to_string(original_sender)
        self._final_destination = to_string(final_destination)
        self._content = to_string(content)
        self._reply_to = to_string(reply_to)
        self._message_uid = to_string(message_uid)
        if self._message_uid == '':
            # no uid supplied: mint a fresh one
            self._message_uid = gen_uuid()
        # NOTE(review): group is stored as-is, not via to_string() like every
        # other field — confirm bytes input is impossible here.
        self._group = group

    def to_automata_string(self):
        # Wire format: each field framed as "<len>:<payload>", concatenated.
        res = ''
        res += to_bluetooth_message(self.subject)
        res += to_bluetooth_message(self.sender_uid)
        res += to_bluetooth_message(self.original_sender)
        res += to_bluetooth_message(self.final_destination)
        res += to_bluetooth_message(self.content)
        res += to_bluetooth_message(self.reply_to)
        res += to_bluetooth_message(self.message_uid)
        res += to_bluetooth_message(self.group)
        return res

    # read-only accessors for the message fields
    @property
    def subject(self):
        return self._subject

    @property
    def sender_uid(self):
        return self._sender_uid

    @property
    def original_sender(self):
        return self._original_sender

    @property
    def final_destination(self):
        return self._final_destination

    @property
    def content(self):
        return self._content

    @property
    def reply_to(self):
        return self._reply_to

    @property
    def message_uid(self):
        return self._message_uid

    @property
    def group(self):
        return self._group

    def set_uid(self, new_uid):
        # uid is the only field mutable after construction
        self._message_uid = new_uid

    def __str__(self):
        """
        :return: string representing the message
        """
        return """
===================
message uid: {}
subject: {}
sender uid: {}
original sender: {}
final destination: {}
replies to: {}
group: {}
-------------------
{}
-------------------
""".format(self.message_uid, self.subject, self.sender_uid,
           self.original_sender, self.final_destination, self.reply_to,
           self.group, self.content)
|
984,226 | 8d5762054909836bf04c2576602b13771d2e5c58 | import random
import math
import pyglet
from yaff.scene import Scene
from .player import Player
class GameScene(Scene):
    """Main gameplay scene: a WASD-controlled player sprite over a background."""

    # keyboard symbol -> Player movement direction
    KEY_MOVEMENT_MAPPING = {
        pyglet.window.key.A: Player.DIRECTION_LEFT,
        pyglet.window.key.D: Player.DIRECTION_RIGHT,
        pyglet.window.key.W: Player.DIRECTION_UP,
        pyglet.window.key.S: Player.DIRECTION_DOWN,
    }

    def __init__(self, *args, **kwargs):
        super(GameScene, self).__init__(*args, **kwargs)
        image = pyglet.resource.image('res/images/sprites/sprite.png')
        # single batch draws the player and body sprites in one call
        self.batch = pyglet.graphics.Batch()
        self.player = Player(0, 0, image, batch=self.batch)
        body_img = pyglet.resource.image('res/images/ui/body.png')
        self.body = pyglet.sprite.Sprite(body_img, x=100, y=0,
                                         batch=self.batch)
        self.body.scale = 0.3
        self.background = pyglet.resource.image('res/images/bg/bg.jpg')

    def on_key_press(self, symbol, modifier):
        # Returns True when the key was consumed (i.e. a movement key).
        if symbol in self.KEY_MOVEMENT_MAPPING:
            self.player.on_key_pressed(self.KEY_MOVEMENT_MAPPING[symbol])
            return True
        return False

    def on_key_release(self, symbol, modifier):
        # Mirror of on_key_press for key releases.
        if symbol in self.KEY_MOVEMENT_MAPPING:
            self.player.on_key_released(self.KEY_MOVEMENT_MAPPING[symbol])
            return True
        return False

    def on_update(self, dt):
        self.player.on_update(dt)

    def on_draw(self, window):
        # Draw order: base scene, then background, then batched sprites on top.
        super(GameScene, self).on_draw(window)
        self.background.blit(0, 0)
        self.batch.draw()
|
984,227 | 0ac434f642d480fbe85d9f24c44276f5b1a8a38c | #!/usr/bin/python
import json
import logging
import sys
import common
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
# Python 2 script: build one JVM launch command per configured group member
# and start them all via common.LauncherThread.
if len(sys.argv) != 1:
    print "usage: " + common.sarg(0)
    sys.exit(1)
config_file = common.SYSTEM_CONFIG_FILE
config_stream = open(config_file)
config = json.load(config_stream)
cmdList = []
for member in config["group_members"]:
    pid = member["pid"]
    group = member["group"]
    host = member["host"]
    port = member["port"]
    # JVM invocation; the HOSTNAME system property encodes "<pid>-<group>"
    launchNodeCmdString = [common.JAVA_BIN, common.JAVA_CLASSPATH, '-DHOSTNAME=' + str(pid) + "-" + str(group)]
    launchNodeCmdString += [common.LIBSKEEN_CLASS_SERVER, pid, config_file]
    # launchNodeCmdString += ["true", common.NODES[1], common.SENSE_PORT, common.SENSE_DIRECTORY, common.SENSE_DURATION, common.SENSE_WARMUP]
    launchNodeCmdString = " ".join([str(val) for val in launchNodeCmdString])
    cmdList.append({"node": host, "port": port, "cmdstring": launchNodeCmdString})
    # print launchNodeCmdString
config_stream.close()
print(cmdList)
thread = common.LauncherThread(cmdList)
thread.start()
thread.join()
|
984,228 | 428c7346d3ebaa806df04a4d918505e7e173f040 | # coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Quick look at the distribution of the SalePrice target column.
train_df = pd.read_csv('../data/train.csv')
# NOTE(review): distplot is deprecated in recent seaborn (histplot/displot).
sns.distplot(train_df['SalePrice'])
plt.show()
984,229 | 45c532428373cb63c5a31b91b9450360b39c479f | from data_utils import PeMSD7M, describe_data, load_data
from model import SpatioTemporalConv
from model_updated import STGCN
from torch_geometric.nn import GCNConv
from torch_geometric.data import Data
import torch.nn as nn
import torch
from torch.utils.tensorboard import SummaryWriter
# --- hyper-parameters and dataset dimensions ---
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
num_epochs = 200
batch_size = 12
# K_t/K_s: presumably temporal/spatial kernel sizes — confirm against STGCN
K_t = 3
K_s = 1
c = [1, 64, 16, 64]  # channel sizes passed through to STGCN
num_nodes = 228
num_edges = 51756
num_graphs = 12
dataset = PeMSD7M(root='./data/PeMS-M/')
train, val, test = load_data(dataset, batch_size)
print('Data Loading Complete!')
writer = SummaryWriter()  # TensorBoard logging
model = STGCN(K_t, K_s, c, num_nodes, num_edges, num_graphs).to(device)
# model = SpatioTemporalConv(3, 1, 1, 1).to(device)
# writer.add_graph(model, train)
criterion = nn.MSELoss(reduction='sum')
optimizer = torch.optim.RMSprop(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.7)
# Training loop: one optimisation pass plus a no-grad validation pass per epoch.
for epoch in range(num_epochs):
    # training
    for batch in train:
        labels = batch.label
        labels = labels.to(device)
        batch = batch.to(device)
        # forward pass
        outputs = model(batch)
        train_loss = criterion(outputs, labels)
        writer.add_scalar('Training Loss', train_loss, epoch)
        # backward, optimize
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()
    # Note: reports the loss of the LAST batch only, not an epoch average.
    print(f'Epoch [{epoch+1}]/[{num_epochs}], Training Loss:{train_loss:.4f}')
    # validation
    with torch.no_grad():
        for i, batch in enumerate(val):
            labels = batch.label
            labels = labels.to(device)
            batch = batch.to(device)
            outputs = model(batch)
            val_loss = criterion(outputs, labels)
            val_loss = torch.sqrt(val_loss)  # square root of the summed MSE
            writer.add_scalar('Validation Loss', val_loss, epoch)
            # print(outputs[100], labels[100])
        print(f'Epoch [{epoch+1}]/[{num_epochs}], Validation Loss:{val_loss:.4f}')
    scheduler.step()
print('Training Complete!')
# with torch.no_grad():
# n_correct = 0
# n_samples = 0
# for batch in test:
# labels = batch.label
# labels = labels.to(device)
# batch = batch.to(device)
# # forward pass
# outputs = model(batch)
# n_correct += torch.eq(outputs, labels).sum()
# n_samples += outputs.shape[0]
# acc = 100.0 * n_correct/n_samples
# print(f'Test Accuracy: {acc}%')
# Final evaluation on the held-out test split (per-batch sqrt of summed MSE).
with torch.no_grad():
    for batch in test:
        labels = batch.label
        labels = labels.to(device)
        batch = batch.to(device)
        outputs = model(batch)
        test_loss = criterion(outputs, labels)
        test_loss = torch.sqrt(test_loss)
        # NOTE(review): `epoch` here is just the leftover from the train loop.
        print(f'Epoch [{epoch+1}]/[{num_epochs}], Test Loss:{test_loss:.4f}')
print('Test Finished!')
|
984,230 | b9301297642ccea292ded110f5dd9a0ca70ddaf5 | from subprocess import call
from time import sleep
from pathlib import Path
import os
import zipfile
import json
formats = [".zip", ".rar"]  # archive extensions the main loop reacts to
cwd = os.getcwd()
new_wd = None  # destination folder of the most recent extraction
def openStatJsonR():
    """Read and return the statistics JSON kept next to this script.

    Returns None (after printing the error) when the file is missing
    or cannot be parsed.
    """
    try:
        stats_path = str(os.path.dirname(os.path.abspath(__file__))) + r"\statistics.txt"
        with open(stats_path) as json_file:
            return json.load(json_file)
    except Exception as e:
        print(e)
def add_del_folders_stat(amount):
    """Increment the persisted 'folders_deleted' counter by *amount*.

    Errors (missing stats file, bad JSON) are printed, not raised.
    """
    try:
        stats = openStatJsonR()
        stats["folders_deleted"] += amount
        stats_path = str(os.path.dirname(os.path.abspath(__file__))) + r"\statistics.txt"
        with open(stats_path, 'w') as outfile:
            json.dump(stats, outfile)
    except Exception as e:
        print(e)
def add_unzipped_stat(amount):
    """Increment the persisted 'data_unzipped' counter by *amount*.

    Errors (missing stats file, bad JSON) are printed, not raised.
    """
    try:
        stats = openStatJsonR()
        stats["data_unzipped"] += amount
        stats_path = str(os.path.dirname(os.path.abspath(__file__))) + r"\statistics.txt"
        with open(stats_path, 'w') as outfile:
            json.dump(stats, outfile)
    except Exception as e:
        print(e)
def createBackup(c):
    """Create "<file_name>_<c>" under cwd, recursing with c+1 on collision.

    Relies on the module globals `cwd` and `file_name` (set by the main
    loop below) and publishes the created path via the global `new_wd`.
    """
    global new_wd
    try:
        os.mkdir(cwd + r"\{0}_{1}".format(file_name, c))
        new_wd = cwd + r"\{0}_{1}".format(file_name, c)
        print("> created backup folder: {0}".format(new_wd))
    except:
        # folder already exists (or mkdir failed): try the next suffix
        createBackup(c + 1)
# Main pass: extract every archive in the current directory into a sibling
# folder, then optionally delete the archive.
for entry in os.scandir():
    if not entry.is_dir():
        file_path = Path(entry.name)
        file_name, file_format = os.path.splitext(str(file_path))
        whole_path = cwd + r"\{0}".format(file_path)
        if file_format in formats:
            try:
                os.mkdir(cwd + r"\{0}".format(file_name))
                new_wd = cwd + r"\{0}".format(file_name)
                print("> created folder: {0}".format(new_wd))
            except Exception as e:
                # target folder exists: fall back to "<name>_<n>" backup dirs
                createBackup(1)
            try:
                # NOTE(review): zipfile handles .zip only; .rar entries in
                # `formats` will raise here and just be reported.
                with zipfile.ZipFile(whole_path, 'r') as zip_ref:
                    zip_ref.extractall(new_wd)
                print("> transfer completed")
                add_unzipped_stat(1)
                try:
                    checker = input(r"> Delete Zip? Insert 'n' for no: ")
                    if checker != "n":
                        os.remove(whole_path)
                        print("> removed zip folder: {0}".format(file_path))
                        add_del_folders_stat(1)
                except Exception as e:
                    print(e)
                    # error handling
                print("\n")
            except Exception as e:
                print(e)
                # error handling
print("> done!")
sleep(0)
|
984,231 | 76fa6943fbc1c960dab5fdf4a0fcb25866de1c46 | from translate import *
import numpy as np
import pickle
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
def evaluate_case(test_txt, test_labels, words, txt_lang, labels_lang, model, device, max_label_len, output_msg='', save_pred=None):
    """Score a seq2seq labelling model on a test split and print metrics.

    For each sentence, translate_sentence() produces a predicted label
    sequence; its last element is dropped (pred[:-1]) — presumably an
    end-of-sequence marker, confirm against translate_sentence().

    Metrics:
      - acc_ord: exact (ordered) sequence match rate
      - acc_un : unordered match rate (same elements, same length)
      - acc_el/pre/rec/f1: element-wise scores over the binary
        label-presence matrix built from `words`

    If save_pred is given, labels/predictions/confidences are pickled there.
    Returns (acc_ord, acc_un, acc_el, pre, rec, f1).
    """
    acc_ord = 0
    acc_un = 0
    predictions = []
    predictions_bin = []
    confidence = []
    for sentence, case_labels in zip(test_txt, test_labels):
        pred, _, conf = translate_sentence(sentence, txt_lang, labels_lang, model, device, max_len=max_label_len)
        if pred[:-1] == case_labels:
            acc_ord += 1
        if all([x in case_labels for x in pred[:-1]]) and len(pred[:-1]) == len(case_labels):
            acc_un += 1
        predictions.append(pred)
        # NOTE(review): the binary vector tests membership in the FULL pred
        # (including the final marker token) — confirm this is intended.
        predictions_bin.append([1 if y in pred else 0 for y in words])
        confidence.append(conf)
    if save_pred is not None:
        output = {}
        output['labels'] = test_labels
        output['predictions'] = predictions
        output['confidence'] = confidence
        with open(save_pred, 'wb') as f:
            pickle.dump(output, f)
    # ground-truth binary matrix, flattened in the same (sample, word) order
    labels = [1 if y in x else 0 for x in test_labels for y in words]
    labels = np.array(labels)
    predictions_bin = np.array(predictions_bin)
    acc_ord = acc_ord / len(test_txt)
    acc_un = acc_un / len(test_txt)
    acc_el = accuracy_score(labels.reshape([-1]), predictions_bin.reshape([-1]))
    pre = precision_score(labels.reshape([-1]), predictions_bin.reshape([-1]))
    rec = recall_score(labels.reshape([-1]), predictions_bin.reshape([-1]))
    f1 = f1_score(labels.reshape([-1]), predictions_bin.reshape([-1]))
    print()
    print('{}Accuracy ordered: {:.4f}'.format(output_msg, acc_ord))
    print('{}Accuracy unordered: {:.4f}'.format(output_msg, acc_un))
    print('{}Accuracy per element: {:.4f}'.format(output_msg, acc_el))
    print('{}Precision per element: {:.4f}'.format(output_msg, pre))
    print('{}Recall per element: {:.4f}'.format(output_msg, rec))
    print('{}F1-score per element: {:.4f}'.format(output_msg, f1))
    print()
    return acc_ord, acc_un, acc_el, pre, rec, f1
|
984,232 | a127231cbbc03ab3c43a37b2b16398bef0455eca | import cv2
import argparse
import numpy as np
import itertools
def repeat(f, N):
    """Call f() exactly N times, discarding any return values."""
    for _ in range(N):
        f()
# set up arg parser & read args
parser = argparse.ArgumentParser(description="EIASR 2015Z project")
parser.add_argument("-i", "--image", required=True,
                    help="Path to an input image used in coin detection task")
args = parser.parse_args()
print("Image path: ", args.image)
# read image from path given as arg
image = cv2.imread(args.image)
orig = image.copy()
# get image width and height
height, width, _ = image.shape
target = 480
ratio = height/target
# scale factor bringing the image height towards `target`:
# use ratio or its inverse, whichever shrinks (is < 1)
resize_ratio = ratio if ratio < 1 else 1/ratio
print(*image.shape, sep=', ')
# convert to a grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# resize to speedup bilateral filter
resized = cv2.resize(gray, (0,0), fx=resize_ratio, fy=resize_ratio)
# BUG FIX: was `resizde.shape` (typo) which raised NameError at runtime.
print(*resized.shape, sep=', ')
# earlier pre-filtering experiments kept for reference:
# median = cv2.medianBlur(gray, ksize=15)
# gauss = cv2.GaussianBlur(gray, ksize=(25, 25), sigmaX=0)
# ret, thresh = cv2.threshold(bilateral, 0, 255,
#                             cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# show images
window_name = "Transformed"
tb_name_filter = "Filter"
nothing = lambda *args: None  # no-op trackbar callback
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.createTrackbar(tb_name_filter, window_name, 1, 255, nothing)
median = gray
# Interactive tuning loop: the trackbar position drives the median-blur
# kernel; press ESC to exit.
while True:
    # Get position in trackbar
    tb_pos = cv2.getTrackbarPos(tb_name_filter, window_name)
    # Apply blur (median kernel size must be odd)
    tb_pos = tb_pos if tb_pos % 2 == 1 else tb_pos + 1
    # filtered = cv2.GaussianBlur(gray, ksize=(tb_pos, tb_pos), sigmaX=0)
    filtered = cv2.medianBlur(gray, ksize=tb_pos)
    # apply Otsu binarization
    ret, thresh = cv2.threshold(filtered, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, ksize=(11, 11))
    opened = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, 10);
    dilated = cv2.dilate(opened, None, iterations = 10)
    # closed = cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, kernel, 10);
    # Show in window — note only `thresh` is shown; opened/dilated are unused
    cv2.imshow("Transformed", thresh)
    # If you press "ESC", it will return value
    ch = cv2.waitKey(5)
    if ch == 27:
        break
# Show the untouched input until a key is pressed, then tear down all windows.
cv2.namedWindow("Original", cv2.WINDOW_NORMAL)
cv2.imshow("Original", orig)
cv2.waitKey(0)
cv2.destroyAllWindows()
984,233 | 34f92bae7ec71bc80bd1b15becfef1bd6d7a211e | from flask import Flask, session, request, redirect, render_template, flash, url_for
from db.data_layer import get_show, create_user, login_user, get_user_by_id, create_like, get_user_likes, delete_like
from flask_wtf.csrf import CSRFProtect
app = Flask(__name__)
# NOTE(review): hard-coded secret key — move to config/env for production.
app.secret_key = '8118d0875ad5b6b3ad830b956b111fb0'
csrf = CSRFProtect(app)


@app.route('/')
def index():
    """Home page: the logged-in user's liked movies, or an empty list."""
    movies = []
    if 'user_id' in session:
        user = get_user_by_id(session['user_id'])
        movies = get_user_likes(user.id)
        return render_template('index.html', movies=movies, user_id=user.id)
    else:
        return render_template('index.html', movies=movies)


@app.route('/authenticate')
def authenticate():
    """Combined login/registration page."""
    return render_template('authenticate.html')
@app.route('/register', methods=['POST'])
def register():
    """Create an account from the signup form and log the user in."""
    name = request.form['html_fullname']
    email = request.form['html_email']
    password = request.form['html_password']
    confirm = request.form['html_confirm']
    user = create_user(name, email, password, confirm)
    if user:
        session['user_id'] = user.id
        session['name'] = user.name
        return redirect(url_for('index'))
    else:
        # NOTE(review): this iterates the *falsy* `user` value. If
        # create_user returns None on failure this raises TypeError; if it
        # returns an empty list no message is ever flashed — confirm the
        # data_layer contract.
        for messages in user:
            flash(messages)
        return redirect(url_for('authenticate'))


@app.route('/login', methods=['POST'])
def login():
    """Authenticate form credentials and start a session."""
    email = request.form['html_email']
    password = request.form['html_password']
    user = login_user(email, password)
    if user:
        session['user_id'] = user.id
        session['name'] = user.name
        return redirect(url_for('index'))
    else:
        # NOTE(review): same falsy-iteration concern as register().
        for messages in user:
            flash(messages)
        return redirect(url_for('authenticate'))
@app.route('/logout')
def logout():
    """Drop the entire session and return to the home page."""
    session.clear()
    return redirect(url_for('index'))


@app.route('/search', methods=['POST'])
def search_query():
    """Redirect the posted search form to the GET /search/<query> page."""
    query = request.form['query']
    return redirect(url_for('search', query=query))
@app.route('/search/<query>')
def search(query):
    """Search shows and mark which results the current user already likes."""
    movies = get_show(query)
    if 'user_id' in session:
        likes = get_user_likes(session['user_id'])
        liked = {}
        for like in likes:
            # api_id -> True lookup used by the template to toggle like buttons
            liked[like.movie.api_id] = True
    else:
        liked = {}
    return render_template('search.html', movies=movies, liked=liked)
@app.route('/like/<movie_id>')
def create_user_like(movie_id):
    """Record a like for the current user, then bounce back to the referrer."""
    user = get_user_by_id(session['user_id'])
    create_like(user.id, movie_id)
    url = request.headers['Referer']
    return redirect(url)


@app.route('/unlike/<movie_id>')
def delete_user_like(movie_id):
    """Remove a like, then bounce back to the referrer.

    NOTE(review): delete_like() is called with the movie id only, so the
    delete is not scoped to `user` — confirm that matches data_layer.
    """
    user = get_user_by_id(session['user_id'])
    delete_like(movie_id)
    url = request.headers['Referer']
    return redirect(url)


@app.route('/user/<user_id>')
def get_like(user_id):
    """Page listing the given user's liked movies."""
    movies = get_user_likes(user_id)
    return render_template('like.html', movies=movies)
# Development-server settings: reload templates on change.
app.jinja_env.auto_reload = True
# BUG FIX: the Flask config key is TEMPLATES_AUTO_RELOAD (plural);
# the original 'TEMPLATE_AUTO_RELOAD' was silently ignored.
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.run(debug=True, use_reloader=True)
|
984,234 | 4cd8adb95886ed6dee4fd6a61da970d3ab72cb31 | import os
def setup_key(remote, port):
    """Append the local id_rsa.pub to remote's authorized_keys (Python 2).

    NOTE(review): reads the module-level global `user` (bound by the loop
    below), not a parameter, and hard-codes /home/ahagen — confirm intended.
    """
    os.system('ssh %s@%s -p %d mkdir -p .ssh' % (user, remote, port))
    cmd = "cat /home/ahagen/.ssh/id_rsa.pub | ssh %s@%s -p %d 'cat >> .ssh/authorized_keys'" % (user, remote, port)
    print cmd
    os.system(cmd)
# check if ~/.ssh/id_rsa.pub is there
# if not, run ssh-keygen
users = ['inokuser', 'inokuser', 'inokuser']
remotes = ['128.46.92.228', '128.46.92.228', '128.46.92.228']
ports = [2120, 2220, 2420]
# then, for remotes in list
# Push the key to each (user, host, port) triple; the loop variable `user`
# is also the global that setup_key() reads.
for user, remote, port in zip(users, remotes, ports):
    # run the authorized keys
    setup_key(remote, port)
|
984,235 | ed507f910d0d19b54cca46bf416bf7ad0414c717 | from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
# Flags and geometry constants for the ring minigame.
ENDLESS_GAME = config.GetBool('endless-ring-game', 0)
NUM_RING_GROUPS = 16
MAX_TOONXZ = 15.0
MAX_LAT = 5
MAX_FIELD_SPAN = 135
CollisionRadius = 1.5
CollideMask = ToontownGlobals.CatchGameBitmask
TARGET_RADIUS = (MAX_TOONXZ / 3.0) * 0.90000000000000002
# (localized name, RGBA colour) pairs for the ring targets
targetColors = ((TTLocalizer.ColorRed,
                 VBase4(1.0, 0.40000000000000002, 0.20000000000000001, 1.0)),
                (TTLocalizer.ColorGreen,
                 VBase4(0.0, 0.90000000000000002, 0.20000000000000001, 1.0)),
                (TTLocalizer.ColorOrange,
                 VBase4(1.0, 0.5, 0.25, 1.0)),
                (TTLocalizer.ColorPurple,
                 VBase4(1.0, 0.0, 1.0, 1.0)),
                (TTLocalizer.ColorWhite,
                 VBase4(1.0, 1.0, 1.0, 1.0)),
                (TTLocalizer.ColorBlack,
                 VBase4(0.0, 0.0, 0.0, 1.0)),
                (TTLocalizer.ColorYellow,
                 VBase4(1.0, 1.0, 0.20000000000000001, 1.0)))
ENVIRON_LENGTH = 300
ENVIRON_WIDTH = 150.0
ringColorSelection = [(0, 1, 2), 3, 4, 5, 6]
# RGBA component tables for ring colours (dict-literal form; the primary
# colours use 0.5 alpha, black/white are opaque).
colorRed = {'Red': 1.0, 'Green': 0.0, 'Blue': 0.0, 'Alpha': 0.5}
colorBlue = {'Red': 0.0, 'Green': 0.0, 'Blue': 1.0, 'Alpha': 0.5}
colorGreen = {'Red': 0.0, 'Green': 1.0, 'Blue': 0.0, 'Alpha': 0.5}
colorYellow = {'Red': 1.0, 'Green': 1.0, 'Blue': 0.0, 'Alpha': 0.5}
colorPurple = {'Red': 0.75, 'Green': 0.0, 'Blue': 1.0, 'Alpha': 0.5}
colorOrange = {'Red': 1.0, 'Green': 0.59999999999999998, 'Blue': 0.0, 'Alpha': 0.5}
colorBlack = {'Red': 0.0, 'Green': 0.0, 'Blue': 0.0, 'Alpha': 1.0}
colorWhite = {'Red': 1.0, 'Green': 1.0, 'Blue': 1.0, 'Alpha': 1.0}
# Per-zone difficulty tuning for the ring game. Each value is a 7-slot list:
# [ints, ints, floats, [four colour dicts], ints, int, int] — the field
# semantics live in the consuming game code; confirm there before editing.
difficultyPatterns = {
    ToontownGlobals.ToontownCentral:
        [[8, 4, 2, 0], [10, 16, 21, 28], [31, 15, 7, 3.5],
         [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 10, 2],
    ToontownGlobals.DonaldsDock:
        [[7, 4, 2, 0], [11, 17, 23, 32], [29, 13, 6.5, 3.2000000000000002],
         [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 9, 2],
    ToontownGlobals.DaisyGardens:
        [[6, 4, 2, 0], [11, 18, 25, 34], [29, 13, 6.5, 3.1000000000000001],
         [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 8, 2],
    ToontownGlobals.MinniesMelodyland:
        [[6, 4, 2, 0], [12, 19, 27, 37], [28, 12, 6, 3.0],
         [colorGreen, colorBlue, colorYellow, colorPurple], [2, 2, 2, 1], 8, 2],
    ToontownGlobals.TheBrrrgh:
        [[5, 4, 2, 0], [12, 20, 29, 40], [25, 12, 5.5, 2.5],
         [colorGreen, colorBlue, colorYellow, colorPurple], [2, 2, 2, 1], 7, 2],
    ToontownGlobals.DonaldsDreamland:
        [[4, 3, 1, 0], [12, 21, 31, 42], [20, 10, 4.5, 2.0],
         [colorBlue, colorYellow, colorPurple, colorOrange], [2, 2, 2, 1], 7, 2]
}
|
984,236 | 112ec4f0712a04d3c71ebd2752680bd0228d0827 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ChildForm.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ChildForm(object):
    """Auto-generated (pyuic5) UI scaffold for the child window.

    Do not hand-edit widget setup; regenerate from ChildForm.ui instead.
    """

    def setupUi(self, ChildForm):
        # Build an empty 800x600 main window with a menu bar and status bar.
        ChildForm.setObjectName("ChildForm")
        ChildForm.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(ChildForm)
        self.centralwidget.setObjectName("centralwidget")
        ChildForm.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(ChildForm)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        ChildForm.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(ChildForm)
        self.statusbar.setObjectName("statusbar")
        ChildForm.setStatusBar(self.statusbar)
        self.retranslateUi(ChildForm)
        QtCore.QMetaObject.connectSlotsByName(ChildForm)

    def retranslateUi(self, ChildForm):
        # Install the translatable window strings.
        _translate = QtCore.QCoreApplication.translate
        ChildForm.setWindowTitle(_translate("ChildForm", "MainWindow"))
|
984,237 | 8912602048121cf8b79517ec4013c284082e833b | from tkinter import filedialog
from tkinter import messagebox
from tkinter import *
from scrapy import spiderloader
from scrapy.utils import project
from scrapy.utils.log import configure_logging
from scrapy.crawler import CrawlerRunner
from twisted.internet import reactor
import threading
def get_spiders():
    """Return the names of all spiders registered in this Scrapy project."""
    project_settings = project.get_project_settings()
    loader = spiderloader.SpiderLoader.from_settings(project_settings)
    return loader.list()
def get_chosen_spider(spider):
    """Remember the dropdown's spider choice in the module-global and echo it."""
    global chosen_spider
    chosen_spider = spider
    return spider
def get_chosen_feed(feed):
    """Remember the selected export feed type ('json'/'csv') and echo it."""
    global chosen_feed
    chosen_feed = feed
    return feed
def browse_btn():
    """Open a directory picker and mirror the choice into the path entry.

    Publishes the selection via the module-global `folder_path`.
    NOTE(review): the module later rebinds the name `browse_btn` to a Button
    widget; the Button's command captured this function first so it still
    works, but the shadowing is easy to trip over.
    """
    global folder_path
    folder_path = filedialog.askdirectory()
    folder_path_entry.delete(0, END)
    folder_path_entry.insert(0, folder_path)
    return folder_path
# Spawning a new thread so the Tk mainloop stays responsive while scraping.
def start_execute_thread(event):
    """Run execute_spider() on a daemon thread and start polling it."""
    global execute_thread
    execute_thread = threading.Thread(target=execute_spider, daemon=True)
    execute_thread.start()
    # NOTE(review): app.after() invokes its callback with no arguments, so
    # check_execute_thread must be callable without one.
    app.after(10, check_execute_thread)
def check_execute_thread(event=None):
    """Poll the scraper thread; reschedule the check while it is alive.

    BUG FIX: `event` now defaults to None. Tk's app.after() fires the
    callback with no arguments, so the original required-parameter version
    raised TypeError on every timer tick.
    """
    if execute_thread.is_alive():
        app.after(10, check_execute_thread)
def execute_spider():
    """Validate form inputs, then run the chosen spider via CrawlerRunner.

    Results go to <folder_path>/<dataset>.<chosen_feed>. The Twisted reactor
    runs with installSignalHandlers=False because this executes off the
    main thread.
    """
    if dataset_entry.get() == '' or chosen_feed not in ['csv', 'json']:
        messagebox.showerror("Error IF", 'All entries are required')
        return
    try:
        feed_url = f"file:///{folder_path}/{dataset_entry.get()}.{chosen_feed}"
    except:
        # NOTE(review): if folder_path was never set, this shows the error
        # but then falls through and uses an undefined feed_url — confirm.
        messagebox.showerror("Error TRY", 'All entries are required')
    settings = project.get_project_settings()
    settings.set('FEED_URI', feed_url)
    settings.set('FEED_TYPE', chosen_feed)
    configure_logging()
    runner = CrawlerRunner(settings)
    runner.crawl(chosen_spider)
    reactor.run(installSignalHandlers=False)
def drop_menu_items(sp_label='Choose a spider', sp_text='choose a spider', spiders_list=['Spider_1', 'spider_2'], scom='', srow=0, scolumn=0, drow=0, dcolumn=1, clspan=2, spx=10, spy=10):
    """Lay out a Label + OptionMenu pair on the app grid.

    scom is the callback invoked with the selected value.
    NOTE(review): spiders_list is a mutable default argument; it is never
    mutated here, but switch to None-and-rebuild if that ever changes.
    """
    spider_label = Label(app, text=sp_label)
    spider_label.grid(row=srow, column=scolumn, sticky=W, pady=spx, padx=spy)
    spider_text = StringVar(app)
    spider_text.set(sp_text)
    spiders_dropdown = OptionMenu(app, spider_text, *spiders_list, command=scom)
    spiders_dropdown.grid(row=drow, column=dcolumn, columnspan=clspan)
# --- GUI assembly ---
app = Tk()
spiders_loader_list = [spider for spider in get_spiders()]
# Spiders List
drop_menu_items(spiders_list=spiders_loader_list, scom=get_chosen_spider)
# Feed Type
drop_menu_items(sp_label='Choose a feed', sp_text='choose a feed', scom=get_chosen_feed, spiders_list=['json', 'csv'], srow=1, scolumn=0, drow=1, dcolumn=1)
# Path Entry
folder_path_text = StringVar(app)
folder_path_entry = Entry(app, textvariable=folder_path_text)
folder_path_entry.grid(row=2, column=0, pady=10, padx=10)
# Dataset Entry
dataset_text = StringVar(app)
dataset_entry = Entry(app, textvariable=dataset_text, width=10)
dataset_entry.grid(row=2, column=1, pady=10, padx=10)
# NOTE(review): this rebinding shadows the browse_btn() function above.
browse_btn = Button(app, text="Browse", command=browse_btn)
browse_btn.grid(row=2, column=2)
exe_btn = Button(app, text="Execute", command=lambda: start_execute_thread(None))
exe_btn.grid(row=3, column=0, columnspan=3)
app.title('Spider Executor')
app.geometry('300x200')
app.resizable(False, False)
app.mainloop()
|
984,238 | 5ae06b7dc1de658fc836be9f8b33ad634bac15ab | # -*- coding: utf-8 -*-
# Coin/bill change exercise (prompts are Portuguese):
# a/b are coin denominations, c is the bill value; qa/qb will count coins.
a = int(input('Moeda a: '))   # "Coin a"
b = int(input('Moeda b: '))   # "Coin b"
c = int(input('Cédula: '))    # "Bill"
qa = 0
qb = 0
984,239 | 57e43deb1bb4bfb534b4369673ec3d1409aa2432 | from sphinxcontrib.domaintools import custom_domain
def setup(app):
    """Sphinx extension entry point: register the `vagga` custom domain.

    Adds a single `vagga:opt` object type for documenting Yaml options,
    each indexed as "option: <name>".
    """
    app.add_domain(custom_domain('VaggaOptions',
        name = 'vagga',
        label = "Vagga Yaml Options",
        elements = dict(
            opt = dict(
                objname = "Yaml Option",
                indextemplate = "option: %s",
            ),
        )))
|
984,240 | f933123c6fa8cc1ba48f02910a3a595ca3b03d07 | from rest_framework import serializers
from datetime import datetime
class UserFetchCovDataSerializer(serializers.Serializer):
    """Validates the optional query parameters for fetching COVID data."""

    # BUG FIX: the original used default=datetime.now(), which is evaluated
    # ONCE at import time, freezing the default timestamp for the process
    # lifetime. DRF accepts a callable default, evaluated per validation.
    timeline = serializers.DateTimeField(required=False, allow_null=True, default=datetime.now)
    country = serializers.CharField(required=False)
984,241 | fd8475f3cf345290dcf55f1148a46212f49e00e5 | import RPi.GPIO as GPIO
from mfrc522 import SimpleMFRC522
# RFID reader on SPI bus 0, device (chip-select) 1.
reader = SimpleMFRC522.SimpleMFRC522(bus=0, device=1)
try:
    print("Now place your tag to write the new key")
    reader.modify_key()
finally:
    # always release the GPIO pins, even if the write is interrupted
    GPIO.cleanup()
984,242 | ec80678bf7b96d89f8fb4f8dd87a36c57ec2a869 | __author__ = "saeedamen" # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on a "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
from findatapy.util import DataConstants
class SwimPool(object):
    """Creating thread and process pools in a generic way. Allows users to
    specify the underlying thread or multiprocess library
    they wish to use. Note you can share Pool objects between processes.
    """

    def __init__(self, multiprocessing_library=None):
        # The pool is created lazily by create_pool() and cached here.
        self._pool = None
        if multiprocessing_library is None:
            multiprocessing_library = DataConstants().multiprocessing_library
        self._multiprocessing_library = multiprocessing_library
        self._thread_technique = 'na'
        # freeze_support() is needed for frozen Windows executables; import
        # failures are deliberately swallowed (library may be absent).
        if multiprocessing_library == 'multiprocess':
            try:
                import multiprocess;
                multiprocess.freeze_support()
            except:
                pass
        elif multiprocessing_library == 'multiprocessing_on_dill':
            try:
                import multiprocessing_on_dill;
                multiprocessing_on_dill.freeze_support()
            except:
                pass
        elif multiprocessing_library == 'multiprocessing':
            try:
                import multiprocessing;
                multiprocessing.freeze_support()
            except:
                pass

    def create_pool(self, thread_technique, thread_no, force_new=True,
                    run_in_parallel=True):
        """Create (or reuse) a pool.

        :param thread_technique: 'thread' or 'multiprocessing'
        :param thread_no: worker count (forced to 1 when run_in_parallel is False)
        :param force_new: when False, return the cached pool if one exists
        :param run_in_parallel: False selects a threaded dummy pool of size 1
        """
        self._thread_technique = thread_technique
        if not (force_new) and self._pool is not None:
            return self._pool
        if thread_technique == "thread" or run_in_parallel == False:
            from multiprocessing.dummy import Pool
        elif thread_technique == "multiprocessing":
            # most of the time is spend waiting for Bloomberg to return, so can use threads rather than multiprocessing
            # must use the multiprocessing_on_dill library otherwise can't pickle objects correctly
            # note: currently not very stable
            if self._multiprocessing_library == 'multiprocessing_on_dill':
                from multiprocessing_on_dill import Pool
            elif self._multiprocessing_library == 'multiprocess':
                from multiprocess import Pool
            elif self._multiprocessing_library == 'multiprocessing':
                from multiprocessing import Pool
            elif self._multiprocessing_library == 'pathos':
                from pathos.multiprocessing import Pool
                # from pathos.pools import ProcessPool as Pool
            elif self._multiprocessing_library == 'billiard':
                from billiard.pool import Pool
        if run_in_parallel == False: thread_no = 1
        self._pool = Pool(thread_no)
        return self._pool

    def close_pool(self, pool, force_process_respawn=False):
        """Close and join *pool*, unless it should be kept alive for reuse.

        Pools created with the 'multiprocessing' technique (or the pathos
        library) are left open by default so their workers can be reused;
        pass force_process_respawn=True to close them anyway.
        """
        if pool is not None:
            if (self._thread_technique != 'multiprocessing' and
                    self._multiprocessing_library != 'pathos') \
                    or force_process_respawn:
                pool.close()
                pool.join()
|
984,243 | bb5ab5f9c0dadab655434ab446ac9e4a14941c41 | def getUserInput():
""" Function for money and cost input and
for computing the maximum amount of apples
that you can buy and the amount of your change"""
money_ = float(input('Enter the amount of your money: '))
cost_ = float(input('Enter the price of an apple: '))
apple_= int(money_/cost_)
change_ = float(money_%cost_)
return apple_, change_
def displayOutput(apples_, change_):
    """Print the purchasable apple count and the change (2 decimal places)."""
    message = 'You can buy {0} apples and your change is {1:.2f} pesos.'.format(
        apples_, change_)
    print(message)
apples, change = getUserInput()
displayOutput(apples, change) |
984,244 | 6ded1b4be5dc2e7e3a7846709d32f6dee80e80be | # -*- coding: utf-8 -*-
"""Console script for av_slice."""
import sys
import click
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from .audio import loud_sections
from .video import join_sections
@click.command()
@click.option('--output_file', default='', help='filename of output')
@click.argument('file')
@click.option('--threshold', default=.01,
              help='threshold under which to make a cut')
def video(file, output_file: str, threshold: float):
    """Cut quiet sections out of FILE and write the shortened video.

    When --output_file is empty, '<name>_modified.<ext>' is derived from FILE.
    """
    if output_file == '':
        # Split once on the first dot; re-join the remaining extension parts
        n, *ext = file.split('.')
        output_file = f'{n}_modified.{".".join(ext)}'
    click.echo(f'saving result to {output_file}')
    click.echo('calculating removals...')
    inpt = VideoFileClip(file)
    # chunk_length is the number of audio frames in 1 video frame as this is
    # the smallest resolution possible to split the video up into.
    cuts = loud_sections(inpt.audio, int(inpt.audio.fps/inpt.fps),
                         threshold=threshold)
    click.echo(f'making {len(cuts)} cuts')
    final = join_sections(inpt, cuts)
    final.write_videofile(output_file)
@click.command()
@click.option('--output_file', default='', help='filename of output')
@click.argument('file')
@click.option('--threshold', default=.01,
              help='threshold under which to make a cut')
@click.option('-a', 'audio_input', help='input file is an audio file',
              is_flag=True, flag_value=True)
@click.option('-v', 'audio_input', help='input file is a video file',
              is_flag=True, flag_value=False)
@click.option('--resolution', default=1/30,
              help='resolution of search in seconds')
def audio(file, output_file, threshold, audio_input, resolution):
    """Extract the audio track of FILE and remove silent/quiet portions.

    FILE may be audio (-a) or video (-v); only the audio is written out.
    NOTE(review): here `resolution` is passed to loud_sections directly,
    while video() passes a frame count -- confirm loud_sections' expected
    unit for its second argument.
    """
    click.echo(f'extracting audio from {file} and removing silent/quiet' +
               ' portions')
    if output_file == '':
        n, *ext = file.split('.')
        output_file = f'{n}_modified.{".".join(ext)}'
    click.echo(f'saving result to {output_file}')
    click.echo(f'calculating removals...')
    if audio_input:
        audio = AudioFileClip(file)
    else:
        # Video input: pull the embedded audio track
        infile = VideoFileClip(file)
        audio = infile.audio
    cuts = loud_sections(audio, resolution, threshold=threshold)
    final = join_sections(audio, cuts)
    final.write_audiofile(output_file, fps=audio.fps)
if __name__ == "__main__":
sys.exit(video()) # pragma: no cover
|
984,245 | d404a6eaa7925dc26bdec858ffcc7743002cec32 | import json
# Example player record to round-trip through JSON.
player = {
    'armor_type' : 1,
    'armor_mod' : 1,
    'armor_name' : 'Iron',
    'weapon_type': 0,
    'weapon_mod' : 3,
    'weapon_name': 'Sword',
    'hand_coeff' : 1
}

# Serialize the player to disk ...
with open('data.txt', 'w') as outfile:
    json.dump(player, outfile)

# ... and read it back to demonstrate the round trip.
with open('data.txt') as json_file:
    data = json.load(json_file)
    print(data)
    # Plain loop for the side effect (the original built a throwaway list
    # of None via a comprehension just to call print).
    for key, value in data.items():
        print(f'{key}: {value}')
|
984,246 | ff1a82b3d9266cb411b039e405c158ba98da5eb3 | import numpy as np
import matplotlib.pyplot as plt
from utils import conversion_db_veces
from my_plots import plot_histograma
def mu_y(snr, n, sigma):
    """Return the first summand of the analogue output as a numpy array.

    Parameters
    ----------
    snr : array-like
        Linear (not dB) SNR values.
    n : int
        Number of stages; the ratio is raised to (n-1)/2.
    sigma : float
        Noise standard deviation.

    Returns
    -------
    numpy.ndarray
        sigma*sqrt(snr) * (snr/(snr+1))**((n-1)/2), element-wise.
    """
    exponente = (n - 1) / 2
    snr = np.asarray(snr, dtype=float)
    # Vectorised replacement for the original per-element comprehension
    cociente_snr = (snr / (snr + 1.0)) ** exponente
    hA = sigma * np.sqrt(snr)
    return hA * cociente_snr
def arr_cociente_snr(snr_hz, n, sigma):
    """Return the list of (SNR/(SNR+1))**(n-i) factors for i = 1..n.

    Part of the second summand of the analogue output. ``sigma`` is kept
    for interface compatibility and is not used here.
    """
    ratio = snr_hz / (snr_hz + 1)
    return [ratio ** (n - i) for i in range(1, n + 1)]
def simulacion_analogica ():
    '''
    Return an array with the analogue output Yn: for each SNR value (5..24 dB
    converted to linear), Monte-Carlo estimate (M = 1e6 draws) of the
    probability that the noise-only statistic exceeds the signal mean mu.
    '''
    snr_veces = conversion_db_veces(np.arange(5,25))
    sigma = 1.0
    M = int(1e6)
    n=9
    y = np.array([])
    mu = mu_y(snr_veces, n=9, sigma=sigma)
    w = np.random.normal(size = M, scale= sigma)
    w.resize( (1, M) )
    for _ in range(1, n): #builds an n x M random array row by row
        w_i = np.random.normal(size = M, scale= sigma)
        w_i.resize( (1,M) )
        w = np.append( w, w_i, axis = 0)
    for mu_i, snr in zip(mu, snr_veces):
        arr_snr = arr_cociente_snr(snr, n, sigma) #returns a length-9 vector
        realiz = np.dot(arr_snr, w)
        probabilidad_error = np.count_nonzero( realiz > mu_i )/M #counts the True entries, so the condition is inverted
        y = np.append(y, probabilidad_error)
    # NOTE(review): only the last loop iteration's `realiz` is plotted -- confirm intended
    plot_histograma(realiz, snr_veces, mu, arr_cociente_snr)
    return y
984,247 | 36999f67bc5d6800d8c2e7284c20321138d989dd |
import tensorflow as tf
import numpy as np
from lib.Layer import Layer
from lib.ConvBlock import ConvBlock
from lib.ConvDWBlock import ConvDWBlock
from lib.UpSample import UpSample
class DecodeBlock(Layer):
    """Depthwise-separable convolution block followed by spatial upsampling.

    forward() runs ConvDW (3x3 depthwise) -> ConvPW (1x1 pointwise,
    fin -> fout channels) -> UpSample(ksize); bp() back-propagates through
    the three stages in reverse order.
    """

    def __init__(self, input_shape, filter_shape, ksize, init, name, load=None, train=True):
        self.input_shape = input_shape
        self.batch, self.h, self.w, self.fin = self.input_shape

        self.filter_shape = filter_shape
        self.fin, self.fout = self.filter_shape

        self.ksize = ksize
        self.init = init
        self.name = name
        self.load = load
        self.train_flag = train

        # Shapes seen by each stage: dw keeps fin channels, pw maps fin->fout,
        # upsample operates on the pointwise output.
        input_shape_1 = [self.batch, self.h, self.w, self.fin]
        input_shape_2 = [self.batch, self.h, self.w, self.fout]
        input_shape_3 = input_shape_2

        self.conv_dw = ConvDWBlock(input_shape=input_shape_1,
                                   filter_shape=[3, 3, self.fin, 1],
                                   strides=[1,1,1,1],
                                   init=self.init,
                                   name=self.name + '_conv_block_dw',
                                   load=self.load,
                                   train=self.train_flag)

        self.conv_pw = ConvBlock(input_shape=input_shape_2,
                                 filter_shape=[1, 1, self.fin, self.fout],
                                 strides=[1,1,1,1],
                                 init=self.init,
                                 name=self.name + '_conv_block_pw',
                                 load=self.load,
                                 train=self.train_flag)

        self.upsample = UpSample(input_shape=input_shape_3, ksize=self.ksize)

    ###################################################################

    def get_weights(self):
        """Return the trainable weights of the dw and pw sub-blocks."""
        weights = []
        weights.extend(self.conv_dw.get_weights())
        weights.extend(self.conv_pw.get_weights())
        return weights

    def output_shape(self):
        # NOTE(review): this returns the bound method itself (the method name
        # shadows any attribute) -- almost certainly a bug in the original;
        # the real output shape is presumably the upsampled pw shape. Left
        # unchanged pending confirmation of UpSample's semantics.
        return self.output_shape

    def num_params(self):
        """Total parameter count of the two convolution sub-blocks."""
        return self.conv_dw.num_params() + self.conv_pw.num_params()

    ###################################################################

    def forward(self, X):
        """Run dw -> pw -> upsample; return activation and per-stage cache."""
        conv_dw = self.conv_dw.forward(X)
        conv_pw = self.conv_pw.forward(conv_dw['aout'])
        up = self.upsample.forward(conv_pw['aout'])
        cache = {'conv_dw':conv_dw, 'conv_pw':conv_pw, 'up': up}
        return {'aout':up['aout'], 'cache':cache}

    def bp(self, AI, AO, DO, cache):
        """Back-propagate DO through upsample, pw and dw (in that order).

        Returns ({'dout', 'cache'}, grads) where grads holds only the dw and
        pw gradients (the upsample stage presumably has no trainable
        parameters, so `gup` is discarded -- confirm against UpSample).
        """
        up, conv_dw, conv_pw = cache['up'], cache['conv_dw'], cache['conv_pw']
        # Fixed: the original called self.up.bp(...), but the attribute
        # created in __init__ is self.upsample -- self.up raised AttributeError.
        dup, gup = self.upsample.bp(conv_pw['aout'], up['aout'], DO, up['cache'])
        dconv_pw, gconv_pw = self.conv_pw.bp(conv_dw['aout'], conv_pw['aout'], dup['dout'], conv_pw['cache'])
        dconv_dw, gconv_dw = self.conv_dw.bp(AI, conv_dw['aout'], dconv_pw['dout'], conv_dw['cache'])

        cache.update({'dconv_dw':dconv_dw, 'dconv_pw':dconv_pw, 'dup':dup})

        grads = []
        grads.extend(gconv_dw)
        grads.extend(gconv_pw)

        return {'dout':dconv_dw['dout'], 'cache':cache}, grads

    def dfa(self, AI, AO, DO, cache):
        """Direct feedback alignment falls back to standard backprop here."""
        return self.bp(AI, AO, DO, cache)

    def lel(self, AI, AO, DO, cache):
        """Local error learning falls back to standard backprop here."""
        return self.bp(AI, AO, DO, cache)

    ###################################################################
|
984,248 | 8139e7c31c945f1434ea1fda907e696172ff549e | from Core.DAO.FactorDao.FactorDao import FactorDao
from Core.DAO.TableMakerDao import TableMaker
from Core.Conf.DatabaseConf import Schemas
from Core.Error.Error import Error
import traceback
class Initializer(object):
    """
    This class implements the interface of the factor manager's
    initialization: schema creation, bookkeeping tables and the factor
    generator directory.
    """

    def __init__(self, db_engine, logger):
        self._dao = FactorDao(db_engine, logger)
        self._table_maker = TableMaker(db_engine, logger)
        self._logger = logger.sub_logger(self.__class__.__name__)

    def _check_runtime_environment(self):
        # Placeholder: runtime environment checks are not implemented yet
        pass

    @staticmethod
    def _run_steps(steps):
        """Run the given zero-argument callables in order.

        Returns the first truthy (error) result, or Error.SUCCESS when all
        steps pass. Factors out the repeated err-check chains below.
        """
        for step in steps:
            err = step()
            if err:
                return err
        return Error.SUCCESS

    def create_schemas(self):
        """Create all database schemas used by the factor manager."""
        tm = self._table_maker
        return self._run_steps([
            lambda: tm.create_schema_if_not_exist(Schemas.SCHEMA_META),
            lambda: tm.create_schema_if_not_exist(Schemas.SCHEMA_FACTOR_DATA),
            lambda: tm.create_schema_if_not_exist(Schemas.SCHEMA_TICK_DATA),
            lambda: tm.create_schema_if_not_exist(Schemas.SCHEMA_STOCK_VIEW_DATA),
        ])

    def create_factor_tables(self):
        """Create the factor bookkeeping tables."""
        tm = self._table_maker
        return self._run_steps([
            tm.create_factor_list_table,
            tm.create_factor_version_table,
            tm.create_factor_tick_linkage_table,
            tm.create_factor_update_log_table,
            tm.create_group_factor_list_table,
        ])

    def create_tick_tables(self):
        """Create the tick-data bookkeeping tables."""
        tm = self._table_maker
        return self._run_steps([
            tm.create_tick_update_log_table,
            tm.create_stock_view_list_table,
        ])

    def create_name_node_tables(self):
        """Create the name node's task-tracking tables."""
        tm = self._table_maker
        return self._run_steps([
            tm.create_finished_tasks_table,
            tm.create_finish_task_dependency_table,
        ])

    @staticmethod
    def create_factor_generator_dir():
        """
        Create the root directory for factor generator scripts and put it
        (plus the project base) on sys.path.
        """
        try:
            import sys, os
            from Core.Conf.PathConf import Path
            sys.path.append(Path.FACTOR_GENERATOR_BASE)
            sys.path.append(Path.SKYECON_BASE)
            os.mkdir(Path.FACTOR_GENERATOR_BASE)
        except Exception:
            # Best-effort: the directory may already exist
            pass

    def init_master_node(self):
        """Initialize schemas, tables and directories for the name node.

        Aborts the process (exit(-1)) on the first failing group, matching
        the original behaviour.
        """
        self._logger.log_info("initializing name node...")
        # check runtime environment
        self._check_runtime_environment()
        # create schemas and tables (typo "schams" fixed in the message)
        if self.create_schemas():
            print("Failed to init schemas, aborted")
            exit(-1)
        if self.create_factor_tables():
            print("Failed to init factor tables, aborted")
            exit(-1)
        if self.create_tick_tables():
            print("Failed to init tick tables, aborted")
            exit(-1)
        if self.create_name_node_tables():
            print("Failed to init name node tables, aborted")
            exit(-1)
        # create factor generator path
        self.create_factor_generator_dir()
        self._logger.log_info("successfully initialized name node.")
class LogInitializer(object):
    """Utility for creating the NameNode log directory tree."""

    @staticmethod
    def init_log_dir():
        """Create ../Log/NameNode and its error/warn/info sub-directories.

        Requires ../Log to already exist; any failure is printed and the
        process exits.
        """
        import os
        assert os.path.exists("../Log")
        try:
            base = "../Log/NameNode"
            if not os.path.exists(base):
                os.mkdir(base)
            for level in ('error', 'warn', 'info'):
                target = base + '/' + level
                if not os.path.exists(target):
                    os.mkdir(target)
        except:
            traceback.print_exc()
            exit()
|
984,249 | df07807082528d8e4fed67619d27dd12e1594666 | class GitDeployer:
pass |
984,250 | 92b4869236daf04128685c808ecc7dde8e197278 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import os
import env
# creating a connection to connect to the Codeup Student Database
def get_connection(db, user=env.user, host=env.host, password=env.password):
    """Build a mysql+pymysql SQLAlchemy connection URL for database ``db``.

    Credentials default to the values in the local env.py module.
    """
    return 'mysql+pymysql://{}:{}@{}/{}'.format(user, password, host, db)
def get_zillow_data():
    '''This function will connect to the Codeup Student Database. It will then cache a local copy to the computer to use for later
    in the form of a CSV file. If you want to reproduce the results, you will need your own env.py file and database credentials.

    Returns a pandas DataFrame of 2017 single-unit property transactions
    (May-Aug) with bed/bath counts > 0.'''
    filename = "zillow.csv"

    if os.path.isfile(filename):
        # Serve the cached copy when it exists
        return pd.read_csv(filename)
    else:
        # read the SQL query into a dataframe
        df = pd.read_sql('''
        SELECT parcelid, fips, latitude, longitude, lotsizesquarefeet, calculatedfinishedsquarefeet, bedroomcnt, bathroomcnt, taxamount, taxvaluedollarcnt FROM properties_2017
        JOIN predictions_2017 USING (parcelid)
        WHERE transactiondate BETWEEN '2017-05-01' AND '2017-08-31'
        AND (propertylandusetypeid IN (261, 262, 263, 264, 268, 273, 274, 276, 279))
        AND (bathroomcnt > 0)
        AND (bedroomcnt > 0);                 
        ''' , get_connection('zillow'))

        # Write that dataframe to disk for later. Called "caching" the data.
        # index=False: previously the row index was written as an extra
        # unnamed column, so cached reads differed from fresh query results.
        df.to_csv(filename, index=False)

        # Return the dataframe to the calling code
        return df
|
984,251 | 80b5990f252f2437c4fab719f7a8ae9aa6ef0598 | '''
Source: ACM Japan 2005.
IDs for online judges: POJ 2739, UVA 3399.
Some positive integers can be arrived at by adding consecutive primes.
Ex.
53 5 + 7 + 11 + 13 + 17 and 53
41 2 + 3 + 5 + 7 + 11 + 13, 11 + 13 + 17, and 41
3 3
but not
20 3 + 5 + 5 + 7 // 5 is repeated and hence not consecutive
Task
Write a program that reports the number of representations for the given positive integer.
Input
The input is a sequence of positive integers, each in a separate line. The integers are between 2 and
10,000, inclusive. The end of the input is indicated by a zero
output
For each input write how many prime representation it has
constraints
None of the representation will have a prime number greater then 10000
methodology
1.create a prime sieve upto 10000
2.Starting with first prime
2a. keep adding next primes till either its equal to input or exceed
2b. if equal we have the number
2c. if exceed: start again with above loop using next prime number
2d. ultimately if prime number at (2) exceed input
it can not have a prime representation
result: Time limit exceed.
option: Try some other faster supported language like Java or c++
accepted solution prime_expression.cpp
'''
from sys import stdin, stdout
def prime_seive(n):
    """Return the list of all primes <= n (sieve of Eratosthenes).

    Fixes an off-by-one in the original marking loop (`while x < n`), which
    never marked n itself and therefore reported composite values of n
    (e.g. 10) as prime. Marking now starts at i*i, since smaller multiples
    were already marked by smaller primes.
    """
    primes_index = [1]*(n+1)
    primes = []
    for i in range(2, n+1):
        if primes_index[i]:
            primes.append(i)
            # Mark every multiple of i up to and including n as composite
            for x in range(i * i, n + 1, i):
                primes_index[x] = 0
    return primes
# Precompute all primes up to 10000 (problem constraint).
primes = prime_seive(10001)
inps = 1
# Read integers until a 0 sentinel; for each, count how many ways it can be
# written as a sum of one or more consecutive primes.
while inps != 0:
    inps = int(stdin.readline().strip())
    cnt = 0
    # NOTE(review): `i > inps` compares a list index with the input value --
    # presumably `primes[i] > inps` was intended. The inner loop also lacks
    # an early exit when sums > inps, which matches the TLE reported in the
    # module docstring.
    for i in range(len(primes)):
        if i > inps:break
        sums = 0
        for j in range(i,len(primes)):
            sums += primes[j]
            if sums == inps:
                cnt += 1
                break
    # The sentinel 0 (and any non-positive input) is not reported
    if inps > 0:print(cnt)
|
984,252 | 86a987befea4c4612a6d84773031579208b58614 |
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.8'
_lr_method = 'LALR'
_lr_signature = '54820A6D8FB5AA5FA5E3D4961CD92D46'
_lr_action_items = {'$end':([1,2,3,4,],[-3,0,-1,-2,]),'NUM':([0,1,2,3,4,],[1,-3,1,-1,-2,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'integer':([0,],[2,]),'digit':([0,2,],[3,4,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> integer","S'",1,None,None,None),
('integer -> digit','integer',1,'p_start','simpleGrammar.py',31),
('integer -> integer digit','integer',2,'p_start','simpleGrammar.py',32),
('digit -> NUM','digit',1,'p_digit','simpleGrammar.py',40),
]
|
984,253 | e9597de46e477870c479da4ef318b57aae9d31a1 | from django.test import TestCase
from .models import Item
class TestViews(TestCase):
    """View tests for the todo app: templates, CRUD and toggling."""

    def test_get_todo_list(self):
        # To test the HTTP response of the view
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        # To confirm the view uses the correct template
        # And tell it the template we expect
        self.assertTemplateUsed(response, 'todo/todo_list.html')

    def test_get_add_item_page(self):
        response = self.client.get('/add')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'todo/add_item.html')

    def test_get_edit_item_page(self):
        # Create a DB item to use
        item = Item.objects.create(name='Test Todo Item')
        # We pass in the URL followed by an item ID
        response = self.client.get(f'/edit/{item.id}')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'todo/edit_item.html')

    def test_can_add_item(self):
        # Tests adding an item to the DB
        response = self.client.post('/add', {'name': 'Test Added Item'})
        # Redirect back to the home page
        self.assertRedirects(response, '/')

    def test_can_delete_item(self):
        # Create a DB item
        item = Item.objects.create(name='Test Todo Item')
        # Deletes the item from the DB
        response = self.client.get(f'/delete/{item.id}')
        self.assertRedirects(response, '/')
        # To prove the item is deleted try to get from the
        # DB using it's ID
        existing_items = Item.objects.filter(id=item.id)
        # Check the length of existing items in the DB
        self.assertEqual(len(existing_items), 0)

    def test_can_toggle_item(self):
        # Create a DB item that starts as done
        item = Item.objects.create(name='Test Todo Item', done=True)
        # Toggling should flip done back to False
        response = self.client.get(f'/toggle/{item.id}')
        self.assertRedirects(response, '/')
        updated_item = Item.objects.get(id=item.id)
        self.assertFalse(updated_item.done)

    def test_can_edit_item(self):
        item = Item.objects.create(name='Test Todo Item')
        # Post an updated name
        response = self.client.post(
            f'/edit/{item.id}', {'name': 'Updated Name'})
        self.assertRedirects(response, '/')
        updated_item = Item.objects.get(id=item.id)
        self.assertEqual(updated_item.name, 'Updated Name')
|
984,254 | c672bf1f1aa41769625df485c89ced8b9c84eee3 | """Tests for bootstrap Transformers."""
|
984,255 | 078fec1243252387ef27b2d06a6b680ed2b4cb1b | #! /usr/bin/python
# Import the core Python modules for ROS and to implement ROS Actions:
import rospy
import actionlib
# Import all the necessary ROS message types:
from sensor_msgs.msg import LaserScan
# Import some other modules from within this package (copied from other package)
from move_tb3 import MoveTB3
# Import some other useful Python Modules
from math import radians
import datetime as dt
import os
import numpy as np
FRONT = 'front'
LEFT = 'fleft'
RIGHT = 'fright'
class ObstacleAvoidance(object):
    """Simple LiDAR-based obstacle avoidance for a TurtleBot3.

    Subscribes to /scan, tracks the minimum range in three angular sectors
    (front/left/right) and rotates away when the front sector gets too close.
    """

    def __init__(self, front_range = 36, left_range = 36, right_range = 36, fdist_thresh=0.4, rldist_thresh=0.1, robot_controller=None, init=True):
        # Initialise action server
        if init:
            rospy.init_node('obstacle_avoidance')

        # Lidar subscriber
        self.lidar_subscriber = rospy.Subscriber(
            '/scan', LaserScan, self.lidar_callback)
        # Minimum range seen per sector (metres), updated by lidar_callback
        self.lidar = { FRONT: 0.0, LEFT: 0.0, RIGHT: 0.0 }
        self.raw_data = np.array(tuple())

        # The front sector straddles 0 degrees, so it is split into two
        # index ranges: (f_left..359) and (0..f_right).
        f_right = int(front_range / 2)
        f_left = 359 - f_right
        self.r_front = [ (f_left, 359), (0, f_right) ]
        self.r_left = (f_left - left_range, f_left)
        self.r_right = (f_right, f_right + right_range)

        self.fdist_thresh = fdist_thresh
        self.rldist_thresh = rldist_thresh

        # Robot movement and odometry
        if robot_controller is None:
            self.robot_controller = MoveTB3()
        else:
            self.robot_controller = robot_controller

        self.ctrl_c = False
        rospy.on_shutdown(self.shutdown_ops)

        self.rate = rospy.Rate(5)

    def lidar_callback(self, lidar_data):
        """Returns arrays of lidar data"""
        self.raw_data = np.array(lidar_data.ranges)

        f = self.r_front
        l = self.r_left
        r = self.r_right

        # Distance Detection: minimum range per sector, capped at 10 m
        self.lidar[FRONT] = min(
            min(min(self.raw_data[f[0][0]:f[0][1]]), min(self.raw_data[f[1][0]:f[1][1]])), 10) # front 36 degrees
        self.lidar[LEFT] = min(
            min(self.raw_data[l[0]:l[1]]), 10)
        self.lidar[RIGHT] = min(
            min(self.raw_data[r[0]:r[1]]), 10)

    def shutdown_ops(self):
        # Stop the robot and flag the control loop to exit
        self.robot_controller.stop()
        self.ctrl_c = True

    def attempt_avoidance(self):
        """Rotate away from an obstacle when the front sector is too close.

        Turns toward whichever side (left/right) currently has more room.
        """
        front = self.lidar[FRONT]
        fleft = self.lidar[LEFT]
        fright = self.lidar[RIGHT]
        t = self.rldist_thresh
        degrees = 0

        # If we're too close to the object right in front of us
        if front < self.fdist_thresh:
            self.robot_controller.stop()
            if fright > fleft:
                degrees = 25
            else:
                degrees = -25
            self.robot_controller.deg_rotate(degrees)

    def action_launcher(self):
        # set the robot velocity:
        self.robot_controller.set_move_cmd(linear=0)
        # Keep checking for obstacles until ROS shuts down
        while not rospy.is_shutdown():
            self.attempt_avoidance()
if __name__ == '__main__':
oa = ObstacleAvoidance()
try:
oa.action_launcher()
except rospy.ROSInterruptException:
pass
|
984,256 | 8386a556e7e45893a8edc639ea90c6f06191e173 | #!/usr/bin/env python3
import random
def start_game(min_num, max_num, try_to, random_num):
    """Print the guessing-game intro and read one guess from the user.

    NOTE(review): the function immediately overwrites all four of its
    parameters with hard-coded values, so the passed-in arguments only
    affect the initial debug print -- likely unintended. The guess is also
    never compared against random_num.
    """
    print("Passed args: %s %s %s %s" % (min_num, max_num, try_to, random_num))
    min_num = 0
    max_num = 100
    try_to = 3
    random_num = random.randrange(min_num, max_num)
    print("Try to guess the number I think about. It's from {0} to {1}".format(min_num, max_num))
    print("You have {0} tries".format(try_to))
    text = input("Enter the number: ")
    print(text)
start_game(min_num, max_num, try_to, random_num) |
984,257 | 742d367334179e3285e3c530a0e3b6967be66bf7 | import pynrc
import pynrc.reduce.ref_pixels
import numpy as np
from astropy.io import fits, ascii
import glob
import os
from os import listdir, getcwd
from subprocess import call
from shutil import copyfile
import yaml
import pdb
from copy import deepcopy
import make_syml
from multiprocessing import Pool
import warnings
pynrc.setup_logging('WARN', verbose=False)
biasDir = '/usr/local/nircamsuite/ncdhas/cal/Bias/'
paramFile = 'parameters/pipe_params.yaml'
if os.path.exists(paramFile) == False:
copyfile('parameters/example_pipe_params.yaml',paramFile)
with open(paramFile) as paramFileOpen:
symLinkParam = yaml.safe_load(paramFileOpen)
if 'skipSide' in symLinkParam:
if symLinkParam['skipSide'] == True:
skipSide = True
else:
skipSide = False
else:
skipSide = False
dirname = 'raw_separated_refpix'
linkDir = os.path.join(symLinkParam['symLinkDir'],'symlinks_separated')
outDir = os.path.join(symLinkParam['symLinkDir'],dirname)
def make_symlink_dir():
    """Create the output directory for reference-corrected files (outDir).

    The symlink tree itself is not copied; files are processed into outDir
    by do_refpix instead.
    """
    ## First make a copy of symlinks
    linkDir = os.path.join(symLinkParam['symLinkDir'],'symlinks_separated')
    if os.path.exists(outDir) == False:
        os.mkdir(outDir)
    #call('cp -r {} {}'.format(linkDir,outDir),shell=True)
    #No need to copy files again since we'll process them and make new ones here
def get_rawfiles():
    """Map each directory under linkDir to its list of raw FITS files.

    Files whose names contain any pipeline-product marker (.red., .dia.,
    .slp., .cds.) or .txt are excluded, leaving only raw integrations.
    """
    raw_dirs= listdir(linkDir)
    raw_files = {}
    ignoretypes = ['.red.','.dia.','.slp.','.cds.','.txt']
    for oneDir in raw_dirs:
        fitsList = listdir(os.path.join(linkDir,oneDir))
        # Iterate over a copy so removals don't skip entries
        useList = deepcopy(fitsList)
        for onefile in fitsList:
            for ignoretype in ignoretypes:
                if ignoretype in onefile:
                    useList.remove(onefile)
        raw_files[oneDir] = useList
    return raw_files
# Detector name -> ISIM CV3 superbias file (measured 2016-02-09).
# Replaces the original ten-branch if/elif chain.
_BIAS_FILES = {
    'NRCALONG': 'NRCA5_17158_Bias_ISIMCV3_2016-02-09.fits',
    'NRCA1': 'NRCA1_17004_Bias_ISIMCV3_2016-02-09.fits',
    'NRCA2': 'NRCA2_17006_Bias_ISIMCV3_2016-02-09.fits',
    'NRCA3': 'NRCA3_17012_Bias_ISIMCV3_2016-02-09.fits',
    'NRCA4': 'NRCA4_17048_Bias_ISIMCV3_2016-02-09.fits',
    'NRCBLONG': 'NRCB5_17161_Bias_ISIMCV3_2016-02-09.fits',
    'NRCB1': 'NRCB1_16991_Bias_ISIMCV3_2016-02-09.fits',
    'NRCB2': 'NRCB2_17005_Bias_ISIMCV3_2016-02-09.fits',
    'NRCB3': 'NRCB3_17011_Bias_ISIMCV3_2016-02-09.fits',
    'NRCB4': 'NRCB4_17047_Bias_ISIMCV3_2016-02-09.fits',
}

def get_bias(dat, header):
    """Return a superbias cube matching the data cube, or 0 if unavailable.

    Parameters
    ----------
    dat : ndarray (nZ, nY, nX)
        Data cube; only its shape is used.
    header : mapping
        FITS-style header; DETECTOR selects the bias file, COLCORNR/ROWCORNR
        (1-indexed) locate the subarray within the full-frame bias.

    Returns
    -------
    ndarray or int
        Bias cube tiled to (nZ, nY, nX), or 0 when the detector is missing
        or unknown (matching the original's skip_bias behaviour).
    """
    nZ, nY, nX = dat.shape

    bias_name = _BIAS_FILES.get(header['DETECTOR']) if 'DETECTOR' in header else None
    if bias_name is None:
        return 0

    # Cut the subarray out of the full-frame bias and tile it over the ramp
    bias_file = os.path.join(biasDir, bias_name)
    bias_full = fits.getdata(bias_file, extname='SCI')
    startX = header['COLCORNR'] - 1
    endX = startX + nX
    startY = header['ROWCORNR'] - 1
    endY = startY + nY
    bias_cut = bias_full[startY:endY, startX:endX]
    bias_cube = np.tile(bias_cut, [nZ, 1, 1])

    return bias_cube
def one_file_refpix(allInput):
    """Reference-pixel-correct one raw file and write the result.

    allInput is a [fileName, linkDir, dirNow, saveDir] list (packed this way
    so it can be dispatched through multiprocessing Pool.map).
    """
    fileName,linkDir,dirNow,saveDir = allInput
    HDUList = fits.open(os.path.join(linkDir,dirNow,fileName))
    dat = HDUList[0].data
    header = HDUList[0].header
    # Subtract the superbias before correction (0 if no bias is available)
    bias_cube = get_bias(dat,header)
    refObject = pynrc.reduce.ref_pixels.NRC_refs(dat-bias_cube,header,altcol=True)
    refObject.calc_avg_amps()
    refObject.correct_amp_refs()
    refObject.calc_avg_cols(avg_type='pixel')
    refObject.calc_col_smooth(savgol=True)
    if (refObject.refs_side_avg is None) | (skipSide == True):
        pass ## can't do side ref correction with no side ref pixels
    else:
        refObject.correct_col_refs()
    # NOTE(review): the bias is subtracted but not re-added (re-add is
    # commented out) -- output is bias-subtracted; confirm intended.
    useDat = refObject.data #+ bias_cube
    header['REFPIX'] = (True,'pynrc reference pixel applied?')
    header['SKIPSD'] = (skipSide,'Skip the side reference pixel correction?')
    outName = os.path.join(saveDir,fileName)
    primHDU = fits.PrimaryHDU(useDat,header=header)
    if os.path.exists(outName) == True:
        print("Already found {}. Skipping.".format(outName))
    else:
        primHDU.writeto(outName,overwrite=True)
    HDUList.close()
def do_refpix(testMode=False):
    """Reference-pixel-correct every raw file, one directory at a time.

    Parameters
    ----------
    testMode : bool
        When True, only the first file of each directory is processed
        (quick smoke test).
    """
    raw_files = get_rawfiles()
    for dirNow in raw_files.keys():
        print("Working on directory {} of {}".format(dirNow,len(raw_files.keys())))
        saveDir = os.path.join(outDir,dirNow)
        if not os.path.exists(saveDir):
            os.mkdir(saveDir)
        if testMode:
            useFiles = [raw_files[dirNow][0]]
        else:
            useFiles = raw_files[dirNow]
        # Pack arguments as lists so Pool.map can dispatch them
        inputList = [[fileName, linkDir, dirNow, saveDir] for fileName in useFiles]
        # Context manager terminates/joins the workers; the original leaked
        # a fresh Pool(12) (never closed) for every directory.
        with Pool(12) as p:
            p.map(one_file_refpix, inputList)
def do_testrun():
    """Run the reference-pixel step on one file per directory (smoke test)."""
    do_refpix(testMode=True)
def do_all_pipeline(pipeParamsFileName='parameters/pipe_params.yaml'):
    """Run the full pipeline: refpix correction, symlinks, ncdhas staging.

    Parameters
    ----------
    pipeParamsFileName : str
        Path to the YAML file with pipeline parameters (needs symLinkDir).
    """
    with open(pipeParamsFileName) as pipeParamFile:
        pipeParams = yaml.safe_load(pipeParamFile)
    ## Correct reference pixels with pynrc since it works on subarrays
    make_symlink_dir()
    do_refpix()
    print("Making symbolic links to refence-corrected integrations...")
    ## Make symbolic links to the files that have been reference pixel corrected
    make_syml.make_syml(fromRefPix=True)
    ## copy the symbolic links where ncdas will be run
    symlinks_sep_refpix = os.path.join(pipeParams['symLinkDir'],'symlinks_sep_refpix')
    for runDirectory_baseName in ['raw_separated_MMM_refpix','raw_separated_MPM_refpix']:
        runDirectory = os.path.join(pipeParams['symLinkDir'],runDirectory_baseName)
        if os.path.exists(runDirectory) == False:
            os.mkdir(runDirectory)
        call('cp -r {}/* {}'.format(symlinks_sep_refpix,runDirectory),shell=True)
if __name__ == "__main__":
do_all_pipeline()
|
984,258 | bcca33d328de4a06764c9e0ca06b6a046983b48b | from __future__ import print_function
import unittest
import gevent
try:
from gevent.resolver.ares import Resolver
except ImportError as ex:
Resolver = None
from gevent import socket
import gevent.testing as greentest
from gevent.testing.sockets import udp_listener
@unittest.skipIf(
    Resolver is None,
    "Needs ares resolver"
)
class TestTimeout(greentest.TestCase):
    """A c-ares resolver pointed at a UDP sink that never replies must
    fail with an ARES_ETIMEOUT herror."""

    # Generous overall deadline for the whole test case (seconds)
    __timeout__ = 30

    def test(self):
        listener = self._close_on_teardown(udp_listener())
        address = listener.getsockname()

        # Drain incoming DNS queries so the OS doesn't send ICMP errors;
        # the resolver never gets an answer and must time out.
        def reader():
            while True:
                listener.recvfrom(10000)

        greader = gevent.spawn(reader)
        self._close_on_teardown(greader.kill)

        # Tiny timeout and a single try make the failure fast
        r = Resolver(servers=[address[0]], timeout=0.001, tries=1,
                     udp_port=address[-1])
        self._close_on_teardown(r)
        with self.assertRaisesRegex(socket.herror, "ARES_ETIMEOUT"):
            r.gethostbyname('www.google.com')
if __name__ == '__main__':
greentest.main()
|
984,259 | a5dce63a19e6d4da3aa2191070dd91a946ce9362 |
from sklearn import preprocessing
import numpy as np
def set_nan_to_zero(a):
    """Replace every NaN entry of array ``a`` with 0 in place; return ``a``."""
    a[np.isnan(a)] = 0
    return a
def TSC_data_loader(dataset_path,dataset_name):
    """Load a UCR-style time-series-classification dataset from TSV files.

    Expects <path>/<name>/<name>_TRAIN.tsv and _TEST.tsv with the class
    label in column 0 and the series in the remaining columns. Labels are
    re-encoded to 0..C-1 (fit on train, applied to both splits) and NaNs
    in the series are replaced with 0.

    Returns (X_train, y_train, X_test, y_test).
    """
    print("[INFO] {}".format(dataset_name))
    Train_dataset = np.loadtxt(
        dataset_path + '/' + dataset_name + '/' + dataset_name + '_TRAIN.tsv')
    Test_dataset = np.loadtxt(
        dataset_path + '/' + dataset_name + '/' + dataset_name + '_TEST.tsv')
    Train_dataset = Train_dataset.astype(np.float32)
    Test_dataset = Test_dataset.astype(np.float32)

    # Column 0 holds the class label; the rest is the time series
    X_train = Train_dataset[:, 1:]
    y_train = Train_dataset[:, 0:1]

    X_test = Test_dataset[:, 1:]
    y_test = Test_dataset[:, 0:1]
    # Fit the label encoder on train only, then apply to both splits
    le = preprocessing.LabelEncoder()
    le.fit(np.squeeze(y_train, axis=1))
    y_train = le.transform(np.squeeze(y_train, axis=1))
    y_test = le.transform(np.squeeze(y_test, axis=1))
    return set_nan_to_zero(X_train), y_train, set_nan_to_zero(X_test), y_test
|
984,260 | e40c5d864e78e56d018d7e5a886cb265ffd19fea | #-*- codeing = utf-8 -*-
#@Time : 2020/8/15 11:27\
#@Author : YJY
#@File : 测试代码.py
#@Software : PyCharm
import pandas
# Read the first sheet of the workbook; all four columns are forced to str
# and renamed s1/op/s2/s3 (no header row in the file).
# NOTE(review): hard-coded Windows path E:/calc.xls -- only runs on that machine.
data = pandas.read_excel("E:/calc.xls",sheet_name=0,names=['s1','op','s2','s3'],dtype={'s1':str,'op':str,'s2':str,'s3':str},header=None)
# Convert the DataFrame to a plain list of row lists
data = data.values.tolist()
print(data)
984,261 | 8f1a80d51a216e262f03a7e5288e5f8830666a0a | r"""
Tests for the :mod:`scglue.models.base` module
"""
import pytest
import torch
import scglue
def test_base():
    """Base Model contract: trainer access before compile() raises
    RuntimeError, and fit() on the abstract base raises NotImplementedError."""
    model = scglue.models.Model()
    with pytest.raises(RuntimeError):
        _ = model.trainer
    model.compile()
    with pytest.raises(NotImplementedError):
        model.fit([torch.randn(128, 10)])
|
984,262 | 741870274835da2247891b2b76774f36158592ae | n = int(input())
k = int(input())
sset = []
for _ in range(n):
sset.append(int(input()))
sset = list(sorted(sset))
new = []
for idx in range(0, len(sset)-1):
new.append(abs(sset[idx]-sset[idx+1]))
# print(new)
import sys
smallest_sum, idx = sys.maxsize,0
new_k = k-1
for i, el in enumerate(new):
if i == (len(new) - new_k)+1:
break
current_sum = sum(new[i:i+new_k])
# current_sum = el + sum(new[i+1:i+(new_k)])
if current_sum < smallest_sum:
smallest_sum = current_sum
idx = i
# print(idx)
# print(smallest_sum)
numbers = sset[idx:idx+k]
# print(numbers)
unfairness = 0
for idx, num in enumerate(numbers):
unfairness += sum(abs(num-numbers[j]) for j in range(idx+1, len(numbers)))
print(unfairness) |
984,263 | 8fa4e4936f56d6cf941afaccf72fb21a39fbfde6 |
# https://leetcode.com/problems/search-in-rotated-sorted-array/
# Time: O(Log N) | Space; O(1)
def search(self, nums, target):
    """
    Binary search in a rotated ascending array of distinct values.
    :type nums: List[int]
    :type target: int
    :rtype: int  (index of target, or -1 if absent)
    """
    if not nums:
        return -1
    if len(nums) == 1:
        return 0 if target == nums[0] else -1

    lo, hi = 0, len(nums) - 1
    # Narrow down while at least three elements remain in [lo, hi]
    while lo + 1 < hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] > nums[lo]:
            # Left half [lo, mid] is sorted
            if nums[lo] <= target < nums[mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        else:
            # Right half [mid, hi] is sorted
            if nums[mid] < target <= nums[hi]:
                lo = mid + 1
            else:
                hi = mid - 1

    # One or two candidates left; check lo first, then hi
    for candidate in (lo, hi):
        if nums[candidate] == target:
            return candidate
    return -1
|
984,264 | c91adaf6753a9e6793dfbbb708bef76c5d46861c | from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render
from django.views import generic
from django.http import HttpResponseRedirect, JsonResponse, HttpResponse
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout, get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .forms import CreateUserForm
from .models import Profile, Blog, SportsXP
from .forms import CreateUserForm, ExerciseForm, BmiForm
from .models import Profile, Blog, Exercise, Bmi
from friendship.models import Friend, FriendshipRequest, Block
from isodate import parse_duration
from django.conf import settings
import datetime
import requests
#-----stuff to make dynamic progress bar work----------------#
sports_list = {
    # display name -> [SportsXP model field name, cached XP value];
    # the cached value is rewritten by update_xp() for the current user
    'Basketball': ['basketball', 0],
    'Cross Training': ['cross_training', 0],
    'Cardio': ['cardio', 0],
    'Strength Training': ['strength_training', 0],
    'Climbing': ['climbing', 0],
    'Soccer': ['soccer', 0],
    'American Football': ['american_football', 0],
    'Dance': ['dance', 0],
    'Gymnastics': ['gymnastics', 0],
    'Hiking': ['hiking', 0],
    'Swimming': ['swimming', 0],
    'Yoga': ['yoga', 0]
}
# Module-level cache of the current user's total XP, set by update_xp().
# NOTE(review): module globals are shared across requests/users — confirm.
total_xp = 0
# ----- Views used for when the user has been logged in ------#
@login_required(login_url='exercise:login')
def home(request):
    '''
    Method to render the homepage (dashboard) of the user
    '''
    # First-time users must finish profile setup before the dashboard.
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    update_xp(request)  # refresh module-level sports_list / total_xp caches
    all_friends = Friend.objects.friends(request.user)
    unread_friend_requests_amount = Friend.objects.unrejected_request_count(
        user=request.user)
    # NOTE(review): my_user_id is computed but not used in this view.
    my_user_id = User.objects.get(
        username=request.user.get_username()).pk
    try:
        friend_requests = Friend.objects.unrejected_requests(
            user=request.user)
    except:  # NOTE(review): broad except — confirm what can raise here
        friend_requests = None
    context = {'sports': sports_list, 'all_friends': all_friends,
               'number_unread_requests': unread_friend_requests_amount, 'friend_requests': friend_requests,
               'total': total_xp}
    return render(request, 'exercise/home.html', context)
@login_required(login_url='exercise:login')
def first_login(request):
    '''
    Persist the profile details collected from a first-time user.

    On POST: copies the submitted name and bio onto the User record,
    clears the first_login flag, attaches a fresh SportsXP record and
    redirects to the dashboard.  On GET: renders the first-login form.
    (A leftover debug print of request.POST was removed.)
    '''
    if request.method == 'POST':
        user = User.objects.get(pk=User.objects.get(
            username=request.user.get_username()).pk)  # Grabs user based on the id
        user.first_name = request.POST.get(
            'firstname')  # Adds to first name
        user.last_name = request.POST.get('lastname')  # Adds to last name
        user.profile.first_login = False  # Updates the first login
        user.profile.bio = request.POST.get('bio')  # Updates the bio field
        # NOTE(review): SportsXP() is saved without an explicit user link,
        # and profile changes rely on user.save() cascading — confirm both.
        user.sportsxp = SportsXP()
        user.sportsxp.save()
        user.save()  # Saves the data
        return HttpResponseRedirect(reverse('exercise:home'))
    context = {}
    return render(request, 'exercise/firstlogin.html', context)
@login_required(login_url='exercise:login')
def exercise_logging(request):
    '''
    Method to save an exercise and view previous exercises
    '''
    global total_xp
    # First-time users must complete profile setup first.
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    if request.method == 'POST':
        # Re-fetch the user row by primary key before attaching the exercise.
        user = User.objects.get(pk=User.objects.get(
            username=request.user.get_username()).pk)
        form = ExerciseForm(request.POST)
        if form.is_valid():
            form.instance.user = user
            update_xp(request)  # refresh cached XP before awarding more
            user.sportsxp.total_xp += 1  # one XP per logged exercise
            user.sportsxp.save()
            form.save()
            # redirect to itself
            return HttpResponseRedirect(reverse('exercise:exerciselogging'))
    else:
        form = ExerciseForm()
    # Gets all the logged exercises of a user
    exercise = Exercise.objects.filter(user=request.user)
    # Gets all the friends of a user
    all_friends = Friend.objects.friends(request.user)
    # Gets all the exercises of user's friends
    friend_exercises = Exercise.objects.filter(user__in=all_friends)
    context = {'form': form, 'exercises': exercise,
               'friend_exercises': friend_exercises}
    return render(request, 'exercise/exercise_logging_form.html', context)
@login_required(login_url='exercise:login')
def blog_post(request):
    '''
    Method to save items from a blog post
    '''
    global total_xp
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    try:
        now = datetime.datetime.now()
        blog = Blog(blog_post=request.POST['blog'],
                    blog_user=request.user.get_username(),
                    date_published=now)  # Makes an instance of the blog
    except (KeyError):  # missing 'blog' key in the POST data
        # NOTE(review): 'blog' here is the Blog *class*, not an instance —
        # confirm the template expects that.
        context = {'blog': Blog, 'error': "An error has occurred"}
        return render(request, 'exercise/blog.html', context)
    else:
        update_xp(request)
        user = User.objects.get(pk=User.objects.get(username=request.user.get_username()).pk)
        # Blog posts award a fractional 0.1 XP.
        user.sportsxp.total_xp = user.sportsxp.total_xp + 0.1
        user.sportsxp.save()
        blog.save()  # Saves the blog to the database
        return HttpResponseRedirect(reverse('exercise:blog'))
@login_required(login_url='exercise:login')
def read_sportsxp(request):
    '''
    Refresh the module-level XP caches for the current user, then redirect
    to the dashboard (which renders the data itself).

    The previous version also built a context dict here that was never
    used; that dead code has been removed.
    '''
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    update_xp(request)  # side effect only: rewrites sports_list / total_xp
    return HttpResponseRedirect(reverse('exercise:home'))
@login_required(login_url='exercise:login')
def sport_redirect(request):
    '''Render the instructions page; first-time users are sent to setup.'''
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    return render(request, 'exercise/instruction.html', {})
@login_required(login_url='exercise:login')
def friendship(request):
    '''
    Render the friendship page with the user's friends and pending requests.
    '''
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    all_friends = Friend.objects.friends(request.user)
    unread_friend_requests_amount = Friend.objects.unrejected_request_count(
        user=request.user)
    # NOTE(review): my_user_id is computed but never used in this view.
    my_user_id = User.objects.get(
        username=request.user.get_username()).pk
    try:
        friend_requests = Friend.objects.unrejected_requests(
            user=request.user)
    except:  # NOTE(review): broad except — confirm what can raise here
        friend_requests = None
    context = {'sports': sports_list, 'all_friends': all_friends,
               'number_unread_requests': unread_friend_requests_amount, 'friend_requests': friend_requests,
               'total': total_xp}
    return render(request, 'exercise/friendship.html', context)
@login_required(login_url='exercise:login')
def update_sportsxp(request):
    '''
    Adjust a user's per-sport XP counters from the progress-bar form.

    POST actions:
      * ``submit``   – add 1 XP to the selected sport and to the total.
      * ``reset``    – zero the selected sport; subtract its XP from the total.
      * ``resetall`` – zero every sport counter and the total.

    Fix: the previous implementation assigned *every* sport field in
    memory and relied on ``update_fields`` to persist only the selected
    one; this version mutates just the targeted field via setattr, which
    writes the same database rows without corrupting the in-memory object.
    '''
    global sports_list
    update_xp(request)
    if request.method == 'POST':
        if (request.POST.get('submit') == 'Submit'):
            for key, value in sports_list.items():
                if request.POST.get('activities') == key:
                    user = User.objects.get(pk=User.objects.get(
                        username=request.user.get_username()).pk)  # Grabs user based on the id
                    field = value[0]
                    current = getattr(user.sportsxp, field)
                    user.sportsxp.total_xp = user.sportsxp.total_xp + 1
                    setattr(user.sportsxp, field, current + 1)
                    user.sportsxp.save(
                        update_fields=[field, 'total_xp'])
        elif (request.POST.get('reset') == 'Reset'):
            for key, value in sports_list.items():
                if request.POST.get('activities') == key:
                    user = User.objects.get(pk=User.objects.get(
                        username=request.user.get_username()).pk)  # Grabs user based on the id
                    field = value[0]
                    current = getattr(user.sportsxp, field)
                    # Remove this sport's XP from the running total.
                    user.sportsxp.total_xp = user.sportsxp.total_xp - current
                    setattr(user.sportsxp, field, 0)
                    user.sportsxp.save(
                        update_fields=[field, 'total_xp'])
        elif (request.POST.get('resetall') == "Reset All"):
            user = User.objects.get(pk=User.objects.get(
                username=request.user.get_username()).pk)  # Grabs user based on the id
            # Zero every per-sport counter and the running total.
            for key, value in sports_list.items():
                setattr(user.sportsxp, value[0], 0)
            user.sportsxp.total_xp = 0
            user.sportsxp.save()
    return HttpResponseRedirect(reverse('exercise:home'))
@login_required(login_url='exercise:login')
def bmi_display(request):
    '''
    Compute, store and display BMI entries for the current user.
    (Docstring previously said "blog post" — copy/paste leftover.)
    '''
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    if request.method == 'POST':
        # A zero height would divide by zero below; bounce back to the form.
        if int(request.POST.get('height_feet')) == 0 and int(request.POST.get('height_inches')) == 0:
            return HttpResponseRedirect(reverse('exercise:bmidisplay'))
        try:
            now = datetime.datetime.now()
            height_feet = int(request.POST.get('height_feet'))
            height_inches = int(request.POST.get('height_inches'))
            weight_pounds = int(request.POST.get('weight_pounds'))
            # Imperial -> metric conversion, then BMI = kg / m^2.
            height_meters = height_feet * 0.3048 + height_inches * 0.0254
            weight_kg = weight_pounds * 0.453592
            answer = weight_kg / (height_meters * height_meters)
            answer_floored = weight_kg // (height_meters * height_meters)
            # NOTE(review): this adds 1 when the fractional part exceeds 0.5
            # while keeping the fraction — unlike normal rounding; confirm.
            if ((answer - answer_floored) > 0.5):
                answer += 1
            bmi = Bmi(user=User.objects.get(pk=User.objects.get(
                username=request.user.get_username()).pk),
                height_feet=height_feet,
                height_inches=height_inches,
                weight_pounds=weight_pounds,
                bmi_user=request.user.get_username(),
                user_bmi=answer,
                time_of_bmi=now)  # Makes an instance of the Bmi record
        except (KeyError):  # a POST field was missing
            form = BmiForm()
            context = {'form': form, 'bmis': Bmi,
                       'error': "An error has occurred"}
            return render(request, 'exercise/bmi.html', context)
        else:
            bmi.save()  # Saves the record to the database
            return HttpResponseRedirect(reverse('exercise:bmidisplay'))
    form = BmiForm()
    bmi = Bmi.objects.filter(user=request.user)
    # Plain list of BMI values for the template's chart.
    bmi_nums = []
    for x in bmi:
        bmi_nums.append(x.user_bmi)
    context = {'form': form, 'bmis': bmi, "bmi_nums": bmi_nums}
    return render(request, 'exercise/bmi.html', context)
@login_required(login_url='exercise:login')
def send_friend_request(request):
    '''
    Validate and send a friend request to the username posted in the form.

    Rejects duplicates, self-requests, existing friendships and unknown
    usernames, re-rendering the friendship page with a matching error.
    '''
    sent_requests = Friend.objects.sent_requests(user=request.user)
    # rejected_list = Friend.objects.rejected_requests(user=request.user)
    friend_requested = False
    # friend_rejected = False
    # Flag a duplicate if we already sent a request to this username.
    for item in sent_requests:
        if (str(request.POST.get('friendusername')) == str(item.to_user)):
            friend_requested = True
    # for item in rejected_list:
    # if (str(request.POST.get('friendusername')) == str(item.from_user)):
    # friend_rejected = True
    if (friend_requested):
        # Duplicate request: rebuild the page with an error banner.
        update_xp(request)
        all_friends = Friend.objects.friends(request.user)
        unread_friend_requests_amount = Friend.objects.unrejected_request_count(
            user=request.user)
        my_user_id = User.objects.get(
            username=request.user.get_username()).pk
        try:
            friend_requests = FriendshipRequest.objects.get(
                to_user=my_user_id)
        except:  # NOTE(review): broad except; .get also raises on multiple rows
            friend_requests = None
        context = {'error': 'You already requested friendship with the user', 'sports': sports_list, 'all_friends': all_friends,
                   'number_unread_requests': unread_friend_requests_amount,
                   'total': total_xp}
        return render(request, 'exercise/friendship.html', context)
    if (str(request.user.get_username()) == str(request.POST.get('friendusername'))):
        # Self-request guard.
        update_xp(request)
        all_friends = Friend.objects.friends(request.user)
        unread_friend_requests_amount = Friend.objects.unrejected_request_count(
            user=request.user)
        my_user_id = User.objects.get(
            username=request.user.get_username()).pk
        try:
            friend_requests = FriendshipRequest.objects.get(
                to_user=my_user_id)
        except:  # NOTE(review): broad except
            friend_requests = None
        context = {'error': 'You cannot be friends with yourself', 'sports': sports_list, 'all_friends': all_friends,
                   'number_unread_requests': unread_friend_requests_amount,
                   'total': total_xp}
        return render(request, 'exercise/friendship.html', context)
    try:
        # Raises if the username does not exist (caught below).
        action_user_name_val = User.objects.get(pk=User.objects.get(
            username=request.POST.get('friendusername')).pk)
        if Friend.objects.are_friends(request.user, action_user_name_val) == True:
            # Already friends: show an error page.
            update_xp(request)
            all_friends = Friend.objects.friends(request.user)
            unread_friend_requests_amount = Friend.objects.unrejected_request_count(
                user=request.user)
            my_user_id = User.objects.get(
                username=request.user.get_username()).pk
            try:
                friend_requests = FriendshipRequest.objects.get(
                    to_user=my_user_id)
            except:  # NOTE(review): broad except
                friend_requests = None
            context = {'error': 'You are already friends with the user', 'sports': sports_list, 'all_friends': all_friends,
                       'number_unread_requests': unread_friend_requests_amount, 'friend_requests': friend_requests,
                       'total': total_xp}
            return render(request, 'exercise/friendship.html', context)
    except:
        # Unknown username (or other lookup failure): show an error page.
        update_xp(request)
        all_friends = Friend.objects.friends(request.user)
        unread_friend_requests_amount = Friend.objects.unrejected_request_count(
            user=request.user)
        my_user_id = User.objects.get(
            username=request.user.get_username()).pk
        try:
            friend_requests = FriendshipRequest.objects.get(
                to_user=my_user_id)
        except:  # NOTE(review): broad except
            friend_requests = None
        context = {'error': 'The username entered could not be found, please try again', 'sports': sports_list, 'all_friends': all_friends,
                   'number_unread_requests': unread_friend_requests_amount,
                   'total': total_xp}
        return render(request, 'exercise/friendship.html', context)
    else:
        # All checks passed: create the friendship request.
        Friend.objects.add_friend(
            request.user,  # The sender
            action_user_name_val,  # The recipient
            message=request.POST.get('friendmessage'))
        all_friends = Friend.objects.friends(request.user)
        unread_friend_requests_amount = Friend.objects.unrejected_request_count(
            user=request.user)
        context = {'success_sent': 'Request Sent to user', 'sports': sports_list, 'all_friends': all_friends,
                   'number_unread_requests': unread_friend_requests_amount,
                   'total': total_xp}
        return render(request, 'exercise/friendship.html', context)
@login_required(login_url='exercise:login')
def accept_deny_block_request(request, action_user_name):
    '''
    Apply the posted decision (Accept / Deny / Unfriend / Block / Unblock)
    to the friendship between the current user and *action_user_name*,
    then redirect back to the friend-request page.
    '''
    if request.method == 'POST':
        decision = request.POST.get('Decision')
        action_user_name_val = User.objects.get(pk=User.objects.get(
            username=action_user_name).pk)
        my_user_id = User.objects.get(
            username=request.user.get_username()).pk
        action_user_id = User.objects.get(username=action_user_name_val).pk
        if decision == "Accept":
            # Guard against accepting when the pair is already friends.
            if Friend.objects.are_friends(request.user, action_user_name_val) != True:
                friend_request = FriendshipRequest.objects.get(
                    from_user=action_user_id, to_user=my_user_id)
                friend_request.accept()
        elif decision == "Deny":
            friend_request = FriendshipRequest.objects.get(
                from_user=action_user_id, to_user=my_user_id)
            friend_request.reject()
        elif decision == "Unfriend":
            Friend.objects.remove_friend(request.user, action_user_name_val)
        elif decision == "Block":
            Block.objects.add_block(request.user, action_user_name_val)
        elif decision == "Unblock":
            Block.objects.remove_block(request.user, action_user_name_val)
    # Non-POST requests fall through to the same redirect, unchanged.
    return HttpResponseRedirect(reverse('exercise:friendrequest'))
@login_required(login_url='exercise:login')
def search_youtube(request):
    '''
    Search YouTube via the Data API and render up to 9 video results.

    POST with ``submit == 'lucky'`` redirects straight to the first hit;
    otherwise a second API call fetches titles, durations and thumbnails
    for the result page.
    '''
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    videos = []
    if request.method == 'POST':
        search_url = 'https://www.googleapis.com/youtube/v3/search'
        video_url = 'https://www.googleapis.com/youtube/v3/videos'
        search_params = {
            'part': 'snippet',
            'q': request.POST['search'],
            'key': settings.YOUTUBE_DATA_API_KEY,
            'maxResults': 9,
            'type': 'video'
        }
        r = requests.get(search_url, params=search_params)
        results = r.json()['items']
        video_ids = [result['id']['videoId'] for result in results]
        if request.POST['submit'] == 'lucky':
            # BUG FIX: this path called django.shortcuts.redirect, which was
            # never imported (NameError); HttpResponseRedirect is in scope.
            return HttpResponseRedirect(
                f'https://www.youtube.com/watch?v={ video_ids[0] }')
        video_params = {
            'key': settings.YOUTUBE_DATA_API_KEY,
            'part': 'snippet,contentDetails',
            'id': ','.join(video_ids),
            'maxResults': 9
        }
        r = requests.get(video_url, params=video_params)
        results = r.json()['items']
        for result in results:
            video_data = {
                'title': result['snippet']['title'],
                'id': result['id'],
                'url': f'https://www.youtube.com/watch?v={ result["id"] }',
                # ISO-8601 duration -> whole minutes.
                'duration': int(parse_duration(result['contentDetails']['duration']).total_seconds() // 60),
                'thumbnail': result['snippet']['thumbnails']['high']['url']
            }
            videos.append(video_data)
    context = {
        'videos': videos
    }
    return render(request, 'exercise/youtube.html', context)
@login_required(login_url='exercise:login')
def cardioView(request):
    '''Show the cardio page; first-time users are sent to setup.'''
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    return render(request, 'exercise/cardio.html', {})
@login_required(login_url='exercise:login')
def bodyView(request):
    '''Show the bodybuilding page; first-time users are sent to setup.'''
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    return render(request, 'exercise/bodybuilding.html', {})
@login_required(login_url='exercise:login')
def sportView(request):
    '''Show the sports page; first-time users are sent to setup.'''
    if request.user.profile.first_login:
        return HttpResponseRedirect(reverse('exercise:firstlogin'))
    return render(request, 'exercise/sport.html', {})
#------ Views that can be accessed by users that have not been authenticated ------#
def blog_display(request):
    '''Render every blog post (public view, no login required).'''
    context = {'blogs': Blog.objects.all()}
    return render(request, 'exercise/blog.html', context)
def route_to_landing_or_home(request):
    '''
    Send authenticated users to their dashboard and everyone else to the
    public landing page.
    '''
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('exercise:home'))
    return render(request, 'exercise/index.html', {})
def login_user(request):
    '''
    View to handle the manual login of users who have been created
    Note: This method will sign in a user regardless of if they manually created an account or used google authentication
    '''
    if request.user.is_authenticated:  # already signed in: skip the form
        if request.user.profile.first_login:
            return HttpResponseRedirect(reverse('exercise:firstlogin'))
        return HttpResponseRedirect(reverse('exercise:home'))
    else:  # Logins in user
        if request.method == 'POST':
            username = request.POST.get('username')
            password = request.POST.get('password')
            # Calls the Django authentication method
            user = authenticate(request, username=username, password=password)
            if user is not None:  # If user exists
                login(request, user)  # Logs them in
                # First-time users finish profile setup before the dashboard.
                if user.profile.first_login:
                    return HttpResponseRedirect(reverse('exercise:firstlogin'))
                return HttpResponseRedirect(reverse('exercise:home'))
            else:  # If something went wrong
                messages.info(
                    request, 'Username OR Password is Incorrect')  # Sets message to display
        context = {}
        # Redirects to the same page
        return render(request, 'exercise/login.html', context)
def register_user(request):
    '''
    View to handle the manual creation of a user
    Note: This method will fail if a user being created already exists
    '''
    if request.user.is_authenticated:  # already signed in: go home
        return HttpResponseRedirect(reverse('exercise:home'))
    else:  # Creates a new user from the template given
        form = CreateUserForm()
        if request.method == 'POST':
            # Calls our form with the post data
            form = CreateUserForm(request.POST)
            if form.is_valid():  # Checks validity (incl. unique username)
                form.save()  # Saves the form
                user = form.cleaned_data.get('username')
                messages.success(request, "User Created for " + user)
                return HttpResponseRedirect(reverse('exercise:login'))
        context = {'form': form}
        # invalid POST or plain GET: re-render the registration form
        return render(request, 'exercise/register.html', context)
def logout_user(request):
    '''
    Method to logout a user
    Note: This method will logout any user regardless of if they signed in with google or not
    '''
    logout(request)
    return HttpResponseRedirect(reverse('exercise:landing'))
#-----------------Views for progress bar feature-----------------------#
def update_xp(request):
    '''
    Refresh the module-level XP caches from the current user's SportsXP row.

    Side effects only: rewrites the cached value in each ``sports_list``
    entry and sets ``total_xp``.  The previous version fetched the user
    twice (by username, then again by the resulting pk) and read total_xp
    through getattr with a constant string; both simplified here.
    '''
    global sports_list
    global total_xp
    user = User.objects.get(username=request.user.get_username())
    for key, value in sports_list.items():
        # value[0] is the SportsXP field name for this sport.
        sports_list[key][1] = getattr(user.sportsxp, value[0])
    total_xp = user.sportsxp.total_xp
def sortxp(request):
    '''
    Render the dashboard with the sports ordered by ascending cached XP.
    (A leftover debug print of request.POST was removed.)
    '''
    global sports_list
    sorted_sports_list = dict(
        sorted(sports_list.items(), key=lambda e: e[1][1]))
    # NOTE(review): home.html normally also receives friend/total context;
    # confirm the template tolerates receiving only 'sports'.
    context = {"sports": sorted_sports_list}
    return render(request, 'exercise/home.html', context)
# XP points for finishing the instructions page
def complete_instructions(request):
    '''Award one XP for completing the instructions, then return home.'''
    global total_xp
    update_xp(request)
    user = User.objects.get(pk=User.objects.get(
        username=request.user.get_username()).pk)
    user.sportsxp.total_xp = user.sportsxp.total_xp + 1
    user.sportsxp.save()
    return HttpResponseRedirect(reverse('exercise:home'))
|
984,265 | 23ce8ec75e5f61a15fc1988bf4811f68d2995c32 | import pygal
from chart_super_class import ChartSuperClass
class DiceHistogram(ChartSuperClass):
    """Bar-chart wrapper used to draw dice-roll histograms."""
    def __init__(self):
        # Underlying pygal bar chart that receives the data and styling.
        self.chart = pygal.Bar()
    def set_style(self, style):
        """Apply a pygal style object to the underlying chart."""
        self.chart.style = style
|
984,266 | d543fcca4fa52b7a70897575a2626bffeeaf4721 | noun = input("Enter a noun: ")
verb = input("Enter a verb: ")
adjective = input("Enter an adjective: ")
adverb = input("Enter an adverb: ")
print(f"Do you {verb} your {adjective} {noun} {adverb}? That's hilarious!")
|
984,267 | 290968cf75e9c5b35808ad01fe020024c7f89d85 | from azureml.core import Dataset, Workspace
from dotnetcore2 import runtime
runtime.version = ("18", "04", "0")
runtime.dist = "ubuntu"
ws = Workspace.from_config()  # load workspace settings from the local config
default_ds = ws.get_default_datastore()
# Upload the local ./data folder into the datastore under /data/files.
data_ref = default_ds.upload(src_dir='data',target_path='/data/files', overwrite=True, show_progress=True)
# Build a tabular dataset from the uploaded CSV and register it by name.
housing_ds = Dataset.Tabular.from_delimited_files(path=(default_ds,'/data/files/boston_housing.csv'))
housing_ds.register(workspace=ws, name='ds_boston_housing')
984,268 | aa420d7893b4b678a33554bbff6e531816982f2c | #!/usr/bin/env python
# -*- coding:utf-8 -*-
##
# Copyright (C) 2018 All rights reserved.
#
import random
from CorpApi import *
from weConf import *
# Client authenticated with the contact-sync secret for this corp.
api = CorpApi(TestConf['CORP_ID'], TestConf["CONTACT_SYNC_SECRET"])
try :
    ##
    # Fetch the full department tree for the corp.
    response = api.httpCall(
        CORP_API_TYPE['DEPARTMENT_LIST']
    )
    #print (response)
    department = response["department"]
    for item in department:
        # Print each department's name and its parent department id.
        print(item["name"],item['parentid'])
    #chatid = response['chatid']
except ApiException as e :
    print (e.errCode, e.errMsg)
|
984,269 | 8f0d497e2e7c2e18d4b7c26c92c9060138ceca25 | #! /usr/bin/env python3
"""
Docblock
"""
# import built in modules
# import Third party
# import local
from DualTimer.Src.Config.App import App as Config
__author__ = "John Evans <john@grandadevans.com?"
__copyright__ = "Copyright 2015, John Evans"
__credits__ = ["John Evans <john@grandadevans.com>"]
__license__ = "GPL"
__version__ = "0.0.1"
__maintainer__ = "John Evans"
__email__ = "john@grandadevans.com"
__status__ = "Development"
class TestAppConfig:
    """Tests for the application's path configuration."""
    def test_base_directory_set(self):
        """The base directory must be configured to a non-empty path."""
        dir_ = Config().paths()["base"]
        # BUG FIX: `len(dir_) is not 0` compared identity, not value (and
        # emits a SyntaxWarning on modern CPython); compare with != instead.
        # A leftover debug print was also removed.
        assert len(dir_) != 0,\
            "Base Directory is not set in config"
|
984,270 | cb388ba49c818703bd48eb53f3ca820a449e8279 | """
WARNING: OpenFace requires Python 2.7
Module for managing the SqueezeNet recognition method.
Obtained from https://github.com/kgrm/face-recog-eval
"""
import os
import cv2
import numpy as np
from keras import backend as K
from .networks_def import squeezenet
class SqueezeNet:
    """Face-encoding model based on SqueezeNet (luksface weights)."""
    def __init__(self):
        # "denseFeatures" output: the network yields an embedding vector.
        self.model = squeezenet(50, output="denseFeatures",
                                simple_bypass=True, fire11_1024=True)
        model_path = os.path.join(os.path.dirname(
            __file__), "weights", "luksface-weights.h5")
        self.model.load_weights(model_path, by_name=True)
        print('--- Weights loaded ---')
        # The weights expect channels-first ("th") image ordering.
        K.set_image_dim_ordering('th')
    def predict(self, image, normalize=True):
        """
        Get encoding of the face.
        Image will be resized to 224x224 using bicubic interpolation
        (the previous docstring said 299x299, contradicting the code).
        :param np.array image: Face image (BGR, as loaded by OpenCV)
        :param bool normalize: Return L2-normalized vector
        :return: Face encoding
        """
        # Image preprocessing
        image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_CUBIC)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Array preprocesing: HWC -> CHW, then add a batch axis.
        image = np.moveaxis(image, -1, 0)
        image = np.array([image], dtype=np.float64)
        rep = self.model.predict(image)
        if normalize:
            return rep.astype(np.float64) / np.linalg.norm(rep)
        else:
            return rep.astype(np.float64)
|
984,271 | 0b1b6aece091ea191b3ddd3fed843deb5cda0346 | from unittest import TestCase
from enemies import *
__author__ = 'p076085'
class TestEnemy(TestCase):
    """Check that Enemy subclasses initialize from their class-level data."""
    def setUp(self):
        # Fresh instances for every test method.
        self.soul = Soul()
        self.specter = Specter()
    def test_enemy_init(self):
        # Instance dicts must mirror the class-level defaults exactly.
        self.assertDictEqual(self.soul.stats, Soul.stats)
        self.assertDictEqual(self.soul.items, Soul.items)
        self.assertDictEqual(self.soul.skills, Soul.skills)
        self.assertDictEqual(self.specter.stats, Specter.stats)
        self.assertDictEqual(self.specter.items, Specter.items)
        self.assertDictEqual(self.specter.skills, Specter.skills)
|
984,272 | 870b8f2dbe5a2803e2e61fa49aa5bd8b7384dfc3 | import os
os.system('cls')
class Node:
    """Single element of a singly linked list."""
    def __init__(self, data):
        self.data = data  # payload stored at this node
        self.next = None  # link to the following node (None = tail)
'''
find the kth to last element of a singly linked list.
'''
class LinkedList:
    """
    Minimal singly linked list supporting append, full print, and
    printing from the k-th node (1-based) to the tail.
    """
    def __init__(self):
        self.head = None
    def appendnode(self, data):
        """Append *data* at the tail of the list."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            return
        last_node = self.head
        while last_node.next is not None:
            last_node = last_node.next
        last_node.next = new_node
    def printnode(self):
        """Print every node from head to tail."""
        curr_node = self.head
        while curr_node is not None:
            print("(", curr_node.data, end=" ) --> ")
            curr_node = curr_node.next
        print("\n")
    def kth_element(self, num):
        """
        Print the nodes from position *num* (1-based) through the tail.
        Returns None for num == 0.  BUG FIX: a *num* larger than the list
        length previously crashed with AttributeError while advancing past
        the tail; it now simply prints nothing.
        """
        if num == 0: return None
        curr_node = self.head
        for _ in range(0, num - 1):
            if curr_node is None:  # num exceeds the list length
                break
            curr_node = curr_node.next
        while curr_node is not None:
            print("(", curr_node.data, end=" ) --> ")
            curr_node = curr_node.next
        print("\n")
# Build a demo list 1..5 and exercise the printing helpers.
ll = LinkedList()
ll.appendnode(1)
ll.appendnode(2)
ll.appendnode(3)
ll.appendnode(4)
ll.appendnode(5)
ll.printnode()
# BUG FIX: user-facing messages said "Priting" instead of "Printing".
print("Printing from 2 to last element . . .")
ll.kth_element(2)
print("Printing from 4 to last element . . .")
ll.kth_element(4)
984,273 | a7486f10c3e4f6ccbf9f1f4981147c08c6492cfb | #读取文件第一行
import csv
from datetime import datetime
from matplotlib import pyplot as plt
filename="forme.csv"
# Consume the header row so the loop below only sees data rows.
with open(filename) as f:
    reader=csv.reader(f)
    header_row=next(reader)
    # Read the table data column by column.
    dates,opens,highs,lows,closes,adjcloses=[],[],[],[],[],[]
    for row in reader:
        current_date=datetime.strptime(row[0],"%m/%d/%Y")
        dates.append(current_date)
        a=float(row[1])
        opens.append(a)
        b=float(row[2])
        highs.append(b)
        d=float(row[3])
        lows.append(d)
        e=float(row[4])
        closes.append(e)
        # NOTE(review): column 5 is skipped — presumably volume; confirm.
        g=float(row[6])
        adjcloses.append(g)
# Plot one line per price series based on the collected data.
fig=plt.figure(dpi=128,figsize=(10,6))
plt.plot(dates,opens,c='orange',label="open")# label + legend() identify each series' line colour
plt.plot(dates,highs,c='green',label="high")
plt.plot(dates,lows,c='yellow',label="low")
plt.plot(dates,closes,c='black',label="close")
plt.plot(dates,adjcloses,c='purple',label="adj close")
plt.xlabel("Date",fontsize=16)
fig.autofmt_xdate()
plt.ylabel("",fontsize=16)
plt.tick_params (axis="both",which="major",labelsize=16)
plt.legend(loc='center left', bbox_to_anchor=(0.12, 1.12),ncol=5)# ncol=5: the five labels are laid out in one row
plt.show()
|
984,274 | 22ed30ce358914f0632f2593f533f3bf97ee0198 | """
sphinx-simulink.directives
~~~~~~~~~~~~~~~~~~~~~~~
Embed Simulink diagrams on your documentation.
:copyright:
Copyright 2016 by Dennis Edward Kalinowski <dekalinowski@gmail.com>.
:license:
MIT, see LICENSE for details.
"""
import hashlib
import os
import tempfile
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
from sphinx.util.osutil import ensuredir
from sphinxsimulink.diagram import nodes
def pathlist(argument):
    """
    Split a ``;``-separated option value into a list of validated paths.

    Each component is passed through ``directives.path`` so malformed
    entries raise the usual docutils option error.  The previous version
    bound the split result to a local named ``list``, shadowing the
    builtin; a comprehension avoids that entirely.
    """
    return [directives.path(path) for path in argument.split(';')]
class SimulinkDiagramDirective(images.Figure):
    """Figure-like directive that renders a Simulink system as an image."""
    required_arguments = 1  # the Simulink system name
    optional_arguments = 0
    option_spec = dict(
        images.Figure.option_spec, **{
            'dir': directives.path,
            'addpath': pathlist,
            'preload': directives.path,
            'subsystem': directives.unchanged,
        }
    )
    # content used by images.Figure as caption
    has_content = True
    @staticmethod
    def generate_uri(app, diagram_options, fileformat):
        """Return a deterministic image path derived from the options."""
        # give a unique folder name for the specific srcdir, housed under the
        # system's temporary directory
        outdir = os.path.join(
            tempfile.gettempdir(),
            'sphinxsimulink',
            hashlib.sha1(
                os.path.abspath( app.builder.srcdir ).encode('utf-8')
            ).hexdigest()
        )
        # FIXME: change filename hash to include contents of preload script,
        # simulink system model, and other dependencies...
        # use as mechanism to reuse cache, and delete on clean job
        # make a unique filename for the Simulink model
        hash = hashlib.sha1( repr( sorted( diagram_options.items() ) )
            .encode('utf-8') ).hexdigest()
        filename = "simulink-diagram-{}.{}".format( hash, fileformat )
        # combine the directory and filename
        uri = os.path.join(outdir, filename)
        return uri
    def run(self):
        """Build the figure node and wrap it in a diagram node."""
        env = self.state.document.settings.env
        app = env.app
        # pop these keys out of self.options;
        # place into diagram_options
        diagram_options = dict(
            (popped_key, self.options.pop(popped_key, None))
            for popped_key in
            ('dir','addpath','preload','subsystem')
        )
        # generate image at this location; Sphinx will relocate later
        uri = SimulinkDiagramDirective.generate_uri(
            app, diagram_options, 'png'
        )
        # make an empty file, if needed, to avoid warning from Sphinx's image
        # processing
        ensuredir( os.path.dirname( uri ) )
        open( uri, 'a' ).close()
        # SimulinkDiagramDirective takes system from argument[0]
        system = self.arguments[0]
        # images.Figure expects uri in argument[0]
        self.arguments[0] = uri;
        (figure_node,) = images.Figure.run(self)
        # escalate system messages
        if isinstance(figure_node, nodes.system_message):
            return [figure_node]
        diagram_node = nodes.diagram('', figure_node, **diagram_options)
        diagram_node['uri'] = uri
        diagram_node['system'] = system
        return [diagram_node]
|
984,275 | 37314b76832dfe58c3b99e3ab4d593ff69ed2f02 | from django.http import HttpResponse
from models import Process
import json
import os
import subprocess
import time
from django.conf import settings
REPO_DIR = settings.MUNKI_REPO_DIR
MAKECATALOGS = settings.MAKECATALOGS_PATH
def pid_exists(pid):
    """Check whether pid exists in the current process table.

    POSIX: probes with ``os.kill(pid, 0)`` (signal 0 checks existence and
    permission only).  Windows: opens the process and inspects its exit
    code via kernel32.
    # http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
    """
    if os.name == 'posix':
        # OS X and Linux
        import errno
        if pid < 0:
            # Negative pids address process groups, not single processes.
            return False
        try:
            os.kill(pid, 0)
        except OSError as e:
            # EPERM means the process exists but belongs to another user.
            return e.errno == errno.EPERM
        else:
            return True
    else:
        # Windows
        import ctypes
        import logging  # BUG FIX: was referenced below but never imported
        kernel32 = ctypes.windll.kernel32
        HANDLE = ctypes.c_void_p
        DWORD = ctypes.c_ulong
        LPDWORD = ctypes.POINTER(DWORD)
        class ExitCodeProcess(ctypes.Structure):
            _fields_ = [ ('hProcess', HANDLE),
                ('lpExitCode', LPDWORD)]
        SYNCHRONIZE = 0x100000
        process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
        if not process:
            return False
        ec = ExitCodeProcess()
        out = kernel32.GetExitCodeProcess(process, ctypes.byref(ec))
        if not out:
            if kernel32.GetLastError() == 5:
                # Access is denied.
                logging.warning("Access is denied to get pid info.")
            kernel32.CloseHandle(process)
            return False
        elif bool(ec.lpExitCode):
            # An exit code is set, so the process has already quit.
            kernel32.CloseHandle(process)
            return False
        # No exit code, it's running.
        kernel32.CloseHandle(process)
        return True
def index(request):
    """Placeholder endpoint; the listing view is not implemented yet."""
    body = json.dumps('view not implemented')
    return HttpResponse(body, content_type='application/json')
def run(request):
    """Kick off makecatalogs for the munki repo (POST only).

    Waits for any previously recorded makecatalogs process to finish,
    then launches a fresh one and streams its stdout lines into the
    Process record so status() can report progress.
    """
    if request.method == 'POST':
        print 'got run request for makecatalogs'
        # remove records for exited processes
        Process.objects.filter(name='makecatalogs', exited=True).delete()
        while True:
            # Loop until there are no more running processes
            processes = Process.objects.filter(name='makecatalogs',
                                               exited=False)
            if not processes:
                break
            # clean up any processes no longer in the process table
            for process in processes:
                if not pid_exists(process.pid):
                    process.delete()
            processes = Process.objects.filter(name='makecatalogs',
                                               exited=False)
            if not processes:
                break
            time.sleep(1)
        proc = subprocess.Popen(
            [MAKECATALOGS, REPO_DIR],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        record = Process(name='makecatalogs')
        record.pid = proc.pid
        record.save()
        while True:
            # Publish each stdout line as the current status text.
            output = proc.stdout.readline().decode('utf-8').rstrip('\n')
            if output:
                record.statustext = output.rstrip('\n')
                record.save()
            if proc.poll() != None:
                break
        record.statustext = 'Done'
        record.exited = True
        record.exitcode = proc.returncode
        record.save()
        return HttpResponse(json.dumps('done'),
                            content_type='application/json')
    return HttpResponse(json.dumps('must be a POST request'),
                        content_type='application/json')
def status(request):
    """Return JSON status of the current makecatalogs run, if any.

    When no non-exited Process record exists, reports exited=True with
    exitcode -1 and statustext 'no such process'.
    """
    # was a Python-2 print statement; print() works on both 2 and 3
    print('got status request for makecatalogs')
    status_response = {}
    processes = Process.objects.filter(name='makecatalogs', exited=False)
    if processes:
        # display status from one of the active processes
        # (hopefully there is only one!)
        process = processes[0]
        status_response['exited'] = process.exited
        status_response['statustext'] = process.statustext
        status_response['exitcode'] = process.exitcode
    else:
        status_response['exited'] = True
        status_response['statustext'] = 'no such process'
        status_response['exitcode'] = -1
    return HttpResponse(json.dumps(status_response),
                        content_type='application/json')
def delete(request):
    """Delete the makecatalogs Process record; absence is not an error."""
    # was a Python-2 print statement; print() works on both 2 and 3
    print('got delete request for makecatalogs')
    try:
        record = Process.objects.get(name='makecatalogs')
        record.delete()
    except Process.DoesNotExist:
        pass
    return HttpResponse(json.dumps('done'),
                        content_type='application/json')
984,276 | d4bfe6634e61e34ca60b274b6279d0434333a50a | import sys
sys.path.append('../')
import functions as fc
import matplotlib.pyplot as plt
import numpy as np
import os
# Print the working directory so relative data paths can be verified.
# (was a Python-2 print statement)
print(os.getcwd())
#Tlist=[10,20,30]
#A,T,eT=fc.data3('Data_and_Plots/April 23rd Aperture/ApertureData.txt')
#
#plt.figure(5)
#for indx,Av in enumerate(A):
#    plt.errorbar(A[indx],T[indx],eT[indx],fmt='.')
#for T in Tlist: plt.axhline(y=T)
#plt.xlabel('ApertureSize (cm)')
#plt.ylabel('Fitted Temperature (K)')
#plt.axis([1.75,5.25,0,35])
# Accumulators for the per-aperture sweep below.
Arangelist = []
Brangelist = []
DiffList = []
Aperturelist = []
Rlist = []
for ApertureSize in fc.ApertureRange:
    ParentFolderName = 'Aperture=%1.2f' % ApertureSize
    # Run the simulation for both the A and B measurement setups.
    fc.GPTrun(ParentFolderName, 'A', InitialZ=0.025)
    fc.GPTrun(ParentFolderName, 'B', InitialZ=0.025)
    AfileName = 'AVals/Adata(%s).txt' % ParentFolderName
    BfileName = 'BVals/Bdata(%s).txt' % ParentFolderName
    fc.ABSet(AfileName, BfileName)
    UB, B, eB = fc.data3(BfileName)
    UA, A, eA = fc.data3(AfileName)
    Afit, Bfit = fc.ABFIT()
    if len(Afit.roots()) == 1:
        # NOTE(review): apertures whose A-fit has multiple roots are
        # skipped, so DiffList can be shorter than fc.ApertureRange and
        # figure 4's x-values may not match the apertures that produced
        # each difference.
        DiffList.append((Afit.roots() - Bfit.roots())[0])
    Arange = np.max(A) - np.min(A)
    Brange = np.max(B) - np.min(B)
    Arangelist.append(Arange)
    Brangelist.append(Brange)
    Rlist.append((Brange / Arange) ** 2)
plt.figure(3)
plt.plot(fc.ApertureRange, Brangelist, '.', label='Range of B')
plt.plot(fc.ApertureRange, Arangelist, '.', label='Range of A')
# Fixed: the original label had an unbalanced '}' after ^2, which makes
# matplotlib's mathtext parser raise when the legend is rendered.
plt.plot(fc.ApertureRange, Rlist, '.', label=r'$\frac{R(B)}{R(A)}^2$ ')
plt.xlim([1.75, 5.25])
plt.xlabel('Aperture Radius (cm)')
plt.legend()
plt.ylabel('Value')
plt.figure(4)
plt.plot(fc.ApertureRange[0:len(DiffList)], DiffList, '.')
plt.xlim([1.75, 5.25])
# NOTE(review): the x-data here is the aperture range, but the label says
# plate separation (sic) -- looks like a copy-paste from another plot; confirm.
plt.xlabel('Plate Seperation (cm)')
plt.ylabel(r'Difference between $A_0$ $B_0$ (keV)')
plt.show()
|
984,277 | 80890afaa6e7752c404eb2d1aa163347ef76145f | """
Pre-process raw reddit data into tfrecord.
"""
import argparse
import os
import random
import tensorflow as tf
import numpy as np
import bert.tokenization as tokenization
import reddit.data_cleaning.reddit_posts as rp
rng = random.Random(0)
def process_without_response_task(row_dict, tokenizer):
    """Tokenize only the post text of one reddit record.

    Returns (text_features, context_features): text_features maps
    'op_text' to the tokenized post; context_features carries every other
    key of row_dict except the raw text fields.
    """
    op_tokens = tokenizer.tokenize(row_dict['post_text'])
    context_features = {k: v for k, v in row_dict.items()
                        if k not in {'post_text', 'response_text'}}
    return {'op_text': op_tokens}, context_features
def process_row_record(row_dict, tokenizer, random_response=None, use_response_task=True):
    """Tokenize one reddit record into (text_features, context_features).

    When random_response is given it is tokenized instead of the record's
    own response and 'has_random_resp' is set to 1 (otherwise 0).
    Records whose post or response has fewer than two tokens are rejected
    by returning (None, None).  With use_response_task=False the response
    is skipped entirely.
    """
    if not use_response_task:
        return process_without_response_task(row_dict, tokenizer)

    op_tokens = tokenizer.tokenize(row_dict['post_text'])
    if random_response:
        response_tokens = tokenizer.tokenize(random_response)
        used_random = 1
    else:
        response_tokens = tokenizer.tokenize(row_dict['response_text'])
        used_random = 0

    # Reject degenerate examples too short to be useful.
    if len(op_tokens) < 2 or len(response_tokens) < 2:
        return None, None

    context_features = {'has_random_resp': used_random}
    context_features.update(
        (k, v) for k, v in row_dict.items()
        if k not in {'post_text', 'response_text'})
    # add hand crafted features from PeerRead
    return {'op_text': op_tokens, 'resp_text': response_tokens}, context_features
def bert_process_sentence(example_tokens, max_seq_length, tokenizer, segment=1):
"""
Tokenization and pre-processing of text as expected by Bert
Parameters
----------
example_tokens
max_seq_length
tokenizer
Returns
-------
"""
# Account for [CLS] and [SEP] with "- 2"
if len(example_tokens) > max_seq_length - 2:
example_tokens = example_tokens[0:(max_seq_length - 2)]
# The convention in BERT for single sequences is:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence.
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
'''
If we need sentence detection logic:
if token in string.punctuation:
# if (tidx < len(example_tokens) - 1) and (example_tokens[tidx + 1] in string.punctuation):
# tokens.append(token)
# else:
# tokens.append("[SEP]")
# else:
'''
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(segment)
for tidx, token in enumerate(example_tokens):
tokens.append(token)
segment_ids.append(segment)
tokens.append("[SEP]")
segment_ids.append(segment)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
def reddit_to_bert_Example(text_features, context_features, max_seq_length, tokenizer, use_response_task=True):
    r"""Pack one tokenized reddit record into a tf.train.Example for BERT.

    Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯
    """
    features = {}

    def _add_text(prefix, tokens, seg):
        # Per-sequence token ids, padding mask and segment ids.
        ids, padding_mask, segments = bert_process_sentence(
            tokens, max_seq_length, tokenizer, segment=seg)
        features[prefix + "_token_ids"] = _int64_feature(ids)
        features[prefix + "_token_mask"] = _int64_feature(padding_mask)
        features[prefix + "_segment_ids"] = _int64_feature(segments)

    _add_text("op", text_features['op_text'], 1)
    if use_response_task:
        _add_text("resp", text_features['resp_text'], 0)

    # Non-sequential (context) features; the types dict is unused here.
    tf_context_features, tf_context_features_types = _dict_of_nonlist_numerical_to_tf_features(context_features)
    features = {**tf_context_features, **features}
    return tf.train.Example(features=tf.train.Features(feature=features))
def _int64_feature(value):
    """Wrap an int (or list of ints) as a tf.train Feature (Int64List)."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrap a float (or list of floats) as a tf.train Feature (FloatList)."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_feature(value):
    """Wrap a single bytes value as a tf.train Feature (BytesList)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _dict_of_nonlist_numerical_to_tf_features(my_dict):
    """Convert the numeric entries of a dict to tf Features.

    Non-numeric values are silently dropped.  Returns
    (tf_features_dict, tf_types_dict): the first is suitable for
    tf.train.Example, the second maps each kept key to its tf dtype.
    """
    tf_features_dict = {}
    tf_types_dict = {}
    for key, value in my_dict.items():
        if isinstance(value, (int, bool)):
            # bools are ints in Python; both are stored as int64
            tf_features_dict[key] = _int64_feature(value)
            tf_types_dict[key] = tf.int64
        elif isinstance(value, float):
            tf_features_dict[key] = _float_feature(value)
            tf_types_dict[key] = tf.float32
    return tf_features_dict, tf_types_dict
def process_reddit_dataset(data_dir, out_dir, out_file, max_abs_len, tokenizer, subsample, use_latest_reddit):
    """Tokenize the reddit dump and write it to a single TFRecord file.

    For roughly half the records (random_response_mask) the true response
    is replaced by a random other record's response, flagged via
    'has_random_resp', to provide negatives for the response task.
    `subsample`, when truthy, caps the number of records written.
    """
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Four load variants because the path keyword is only passed when a
    # data_dir was supplied -- presumably rp.load_reddit does not accept
    # path=None (TODO confirm).
    if use_latest_reddit:
        if data_dir:
            reddit_df = rp.load_reddit(path=data_dir, use_latest=use_latest_reddit, convert_columns=True)
        else:
            reddit_df = rp.load_reddit(use_latest=use_latest_reddit, convert_columns=True)
    else:
        if data_dir:
            reddit_df = rp.load_reddit(path=data_dir, convert_columns=True)
        else:
            reddit_df = rp.load_reddit(convert_columns=True)
    # add persistent record of the index of the data examples
    reddit_df['index'] = reddit_df.index
    reddit_records = reddit_df.to_dict('records')
    # Random pairing used to substitute responses for ~half of the records.
    random_example_indices = np.arange(len(reddit_records))
    np.random.shuffle(random_example_indices)
    random_response_mask = np.random.randint(0, 2, len(reddit_records))
    with tf.python_io.TFRecordWriter(out_dir + "/" + out_file) as writer:
        for idx, row_dict in enumerate(reddit_records):
            if subsample and idx >= subsample:
                break
            # Swap in a random response only when the mask says so and the
            # randomly paired record is not this record itself.
            if (random_response_mask[idx]) and (random_example_indices[idx] != idx):
                random_response = reddit_records[random_example_indices[idx]]['response_text']
                text_features, context_features = process_row_record(row_dict, tokenizer,
                                                                     random_response=random_response)
            else:
                text_features, context_features = process_row_record(row_dict, tokenizer)
            '''
            TODO: is this needed?
            many_split = rng.randint(0, 100) # useful for easy data splitting later
            extra_context = {'id': idx, 'many_split': many_split}
            context_features.update(extra_context)
            '''
            # turn it into a tf.data example
            # (None, None) from process_row_record means the record was
            # rejected as too short; skip it.
            if text_features and context_features:
                many_split = rng.randint(0, 100) # useful for easy data splitting later
                extra_context = {'many_split': many_split}
                context_features.update(extra_context)
                row_ex = reddit_to_bert_Example(text_features, context_features,
                                                max_seq_length=max_abs_len,
                                                tokenizer=tokenizer)
                writer.write(row_ex.SerializeToString())
def _str_to_bool(value):
    """argparse-friendly bool parser accepting common true/false spellings.

    The stock `type=bool` is broken with argparse: bool('False') is True
    because any non-empty string is truthy.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)


def main():
    """Parse CLI arguments and run the reddit -> TFRecord preprocessing."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', type=str, default=None)
    parser.add_argument('--out-dir', type=str, default='../dat/reddit')
    parser.add_argument('--out-file', type=str, default='proc.tf_record')
    parser.add_argument('--vocab-file', type=str, default='../../bert/pre-trained/uncased_L-12_H-768_A-12/vocab.txt')
    parser.add_argument('--max-abs-len', type=int, default=128)
    parser.add_argument('--subsample', type=int, default=0)
    # was type=bool, which treated every non-empty string (even "False") as True
    parser.add_argument('--use-latest-reddit', type=_str_to_bool, default=True)
    args = parser.parse_args()

    tokenizer = tokenization.FullTokenizer(
        vocab_file=args.vocab_file, do_lower_case=True)

    process_reddit_dataset(args.data_dir, args.out_dir, args.out_file,
                           args.max_abs_len, tokenizer, args.subsample, args.use_latest_reddit)


if __name__ == "__main__":
    main()
|
984,278 | e450ed450d3a6649174039100eb3562a8bd46494 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Cuenta, Partida, Periodo, Catalogo, Transaccion
# Register your models here.
class CuentaAdmin(admin.ModelAdmin):
    # Account list: name, code, nature and debit/credit columns.
    list_display = ('nombre','codigo','naturaleza','debe','haber')
admin.site.register(Cuenta,CuentaAdmin)
class PartidaAdmin(admin.ModelAdmin):
    # Journal entry list: date and description.
    list_display = ('fecha','descripcion')
admin.site.register(Partida,PartidaAdmin)
class PeriodoAdmin(admin.ModelAdmin):
    # Accounting period list: rendered via the model's get_partidas.
    list_display = ('get_partidas',)
admin.site.register(Periodo,PeriodoAdmin)
class CatalogoAdmin(admin.ModelAdmin):
    # Chart of accounts list: rendered via the model's get_cuentas.
    list_display = ('get_cuentas',)
admin.site.register(Catalogo,CatalogoAdmin)
class TransaccionAdmin(admin.ModelAdmin):
    # Transaction list: parent entry, description and amount.
    list_display = ('partida','descripcion','monto')
admin.site.register(Transaccion,TransaccionAdmin)
# Branding for the admin site ("Accounting System").
admin.site.site_header = 'Sistema Contable'
984,279 | 7d458ac2b85f31942a2b47ac8cc94ee11be9c238 | # main.py
# Entry point for the game loop.
# The Blender World Object should have a custom property called __main__ that refers to this script. This causes bge to defer the render and logic loops to this script.
import bpy
import os
import bge
import sys
import time
import json
# Make sure api script is found by appending this script's folder to sys.path
# (renamed from `dir`, which shadowed the builtin of the same name)
script_dir = os.path.dirname(bpy.data.filepath)
if script_dir not in sys.path:
    sys.path.append(script_dir)
import api as api
# this re-loads the "api.py" Text every time this script gets loaded. Useful for code changes while running the script from within the blender editor.
# (needs the "normal" import statement above to define the module name)
import importlib
importlib.reload(api)
# Module-level server instance used by update() below.
server = api.Server()
print(sys.argv, flush=True)
# Call each time the message queue should be checked (as often as possible).
def update():
    """Read one stdin line of messages and dispatch them to `server`.

    Each non-empty "\r\n"-separated chunk is parsed as JSON and routed to
    the matching method on the module-level server; dispatch errors are
    printed rather than raised so the main loop keeps running.
    """
    global server
    received = input().split("\r\n",)
    print("Received Input", flush=True)
    print("Received " + str(received), flush=True)
    for line in received:
        if not line:
            break
        msg = json.loads(line)
        # Kept deliberately: this also validates that every message has a
        # 'time' field (KeyError otherwise).
        latency_in = round(time.time() * 1000) - msg['time']
        try:
            getattr(server, msg['method'])(msg['params'])
        except Exception as err:
            print(err, flush=True)
# This loop checks for commands from the electron thread.
def LOOP():
    # Short sleep bounds the polling rate; update() blocks on input() anyway.
    time.sleep(0.0001) # Check up to 10 times per ms
    update()
# Run forever; the process is expected to be killed externally.
while(True):
    LOOP()
|
984,280 | 5c1bf58755725b18d8620c8ad08084f2a6ff7461 | import sys
# Read wall length W and point count P (P itself is unused below).
W, P = sys.stdin.readline().split(' ')
W = int(W)
# Cut positions, with both wall ends included.
dists = [0] + [int(tok) for tok in sys.stdin.readline().split(' ')] + [W]
# possible[d] is True when some pair of positions is exactly d apart.
possible = [False] * 101
for lo in range(len(dists) - 1):
    for hi in range(lo + 1, len(dists)):
        possible[dists[hi] - dists[lo]] = True
# Print every achievable piece length in increasing order, one per line.
for length in range(1, len(possible)):
    if possible[length]:
        print(length)
|
984,281 | e4c6d4d18f4c0f6772768587a478f2153dcb4984 | import dmarc_metrics_exporter.model as m
SAMPLE_XML = """
<?xml version="1.0" encoding="UTF-8" ?>
<feedback>
<report_metadata>
<org_name>google.com</org_name>
<email>noreply-dmarc-support@google.com</email>
<extra_contact_info>https://support.google.com/a/answer/2466580</extra_contact_info>
<report_id>12598866915817748661</report_id>
<date_range>
<begin>1607299200</begin>
<end>1607385599</end>
</date_range>
</report_metadata>
<policy_published>
<domain>mydomain.de</domain>
<adkim>r</adkim>
<aspf>r</aspf>
<p>none</p>
<sp>none</sp>
<pct>100</pct>
</policy_published>
<record>
<row>
<source_ip>dead:beef:1:abc::</source_ip>
<count>1</count>
<policy_evaluated>
<disposition>none</disposition>
<dkim>pass</dkim>
<spf>fail</spf>
</policy_evaluated>
</row>
<identifiers>
<header_from>mydomain.de</header_from>
</identifiers>
<auth_results>
<dkim>
<domain>mydomain.de</domain>
<result>pass</result>
<selector>default</selector>
</dkim>
<spf>
<domain>my-spf-domain.de</domain>
<result>pass</result>
</spf>
</auth_results>
</record>
</feedback>
"""
# The expected result of parsing SAMPLE_XML into the model dataclasses;
# field values mirror the XML above element-for-element.
SAMPLE_DATACLASS = m.Feedback(
    report_metadata=m.ReportMetadataType(
        org_name="google.com",
        email="noreply-dmarc-support@google.com",
        extra_contact_info="https://support.google.com/a/answer/2466580",
        report_id="12598866915817748661",
        date_range=m.DateRangeType(
            begin=1607299200,
            end=1607385599,
        ),
    ),
    policy_published=m.PolicyPublishedType(
        domain="mydomain.de",
        adkim=m.AlignmentType.R,
        aspf=m.AlignmentType.R,
        p=m.DispositionType.NONE_VALUE,
        sp=m.DispositionType.NONE_VALUE,
        pct=100,
    ),
    record=[
        m.RecordType(
            row=m.RowType(
                source_ip="dead:beef:1:abc::",
                count=1,
                policy_evaluated=m.PolicyEvaluatedType(
                    disposition=m.DispositionType.NONE_VALUE,
                    dkim=m.DmarcresultType.PASS_VALUE,
                    spf=m.DmarcresultType.FAIL,
                ),
            ),
            identifiers=m.IdentifierType(
                header_from="mydomain.de",
            ),
            auth_results=m.AuthResultType(
                dkim=[
                    m.DkimauthResultType(
                        domain="mydomain.de",
                        result=m.DkimresultType.PASS_VALUE,
                        selector="default",
                    )
                ],
                spf=[
                    m.SpfauthResultType(
                        domain="my-spf-domain.de", result=m.SpfresultType.PASS_VALUE
                    )
                ],
            ),
        )
    ],
)
|
984,282 | 66a988233c520c12b39f4e97b00f2c4450670778 | """treelstm.py - TreeLSTM RNN models
Written by Riddhiman Dasgupta (https://github.com/dasguptar/treelstm.pytorch)
Rewritten in 2018 by Long-Huei Chen <longhuei@g.ecc.u-tokyo.ac.jp>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
import torch
import torch.nn as nn
class TreeLSTMBase(nn.Module):
    """Shared base for the Tree-LSTM variants; provides tree reconstruction."""

    @staticmethod
    def extract_tree(parse):
        """Rebuild a `Tree` from a parent-pointer vector.

        The previous docstring described a different function (token/feature
        extraction); replaced with what the code actually does.

        Args:
            parse: tensor of parent indices, one per token.  Entry i holds
                the 1-based index of token (i+1)'s parent, 0 when token
                (i+1) is the root, and -1 for tokens outside the tree.

        Returns:
            The root `Tree` node.  NOTE(review): when `parse` is None the
            return is the unrelated tuple ``([], [], -1)`` -- inconsistent
            with the normal return type; callers passing None would fail
            downstream anyway.
        """
        if parse is None:
            return [], [], -1
        parents = parse.cpu().numpy()
        trees = dict()
        root = None
        # Walk up from every not-yet-visited node toward the root, creating
        # Tree nodes on the way; stop a walk early as soon as it reaches a
        # node that already exists in `trees`.
        for i in range(1, len(parents) + 1):
            if i - 1 not in trees.keys() and parents[i - 1] != -1:
                idx = i
                prev = None
                while True:
                    parent = parents[idx - 1]
                    if parent == -1:
                        break
                    tree = Tree()
                    if prev is not None:
                        tree.add_child(prev)
                    trees[idx - 1] = tree
                    tree.idx = idx - 1
                    if parent - 1 in trees.keys():
                        # Parent already built: attach and stop this walk.
                        trees[parent - 1].add_child(tree)
                        break
                    elif parent == 0:
                        root = tree
                        break
                    else:
                        prev = tree
                        idx = parent
        return root
class ChildSumTreeLSTM(TreeLSTMBase):
    """Child-Sum Tree-LSTM cell.

    Children's hidden states are summed before computing the joint
    input/output/update gates; each child gets its own forget gate.
    """

    def __init__(self, rnn_type, input_size, hidden_size, bias=True):
        super(ChildSumTreeLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # i, o and u gates are computed jointly as one 3*H projection.
        self.ioux = nn.Linear(input_size, 3 * self.hidden_size, bias=bias)
        self.iouh = nn.Linear(
            self.hidden_size, 3 * self.hidden_size, bias=bias)
        # Forget-gate projections (input and per-child hidden state).
        self.fx = nn.Linear(input_size, self.hidden_size, bias=bias)
        self.fh = nn.Linear(self.hidden_size, self.hidden_size, bias=bias)

    def forward(self, parses, embeds):
        """Run the Tree-LSTM over each column of `parses`; concatenate results."""
        cell_states = []
        hidden_states = []
        for col in range(parses.size(1)):
            root = ChildSumTreeLSTM.extract_tree(parses[:, col])
            c, h = self.tree_forward(root, embeds[:, col, :])
            cell_states.append(c)
            hidden_states.append(h)
        return (torch.cat(cell_states, dim=1), torch.cat(hidden_states, dim=1))

    def tree_forward(self, tree, embed):
        """Post-order traversal: children are computed before their parent."""
        for child in tree.children:
            self.tree_forward(child, embed)
        if tree.num_children > 0:
            child_c, child_h = zip(*(child.state for child in tree.children))
            child_c = torch.cat(child_c, dim=0)
            child_h = torch.cat(child_h, dim=0)
        else:
            # Leaf node: zero child state of shape (1, hidden_size).
            child_c = embed[0].detach().new_zeros(
                1, self.hidden_size).requires_grad_()
            child_h = embed[0].detach().new_zeros(
                1, self.hidden_size).requires_grad_()
        tree.state = self.node_forward(embed[tree.idx], child_c, child_h)
        return tree.state

    def node_forward(self, embeds, child_c, child_h):
        """Compute this node's (c, h) from its embedding and child states."""
        h_sum = torch.sum(child_h, dim=0, keepdim=True)
        iou = self.ioux(embeds) + self.iouh(h_sum)
        i, o, u = torch.chunk(iou, 3, dim=1)
        i = torch.sigmoid(i)
        o = torch.sigmoid(o)
        u = torch.tanh(u)
        # One forget gate per child; the input projection is broadcast.
        f = self.fh(child_h) + self.fx(embeds).repeat(len(child_h), 1)
        fc = torch.sigmoid(f) * child_c
        c = i * u + torch.sum(fc, dim=0, keepdim=True)
        h = o * torch.tanh(c)
        return c, h
class BinaryTreeLSTM(TreeLSTMBase):
    """Binary Tree-LSTM cell: merges exactly two child (c, h) states."""

    def __init__(self, rnn_type, hidden_size, bias=False):
        super(BinaryTreeLSTM, self).__init__()
        self.hidden_size = hidden_size
        # Separate joint i/o/u projections for the left (0) and right (1) child.
        self.iou0 = nn.Linear(
            self.hidden_size, 3 * self.hidden_size, bias=bias)
        self.iou1 = nn.Linear(
            self.hidden_size, 3 * self.hidden_size, bias=bias)
        # Per-child forget-gate projections.
        self.f0 = nn.Linear(self.hidden_size, self.hidden_size, bias=bias)
        self.f1 = nn.Linear(self.hidden_size, self.hidden_size, bias=bias)

    def forward(self, child_c, child_h):
        """Combine the two children's states into the parent's (c, h)."""
        left_h, right_h = child_h[0], child_h[1]
        iou = self.iou0(left_h) + self.iou1(right_h)
        i, o, u = torch.chunk(iou, 3, dim=2)
        i = torch.sigmoid(i)
        o = torch.sigmoid(o)
        u = torch.tanh(u)
        forget = torch.cat((self.f0(left_h), self.f1(right_h)), dim=0)
        fc = (torch.sigmoid(forget) * torch.cat(child_c, dim=0)).sum(
            dim=0, keepdim=True)
        c = i * u + fc
        h = o * torch.tanh(c)
        return c, h
class Tree():
    """A rooted tree node with lazily cached subtree size and depth.

    Bug fixes vs. the original: `getattr(self, '_size')` /
    `getattr(self, '_depth')` had no default and raised AttributeError on
    the very first call, and __len__ counted children via a nonexistent
    `.size()` method instead of `len()`.  A depth of 0 is now also cached
    correctly (0 is falsy, so the old truthiness test never hit the cache).
    """

    def __init__(self):
        self.parent = None
        self.num_children = 0
        self.children = list()
        self.state = None   # (c, h) filled in by the Tree-LSTM forward pass
        self.idx = None     # 0-based token position assigned by extract_tree
        # Caches for __len__/depth.  NOTE: not invalidated by add_child, so
        # only query the tree after it is fully built (as extract_tree does).
        self._size = None
        self._depth = None

    def add_child(self, child):
        """Attach `child` under this node and set its parent pointer."""
        child.parent = self
        self.num_children += 1
        self.children.append(child)

    def __len__(self):
        """Number of nodes in the subtree rooted here (cached)."""
        if self._size is not None:
            return self._size
        count = 1
        for child in self.children:
            count += len(child)
        self._size = count
        return self._size

    def depth(self):
        """Length of the longest path from this node down to a leaf (cached)."""
        if self._depth is not None:
            return self._depth
        count = 0
        if self.num_children > 0:
            count = 1 + max(child.depth() for child in self.children)
        self._depth = count
        return self._depth
|
984,283 | 987a93823b5d4c812d1b36d1ef30f81ee34ca818 | from .transfer import Transfer
__all__ = ["Transfer"]
|
984,284 | e3a3782a9ffd0e27c2cb6a65ea9a72285f2d555e | # string = 'My name is Azamat. I am a developer'
# # print(string.replace('a', '*'))
# list_ = []
# for i in string:
# if i.lower() == 'a':
# list_.append('*')
# else:
# list_.append(i)
# print(''.join(list_))
# Read the four answers in order: name, last name, age, city.
answers = [input() for _ in range(4)]
name, last_name, age, city = answers
# The leading space of the original message is preserved on purpose.
print(" You are {} {} {} You live in {}".format(name, last_name, age, city))
984,285 | a0247abe679df952b923d3eae3449df5d8dcd741 | from sklearn.svm import LinearSVC
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
import wx
import os
import sys
import nltk
import math
import copy
import wx.lib.platebtn as platebutton
import cStringIO
# Python-2-era hack: reload(sys) restores setdefaultencoding (site.py
# deletes it at startup) so latin-1 text files read without explicit
# decoding.  NOTE(review): this is widely discouraged; kept as-is.
reload(sys)
sys.setdefaultencoding('latin-1')
# Module-level shared state used to pass data between the wx frames below.
tr = []            # presumably the trained model object set in start_training -- confirm
new_autho = []     # author data handed back by select_new_author_window
docss = []         # feature documents shared with features_window
author_list = []   # author names shared with features_window
novel_list = [[]]  # per-author novel file names shared with features_window
# Folder containing this script; used to locate generated_files etc.
path = os.path.dirname(os.path.realpath(sys.argv[0]))
class main_window(wx.Frame) :
    """Top-level application frame: menu bar, splash image and title text."""
    def __init__(self,parent,id) :
        wx.Frame.__init__(self,parent,id,'Stylometry',size=(700,500),style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER^wx.MAXIMIZE_BOX)
        panel=wx.Panel(self,-1)
        font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.FONTWEIGHT_NORMAL)
        font.SetPointSize(15)
        status_bar=self.CreateStatusBar()
        menubar=wx.MenuBar()
        file_menu=wx.Menu()
        tools_menu=wx.Menu()
        help_menu=wx.Menu()
        # File menu: Quit.  NOTE(review): `quit` shadows the builtin.
        quit = wx.MenuItem(file_menu, wx.NewId(), '&Quit\tCtrl+Q')
        exit_img=wx.Bitmap('icons/exit_ico.png')
        quit.SetBitmap(exit_img)
        file_menu.AppendItem(quit)
        self.Bind(wx.EVT_MENU, self.close_window, id=quit.GetId())
        # Tools menu: open the training and testing frames.
        training = wx.MenuItem(help_menu, wx.NewId(), '&Training')
        tools_menu.AppendItem(training)
        testing=wx.MenuItem(help_menu, wx.NewId(), '&Testing')
        tools_menu.AppendItem(testing)
        self.Bind(wx.EVT_MENU, self.show_testing_window, id=testing.GetId())
        self.Bind(wx.EVT_MENU, self.show_training_window, id=training.GetId())
        imageFile = "icons/Investigation.jpg"
        data = open(imageFile, "rb").read()
        # convert to a data stream
        stream = cStringIO.StringIO(data)
        # convert to a bitmap
        bmp = wx.BitmapFromImage( wx.ImageFromStream( stream ))
        # show the bitmap, (5, 5) are upper left corner coordinates
        wx.StaticBitmap(self, -1, bmp, (0, 0))
        # alternate (simpler) way to load and display a jpg image from a file
        # actually you can load .jpg .png .bmp or .gif files
        jpg1 = wx.Image(imageFile, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        # bitmap upper left corner is in the position tuple (x, y) = (5, 5)
        wx.StaticBitmap(self, -1, jpg1, (10 + jpg1.GetWidth(), 5), (600,600))
        #tools_menu.AppendMenu(wx.NewId(),'Testing',testing_menu)
        help_topics = wx.MenuItem(help_menu, wx.NewId(), '&Help Topics')
        help_topics.SetBitmap(wx.Bitmap('icons/help_ico.jpg'))
        help_menu.AppendItem(help_topics)
        about = wx.MenuItem(help_menu, wx.NewId(), '&About')
        help_menu.AppendItem(about)
        self.Bind(wx.EVT_MENU, self.show_about_window, id=about.GetId())
        menubar.Append(file_menu,"File")
        menubar.Append(tools_menu,"Tools")
        menubar.Append(help_menu,"Help")
        self.SetMenuBar(menubar)
        font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
        font.SetPointSize(30)
        appname=wx.StaticText(panel,-1,"Authorship Predictor",(10,30),(460,-1),wx.ALIGN_CENTER)
        appname.SetFont(font)
        appname.SetForegroundColour(wx.Colour(250,100,150))
        self.Centre()
    def close_window(self,event) :
        # Close just this frame.
        self.Close()
    def show_about_window(self,event) :
        # about_window is presumably defined elsewhere in this module -- confirm.
        about_frame=about_window(parent=None,id=0)
        about_frame.Show()
    def show_training_window(self,event) :
        training_frame=training_window(parent=None,id=1)
        training_frame.Show()
    def show_testing_window(self,event) :
        # testing_window is presumably defined elsewhere in this module -- confirm.
        testing_frame=testing_window(parent=None,id=1)
        testing_frame.Show()
class training_window(wx.Frame) :
    """Frame for selecting authors/novels, extracting features and training."""
    global author_list
    global novel_list
    global docss
    def __init__(self,parent,id) :
        # Per-window bookkeeping; each entry of `authors` is a list of the
        # form [directory, novel file names..., author name].
        self.author_list=[]
        self.novel_list=[]
        self.numberOfAuthors=0
        self.authors=[]
        wx.Frame.__init__(self,parent,id,'TRAINING',size=(600,600),style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER^wx.MAXIMIZE_BOX)
        self.panel=wx.Panel(self)
        self.panel.SetBackgroundColour(wx.Colour(255,255,255))
        font1 = wx.Font(10, wx.DEFAULT, wx.NORMAL,wx.BOLD)
        font1.SetPointSize(12)
        # Author picker.
        self.authorNameText=wx.StaticText(self.panel,-1,"AUTHOR NAME ",pos=(50,50),size=(40,50))
        self.authorNameText.SetForegroundColour(wx.Colour(40,49,153))
        self.authorNameText.SetFont(font1)
        self.authorNameChoices=wx.Choice(self.panel,-1,pos=(185,50),size=(190,35),choices=self.author_list)
        self.authorNameChoices.SetSelection(0)
        # Novel picker for the selected author.
        self.novelNameText=wx.StaticText(self.panel,-1,"FILE NAME ",pos=(50,100),size=(40,50))
        self.novelNameText.SetFont(font1)
        self.novelNameText.SetForegroundColour(wx.Colour(40,49,153))
        self.novelNameChoices=wx.Choice(self.panel,-1,pos=(185,100),size=(300,45))
        self.novelNameChoices.SetSelection(0)
        add_author_button=wx.Button(self.panel,label="+ AUTHOR",pos=(380,50),size=(100,30))
        add_author_button.SetBackgroundColour(wx.Colour(150,177,250))
        add_author_button.SetForegroundColour(wx.Colour(40,49,153))
        self.Bind(wx.EVT_BUTTON, self.show_add_author, add_author_button)
        # Text preview of the currently selected novel.
        self.novelPrev=wx.TextCtrl(self.panel,-1,"",pos=(50,150),size=(500,300),style=wx.TE_MULTILINE)
        self.novelPrev.SetInsertionPoint(0)
        self.Bind(wx.EVT_CHOICE, self.set_new_author_novel_preview, self.authorNameChoices)
        self.Bind(wx.EVT_CHOICE, self.set_new_novel_preview, self.novelNameChoices)
        extract_features_button=wx.Button(self.panel,label="EXTRACT FEATURES",pos=(210,470),size=(165,30))
        extract_features_button.SetBackgroundColour(wx.Colour(150,177,250))
        extract_features_button.SetForegroundColour(wx.Colour(40,49,153))
        self.Bind(wx.EVT_BUTTON, self.start_extract_features_dialog, extract_features_button)
        start_training_button=wx.Button(self.panel,label="START TRAINING",pos=(390,470),size=(160,30))
        start_training_button.SetBackgroundColour(wx.Colour(150,177,250))
        start_training_button.SetForegroundColour(wx.Colour(40,49,153))
        self.Bind(wx.EVT_BUTTON, self.start_training, start_training_button)
    def set_new_author_novel_preview(self,event) :
        # Author changed: refresh the novel list and preview the first file.
        self.novelNameChoices.SetItems(self.novel_list[self.authorNameChoices.GetSelection()])
        self.novelNameChoices.SetSelection(0)
        # authors[i] is [directory, novels..., name]; join directory + file.
        file1 = self.authors[self.authorNameChoices.GetSelection()][0]+"/"+self.authors[self.authorNameChoices.GetSelection()][1+self.novelNameChoices.GetSelection()]
        #print file1
        text1 = open(file1,"r").read()
        #print text1
        self.novelPrev.SetValue(text1)
        self.Refresh()
    def show_features_window(self) :
        global author_list
        global novel_list
        global docss
        # Only create the feature window when it does not already exist:
        # GetSize raises if the frame was never created (or was destroyed).
        try :
            tmp=self.show_features_frame.GetSize()
        except :
            self.docs = []
            for auth in self.authors :
                # auth[1:-1] are the novel file names (first entry is the
                # directory, last is the author name).
                for doc in auth[1:-1] :
                    #print doc
                    # `features` is presumably defined elsewhere in this
                    # module -- confirm.
                    self.docs.append(features(doc,auth[-1],auth[0]))
            # Publish state for the nested features_window via module globals.
            author_list = self.author_list
            novel_list = self.novel_list
            docss = self.docs
            self.show_features_frame=self.features_window(parent=None,id=1)
            self.show_features_frame.Show()
            #self.show_features_frame.Bind(wx.EVT_CLOSE, self.add_new_author,self.new_author_frame)
    def set_new_novel_preview(self,event) :
        # Novel changed: load the selected file into the preview box.
        file1 = self.authors[self.authorNameChoices.GetSelection()][0]+"/"+self.authors[self.authorNameChoices.GetSelection()][1+self.novelNameChoices.GetSelection()]
        #print file1
        text1 = open(file1,"r").read()
        #print text1
        self.novelPrev.SetValue(text1)
        self.Refresh()
    def close_all(self,event) :
        # Destroy the add-author child frame too, if it exists.
        # NOTE(review): bare except swallows all errors.
        try :
            self.new_author_frame.Destroy()
            self.Destroy()
        except :
            self.Destroy()
    def show_add_author(self,event) :
        # Open the add-author dialog only if one is not already open
        # (GetSize raises when the frame does not exist).
        try :
            tmp=self.new_author_frame.GetSize()
        except :
            self.new_author_frame=self.select_new_author_window(parent=None,id=1)
            self.new_author_frame.Show()
            # When that frame closes, pull its data back via add_new_author.
            self.new_author_frame.Bind(wx.EVT_CLOSE, self.add_new_author,self.new_author_frame)
    def add_new_author(self,event) :
        # Called when the add-author frame closes; reads its result from the
        # module-global `new_autho` ([directory, novels..., author name]).
        # NOTE(review): bare except hides any failure and just destroys the frame.
        try :
            global new_autho
            # Require at least directory + one novel + a non-empty author name.
            if len(new_autho)>=3 and len(new_autho[-1])>0 :
                self.numberOfAuthors+=1
                self.authors.append(new_autho)
                #print new_autho[0::-1]
                self.novel_list.append(new_autho[1:-1])
                self.author_list.append(new_autho[-1])
                self.authorNameChoices.SetItems(self.author_list)
                self.authorNameChoices.SetSelection(0)
                #print self.novel_list
                self.novelNameChoices.SetItems(self.novel_list[self.authorNameChoices.GetSelection()])
                self.novelNameChoices.SetSelection(0)
                # Preview the first novel of the (re)selected author.
                file1 = self.authors[self.authorNameChoices.GetSelection()][0]+"/"+self.authors[self.authorNameChoices.GetSelection()][1+self.novelNameChoices.GetSelection()]
                #print file1
                text1 = open(file1,"r").read()
                #print text1
                self.novelPrev.SetValue(text1)
                self.Refresh()
                self.new_author_frame.Destroy()
                # NOTE(review): self.numberAuthors is not created in __init__
                # as shown here; presumably defined elsewhere -- confirm.
                self.numberAuthors.SetLabel("Number Of Authors Selected : "+str(self.numberOfAuthors))
        except :
            self.new_author_frame.Destroy()
    def start_training(self,event) :
        # Train only when generated feature files exist; the listdir[0]
        # lookup raises (empty dir / missing dir) and falls through to the
        # "extract features first" alert below.
        try :
            #print path+'generated_files'
            tmp = os.listdir(path+'/generated_files')[0]
            box=wx.MessageDialog(None,"Start Training..!!???",'Alert',wx.YES_NO)
            answer=box.ShowModal()
            box.Destroy()
            if answer==wx.ID_YES :
                #print "Training Started"
                ## Place to call the start trainin Function!!!!!!!
                # TrainingTesting is presumably defined elsewhere in this
                # module -- confirm.
                global tr
                tr = TrainingTesting()
                tr.train()
                box=wx.MessageDialog(None,"Training Completed..!!",'Alert',wx.OK)
                answer=box.ShowModal()
                box.Destroy()
        except :
            # NOTE(review): bare except also masks real training errors.
            box=wx.MessageDialog(None,"Please extract features first.",'Alert',wx.OK)
            answer=box.ShowModal()
            box.Destroy()
    def start_extract_features_dialog(self,event) :
        #self.show_features_window()
        # Confirm and start feature extraction; requires at least one author.
        if self.numberOfAuthors==0 :
            box=wx.MessageDialog(None,"Please input atleast one author details..!!!",'Alert',wx.OK)
            answer=box.ShowModal()
            box.Destroy()
        else :
            box=wx.MessageDialog(None,"Extract Features..!!???",'Alert',wx.YES_NO)
            answer=box.ShowModal()
            box.Destroy()
            #print "haiiii"
            if answer==wx.ID_YES :
                #pass
                #print "Feature extraction Started with data!!!!","\n",self.authors
                ## Place to call the feature extraction Function!!!!!!!
                #box=wx.MessageDialog(None,"Feature extraction Started!!!",'Alert',wx.OK)
                #answer=box.ShowModal()
                #box.Destroy()
                self.show_features_window()
    class select_new_author_window(wx.Frame) :
        """Nested dialog frame: pick an author name and their novel files.

        The result is published to the module-global `new_autho` as
        [directory, file names..., author name] just before closing.
        """
        new_author=[]
        author_name=""
        def __init__(self,parent,id) :
            self.new_author=[]
            wx.Frame.__init__(self,parent,id,"+ AUTHOR",size=(500,200),style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER^wx.MAXIMIZE_BOX)
            panel=wx.Panel(self)
            panel.SetBackgroundColour(wx.Colour(255,255,250))
            font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
            font.SetPointSize(12)
            font1 = wx.Font(10, wx.DEFAULT, wx.NORMAL,wx.BOLD)
            font1.SetPointSize(12)
            # Author name entry.
            self.authorText=wx.StaticText(panel,-1,"AUTHOR NAME",pos=(8,30),size=(130,25))
            self.authorText.SetForegroundColour(wx.Colour(40,49,153))
            self.authorText.SetFont(font1)
            self.nameText=wx.TextCtrl(panel,-1,"",pos=(150,30),size=(300,-1))
            self.nameText.SetInsertionPoint(0)
            # File name display; clicking it also opens the file dialog.
            self.novelTextLabel=wx.StaticText(panel,-1,"FILE NAME",pos=(8,70),size=(130,25))
            self.novelTextLabel.SetForegroundColour(wx.Colour(40,49,153))
            self.novelTextLabel.SetFont(font1)
            self.novelText=wx.TextCtrl(panel,-1,"",pos=(150,70),size=(300,-1))#,style=wx.TE_READONLY)
            self.novelText.Bind(wx.EVT_LEFT_DOWN, self.show_select_novels)
            self.novelText.SetInsertionPoint(0)
            select_novels_button=wx.Button(panel,label="SELECT FILES",pos=(150,120),size=(140,25))
            select_novels_button.SetBackgroundColour(wx.Colour(150,177,250))
            select_novels_button.SetForegroundColour(wx.Colour(40,49,153))
            self.Bind(wx.EVT_BUTTON, self.show_select_novels, select_novels_button)
            submit_novels_button=wx.Button(panel,label="SUBMIT",pos=(300,120),size=(90,25))
            submit_novels_button.SetBackgroundColour(wx.Colour(150,177,250))
            submit_novels_button.SetForegroundColour(wx.Colour(40,49,153))
            self.Bind(wx.EVT_BUTTON, self.return_new_author, submit_novels_button)
        def show_select_novels(self,event) :
            # Multi-select .txt file dialog; rebuilds new_author as
            # [directory, file names...] and echoes the selection.
            wcd = 'Text Files (*.txt)|*.txt'
            open_dlg = wx.FileDialog(self, message='Choose Novels', defaultDir=os.getcwd(), defaultFile='',wildcard=wcd, style=wx.FD_OPEN|wx.FD_CHANGE_DIR|wx.FD_MULTIPLE)
            ans=open_dlg.ShowModal()
            self.new_author=[]
            novels=""
            novels+=open_dlg.GetDirectory()
            self.new_author.append(open_dlg.GetDirectory())
            for i in range(len(open_dlg.GetFilenames())) :
                novels+=open_dlg.GetFilenames()[i]
                novels+=","
                self.new_author.append(open_dlg.GetFilenames()[i])
            self.novelText.SetValue(novels)
        def return_new_author(self,event) :
            # Validate, publish the result via the module-global new_autho,
            # then close (which triggers training_window.add_new_author).
            if self.nameText.GetValue()=="":
                box=wx.MessageDialog(None,"PLEASE ENTER THE NAME OF THE AUTHOR.",'SELECT NOVELS',wx.OK | wx.ICON_WARNING)
                answer=box.ShowModal()
                box.Destroy()
            elif self.novelText.GetValue()=="":
                box=wx.MessageDialog(None,"PLEASE SELECT A NOVEL.",'SELECT NOVELS',wx.OK | wx.ICON_WARNING)
                answer=box.ShowModal()
                box.Destroy()
            else:
                self.new_author.append(self.nameText.GetValue())
                global new_autho
                new_autho=self.new_author
                self.Close()
class features_window(wx.Frame) :
    """Window that runs feature extraction for every loaded document,
    previews each document's textual feature report, saves the reports to
    disk, and kicks off classifier training.

    Reads the module globals ``docss`` (list of ``features`` objects),
    ``author_list`` and ``novel_list`` populated by the main window.
    """
    global docss
    global author_list
    global novel_list

    def __init__(self,parent,id) :
        global author_list
        global novel_list
        self.author_list = copy.copy(author_list)
        self.novel_list = copy.copy(novel_list)
        # features_list[i][j] holds the textual feature report of author
        # i's j-th document (0 until extraction fills it in below).
        self.features_list = [[0 for _ in novels] for novels in self.novel_list]
        global docss
        self.docs = docss
        for doc in self.docs :
            doc.extract_features()
            doc.create_csv_file()
            i = self.author_list.index(doc.authorname)
            j = self.novel_list[i].index(doc.docname)
            self.features_list[i][j] = doc.full_features
        wx.Frame.__init__(self,parent,id,'FEATURE EXTRACTION',size=(600,450),style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER^wx.MAXIMIZE_BOX)
        self.panel=wx.Panel(self)
        self.panel.SetBackgroundColour(wx.Colour(255,255,255))
        font1 = wx.Font(10, wx.DEFAULT, wx.NORMAL,wx.FONTWEIGHT_NORMAL)
        font1.SetPointSize(12)
        self.authorNameText=wx.StaticText(self.panel,-1,"AUTHOR NAME ",pos=(20,30),size=(30,50))
        self.authorNameText.SetFont(font1)
        self.authorNameChoices=wx.Choice(self.panel,-1,pos=(155,30),size=(290,30),choices=self.author_list)
        self.authorNameChoices.SetSelection(0)
        self.novelNameText=wx.StaticText(self.panel,-1,"FILE NAME ",pos=(20,80),size=(30,50))
        self.novelNameText.SetFont(font1)
        self.novelNameChoices=wx.Choice(self.panel,-1,pos=(155,80),size=(290,30),choices=self.novel_list[self.authorNameChoices.GetSelection()])
        self.novelNameChoices.SetSelection(0)
        self.novelPrev=wx.TextCtrl(self.panel,-1,self.features_list[0][0],pos=(50,130),size=(500,200),style=wx.TE_MULTILINE)
        self.novelPrev.SetInsertionPoint(0)
        self.Bind(wx.EVT_CHOICE, self.set_new_author_features_preview, self.authorNameChoices)
        self.Bind(wx.EVT_CHOICE, self.set_new_novel_features_preview, self.novelNameChoices)
        start_training_button=wx.Button(self.panel,label="START TRAINING",pos=(300,370),size=(200,40))
        self.Bind(wx.EVT_BUTTON, self.start_training, start_training_button)
        start_training_button.SetFont(font1)
        save_features_button=wx.Button(self.panel,label="SAVE FEATURES",pos=(70,370),size=(190,40))
        save_features_button.SetFont(font1)
        self.Bind(wx.EVT_BUTTON, self.save_features_as_a_file, save_features_button)
        font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
        font.SetPointSize(15)

    def save_features_as_a_file(self,event) :
        """Write every document's feature report to
        <path>/Features/<author>/<document>.

        Fixes: removed a dead, accidentally-duplicated copy of __init__
        that was pasted inside this method, and guarded the top-level
        mkdir so saving a second time no longer raises.
        """
        global path
        try :
            os.mkdir(path+"/Features")
        except :
            # Directory already exists from a previous save.
            pass
        for doc in self.docs :
            try :
                os.mkdir(path+"/Features/"+doc.authorname)
            except :
                pass
            file1 = open(path+"/Features/"+doc.authorname+"/"+doc.docname,"w")
            file1.write(doc.full_features)
            file1.close()
        box=wx.MessageDialog(None,"Features saved in a folder named Features.",'Alert',wx.OK)
        answer=box.ShowModal()
        box.Destroy()

    def start_training(self,event) :
        """Confirm with the user, then train the global classifier ``tr``.

        The listdir probe raises when no features have been generated yet;
        the except branch turns that into a user-facing message.
        """
        try :
            tmp = os.listdir(path+'/generated_files')[0]
            box=wx.MessageDialog(None,"Start Training..!!???",'Alert',wx.YES_NO)
            answer=box.ShowModal()
            box.Destroy()
            if answer==wx.ID_YES :
                global tr
                tr = TrainingTesting()
                tr.train()
                box=wx.MessageDialog(None,"Training Completed..!!",'Alert',wx.OK)
                answer=box.ShowModal()
                box.Destroy()
        except :
            box=wx.MessageDialog(None,"Please extract features first.",'Alert',wx.OK)
            answer=box.ShowModal()
            box.Destroy()

    def set_new_author_features_preview(self,event) :
        """Author choice changed: refresh the novel list and show the
        first novel's feature report."""
        self.novelNameChoices.SetItems(self.novel_list[self.authorNameChoices.GetSelection()])
        self.novelNameChoices.SetSelection(0)
        self.novelPrev.SetValue(self.features_list[self.authorNameChoices.GetSelection()][0])
        self.Refresh()

    def set_new_novel_features_preview(self,event) :
        """Novel choice changed: show that novel's feature report."""
        self.novelPrev.SetValue(self.features_list[self.authorNameChoices.GetSelection()][self.novelNameChoices.GetSelection()])
        self.Refresh()

    def close_all(self,event) :
        """Destroy the frame, ignoring errors if it is already gone."""
        try :
            self.Destroy()
        except :
            pass
class feature_analysis_window(wx.Frame) :
    """Window that plots extracted feature values: either one feature
    across a single author's books, or one feature averaged per author.
    All graphs can be exported to a multi-page PDF.
    """
    def __init__(self,parent,id) :
        # Author names are the CSV file names under generated_files with
        # the ".csv" suffix stripped (doc[:-4]).
        self.author_list = []
        self.feature_name_list = []
        self.draw_graph = DrawGraph()
        docs = os.listdir(path+"/generated_files/")
        for doc in docs :
            self.author_list.append(doc[:-4])
        # Feature names come from the header row of the first CSV; the
        # first column (document name) is skipped.
        feats = open(path+"/generated_files/"+docs[0],"r").read().split("\n")[0].split(",")
        self.feature_name_list = feats[1:]
        # feature_list[author][book] -> list of float feature values.
        # Rows [1:-1] skip the header and the trailing empty line;
        # columns [1:-1] skip the doc name and the trailing empty field.
        self.feature_list = []
        for doc in docs :
            t = []
            feats = open(path+"/generated_files/"+doc,"r").read().split("\n")
            for feat in feats[1:-1] :
                tt = []
                feat = feat.split(",")
                for f in feat[1:-1] :
                    tt.append(float(f))
                t.append(tt)
            self.feature_list.append(t)
        wx.Frame.__init__(self,parent,id,'Features Analysis',size=(600,600),style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER^wx.MAXIMIZE_BOX)
        self.panel=wx.Panel(self)
        self.panel.SetBackgroundColour(wx.Colour(220,220,250))
        font1 = wx.Font(10, wx.DEFAULT, wx.NORMAL,wx.FONTWEIGHT_NORMAL)
        font1.SetPointSize(10)
        font2 = wx.Font(10, wx.DEFAULT, wx.NORMAL,wx.FONTWEIGHT_NORMAL)
        font2.SetPointSize(12)
        self.Bind(wx.EVT_CLOSE, self.close_all)
        # Two mutually exclusive analysis modes (radio group).
        self.type1=wx.RadioButton(self.panel, -1, 'Single Author VS documents feature analysis',pos=(120,20), style=wx.RB_GROUP)
        self.type1.SetFont(font1)
        self.type2=wx.RadioButton(self.panel, -1, 'Multiple authors feature analysis',pos=(120,55))
        self.type2.SetFont(font1)
        self.Bind(wx.EVT_RADIOBUTTON, self.draw_new_graph, self.type2)
        self.Bind(wx.EVT_RADIOBUTTON, self.draw_new_graph, self.type1)
        self.authorNameText=wx.StaticText(self.panel,-1,"Author Name\t : ",pos=(50,105))
        self.authorNameText.SetFont(font2)
        self.authorNameChoices=wx.Choice(self.panel,-1,pos=(185,105),size=(290,30),choices=self.author_list)
        self.authorNameChoices.SetSelection(0)
        self.featureNameText=wx.StaticText(self.panel,-1,"Feature Name\t : ",pos=(50,145))
        self.featureNameText.SetFont(font2)
        self.featureNameChoices=wx.Choice(self.panel,-1,pos=(185,145),size=(290,30),choices=self.feature_name_list)
        self.featureNameChoices.SetSelection(0)
        self.Bind(wx.EVT_CHOICE, self.draw_new_graph, self.authorNameChoices)
        self.Bind(wx.EVT_CHOICE, self.draw_new_graph, self.featureNameChoices)
        # Draw the initial graph: first author's first feature, one point
        # per book (x is just the 1-based book index).
        tt = self.feature_list[self.authorNameChoices.GetSelection()]
        tt = np.array(tt)
        y_data = tt.T[self.featureNameChoices.GetSelection()]
        x_data = []
        t = 1
        for i in y_data :
            x_data.append(t)
            t+=1
        self.draw_graph.draw_single_graph(x_data,y_data,'Books','feature_value',self.feature_name_list[self.featureNameChoices.GetSelection()])
        # draw_single_graph writes the rendered plot to temp_img.png.
        png = wx.Image(path+"/temp_img.png",wx.BITMAP_TYPE_ANY)
        png = png.Scale(400,300,wx.IMAGE_QUALITY_HIGH)
        png = png.ConvertToBitmap()
        self.graph_img = wx.StaticBitmap(self.panel,-1,png,(100,200),(png.GetWidth(),png.GetHeight()))
        self.graph_img.Bind(wx.EVT_LEFT_DOWN, self.show_graph_photo_viewer)
        save_features_button = wx.Button(self.panel,label="Save Graphs as PDF",pos=(180,530),size=(250,40))
        save_features_button.SetFont(font2)
        self.Bind(wx.EVT_BUTTON, self.save_graph_as_a_file, save_features_button)
    def save_graph_as_a_file(self,event) :
        """Export all graphs of the current mode to a PDF and offer to open it.

        graph_data layout consumed by DrawGraph.save_set_of_graphs:
        [flag, pdf_basename, [[x, y, xlabel, ylabel, title], ...]].
        """
        graph_data = []
        if self.type1.GetValue() :
            # Mode 1: one page per (author, feature) pair, plotting the
            # feature value across that author's books.
            graph_data.append(True)
            graph_data.append(path+"/graph_analysis1")
            tmp = []
            temp2 = 0
            for tt in self.feature_list :
                tt = np.array(tt)
                temp1 = 0
                for t in tt.T :
                    y_data = t
                    x_data = []
                    ttt = 1
                    for i in y_data :
                        x_data.append(ttt)
                        ttt+=1
                    tmp.append([x_data,y_data, 'Books of '+self.author_list[temp2],'features value',self.author_list[temp2]+"'s "+self.feature_name_list[temp1]])
                    temp1+=1
                temp2+=1
            graph_data.append(tmp)
            self.draw_graph.save_set_of_graphs(graph_data)
            box=wx.MessageDialog(None,"Graphs saved to file named 'graph_analysis1.pdf'..! Open it now.?",'Alert',wx.YES_NO)
            answer=box.ShowModal()
            box.Destroy()
            if answer==wx.ID_YES :
                # NOTE(review): gnome-open is Linux/GNOME-specific.
                os.system("gnome-open "+path+"/graph_analysis1.pdf")
        elif self.type2.GetValue() :
            # Mode 2: one page per feature, plotting the per-author mean
            # of that feature over each author's books.
            graph_data.append(True)
            graph_data.append(path+"/graph_analysis2")
            tmp = []
            for i in range(len(self.feature_name_list)) :
                ttt = 1
                y_data = []
                x_data = []
                for t in self.feature_list :
                    t = np.array(t)
                    y_data.append(float(sum(t.T[i]))/float(len(t.T[i])))
                    x_data.append(ttt)
                    ttt+=1
                tmp.append([x_data,y_data,'Authors','feature value',self.feature_name_list[i] + " of authors"])
            graph_data.append(tmp)
            self.draw_graph.save_set_of_graphs(graph_data)
            box=wx.MessageDialog(None,"Graphs saved to file named 'graph_analysis2.pdf'..! Open it now.?",'Alert',wx.YES_NO)
            answer=box.ShowModal()
            box.Destroy()
            if answer==wx.ID_YES :
                os.system("gnome-open "+path+"/graph_analysis2.pdf")
    def show_graph_photo_viewer(self,event) :
        """Open the current graph image in the system image viewer."""
        os.system("gnome-open "+path+"/temp_img.png")
    def draw_new_graph(self,event) :
        """Redraw the preview graph for the current mode/selection."""
        if self.type1.GetValue() :
            # Per-book values of the selected feature for one author.
            self.enable_author_choices()
            tt = self.feature_list[self.authorNameChoices.GetSelection()]
            tt = np.array(tt)
            y_data = tt.T[self.featureNameChoices.GetSelection()]
            x_data = []
            t = 1
            for i in y_data :
                x_data.append(t)
                t+=1
            self.draw_graph.draw_single_graph(x_data,y_data,'Books','feature_value',self.feature_name_list[self.featureNameChoices.GetSelection()])
            png = wx.Image(path+"/temp_img.png",wx.BITMAP_TYPE_ANY)
            png = png.Scale(400,300,wx.IMAGE_QUALITY_HIGH)
            png = png.ConvertToBitmap()
            self.graph_img.SetBitmap(png)
        elif self.type2.GetValue() :
            # Per-author mean of the selected feature; the author choice
            # is irrelevant in this mode, so it is disabled.
            self.disable_author_choices()
            tt = self.feature_list
            tt = np.array(tt)
            y_data = []
            x_data = []
            ttt = 1
            for t in tt :
                t = np.array(t)
                y_data.append(float(sum(t.T[self.featureNameChoices.GetSelection()]))/float(len(t.T[self.featureNameChoices.GetSelection()])))
                x_data.append(ttt)
                ttt += 1
            self.draw_graph.draw_single_graph(x_data,y_data,'Authors','feature_value',self.feature_name_list[self.featureNameChoices.GetSelection()])
            png = wx.Image(path+"/temp_img.png",wx.BITMAP_TYPE_ANY)
            png = png.Scale(400,300,wx.IMAGE_QUALITY_HIGH)
            png = png.ConvertToBitmap()
            self.graph_img.SetBitmap(png)
    def disable_author_choices(self) :
        self.authorNameChoices.Disable()
    def enable_author_choices(self) :
        self.authorNameChoices.Enable()
    def close_all(self,event) :
        """Destroy the frame, ignoring errors if it is already gone."""
        try :
            self.Destroy()
        except :
            pass
class testing_window(wx.Frame) :
    """Window for classifying one unknown text file using the globally
    trained model ``tr`` (built by features_window.start_training).
    """
    def __init__(self,parent,id) :
        # Author names from the generated CSV files; fall back to blank
        # placeholders when no training data has been generated yet.
        self.author_list = []
        try :
            a = os.listdir(path+"/generated_files/")
            for b in a :
                self.author_list.append(b[:-4])
        except :
            self.author_list.append(' ')
            self.author_list.append(' ')
        self.testing_novel=[]
        # novel1 holds [directory, filename] once a test file is chosen.
        self.novel1=[]
        wx.Frame.__init__(self,parent,id,'TESTING',size=(480,150),style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER^wx.MAXIMIZE_BOX)
        panel = wx.Panel(self)
        panel.SetBackgroundColour(wx.Colour(255,255,255))
        font1 = wx.Font(10, wx.DEFAULT, wx.NORMAL,wx.BOLD)
        font1.SetPointSize(12)
        self.authorText=wx.StaticText(panel,-1,"TEST FILE",pos=(20,20),size=(80,25))
        self.authorText.SetForegroundColour(wx.Colour(40,49,153))
        self.authorText.SetFont(font1)
        select_novels_button=wx.Button(panel,label="SELECT TEST FILE",pos=(120,60),size=(130,25))
        select_novels_button.SetBackgroundColour(wx.Colour(150,177,250))
        select_novels_button.SetForegroundColour(wx.Colour(40,49,153))
        self.Bind(wx.EVT_BUTTON, self.show_select_novel, select_novels_button)
        self.novelText=wx.TextCtrl(panel,-1,"",pos=(120,20),size=(300,-1))
        self.novelText.Bind(wx.EVT_LEFT_DOWN, self.show_select_novel)
        self.novelText.SetInsertionPoint(0)
        start_test_button=wx.Button(panel,label="START TESTING",pos=(260,60),size=(130,25))
        start_test_button.SetBackgroundColour(wx.Colour(150,177,250))
        start_test_button.SetForegroundColour(wx.Colour(40,49,153))
        self.Bind(wx.EVT_BUTTON, self.start_test_dialog, start_test_button)
    def start_test_dialog(self,event) :
        """Validate that a file was chosen, confirm, then run the test.

        len(self.novel1) < 2 means no [directory, filename] pair was
        recorded, i.e. nothing was selected.
        """
        if len(self.novel1) <2 :
            box=wx.MessageDialog(None,"PLEASE SELECT 1 FILE",'START TESTING',wx.OK)
            answer=box.ShowModal()
            box.Destroy()
        else :
            box=wx.MessageDialog(None,"DO YOU WANT TO START TESTING?",'START TESTING',wx.YES_NO)
            answer=box.ShowModal()
            box.Destroy()
            if answer==wx.ID_YES :
                self.testing_novel=[]
                self.testing_novel.append(self.novel1)
                try :
                    if self.testing_novel[0] :
                        self.start_all_testing()
                except :
                    # start_all_testing raises when the global `tr` model
                    # does not exist yet (training never ran).
                    box=wx.MessageDialog(None,"PLEASE TRAIN THE SYSTEM FIRST",'START TESTING',wx.OK)
                    answer=box.ShowModal()
                    box.Destroy()
    def start_binary_testing(self) :
        """Two-author classification of the selected file.

        NOTE(review): appears to be dead code from an older two-author UI.
        It references self.author1Choices / self.author2Choices and
        self.testing_novel[1], none of which this class ever creates —
        confirm before calling.
        """
        doc = features(self.testing_novel[1][1],'unknown',self.testing_novel[1][0])
        doc.extract_features()
        # Average each per-thousand-token count list into one scalar per
        # feature, matching the CSV column order.
        self.test_data = []
        self.test_data.append(float(sum(doc.number_comas))/float(len(doc.number_comas)))
        self.test_data.append(float(sum(doc.number_semicolans))/float(len(doc.number_semicolans)))
        self.test_data.append(float(sum(doc.number_quotations))/float(len(doc.number_quotations)))
        self.test_data.append(float(sum(doc.number_exclamations))/float(len(doc.number_exclamations)))
        self.test_data.append(float(sum(doc.number_hyphens))/float(len(doc.number_hyphens)))
        self.test_data.append(float(sum(doc.number_ands))/float(len(doc.number_ands)))
        self.test_data.append(float(sum(doc.number_buts))/float(len(doc.number_buts)))
        self.test_data.append(float(sum(doc.number_howevers))/float(len(doc.number_howevers)))
        self.test_data.append(float(sum(doc.number_ifs))/float(len(doc.number_ifs)))
        self.test_data.append(float(sum(doc.number_thats))/float(len(doc.number_thats)))
        self.test_data.append(float(sum(doc.number_mores))/float(len(doc.number_mores)))
        self.test_data.append(float(sum(doc.number_musts))/float(len(doc.number_musts)))
        self.test_data.append(float(sum(doc.number_mights))/float(len(doc.number_mights)))
        self.test_data.append(float(sum(doc.number_thiss))/float(len(doc.number_thiss)))
        self.test_data.append(float(sum(doc.number_verys))/float(len(doc.number_verys)))
        self.test_data.append(doc.mean_word_length)
        self.test_data.append(doc.mean_sentence_length)
        self.test_data.append(doc.standard_deviation_sentence)
        docs = []
        docs.append(self.author_list[self.author1Choices.GetSelection()]+".csv")
        docs.append(self.author_list[self.author2Choices.GetSelection()]+".csv")
        y = []
        noa = 0
        author_names = []
        train_data = []
        author_files = os.listdir(path+"/generated_files")
        for author in docs :
            author_names.append(author[:-4])
            text1 = open(path+"/generated_files/"+author,"r").read().split("\n")
            # Rows [1:-1] skip the CSV header and trailing empty line.
            for txt in text1[1:-1] :
                t = []
                y.append(noa)
                # Columns [1:-1] skip the doc name and trailing empty field.
                for i in txt.split(",")[1:-1] :
                    t.append(float(i))
                train_data.append(t)
            noa += 1
        clfr1 = LinearSVC()
        clfr1.fit(train_data,y)
        auth_name = author_names[clfr1.predict(np.array(self.test_data).reshape(1,-1))[0]]
        box=wx.MessageDialog(None,"Author of the document is '"+auth_name+"'.",'message',wx.OK)
        answer=box.ShowModal()
        box.Destroy()
    def start_all_testing(self) :
        """Extract features from the selected file, classify it with the
        global trained model ``tr``, and show the predicted author."""
        doc = features(self.testing_novel[0][1],'unknown',self.testing_novel[0][0])
        doc.extract_features()
        # Average each per-thousand-token count list into one scalar per
        # feature, matching the training CSV column order.
        self.test_data = []
        self.test_data.append(float(sum(doc.number_comas))/float(len(doc.number_comas)))
        self.test_data.append(float(sum(doc.number_semicolans))/float(len(doc.number_semicolans)))
        self.test_data.append(float(sum(doc.number_quotations))/float(len(doc.number_quotations)))
        self.test_data.append(float(sum(doc.number_exclamations))/float(len(doc.number_exclamations)))
        self.test_data.append(float(sum(doc.number_hyphens))/float(len(doc.number_hyphens)))
        self.test_data.append(float(sum(doc.number_ands))/float(len(doc.number_ands)))
        self.test_data.append(float(sum(doc.number_buts))/float(len(doc.number_buts)))
        self.test_data.append(float(sum(doc.number_howevers))/float(len(doc.number_howevers)))
        self.test_data.append(float(sum(doc.number_ifs))/float(len(doc.number_ifs)))
        self.test_data.append(float(sum(doc.number_thats))/float(len(doc.number_thats)))
        self.test_data.append(float(sum(doc.number_mores))/float(len(doc.number_mores)))
        self.test_data.append(float(sum(doc.number_musts))/float(len(doc.number_musts)))
        self.test_data.append(float(sum(doc.number_mights))/float(len(doc.number_mights)))
        self.test_data.append(float(sum(doc.number_thiss))/float(len(doc.number_thiss)))
        self.test_data.append(float(sum(doc.number_verys))/float(len(doc.number_verys)))
        self.test_data.append(doc.mean_word_length)
        self.test_data.append(doc.mean_sentence_length)
        self.test_data.append(doc.standard_deviation_sentence)
        tr.test(self.test_data)
        box=wx.MessageDialog(None,"Author of the document is '"+tr.correct_author_name+"'.",'message',wx.OK)
        answer=box.ShowModal()
        box.Destroy()
    def disable_choices(self,event) :
        # NOTE(review): references widgets this class never creates; dead
        # code paired with start_binary_testing — confirm before use.
        self.author1Choices.Disable()
        self.author2Choices.Disable()
    def enable_choices(self,event) :
        # NOTE(review): same as disable_choices — appears to be dead code.
        self.author1Choices.Enable()
        self.author2Choices.Enable()
    def show_select_novel(self,event) :
        """Open a file dialog and record [directory, filename] of the
        chosen test file (ignored if the user picked nothing)."""
        wcd = 'Text Files (*.txt)|*.txt'
        open_dlg = wx.FileDialog(self, message='Choose Novels', defaultDir=os.getcwd(), defaultFile='',wildcard=wcd, style=wx.FD_OPEN|wx.FD_CHANGE_DIR|wx.FD_MULTIPLE)
        ans=open_dlg.ShowModal()
        if open_dlg.GetFilename()!="" :
            self.novel1=[]
            novels=""
            novels+=open_dlg.GetDirectory()
            novels+=open_dlg.GetFilename()
            self.novel1.append(open_dlg.GetDirectory())
            self.novel1.append(open_dlg.GetFilename())
            self.novelText.SetValue(novels)
class features() :
    """Extracts stylometric features from one text document.

    Reads <pathh>/<docnamee>, flattens newlines, lower-cases, and
    tokenizes with nltk.  extract_features() fills the number_* count
    lists and summary statistics plus the textual report in
    ``full_features``; create_csv_file() appends one averaged row per
    document to <path>/generated_files/<author>.csv.
    """

    def __init__(self,docnamee,authornamee,pathh) :
        self.docname = docnamee
        self.authorname = authornamee
        self.path = pathh
        self.file1 = open(self.path+"/"+self.docname,"r")
        # Whole document as one lower-cased line.
        self.data = self.file1.read().replace("\n"," ").lower()
        self.tokenized_data = nltk.tokenize.word_tokenize(self.data)

    def print_content(self) :
        """Debug hook; intentionally a no-op."""
        pass

    def _count_per_thousand(self,match) :
        """Return the number of tokens satisfying *match* in each
        consecutive block of 1000 tokens.

        A trailing partial block (< 1000 tokens) is ignored, matching the
        original per-feature loops this helper replaces.
        """
        counts = []
        seen = 0
        hits = 0
        for token in self.tokenized_data :
            seen += 1
            if match(token) :
                hits += 1
            if seen == 1000 :
                counts.append(hits)
                seen = 0
                hits = 0
        return counts

    def _add_report_line(self,label,value) :
        """Append one 'label = value' section to the textual report."""
        self.full_features += label + " = "
        self.full_features += str(value)
        self.full_features += "\n\n"

    def create_csv_file(self) :
        """Append this document's averaged features as one CSV row to
        <path>/generated_files/<author>.csv, writing the header line
        first if the file does not exist yet.

        Fix: file existence is now checked with os.path.exists instead of
        a probe open("r") whose handle was never closed.
        """
        global path
        try :
            os.mkdir(path+"/generated_files")
        except :
            # Directory already exists.
            pass
        csv_name = path+"/generated_files/"+self.authorname+".csv"
        if not os.path.exists(csv_name) :
            header_cols = [
                "Average Number of comas per thousand tokens",
                "Average Number of semicolons per thousand tokens",
                "Average Number of quotation marks per thousand tokens",
                "Average Number of exclamation marks per thousand tokens",
                "Average Number of hyphens per thousand tokens",
                "Average Number of ands per thousand tokens",
                "Average Number of buts per thousand tokens",
                "Average Number of howevers per thousand tokens",
                "Average Number of ifs per thousand tokens",
                "Average Number of thats per thousand tokens",
                "Average Number of mores per thousand tokens",
                "Average Number of musts per thousand tokens",
                "Average Number of mights per thousand tokens",
                "Average Number of thiss per thousand tokens",
                "Average Number of verys per thousand tokens",
                "Mean Word Length",
                "Mean Sentence Length",
                "Standard deviation of Sentence Length",
            ]
            file1 = open(csv_name,"a+")
            # Leading comma: the first column of every row is the doc name.
            file1.write(","+",".join(header_cols)+"\n")
            file1.close()
        # Average each per-thousand count list into one scalar; the column
        # order must match the header above.
        values = []
        for counts in (self.number_comas, self.number_semicolans,
                       self.number_quotations, self.number_exclamations,
                       self.number_hyphens, self.number_ands,
                       self.number_buts, self.number_howevers,
                       self.number_ifs, self.number_thats,
                       self.number_mores, self.number_musts,
                       self.number_mights, self.number_thiss,
                       self.number_verys) :
            values.append(str(float(sum(counts))/float(len(counts))))
        values.append(str(self.mean_word_length))
        values.append(str(self.mean_sentence_length))
        values.append(str(self.standard_deviation_sentence))
        file1 = open(csv_name,"a+")
        # The trailing comma is deliberate: downstream parsing uses
        # split(",")[1:-1] and relies on a final empty field.
        file1.write(self.docname+","+",".join(values)+","+"\n")
        file1.close()

    def extract_features(self) :
        """Compute all feature lists/statistics and build the textual
        report in self.full_features."""
        self.full_features = "----Features-----\n\n"
        # Punctuation and function-word frequencies per 1000 tokens.
        self.number_comas = self._count_per_thousand(lambda t : t == ',')
        self._add_report_line("Number of comas per thousand tokens",self.number_comas)
        self.number_semicolans = self._count_per_thousand(lambda t : t == ';')
        self._add_report_line("Number of semicolons per thousand tokens",self.number_semicolans)
        self.number_quotations = self._count_per_thousand(lambda t : t == '"' or t == "'")
        self._add_report_line("Number of quotation marks per thousand tokens",self.number_quotations)
        self.number_exclamations = self._count_per_thousand(lambda t : t == '!')
        self._add_report_line("Number of exclamation marks per thousand tokens",self.number_exclamations)
        self.number_hyphens = self._count_per_thousand(lambda t : t == '-')
        self._add_report_line("Number of hyphens per thousand tokens",self.number_hyphens)
        self.number_ands = self._count_per_thousand(lambda t : t == 'and')
        self._add_report_line("Number of ands per thousand tokens",self.number_ands)
        self.number_buts = self._count_per_thousand(lambda t : t == 'but')
        self._add_report_line("Number of buts per thousand tokens",self.number_buts)
        self.number_howevers = self._count_per_thousand(lambda t : t == 'however')
        self._add_report_line("Number of howevers per thousand tokens",self.number_howevers)
        self.number_ifs = self._count_per_thousand(lambda t : t == 'if')
        self._add_report_line("Number of ifs per thousand tokens",self.number_ifs)
        self.number_thats = self._count_per_thousand(lambda t : t == 'that')
        self._add_report_line("Number of thats per thousand tokens",self.number_thats)
        self.number_mores = self._count_per_thousand(lambda t : t == 'more')
        self._add_report_line("Number of mores per thousand tokens",self.number_mores)
        self.number_musts = self._count_per_thousand(lambda t : t == 'must')
        self._add_report_line("Number of musts per thousand tokens",self.number_musts)
        self.number_mights = self._count_per_thousand(lambda t : t == 'might')
        self._add_report_line("Number of mights per thousand tokens",self.number_mights)
        self.number_thiss = self._count_per_thousand(lambda t : t == 'this')
        self._add_report_line("Number of thiss per thousand tokens",self.number_thiss)
        self.number_verys = self._count_per_thousand(lambda t : t == 'very')
        self._add_report_line("Number of verys per thousand tokens",self.number_verys)
        ## Mean word length
        data = str(self.data)
        data = data.replace("."," ")
        data = data.replace(","," ")
        data = data.replace("!"," ")
        words = data.split()
        # Fix: the original removed non-alphabetic-initial words from the
        # list while iterating over it, which silently skipped elements;
        # filter into a new list instead.
        alpha_words = [w for w in words if w[:1].isalpha()]
        total_chars = sum(len(w) for w in alpha_words)
        self.mean_word_length = float(float(total_chars)/float(len(alpha_words)))
        self._add_report_line("Mean word length",self.mean_word_length)
        ## Mean Sentence Length (in characters)
        sentences = nltk.tokenize.sent_tokenize(str(self.data))
        total_len = 0
        for sentence in sentences :
            total_len += len(sentence)
        self.mean_sentence_length = float(float(total_len)/float(len(sentences)))
        self._add_report_line("Mean Sentence length",self.mean_sentence_length)
        ## Standard Deviation of Sentence Length (population std dev)
        sq_dev = 0.0
        for sentence in sentences :
            d = float(len(sentence))-self.mean_sentence_length
            sq_dev += d*d
        self.standard_deviation_sentence = math.sqrt(float(float(sq_dev)/(float(len(sentences)))))
        self._add_report_line("Standard Deviation of Sentence Length",self.standard_deviation_sentence)
class DrawGraph() :
    """Thin matplotlib wrapper: renders one graph to temp_img.png, or a
    set of graphs to a multi-page PDF."""
    def __init__(self) :
        pass
    def draw_single_graph(self,x_data,y_data,x_label,y_label,title) :
        """Plot one line graph and save it to <path>/temp_img.png."""
        # Close any previous figure so figures don't accumulate.
        try :
            plt.close()
        except :
            pass
        fig = plt.figure()
        axis = fig.add_subplot(111)
        axis.set_title(title)
        axis.set_xlabel(x_label)
        axis.set_ylabel(y_label)
        axis.grid(True)
        plt.xticks(x_data)
        plt.plot(x_data,y_data,marker='*',c = 'red')
        plt.savefig(path+'/temp_img.png')
    def save_set_of_graphs(self,graph_data) :
        """Save a set of graphs as one PDF, one figure per page.

        graph_data layout:
        [flag, pdf_basename, [[x, y, xlabel, ylabel, title], ...]] —
        nothing is written when flag is falsy.
        """
        if graph_data[0] :
            pp = PdfPages(graph_data[1]+'.pdf')
            for data in graph_data[2] :
                try :
                    plt.close()
                except :
                    pass
                fig = plt.figure()
                axis = fig.add_subplot(111)
                axis.set_title(data[4])
                axis.set_xlabel(data[2])
                axis.set_ylabel(data[3])
                axis.grid(True)
                plt.xticks(data[0])
                plt.plot(data[0],data[1],marker='*',c = 'red')
                pp.savefig(fig)
            pp.close()
class TrainingTesting() :
    """Loads the per-author feature CSVs, trains a LinearSVC on them, and
    classifies new feature vectors.

    Label i corresponds to author_names[i] (CSV file name order).
    """

    def __init__(self) :
        self.y = []            # class label per training row
        self.noa = 0           # number of authors seen so far
        self.author_names = []
        self.train_data = []
        self.author_files = os.listdir(path+"/generated_files")
        for author in self.author_files :
            self.author_names.append(author[:-4])  # strip ".csv"
            # Fix: close the CSV instead of leaking the file handle.
            with open(path+"/generated_files/"+author,"r") as csv_file :
                text1 = csv_file.read().split("\n")
            # Rows [1:-1] skip the header and the trailing empty line.
            for txt in text1[1:-1] :
                t = []
                self.y.append(self.noa)
                # Columns [1:-1] skip the doc name and the trailing
                # empty field produced by the row's trailing comma.
                for i in txt.split(",")[1:-1] :
                    t.append(float(i))
                self.train_data.append(t)
            self.noa += 1

    def train(self) :
        """Fit the classifier on the loaded training rows."""
        self.clfr = LinearSVC()
        self.clfr.fit(self.train_data,self.y)

    def test(self,test_data) :
        """Predict and store the author name for one feature vector in
        self.correct_author_name."""
        self.correct_author_name = self.author_names[self.clfr.predict(np.array(test_data).reshape(1,-1))[0]]
def main() :
    """Create the wx application, show the main window, and run the
    event loop until the GUI exits."""
    app = wx.App()
    window = main_window(parent=None, id=-1)
    window.Show()
    app.MainLoop()


if __name__ == '__main__' :
    main()
|
984,286 | 77805457ef0ec51cafc97754f3d22b85daba3672 | # 编译日期:2020-10-27 16:15:31
# 版权所有:www.i-search.com.cn
# coding=utf-8
|
984,287 | 9c1cd44d742dd57ad34ec6834d2fb867e6f0c7fc | # Generated by Django 3.1.7 on 2021-02-27 18:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.7, 2021-02-27).

    Creates the component/system mapping models and rewires the
    ControllerSystem foreign keys.  Do not edit the operations by hand
    after this migration has been applied anywhere.
    """

    dependencies = [
        ('envdaq', '0006_controllerdef_component_map'),
        ('envdatasystem', '0005_controllersystem_daqsystem'),
    ]

    operations = [
        migrations.CreateModel(
            name='ComponentMap',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'verbose_name': 'ComponentMap',
                'verbose_name_plural': 'ComponentMaps',
            },
        ),
        migrations.CreateModel(
            name='ControllerComponent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
            ],
            options={
                'verbose_name': 'ControllerComponent',
                'verbose_name_plural': 'ControllerComponents',
            },
        ),
        migrations.CreateModel(
            name='InstrumentComponent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
            ],
            # NOTE(review): verbose names say 'ControllerComponent' although
            # this model is InstrumentComponent — likely a copy/paste slip
            # in the model's Meta; changing it here would desync the
            # migration from the model, so it is documented only.
            options={
                'verbose_name': 'ControllerComponent',
                'verbose_name_plural': 'ControllerComponents',
            },
        ),
        migrations.AddField(
            model_name='controllersystem',
            name='daq',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='controllersystem_daq', to='envdatasystem.daqsystem', verbose_name='DAQ System'),
        ),
        migrations.AddField(
            model_name='controllersystem',
            name='parent_controller',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='controllersystem_parentcontroller', to='envdatasystem.controllersystem', verbose_name='Parent Controller'),
        ),
        migrations.AlterField(
            model_name='controllersystem',
            name='controller',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='controllersystem_controller', to='envdaq.controller', verbose_name='Controller'),
        ),
        migrations.CreateModel(
            name='InstrumentSystem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
                ('controller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instrumentsystem_controller', to='envdatasystem.controllersystem', verbose_name='Controller System')),
                ('instrument', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instrumentsystem_instrument', to='envdaq.instrumentalias', verbose_name='Instrument')),
            ],
            options={
                'verbose_name': 'Instrument System',
                'verbose_name_plural': 'Instrument Systems',
            },
        ),
        migrations.CreateModel(
            name='InstrumentComponentItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('primary', models.BooleanField(default=False, verbose_name='Primary Component')),
                ('component', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instrumentcomponentitem_component', to='envdatasystem.instrumentcomponent', verbose_name='Component')),
                ('instrument', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instrumentcomponentitem_controller', to='envdatasystem.instrumentsystem', verbose_name='Controller')),
            ],
            options={
                'verbose_name': 'instrumentcomponentitem',
                'verbose_name_plural': 'instrumentcomponentitems',
            },
        ),
        migrations.AddField(
            model_name='instrumentcomponent',
            name='Instrument',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instrumentcomponent_instrument', to='envdatasystem.instrumentsystem', verbose_name='Controller'),
        ),
        migrations.CreateModel(
            name='ControllerComponentItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('primary', models.BooleanField(default=False, verbose_name='Primary Component')),
                ('component', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='controllercomponentitem_component', to='envdatasystem.controllercomponent', verbose_name='Component')),
                ('controller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='controllercomponentitem_controller', to='envdatasystem.controllersystem', verbose_name='Controller')),
            ],
            options={
                'verbose_name': 'controllercomponentitem',
                'verbose_name_plural': 'controllercomponentitems',
            },
        ),
        migrations.AddField(
            model_name='controllercomponent',
            name='controller',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='controllercomponent_controller', to='envdatasystem.controllersystem', verbose_name='Controller'),
        ),
    ]
|
984,288 | c15fa5f674af82ee599227c35a5798192e372208 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import requests
from baiduocr.result import LocateRecognizeResult
_API_URL = 'http://apis.baidu.com/idl_baidu/baiduocrpay/idlocrpaid'
class BaiduOcr(object):
    """Client for the Baidu OCR paid HTTP API (apis.baidu.com)."""

    # Tiny base64-encoded JPEG used only by ping().
    _IMAGE_FOR_TEST = '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDABMNDxEPDBMREBEWFRMXHTAfHRsbHTsqLSMwRj5KSUU+RENNV29eTVJpU0NEYYRiaXN3fX59S12Jkoh5kW96fXj/2wBDARUWFh0ZHTkfHzl4UERQeHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHj/wAARCAAfACEDAREAAhEBAxEB/8QAGAABAQEBAQAAAAAAAAAAAAAAAAQDBQb/xAAjEAACAgICAgEFAAAAAAAAAAABAgADBBESIRMxBSIyQXGB/8QAFAEBAAAAAAAAAAAAAAAAAAAAAP/EABQRAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/APawEBAQEBAgy8i8ZTVV3UY6V1eU2XoWDDZB19S646Gz39w9fkKsW1r8Wm2yo1PYis1be0JG9H9QNYCAgc35Cl3yuVuJZl0cB41rZQa32dt2y6OuOiOxo61vsLcVblxaVyXD3hFFjL6La7I/sDWAgICAgICB/9k='  # pylint: disable=line-too-long
    # Declared request-size limit (300K).
    # NOTE(review): not enforced anywhere in this class — confirm whether
    # the server rejects larger payloads or a client-side check is needed.
    _IMAGE_MAX_SIZE = 307200  # 300K
    _SERVICE_LIST = set(['LocateRecognize', 'Locate'])
    _LANG_LIST = set(['CHN_ENG', 'ENG', 'JAP', 'KOR'])

    def __init__(self, url=_API_URL, key=''):
        """Initialise the client.

        :param url: OCR service endpoint (defaults to the public API URL).
        :param key: authorization key sent in the ``apikey`` header.
        """
        self.api_key = key
        self.url = url

    def ping(self):
        """Check service availability using the bundled sample image.

        Prints 'pong' when the API answers successfully.  The parsed
        result object is always returned for debugging.
        """
        header = {'apikey': self.api_key}
        data = {
            'fromdevice': 'pc',
            'clientip': '10.10.10.0',
            'detecttype': 'LocateRecognize',
            'languagetype': 'CHN_ENG',
            'imagetype': '1',
            'image': self._IMAGE_FOR_TEST,
        }
        resp = requests.post(self.url, headers=header, data=data)
        res = resp.json() if resp is not None else {}
        res = LocateRecognizeResult(res)
        if res.status == 0 and res.message == u'success':
            print('pong')
        return res

    def recog(self, image, service='LocateRecognize', lang='CHN_ENG'):
        """Run OCR on *image* (a local file path or an http(s) URL).

        :param service: 'LocateRecognize' (detect + recognize text lines)
                        or 'Locate' (line location only).
        :param lang: one of CHN_ENG / ENG / JAP / KOR.
        :returns: a LocateRecognizeResult, or ``{}`` when the image could
                  not be fetched/opened (kept for backward compatibility).
        :raises ValueError: on unknown service or language.
        """
        if service not in self._SERVICE_LIST:
            raise ValueError('wrong service type')
        if lang not in self._LANG_LIST:
            raise ValueError('unsupported language')
        header = {'apikey': self.api_key}
        data = {
            'fromdevice': 'pc',
            'clientip': '10.10.10.0',
            'detecttype': service,
            'languagetype': lang,
            'imagetype': '2',
        }
        image_file = None
        try:
            if image.startswith(('http://', 'https://')):
                image_file = requests.get(image).content
            else:
                image_file = open(image, 'rb')
        except Exception:
            # Best-effort contract kept from the original: any fetch/open
            # failure yields an empty result instead of raising.
            return {}
        try:
            resp = requests.post(self.url, headers=header, data=data,
                                 files={'image': ('ocr.jpg', image_file)})
        finally:
            # Fixed: the original leaked the file handle for local images.
            if hasattr(image_file, 'close'):
                image_file.close()
        resp = {} if not resp else resp.json()
        return LocateRecognizeResult(resp)
|
984,289 | 71fe875273a55ec2b0fcad979ffd922b70cd766e |
# Import libraries
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense

# Initialize the CNN
classifier = Sequential()

# Convolution layer: 32 filters of 3x3 over 64x64 RGB input
classifier.add(Convolution2D(32, 3, 3, input_shape=(64, 64, 3), activation='relu'))
# Max pooling halves each spatial dimension
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Second convolutional block to improve accuracy
classifier.add(Convolution2D(32, 3, 3, activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Flattening
classifier.add(Flatten())

# Fully connected layers.
# NOTE(review): `output_dim` is Keras 1.x syntax; on Keras 2 this argument
# is called `units` — confirm the installed Keras version.
classifier.add(Dense(output_dim=128, activation='relu'))
classifier.add(Dense(output_dim=1, activation='sigmoid'))

# Compile the CNN for binary classification
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Fitting the CNN to images
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_set = train_datagen.flow_from_directory(
    'Convolutional_Neural_Networks/dataset/training_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')
test_set = test_datagen.flow_from_directory(
    'Convolutional_Neural_Networks/dataset/test_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')
classifier.fit_generator(
    train_set,
    steps_per_epoch=8000,
    epochs=25,
    validation_data=test_set,
    validation_steps=2000)

# Making new predictions
import numpy as np
from keras.preprocessing import image

test_image = image.load_img(
    'Convolutional_Neural_Networks/dataset/single_prediction/cat_or_dog_1.jpg',
    target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
train_set.class_indices
if result[0][0] == 1:
    prediction = 'dog'
else:
    prediction = 'cat'

from tensorflow.python.client import device_lib
# Fixed: the original line was missing its closing parenthesis, which made
# the whole file a syntax error.
print(device_lib.list_local_devices())
# Fixed: `pip install tensorflow-gpu` is a shell command, not Python — run
# it in a terminal, not inside this script.
|
984,290 | f27326208ec6b87df705a283f041244411bb68e6 | from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField, SubmitField, ValidationError,SelectField,IntegerField,validators,FormField
from wtforms.validators import DataRequired, Email, EqualTo
from ..models import Employee
from wtforms.fields.html5 import TelField
from wtforms_alchemy import PhoneNumberField
class ProjectInfo(FlaskForm):
    """Sub-form describing the service a customer requests; embedded into
    RegistrationForm via FormField."""
    type = SelectField('Type of Service', choices=[('frame instance only', 'frame instance only'),
                                                   ('frame instance and engineer', 'frame instance and engineer')],
                       validators=[DataRequired()])
    numberOfEngs = IntegerField('Number of Enginners', default=0)

    def __init__(self, *args, **kwargs):
        # Disable CSRF token evaluation for this nested form; the outer
        # RegistrationForm carries its own token.
        # NOTE(review): `csrf_enabled` is deprecated in recent Flask-WTF
        # releases — confirm it still has effect with the installed version.
        kwargs['csrf_enabled'] = False
        super(ProjectInfo, self).__init__(*args, **kwargs)
class PersonalInformation(FlaskForm):
    """Personal-details step of the sign-up flow."""
    username = StringField('Username', validators=[DataRequired()])
    first_name = StringField('First Name', validators=[DataRequired()])
    last_name = StringField('Last Name', validators=[DataRequired()])
    # Phone number validated for the Finnish region, displayed nationally.
    phone = PhoneNumberField(region='FI', display_format='national')
    # Password must match the confirmation field below.
    password = PasswordField('Password', validators=[DataRequired(), EqualTo('confirm_password')])
    confirm_password = PasswordField('Confirm Password')
class BuisnessInformation(FlaskForm):
    """Business-details step of the sign-up flow.

    NOTE(review): the class name keeps the original 'Buisness' spelling so
    existing imports keep working; rename in a coordinated change.
    """
    Company = StringField('Company Name ', validators=[DataRequired()])
    country = SelectField('Country', choices=[('Be', 'Belguim'), ('ROU', 'Roumania'), ('OTH', 'Others')])
    adress = StringField(label="Address", validators=[DataRequired()])
    Description = StringField('Buisness Description')
class RegistrationForm(FlaskForm):
    """
    Form for users to create new account
    """
    names = StringField('Names', validators=[DataRequired()])
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired(), EqualTo('confirm_password')])
    confirm_password = PasswordField('Confirm Password')
    # Nested service-selection sub-form (its own CSRF token is disabled).
    project = FormField(ProjectInfo)
    submit = SubmitField('Sign Up')

    def validate_email(self, field):
        """WTForms inline validator: reject e-mail addresses already registered."""
        if Employee.query.filter_by(email=field.data).first():
            raise ValidationError('Email is already in use.')

    def validate_username(self, field):
        """WTForms inline validator: reject user names already registered."""
        if Employee.query.filter_by(username=field.data).first():
            raise ValidationError('Username is already in use.')
class LoginForm(FlaskForm):
    """
    Form for users to login
    """
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Login')
class ChangeEmailForm(FlaskForm):
    """Form to change the account e-mail address (requires password)."""
    email = StringField('New Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Login')

    # Fixed: the hook was named `validate_mail`, but WTForms only invokes
    # inline validators named `validate_<fieldname>` — with the field
    # called `email` the uniqueness check never ran.
    def validate_email(self, field):
        """Reject e-mail addresses that are already registered."""
        if Employee.query.filter_by(email=field.data).first():
            raise ValidationError("email already in use")
class ResetPasswordFormRequest(FlaskForm):
    """Form asking for the e-mail address to send a password-reset link to."""
    email = StringField("Your mail", validators=[DataRequired(), Email()])
    submit = SubmitField("Continue")

    # Fixed: renamed from `validate_mail` so WTForms actually calls it for
    # the `email` field; unregistered addresses are now rejected.
    def validate_email(self, field):
        """Require the address to belong to a registered Employee."""
        if Employee.query.filter_by(email=field.data).first() is None:
            raise ValidationError("You are not Registred Yet in Our System")
        return True
class ResetPasswordForm(FlaskForm):
    """Form setting a new password (with confirmation) for an account."""
    email = StringField("Your mail", validators=[DataRequired(), Email()])
    # New password must match the confirmation field below.
    password = PasswordField('New Password', validators=[DataRequired(), EqualTo('confirm_password'), ])
    confirm_password = PasswordField('Confirm Password')
    submit = SubmitField("Reset password")
|
984,291 | 79a44f4823e2b18d344ef669d8c2f0fbdf5e56df |
"""query_rcsb.py:
Query rscb
Last modified: Fri Aug 29, 2014 11:57PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import urllib2
import logging
import sys
import os
import xml.etree.ElementTree as ET
from query import Query
from downloader import Downloader
logger = logging.getLogger('rscb')
def fetchIF(id, download_dir):
    """Construct a Downloader for PDB id *id* targeting *download_dir*.

    NOTE(review): the Downloader is created but downloadFiles() is never
    called here (main() does call it) — confirm whether construction alone
    triggers the download.  The parameter shadows the builtin `id`; kept
    for interface compatibility.
    """
    logger.info("Fetching id: {}".format(id))
    d = Downloader(id, download_dir)
def main():
    """Parse command-line options and either run a text query against the
    RCSB Protein Data Bank or download the given PDB ids directly."""
    import argparse

    description = 'A script to download PDB files from RCSB Protein Data Bank'
    parser = argparse.ArgumentParser(description=description)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--query', '-q', metavar='query', nargs='+',
                       help='Query text')
    parser.add_argument('--query_type', '-qt', metavar='queryType',
                        default='Unspecified',
                        help="Type of this query, prefixed by org.pdb.query.simple")
    parser.add_argument('--download_dir', '-d', metavar="downloadDir",
                        default=os.getcwd(),
                        help="Directory. All PDB files are downloaded into this dir")
    group.add_argument('--fetch', '-f', metavar='fetchID', nargs='+',
                       help='Download these ids')

    # Parse into a plain throwaway namespace object; Query(args) reads the
    # attributes off it directly.
    class Args:
        pass

    args = Args()
    parser.parse_args(namespace=args)

    if args.query:
        # Text query: fetch, then print the structure report.
        q = Query(args)
        q.getStructureReport()
        q.printReport()
    elif args.fetch:
        # Direct fetch of explicit PDB ids.
        d = Downloader(args.fetch, args.download_dir)
        d.downloadFiles()
    else:
        raise UserWarning("Unknown option")


if __name__ == "__main__":
    main()
|
984,292 | cfef25d61ccdb426a9c5c186de9a1470e120227a | class BST:
    def __init__(self, value):
        # A leaf node: payload plus empty child links.
        self.value = value
        self.left = None
        self.right = None
def insert(self, value):
'''
node.insert(5) is the same as BST.insert(node, 5)
We use this when recursively calling, e.g. self.left.insert
'''
if value < self.value:
if self.left == None:
self.left = BST(value)
else:
self.left.insert(value)
else:
if self.right == None:
self.right = BST(value)
else:
self.right.insert(value)
    def __repr__(self):
        '''The string representation of a node.

        Here, we convert the value of the node to a string and make that
        the representation, so:

            a = BST(4)
            print(a)   # prints 4
        '''
        return str(self.value)
def del_max_node(node):
    """Delete the maximum node of the BST rooted at *node* and return the
    (possibly new) root.

    Fixed: the original crashed with AttributeError whenever the root had
    no right child (single-node tree, or root itself is the maximum).
    That case now promotes the left subtree as the new root (None for a
    single-node tree).  Runs in O(h) for tree height h.
    """
    if node.right is None:
        # Root is the maximum: its left subtree becomes the new root.
        return node.left
    root = node  # preserve the root to return it
    # Walk down to the parent of the right-most (maximum) node.
    while node.right.right is not None:
        node = node.right
    # Splice the max node out; its left subtree (or None) takes its place.
    node.right = node.right.left
    return root
def third_largest(node):
    """Return the third-largest value in the BST rooted at *node*.

    Removes the two largest nodes with del_max_node, then walks to the
    right-most remaining node.  Runs in O(h) for tree height h.
    NOTE(review): mutates the tree in place and assumes at least three
    nodes — confirm with callers.
    """
    del_max_node(node)
    root = del_max_node(node)
    # The maximum of what is left is the original third-largest value.
    cur = root
    while cur.right is not None:
        cur = cur.right
    return cur.value
'''
this function runs in O(h) time because in the worst case, the complexity is O(3h), which is O(h)
root = BST(4)
root.insert(2)
root.insert(3)
root.insert(5)
root.insert(7)
root.insert(6)
root.insert(8)
root.insert(9)
root.insert(10)
root.insert(69)
root.insert(11)
print(third_largest(root))
''' |
984,293 | f382582d503cf648b57b48f0bf60a5532fa614f2 | import pandas as pd
# Column names expected in every per-reviewer CSV file.
# NOTE(review): 'Novelilty' matches the (misspelled) CSV header — do not
# correct the string without changing the data files too.
collist = ['Team name', 'Intuitiveness', 'Creativity', 'Responsiveness', 'Novelilty']
csvlist = ['jacob.csv', 'cyril.csv']  # per-reviewer score files

# Running totals for each score category.
Intuitiveness = 0
Creativity = 0
Responsiveness = 0
Novelilty = 0

# Category name -> running total.  NOTE(review): the name shadows the
# builtin `dict`; kept so any existing references continue to work.
dict = {
    'Intuitiveness': Intuitiveness,
    'Creativity': Creativity,
    'Responsiveness': Responsiveness,
    'Novelilty': Novelilty
}

df = pd.DataFrame()

# Read each reviewer file and show its first 21 rows.
# Fixed: iterate the list directly instead of `range(len(csvlist))`.
for csv_name in csvlist:
    df = pd.read_csv(csv_name, usecols=collist, skipinitialspace=True)
    print(df.iloc[0:21])
|
984,294 | 7f0a5df8025c6445d59316b29b049b1d73a6d15e | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nnsubspace.nndataset.dataset import Dataset
from nnsubspace.nnmodel.model import NNModel

# Load the MNIST dataset wrapper.
dataset_ = Dataset(dataset='mnist')
# NOTE(review): the model is configured for 'imagenet' while the dataset
# above is 'mnist' — looks inconsistent; confirm which benchmark is
# intended before running.
model_ = NNModel(dataset='imagenet', model_id='0')
984,295 | ec35baecc590aa98782014ced8f2c5b20be9e4dd | from collections import Counter
# Collect every element of `lst` that occurs more than once, keeping the
# original order and multiplicity.
lst = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]
# Fixed: the original did `lst1 = lst1.append(el)`, which rebinds lst1 to
# None (list.append returns None) and crashes on the second iteration.
# Counter also replaces the O(n^2) `lst.count(el)`-in-a-loop pattern.
counts = Counter(lst)
lst1 = [el for el in lst if counts[el] > 1]
print(lst1)
984,296 | f23526a8ed19d09b5d199e8efd7a1231a5e2e537 | from django.db import models
class ReadOnly(models.Model):
    """Job-statistics row backed by the pre-existing 'ReadOnly' table.

    Field semantics follow the column names; presumably one row per
    city/job-title aggregate — TODO confirm against the data source.
    """
    num_jobs = models.IntegerField()
    city_name = models.TextField()
    state = models.TextField()
    latitude = models.FloatField()
    longitude = models.FloatField()
    job_title = models.TextField()
    created_at = models.DateField()

    class Meta:
        # Map to the existing table name verbatim (no app_label prefix).
        db_table = 'ReadOnly'
class WriteOnly(models.Model):
    """Same schema as ReadOnly, backed by the 'WriteOnly' table.

    NOTE(review): the field list duplicates ReadOnly exactly — consider an
    abstract base model if a third copy ever appears.
    """
    num_jobs = models.IntegerField()
    city_name = models.TextField()
    state = models.TextField()
    latitude = models.FloatField()
    longitude = models.FloatField()
    job_title = models.TextField()
    created_at = models.DateField()

    class Meta:
        # Map to the existing table name verbatim.
        db_table = 'WriteOnly'
class DiceJobs(models.Model):
    """Job listing row backed by the 'DiceJobs' table (the name suggests
    Dice.com postings — TODO confirm the scraper/source)."""
    title = models.TextField()
    skills = models.TextField()
    salary = models.IntegerField()
    location = models.TextField()
    # Stored as free text (e.g. a relative "posted X days ago" string?) —
    # NOTE(review): confirm why this is not a date field.
    posted = models.TextField()
    created_at = models.DateField()

    class Meta:
        # Map to the existing table name verbatim.
        db_table = 'DiceJobs'
984,297 | a3cf9a43854e1e83b1679e2e3bc9dba9370f11c5 | from django.db import models
from common.base_model import BaseModel
from online_store.models_manager import AvailableObjectsManager
class Product(BaseModel):
    """Catalogue product of the online store, with logical (soft) deletion."""
    title = models.CharField('наименование', max_length=128)
    description = models.TextField('описание')
    weight = models.IntegerField('вес')
    price = models.DecimalField('цена', max_digits=13, decimal_places=2)
    file_link = models.CharField('файл', max_length=256)
    # Logical-deletion flag; rows are flagged rather than removed.
    is_deleted = models.BooleanField(default=False)

    # Presumably excludes soft-deleted rows — TODO confirm against
    # AvailableObjectsManager's queryset.
    available_objects = AvailableObjectsManager()
    # Default manager: every row, including flagged ones.
    objects = models.Manager()

    class Meta:
        verbose_name = 'товар'
        verbose_name_plural = 'товары'

    def __str__(self):
        return self.title
|
984,298 | f34b0ce23d57c8f707d2991e105017dbfdf29c63 | import cv2
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
import sqlite3
from skimage import color, measure, feature
from skimage import io
from sklearn.cluster import KMeans, MeanShift
from zipfile import ZipFile, ZIP_DEFLATED
from yellowbrick.cluster import KElbowVisualizer
import math
# plt.style.use('dark_background')
# config for logging
# https://docs.python.org/3/library/logging.html#logrecord-attributes
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
                    , level=logging.INFO)
log = logging.getLogger(__name__)

# Module-level accumulators: km() appends each frame's centroid x/y
# coordinate lists here.
array_x_t = []
array_y_t = []
def km(img, number, g, dr, opa, parametr_p, rz_x):
    """Cluster the above-threshold pixel coordinates of *img* with K-Means.

    g is a (rows, cols) pair of index arrays as returned by np.where.
    The cluster count is chosen automatically with the elbow method over
    k in [1, 15).  number, dr and opa are not used in this body —
    NOTE(review): confirm whether callers still need them.

    Returns (img, contours, y_t, x_t, parametr_p, mkm_width, caff,
    cluster_centers), or None (implicitly) when *g* is empty.
    """
    # plt.cla()
    # plt.clf()
    x = g[0]
    y = g[1]
    # Proceed only when there is at least one centroid candidate.
    if len(x) > 0 and len(y) > 0:
        mkm_width, caff = rz(1214.6, img, rz_x)
        # zip (..., ..., img[x, y])
        z = [list(hhh) for hhh in zip(x, y)]
        # Elbow method picks the number of clusters.
        model = KMeans()
        vis = KElbowVisualizer(model, k=(1, 15))
        vis.fit(np.array(z))
        contours = measure.find_contours(img, 0.5)
        k = KMeans(n_clusters=vis.elbow_value_).fit(z)
        x_t = list(k.cluster_centers_[:, 0])
        y_t = list(k.cluster_centers_[:, 1])
        # Accumulate per-frame centroid coordinates in module globals.
        array_x_t.append(x_t)
        array_y_t.append(y_t)
        log.info('Параметр порога - {}'.format(parametr_p))
        return img, contours, y_t, x_t, parametr_p, mkm_width, caff, k.cluster_centers_
    else:
        # No candidates: log it and fall through, returning None.
        log.info("Не можем определить центроиды")
def rz(mkm, img, rz_x):
    """Convert a pixel width to microns using the image's vertical scale.

    caff = mkm / img.shape[0] is the microns-per-pixel factor; the result
    is (round(caff * rz_x), caff).
    """
    rows, cols = img.shape[0], img.shape[1]
    microns_per_pixel = mkm / rows
    width_in_microns = round(microns_per_pixel * rz_x)
    return width_in_microns, microns_per_pixel
def gen_video(img_folder, vn, fd):
    """Build video *vn* from every image in *img_folder* (1 fps) and pack
    it into zip archive *fd*.  Each step is skipped when its output file
    already exists.

    NOTE(review): os.listdir() order is arbitrary — confirm whether the
    frames need to be sorted.
    """
    # Fixed: the original tested/archived the module-level `video_name`
    # global instead of the `vn` parameter on both lines below.
    if not os.path.isfile(vn):
        imgs = [img for img in os.listdir(img_folder)]
        frame = cv2.imread(os.path.join(img_folder, imgs[0]))
        height, width, layers = frame.shape
        video = cv2.VideoWriter(vn, 0, 1, (width, height))
        for image in imgs:
            video.write(cv2.imread(os.path.join(img_folder, image)))
        cv2.destroyAllWindows()
        video.release()
    if not os.path.isfile(fd):
        log.info('Создание zip файла - %s', fd)
        with ZipFile(fd, mode='w', compression=ZIP_DEFLATED) as misfile:
            misfile.write(vn)
def f_dir(d, p, od, vn, fd, rz_x):
    """Threshold one sample image and run the centroid search on it.

    d : nominal input directory (currently unused — see NOTE below)
    p : threshold offset subtracted from the image maximum
    od : output directory, forwarded to km() as `dr`
    vn, fd : video/zip file names; unused in the current body
    rz_x : pixel width forwarded to km()

    Returns whatever km() returns (a result tuple, or None).
    """
    # log.info('Сканирование директории для исследования - %s', d)
    # remove_ds_store = [name for name in os.listdir(d) if not name.startswith(('.', 'ORG'))]
    # sort_list = sorted(remove_ds_store)
    # log.info('Найдено %s образца', len(sort_list))
    log.info('Поиск центроидов начат')
    # NOTE(review): the input path is hard-coded and `d` is ignored —
    # confirm whether the commented-out directory scan above should be
    # restored.
    path = 'konstantin/2019.10.02 ФИ-59/2019.10.02_actReg/2019.10.02_2/B2 97_ac.png'
    # Convert to grayscale.
    image = color.rgb2gray(io.imread(path))
    # Threshold level: p below the brightest pixel.
    fast = image.max() - p
    # Zero out everything at or below the threshold; keep the rest.
    raze = image <= fast
    image = np.where(raze, 0, image)
    gosh = np.where(image >= fast)
    fig = km(image, number=91001, g=gosh, dr=od, opa=d, parametr_p=p, rz_x=rz_x)
    log.info('Поиск центроидов окончен')
    return fig
# Configuration: input/output directories and output file names.
directory = "2020-2/A4 98 um 20200325/"
output_dir = 'a11'
video_name = '1.avi'
fileid = 'video.zip'
log.info('Директория для исследования - %s', directory)
log.info('Директория для выходных изображений - %s', output_dir)
# Entry point, currently disabled:
# f_dir(d=directory, p=0.6, od=output_dir, vn=video_name, fd=fileid)
|
984,299 | 2e39d4aa52ff2d5aefca208b7249585e6188fddb | from datetime import datetime
import calendar
import time
# Print the current Unix timestamp (and its start-of-minute variant)
# once per second, forever.
while True:
    utc_now = datetime.utcnow()
    epoch_seconds = calendar.timegm(utc_now.utctimetuple())
    # Same timestamp truncated to the start of the current minute.
    minute_start = epoch_seconds - utc_now.second
    print('Current timestamp: %s' % epoch_seconds)
    print('Current timestamp on the minute: %s' % minute_start)
    print('---')
    time.sleep(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.