index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,700 | e12ca2c4592a629ce78cae7211fedaf02352a603 | from .lasot import Lasot
from .got10k import Got10k
from .tracking_net import TrackingNet
from .imagenetvid import ImagenetVID
from .imagenetdet import ImagenetDET
from .coco_seq import MSCOCOSeq
from .vot import VOT
from .youtube_vos import YoutubeVOS
from .youtube_bb import YoutubeBB
|
4,701 | 9e793bd0faef65dfe8ac4b722e50d2055837449f | from googleAPI.drive import *
class GoogleSheet(GoogleDrive):
    """
    The base class of the Google Sheets API.

    It deals with extracting data from and appending data to Google
    Sheets spreadsheets, and is not tied to a specific spreadsheet.

    This class is powered by pandas. Thus, make sure the data in the
    spreadsheet is able to be processed by pandas.

    Terminology:
      Spreadsheet: The whole file. Same level as a Microsoft Excel file.
      Sheet: A tab inside the spreadsheet. Same as an Excel sheet.
      A1 notation: A string like `Sheet1!A1:B2`, that refers to a group of
        cells in the spreadsheet, and is typically used in formulas.
        https://developers.google.com/sheets/api/guides/concepts#a1_notation
    """

    def __init__(
        self,
        creds=None,
        credential_path="",
        credential_scopes=None,
        token_prefix="GoogleDrive_",
        token_suffix="",
    ):
        """
        Initialize the credential.

        If credential `creds` is provided and valid, it is used directly.
        Otherwise, `credential_path` and `credential_scopes` are used to
        get the token.

        Args:
            creds: None or google.oauth2.credentials.Credentials, default None
            credential_path: String, default ''
                Path to the credential with either 'token.pickle' or
                'credentials.json' in it.
            credential_scopes: List of strings or None, default None
                Scope of the credential. None means
                ['https://www.googleapis.com/auth/drive'], which can
                'See, edit, create, and delete all of your Google Drive files'.
                Details:
                https://developers.google.com/identity/protocols/oauth2/scopes#sheets
            token_prefix: String, default 'GoogleDrive_'
                Prefix of token file. eg. '{token_prefix}token.pickle'.
            token_suffix: String, default ''
                Suffix of token file. eg. 'token{token_suffix}.pickle'.
        """
        # A None sentinel avoids the mutable-default-argument pitfall while
        # keeping the same effective default scope for callers.
        if credential_scopes is None:
            credential_scopes = ["https://www.googleapis.com/auth/drive"]
        if creds is not None and self.credential_validation(creds):
            self.creds = creds
        else:
            self.creds = self.credential(
                credential_path, credential_scopes, token_prefix, token_suffix
            )

    def create_spreadsheet(self, spreadsheet_name: str):
        """
        Create a spreadsheet and return the newly created spreadsheet's ID.

        Official API guide:
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create

        Args:
            spreadsheet_name: String
                The name of the spreadsheet.

        Return:
            spreadsheet ID: String
        """
        service = build("sheets", "v4", credentials=self.creds)
        body = {"properties": {"title": spreadsheet_name}}
        # Request only the ID field to keep the response minimal.
        response = (
            service.spreadsheets()
            .create(body=body, fields="spreadsheetId")
            .execute()
        )
        return response.get("spreadsheetId")

    def search_spreadsheet(self, spreadsheet_name: str):
        """
        Search for the spreadsheet in Google Drive and return the spreadsheet ID.

        Since it is using the Google Drive API, the scope must include reading
        files in Google Drive.

        If you want a customized query, use `GoogleDrive.search_file()` instead.

        Args:
            spreadsheet_name: String
                The name of the spreadsheet. There is no file extension.

        Return:
            Dictionary.
              Key: Spreadsheet name.
              Value: List of spreadsheet IDs in case there are duplicate file names.
        """
        return self.search_file(file_name=spreadsheet_name)

    def get_spreadsheet_property(self, spreadsheet_id: str):
        """
        Get spreadsheet property and sheet property.

        Spreadsheet property includes the title, locale, timeZone,
        defaultFormat, etc. Sheet property includes sheetId, sheetIndex,
        sheetRowCount, and sheetColCount.

        Official API guide:
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get

        Args:
            spreadsheet_id: String
                Spreadsheet ID.

        Return:
            Tuple: (spreadsheet_property, sheet_property)
              spreadsheet_property: Dictionary
                The entire spreadsheet property; a superset of the sheet property.
                Structure of the response:
                https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
              sheet_property: Dictionary
                sheetId: Dictionary, key: sheet name, value: sheet ID
                    The unique ID of each sheet regardless of position.
                sheetIndex: Dictionary, key: sheet name, value: sheet index
                    The position of the sheet starting from 0.
                sheetRowCount: Dictionary, key: sheet name, value: sheet row count
                    The number of rows in the sheet. Note that this is the
                    boundary of the sheet, not the number of rows with data.
                sheetColCount: Dictionary, key: sheet name, value: sheet column count
                    The number of columns in the sheet. Note that this is the
                    boundary of the sheet, not the number of columns with data.
        """
        service = build("sheets", "v4", credentials=self.creds)
        request = service.spreadsheets().get(
            spreadsheetId=spreadsheet_id, includeGridData=False
        )
        # Spreadsheet property
        spreadsheet_property = request.execute()
        # Sheet property: a single pass over the sheets instead of four
        # separate comprehensions over the same list.
        sheetId, sheetIndex, sheetRowCount, sheetColCount = {}, {}, {}, {}
        for sheet in spreadsheet_property["sheets"]:
            props = sheet["properties"]
            title = props["title"]
            sheetId[title] = props["sheetId"]
            sheetIndex[title] = props["index"]
            sheetRowCount[title] = props["gridProperties"]["rowCount"]
            sheetColCount[title] = props["gridProperties"]["columnCount"]
        sheet_property = {
            "sheetId": sheetId,
            "sheetIndex": sheetIndex,
            "sheetRowCount": sheetRowCount,
            "sheetColCount": sheetColCount,
        }
        return spreadsheet_property, sheet_property

    def download_spreadsheet(self, spreadsheet_id: str, save_as=""):
        """
        Download the spreadsheet given its ID and either return a
        file pointer or save it as a file.

        Supported file formats: .xlsx, .csv, .pdf.
        For unsupported file formats, i.e. Open Office sheet, sheet only,
        and HTML, use `GoogleDrive.download_file()`.

        Official API guide:
        https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive

        Args:
            spreadsheet_id: String
                The spreadsheet ID.
            save_as: String, default ''
                '': Return a file pointer.
                'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.
                'CSV': Save as '{Spreadsheet name}.csv'. Return None.
                    First sheet only.
                'PDF': Save as '{Spreadsheet name}.pdf'. Return None.
                '*.xlsx': Save as '*.xlsx'. Return None.
                '*.csv': Save as '*.csv'. Return None.
                '*.pdf': Save as '*.pdf'. Return None.

        Return:
            None or file pointer depending on `save_as`.

        Raises:
            Exception: If `save_as` is not one of the supported formats.
        """
        spreadsheet_name = self.get_file_metadata(
            file_id=spreadsheet_id, fields="name"
        )["name"]
        mimeType = {
            "Excel": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            "Open Office sheet": "application/x-vnd.oasis.opendocument.spreadsheet",
            "PDF": "application/pdf",
            "CSV": "text/csv",
        }
        if save_as == "":
            # No target file name: hand back a pointer to the Excel export.
            result = self.download_file(
                file_id=spreadsheet_id, mimeType=mimeType["Excel"]
            )
        elif save_as == "Excel":
            result = self.download_file(
                file_id=spreadsheet_id,
                mimeType=mimeType["Excel"],
                save_as="{0}.xlsx".format(spreadsheet_name),
            )
        elif save_as == "CSV":
            result = self.download_file(
                file_id=spreadsheet_id,
                mimeType=mimeType["CSV"],
                save_as="{0}.csv".format(spreadsheet_name),
            )
        elif save_as == "PDF":
            result = self.download_file(
                file_id=spreadsheet_id,
                mimeType=mimeType["PDF"],
                save_as="{0}.pdf".format(spreadsheet_name),
            )
        # str.endswith() is clearer than negative-index slicing and
        # behaves identically here.
        elif save_as.endswith(".xlsx"):
            result = self.download_file(
                file_id=spreadsheet_id, mimeType=mimeType["Excel"], save_as=save_as
            )
        elif save_as.endswith(".csv"):
            result = self.download_file(
                file_id=spreadsheet_id, mimeType=mimeType["CSV"], save_as=save_as
            )
        elif save_as.endswith(".pdf"):
            result = self.download_file(
                file_id=spreadsheet_id, mimeType=mimeType["PDF"], save_as=save_as
            )
        else:
            raise Exception(
                textwrap.dedent(
                    """\
                    {0} is not a supported file format.
                    Please check the `GoogleSheet.download_spreadsheet()` docstring.
                    Or you may want to use `GoogleDrive.download_file()` method.\
                    """.format(
                        save_as
                    )
                )
            )
        return result

    def get_values(
        self,
        spreadsheet_id: str,
        range_,
        value_render_option=None,
        date_time_render_option=None,
    ):
        """
        Get a single value, a range of values, or several ranges of values.

        Use `GoogleSheet.download_spreadsheet()` if you want to get the
        entire spreadsheet.

        Official API guide:
        For single range:
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get
        For multiple ranges:
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet

        Example:
            Get the entire sheet of `Sheet 1`.
            >>> gs.get_values(spreadsheet_id, "'Sheet 1'")

            Get the value of cell `A5` in `Sheet 1`.
            >>> gs.get_values(spreadsheet_id, "'Sheet 1'!A5")

        Args:
            spreadsheet_id: String
            range_: String or List of strings in A1 notation
                String: A single sheet, a single range.
                List of strings: Several ranges.
            value_render_option: String, default None
                How values should be represented in the output.
                None means `ValueRenderOption.FORMATTED_VALUE`.
                Details:
                https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
            date_time_render_option: String, default None
                How dates, times, and durations should be represented in the
                output. Ignored when the render option is FORMATTED_VALUE;
                None means `DateTimeRenderOption.SERIAL_NUMBER`.
                Details:
                https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption

        Return:
            ValueRange in Dictionary. Details:
            https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange
        """
        service = build("sheets", "v4", credentials=self.creds)
        # The API applies the documented defaults when the options are None,
        # so they are passed straight through.
        request = (
            service.spreadsheets()
            .values()
            .batchGet(
                spreadsheetId=spreadsheet_id,
                ranges=range_,
                valueRenderOption=value_render_option,
                dateTimeRenderOption=date_time_render_option,
            )
        )
        return request.execute()

    def clear_values(self, spreadsheet_id: str, range_):
        """
        Clear values from a spreadsheet.

        Only values are cleared -- all other properties of
        the cell (such as formatting, data validation, etc.) are kept.

        Official API guide:
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear

        Args:
            spreadsheet_id: String
            range_: String, A1 notation

        Return:
            Dictionary, cleared range:
            {
              "spreadsheetId": string,
              "clearedRange": string
            }
        """
        service = build("sheets", "v4", credentials=self.creds)
        batch_clear_values_request_body = {
            # The ranges to clear, in A1 notation.
            "ranges": range_
        }
        request = (
            service.spreadsheets()
            .values()
            .batchClear(
                spreadsheetId=spreadsheet_id, body=batch_clear_values_request_body
            )
        )
        return request.execute()

    def update_values(self, spreadsheet_id: str, data, value_input_option="RAW"):
        """
        Set values in one or more ranges of a spreadsheet.

        Official API guide:
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update

        Args:
            spreadsheet_id: String
            data: ValueRange in Dictionary
                {
                  "range": string,
                  "majorDimension": enum (Dimension),
                  "values": [
                    array
                  ]
                }
                Details:
                https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange
            value_input_option: String, default 'RAW'
                How the input data should be interpreted
                ('RAW' or 'USER_ENTERED').

        Return:
            Dictionary in structure:
            {
              "spreadsheetId": string,
              "totalUpdatedRows": integer,
              "totalUpdatedColumns": integer,
              "totalUpdatedCells": integer,
              "totalUpdatedSheets": integer,
              "responses": [
                {
                  object (UpdateValuesResponse)
                }
              ]
            }
        """
        service = build("sheets", "v4", credentials=self.creds)
        batch_update_values_request_body = {
            # FIX: the REST request body field is camelCase `valueInputOption`;
            # the previous snake_case key is not a valid field of
            # BatchUpdateValuesRequest.
            "valueInputOption": value_input_option,
            # The new values to apply to the spreadsheet.
            "data": data,
        }
        request = (
            service.spreadsheets()
            .values()
            .batchUpdate(
                spreadsheetId=spreadsheet_id, body=batch_update_values_request_body
            )
        )
        return request.execute()

    def update_column_format(self):
        """
        Update the column format.

        Supported format: date, number, currency.

        Official API guide:
        https://developers.google.com/sheets/api/samples/formatting
        https://developers.google.com/sheets/api/guides/formats
        https://developers.google.com/sheets/api/guides/batchupdate
        """
        # TODO: not implemented yet.
        pass
|
4,702 | 8054ccb07d0130b75927a4bb9b712ce3d564b8fe | """
Test cases for ldaptor.protocols.ldap.delta
"""
from twisted.trial import unittest
from ldaptor import delta, entry, attributeset, inmemory
from ldaptor.protocols.ldap import ldapsyntax, distinguishedname, ldaperrors
class TestModifications(unittest.TestCase):
    """Applying Add/Delete/Replace modifications to an LDAP entry."""

    def setUp(self):
        # A fresh entry for every test; modifications mutate it in place.
        self.foo = ldapsyntax.LDAPEntry(
            None,
            dn="cn=foo,dc=example,dc=com",
            attributes={
                "objectClass": ["person"],
                "cn": ["foo", "thud"],
                "sn": ["bar"],
                "more": ["junk"],
            },
        )

    def testAddOld(self):
        """Adding values to an existing attribute appends them."""
        modification = delta.Add("cn", ["quux"])
        modification.patch(self.foo)

        self.assertNotIn("stuff", self.foo)
        self.assertEqual(self.foo["cn"], ["foo", "thud", "quux"])

    def testAddNew(self):
        """Adding values to a new attribute creates it."""
        modification = delta.Add("stuff", ["val1", "val2"])
        modification.patch(self.foo)

        self.assertEqual(self.foo["stuff"], ["val1", "val2"])
        self.assertEqual(self.foo["cn"], ["foo", "thud"])

    def testDelete(self):
        """Deleting one value leaves the remaining values in place."""
        modification = delta.Delete("cn", ["thud"])
        modification.patch(self.foo)

        self.assertNotIn("stuff", self.foo)
        self.assertEqual(self.foo["cn"], ["foo"])

    def testDeleteAll(self):
        """Deleting without values removes the whole attribute."""
        modification = delta.Delete("more")
        modification.patch(self.foo)

        self.assertNotIn("stuff", self.foo)
        self.assertEqual(self.foo["cn"], ["foo", "thud"])

    def testDelete_FailOnNonExistingAttributeType_All(self):
        """Deleting a missing attribute type raises KeyError."""
        modification = delta.Delete("notexist", [])
        self.assertRaises(KeyError, modification.patch, self.foo)

    def testDelete_FailOnNonExistingAttributeType_OneValue(self):
        """Deleting a value of a missing attribute type raises KeyError."""
        modification = delta.Delete("notexist", ["a"])
        self.assertRaises(KeyError, modification.patch, self.foo)

    def testDelete_FailOnNonExistingAttributeValue(self):
        """Deleting a missing value raises LookupError."""
        modification = delta.Delete("cn", ["notexist"])
        self.assertRaises(LookupError, modification.patch, self.foo)

    def testReplace_Add(self):
        """Replacing a new attribute creates it; other attributes survive."""
        modification = delta.Replace("stuff", ["val1", "val2"])
        modification.patch(self.foo)

        self.assertEqual(self.foo["stuff"], ["val1", "val2"])
        self.assertEqual(self.foo["sn"], ["bar"])
        self.assertEqual(self.foo["more"], ["junk"])

    def testReplace_Modify(self):
        """Replacing an existing attribute overwrites its values."""
        modification = delta.Replace("sn", ["baz"])
        modification.patch(self.foo)

        self.assertNotIn("stuff", self.foo)
        self.assertEqual(self.foo["sn"], ["baz"])
        self.assertEqual(self.foo["more"], ["junk"])

    def testReplace_Delete_Existing(self):
        """Replacing an existing attribute with no values removes it."""
        modification = delta.Replace("more", [])
        modification.patch(self.foo)

        self.assertNotIn("stuff", self.foo)
        self.assertEqual(self.foo["sn"], ["bar"])
        self.assertNotIn("more", self.foo)

    def testReplace_Delete_NonExisting(self):
        """Replacing a missing attribute with no values is a no-op."""
        modification = delta.Replace("nonExisting", [])
        modification.patch(self.foo)

        self.assertNotIn("stuff", self.foo)
        self.assertEqual(self.foo["sn"], ["bar"])
        self.assertEqual(self.foo["more"], ["junk"])
class TestModificationOpLDIF(unittest.TestCase):
    """
    LDIF serialization of single modification operations.

    Each operation serializes to a `<op>: <attribute>` header, one line per
    value, and a terminating `-` line.
    """

    def testAdd(self):
        m = delta.Add("foo", ["bar", "baz"])
        self.assertEqual(
            m.asLDIF(),
            b"""\
add: foo
foo: bar
foo: baz
-
""",
        )

    def testDelete(self):
        m = delta.Delete("foo", ["bar", "baz"])
        self.assertEqual(
            m.asLDIF(),
            b"""\
delete: foo
foo: bar
foo: baz
-
""",
        )

    def testDeleteAll(self):
        # Deleting with no values serializes without any value lines.
        m = delta.Delete("foo")
        self.assertEqual(
            m.asLDIF(),
            b"""\
delete: foo
-
""",
        )

    def testReplace(self):
        m = delta.Replace("foo", ["bar", "baz"])
        self.assertEqual(
            m.asLDIF(),
            b"""\
replace: foo
foo: bar
foo: baz
-
""",
        )

    def testReplaceAll(self):
        # Replacing with no values serializes without any value lines.
        m = delta.Replace("thud")
        self.assertEqual(
            m.asLDIF(),
            b"""\
replace: thud
-
""",
        )

    def testAddBase64(self):
        """
        LDIF attribute representation is base64 encoded (`::` separator)
        if the attribute value contains nonprintable characters
        or starts with reserved characters.
        """
        m = delta.Add("attr", [":value1", "value\n\r2"])
        self.assertEqual(
            m.asLDIF(),
            b"""\
add: attr
attr:: OnZhbHVlMQ==
attr:: dmFsdWUKDTI=
-
""",
        )
class OperationTestCase(unittest.TestCase):
    """Base test case for operations applied to an LDAP tree."""

    def getRoot(self):
        """Return a fresh in-memory LDAP root for dc=example,dc=com."""
        root_dn = distinguishedname.DistinguishedName("dc=example,dc=com")
        return inmemory.ReadOnlyInMemoryLDAPEntry(dn=root_dn)
class TestAddOpLDIF(OperationTestCase):
    """
    Unit tests for `AddOp`: LDIF serialization, equality/hashing, and
    patching against an in-memory tree.
    """

    def testAsLDIF(self):
        """
        It will return the LDIF representation of the operation.
        """
        sut = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="dc=example,dc=com",
                attributes={
                    "foo": ["bar", "baz"],
                    "quux": ["thud"],
                },
            )
        )

        result = sut.asLDIF()

        self.assertEqual(
            b"""dn: dc=example,dc=com
changetype: add
foo: bar
foo: baz
quux: thud
""",
            result,
        )

    def testAddOpEqualitySameEntry(self):
        """
        Objects are equal when they have the same LDAP entry.
        """
        first_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )
        second_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )

        first = delta.AddOp(first_entry)
        second = delta.AddOp(second_entry)

        self.assertEqual(first, second)

    def testAddOpInequalityDifferentEntry(self):
        """
        Objects are not equal when they have different LDAP entries.
        """
        # Same DN, different attribute values.
        first_entry = entry.BaseLDAPEntry(
            dn="ou=First Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )
        second_entry = entry.BaseLDAPEntry(
            dn="ou=First Team, dc=example,dc=com",
            attributes={"foo": ["other", "attributes"]},
        )

        first = delta.AddOp(first_entry)
        second = delta.AddOp(second_entry)

        self.assertNotEqual(first, second)

    def testAddOpInequalityNoEntryObject(self):
        """
        Objects are not equal to arbitrary non-AddOp objects.
        """
        team_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )
        sut = delta.AddOp(team_entry)

        self.assertNotEqual(sut, {"foo": ["same", "attributes"]})

    def testAddOpHashSimilar(self):
        """
        Objects which are equal have the same hash.
        """
        first_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )
        second_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )

        first = delta.AddOp(first_entry)
        second = delta.AddOp(second_entry)

        self.assertEqual(hash(first), hash(second))

    def testAddOpHashDifferent(self):
        """
        Objects which are not equal have different hash.
        """
        first_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["one", "attributes"]},
        )
        second_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["other", "attributes"]},
        )

        first = delta.AddOp(first_entry)
        second = delta.AddOp(second_entry)

        self.assertNotEqual(hash(first), hash(second))

    def testAddOp_DNExists(self):
        """
        It fails to perform the `add` operation for an existing entry.
        """
        root = self.getRoot()
        # Pre-populate the tree with the entry the AddOp will collide with.
        root.addChild(
            rdn="ou=Existing Team",
            attributes={
                "objectClass": ["a", "b"],
                "ou": ["HR"],
            },
        )
        hr_entry = entry.BaseLDAPEntry(
            dn="ou=Existing Team, dc=example,dc=com",
            attributes={"foo": ["dont", "care"]},
        )
        sut = delta.AddOp(hr_entry)

        deferred = sut.patch(root)

        failure = self.failureResultOf(deferred)
        self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)

    def testRepr(self):
        """
        Getting string representation.
        """
        sut = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="dc=example,dc=com",
                attributes={
                    "bar": ["foo"],
                    "foo": ["bar"],
                },
            )
        )

        self.assertEqual(
            repr(sut),
            "AddOp(BaseLDAPEntry('dc=example,dc=com', "
            "{'bar': ['foo'], 'foo': ['bar']}))",
        )
class TestDeleteOpLDIF(OperationTestCase):
    """
    Unit tests for DeleteOp: LDIF serialization, equality/hashing, and
    patching against an in-memory tree.
    """

    def testAsLDIF(self):
        """
        It returns the LDIF representation of the delete operation.
        """
        sut = delta.DeleteOp("dc=example,dc=com")

        result = sut.asLDIF()

        self.assertEqual(
            b"""dn: dc=example,dc=com
changetype: delete
""",
            result,
        )

    def testDeleteOpEqualitySameDN(self):
        """
        Objects are equal when they have the same DN.
        """
        first_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        second_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")

        first = delta.DeleteOp(first_entry)
        second = delta.DeleteOp(second_entry)

        self.assertEqual(first, second)

    def testDeleteOpEqualityEqualDN(self):
        """
        DeleteOp objects are equal if their DNs are equal, regardless of
        whether they were built from a DistinguishedName, an entry, or a
        string.
        """
        first_dn = distinguishedname.DistinguishedName(
            stringValue="ou=Team,dc=example,dc=com"
        )
        first = delta.DeleteOp(first_dn)
        second_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example, dc=com")
        second = delta.DeleteOp(second_entry)
        third = delta.DeleteOp("ou=Team, dc=example,dc=com")

        self.assertEqual(first, second)
        self.assertEqual(first, third)

    def testDeleteOpInequalityDifferentEntry(self):
        """
        DeleteOp objects are not equal when they have different LDAP entries.
        """
        first_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        second_entry = entry.BaseLDAPEntry(dn="ou=Cowboys, dc=example,dc=com")

        first = delta.DeleteOp(first_entry)
        second = delta.DeleteOp(second_entry)

        self.assertNotEqual(first, second)

    def testDeleteOpInequalityNoEntryObject(self):
        """
        DeleteOp objects are not equal to arbitrary non-DeleteOp objects.
        """
        team_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        sut = delta.DeleteOp(team_entry)

        self.assertNotEqual(sut, "ou=Team, dc=example,dc=com")

    def testDeleteOpHashSimilar(self):
        """
        Objects which are equal have the same hash.
        """
        first_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        second_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")

        first = delta.DeleteOp(first_entry)
        second = delta.DeleteOp(second_entry)

        self.assertEqual(hash(first), hash(second))

    def testDeleteOpHashDifferent(self):
        """
        Objects which are not equal have different hash.
        """
        first_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        second_entry = entry.BaseLDAPEntry(dn="ou=Cowboys, dc=example,dc=com")

        first = delta.DeleteOp(first_entry)
        second = delta.DeleteOp(second_entry)

        self.assertNotEqual(hash(first), hash(second))

    def testDeleteOp_DNNotFound(self):
        """
        It fails to delete when the RDN does not exist.
        """
        root = self.getRoot()
        sut = delta.DeleteOp("cn=nope,dc=example,dc=com")

        deferred = sut.patch(root)

        failure = self.failureResultOf(deferred)
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testDeleteOpInvalidDN(self):
        """
        Invalid type of DN raises AssertionError.
        """
        self.assertRaises(AssertionError, delta.DeleteOp, 0)

    def testRepr(self):
        """
        Getting string representation.
        """
        sut = delta.DeleteOp("dc=example,dc=com")

        self.assertEqual(repr(sut), "DeleteOp('dc=example,dc=com')")
class TestModifyOp(OperationTestCase):
    """
    Unit tests for ModifyOp: LDIF serialization, equality/hashing, and
    patching against an in-memory tree.
    """

    def testAsLDIF(self):
        """
        It will return an LDIF representation of the contained operations.
        """
        sut = delta.ModifyOp(
            "cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com",
            [
                delta.Add(
                    "postaladdress",
                    ["123 Anystreet $ Sunnyvale, CA $ 94086"],
                ),
                delta.Delete("description"),
                delta.Replace(
                    "telephonenumber",
                    ["+1 408 555 1234", "+1 408 555 5678"],
                ),
                delta.Delete("facsimiletelephonenumber", ["+1 408 555 9876"]),
            ],
        )

        result = sut.asLDIF()

        # Note: the DN is normalized (spaces after commas removed) in the
        # serialized output.
        self.assertEqual(
            b"""dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com
changetype: modify
add: postaladdress
postaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086
-
delete: description
-
replace: telephonenumber
telephonenumber: +1 408 555 1234
telephonenumber: +1 408 555 5678
-
delete: facsimiletelephonenumber
facsimiletelephonenumber: +1 408 555 9876
-
""",
            result,
        )

    def testInequalityDiffertnDN(self):
        """
        Modify operations for different DNs are not equal.
        """
        first = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        second = delta.ModifyOp(
            "cn=doe,dc=example,dc=com", [delta.Delete("description")]
        )

        self.assertNotEqual(first, second)

    def testInequalityDifferentModifications(self):
        """
        Modify operations with different modifications are not equal.
        """
        first = delta.ModifyOp("cn=john,dc=example,dc=com", [delta.Add("description")])
        second = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )

        self.assertNotEqual(first, second)

    def testInequalityNotModifyOP(self):
        """
        Modify operations are not equal to other object types.
        """
        sut = delta.ModifyOp("cn=john,dc=example,dc=com", [delta.Delete("description")])

        self.assertNotEqual("cn=john,dc=example,dc=com", sut)

    def testInequalityDiffertnOperations(self):
        """
        Modify operations for the same DN but different operations are not
        equal.
        """
        first = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        second = delta.ModifyOp(
            "cn=doe,dc=example,dc=com", [delta.Delete("homeDirectory")]
        )

        self.assertNotEqual(first, second)

    def testHashEquality(self):
        """
        Modify operations can be hashed and equal objects have the same
        hash.
        """
        first = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        second = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )

        self.assertEqual(first, second)
        self.assertEqual(
            first.asLDIF(),
            second.asLDIF(),
            "LDIF equality is a precondition for valid hash values",
        )
        self.assertEqual(hash(first), hash(second))

    def testHashInequality(self):
        """
        Different modify operations have different hash values.
        """
        first = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        second = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("homeDirectory")]
        )

        self.assertNotEqual(first.asLDIF(), second.asLDIF())
        self.assertNotEqual(hash(first), hash(second))

    def testModifyOp_DNNotFound(self):
        """
        It fails to modify when the RDN does not exist.
        """
        root = self.getRoot()
        sut = delta.ModifyOp(
            "cn=nope,dc=example,dc=com",
            [delta.Add("foo", ["bar"])],
        )

        deferred = sut.patch(root)

        failure = self.failureResultOf(deferred)
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testRepr(self):
        """
        Getting string representation.
        """
        sut = delta.ModifyOp("cn=john,dc=example,dc=com", [delta.Delete("description")])

        self.assertEqual(
            repr(sut),
            "ModifyOp(dn='cn=john,dc=example,dc=com', "
            "modifications=[Delete('description', [])])",
        )
class TestModificationComparison(unittest.TestCase):
    """Equality semantics of modification objects."""

    def testEquality_Add_True(self):
        """Two Adds with identical attribute and values compare equal."""
        left = delta.Add("k", ["b", "c", "d"])
        right = delta.Add("k", ["b", "c", "d"])
        self.assertEqual(left, right)

    def testEquality_AddVsDelete_False(self):
        """Operations of different types are never equal."""
        left = delta.Add("k", ["b", "c", "d"])
        right = delta.Delete("k", ["b", "c", "d"])
        self.assertNotEqual(left, right)

    def testEquality_AttributeSet_False(self):
        """A modification is not equal to a bare attribute set."""
        left = delta.Add("k", ["b", "c", "d"])
        right = attributeset.LDAPAttributeSet("k", ["b", "c", "d"])
        self.assertNotEqual(left, right)

    def testEquality_List_False(self):
        """A modification is not equal to a plain list of values."""
        left = delta.Add("k", ["b", "c", "d"])
        right = ["b", "c", "d"]
        self.assertNotEqual(left, right)
|
4,703 | 18366633489d905c96b0c30d65442bc2e2b188ea | from datetime import datetime
from iohelpers import lines_to_textfile
from typing import Iterator, List, Sequence
from zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations
def missing_philippine_hokkien_words_generator(synonyms: "ZhTopolectSynonyms", hokprons: "ZhTopolectPronunciations"):
    """Return Hokkien words from the synonym tables that lack pronunciation data.

    Candidate words are gathered from the 'Philippine-MN', 'Quanzhou', and
    'Xiamen' columns of every entry in `synonyms`; any ':'-separated suffix
    on an entry is stripped before the lookup.
    """
    hokkien_words = set()
    for _word, syn_data in synonyms.all_words():
        # Union of the three Minnan dialect columns for this entry.
        candidates = set(syn_data['Philippine-MN'])
        candidates.update(syn_data['Quanzhou'])
        candidates.update(syn_data['Xiamen'])
        for candidate in candidates:
            # Keep only the part before any ':' qualifier.
            hokkien_words.add(candidate.split(':')[0])
    return words_missing_prons(hokkien_words, hokprons)
def words_missing_prons(corpus: "Sequence[str]", prons: "ZhTopolectPronunciations"):
    """Return the words in `corpus` that have no recorded pronunciation.

    Words containing any character with an ordinal <= 255 (i.e. non-CJK
    text) are excluded from the result.
    """
    missing = []
    for word in corpus:
        if prons.pronunciation(word) is not None:
            continue
        if all(ord(char) > 255 for char in word):
            missing.append(word)
    return missing
if __name__ == '__main__':
    # Load the synonym tables and the pronunciation data sets.
    synonyms = ZhTopolectSynonyms.from_local_folder('../data/enwiktionary/module-zh-data-json/dial-syn')
    mandarin_prons = MandarinPronunciations.from_local_json_file('../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')
    missing_mandarin_prons = iter(words_missing_prons(synonyms.mandarin_words(), mandarin_prons))
    hokkien_prons = ZhTopolectPronunciations.from_local_json_folder('../data/enwiktionary/module-zh-data-json/nan-pron')
    missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator(synonyms, hokkien_prons))
    # Date-stamp (YYYYMMDD) the output word lists.
    today = datetime.today().strftime("%Y%m%d")
    lines_to_textfile(f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt', missing_hokkien_prons)
    lines_to_textfile(f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt', missing_mandarin_prons)
|
4,704 | 987579da6b7ae208a66e375e0c9eca32b97199c5 | import json
from django import template
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.safestring import mark_safe
# Module-level registry picked up by Django's template-tag loader.
register = template.Library()


@register.filter
def jsonify(value):
    """
    Serialize *value* to JSON and mark the result safe for template output.

    Uses DjangoJSONEncoder so dates, Decimals, and UUIDs serialize as well.
    The parameter was renamed from `object`, which shadowed the builtin;
    template filters pass their argument positionally, so callers are
    unaffected.

    NOTE(review): the output is marked safe without HTML escaping --
    json.dumps does not escape `</script>`, so only use this filter with
    trusted data.
    """
    return mark_safe(json.dumps(value, cls=DjangoJSONEncoder))
@register.simple_tag
def get_crop_url(crop, width=None, scale=1):
    """Return the crop's URL rendered at `width * scale`.

    Falls back to the crop's own width when no (truthy) width is given.
    """
    target_width = width if width else crop.width
    return crop.url_at_width(target_width * scale)
@register.assignment_tag
def get_available_crop_scales(crop, width):
    """Return the scales at which `crop` is available for the given width."""
    scales = crop.available_scales(width=width)
    return scales
|
4,705 | ee2cf6c472fa955ba3718bf3a3f60b66811b4907 | import logging
from blogofile.cache import bf
# Per-controller configuration namespace provided by Blogofile.
github = bf.config.controllers.github
from github2.client import Github

# Module-level GitHub API client shared by get_list() and run().
github_api = Github()

# Blogofile controller metadata.
config = {
    "name": "Github",
    "description": "Makes a nice github project listing for the sidebar",
    "priority": 95.0,
}
def get_list(user):
    """
    Return the user's repositories, excluding forks.

    Each item in the list has:
    name, url, description, forks, watchers, homepage, open_issues
    """
    repositories = github_api.repos.list(user)
    return [repo for repo in repositories if not repo.fork]
def run():
    """Populate the github template namespace with the user's repositories."""
    github.logger = logging.getLogger(config["name"])
    # Non-fork repositories for the sidebar, plus the unfiltered list.
    github.repo_list = get_list(github.user)
    github.full_repo_list = github_api.repos.list(github.user)
|
4,706 | 6efc7ff304a05dfc5a7bed7d646e5d6ac034ce85 | ''' 단어 수학
시간 : 68ms (~2초), 메모리 : 29200KB (~256MB)
분류 : greedy
'''
import sys


def solve(words):
    """Return the maximum sum obtainable by assigning digits to letters.

    Each letter's weight is the sum of its decimal place values
    (1, 10, 100, ...) over every occurrence in every word. Greedily
    assigning 9 to the heaviest letter, 8 to the next, and so on
    maximizes the total (BOJ 1339 "Word Math"). The problem guarantees
    at most 10 distinct letters; extra letters beyond 10 would simply
    receive no digit here instead of the negative digits the original
    loop produced.

    Args:
        words: iterable of uppercase strings.

    Return:
        int: the maximum achievable sum.
    """
    # weight[letter] = sum of place values of all of its occurrences.
    weights = {}
    for word in words:
        for position, letter in enumerate(word):
            place_value = 10 ** (len(word) - 1 - position)
            weights[letter] = weights.get(letter, 0) + place_value
    # Heaviest letter gets digit 9, next gets 8, ...
    total = 0
    for digit, weight in zip(range(9, -1, -1), sorted(weights.values(), reverse=True)):
        total += digit * weight
    return total


if __name__ == '__main__':
    # Fast input: rebind input to sys.stdin.readline only when run as a script.
    input = sys.stdin.readline
    N = int(input())  # number of words
    words = [input().strip() for _ in range(N)]
    print(solve(words))
|
4,707 | 15c61dbf51d676b4c339dd4ef86a76696adfc998 |
class MiniMaxSearch(object):
    """Alpha-beta minimax search with memoization of explored states.

    States must be hashable and provide terminal(), evaluate(), actions(),
    and result(action).

    Fix: `print self.count` was Python-2-only syntax; `print(self.count)`
    works on both Python 2 and 3.
    """

    def __init__(self):
        # Number of max_value/min_value calls in the current search.
        self.count = 0
        # States whose value has already been computed.
        self.explored = set()

    def max_value(self, state, a, b):
        """Return the value the maximizing player can guarantee from `state`.

        `a` and `b` are the alpha and beta pruning bounds.
        """
        self.count += 1
        value = float('-inf')
        if state in self.explored:
            return state.evaluate()
        if state.terminal():
            self.explored.add(state)
            return state.evaluate()
        for action in state.actions():
            result = state.result(action)
            # NOTE(review): returning the *current* state's evaluation as
            # soon as one successor is already explored abandons the
            # remaining actions; this looks suspicious but is preserved
            # as-is -- confirm intended semantics before changing.
            if result in self.explored:
                return state.evaluate()
            value = max(value, self.min_value(result, a, b))
            self.explored.add(result)
            if value >= b:
                # Beta cutoff: the minimizing ancestor will avoid this branch.
                return value
            else:
                a = max(a, value)
        return value

    def min_value(self, state, a, b):
        """Return the value the minimizing player can guarantee from `state`."""
        self.count += 1
        value = float('inf')
        if state in self.explored:
            return state.evaluate()
        if state.terminal():
            self.explored.add(state)
            return state.evaluate()
        for action in state.actions():
            result = state.result(action)
            # NOTE(review): same early-return-on-explored concern as in
            # max_value above.
            if result in self.explored:
                return state.evaluate()
            value = min(value, self.max_value(result, a, b))
            self.explored.add(result)
            if value <= a:
                # Alpha cutoff: the maximizing ancestor will avoid this branch.
                return value
            else:
                b = min(b, value)
        return value

    def decide_min(self, state):
        """Return an action whose min_value matches the minimax value of `state`."""
        self.count = 0
        best = self.max_value(state, float('-inf'), float('inf'))
        for action in state.actions():
            if best == self.min_value(state.result(action), float('-inf'), float('inf')):
                # Fix: function-call form of print (works on Python 2 and 3).
                print(self.count)
                return action
|
4,708 | 791935f63f7a0ab2755ad33369d2afa8c10dffbb | #! /usr/bin/env python
import roslib
roslib.load_manifest('learning_tf')
import rospy
import actionlib
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
from goal.msg import moveAction, moveGoal
if __name__ == '__main__':
    rospy.init_node('move_client')
    # Fix: the action spec imported above is `moveAction`; the original
    # passed the incomplete, undefined name `turtlesim_`.
    client = actionlib.SimpleActionClient('moveTo', moveAction)
    client.wait_for_server()
    # Fix: the goal type imported above is `moveGoal`; `DoDishesGoal` was
    # left over from the actionlib tutorial and is undefined here.
    goal = moveGoal()
    # Fill in the goal here
    client.send_goal(goal)
    client.wait_for_result(rospy.Duration.from_sec(5.0))
4,709 | 18f9e55b62b30ce8c9d4a57cd9c159543a738770 | from flask import Flask,render_template, redirect, url_for,request, jsonify, abort,request
from flask_sqlalchemy import SQLAlchemy
from src.flaskbasic import *
from src.flaskbasic.form import StudentForm
from src.flaskbasic.models import Student
import sys
import logging
# logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s',datefmt='%d-%b-%y %H:%M:%S')
# Per-operation loggers so each CRUD view's log lines are attributable
# (handler/level configuration is left to the commented basicConfig above).
_logger_adding = logging.getLogger('Adding results')
_logger_getting = logging.getLogger('Get results')
_logger_update = logging.getLogger('Update results')
_logger_delete = logging.getLogger('Delete results')
# class Student(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String(50), nullable= False)
# physics = db.Column(db.Integer)
# maths = db.Column(db.Integer)
# chemistry = db.Column(db.Integer)
@application.route('/', methods=['GET','POST'])
def add_results():
    """Show the entry form and persist a submitted student's marks."""
    form = StudentForm()
    _logger_adding.warning("Inside Add Results function")
    _logger_adding.warning("Student form waiting for Input")
    # Guard clause: anything but a valid submission just re-renders the form.
    if not form.validate_on_submit():
        return render_template('home.html', form=form)
    _logger_adding.warning("When form is submitted with data")
    new_student = Student(
        name=form.name.data,
        physics=form.physics.data,
        maths=form.maths.data,
        chemistry=form.chemistry.data,
    )
    _logger_adding.warning("Student: {} , physics: {} , maths: {}, chemistry: {}".format(form.name.data,form.physics.data,form.maths.data,form.chemistry.data))
    db.session.add(new_student)
    _logger_adding.warning('student results was added to database')
    db.session.commit()
    _logger_adding.warning("database commit")
    return redirect(url_for("add_results"))
@application.route('/results', methods=['GET','POST'])
def get_results():
    """Render every stored student's results."""
    _logger_getting.warning('retrieving all student results')
    all_students = Student.query.all()
    _logger_getting.warning('the students results have been collected for {}'.format(all_students))
    return render_template('results.html', data=all_students)
@application.route('/edit_results/<int:student_id>', methods=['GET','POST'])
def edit_student(student_id):
    """Show the edit page for one student, returning 404 on an unknown id.

    The unused ``StudentForm()`` instantiation was removed — the template
    only receives the student row as ``data``.
    """
    data = Student.query.get_or_404(student_id)
    return render_template('edit_results.html', data=data)
@application.route('/edit_results/<int:student_id>/update_results',methods=['GET','PUT','POST'])
def update_results(student_id):
    """Update one student's marks from the submitted form.

    On GET the form is pre-filled with the stored values; on a valid
    submission the row is updated and the browser redirected back to the
    edit page.
    """
    student_data = Student.query.get_or_404(student_id)
    form = StudentForm()
    if form.validate_on_submit():
        # Copy every submitted field onto the row and persist it.
        student_data.name = form.name.data
        student_data.physics = form.physics.data
        student_data.maths = form.maths.data
        student_data.chemistry = form.chemistry.data
        db.session.commit()
        return redirect(url_for('edit_student', student_id=student_data.id))
    if request.method == 'GET':
        # Pre-populate the form from the database for display.
        form.name.data = student_data.name
        form.physics.data = student_data.physics
        form.maths.data = student_data.maths
        form.chemistry.data = student_data.chemistry
    return render_template('update_page.html', form=form)
@application.route("/edit_results/<int:student_id>/delete", methods=['GET'])
def delete_post(student_id):
if request.method == 'GET':
student_results = Student.query.get_or_404(student_id)
db.session.delete(student_results)
db.session.commit()
return redirect(url_for('get_results'))
# @application.route('/results/<int:indexId>/update_results', methods=['PUT'])
# def update_results(indexId):
# _logger_update.warning("Inside Update function")
# student = Student.query.filter_by(id = indexId).first()
# if not student:
# _logger_update.warning("No Students in database")
# return render_template('home.html',form=form)
# student.name = request.json['name']
# student.physics = request.json.get('physics', "")
# student.maths = request.json.get('maths', "")
# student.chemistry = request.json.get('chemistry', "")
# _logger_update.warning("The updated results are Student Name: {}, Physics: {}, Maths: {}, Chemistry: {}".format(student.name,student.physics,student.maths,student.chemistry))
# db.session.commit()
# return jsonify({'student':'Pass'})
@application.route('/results/<int:indexId>', methods=['DELETE'])
def delete_student(indexId):
    """JSON API: delete the student with the given id, if present."""
    _logger_delete.warning("Inside Delete function")
    target = Student.query.filter_by(id=indexId).first()
    # .first() returns None when no row matches.
    if target is None:
        _logger_delete.warning("No Students in database")
        return jsonify({'message':'No user found'})
    db.session.delete(target)
    _logger_delete.warning("Deleted Student {} and commit to database".format(target))
    db.session.commit()
    return jsonify({'message':'Student found and Deleted'})
|
4,710 | 9ca5c052db43c1d8b0cafa18038b3ebcd80067f7 | import json
import os
import ssl
from ldap3 import Server, Connection, Tls, SUBTREE, ALL
# Include root CA certificate path if you use a self signed AD certificate
SSL_CERT_PATH = "path/to/cert.pem"
# Include the FQDN of your Domain Controller here
FQDN = "ad.example.com"
# Search base is the CN of the container where your users live
search_base='OU=Sites,DC=ad,DC=example,DC=com'
def deprovision_AD(email):
    """Offboard the AD user whose `mail` attribute matches `email`.

    Removes the user from all groups, adds them to a security group,
    disables the account and moves it to another OU (each task can be
    commented out individually).  Returns "Success" on completion, or an
    explanatory string when no matching user was found.
    """
    memberOf_list = []
    # NOTE(review): get_secret is defined elsewhere in this project — confirm import.
    ad_info = get_secret("TS/Active-Directory-Offboarding-Info")
    # TODO: Get the into from the secret above and turn into env variables instead
    ad_info_dict = json.loads(ad_info)
    # Binding to AD with latest form of TLS available
    tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.PROTOCOL_TLS)
    server = Server(FQDN, use_ssl=True, tls=tls_configuration)
    conn = Connection(server, ad_info_dict["sa_username_dn"], ad_info_dict["sa_password"], auto_bind=True,
                      raise_exceptions=True)
    # Find user in AD based off of 'mail' attribute
    search_filter = "(&(objectClass=user)(mail={}))".format(email)
    entry_generator = conn.extend.standard.paged_search(search_base=search_base,
                                                        search_filter=search_filter,
                                                        search_scope=SUBTREE,
                                                        attributes=['memberOf'],
                                                        paged_size=5,
                                                        generator=True)
    # NOTE(review): `dn`/`relative_dn` keep only the LAST entry's values; if
    # the search matches several users the steps below act on that last one,
    # and if it matches none, `dn` stays unbound and the NameError handler
    # below produces the "does not exist" message.
    for entry in entry_generator:
        dn = entry['dn']
        relative_dn = dn.split(',')[0]
        groups = entry['raw_attributes']['memberOf']
        for group in groups:
            # str() on a bytes value yields "b'...'" — slice strips that wrapper.
            group_str = str(group)
            memberOf_list.append(group_str[2:-1])
    # There is a comment before each offboarding task. Comment out the ones you'd like to skip
    try:
        # Loop through groups and remove user from those groups
        for group in memberOf_list:
            conn.extend.microsoft.remove_members_from_groups(dn, group)
        # Add user to security group
        conn.extend.microsoft.add_members_to_groups(dn, "<dn of new group>")
        # Disable account (presumably '514' = normal account + disabled flag — confirm)
        conn.modify(dn, changes={'userAccountControl': (2, '514')})
        # Move to different OU
        conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior="<dn of new OU>")
        # Delete account
        ## TODO: Figure out the command to delete the AD account
        # Close connection
        conn.unbind()
        return "Success"
    except NameError:
        return "A user with that email address does not exist inside Active Directory"
def __main__():
    """Prompt for the departing user's email and run AD deprovisioning."""
    # TODO: Figure out how to populate this as an env
    departing_email = input("Please input the departing user's email address: ")
    print(deprovision_AD(departing_email))
|
4,711 | ecc001394c1f3bba78559cba7eeb216dd3a942d8 | #(C)Inspire Search 2020/5/31 Coded by Tsubasa Kato (@_stingraze)
#Last edited on 2020/6/1 11:36AM JST
import sys
import spacy
import re

# Query comes from the first command-line argument.
text = sys.argv[1]
nlp = spacy.load('en_core_web_sm')
doc = nlp(text)

# Pieces of the anchor tag.
# BUG FIX: the closing piece was the broken escape "\"\>" which rendered a
# literal backslash ("\>") into the HTML instead of just closing the quote.
ahref = "<a href=\""
ahref2 = "\">"

SEARCH_URL = 'http://www.superai.online/solr/search.php?query='

def make_link(word):
    """Build an <a> tag linking `word` to the SuperAI Search endpoint."""
    return ahref + SEARCH_URL + word + ahref2 + word + '</a>'

# Arrays for storing subject and object tokens.
subj_array = []
obj_array = []
for d in doc:
    word = d.text
    pos = d.pos_
    dep = d.dep_
    # If the dependency label marks a subject, link and record it.
    if re.search(r'subj', dep):
        subj_array.append(word)
        print(make_link(word))
        print(pos)
        print(dep)
    # If the dependency label marks an object, link and record it.
    if re.search(r'obj', dep):
        obj_array.append(word)
        print(make_link(word))
        print(pos)
        print(dep)

# Sort both arrays before printing them.
# ToDo & Note to self: study more sorting so this can be shown as a table etc.
subj_array.sort()
obj_array.sort()
for subj in subj_array:
    print(subj)
for obj in obj_array:
    print(obj)
|
4,712 | 36e5b0f40b8016f39120f839766db0ac518c9bed | # Author: Sam Erickson
# Date: 2/23/2016
#
# Program Description: This program gives the integer coefficients x,y to the
# equation ax+by=gcd(a,b) given by the extended Euclidean Algorithm.
def extendedEuclid(a, b):
    """
    Return the equation x*b + y*a = gcd(a, b) produced by the extended
    Euclidean algorithm.

    Preconditions - a and b are both positive integers.
    Postconditions - The equation for ax+by=gcd(a,b) has been returned where
    x and y are solved.
    Input - a : int, b : int
    Output - ax+by=gcd(a,b) : string
    """
    # Order the pair so that b >= a.
    b, a = max(a, b), min(a, b)
    # BUG FIX: when a already divides b the original reported a gcd of 0
    # (e.g. extendedEuclid(2, 4) gave "1*4 + -2*2 = 0"); the gcd is a itself.
    if b % a == 0:
        return "0*" + str(b) + " + 1*" + str(a) + " = " + str(a)
    # Each row [r, x, b, y, a] encodes r = x*b + y*a for back-substitution.
    euclidList = [[b % a, 1, b, -1 * (b // a), a]]
    while b % a > 0:
        b, a = a, b % a
        euclidList.append([b % a, 1, b, -1 * (b // a), a])
    if len(euclidList) > 1:
        euclidList.pop()
    euclidList = euclidList[::-1]
    # Back-substitute each remainder equation into the next.
    for i in range(1, len(euclidList)):
        euclidList[i][1] *= euclidList[i-1][3]
        euclidList[i][3] *= euclidList[i-1][3]
        euclidList[i][3] += euclidList[i-1][1]
    expr = euclidList[len(euclidList)-1]
    strExpr = str(expr[1]) + "*" + str(expr[2]) + " + " + str(expr[3]) + "*" + str(expr[4]) \
        + " = " + str(euclidList[0][0])
    return strExpr
|
4,713 | 3ce9c0aeb6b4e575fbb3fced52a86a1dcec44706 | import datetime
from collections import defaultdict
from django.db.models import Prefetch
from urnik.models import Termin, Rezervacija, Ucilnica, DNEVI, MIN_URA, MAX_URA, Srecanje, Semester, RezervacijaQuerySet
class ProsteUcilniceTermin(Termin):
    """A single (day, hour) slot annotated with which suitable classrooms
    are free, occupied by a regular meeting, or reserved."""

    HUE_PRAZEN = 120  # green: at least one free room
    HUE_POLN = 0      # red: no free room

    def __init__(self, dan, ura, ustrezne_ucilnice, zasedene_ucilnice, rezervirane_ucilnice):
        super().__init__(dan, ura)
        zasedene_pks = {ucilnica.pk for ucilnica in zasedene_ucilnice}
        rezervirane_pks = {ucilnica.pk for ucilnica in rezervirane_ucilnice}
        # Suitable classrooms that are neither occupied nor reserved.
        self.proste = [
            ucilnica for ucilnica in ustrezne_ucilnice
            if ucilnica.pk not in zasedene_pks and ucilnica.pk not in rezervirane_pks
        ]
        # Occupied by a regular meeting (value = the reason), unless also reserved.
        self.zasedene = [
            (ucilnica, razlog) for ucilnica, razlog in zasedene_ucilnice.items()
            if ucilnica.pk not in rezervirane_pks
        ]
        # Reserved classrooms (value = the reservation as the reason).
        self.rezervirane = list(rezervirane_ucilnice.items())
        # Classrooms to display, each as a (state, classroom, reason) triple.
        self.prikazane_ucilnice = []

    def filtriraj_ucilnice(self, pokazi_zasedene):
        """Populate prikazane_ucilnice, optionally including busy rooms."""
        prikazane = [('prosta', ucilnica, None) for ucilnica in self.proste]
        if pokazi_zasedene:
            prikazane += [('rezervirana', u, r) for u, r in self.rezervirane]
            prikazane += [('zasedena', u, r) for u, r in self.zasedene]
        self.prikazane_ucilnice = sorted(prikazane, key=lambda entry: entry[1])

    def hue(self):
        """Color hue for this slot: green when a room is free, red otherwise."""
        return "{:.0f}".format(self.HUE_PRAZEN if self.proste else self.HUE_POLN)
class ProsteUcilnice(object):
    """Builds a structure that allows quick lookup of clashes for the given
    classrooms by hour and day of the week."""
    def __init__(self, ucilnice):
        # Classrooms under consideration.
        self.ucilnice = set(ucilnice)
        # (weekday, hour) -> {classroom: meeting occupying it}
        self.zasedenost_ucilnic = defaultdict(dict)
        # (weekday, hour) -> {classroom: reservation holding it}
        self.rezerviranost_ucilnic = defaultdict(dict)
    def dodaj_srecanja_semestra(self, semester, teden=None):
        """Index the semester's scheduled meetings; when `teden` (the week's
        first day) is given, only meetings actually falling inside the
        semester during that week."""
        for srecanje in semester.srecanja.select_related('ucilnica', 'predmet').prefetch_related('ucitelji'
                ).filter(ucilnica__in=[u.pk for u in self.ucilnice]).exclude(ura__isnull=True):
            if teden is None or semester.od <= teden + datetime.timedelta(days=srecanje.dan-1) <= semester.do:
                # A meeting blocks its classroom for `trajanje` consecutive hours.
                for i in range(srecanje.trajanje):
                    self.zasedenost_ucilnic[srecanje.dan, srecanje.ura + i][srecanje.ucilnica] = srecanje
    def upostevaj_rezervacije_za_teden(self, teden):
        """Index all reservations falling in the given week."""
        self.upostevaj_rezervacije(Rezervacija.objects.v_tednu(teden))
    def upostevaj_rezervacije(self, rezervacije):
        """Index the given reservations, restricted to the tracked classrooms."""
        for rezervacija in rezervacije.prefetch_related(
                Prefetch(
                    'ucilnice',
                    queryset=Ucilnica.objects.filter(pk__in=[u.pk for u in self.ucilnice]),
                    to_attr='ustrezne_ucilnice'),
                'osebe'):
            for ucilnica in rezervacija.ustrezne_ucilnice:
                for dan in rezervacija.dnevi():
                    for ura in range(rezervacija.od, rezervacija.do):
                        self.rezerviranost_ucilnic[dan.isoweekday(), ura][ucilnica] = rezervacija
    def dobi_termine(self):
        """Return a ProsteUcilniceTermin for every (weekday, hour) slot."""
        termini = [ProsteUcilniceTermin(d, u, self.ucilnice, self.zasedenost_ucilnic[d, u],
                                        self.rezerviranost_ucilnic[d, u])
                   for d in range(1, len(DNEVI) + 1) for u in range(MIN_URA, MAX_URA)]
        return termini
class Konflikt(object):
    """Collects the meetings and reservations that clash with some activity."""

    def __init__(self):
        self.srecanja = []      # conflicting meetings
        self.rezervacije = []   # conflicting reservations

    @property
    def st_konfliktov(self):
        """Total number of recorded conflicts."""
        return len(self.srecanja) + len(self.rezervacije)

    def __bool__(self):
        # Truthy exactly when at least one conflict was recorded.
        return self.st_konfliktov > 0

    def __str__(self):
        rezervacije_del = "\n ".join(map(str, self.rezervacije))
        srecanja_del = "\n ".join(map(str, self.srecanja))
        return "Konflikti:\n rezervacije:\n{}\n predmeti:\n{}".format(rezervacije_del, srecanja_del)
class IskalnikKonfliktov(object):
    """Builds a structure that allows quick clash lookup keyed on
    (classroom, date)."""
    def __init__(self, ucilnice, min_datum, max_datum):
        self.ucilnice = set(ucilnice)   # classrooms covered by this index
        self.min_datum = min_datum      # first date covered (inclusive)
        self.max_datum = max_datum      # last date covered (inclusive)
        # (classroom pk, date) -> list of meetings / reservations that day
        self.zasedenost_ucilnic = defaultdict(list)
        self.rezerviranost_ucilnic = defaultdict(list)
    def dodaj_srecanja(self):
        """Index meetings of every semester overlapping the covered period."""
        self.dodaj_srecanja_semestrov(Semester.objects.v_obdobju(self.min_datum, self.max_datum))
    def dodaj_srecanja_semestrov(self, semestri):
        """Index all scheduled meetings of the given semesters."""
        for s in Srecanje.objects.filter(semester__in=semestri, ucilnica__in=self.ucilnice
                ).exclude(ura__isnull=True).select_related('semester', 'predmet', 'ucilnica'):
            for d in s.dnevi_med(self.min_datum, self.max_datum):
                self.zasedenost_ucilnic[s.ucilnica_id, d].append(s)
    def dodaj_rezervacije(self, rezervacije):
        """The rezervacije queryset must be prefetched so that the attribute seznam_ucilnic exists."""
        for r in rezervacije:
            for u in r.seznam_ucilnic:
                for d in r.dnevi_med(self.min_datum, self.max_datum):
                    self.rezerviranost_ucilnic[u.pk, d].append(r)
    @staticmethod
    def za_rezervacije(rezervacije: RezervacijaQuerySet):
        """Build an index spanning exactly the rooms and dates of the given
        reservations (queryset must be prefetched with seznam_ucilnic)."""
        min_datum = datetime.date.max
        max_datum = datetime.date.min
        ucilnice = set()
        # Grow the covered date range / room set to span every reservation.
        for r in rezervacije:
            if r.zacetek < min_datum:
                min_datum = r.zacetek
            if r.konec > max_datum:
                max_datum = r.konec
            ucilnice.update(r.seznam_ucilnic)
        iskalnik = IskalnikKonfliktov(ucilnice, min_datum, max_datum)
        iskalnik.dodaj_srecanja()
        iskalnik.dodaj_rezervacije(rezervacije)
        return iskalnik
    def konflikti_z_rezervacijo(self, r: Rezervacija):
        """Yield (classroom, date, Konflikt) triples for every clash of reservation `r`."""
        if not hasattr(r, 'seznam_ucilnic'):
            r.seznam_ucilnic = r.ucilnice.all()
        for u in r.seznam_ucilnic:
            for d in r.dnevi():
                k = self.konflikti(u, d, r.od, r.do, r)
                if k:
                    yield u, d, k
    def konflikti(self, ucilnica, datum, od, do, ignore=None):
        """Return the conflicts of an activity held in classroom `ucilnica`
        on `datum` from hour `od` to `do`; `ignore` is excluded from the
        results.  Raises ValueError outside the indexed rooms/dates."""
        konflikti = Konflikt()
        if ucilnica not in self.ucilnice:
            raise ValueError("Struktura iskanja ni bila pripravljena za iskanje konfliktov v učilnici {}".format(ucilnica))
        if not (self.min_datum <= datum <= self.max_datum):
            raise ValueError("Struktura iskanja ni bila pripravljena za iskanje konfliktov dne {}".format(datum))
        for s in self.zasedenost_ucilnic[ucilnica.pk, datum]:
            if s != ignore and s.se_po_urah_prekriva(od, do):
                konflikti.srecanja.append(s)
        for r in self.rezerviranost_ucilnic[ucilnica.pk, datum]:
            if r != ignore and r.se_po_urah_prekriva(od, do):
                konflikti.rezervacije.append(r)
        return konflikti
|
4,714 | 25532102cc36da139a22a61d226dff613f06ab31 | import time, json, glob, os, enum
import serial
import threading
import responder
# Environment-specific settings.
isMCUConnected = True  # whether the MCU is attached to a USB port
SERIALPATH_RASPI = '/dev/ttyACM0'  # serial port path on the Raspberry Pi
SERIALPATH_WIN = 'COM16'  # serial port path on Windows

# Constants.
PIN_SERVO1 = 12  # GPIO12 PWM0 Pin
PIN_SERVO2 = 13  # GPIO13 PWM1 Pin
PIN_LED = 16  # LED pin (original comment said GPIO25 but the value is 16 — confirm)
SERVO_MIN = 115000  # servo duty cycle at the low end of the gauge
SERVO_MAX = 26000  # servo duty cycle at the high end of the gauge
SPEED_MAX = 30  # maximum speed shown on the gauge [km/h]
IMM_MAX = 7.5  # maximum current (swings both positive and negative) [A]

# Dummy receive payload used when no serial connection is available.
# BUG FIX: the original called open() without ever closing the handle;
# the with-block closes the file deterministically.
with open("rxdata.json", "r") as _rxfile:
    RXBUF0 = _rxfile.read().replace("\n", "")
class Meters(object):
    """Drives the two analog servo gauges (speed, current) and the LED."""

    def __init__(self):
        self.pi = None  # pigpio handle; stays None when not on the Raspberry Pi
        # pigpio is only imported on a posix host (i.e. the Pi itself).
        if os.name == 'posix':
            import pigpio
            self.pi = pigpio.pi()
            for pin in (PIN_SERVO1, PIN_SERVO2, PIN_LED):
                self.pi.set_mode(pin, pigpio.OUTPUT)

    def indicate(self, kmh=None, amp=None, led=None):
        """Move the gauges / LED; arguments left as None are untouched."""
        if not self.pi:
            return
        if kmh is not None:
            # Clamp into [0, SPEED_MAX] before mapping onto the servo range.
            kmh = min(max(kmh, 0), SPEED_MAX)
            self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh/SPEED_MAX * (SERVO_MAX - SERVO_MIN)))  # speedometer
        if amp is not None:
            # Clamp into [-IMM_MAX, IMM_MAX]; the current gauge is centered at zero.
            amp = min(max(amp, -IMM_MAX), IMM_MAX)
            self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5*(1 + amp/IMM_MAX) * (SERVO_MAX - SERVO_MIN)))  # ammeter
        if led is not None:
            self.pi.write(PIN_LED, led)
class SerialCom():
    """Serial link to the MCU: background receive thread plus a send helper."""
    def __init__(self, meterObj=None):
        self.ser = None        # serial connection object
        self.rxdata = {}       # latest decoded payload; read this from outside to get the data
        self.flagrx = True     # the receive loop runs while this is True
        self.t1 = None         # Thread object running the receive loop
        self.METERS = meterObj # reference to the Meters object used to display values
        # Open the serial port if the MCU is attached.
        print("[serialcom.__init__] open serial port")
        if isMCUConnected:
            try:
                # Pick the port path for the current OS.
                if os.name == 'posix':
                    portpath = SERIALPATH_RASPI
                elif os.name == 'nt':
                    portpath = SERIALPATH_WIN
                # Open the port.
                self.ser = serial.Serial(portpath, 115200, timeout=None)
            # Port open failed.
            except serial.serialutil.SerialException:
                print("[serialcom.__init__] failed to open port")
                self.rxdata = {"serialfailed":1}
        else:
            print("[serialcom.__init__] port wasn't opened because isMCUConnected==False.")
    def recieve_loop(self):
        """Endless loop: read JSON lines from the port and mirror them onto the meters."""
        if self.ser:
            print("[serialcom.recieve_loop] start recieving")
            self.ser.readline()  # the first line may be partial, so discard it
            while self.flagrx:
                rxbuf = self.ser.readline().decode('ascii','ignore')
                print(rxbuf)
                try:
                    self.rxdata = json.loads(rxbuf)  # decode into JSON form
                    self.rxdata['serialfailed'] = 0
                    if self.METERS:  # show the values on the meters
                        self.METERS.indicate(self.rxdata['speed'], self.rxdata['Imm'], self.rxdata['invstate'])
                except json.decoder.JSONDecodeError:
                    print("[serialcom.recieve_loop] when decoding, error has occured")
                    self.rxdata['serialfailed'] = 1
            self.ser.close()
        # When the serial port is not open, use the dummy payload RXBUF0 as rxdata.
        else:
            print("[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.")
            self.rxdata = json.loads(RXBUF0)
            self.rxdata['serialfailed'] = 0
            while self.flagrx:
                time.sleep(0.5)
        print("[serialcom.recieve_loop] end recieving")
    def recieve_start(self):
        """Start the receive loop on a daemon thread (no-op if already running)."""
        if not(self.t1):
            self.flagrx = True
            self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)
            self.t1.start()
    def recieve_end(self):
        """Ask the receive loop to stop and join its thread."""
        if self.t1:
            self.flagrx = False
            self.t1.join()
            del self.t1
    def send(self, txbuf):
        """Send an ASCII string to the MCU; passes through serial.write's
        return value, or returns None when the port is not open."""
        if self.ser:
            print(bytes(txbuf,"ascii"))
            return self.ser.write(bytes(txbuf,"ascii"))
def main():
    """Wire up the meters and the serial link, then serve the web UI via responder."""
    class Mode(enum.IntEnum):
        DEMO = 0
        EBIKE = 1
        ASSIST = 2
    mode = Mode.DEMO  # operating mode
    # Create and initialize the meter and serial-communication instances.
    meters = Meters()
    meters.indicate(0, 0, 0)
    serialcom = SerialCom(meters)
    serialcom.recieve_start()
    # Set up the web server.
    api = responder.API()
    @api.route("/reset")
    def reset(req,resp):
        # Turn the inverter off.
        serialcom.send("invoff\n")
    @api.route("/info")
    def get_info(req,resp):
        # Latest telemetry as JSON.
        resp.headers = {"Content-Type": "application/json; charset=utf-8"}
        resp.media = serialcom.rxdata
    @api.route("/cardata")
    def get_cardata(req,resp):
        # Static car catalogue file served verbatim.
        text = open("static/cars/cardata.json", "r", encoding='utf-8').read()
        resp.headers = {"Content-Type": "application/json; charset=utf-8"}
        resp.text = text
    @api.route("/command")
    async def post_command(req,resp):
        # Forward UI commands to the MCU; car/mode changes first switch the
        # inverter off and wait until it reports being off.
        data = await req.media()
        print(data)
        if 'carno' in data:
            serialcom.send("invoff\n")
            time.sleep(0.5)
            while serialcom.rxdata['invstate'] == 1:
                time.sleep(0.1)
            serialcom.send(f"carno={data['carno']}\n")
        if 'mode' in data:
            serialcom.send("invoff\n")
            time.sleep(0.5)
            while serialcom.rxdata['invstate'] == 1:
                time.sleep(0.1)
            serialcom.send(f"mode={data['mode']}\n")
        if 'notch' in data:
            # P/N/B are sent as bare commands; numeric notches as notch=N.
            if data['notch'] == 'P':
                serialcom.send("P\n")
            elif data['notch'] == 'N':
                serialcom.send("N\n")
            elif data['notch'] == 'B':
                serialcom.send("B\n")
            else:
                serialcom.send(f"notch={data['notch']}\n")
        if 'invoff' in data:
            serialcom.send("invoff\n")
    @api.route("/")
    def hello_html(req,resp):
        resp.html = api.template('index.html')
    # web server start; 0.0.0.0 makes it reachable from other hosts
    api.run(address='0.0.0.0', port=5042)
# Start the server only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
4,715 | 6abc8b97117257e16da1f7b730b09ee0f7bd4c6e | import datetime
import traceback
import sys
import os
def getErrorReport():
    """Factory: return a fresh ErrorReport instance."""
    return ErrorReport()
class ErrorReport():
    """Append-only error log: a timestamped file receiving messages and tracebacks."""

    def __init__(self):
        return

    def startLog(self):
        """Open a new log file named after the current timestamp.

        BUG FIX: the original suffix was '.txt.' (trailing dot).
        NOTE(review): str(datetime.now()) contains spaces and colons, which
        is an invalid filename on Windows — confirm target platforms.
        """
        timestamp = str(datetime.datetime.now())
        fileName = 'Log_' + timestamp + '.txt'
        self.logFile = open(fileName, 'w')

    def endLog(self):
        """Close the log file."""
        self.logFile.close()

    def _sync(self):
        # Flush Python's buffer, then force the OS to commit to disk.
        # fsync is given the raw descriptor, the form documented for
        # buffered file objects.
        self.logFile.flush()
        os.fsync(self.logFile.fileno())

    def writeError(self):
        """Append the current exception's traceback to the log."""
        traceback.print_exc(file=self.logFile)
        self.logFile.write('\n')
        self._sync()

    def writeMessage(self, message=''):
        """Append `message` followed by a blank line to the log."""
        self.logFile.write(message)
        self.logFile.write('\n\n')
        self._sync()
|
4,716 | b6529dc77d89cdf2d49c689dc583b78c94e31c4d | from django import forms
class CriteriaForm(forms.Form):
    """Single-field form collecting a free-text search query."""
    # Multi-line input rendered as a <textarea>.
    query = forms.CharField(widget=forms.Textarea)
|
4,717 | c9079f27e3c0aca09f99fa381af5f35576b4be75 | from __future__ import unicode_literals
import json
class BaseModel(object):
    """Mixin with common primary-key helpers for query-backed models."""

    def get_id(self):
        """Return the primary key as text.

        BUG FIX: the original called the Python-2-only builtin ``unicode``,
        a NameError on Python 3.  %-formatting yields unicode text on both
        interpreters (this module enables unicode_literals).
        """
        return '%s' % self.id

    @classmethod
    def resolve(cls, id_):
        """Return the instance with the given primary key, or None."""
        return cls.query.filter_by(id=id_).first()

    @classmethod
    def resolve_all(cls):
        """Return every row of this model."""
        return cls.query.all()
4,718 | 6822a0a194e8b401fecfed2b617ddd5489302389 | import numpy as np
# Read in training data and labels
# Some useful parsing functions
# male/female -> 0/1
def parseSexLabel(string):
    """Map a label starting with male/female to 0/1; print an error otherwise."""
    # 'female' is tested first; no string can start with both prefixes.
    if string.startswith('female'):
        return 1
    if string.startswith('male'):
        return 0
    print("ERROR parsing sex from " + string)
# child/teen/adult/senior -> 0/1/2/3
def parseAgeLabel(string):
    """Map child/teen/adult/senior label prefixes to 0-3; print an error otherwise."""
    for prefix, code in (('child', 0), ('teen', 1), ('adult', 2), ('senior', 3)):
        if string.startswith(prefix):
            return code
    print("ERROR parsing age from " + string)
# serious/smiling -> 0/1
def parseExpLabel(string):
    """Map serious to 0 and smiling/funny to 1; print an error otherwise."""
    if string.startswith('serious'):
        return 0
    # startswith accepts a tuple: either prefix counts as smiling.
    if string.startswith(('smiling', 'funny')):
        return 1
    print("ERROR parsing expression from " + string)
def loadFace(fileName):
    """Read one raw grayscale face file and scale pixel values to [0, 1]."""
    with open(fileName, 'rb') as fileIn:
        return np.fromfile(fileIn, dtype=np.uint8, count=dimensions) / 255.0

# Count number of training instances.
# BUG FIX: every `for line in open(...)` below leaked its file handle;
# each file is now opened inside a with-block.
with open("MITFaces/faceDR") as labelFile:
    numTraining = sum(1 for _ in labelFile)

dimensions = 128*128
trainingFaces = np.zeros([numTraining, dimensions])
trainingSexLabels = np.zeros(numTraining)  # Sex - 0 = male; 1 = female
trainingAgeLabels = np.zeros(numTraining)  # Age - 0 = child; 1 = teen; 2 = adult; 3 = senior
trainingExpLabels = np.zeros(numTraining)  # Expression - 0 = serious; 1 = smiling

with open("MITFaces/faceDR") as labelFile:
    for index, line in enumerate(labelFile):
        # Parse the label data.
        parts = line.split()
        trainingSexLabels[index] = parseSexLabel(parts[2])
        trainingAgeLabels[index] = parseAgeLabel(parts[4])
        trainingExpLabels[index] = parseExpLabel(parts[8])
        # Read in the face.
        trainingFaces[index, :] = loadFace("MITFaces/rawdata/" + parts[0])

# Count validation/testing instances: assume all are validation first,
# then reassign half of them to testing.
with open("MITFaces/faceDS") as labelFile:
    numValidation = sum(1 for _ in labelFile)
numTesting = int(numValidation/2)
numValidation -= numTesting

validationFaces = np.zeros([numValidation, dimensions])
validationSexLabels = np.zeros(numValidation)  # Sex - 0 = male; 1 = female
validationAgeLabels = np.zeros(numValidation)  # Age - 0 = child; 1 = teen; 2 = adult; 3 = senior
validationExpLabels = np.zeros(numValidation)  # Expression - 0 = serious; 1 = smiling
testingFaces = np.zeros([numTesting, dimensions])
testingSexLabels = np.zeros(numTesting)  # Sex - 0 = male; 1 = female
testingAgeLabels = np.zeros(numTesting)  # Age - 0 = child; 1 = teen; 2 = adult; 3 = senior
testingExpLabels = np.zeros(numTesting)  # Expression - 0 = serious; 1 = smiling

with open("MITFaces/faceDS") as labelFile:
    for index, line in enumerate(labelFile):
        # Parse the label data.
        parts = line.split()
        if index < numTesting:
            # The first half of the file becomes the testing split.
            testingSexLabels[index] = parseSexLabel(parts[2])
            testingAgeLabels[index] = parseAgeLabel(parts[4])
            testingExpLabels[index] = parseExpLabel(parts[8])
            testingFaces[index, :] = loadFace("MITFaces/rawdata/" + parts[0])
        else:
            # The remainder becomes the validation split.
            vIndex = index - numTesting
            validationSexLabels[vIndex] = parseSexLabel(parts[2])
            validationAgeLabels[vIndex] = parseAgeLabel(parts[4])
            validationExpLabels[vIndex] = parseExpLabel(parts[8])
            validationFaces[vIndex, :] = loadFace("MITFaces/rawdata/" + parts[0])
'''Train a small Keras network on the face sex labels loaded above.

NOTE(review): this header originally described the Keras MNIST convnet
example ("99.25% test accuracy after 12 epochs"); the code below actually
trains on the MIT faces data prepared earlier in this file.
'''
import tensorflow as tf
from tensorflow import keras

batch_size = 128  # NOTE(review): defined but never passed to model.fit
epochs = 12

x_train = trainingFaces
y_train = trainingSexLabels
x_test = testingFaces
y_test = testingSexLabels

# One-hot encode the binary sex labels.
y_train = keras.utils.to_categorical(y_train, num_classes=2)
y_test = keras.utils.to_categorical(y_test, num_classes=2)

model = keras.models.Sequential()
model.add(keras.layers.Dense(32, activation='relu'))
# NOTE(review): Conv2D expects 4-D input (batch, h, w, channels) but the
# preceding Dense layer emits 2-D output — this model likely fails to build
# on the flat face vectors; confirm before relying on it.
model.add(keras.layers.Conv2D(16, kernel_size=(1,1),activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(2, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          epochs=epochs,
          verbose=1)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
4,719 | e7bb5e9a91ec6a1644ddecd52a676c8136087941 | # Generated by Django 3.0.6 on 2020-06-23 10:58
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the type_printers field from printers_stat."""

    dependencies = [
        ('printer', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='printers_stat',
            name='type_printers',
        ),
    ]
|
4,720 | 14cac4f11830511923ee1ce0d49ec579aec016fd | #!/usr/bin/python
# -*- coding:utf-8 -*-
import epd2in7
import time
from PIL import Image,ImageDraw,ImageFont
import traceback

try:
    # Initialize the 2.7" e-paper display, blank it to white, then sleep it.
    epd = epd2in7.EPD()
    epd.init()
    epd.Clear(0xFF)
    time.sleep(2)
    epd.sleep()
except:
    # BUG FIX: the original used the Python-2 print statement, a SyntaxError
    # on Python 3; the call form below is valid on both 2 and 3.
    # NOTE(review): the bare except also swallows KeyboardInterrupt — confirm
    # whether that is intended for Ctrl-C handling on the device.
    print('traceback.format_exc():\n%s' % traceback.format_exc())
    exit()
|
4,721 | 6ac13665c2348bf251482f250c0fcc1fc1a8af75 | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from config_pos import config
from backbone.resnet50 import ResNet50
from backbone.fpn import FPN
from module.rpn import RPN
from layers.pooler import roi_pooler
from det_oprs.bbox_opr import bbox_transform_inv_opr
from det_oprs.bbox_opr import bbox_transform_inv_opr_v2
from det_oprs.fpn_roi_target import fpn_roi_target
from det_oprs.loss_opr import softmax_loss, smooth_l1_loss
from det_oprs.utils import get_padded_tensor
class Network(nn.Module):
    """Two-stage detector: ResNet-50 + FPN backbone, RPN proposals, RCNN head."""
    def __init__(self):
        super().__init__()
        self.resnet50 = ResNet50(config.backbone_freeze_at, False)
        self.FPN = FPN(self.resnet50, 2, 6)
        self.RPN = RPN(config.rpn_channel)
        self.RCNN = RCNN()
    def forward(self, image, im_info, gt_boxes=None):
        # Normalize with the dataset mean/std, then pad so the spatial size
        # is a multiple of 64 (divides evenly at every FPN level).
        image = (image - torch.tensor(config.image_mean[None, :, None, None]).type_as(image)) / (
                torch.tensor(config.image_std[None, :, None, None]).type_as(image))
        image = get_padded_tensor(image, 64)
        if self.training:
            return self._forward_train(image, im_info, gt_boxes)
        else:
            return self._forward_test(image, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Return a dict containing the RPN and RCNN losses for one batch."""
        loss_dict = {}
        fpn_fms = self.FPN(image)
        # fpn_fms stride: 64,32,16,8,4, p6->p2
        rpn_rois, loss_dict_rpn = self.RPN(fpn_fms, im_info, gt_boxes)
        # Assign each proposal its training targets (top_k=1: one best match).
        rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
            rpn_rois, im_info, gt_boxes, top_k=1)
        loss_dict_rcnn = self.RCNN(fpn_fms, rcnn_rois,
            rcnn_labels, rcnn_bbox_targets)
        loss_dict.update(loss_dict_rpn)
        loss_dict.update(loss_dict_rcnn)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Return (detections moved to CPU, number of foreground classes)."""
        fpn_fms = self.FPN(image)
        rpn_rois = self.RPN(fpn_fms, im_info)
        pred_bbox, num_classes = self.RCNN(fpn_fms, rpn_rois)
        return pred_bbox.cpu().detach(), num_classes
class RCNN(nn.Module):
    """Second-stage head: ROI-aligned features -> two FC layers -> per-class
    scores and per-class box deltas."""
    def __init__(self):
        super().__init__()
        # roi head
        self.fc1 = nn.Linear(256*7*7, 1024)
        self.fc2 = nn.Linear(1024, 1024)
        for l in [self.fc1, self.fc2]:
            nn.init.kaiming_uniform_(l.weight, a=1)
            nn.init.constant_(l.bias, 0)
        # box predictor: one score per class and 4 deltas per class
        self.pred_cls = nn.Linear(1024, config.num_classes)
        self.pred_delta = nn.Linear(1024, config.num_classes * 4)
        for l in [self.pred_cls]:
            nn.init.normal_(l.weight, std=0.01)
            nn.init.constant_(l.bias, 0)
        for l in [self.pred_delta]:
            nn.init.normal_(l.weight, std=0.001)
            nn.init.constant_(l.bias, 0)
    def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
        """Training: return {'loss_rcnn_loc', 'loss_rcnn_cls'}.
        Inference: return (boxes with score and class tag, class count)."""
        # input p2-p5 (drop p6, reorder so the finest level comes first)
        fpn_fms = fpn_fms[1:][::-1]
        stride = [4, 8, 16, 32]
        pool_features = roi_pooler(fpn_fms, rcnn_rois, stride, (7, 7), "ROIAlignV2")
        flatten_feature = torch.flatten(pool_features, start_dim=1)
        flatten_feature = F.relu_(self.fc1(flatten_feature))
        flatten_feature = F.relu_(self.fc2(flatten_feature))
        pred_cls = self.pred_cls(flatten_feature)
        pred_delta = self.pred_delta(flatten_feature)
        if self.training:
            # loss for regression
            labels = labels.long().flatten()
            fg_masks = labels > 0        # foreground proposals
            valid_masks = labels >= 0    # ignore label -1 entirely
            # multi class: pick each foreground proposal's own-class deltas
            pred_delta = pred_delta.reshape(-1, config.num_classes, 4)
            fg_gt_classes = labels[fg_masks]
            pred_delta = pred_delta[fg_masks, fg_gt_classes, :]
            localization_loss = smooth_l1_loss(
                # pred_regression,
                pred_delta,
                bbox_targets[fg_masks],
                config.rcnn_smooth_l1_beta)
            # loss for classification
            objectness_loss = softmax_loss(pred_cls, labels, num_classes=config.num_classes)
            objectness_loss = objectness_loss * valid_masks
            # Normalize both losses by the number of valid proposals.
            normalizer = 1.0 / valid_masks.sum().item()
            loss_rcnn_loc = localization_loss.sum() * normalizer
            loss_rcnn_cls = objectness_loss.sum() * normalizer
            loss_dict = {}
            loss_dict['loss_rcnn_loc'] = loss_rcnn_loc
            loss_dict['loss_rcnn_cls'] = loss_rcnn_cls
            return loss_dict
        else:
            # Inference: drop the background column, expand every roi once
            # per foreground class, decode, and attach score + class tag.
            class_num = pred_cls.shape[-1] - 1
            tag = torch.arange(class_num).type_as(pred_cls)+1
            tag = tag.repeat(pred_cls.shape[0], 1).reshape(-1,1)
            pred_scores = F.softmax(pred_cls, dim=-1)[:, 1:].reshape(-1, 1)
            pred_delta = pred_delta[:, 4:].reshape(-1, 4)
            base_rois = rcnn_rois[:, 1:5].repeat(1, class_num).reshape(-1, 4)
            pred_bbox = restore_bbox(base_rois, pred_delta, True)
            pred_bbox = torch.cat([pred_bbox, pred_scores, tag], axis=1)
            return pred_bbox, class_num
def restore_bbox(rois, deltas, unnormalize=True):
    """Decode regression deltas back into absolute boxes for the given rois.

    When `unnormalize` is True the deltas are first rescaled with the
    configured normalization stds/means.
    """
    if unnormalize:
        stds = torch.tensor(config.bbox_normalize_stds[None, :]).type_as(deltas)
        means = torch.tensor(config.bbox_normalize_means[None, :]).type_as(deltas)
        deltas = deltas * stds + means
    return bbox_transform_inv_opr(rois, deltas)
|
4,722 | 8271935901896256b860f4e05038763709758296 | ## CreateDGNode.py
# This files creates the boilerplate code for a Dependency Graph Node
import FileCreator
## Class to create Maya DG node plugin files
class DGNodeFileCreator(FileCreator.FileCreator):
## Constructor
def __init__(self):
FileCreator.FileCreator.__init__(self, "DGNodePluginData.json")
self.writePluginDetails()
self.writeClass()
self.writeInitialisation()
## Create a separator for the plugin and then write the node details
def writePluginDetails(self):
# Write a separator for the plugin
self.writeLine("#----------------------------------------------------------")
self.writeLine("# Plugin")
self.writeLine("#----------------------------------------------------------")
self.writeLine()
# write the plugin name
self.writeLine("# Node info")
kPluginNodeName = self.getFromJSON("nodeName", "string")
self.writeLine("kPluginNodeName = " + "\"" + kPluginNodeName + "\"")
kPluginNodeID = self.getFromJSON("nodeID", "string")
self.writeLine("kPluginNodeID = om.MTypeId(" + kPluginNodeID + ")")
self.writeLine()
# write the default attribute values if it is not None, i.e. it is defined
self.writeLine("# Default attribute values")
self.inputAttributes = self.getFromJSON("inputAttributes", "array")
for attr in self.inputAttributes:
if (attr["defaultValue"] != None):
variableName = attr["longName"] + "DefaultValue"
variableValue = attr["defaultValue"]
self.writeLine(variableName + " = " + str(variableValue))
self.writeLine()
## Write the class definition
def writeClass(self):
cDescription = self.getFromJSON("classDescription", "string")
self.writeLine("## " + cDescription)
cName = self.getFromJSON("className", "string")
self.writeLine("class " + cName + "(om.MPxNode):")
self.writeLine("# Define the attributes", 1)
# Write all the input attributes first with the prefix in
for attr in self.inputAttributes:
variableName = "in" + self.capitalise(attr["longName"])
self.writeLine(variableName + " = om.MObject()", 1)
# Write all the output attributes with the prefix out
self.outputAttributes = self.getFromJSON("outputAttributes", "array")
for attr in self.outputAttributes:
variableName = "out" + self.capitalise(attr["longName"])
self.writeLine(variableName + " = om.MObject()", 1)
self.writeLine()
# write the init function
self.writeLine("def __init__(self):", 1)
self.writeLine("om.MPxNode.__init__(self)", 2)
self.writeLine()
# write the compute function
self.writeComputeFunction()
## Write the compute class function
def writeComputeFunction(self):
# write the comments
self.writeLine("## The function that is called when the node is dirty", 1)
self.writeLine("# @param _plug A plug for one of the i/o attributes", 1)
self.writeLine("# @param _dataBlock The data used for the computations", 1)
self.writeLine("def compute(self, _plug, _dataBlock):", 1)
# loop through each output attribute and create an if statement for each one
className = self.getFromJSON("className", "string")
for attr in self.outputAttributes:
self.writeLine("# Check if the plug is the %s attribute" % attr["longName"], 2)
self.writeLine("if (_plug == " + className + ".out" + self.capitalise(attr["longName"]) + "):", 2)
# Get the handles for the attributes
self.writeLine("# Get handles for the attributes", 3)
# Get the input values
for dependency in attr["dependencies"]:
# Check if the dependency is an input attribute
try:
d = [x["longName"] for x in self.inputAttributes if (x["longName"] == dependency or x["shortName"] == dependency)][0]
self.writeLine(d + "DataHandle = _dataBlock.inputValue(" + className + ".in" + self.capitalise(d) + ")", 3)
except:
print "Warning: ", dependency, "is not an input attribute."
self.writeLine(attr["longName"] + "DataHandle = _dataBlock.outputValue(" + className + ".out" + self.capitalise(attr["longName"]) + ")", 3)
self.writeLine()
# Extract the values
self.writeLine("# Get values for the attributes", 3)
for dependency in attr["dependencies"]:
# Check if the dependency is an input attribute
try:
dName = [x["longName"] for x in self.inputAttributes if (x["longName"] == dependency or x["shortName"] == dependency)][0]
dType = [x["type"] for x in self.inputAttributes if (x["longName"] == dependency or x["shortName"] == dependency)][0]
# Check for multiple values, e.g. 2Float, and put the digit at the end of the string
if dType[0].isdigit():
dType = dType[1:] + dType[0]
self.writeLine(dName + "Value = " + dName + "DataHandle.as" + dType + "()", 3)
except:
pass
self.writeLine()
# Perform the desired computation
self.writeLine("# Perform the desired computation here", 3)
self.writeLine("# " + attr["longName"] + "Value =", 3)
self.writeLine()
# Set the output value
self.writeLine("# Set the output value", 3)
self.writeLine(attr["longName"] + "DataHandle.set" + attr["type"] + "(" + attr["longName"] + "Value)", 3)
self.writeLine()
# Mark the output data handle as clean
self.writeLine("# Mark the output data handle as clean", 3)
self.writeLine(attr["longName"] + "DataHandle.setClean()", 3)
self.writeLine()
## Write the plugin initialisation functions
def writeInitialisation(self):
# Write a separator for the plugin initialisation
self.writeLine("#----------------------------------------------------------")
self.writeLine("# Plugin Initialisation")
self.writeLine("#----------------------------------------------------------")
self.writeLine()
# Function to use API 2.0
self.writeLine("## This function tells Maya to use the Python API 2.0")
self.writeLine("def maya_useNewAPI():")
self.writeLine("pass", 1)
self.writeLine("")
# node creator function
self.writeLine("## Create an instance of the node")
self.writeLine("def nodeCreator():")
className = self.getFromJSON("className", "string")
self.writeLine("return " + className + "()", 1)
self.writeLine()
# write the nodeInitializer function
self.writeNodeInitialiser()
# write the load and unload plugin functions
self.writeInitialiseUninitialiseFunctions()
## Write the nodeInitializer function
def writeNodeInitialiser(self):
self.writeLine("## Initialise the node attributes")
self.writeLine("def nodeInitializer():")
# Decide if a numeric function set or a typed function set is needed or both
numericFn = False
typedFn = False
numericTypes = self.getFromJSON("validNumericTypes", "array")
nonNumericTypes = self.getFromJSON("validNonNumericTypes", "array")
for attr in self.inputAttributes + self.outputAttributes:
if attr["type"] in numericTypes:
numericFn = True
break
if attr["type"] in nonNumericTypes:
typedFn = True
# Check if there is a typed function set needed
if typedFn == False:
for attr in self.inputAttributes + self.outputAttributes:
if attr["type"] in nonNumericTypes:
typedFn = True
break
if (numericFn):
self.writeLine("# Create a numeric attribute function set", 1)
self.writeLine("mFnNumericAttribute = om.MFnNumericAttribute()", 1)
if (typedFn):
self.writeLine("# Create a non-numeric attribute function set", 1)
self.writeLine("mFnTypedAttribute = om.MFnTypedAttribute()", 1)
self.writeLine()
className = self.getFromJSON("className", "string")
# Write the input attributes
self.writeLine("# Input node attributes", 1)
for attr in self.inputAttributes:
# Check if the attribute is numeric or non-numeric (typed)
if attr["type"] in numericTypes:
attrType = ["Numeric", "Numeric"]
else:
attrType = ["Typed", ""]
variableName = className + ".in" + self.capitalise(attr["longName"])
fnParameters = "\"" + attr["longName"] + "\", \"" + attr["shortName"] + "\", om.MFn" + attrType[1] + "Data.k" + attr["type"]
if attr["defaultValue"] != None:
fnParameters += ", " + attr["longName"] + "DefaultValue"
self.writeLine(variableName + " = mFn" + attrType[0] + "Attribute.create(" + fnParameters + ")", 1)
self.writeLine("mFn" + attrType[0] + "Attribute.readable = False", 1)
self.writeLine("mFn" + attrType[0] + "Attribute.writable = True", 1)
self.writeLine("mFn" + attrType[0] + "Attribute.storable = True", 1)
if attr["keyable"]:
self.writeLine("mFn" + attrType[0] + "Attribute.keyable = True", 1)
else:
self.writeLine("mFn" + attrType[0] + "Attribute.keyable = False", 1)
if attr["minValue"] != None:
self.writeLine("mFn" + attrType[0] + "Attribute.minValue = " + str(attr["minValue"]), 1)
if attr["maxValue"] != None:
self.writeLine("mFn" + attrType[0] + "Attribute.maxValue = " + str(attr["minValue"]), 1)
self.writeLine()
# Write the output node attributes
self.writeLine("# Output node attributes", 1)
for attr in self.outputAttributes:
# Check if the attribute is numeric or non-numeric (typed)
if attr["type"] in numericTypes:
attrType = ["Numeric", "Numeric"]
else:
attrType = ["Typed", ""]
variableName = className + ".out" + self.capitalise(attr["longName"])
fnParameters = "\"" + attr["longName"] + "\", \"" + attr["shortName"] + "\", om.MFn" + attrType[1] + "Data.k" + attr["type"]
self.writeLine(variableName + " = mFn" + attrType[0] + "Attribute.create(" + fnParameters + ")", 1)
self.writeLine("mFn" + attrType[0] + "Attribute.readable = True", 1)
self.writeLine("mFn" + attrType[0] + "Attribute.writable = False", 1)
self.writeLine("mFn" + attrType[0] + "Attribute.storable = False", 1)
self.writeLine()
# Add the attributes to the class
self.writeLine("# Add the attributes to the class", 1)
for attr in self.inputAttributes:
self.writeLine(className + ".addAttribute(" + className + ".in" + self.capitalise(attr["longName"]) + ")", 1)
for attr in self.outputAttributes:
self.writeLine(className + ".addAttribute(" + className + ".out" + self.capitalise(attr["longName"]) + ")", 1)
self.writeLine()
# Write the dependencies
self.writeLine("# Connect input/output dependencies", 1)
for attr in self.outputAttributes:
for dependency in attr["dependencies"]:
# Check if the dependency is an input attribute
try:
d = [x["longName"] for x in self.inputAttributes if (x["longName"] == dependency or x["shortName"] == dependency)][0]
self.writeLine(className + ".attributeAffects(" + className + ".in" + self.capitalise(d) + ", " + className + ".out" + self.capitalise(attr["longName"]) + ")", 1)
except:
pass
self.writeLine()
## Write the functions for initializePlugin and uninitializePlugin
def writeInitialiseUninitialiseFunctions(self):
# Write the function for initializePlugin
self.writeLine("## Initialise the plugin when Maya loads it")
self.writeLine("def initializePlugin(mobject):")
self.writeLine("mplugin = om.MFnPlugin(mobject)", 1)
self.writeLine("try:", 1)
self.writeLine("mplugin.registerNode(kPluginNodeName, kPluginNodeID, nodeCreator, nodeInitializer)", 2)
self.writeLine("except:", 1)
self.writeLine("sys.stderr.write(\"Failed to register node: \" + kPluginNodeName)", 2)
self.writeLine("raise", 2)
self.writeLine()
# Write the function for uninitializePlugin
self.writeLine("## Uninitialise the plugin when Maya unloads it")
self.writeLine("def uninitializePlugin(mobject):")
self.writeLine("mplugin = om.MFnPlugin(mobject)", 1)
self.writeLine("try:", 1)
self.writeLine("mplugin.deregisterNode(kPluginNodeID)", 2)
self.writeLine("except:", 1)
self.writeLine("sys.stderr.write(\"Failed to unregister node: \" + kPluginNodeName)", 2)
self.writeLine("raise", 2)
self.writeLine()
# Main: running this module immediately generates the plugin source file.
DGNodeFileCreator()
|
4,723 | 3775ba538d6fab13e35e2f0761a1cacbe087f339 | # This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
# License: BSD
from math import log2
from nmigen import *
from nmigen.utils import log2_int
from nmigen_soc import wishbone
from nmigen_soc.memory import MemoryMap
from lambdasoc.periph import Peripheral
class gramWishbone(Peripheral, Elaboratable):
    """Wishbone slave bridge in front of a gram DRAM core's native port.

    The core's native data port is `ratio` times wider than the Wishbone
    data bus, so each native word is exposed as `ratio` consecutive
    Wishbone addresses.
    """

    def __init__(self, core, data_width=32, granularity=8):
        super().__init__(name="wishbone")
        self.native_port = core.crossbar.get_native_port()
        # Number of Wishbone data words that fit in one native-port word.
        self.ratio = self.native_port.data_width//data_width
        # Address bits needed to cover the whole core in native-port words.
        addr_width = log2_int(core.size//(self.native_port.data_width//data_width))
        # Extra low address bits select the sub-word within a native word.
        self.bus = wishbone.Interface(addr_width=addr_width+log2_int(self.ratio),
                                      data_width=data_width, granularity=granularity)
        map = MemoryMap(addr_width=addr_width+log2_int(self.ratio)+log2_int(data_width//granularity),
                        data_width=granularity)
        self.bus.memory_map = map

    def elaborate(self, platform):
        m = Module()

        # Write datapath: present write data whenever a write cycle is active.
        m.d.comb += [
            self.native_port.wdata.valid.eq(self.bus.cyc & self.bus.stb & self.bus.we),
        ]

        # Mask over the low address bits that pick the sub-word within the
        # wide native word.
        ratio_bitmask = Repl(1, log2_int(self.ratio))

        # A sel of 0 is treated as "all byte lanes enabled".
        sel = Signal.like(self.bus.sel)
        with m.If(self.bus.sel == 0):
            m.d.comb += sel.eq(Repl(1, sel.width))
        with m.Else():
            m.d.comb += sel.eq(self.bus.sel)

        # Shift the per-byte write enables into the lane addressed by the
        # low address bits.
        with m.Switch(self.bus.adr & ratio_bitmask):
            for i in range(self.ratio):
                with m.Case(i):
                    m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self.bus.granularity//8) << (self.ratio*i))

        # Place the narrow write data word into the addressed lane of the
        # wide native word.
        with m.Switch(self.bus.adr & ratio_bitmask):
            for i in range(self.ratio):
                with m.Case(i):
                    m.d.comb += self.native_port.wdata.data.eq(self.bus.dat_w << (self.bus.data_width*i))

        # Read datapath: always ready to accept returned data.
        m.d.comb += [
            self.native_port.rdata.ready.eq(1),
        ]

        # Select the addressed sub-word out of the wide native read word.
        with m.Switch(self.bus.adr & ratio_bitmask):
            for i in range(self.ratio):
                with m.Case(i):
                    m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.data >> (self.bus.data_width*i))

        # One outstanding transaction at a time: issue the command, then wait
        # for write acceptance or read data before acking the Wishbone cycle.
        with m.FSM():
            with m.State("Send-Cmd"):
                m.d.comb += [
                    self.native_port.cmd.valid.eq(self.bus.cyc & self.bus.stb),
                    self.native_port.cmd.we.eq(self.bus.we),
                    # Drop the granularity bits: the native port is addressed
                    # in data words, not bytes.
                    self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(self.bus.data_width//self.bus.granularity)),
                ]
                with m.If(self.native_port.cmd.valid & self.native_port.cmd.ready):
                    with m.If(self.bus.we):
                        m.next = "Wait-Write"
                    with m.Else():
                        m.next = "Wait-Read"
            with m.State("Wait-Read"):
                with m.If(self.native_port.rdata.valid):
                    m.d.comb += self.bus.ack.eq(1)
                    m.next = "Send-Cmd"
            with m.State("Wait-Write"):
                with m.If(self.native_port.wdata.ready):
                    m.d.comb += self.bus.ack.eq(1)
                    m.next = "Send-Cmd"
        return m
|
4,724 | b2eb2d006d6285947cc5392e290af50f25a9f566 | from app_auth.recaptcha.services.recaptcha_service import validate_recaptcha
from django.shortcuts import render, redirect
from django.contrib import auth
from django.views import View
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from .common.bearer_authentication import CustomBearerAuthentication
from .models import User
from .forms import UserCreationForm
from .serializers import UserSerializer
from .user_backend import UserBackend
from .common.token_utils import get_or_set_token
from app.common.meta_config import get_meta
class Auth(View):
    """Base view providing credential checking plus session login."""

    auth_class = UserBackend()

    def authenticate(self, request, username, password):
        """Return True and start a session if the credentials match an active user."""
        user = self.auth_class.authenticate(username=username, password=password)
        if user is None or not user.is_active:
            return False
        auth.login(request, user)
        return True
class Login(Auth):
    """Form-POST login endpoint; always redirects to the home page."""

    def post(self, request):
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        if self.authenticate(request, username, password):
            # Make sure a signed-in user has an API token.
            get_or_set_token(username)
        return redirect('/')
def logout(request):
    """Terminate the current session and send the user back to the home page."""
    auth.logout(request)
    return redirect('/')
class Signup(Auth):
    """Form-POST registration endpoint; always redirects to the home page."""

    form_class = UserCreationForm

    def post(self, request):
        form = self.form_class(request.POST)
        if not form.is_valid():
            return redirect('/')
        # Defer the INSERT until the password has been hashed.
        new_user = form.save(commit=False)
        email = form.cleaned_data['email']
        raw_password = form.cleaned_data['password1']
        new_user.set_password(raw_password)
        new_user.save()
        # Log the fresh account straight in; redirect home either way.
        self.authenticate(request, email, raw_password)
        return redirect('/')
class UserViewSet(APIView):
    """Bearer-token protected listing of every user, newest first."""

    authentication_classes = [CustomBearerAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        users = UserSerializer(
            User.objects.all().order_by('-created_at'), many=True
        ).data
        payload = {
            'users': {
                'data': users,
                'page': 1,
                'count': len(users)
            },
            'auth': str(request.auth),
        }
        return Response(payload)
class LoginView(Auth):
    """HTML login page with reCAPTCHA verification."""

    template_name = 'app/login.html'

    def get(self, request):
        """Render the empty login form."""
        return render(request, self.template_name, {'meta': get_meta('LoginView')})

    def post(self, request):
        """Verify the captcha, then attempt to sign the user in."""
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        if not validate_recaptcha(request.POST.get('g-recaptcha-response')):
            return redirect('/errors/unverified')
        if self.authenticate(request, username, password):
            get_or_set_token(username)
            return redirect('/')
        # Re-render the form with a generic authentication error.
        context = {
            'errors': {
                'authentication': 'Username or password is incorrect.'
            },
            'meta': get_meta('LoginView'),
        }
        return render(request, self.template_name, context)
class SignupView(Auth):
    """HTML signup page with reCAPTCHA verification."""

    template_name = 'app/signup.html'
    form_class = UserCreationForm

    def get(self, request):
        """Render the empty signup form."""
        return render(request, self.template_name, {'meta': get_meta('SignupView')})

    def post(self, request):
        """Verify the captcha, create the account, and sign the user in."""
        form = self.form_class(request.POST)
        if not validate_recaptcha(request.POST.get('g-recaptcha-response')):
            return redirect('/errors/unverified')
        if not form.is_valid():
            # Surface the form's own validation errors.
            return render(request, self.template_name, {
                'errors': form.errors.get_json_data(),
                'meta': get_meta('SignupView'),
            })
        # Defer the INSERT until the password has been hashed.
        new_user = form.save(commit=False)
        email = form.cleaned_data['email']
        raw_password = form.cleaned_data['password1']
        new_user.set_password(raw_password)
        new_user.save()
        if self.authenticate(request, email, raw_password):
            return redirect('/')
        return render(request, self.template_name, {
            'errors': {
                'authentication': 'Username or password is incorrect.'
            },
            'meta': get_meta('SignupView'),
        })
4,725 | b2db622596d0dff970e44759d25360a62f5fea83 | ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
# Convert the ALPHABET to list
ALPHABET = [i for i in ALPHABET]
output_string = ''
input_string = input('Enter a String : ')
key = int(input('Enter the key: '))
for letter in input_string:
if letter in input_string:
# ALPHABET.index(letter) returns the index of that letter in the ALPHABET list
# then we can add the key to that index to get the letter
# then we take the mod of that so if the letter is x and 10 it cycle back to the beginning of the list
output_string += ALPHABET[(ALPHABET.index(letter)+key) % 26]
else:
output_string += letter
print(f'Encoded String is {output_string}')
|
4,726 | 176120d4f40bc02b69d7283b7853b74adf369141 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/6/26 16:11
# @Author : Micky
# @Site :
# @File : 01_压缩相关知识.py
# @Software: PyCharm
import numpy as np
from PIL import Image
from scipy import misc
if __name__ == '__main__':
    # Load the source image and expose it as a numpy array (just to report
    # its shape -- the pixel data itself is not reused below).
    image = Image.open('../datas/xiaoren.png')
    img = np.asarray(image)
    print(img.shape)
    # Build a new all-black 600x100 RGB image as uint8 pixel data.
    imageNew = np.zeros((600, 100, 3))
    imageNew = imageNew.astype(np.uint8)
    # BUG FIX: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in
    # SciPy 1.2, so this script no longer runs on current SciPy. Write the
    # array out through Pillow instead (already imported above).
    Image.fromarray(imageNew).save('m.png')
|
4,727 | eda8bde048f3d4c4af4bd1c296e4cc02b92eaa17 | # Kai Joseph
# Loop Practice
# Since I worked on my own, I did not have to complete all 25 challenges (with Ms. Healey's permission). I completed a total of 14 challenges.
import sys
import random
# Each challenge below is selected via the first command-line argument,
# e.g. `python loops.py 5` runs challenge 5.  NOTE(review): running with no
# argument raises IndexError on sys.argv[1] -- presumably intentional for a
# practice script; confirm before hardening.
''' 1.
Write a for loop that will print out all the integers from 0-4 in ascending order.
'''
if sys.argv[1] == '1':
    for x in range(5):
        print(str(x))
''' 2.
Write a for loop that will print out all the integers from 0-4 in descending order.
'''
if sys.argv[1] == '2':
    for x in range(5):
        print(str(4-x))
''' 3.
Write a for loop that will print out all the integers from 5-15 in descending order.
'''
if sys.argv[1] == '3':
    for x in range(11):
        print(str(15-x))
''' 4.
Write a for loop that will print out all the integers from -5 to 5 in ascending order.
'''
if sys.argv[1] == '4':
    for x in range(11):
        print(str(-5+x))
''' 5.
Write two for loops that will both print out odd numbers from 25 to 49. The loops themselves must be different, but they will have the same output.
'''
if sys.argv[1] == '5':
    # First variant: iterate the range directly and keep the odd values.
    for x in range(25,50):
        if x%2 != 0:
            print(x)
    # Second variant: iterate an offset range; even offsets from 25 are odd.
    for x in range(26):
        if x%2 == 0:
            print(str(25+x))
''' 6.
Write a for loop that prints out the squares of the numbers from 1 to 10. ie 1, 4, 9, 16, ... 100
'''
if sys.argv[1] == '6':
    for x in range(1,11):
        print(str(x**2))
''' 8.
A number starts at 4 and increases by one every day after the day it was created. Write a loop and use the variable days (int) that will print out how many days it will take for number to reach 57.
'''
if sys.argv[1] == '8':
    for x in range(4,58):
        print(x)
        days = 57-x
        print("Days remaining to reach 57:",str(days))
''' 9.
A girl in your class has jellybeans in a jar. The number of jellybeans is stored in int beans. Every day she shares one jellybean with every student in the class, and she herself takes two. The number of students in the class is held in variable students (int). Write a loop that determines how many days it will take for her to run out of jellybeans. You can store the result in variable numDays (int).
'''
if sys.argv[1] == '9':
    # Keep prompting until both inputs parse as integers.
    while True:
        students = input("Number of students (excluding the girl): ")
        jellybeans = input("Number of jelly beans: ")
        try:
            students = int(students)
            jellybeans = int(jellybeans)
            break
        except ValueError:
            print("Please enter an integer for jelly beans and students.")
    days = 0
    # Each day costs one bean per student plus two for the girl herself.
    while jellybeans > 0:
        jellybeans = jellybeans - students - 2
        days = days + 1
    print(days)
''' 17.
Write a loop that will print out the decimal equivalents of 1/2, 1/3, 1/4, 1/5, 1/6, ... 1/20. The output for each iteration should look like:
"1/2 = .5" "1/3 = .666666666667" etc.
'''
if sys.argv[1] == '17':
    for x in range(2,21):
        num = 1/x
        print("1/"+str(x),"=",str(num))
''' 18.
Write a loop that determines the sum of all the numbers from 1-100, as well as the average. Store the sum in variable total (int) and the average in variable avg (float).
'''
if sys.argv[1] == '18':
    total = 0
    for x in range(1,101):
        total = total+x
    print("Total: "+str(total))
    # x is 100 after the loop, so this divides by the count of numbers.
    avg = total/x
    print("Average: " + str(avg))
''' 19.
A friend tells you that PI can be computed with the following equation:
PI = 4 * (1-1/3+1/5-1/7+1/9-1/11+1/13-1/15...)
Write a loop that will calculate this output for n-iterations of the pattern (n being an int), that could help you determine if your friend is right or wrong. Are they right or wrong?
'''
if sys.argv[1] == '19':
    it = int(input("Enter the number of iterations: "))
    num = 0
    # Leibniz series: odd denominators with alternating signs.
    for x in range(1,it*2):
        if x%2 != 0:
            # Denominators 3, 7, 11, ... (x-3 divisible by 4) are subtracted.
            if (x-3)%4 == 0:
                num = num - (1/x)
            else:
                num = num + (1/x)
    print(str(4*num))
''' 22.
Write a loop which prints the numbers 1 to 110, 11 numbers per line. The program shall print "Coza" in place of the numbers which are multiples of 3, "Loza" for multiples of 5, "Woza" for multiples of 7, "CozaLoza" for multiples of 3 and 5, and so on. Sample output:
1 2 Coza 4 Loza Coza Woza 8 Coza Loza 11
Coza 13 Woza CozaLoza 16 17 Coza 19 Loza CozaWoza 22
23 Coza Loza 26 Coza Woza 29 CozaLoza 31 32 Coza
......
'''
if sys.argv[1] == '22':
    # Bucket the numbers 1..110 into ten rows of eleven.
    numbers = []
    for x in range(10):
        numbers.append([])
    for x in range(1,111):
        if x < 12:
            numbers[0].append(x)
        elif x < 23:
            numbers[1].append(x)
        elif x < 34:
            numbers[2].append(x)
        elif x < 45:
            numbers[3].append(x)
        elif x < 56:
            numbers[4].append(x)
        elif x < 67:
            numbers[5].append(x)
        elif x < 78:
            numbers[6].append(x)
        elif x < 89:
            numbers[7].append(x)
        elif x < 100:
            numbers[8].append(x)
        elif x < 111:
            numbers[9].append(x)
    # Replace multiples of 3/5/7 with their word (concatenated for combos).
    for x in range(len(numbers)):
        for y in range(11):
            word = ""
            tampered = False
            if int(numbers[x][y])%3 == 0:
                word = word + "Coza"
                tampered = True
            if int(numbers[x][y])%5 == 0:
                word = word + "Loza"
                tampered = True
            if int(numbers[x][y])%7 == 0:
                word = word + "Woza"
                tampered = True
            if tampered:
                numbers[x][y] = word
    for x in range(len(numbers)):
        print(*numbers[x])
''' 23.
Write code that will print out a times-table for practice and reference. It should look like this:
  * | 1  2  3  4  5  6  7  8  9
-------------------------------
  1 | 1  2  3  4  5  6  7  8  9
  2 | 2  4  6  8 10 12 14 16 18
  3 | 3  6  9 12 15 18 21 24 27
  4 | 4  8 12 16 20 24 28 32 36
  5 | 5 10 15 20 25 30 35 40 45
  6 | 6 12 18 24 30 36 42 48 54
  7 | 7 14 21 28 35 42 49 56 63
  8 | 8 16 24 32 40 48 56 64 72
  9 | 9 18 27 36 45 54 63 72 81
'''
if sys.argv[1] == '23':
    x = [1,2,3,4,5,6,7,8,9]
    y = x
    numbers = []
    # NOTE(review): prints only the products grid, without the header row /
    # column shown in the prompt; `numbers` is unused here.
    for r in range(len(x)):
        for z in range(len(y)):
            print((int(x[r])*int(y[z])),end=" ")
        print("")
''' 25.
Write code that will extract each digit from an int stored in variable number, in the reverse order. For example, if the int is 15423, the output shall be "3 2 4 5 1", with a space separating the digits.
'''
if sys.argv[1] == '25':
    number = input("Enter the number that you wish to reverse: ")
    number = str(number)
    n = []
    # Collect the characters back to front.
    for x in range(len(number)):
        n.append(number[len(number)-1-x])
    for x in range(len(n)):
        print(n[x],end=" ")
    print("")
|
4,728 | 5cec9e82aa994d07e25d8356a8218fc461bb8b4e | #!/usr/bin/python
#import Bio
def findLCS(read, cassette, rIndex, cIndex, cassettes):
    """Return the common run of characters starting at read[rIndex] / cassette[cIndex].

    Walks both sequences in lockstep and stops at the first mismatch or at
    the end of either sequence.  `cassettes` is unused but kept so the
    signature stays compatible with existing callers.
    """
    LCS = ''
    # BUG FIX: the original `while True` indexed past the end of `read` or
    # `cassette` (IndexError) whenever the match ran to the end of either
    # sequence; bound the walk explicitly instead.
    while rIndex < len(read) and cIndex < len(cassette):
        if read[rIndex] != cassette[cIndex]:
            break
        LCS += read[rIndex]
        rIndex = rIndex + 1
        cIndex = cIndex + 1
    return LCS
def findMaxLCS(read, cassettes, rIndex, cIndex):
    """Return (best, newRIndex, newCIndex).

    `best` is the longest common stretch found against any cassette starting
    at the given indices; both indices are returned advanced past it.  On a
    tie the earliest cassette wins.
    """
    best = ''
    for cassette in cassettes:
        candidate = findLCS(read, cassette, rIndex, cIndex, cassettes)
        if len(candidate) > len(best):
            best = candidate
    return best, rIndex + len(best), cIndex + len(best)
def findConsensus(cassettes, cIndex):
    """Return the consensus symbol for column cIndex across all cassettes.

    Lower-case a/c/g/t marks a (near-)unanimous base, '-' marks a gap
    column, and 'n' marks an ambiguous column.
    """
    # Transpose the cassettes into per-column character lists.
    # NOTE(review): columns are taken up to len(cassettes[1])-26 -- the last
    # 26 columns are deliberately skipped; confirm this offset against the
    # reference data format.
    con=[]
    for i in range(0,len(cassettes[1])-26):
        holder=[]
        for j in range(0,len(cassettes)):
            holder.append(cassettes[j][i])
        con.append(holder)
    # Classify each column.  NOTE(review): the thresholds 16/14 presumably
    # assume exactly 16 cassettes (unanimous, or all-but-two agreeing) and
    # '-'>=10 a majority gap -- TODO confirm against the input files.
    con2=[]
    for k in range (0,len(con)):
        if con[k].count('G')==16 or (con[k].count('G')==14) :
            con2.append('g')
        elif con[k].count('A')==16 or (con[k].count('A')==14): #con[k][1]=='-'
            con2.append('a')
        elif con[k].count('C')==16 or (con[k].count('C')==14):
            con2.append('c')
        elif con[k].count('T')==16 or (con[k].count('T')==14):
            con2.append('t')
        elif con[k].count('-')>=10:
            con2.append('-')
        else:
            con2.append('n')
    # Only the requested column is returned (the whole table is rebuilt on
    # every call).
    return con2[cIndex]
def checkGap(LCS, cassettes, cIndex):
    """If the consensus at cIndex is a gap, absorb it into LCS and step past it.

    Returns the (possibly extended) LCS and the (possibly advanced) cIndex.
    """
    if findConsensus(cassettes, cIndex) == '-':
        return LCS + '-', cIndex + 1
    return LCS, cIndex
#print(rIndex)
#elif findConsens
#elif (findConsensus(cassettes, cIndex)).isalpha():
def deletenuc(read, cassettes, rIndex, cIndex):
    """Heuristic: treat the read base at rIndex as a deletion if skipping it
    lets the alignment resume with a match of 3+ bases."""
    # BUG FIX: findMaxLCS returns a 3-tuple, so len(...) was always 3 and this
    # test was unconditionally True; measure the matched string (element 0).
    return len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)[0]) >= 3
def insertnuc(LCS, read, cassettes, rIndex, cIndex):
    """Heuristic: assume an insertion in the read if advancing the cassette
    index lets the alignment resume with a match of 3+ bases."""
    # BUG FIX: findMaxLCS returns a 3-tuple, so len(...) was always 3 and this
    # test was unconditionally True; measure the matched string (element 0).
    return len(findMaxLCS(read, cassettes, rIndex, cIndex + 1)[0]) >= 3
#def subsnuc(
#def checkgaps(
def align(read, cassettes):
    """Greedily align `read` against the cassette set and return the aligned string.

    Repeatedly takes the longest common stretch against any cassette,
    absorbs consensus gaps, and -- for short matches -- assumes an insertion
    and fills in the cassette consensus base.
    """
    rIndex = 0
    cIndex = 0
    alignedRead = ''
    LCS = ''
    insertrec = []  # read positions where an insertion was assumed
    # BUG FIX: the loop bound was `rIndex <= len(read)`, which let the matcher
    # index read[len(read)] and raise IndexError; the last valid position is
    # len(read) - 1.
    # NOTE(review): if no characters match and insertnuc() is False, rIndex
    # never advances -- confirm whether inputs guarantee progress here.
    while rIndex < len(read):
        LCS, rIndex, cIndex = findMaxLCS(read, cassettes, rIndex, cIndex)
        LCS, cIndex = checkGap(LCS, cassettes, cIndex)
        if len(LCS) <= 6:
            # Short match: assume an insertion and pull the next consensus base.
            if insertnuc(LCS, read, cassettes, rIndex, cIndex) == True:
                insertrec.append(rIndex)
                nuc = findConsensus(cassettes, cIndex)
                cIndex = cIndex + 1
                LCS = LCS + nuc
            else:
                LCS, cIndex = checkGap(LCS, cassettes, cIndex)
        alignedRead = alignedRead + str(LCS)
    print(alignedRead)
    return alignedRead
def main():
    """Prompt for a FASTA reads file and a reference file, then align every
    read against the reference cassettes."""
    FASTA=input('Enter FASTA file:')
    reference=input('Enter reference file:')
    in_file=open(FASTA, 'r')
    in_file1=open(reference,'r')
    line_list=[]
    line_list1=[]
    # FASTA layout: headers on even lines, sequences on odd lines.
    for line in in_file:
        line=line.strip()
        line_list.append(line)
    readnames=line_list[::2] #list of the read headers
    reads=line_list[1::2] #list of sequences only
    # Same alternating layout for the reference file.
    for line1 in in_file1:
        line1=line1.strip()
        line_list1.append(line1)
    cassettes=line_list1[1::2]
    refnames=line_list1[::2]
    # Align each read and collect the results.
    # NOTE(review): the aligned reads in A are never written out, and the
    # input files are never closed -- presumably unfinished; confirm intent.
    A=[]
    for i in reads:
        alignedRead=align(i,cassettes)
        A.append(alignedRead)
|
4,729 | 7d4d5ca14c3e1479059f77c6a7f8dcfad599443b | import os
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from aec.apps.vocabulary.serializers import DictionarySerializer
from aec.apps.vocabulary.models import Word
from aec.apps.library.serializers import LibrarySerializer
from aec.apps.library.models import Library
class Command(BaseCommand):
    """Management command: load a vocabulary CSV into the library identified
    by --level/--lesson, creating the library and any missing words.

    NOTE(review): uses the Python 2 print statement and str.decode -- this
    module targets Python 2.
    """

    args = ''
    help = 'load vocabulary from csv_file'

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # Parsed command-line options; populated by handle().
        self.input_options = None

    def add_arguments(self, parser):
        # Optional verbose progress output.
        parser.add_argument(
            '-p', '--print',
            default=False,
            action='store_true',
            dest='print',
            help='Print info.'
        )
        # CSV file name, resolved relative to BASE_DIR/data in handle().
        parser.add_argument(
            '-f', '--file',
            dest='file',
            help='File for load to db.'
        )
        parser.add_argument(
            '--level',
            dest='level',
            help='Level for data.'
        )
        parser.add_argument(
            '--lesson',
            dest='lesson',
            help='Lesson for data.'
        )

    def print_info(self, template='', context=None):
        # Only emit progress output when --print was supplied.
        if self.input_options['print']:
            context = context or {}
            print str(template).format(**context)

    def handle(self, *args, **options):
        self.input_options = options
        # All three options are mandatory.
        if not options['level']:
            raise CommandError("Option `--level=...` must be specified.")
        if not options['lesson']:
            raise CommandError("Option `--lesson=...` must be specified.")
        if not options['file']:
            raise CommandError("Option `--file=...` must be specified.")
        file_path = os.path.join(settings.BASE_DIR,
                                 'data/{f}'.format(f=options['file']))
        if not os.path.isfile(file_path):
            raise CommandError("File does not exist at the specified path.")
        # Reuse the (level, lesson) library if it exists; otherwise create it
        # from the command options via the serializer.
        try:
            library = Library.objects.get(level=options['level'],
                                          lesson=options['lesson'])
        except ObjectDoesNotExist:
            library_serializer = LibrarySerializer(data=options)
            if library_serializer.is_valid():
                library_serializer.save()
                library = Library.objects.get(pk=library_serializer.data['id'])
            else:
                raise CommandError(library_serializer.errors)
        with open(file_path) as dict_file:
            csv_data = csv.DictReader(dict_file)
            for row in csv_data:
                # Words are stored lower-cased.
                row['english'] = row['english'].lower()
                self.print_info('***\n{english}', row)
                try:
                    # Existing word: just attach it to this library.
                    vocabulary = Word.objects.get(english=row['english'])
                    self.print_info('{english} - lexicon already exist', row)
                    vocabulary.library.add(library)
                    vocabulary.save()
                except ObjectDoesNotExist:
                    # New word: build it from the CSV row via the serializer.
                    row['translate'] = row['translate'].decode('utf-8')
                    row['library'] = [library.id, ]
                    vocabulary_serializer = DictionarySerializer(data=row)
                    if vocabulary_serializer.is_valid():
                        vocabulary_serializer.save()
                    else:
                        self.print_info('error - {error}', dict(
                            word=row['english'],
                            error=vocabulary_serializer.errors))
|
4,730 | e97bcf31657317f33f4a138ede80bb9171337f52 | import qrcode
def generate_qr(query):
img = qrcode.make(query)
|
4,731 | f4fca5ce20db0e27da11d76a7a2fd402c33d2e92 | # Dependancies
import pandas as pd
# Notebook-style script: scrape the US state capitals table from Wikipedia
# with pandas.read_html, tidy it, and render it back to HTML.
# pandas.read_html automatically extracts any tabular data from a page.
# URL of website to scrape
url = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'
# Read HTML: returns a list of DataFrames, one per table found on the page.
tables = pd.read_html(url)
tables
# Slice off the table we want using normal list indexing.
# Select first table as df
df = tables[0]
# Assign readable column names.
df.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital',
              'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan',
              'Metropolitan Population', 'Population Rank', 'Notes']
# Display
df.head()
# Cleanup: drop the first two rows (header/junk rows from the scrape).
df = df.iloc[2:]
df.head()
# Set the index to the State column so rows can be looked up by name.
df.set_index('State', inplace=True)
df.head()
# Display all info for one row.
df.loc['Alabama']
# DataFrame.to_html generates an HTML table string from the frame.
html_table = df.to_html()
html_table
# Strip unwanted newlines to clean up the table.
# NOTE(review): str.replace returns a new string and the result is not
# assigned, so this line has no lasting effect -- confirm intent.
html_table.replace('\n', '')
# The table can also be saved directly to a file.
df.to_html('table.html')
4,732 | 96ef95d8997eeab3d85a1bb6e4f8c86c9bfbb0a2 | import sys
from PIL import Image
from pr_common import *
# Command line: <image file> <R|G|B> <saturation modifier>
file_name = sys.argv[1]
saturation_color = sys.argv[2]
saturation_modifier = int(sys.argv[3])

# Resolve the requested channel to its tuple index once, before the pixel
# loop.  The original re-tested the string for every pixel and, for any
# argument other than R/G/B, left color_idx as None and crashed with a
# TypeError deep inside the loop; fail fast with a clear message instead.
channel_indices = {"R": 0, "G": 1, "B": 2}
if saturation_color not in channel_indices:
    raise SystemExit("saturation color must be one of R, G or B")
color_idx = channel_indices[saturation_color]

img = getImage(file_name)
pixels = pixelValues(img)
for i in range(img.height):
    for j in range(img.width):
        pixel_val = pixels[i][j]
        # Clamp to the valid 8-bit range.  BUG FIX: the original only capped
        # at 255, so a negative modifier could produce illegal values < 0.
        color_val = max(0, min(255, pixel_val[color_idx] + saturation_modifier))
        pixel_list = list(pixel_val)
        pixel_list[color_idx] = color_val
        pixels[i][j] = tuple(pixel_list)
savePixelsToImage(editedFilePath(file_name, "saturated"), pixels)
|
4,733 | dcc85b143f2394b7839f2fb9c2079a7dd9fa8e88 | from binance.client import Client
import threading
import time
import winsound

from binance.enums import *
from binance.exceptions import BinanceAPIException
from binance.websockets import BinanceSocketManager
# Replace your_api_key, your_api_secret with your api_key, api_secret
client = Client(your_api_key, your_api_secret)
# Calculate list of symbols
def calculate_data_list():
    """Return the top (up to 20) BTC-quoted pairs ranked by 24h price change.

    Excludes BTCUSDT and VENBTC, and any pair whose 24h quote volume is not
    above 100.  Returns (symbols, volume, positions, price_change) as
    parallel lists, where positions are indices into the raw ticker list.
    """
    data = client.get_ticker()
    # Indices of eligible BTC pairs with meaningful volume.
    all_positions = []
    for idx in range(len(data)):
        if ('BTC' in data[idx]['symbol']) and data[idx]['symbol'] != 'BTCUSDT' and data[idx]['symbol'] != 'VENBTC':
            if float(data[idx]['quoteVolume']) > 100:
                all_positions.append(idx)
    # Rank the eligible pairs by 24h percent change, best first (sort
    # ascending, then reverse, matching the original tie ordering).
    changes = [float(data[idx]['priceChangePercent']) for idx in all_positions]
    order = sorted(range(len(changes)), key=lambda k: changes[k])
    order.reverse()
    symbols = []
    volume = []
    positions_final = []
    price_change = []
    counter = 0
    # Keep at most 20 entries (or fewer if not enough pairs qualify).
    while (len(positions_final) < 20 and len(positions_final) < len(all_positions)):
        idx = all_positions[order[counter]]
        symbols.append(data[idx]['symbol'])
        positions_final.append(idx)
        volume.append(data[idx]['quoteVolume'])
        price_change.append(data[idx]['priceChangePercent'])
        counter += 1
    return symbols, volume, positions_final, price_change
# Get candlestick data from Binance
def get_kline():
symbols, volume, pozitii,price_change = calculate_data_list()
prices = []
prices1 = []
k=[]
for x in symbols:
try:
order = client.get_klines( # Get 1 minute candlestick data from server
symbol=x,
interval='1m')
except BinanceAPIException as e:
print (e.status_code)
print (e.message)
try:
order1 = client.get_klines( # Get 15 minute candlestick data from server
symbol=x,
limit= 1000,
interval='15m')
except BinanceAPIException as e:
print (e.status_code)
print (e.message)
if len(order1) < 970: # check if coin have at least 10 days of data
a = symbols.index(x) # get index of x in symbols
k.append(a)
else:
prices.append([]) # add empty list to list of 1 minute
prices1.append([]) # add empty list to list of 15 minutes
for i in range(len(order)):
prices[-1].append(float(order[i][1])) # save 1 minute data
for i in range(len(order1)):
prices1[-1].append(float(order1[i][1])) # save 15 minute data
k.reverse()
for x in k:
symbols.pop(x)
volume.pop(x)
all_positions.pop(x)
price_change.pop(x)
return symbols, volume, pozitii, prices, prices1,price_change
# Calculate report between bid and ask offers
def process_depth(msg):
sums5=0
sumb5=0
m=-1
for x in range(5):
if float(msg['data']['bids'][x][1])>m:
m=float(msg['data']['bids'][x][1])
sums5 = sums5 + float(msg['data']['bids'][x][1])
sumb5 = sumb5 + float(msg['data']['asks'][x][1])
ratio1 = sums5 / sumb5
if (ratio1 < 1):
ratio1 = ((1 / ratio1) * -1) + 1
else:
ratio1 -= 1
sums20 = 0
sumb20 = 0
ratio2 = 0
try:
for x in range(17):
sums20 = sums20 + float(msg['data']['bids'][x][1])
sumb20 = sumb20 + float(msg['data']['asks'][x][1])
ratio2 = sums20 / sumb20
if (ratio2 < 1):
ratio2 = ((1 / ratio2) * -1) + 1
else:
ratio2 -= 1
except Exception as e:
print("")
for i in range(len(symbols)):
simbol = symbols[i].lower() + '@depth20'
if simbol == msg['stream']:
ratio5[i] = round(ratio1, 2)
ratio20[i] = round(ratio2, 2)
max_order5[i] = m
ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * 100 / float(volume[i]),2)
current_price[i] = float(msg['data']['bids'][0][0])
# Refresh price and volume to current price and volume
def process_ticker(msg):
i=0
for x in symbols:
for y in range(len(msg)):
if x == str(msg[y]['s']):
volume[i] = int(float(msg[y]['q']))
price_change[i] = int(float(msg[y]['P']))
i+=1
symbols,volume,pozitii,k_line_1m,k_line_15m,price_change =get_kline()
# Declaring lists necessary for storing data
max_order5=[0 for x in range(len(symbols))]
current_price= [0 for x in range(len(symbols))]
price_chance_2_min = [0 for x in range(len(symbols))]
price_chance_5_min = [0 for x in range(len(symbols))]
price_chance_15_min = [0 for x in range(len(symbols))]
price_chance_30_min = [0 for x in range(len(symbols))]
price_change_25_30_min = [0 for x in range(len(symbols))]
price_chance_1_hour = [0 for x in range(len(symbols))]
price_chance_3_hour = [0 for x in range(len(symbols))]
price_chance_8_hour = [0 for x in range(len(symbols))]
price_change_1_days = [0 for x in range(len(symbols))]
price_change_3_days = [0 for x in range(len(symbols))]
price_change_5_days = [0 for x in range(len(symbols))]
price_change_7_days = [0 for x in range(len(symbols))]
price_change_10_days = [0 for x in range(len(symbols))]
average_10_min = [0 for x in range(len(symbols))]
average_20_min = [0 for x in range(len(symbols))]
average_50_min = [0 for x in range(len(symbols))]
average_100_min = [0 for x in range(len(symbols))]
average_change_10_min = [0 for x in range(len(symbols))]
average_change_20_min = [0 for x in range(len(symbols))]
average_change_50_min = [0 for x in range(len(symbols))]
average_change_100_min = [0 for x in range(len(symbols))]
total_score = [0 for x in range(len(symbols))]
ratio5=[0 for x in range(len(symbols))]
ratio5_10sec=[[] for y in range(len(symbols))]
ratio5_sum = [0 for x in range(len(symbols))]
ratio5_sum_10sec = [[] for y in range(len(symbols))]
ratio20= [0 for x in range(len(symbols))]
# Create list neccessary for depth socked
list=[]
for x in symbols:
list.append(x.lower()+'@depth20') # append @depth20 to each symbol and add it into list
bm = BinanceSocketManager(client)
bm.start()
depth_socket = bm.start_multiplex_socket(list,process_depth) # start depth socket
ticker_socket = bm.start_ticker_socket(process_ticker) # start price socket
# maintain candlestick lists
def kline_continuum():
i=0
while True:
time.sleep(60)
for x in range(len(symbols)):
k_line_1m[x].pop(0)
k_line_1m[x].append(current_price[x]) # add price to list of 1 minute candlestick every 1 minute
if i%15==0:
k_line_15m[x].pop(0)
k_line_15m[x].append(current_price[x]) # add price to list of 15 minute candlestick every 15 minute
i+=1
# Save report between ask and bit for the last 10 seconds
def report_10_seconds():
while True:
for x in range(len(symbols)):
if len(ratio5_10sec[x])>10:
ratio5_10sec[x].pop(0)
if len(ratio5_sum_10sec[x]) > 10:
ratio5_sum_10sec[x].pop(0)
ratio5_10sec[x].append(ratio5[x])
ratio5_sum_10sec[x].append(ratio5_sum[x])
time.sleep(1)
# Calculate score for each symbol, you can add as many parameters as you want
def calculate_score():
for x in range(len(symbols)):
score = 0
# 2 minute change parameter score calculation
a = float(price_chance_2_min[x])
if a > 0 and a < 0.5:
score += 1
elif a >= 0.5 and a < 1:
score += 1.25
elif a >= 1 and a < 1.5:
score += 1.5
elif a >= 1.5 and a < 2:
score += 0.5
elif a >= 3:
score += 0.25
# 5 minute change parameter score calculation
a = float(price_chance_5_min[x])
if a > 0 and a < 0.5:
score += 1
elif a >= 0.5 and a < 1:
score += 1.25
elif a >= 1 and a < 2:
score += 1.5
elif a >= 2 and a < 3:
score += 0.5
elif a >= 3:
score += 0.25
# 15 minute change parameter score calculation
a = float(price_chance_15_min[x])
if a <= 1 and a > -0.5:
score += 0.25
elif a <= -0.5 and a > -1:
score += 0.5
elif a <= -1 and a > -1.5:
score += 0.75
elif a <= -1.5:
score += 1
# change between 25 and 30 minutes ago parameter score calculation
a = float(price_change_25_30_min[x])
if a <= 2 and a > -0.75:
score += 0.25
elif a <= -0.75 and a > -1.25:
score += 0.5
elif a <= -1.25 and a > -1.75:
score += 0.75
elif a <= -1.75:
score += 1
# 1 hour change parameter score calculation
a = float(price_chance_1_hour[x])
if a <= 2 and a >= 0:
score += 0.5
elif a <= 0 and a > -2:
score += 0.75
elif a <= -2:
score += 1
# 3 hour change parameter score calculation
a = float(price_chance_3_hour[x])
if a <= 5 and a > -1:
score += 0.25
elif a <= -1 and a > -3:
score += 0.5
elif a <= -3 and a > -6:
score += 0.75
elif a <= -6:
score += 1
# 8 hour change parameter score calculation
a = float(price_chance_8_hour[x])
if a <= 0 and a > -4:
score += 0.25
elif a <= -4 and a > -6:
score += 0.5
elif a <= -6:
score += 0.75
if float(ratio5[x]) > 0:
score += 1
a = 0
for i in range(len(ratio5_10sec[x])):
if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min[x]) > 1:
if float(ratio5_10sec[x][i]) > 0:
a += 1
if float(ratio5_sum_10sec[x][i]) > 0.3:
a += 1
score += a / len(ratio5_sum_10sec[x])
if float(ratio20[x]) > 0:
score += 1
a = 0
for i in range(len(ratio5_10sec[x])-1):
if float(ratio5_10sec[x][i]) > 0:
a += 1
if a <= 2:
score += 0.25
elif a > 2 and a <= 4:
score += 0.5
elif a > 4 and a <= 7:
score += 0.75
elif a > 7:
score += 1
a = 0
for i in range(20, 1, -1):
if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):
a += 1
score += a / 10
# 1 day change parameter score calculation
if float(price_change_1_days[x]) > 5:
score+=0.3
# 3 day change parameter score calculation
if float(price_change_3_days[x]) > 10:
score += 0.25
# 5 day change parameter score calculation
if float(price_change_5_days[x]) > 15:
score += 0.25
# 7 day change parameter score calculation
if float(price_change_7_days[x]) > 20:
score += 0.25
# 10 day change parameter score calculation
if float(price_change_10_days[x]) > -25:
score += 0.25
# 10 minutes moving average parameter score calculation
a=float(average_change_10_min[x])
if a<0.2 and a>-0.3:
score+=0.1
# 20 minutes moving average parameter score calculation
a = float(average_change_20_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# 50 minutes moving average parameter score calculation
a = float(average_change_50_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# 100 minutes moving average parameter score calculation
a = float(average_change_100_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# save score
total_score[x] = score
def print_results():
# sleep time before starting calculations
time.sleep(10)
while True:
for x in range(len(symbols)):
# calculate parameters percentages
try:
price_chance_2_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 2]) - 100, 2)
price_chance_5_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 5]) - 100, 2)
price_chance_15_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 15]) - 100, 2)
price_chance_30_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)
price_chance_1_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 60]) - 100, 2)
price_chance_3_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 180]) - 100, 2)
price_chance_8_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][20]) - 100, 2)
price_change_25_30_min[x] = round(float(k_line_1m[x][- 6]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)
price_change_1_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 96]) - 100, 1)
price_change_3_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 288]) - 100, 1)
price_change_5_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 480] )- 100, 1)
price_change_7_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 672]) - 100, 1)
price_change_10_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 960]) - 100, 1)
average_10_min[x] = round(float(sum(k_line_1m[x][- 10:])) / 10, 8)
average_20_min[x] = round(float(sum(k_line_1m[x][- 20:])) / 20, 8)
average_50_min[x] = round(float(sum(k_line_1m[x][- 50:])) / 50, 8)
average_100_min[x] = round(float(sum(k_line_1m[x][- 100:])) / 100, 8)
average_change_10_min[x] = round(float(current_price[x]) * 100 / float(average_10_min[x]) - 100, 2)
average_change_20_min[x] = round(float(current_price[x]) * 100 / float(average_20_min[x]) - 100, 2)
average_change_50_min[x] = round(float(current_price[x]) * 100 / float(average_50_min[x]) - 100, 2)
average_change_100_min[x] = round(float(current_price[x]) * 100 / float(average_100_min[x]) - 100, 2)
except Exception as e:
print(e)
# call function for score calculation
calculate_score()
# select parameter for which data is sorted
sort_by = total_score
# sort data
sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])
# sort data in reverse order
sorted_data.reverse()
#print table header
print (time.ctime())
print ('%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (
'Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch', '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',
'25-30m', 'r5sum', '1d_ch', '3d_ch','5d_ch', '7d_ch', '10d_ch'))
# print top 10 cryptocurrencies data
for k in range(10):
i = sorted_data[k]
print ('%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (
symbols[i][:-3], total_score[i], ratio5[i], ratio20[i], price_chance_2_min[i], price_chance_5_min[i],
price_chance_15_min[i],price_chance_30_min[i], price_chance_1_hour[i], average_change_10_min[i],
average_change_20_min[i],average_change_50_min[i], average_change_100_min[i], price_chance_8_hour[i],
price_change_25_30_min[i], ratio5_sum[i], price_change_1_days[i], price_change_3_days[i],
price_change_5_days[i], price_change_7_days[i], price_change_10_days[i]))
# if score for one coin is > 10 will play sound
try:
if float(total_score[sorted_data[0]]) > 10:
winsound.PlaySound('\\Sound.wav', winsound.SND_FILENAME)
except Exception as e:
print(e)
# Seconds to wait before repeating while loop
time.sleep(1)
# Declaring threads
threads = [threading.Thread(target=kline_continuum),
threading.Thread(target=report_10_seconds),
threading.Thread(target=print_results)]
# Starting threads
[thread.start() for thread in threads]
[thread.join() for thread in threads]
|
4,734 | a63718ba5f23d6f180bdafcb12b337465d6fa052 | from bs4 import BeautifulSoup
import os, re, json
import pandas as pd
from urllib import request
from openpyxl import load_workbook
from bilibili.append_xlsx import append_df_to_excel
# 获取页面的所有的avid, title, url
def parse_html(content):
arr = []
# 使用beautifulsoup解析html文档
soup = BeautifulSoup(content)
# 获取指定标签
tag_list = soup.find_all("a", attrs={'title': True, 'href': True, "class": "title"})
# tag_list = soup.find_all("span", attrs={'class': 'type avid'})
for tag in tag_list:
# print(tag.get("title"), tag.get("href"))
# 获取标签内容,并去除首尾空格
title = tag.get("title")
href = tag.get("href")[2:]
avid = re.search("av([0-9]*)", href).group(0)
base_dict[avid] = [avid, title, href]
return base_dict.keys()
# 读取路径文件名
def read_path(path):
path_set = set()
dir_path = os.listdir(path)
for item in dir_path:
child = os.path.join('%s/%s' % (path, item))
path_set.add(child)
return path_set
# 提取html文件
def filter(path_set):
filterable = []
pattern = re.compile(r'.*\.[html|htm]+', re.I)
for path in path_set:
m = pattern.match(path)
if m:
filterable.append(m.group(0).strip())
return filterable
# 读取文件内容
def read_html(path):
file = open(path.encode('utf-8').strip(), 'r', encoding="utf-8")
content = file.read()
return content
# 写入csv
def storeCSV(filename=r'/Users/robbin/Desktop/bilibili/bilibili.xlsx'):
df_base = pd.DataFrame.from_dict(base_dict, orient="index")
df_base.columns = ['avid', 'title', 'href']
df_tags = pd.DataFrame.from_dict(tags_dict, orient="index")
df_tags.columns = ['tags']
df_info = pd.DataFrame.from_dict(info_dict, orient='index')
df_info.columns = ['like', 'his_rank', 'view', 'now_rank', 'coin', 'reply', 'aid', 'no_reprint', 'favorite', 'danmaku', 'copyright', 'share']
df = df_base.join([df_tags, df_info])
append_df_to_excel(filename, df, index=False)
# 根据avid请求api获得视频信息
def query_info(avid):
stat_url = "https://api.bilibili.com/x/web-interface/archive/stat?aid="
id = avid[2:]
url = stat_url + id
response = request.urlopen(url)
return response.read().decode("utf-8")
# 根据avid请求api获得视频标签
def query_tags(avid):
stat_url = "https://api.bilibili.com/x/tag/archive/tags?aid="
id = avid[2:]
url = stat_url + id
response = request.urlopen(url)
return response.read().decode("utf-8")
if __name__ == '__main__':
print("now read folder...")
path_set = read_path("/Users/robbin/Desktop/bilibili")
print("parse file path finshed...")
filterable = filter(path_set)
for path in filterable:
base_dict = {}
tags_dict = {}
info_dict = {}
print("now parse the file:", path)
content = read_html(path)
avid_list = parse_html(content)
for avid in avid_list:
print("Proccessing:", avid)
tags_json = query_tags(avid)
tags_obj = json.loads(tags_json)
tags_row_list = tags_obj.get("data")
if tags_row_list:
# print(data)
tag_list = []
for item in tags_row_list:
tag_name = item.get("tag_name")
tag_list.append(tag_name)
tag = ",".join(tag_list)
tags_dict[avid] = tag
info_json = query_info(avid)
info_obj = json.loads(info_json)
info_row_dict = info_obj.get("data")
if info_row_dict:
info_dict[avid] = list(info_row_dict.values())
print("Start to writing ", path, " to xls")
storeCSV()
print("End of writing ", path, " to xls")
|
4,735 | a929bfbe2be6d8f93cafa5b6cc66c7506037ffca | # Sets up directories
MusicDir = "AudioFiles\\"
ModelsDir = "Models\\"
MonstersDir = "Models\\Monsters\\" |
4,736 | 2cb0f2fbf3ceddb2f1ee65614506dbfb3b5c8089 | # player input is: Word made, starting tile position of the word made, horizontal or vertical
# example: playerinput = ['STRING', (0, 1), 'v']
import numpy as np
import string
def boundarytester(playerinput): # to check whether the player is placing the tiles within the confines of the board
if playerinput[1][0] > 14 or playerinput[1][0] < 0 or playerinput[1][1] > 14 or playerinput[1][1] < 0:
return False
if playerinput[2] == 'h':
if (playerinput[1][1] + len(list(playerinput[0])) - 1) > 14:
return False
if playerinput[2] == 'v':
if (playerinput[1][0] + len(list(playerinput[0])) - 1) > 14:
return False
return True
def moveconverter(playerinput, board): # converting player input to internal lingo for a move
word = list(playerinput[0])
if playerinput[2] == 'v':
# p is the list of positions of the tiles in the entered word
p = [(x, playerinput[1][1]) for x in range(playerinput[1][0], playerinput[1][0] + len(word))]
# bmask is a boolean mask to find out which positions are not occupied on the board at the location of the word
bmask = board[p[0][0]:p[-1][0] + 1, p[0][1]] == 52
letters = np.array(word)[bmask].tolist()
positions = np.array(p)[bmask].tolist()
elif playerinput[2] == 'h':
p = [(playerinput[1][0], x) for x in range(playerinput[1][1], playerinput[1][1] + len(word))]
bmask = board[p[0][0], p[0][1]:p[-1][1] + 1] == 52
letters = np.array(word)[bmask].tolist()
positions = np.array(p)[bmask].tolist()
return letters, positions
def island_words_tester(positions, board): # to check if the entered word is in continuation with at least 1 existing word on the board
test = False
for i in positions:
if i[0] == 7 and i[1] == 7: # to ensure the first move involves the centre tile
test = True
else:
adjacent_positions = [(i[0] - 1, i[1]), (i[0] + 1, i[1]),(i[0], i[1] - 1), (i[0], i[1] + 1)] # checking for adjacency
adjacent_positions = [x for x in adjacent_positions if ((x[0] >= 0) and (x[0] < 15)) and ((x[1] >= 0) and (x[1] < 15))]
for j in adjacent_positions:
if board[j[0], j[1]] < 52:
test = True
return test
# This function takes the positions of the letters placed on the board, and returns a list of the words made
# by the placement of the letters. The format of the returned list is that it is a list of tuples, each a word
# represented by the positions of the corresponding letters.
def wordsmade(letters, positions, mainboard):
# prepping stuff
l1 = dict(zip(string.ascii_uppercase, list(range(0, 26, 1))))
l2 = dict(zip(string.ascii_lowercase, list(range(26, 52, 1))))
letternumberkey = {**l1, **l2}
letternumberkey[' '] = 52
board = mainboard.copy()
# prepped and ready!
for i in range(len(letters)):
board[positions[i][0], positions[i][1]] = letternumberkey[letters[i]] # locally modify the board
wordsp = []
for i in positions:
# horizontally looking for words made
ph = np.array(list(zip([i[0]] * 15, list(range(0, 15)))))[board[i[0], :] < 52].tolist()
# a list of all occupied places on the board
r = list(map(tuple, ph))
# trimming the list so that any places after an unoccupied place from the placed letters are removed
if len(r) > 1:
for j in range(r.index(tuple(i)), 0, -1):
if (r[j][1] - r[j - 1][1]) > 1:
r = r[j:]
break
for j in range(r.index(tuple(i)), len(r), 1):
try:
if (r[j + 1][1] - r[j][1]) > 1:
r = r[:j + 1]
break
except IndexError: # if the +1 causes the index to exceed the limit
pass
if len(r) > 1:
wordsp.append(r)
# vertically looking for words made
pv = np.array(list(zip(list(range(0, 15)), [i[1]] * 15)))[board[:, i[1]] < 52].tolist()
s = list(map(tuple, pv))
if len(s) > 1:
for j in range(s.index(tuple(i)), 0, -1):
if (s[j][0] - s[j - 1][0]) > 1:
s = s[j:]
break
for j in range(s.index(tuple(i)), len(s), 1):
try:
if (s[j + 1][0] - s[j][0]) > 1:
s = s[:j + 1]
break
except IndexError:
pass
if len(s) > 1:
wordsp.append(s)
wordspq = []
for i in wordsp:
wordspq.append(tuple(i))
return list(set(wordspq)) # set is used here to remove redundant words.
def validword(words, filename='wordlist/sowpods.txt'): # checks if the word is present in the wordlist
with open(filename, 'r') as f:
rd = f.read()
rd = rd.split('\n')
rd = set(rd)
for j in words:
if j not in rd:
return False, j
return True, True
def racksufficiency(letters, rack): # check if the desired move can be achieved based on the rack of the player
rackblanks = [x for x in rack if x == ' ']
blanks = [x for x in letters if x != x.upper()]
if len(blanks) > len(rackblanks):
return False, 'You entered special tiles (lower case letters) more than you have.'
for i in [x for x in letters if x == x.upper()]:
if i not in [x for x in letters if x != ' ']:
return False, 'You do not have the tiles to play the word you want to.'
return True, True
def overlaptester(playerinput, board): # if the desired word conflicts with letters already on board
# prepping stuff
l_1 = dict(zip(list(range(0, 26, 1)), string.ascii_uppercase))
l_2 = dict(zip(list(range(26, 52, 1)), string.ascii_lowercase))
numberletterkey = {**l_1, **l_2}
numberletterkey[52] = ' '
# prepped and ready!
word = playerinput[0]
if playerinput[2] == 'v':
p = [(x, playerinput[1][1]) for x in range(playerinput[1][0], playerinput[1][0] + len(word))]
# bmask is a boolean mask to find out which positions are occupied on the board
bmask = board[p[0][0]:p[-1][0] + 1, p[0][1]] != 52
overlapletters = ''.join(np.array(list(word))[bmask].tolist()).lower()
existingletters = ''.join(
list(map(lambda x: numberletterkey[x], board[p[0][0]:p[-1][0] + 1, p[0][1]][bmask]))).lower()
if overlapletters != existingletters:
return False
elif playerinput[2] == 'h':
p = [(playerinput[1][0], x) for x in range(playerinput[1][1], playerinput[1][1] + len(word))]
bmask = board[p[0][0], p[0][1]:p[-1][1] + 1] != 52
overlapletters = ''.join(np.array(list(word))[bmask].tolist()).lower()
existingletters = ''.join(
list(map(lambda x: numberletterkey[x], board[p[0][0], p[0][1]:p[-1][1] + 1][bmask]))).lower()
if overlapletters != existingletters:
return False
return True
def mainrules(playerinput, board, rack, validity=True, filename='wordlist/sowpods.txt'): # applies all the checks on the input
# prepping
l_1 = dict(zip(list(range(0, 26, 1)), string.ascii_uppercase))
l_2 = dict(zip(list(range(26, 52, 1)), string.ascii_lowercase))
numberletterkey = {**l_1, **l_2}
numberletterkey[52] = ' '
l1 = dict(zip(string.ascii_uppercase, list(range(0, 26, 1))))
l2 = dict(zip(string.ascii_lowercase, list(range(26, 52, 1))))
letternumberkey = {**l1, **l2}
letternumberkey[' '] = 52
# prepped and ready!
if not boundarytester(playerinput):
return False, False, "Your word extends outside the board."
if not overlaptester(playerinput, board):
return False, False, "The word you want to put requires different letters at places where there already are letters."
move = moveconverter(playerinput, board)
words = wordsmade(move[0], move[1], board)
if not island_words_tester(move[1], board):
return False, False, "The first move has to contain the middle square(6,6). Rest must be connected via at least 1 letter to the words on the board."
internal_board = board.copy() # to modify the board locally
for i in range(len(move[0])):
internal_board[move[1][i][0], move[1][i][1]] = letternumberkey[move[0][i]]
actual_words = [] # the words in actuality (no position/letters-represented-by-numbers)
for i in words:
w = []
for j in i:
w.append(numberletterkey[internal_board[j[0], j[1]]])
actual_words.append(''.join(w).lower())
if validity:
if not validword(actual_words, filename)[0]:
return False, False, "Validity mode is on. One of the words you formed is not valid: " + validword(actual_words, filename)[1]
if not racksufficiency(move[0], rack)[0]:
return False, False, racksufficiency(move[0], rack)[1]
return True, move, words
|
4,737 | e403a84ec2a3104cb908933f6949458cccc791c3 | # encoding: utf-8
# -*- coding: utf-8 -*-
"""
The flask application package.
"""
#parse arguments
from flask import Flask
from flask_cors import CORS
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--testing', action='store_true') #to use the testing database
parser.add_argument('-i', '--init', action='store_true') #to use the testing database
parser.add_argument('-r', '--reinit', action='store_true') #to use the testing database
args = parser.parse_known_args()
#remove arguments to not interfere with unittest
import sys
try:
sys.argv.remove('-t')
except:
pass
try:
sys.argv.remove('--testing')
except:
pass
try:
sys.argv.remove('-i')
except:
pass
try:
sys.argv.remove('--init')
except:
pass
try:
sys.argv.remove('-r')
except:
pass
try:
sys.argv.remove('--reinit')
except:
pass
app = Flask(__name__)
app.config['TOKEN_SECRET'] = 'Secret_Token' #Change this
app.config['SECRET_KEY'] = 'Secret_Key' #Change this
app.config['CORS_HEADERS'] = ['Content-Type', 'Authorization']
app.config['CORS_AUTOMATIC_OPTIONS'] = True
CORS(app)
app.config['TESTING'] = args[0].testing
app.config['INIT'] = args[0].init
app.config['REINIT'] = args[0].reinit
from SmartRecruiting_BackEnd.data import DatabaseManager
dbManager = DatabaseManager()
import SmartRecruiting_BackEnd.api.routes
import SmartRecruiting_BackEnd.data
import SmartRecruiting_BackEnd.deeplearning.preprocess
|
4,738 | 2dcb2d8d41096f0affe569d8ddbdd190885d5f14 | """deserialization tools"""
import typing as t
from datetime import datetime
from functools import partial
from toolz import compose, flip, valmap
from valuable import load, xml
from . import types
registry = load.PrimitiveRegistry({
bool: dict(true=True, false=False).__getitem__,
datetime: partial(flip(datetime.strptime), '%Y-%m-%dT%H:%M:%S%z'),
str: str.strip,
**{
c: c for c in [
int,
float,
types.Journey.Status,
types.Journey.Component.Status
]
}
}) | load.GenericRegistry({
t.List: load.list_loader,
}) | load.get_optional_loader | load.DataclassRegistry({
types.Station: {**valmap(xml.textgetter, {
'code': 'Code',
'type': 'Type',
'country': 'Land',
'uic': 'UICCode',
'lat': 'Lat',
'lon': 'Lon',
'name': 'Namen/Middel',
'full_name': 'Namen/Lang',
'short_name': 'Namen/Kort',
}), **{
'synonyms': xml.textsgetter('Synoniemen/Synoniem'),
}},
types.Journey: {**valmap(xml.textgetter, {
'transfer_count': 'AantalOverstappen',
'planned_duration': 'GeplandeReisTijd',
'planned_departure': 'GeplandeVertrekTijd',
'planned_arrival': 'GeplandeAankomstTijd',
'actual_duration': 'ActueleReisTijd',
'actual_departure': 'ActueleVertrekTijd',
'actual_arrival': 'ActueleAankomstTijd',
'status': 'Status',
}), **{
'components': xml.elemsgetter('ReisDeel'),
'notifications': xml.elemsgetter('Melding'),
}, **{
'optimal': xml.textgetter('Optimaal', default='false')
}},
types.Departure: {**valmap(xml.textgetter, {
'ride_number': 'RitNummer',
'time': 'VertrekTijd',
'destination': 'EindBestemming',
'train_type': 'TreinSoort',
'carrier': 'Vervoerder',
'platform': 'VertrekSpoor',
}), **{
'platform_changed': xml.attribgetter('VertrekSpoor', 'wijziging'),
'comments': xml.textsgetter('Opmerkingen/Opmerking'),
'delay': xml.textgetter('VertrekVertragingTekst',
default=None),
'travel_tip': xml.textgetter('ReisTip', default=None),
'route_text': xml.textgetter('RouteTekst', default=None),
}},
types.Journey.Component: {**valmap(xml.textgetter, {
'carrier': 'Vervoerder',
'type': 'VervoerType',
'ride_number': 'RitNummer',
'status': 'Status',
}), **{
'details': xml.textsgetter('Reisdetails/Reisdetail'),
'kind': xml.attribgetter('.', 'reisSoort'),
'stops': xml.elemsgetter('ReisStop'),
}},
types.Journey.Component.Stop: {
'name': xml.textgetter('Naam'),
'time': compose(lambda x: x or None,
xml.textgetter('Tijd')),
'platform_changed': xml.attribgetter('Spoor', 'wijziging',
default=None),
'delay': xml.textgetter('VertrekVertraging', default=None),
'platform': xml.textgetter('Spoor', default=None)
},
types.Journey.Notification: valmap(xml.textgetter, {
'id': 'Id',
'serious': 'Ernstig',
'text': 'Text',
})
})
|
4,739 | 68319663aad13b562e56b8ee25f25c7b548417df | from django.contrib import admin
from django.urls import path, include
from accounts import views
urlpatterns = [
path('google/login', views.google_login),
path('google/callback/', views.google_callback),
path('accounts/google/login/finish/', views.GoogleLogin.as_view(), name = 'google_login_todjango'),
]
|
4,740 | 1913bbffd8c3c9864a8eeba36c6f06e30d2dd2c8 | # phase 3 control unit
#Dennis John Salewi,Olaniyi Omiwale, Nobert Kimario
from MIPSPhase1 import BoolArray
class RegisterFile:
def __init__(self):
# The register file is a list of 32 32-bit registers (BoolArray)
# register 29 is initialized to "000003E0" the rest to "00000000"
# an instance vaariable for writeReg is initialized to 0
self.regFile=[]
for i in range(0,32):
if i==29:
self.regFile.append(BoolArray("000003E0",32))
else:
self.regFile.append(BoolArray("00000000",32))
self. writeReg=0
def readCycle(self,readReg1,readReg2,writeReg):
# readReg1, readReg2, and writeReg are integers 0..31
# writeReg is 'remembered' in the instance variable
# copies of the two read registers are returned (BoolArrays)
self.writeReg=writeReg
read1,read2=self.regFile[readReg1],self.regFile[readReg2]
return read1,read2
def writeCycle(self,RegWrite,data):
# RegWrite is a boolean indicating that a write should occur
# data is a BoolArray to be written if RegWrite is true
# if the RegWrite control is True,
# the value of data is copied into the writeReg register
# where writeReg is the value most remembered from the most
# recent read cycle
# if RegWrite is false, no values change
# NOTE: if writeReg is 0, nothing happens since $zero is constant
if RegWrite==True:
self.regFile[self.writeReg]=data
else:
return
def getReg(self,i):
# returns a copy of the BoolArray in register i
return self.regFile[i]
def showRegisters(self):
# prints the index and hex contents of all non-zero registers
# printing all registers is ok too but wastes screen real estate
h=self.regFile(self)
for i in range(len(regFile)):
h=self.regFile(self)
if h !=0:
print(str(h))
else:
return
class Memory:
# lets use 2K memory and map
# 3FC,3Fd,3FE,3FF for inbuff,inflag,outbuff,outflag
# sp is 3E0 and grows down
def __init__(self):
# creates a dictionary of BoolArrays whose
# keys are 0, 4, 8, ... , 1020 and
# values are all BoolArray("0",32)
self.dicti={}
for i in range(0,1021):
self.dicti[i]=BoolArray("0",32)
def showMemory(self):
# print a nicely formatted table of non-zero memory
print(" RegLocation", " ", "Data")
print("+---------------------------------------------------------------+")
for i in range(0,1021):
if int(self.dicti[i]) !=0:
print("| ",i," | ",self.dicti[i].toHex(), " |")
else:
return -1
print("+---------------------------------------------------------------+")
def loadProgram(self,fname):
# fname is the name of a text file that contains
# one 8-digit hexidecimal string per line
# they are cnverted to BoolArray and
# read sequentially into memory in
# locations 0, 4, 8, 12, ...
f=open(fname,"r")
temp=f.read()
boolVals=[BoolArray(x[:8],32) for x in temp.split()]
for i in range(0,len(boolVals)*4,4):
self.dicti[i]=boolVals[i//4]
f.close()
def instrCycle(self,PC):
# PC is a BoolArray
# returns a copy of the memory address int(PC)
p=PC
return self.dicti[int(p)]
def dataCycle(self,Address,WriteData,ReadMem,WriteMem):
# Address and Write Data are BoolArrays
# ReadMem and WriteMem are booleans (not both True)
# if ReadMem True:
# return a copy of the BoolArray at mem[int(address)]
# if WriteMem is True:
# a copy of WriteData is placed at mem[int(address)]
# a BoolArray("00000000",32) is returned
if ReadMem:
p= self.dicti[int(Address)]
return p
elif WriteMem:
k=WriteData
self.dicti[int(Address)]=k
return BoolArray("00000000",32)
else:
return BoolArray("0",32)
if __name__=="__main__":
d=Memory()
d.showMemory()
|
4,741 | 5fc097518b6069131e1ca58fa885c6ad45ae143c | #!/usr/bin/env python
#lesson4.py
# See original source and C based tutorial at http://nehe.gamedev.net
#This code was created by Richard Campbell '99
#(ported to Python/PyOpenGL by John Ferguson 2000)
#John Ferguson at hakuin@voicenet.com
#Code ported for use with pyglet by Jess Hill (Jestermon) 2009
#jestermon.weebly.com
#jestermonster@gmail.com
#because these lessons sometimes need openGL GLUT, you need to install
#pyonlgl as well as pyglet, in order for this sample them to work
#pyopengl ~ http://pyopengl.sourceforge.net
#pyglet ~ http://www.pyglet.org
import pyglet
from pyglet.gl import *
from pyglet.window import key
from OpenGL.GLUT import * #<<<==Needed for GLUT calls
from objloader import *
from numpy import sin
##################################World
class World(pyglet.window.Window):
    """Pyglet window rendering a rotating Wavefront OBJ model plus a HUD quad.

    Port of NeHe OpenGL lesson 4 adapted to draw models loaded via
    objloader.OBJ. Redraws at 60 Hz via a pyglet clock callback.
    """
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Model files; only objfile1 is actually loaded below.
    objfile1 = 'resource/predator.obj'
    objfile2 = 'resource/A10.obj'
    # objfile = 'resource/complex2.obj'
    # Display list is built once, at class-definition time.
    obj = OBJ(objfile1)
    # obj2 = OBJ(objfile2)
    def __init__(self):
        # Ask for an antialiased, double-buffered context; fall back to the
        # default config if the driver refuses it.
        config = Config(sample_buffers=1, samples=4,
                        depth_size=16, double_buffer=True,)
        try:
            super(World, self).__init__(resizable=True, config=config)
        except:
            super(World, self).__init__(resizable=True)
        self.setup()
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def setup(self):
        # Initial window size, rotation state and GL setup, plus the redraw timer.
        self.width = 640
        self.height = 480
        self.rtri = 0.0   # triangle rotation angle in degrees (was a global)
        self.rquad = 0.0  # quad/model rotation angle in degrees (was a global)
        self.InitGL(self.width, self.height)
        pyglet.clock.schedule_interval(self.update, 1/60.0) # update at 60Hz
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def update(self,dt):
        # Clock callback; dt (elapsed seconds) is unused — redraw unconditionally.
        self.DrawGLScene()
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def on_draw(self):
        # Pyglet draw event handler.
        self.DrawGLScene()
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def on_resize(self,w,h):
        # Pyglet resize event handler.
        self.ReSizeGLScene(w,h)
    def MakeTransparent(self):
        # Optional alpha-blending mode (currently unused; see commented call
        # at the end of InitGL).
        glDisable(GL_DEPTH_TEST)
        glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable (GL_BLEND)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # A general OpenGL initialization function. Sets all of the initial parameters.
    def InitGL(self,Width, Height):    # We call this right after our OpenGL window is created.
        glClearColor(0.0, 0.0, 0.0, 0.0)    # This Will Clear The background Color To Black
        # glClearColor(0.0, 0.0, 0.5, 1.0)  # alternative: dark blue background
        glClearDepth(1.0)                   # Enables Clearing Of The Depth Buffer
        glDepthFunc(GL_LESS)                # The Type Of Depth Test To Do
        glEnable(GL_DEPTH_TEST)             # Enables Depth Testing
        glShadeModel(GL_SMOOTH)             # Enables Smooth Color Shading
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()                    # Reset The Projection Matrix
        # Calculate The Aspect Ratio Of The Window
        # (pyglet initializes the screen so we ignore this call)
        #gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)
        # Single light source for a basic diffuse/specular shading effect.
        specLight0 = [0.5, 0.5, 0.5, 1.0];
        glLightfv(GL_LIGHT0, GL_SPECULAR, specLight0);
        glMaterialfv(GL_FRONT, GL_SHININESS, 10.0);
        glLightfv(GL_LIGHT0, GL_POSITION, (0, 200, 100, 0.0))
        dens = 0.3
        glLightfv(GL_LIGHT0, GL_AMBIENT, (dens,dens,dens, 0.0))
        # glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.5, 0.5, 0.5, 0.0))
        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHTING)
        glEnable(GL_COLOR_MATERIAL)
        # # glutFullScreenToggle()
        # self.MakeTransparent()
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
    def ReSizeGLScene(self,Width, Height):
        if Height == 0:    # Prevent A Divide By Zero If The Window Is Too Small
            Height = 1
        glViewport(0, 0, Width, Height)    # Reset The Current Viewport And Perspective Transformation
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)
    def DrawHUD(self,basicT=(0,0,0)):
        # Draw a simple heads-up overlay: one green line and a rotating white
        # quad. basicT is currently unused — TODO confirm intended use.
        # glMatrixMode(GL_PROJECTION)
        # glLoadIdentity()
        # glOrtho ( 0, 640, 480, 0, 0, 1 )
        glMatrixMode(GL_MODELVIEW)
        # glTranslatef(0, 0, -30.0)
        pyglet.gl.glColor4f(0.0,1,0,1.0)
        glEnable (GL_LINE_SMOOTH);
        glHint (GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
        glLineWidth (3)
        pyglet.graphics.draw ( 2, pyglet.gl.GL_LINES, ('v2i',(10, 15, 300, 305)) )
        # glClear(GL_COLOR_BUFFER_BIT)
        glLoadIdentity()
        glTranslatef(1.0, 1.0, -6.0)
        # Draw a square (quadrilateral) rotated on the X axis.
        glRotatef(self.rquad, 0.0, 1.0, 0.0)    # Rotate
        glColor3f(1.0, 1.0, 1.0)                # white
        glPointSize(3.0)
        glBegin(GL_QUADS)                       # Start drawing a 4 sided polygon
        glVertex3f(-1.0, 1.0, 0.0)              # Top Left
        glVertex3f(1.0, 1.0, 0.0)               # Top Right
        glVertex3f(1.0, -1.0, 0.0)              # Bottom Right
        glVertex3f(-1.0, -1.0, 0.0)             # Bottom Left
        glEnd()                                 # We are done with the polygon
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # The main drawing function.
    def DrawGLScene(self):
        # NOTE(review): leftover from the C port — there are no module-level
        # rtri/rquad globals; the instance attributes self.rtri/self.rquad
        # below are what is actually used. This statement is harmless but dead.
        global rtri, rquad
        # Clear The Screen And The Depth Buffer
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        basicT = (1,1,1)
        self.DrawHUD(basicT)
        glLoadIdentity()    # Reset The View
        glTranslatef(15.0, -5, -50.0)
        # glTranslatef(15.0, 2*sin(self.rquad/50.)-5, -50.0)
        # Wobble the model around a diagonal axis using the frame counter.
        glRotatef(20*sin(self.rquad/20.), 0.1, 0.1, -1.0)    # Rotate
        glCallList(self.obj.gl_list)
        # ---------------------------------------------------------------------------------
        # We are "undoing" the rotation so that we may rotate the quad on its own axis.
        # We also "undo" the prior translate.
        # This could also have been done using the matrix stack.
        # # # glLoadIdentity()
        # # # glTranslatef(-15.0, 0.0, -50.0)
        # # # glRotatef(self.rquad, 0.1, -1.0, 0.0)    # Rotate
        # # # glCallList(self.obj2.gl_list)
        # glLoadIdentity()
        # # Move Right 1.5 units and into the screen 6.0 units.
        # glTranslatef(1.0, 1.0, -6.0)
        #
        # # Draw a square (quadrilateral) rotated on the X axis.
        # glRotatef(self.rquad, 0.0, 1.0, 0.0)    # Rotate
        # glColor3f(0.3, 0.5, 1.0)                # Bluish shade
        # glBegin(GL_QUADS)                       # Start drawing a 4 sided polygon
        # glVertex3f(-1.0, 1.0, 0.0)              # Top Left
        # glVertex3f(1.0, 1.0, 0.0)               # Top Right
        # glVertex3f(1.0, -1.0, 0.0)              # Bottom Right
        # glVertex3f(-1.0, -1.0, 0.0)             # Bottom Left
        # glEnd()                                 # We are done with the polygon
        # What values to use?  Well, if you have a FAST machine and a FAST 3D Card, then
        # large values make an unpleasant display with flickering and tearing.  I found that
        # smaller values work better, but this was based on my experience.
        # (2009.. 9 years after this code was written, this still applies.. unless you use)
        # (a timed display, as done here with pyglet.clock.schedule_interval(self.update, 1/60.0) #updates at 60Hz)
        # self.rtri = self.rtri + 1.0    # Increase The Rotation Variable For The Triangle
        self.rquad = self.rquad + 1.3    # Advance the rotation variable for the quad/model
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def on_key_press(self, symbol, modifiers):
        # ESC closes the window.
        if symbol == key.ESCAPE:
            self.dispatch_event('on_close')
    # since this is double buffered, swap the buffers to display what just got drawn.
    # (pyglet provides the swap, so we dont use the swap here)
    #glutSwapBuffers()
# Window-size presets as (width, height) tuples.
default_size = 1024,768
screen_size1 = 640,480
if __name__ == "__main__":
    window = World()
    window.set_location(10,30)
    window.set_size(*screen_size1)
#    window.set_fullscreen(True)
    pyglet.app.run()
|
def entete():
    """Return the common HTML document head shared by every page.

    Includes the doctype, the <head> with stylesheet/script links, and the
    opening <body> tag; pages append their content and finish with footer().

    Fix: the lang attribute previously used typographic (curly) quotes
    (lang=“fr”), which browsers would treat as part of the attribute value;
    it now uses plain ASCII double quotes.
    """
    html = '''
    <!DOCTYPE HTML>
    <html lang="fr">
    <head>
        <title>AMAP'PATATE</title>
        <meta charset="UTF-8" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/font-awesome.min.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/bootstrap.min.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/style.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/menu.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/form.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/button.css" />
        <script type="text/javascript" src= " /IENAC15/amapatate/js/jquery-2.2.0.min.js" ></script>
        <script type="text/javascript" src= " /IENAC15/amapatate/js/bootstrap.min.js" ></script>
    </head>
    <body>
    '''
    return html
def nav():
    """Build the site-wide navigation bar HTML.

    Appends an extra "Page privée" entry when the current session holds a
    non-empty "nom" key (i.e. a user is logged in).

    Fix: Session() was previously called twice inside the same condition;
    it is now fetched once, avoiding redundant work and any risk of the two
    calls observing different state.

    NOTE(review): Session is not defined or imported in this file — confirm
    it is provided by the surrounding CGI/web environment.

    Returns:
        str: the <nav> HTML fragment.
    """
    nav = '''
    <nav>
        <ul>
            <li><a href="/IENAC15/amapatate/index.py">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-home fa-stack-1x fa-inverse"></i>
                </span>
                Accueil</a>
            </li>
            <li><a href="/IENAC15/amapatate/index.py#ecole">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-plane fa-stack-1x fa-inverse"></i>
                </span>
                L'école</a>
                <ul>
                    <li><a href="http://www.eag-tournament.com">
                        <i class="fa fa-soccer-ball-o fa-fw"></i>EAG</a>
                    </li>
                    <li><a href="index.html#contacter">
                        <i class="fa fa-phone fa-fw"></i>Nous Contacter</a>
                    </li>
                </ul>
            </li>
            <li><a href="/IENAC15/amapatate/python/clubs.py">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-bicycle fa-stack-1x fa-inverse"></i>
                </span>
                Les clubs</a>
            </li>
            <li><a href="/IENAC15/amapatate/python/connecter.py">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-user fa-stack-1x fa-inverse"></i>
                </span>
                Se connecter</a>
            </li>
    '''
    # Fetch the session once instead of calling Session() twice.
    session = Session()
    if "nom" in session and session["nom"] != '':
        nav += '''
            <li><a href="/IENAC15/amapatate/python/page_prive.py">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-user fa-stack-1x fa-inverse"></i>
                </span>
                Page privée</a>
            </li>
        '''
    nav += '''
        </ul>
    </nav>
    '''
    return nav
def titre(intitule):
    """Return the page header block, wrapping *intitule* in an <h1>."""
    head = '''
    <header>
        <h1>'''
    tail = '''</h1>
        <p>L'AMAP fruits et légumes qui vous donne la patate </p>
    </header>
    '''
    return head + intitule + tail
def footer():
    """Return the site footer plus the closing </body></html> tags."""
    return '''
    <footer>© All right reserved ENAC
    </footer>
    </body>
</html>
    '''
|
4,743 | 51f171b3847b3dbf5657625fdf3b7fe771e0e004 | from pointsEau.models import PointEau
from django.contrib.auth.models import User
from rest_framework import serializers
class PointEauSerializer(serializers.ModelSerializer):
    """Serialize a PointEau (water point) with its coordinates and owner."""

    # Explicit field declarations overriding the ModelSerializer defaults.
    nom = serializers.CharField(max_length=100)
    long = serializers.DecimalField(max_digits=10, decimal_places=8)
    lat = serializers.DecimalField(max_digits=10, decimal_places=8)
    desc = serializers.CharField(max_length=255)
    # Derived from the related user; never writable by clients.
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = PointEau
        fields = [
            'pk',
            'nom',
            'lat',
            'long',
            'desc',
            'owner'
        ]
class UserSerializer(serializers.ModelSerializer):
    """Serialize a Django auth User together with the PKs of their water points."""
    # Primary keys of the PointEau objects related to this user.
    # NOTE(review): assumes User exposes a reverse relation named `pointseau`
    # (e.g. via related_name on PointEau.owner) — confirm against the model.
    pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=PointEau.objects.all())
    class Meta:
        model = User
        fields = ('id', 'username', 'pointseau')
|
4,744 | 9e77385933cf6e381f25bea9020f909d5dc6817d | # -*- coding: utf-8 -*-
"""
Description: This module is used for testing. Testing is performed by executing a given list of commands against a website.
Version : v1.5
History :
v1.0 - 08/01/2016 - Initial version
v1.1 - 08/05/2016 - Modified to accept List input.
v1.2 - 08/05/2016 - Removed dead code in feed_input
v1.3 - 08/05/2016 - Added function get_data_dictionary to return the fetched values
v1.4 - 09/01/2016 - updated _print_ function and added log_process_status variable
v1.5 - 09/22/2016 - variable to suppress output running. Default - output will be written to file.
Open Issues: None.
Pending : Enhance coding standards. Clean up dead code in feed_input function
"""
__version__ = "1.0.0"
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from URL_Status import *
import time # for sleep
import requests #to check status of the page
from Utilities import *
class PatternScraping():
    """Drive a Selenium browser through a scripted list of commands.

    Commands are lists such as ['GO', url], ['CLICK', 'ID', elem_id], ...;
    they are validated by validate_input_commands(), loaded by feed_input()
    (from a tab-separated file or a Python list) and executed by run().
    The outcome of each command is appended, tab-separated, to
    self.output_filename (unless write_output is False).

    NOTE(review): this file uses the Python 2 print statement and the
    long-deprecated PhantomJS driver.
    """
    def __init__(self,output_filename=None,developer_mode=False,print_instance=None,browser_instance=None,log_process_status=True,write_output=True):
        self.developer_mode = developer_mode
        self.log_process_status=log_process_status
        if output_filename:
            self.output_filename=output_filename
        else:
            # Default to a timestamped name so repeated runs never clobber each other.
            self.output_filename='PatternScraping.' + get_timestamp_for_file() + '.testing.txt'
        self.write_output=write_output
        # Whitelists used by validate_input_commands().
        self.possible_commands = ['GO', 'GET_VALUE', 'CLICK', 'ENTER_VALUE','EXIT', 'SLEEP', 'GET_VALUES','GET_LINKS']
        self.possible_command_types = ['ID', 'XPATH', 'NAME', 'CLASS', 'CSS']
        self.browser = None
        # An externally-owned browser may be injected; run() then reuses it
        # and close() will NOT quit it.
        self.ins_browser=browser_instance
        self.initiate_print_instance(instance_instance=print_instance)
    def _print_(self,input_string_in,skip_timestamp=False,add_leading_space=True,message_priority=''):
        # Log helper: delegate to the injected print instance when available,
        # otherwise fall back to a timestamped console print.
        module_name='PatternScraping'
        input_string=input_string_in
        if isinstance(input_string,str):
            input_string = get_html_to_unicode_string(input_string)
        if self.print_instance:
            self.print_instance.customPrint(input_string,skip_timestamp=skip_timestamp,add_leading_space=add_leading_space,module_name=module_name,message_priority=message_priority)
        else:
            print_string=u'' + module_name + '\t' + message_priority + '\t' + input_string
            if not skip_timestamp:
                print_string = log_time_stamp() + print_string
            print get_printable_string(print_string)
    def initiate_print_instance(self,instance_instance=None):
        # Accept the injected print instance only if its self-check passes;
        # any failure silently falls back to plain printing.
        self.print_instance=None
        if instance_instance:
            try:
                if instance_instance.check():
                    self.print_instance=instance_instance
                    return True
            except:
                return False
        return False
    def validate_input_commands(self,list_of_commands):#commands have tupple
        # Validate each command's name, locator type and arity; any error is
        # logged and aborts the process via custom_exit().
        print_prefix='validate_input_commands\t'
        for i in range(len(list_of_commands)):
            if self.developer_mode:
                self._print_(print_prefix + 'Current Input:' + str(list_of_commands[i]))
            if list_of_commands[i][0] not in self.possible_commands:
                self._print_(print_prefix + 'Command not in list:' + str(list_of_commands[i][0]))
                custom_exit()
            line_no = str(i + 1)
            list_length = len(list_of_commands[i])
            command_name=list_of_commands[i][0]
            if command_name not in ['GO','SLEEP','EXIT'] and list_of_commands[i][1] not in self.possible_command_types:
                status="Unknown command type"+" in line number "+ line_no
                self._print_(print_prefix + status)
                custom_exit()
            if command_name == 'GO':
                if not list_of_commands[i][1]:
                    status = "no link provided" + " in line number "+ line_no
                    self._print_(print_prefix + status)
                    custom_exit()
            if command_name == 'GET_VALUE':
                if list_length != 4 or any(list_of_commands[i]) is False:
                    status = "no data provided"+" in line number "+ line_no
                    self._print_(print_prefix + status)
                    custom_exit()
            if command_name == 'GET_VALUES':
                if list_length != 4 or any(list_of_commands[i]) is False:
                    status = "no link provided"+" in line number "+ line_no
                    self._print_(print_prefix + status)
                    custom_exit()
            if command_name == 'CLICK':
                # CLICK takes either 3 fields (unconditional) or 5 fields
                # (conditional on a previously fetched value; see click()).
                if list_length != 3 and list_length != 5:
                    status = "click command length error "+" in line number "+ line_no
                    self._print_(print_prefix + status)
                    custom_exit()
                if any(list_of_commands[i]) is False:
                    status = "click syntax error"+" in line number "+ line_no
                    self._print_(print_prefix + status)
                    custom_exit()
            if command_name == 'ENTER_VALUE':
                if not (list_length == 4 and list_of_commands[i][2]
                        and list_of_commands[i][3]):
                    status = "ENTER VALUE syntax error"+" in line number "+ line_no
                    self._print_(print_prefix + status)
                    custom_exit()
            if command_name == 'SLEEP':
                if not (list_of_commands[i][1] and (list_length == 2)):
                    status = "SLEEP time not provided"+" in line number "+ line_no
                    self._print_(print_prefix + status)
                    custom_exit()
            if command_name == 'EXIT':
                if list_length != 1:
                    status = "Exit syntax error"+" in line number "+ line_no
                    self._print_(print_prefix + status)
                    custom_exit()
        return True
    def feed_input(self, input_commands):
        # Load commands either from a tab-separated file (str argument) or
        # directly from a list of command lists, then validate them.
        print_prefix='feed_input\t'
        self.data_dict = {}
        #if self.developer_mode: self._print_(self.browser.page_source)
        if isinstance(input_commands,str):
            with open(input_commands, "r") as fopen:
                self.base_list_of_lists = []
                self.command_list = fopen.readlines()
                for each_line in self.command_list:
                    self.base_list_of_lists.append((each_line.replace("\n", "")).split("\t"))
        elif isinstance(input_commands,list):
            self.base_list_of_lists=input_commands
        else:
            self._print_(print_prefix + ' Input argument should be either string(filename) or list(commands). Passed:' + str(type(input_commands)))
            custom_exit()
        input_status=self.validate_input_commands(self.base_list_of_lists)
        if self.developer_mode and input_status:
            self._print_(print_prefix + 'Input is Valid')
        return True
    def run(self):
        # Execute the previously fed commands in order; each handler's result
        # is written to the output file. A handler exception or a
        # "Not available" result aborts the run and returns that status.
        if not self.ins_browser:
            if not self.browser:
                self.browser = webdriver.PhantomJS()#Chrome()
        else:
            self.browser=self.ins_browser
        i = 0
        for each_list in self.base_list_of_lists:
            if self.developer_mode:
                self._print_('Input:\t' + str(i + 1) + '\t' + str(each_list))
            line = '\t'.join(each_list)
            if each_list[0] == 'GO':
                try:
                    status = self.go(each_list)
                    if self.developer_mode: self._print_('Command:\tGO\tStatus\t' + str(status))
                    self.file_write(line, status)
                    if status == 'Not available':
                        return 'Not available'
                except Exception as e:
                    self.file_write(line, str(e))
                    return str(e)
            elif each_list[0] == 'GET_VALUE':
                try:
                    status = self.get_value(each_list)
                    if self.developer_mode: self._print_('Command:\tGET_VALUE\tStatus\t' + str(status))
                    self.file_write(line, status)
                except Exception as e:
                    self.file_write(line, str(e))
                    return str(e)
            elif each_list[0] == 'GET_VALUES':
                # self._print_(self.browser.page_source.encode('utf-8')
                try:
                    status = self.get_values(each_list)
                    if self.developer_mode: self._print_('Command:\tGET_VALUES\tStatus\t' + str(status))
                    self.file_write(line, status)
                except Exception as e:
                    self.file_write(line, str(e))
                    return str(e)
            elif each_list[0] == 'GET_LINKS':
                try:
                    self.file_write(line, "Links as below")
                    status = self.get_links(each_list)
                    if self.developer_mode: self._print_('Command:\tGET_LINKS\tStatus\t' + str(status))
                except Exception as e:
                    self.file_write(line, str(e))
                    return str(e)
            elif each_list[0] == 'CLICK':
                try:
                    status = self.click(each_list)
                    if self.developer_mode: self._print_('Command:\tCLICK\tStatus\t' + str(status))
                    self.file_write(line, status)
                    if status == 'Not available':
                        return 'Not available'
                except Exception as e:
                    self.file_write(line, str(e))
                    return str(e)
            elif each_list[0] == 'ENTER_VALUE':
                try:
                    status = self.enter_value(each_list)
                    if self.developer_mode: self._print_('Command:\tENTER_VALUE\tStatus\t' + str(status))
                    self.file_write(line, status)
                    if status == 'Not available':
                        return 'Not available'
                except Exception as e:
                    self.file_write(line, str(e))
                    return str(e)
            elif each_list[0] == 'SLEEP':
                self.sleep(each_list[1])
                status = "Slept for " + each_list[1] + " second(s)"
                if self.developer_mode: self._print_('Command:\tSLEEP\tStatus\t' + str(status))
                self.file_write(line, status)
            elif each_list[0] == 'EXIT':
                self.file_write("EXIT", "OK")
                if self.developer_mode: self._print_('Command:\tEXIT')
                self.browser.quit()
            i += 1
    def go(self, list_of_values):
        # Navigate the browser to the URL, then fetch it AGAIN with requests
        # just to report an HTTP status code.
        # NOTE(review): the second fetch shares no cookies/session with the
        # browser, so the reported status may differ for authenticated pages.
        self.browser.get(list_of_values[1])
        r = requests.get(list_of_values[1])
        time.sleep(2)
        link_status = r.status_code
        return link_status
    def close(self):
        # Quit the browser only if this instance created it.
        if not self.ins_browser:
            if self.browser:
                self.browser.quit()
    def click(self, list_of_values):
        # Click the located element. With 5 fields the click is conditional on
        # self.data_dict: field 4 may be 'Available' / 'Not Available'
        # (key-existence tests on field 3) or an exact expected value.
        try:
            if list_of_values[1] == 'ID':
                a_obj = self.find_by_id(list_of_values[2])
            elif list_of_values[1] == 'XPATH':
                a_obj = self.find_by_xpath(list_of_values[2])
            elif list_of_values[1] == 'NAME':
                a_obj = self.find_by_name(list_of_values[2])
            elif list_of_values[1] == 'CLASS':
                a_obj = self.find_by_class(list_of_values[2])
            if len(list_of_values) == 3:
                a_obj.click()
                return "OK"
            elif len(list_of_values) > 3:
                if list_of_values[4] == 'Available':
                    if list_of_values[3] in self.data_dict.keys():
                        a_obj.click()
                        return "OK"
                    else:
                        return "Not available"
                elif list_of_values[4] == 'Not Available':
                    if list_of_values[3] not in self.data_dict.keys():
                        a_obj.click()
                        self._print_('Function:\tclick\tCondition:\t' + 'Available')
                        return "OK"
                    else:
                        return "Not available"
                else:
                    if list_of_values[4] == self.data_dict[list_of_values[3]]:
                        a_obj.click()
                        return "OK"
                    else:
                        return "Not available"
        except NoSuchElementException as e:
            self._print_('Function:\tclick\tError:\t' + str(e) + '\t Input:' + str(list_of_values))
            return "Not available"
    def get_value(self, list_of_values):
        # Fetch a single element's text and store it in self.data_dict under
        # the name given in field 3.
        if list_of_values[1] == 'ID':
            a_obj = self.find_by_id(list_of_values[2])
        elif list_of_values[1] == 'XPATH':
            a_obj = self.find_by_xpath(list_of_values[2])
        elif list_of_values[1] == 'NAME':
            a_obj = self.find_by_name(list_of_values[2])
        if a_obj:
            self.data_dict[list_of_values[3]] = a_obj.text
            if self.developer_mode: self._print_('Function\tget_value\tData:\t' + str(self.data_dict))
            return a_obj.text
        return "Not available"
    def get_values(self, list_of_values):
        # Collect de-duplicated, non-mailto href attributes from all matched
        # elements. NOTE(review): the CLASS branch also goes through the CSS
        # selector lookup — confirm that is intended.
        edge_list = []
        new_news_list = []
        if list_of_values[1] == 'CLASS':
            elements = self.find_by_css_selector(list_of_values[2])
        elif list_of_values[1] == 'XPATH':
            elements = self.find_by_xpath(list_of_values[2])
        elif list_of_values[1] == 'NAME':
            elements = self.find_by_name(list_of_values[2])
        elif list_of_values[1] == 'CSS':
            elements = self.find_by_css_selector(list_of_values[2])
        if elements:
            edge_list = [a.get_attribute("href") for a in elements]
            for each in edge_list:
                if each and (not each.startswith('mailto')) and each not in new_news_list:
                    new_news_list.append(each)
        return new_news_list
    def get_links(self, list_of_values):
        # Collect anchors inside a div located by CLASS or ID, store them in
        # self.data_dict, then probe each link's status via url_check_status
        # and write one result line per link.
        edge_list = []
        new_news_list = []
        if list_of_values[1] == 'CLASS':
            path = "div."+list_of_values[2]+" a"
            elements = self.find_by_css_selector(path)
        elif list_of_values[1] == 'ID':
            path = "div#"+list_of_values[2]+" a"
            elements = self.find_by_css_selector(path)
        if elements:
            edge_list = [a.get_attribute("href") for a in elements]
            for each in edge_list:
                if each and (not each.startswith('mailto')) and each not in new_news_list:
                    new_news_list.append(each)
        if new_news_list: #do we need to check the 4th argument
            self.data_dict[list_of_values[3]]=new_news_list
        main_window = self.browser.current_window_handle
        if self.developer_mode: self._print_('Function\tget_links\tData:\t' + str(new_news_list))
        self.file_write("",str(len(new_news_list))+ " link(s) found. Their status are: (link"+"\t"+"is_url_active"+"\t"+"is_redirected"+"\t"+"redirected_to"+")")
        for each_link in new_news_list:
            res_dict = url_check_status(each_link)
            line = each_link+"\t"+res_dict['URL_Active']+"\t"+res_dict['Redirected']
            self.file_write(line, res_dict['Redirected_into'])
        return new_news_list
    def enter_value(self, list_of_values):
        # Type field 3 into the located element; the literal string
        # "Keys.ENTER" sends the Enter key instead.
        if list_of_values[1] == 'ID':
            a_obj = self.find_by_id(list_of_values[2])
        elif list_of_values[1] == 'XPATH':
            a_obj = self.find_by_xpath(list_of_values[2])
        elif list_of_values[1] == 'NAME':
            a_obj = self.find_by_name(list_of_values[2])
        if a_obj:
            if list_of_values[3] == "Keys.ENTER":
                a_obj.send_keys(Keys.ENTER)
            else:
                a_obj.send_keys(list_of_values[3])
            return "Value entered"
        return "Not available"
    def sleep(self, sleep_time):
        # sleep_time may be a numeric string.
        time.sleep(float(sleep_time))
        return True
    def find_by_id(self, input_id):
        # Thin Selenium locator wrappers (single element unless noted).
        input_id_obj = self.browser.find_element_by_id(input_id)
        return input_id_obj
    def find_elements_by_id(self, input_id):
        input_id_obj = self.browser.find_elements_by_id(input_id)
        return input_id_obj
    def find_by_xpath(self, input_xpath):
        input_xpath_obj = self.browser.find_element_by_xpath(input_xpath)
        return input_xpath_obj
    def find_by_name(self, input_name):
        input_id_obj = self.browser.find_element_by_name(input_name)
        return input_id_obj
    def find_by_class(self, input_name):
        input_class_obj = self.browser.find_element_by_class_name(input_name)
        return input_class_obj
    def find_by_css_selector(self, input_name):
        # Returns a LIST of elements, unlike the other find_by_* helpers.
        input_class_obj = self.browser.find_elements_by_css_selector(input_name)
        return input_class_obj
    def file_write(self, command_line, status):
        # Append "command<TAB>status" to the output file (no-op when
        # write_output is False).
        if self.write_output:
            with open(self.output_filename, "a") as result_file:
                result_file.write(command_line + "\t" + str(status) + "\n")
    def get_data_dictionary(self):
        # Expose the values fetched by GET_VALUE / GET_LINKS.
        return self.data_dict
if __name__ == '__main__':
    # Demo run: script a Google search end-to-end with an inline command list.
    # NOTE(review): input_filename/output_filename below are leftovers — they
    # are never passed to PatternScraping in this invocation.
    # input_filename = 'input.txt'
    input_filename = 'input_22.txt'
    output_filename = 'output.txt'
    obj = PatternScraping(developer_mode=True)
    obj.feed_input([['GO','https://www.google.com'],['SLEEP','1'],['ENTER_VALUE','ID','lst-ib','Testing Automation'],['CLICK','NAME','btnG'],['SLEEP','5'],['EXIT']])
    obj.run()
4,745 | 972a063bab35926472be592e6a17d450034fbf37 | import graphene
from django.core.exceptions import ValidationError
from ....app import models
from ....app.error_codes import AppErrorCode
from ....permission.enums import AppPermission, get_permissions
from ....webhook.event_types import WebhookEventAsyncType
from ...account.utils import can_manage_app
from ...core.mutations import ModelMutation
from ...core.types import AppError
from ...core.utils import WebhookEventInfo
from ...plugins.dataloaders import get_plugin_manager_promise
from ...utils import get_user_or_app_from_context, requestor_is_superuser
from ..types import App
from ..utils import ensure_can_manage_permissions
from .app_create import AppInput
class AppUpdate(ModelMutation):
    """GraphQL mutation updating an existing App; emits APP_UPDATED webhooks."""
    class Arguments:
        id = graphene.ID(description="ID of an app to update.", required=True)
        input = AppInput(
            required=True,
            description="Fields required to update an existing app.",
        )

    class Meta:
        description = "Updates an existing app."
        model = models.App
        object_type = App
        permissions = (AppPermission.MANAGE_APPS,)
        error_type_class = AppError
        error_type_field = "app_errors"
        webhook_events_info = [
            WebhookEventInfo(
                type=WebhookEventAsyncType.APP_UPDATED,
                description="An app was updated.",
            ),
        ]

    @classmethod
    def clean_input(cls, info, instance, data, **kwargs):
        """Validate the mutation input on top of the base cleaning.

        Raises ValidationError (code OUT_OF_SCOPE_APP) when the requestor is
        neither a superuser nor allowed to manage this app. Also resolves any
        requested permission enum values into permission objects and verifies
        the requestor is allowed to grant them.
        """
        cleaned_input = super().clean_input(info, instance, data, **kwargs)
        requestor = get_user_or_app_from_context(info.context)
        if not requestor_is_superuser(requestor) and not can_manage_app(
            requestor, instance
        ):
            msg = "You can't manage this app."
            code = AppErrorCode.OUT_OF_SCOPE_APP.value
            raise ValidationError({"id": ValidationError(msg, code=code)})

        # clean and prepare permissions
        if "permissions" in cleaned_input:
            permissions = cleaned_input.pop("permissions")
            cleaned_input["permissions"] = get_permissions(permissions)
            ensure_can_manage_permissions(requestor, permissions)
        return cleaned_input

    @classmethod
    def post_save_action(cls, info, instance, cleaned_input):
        # Fire the app_updated plugin/webhook event after the instance is saved.
        manager = get_plugin_manager_promise(info.context).get()
        cls.call_event(manager.app_updated, instance)
|
# Polymorphism recap (recapitulare polimorfism)
class Caine:
    """A dog; one half of the duck-typed `sunet` interface demo."""

    def sunet(self):
        """Print the dog's sound to stdout."""
        print("ham ham")
class Pisica:
    """A cat; the other half of the duck-typed `sunet` interface demo."""

    def sunet(self):
        """Print the cat's sound to stdout."""
        print("miau")
def asculta_sunet(tipul_animalului):
    """Invoke `sunet()` on any animal-like object (duck typing)."""
    tipul_animalului.sunet()
# Instantiate one animal of each kind and let the shared interface dispatch.
dog = Caine()
cat = Pisica()
asculta_sunet(dog)
asculta_sunet(cat)
|
4,747 | 2f16c74e51789dd06bfc1fe1c6173fa5b0ac38cd | import numpy as np
import heapq
class KdNode:
    """A single node of a k-d tree."""

    def __init__(self, depth, splitting_feature, splitting_value, idx, parent):
        """Create a node.

        :param depth: depth of this node in the tree (root is 0).
        :param splitting_feature: index of the feature this node splits on.
        :param splitting_value: threshold value used for the split.
        :param idx: indices (into the dataset) of samples stored at this node.
        :param parent: the parent node, or None for the root.
        """
        self.depth = depth
        self.splitting_feature = splitting_feature
        self.splitting_value = splitting_value
        self.idx = idx
        self.parent = parent
        # Children are attached later, while the tree is grown.
        self.left = None
        self.right = None
class KdTree:
    """an efficient algorithm of find k-nearest-neighbours
    https://en.wikipedia.org/wiki/K-d_tree
    pseudo-code: (construct)
        input: X, shape is (n_samples, n_features). dimension k
        output: k-d tree
        (1) start: divide all samples in X into two equal-sized collections by the median of the
            first feature. Construct a root whose depth is 1. For samples equal to the median,
            store them at the root. Store samples < median at the left child of the root,
            and those > median at the right child.
        (2) repeat: for nodes of depth j, select the l-th feature as splitting axis. l = j(mod k).
            divide samples in the node by the median of the l-th feature. store samples equal to
            the median at the node, and split other samples into left and right children on whether
            they < median.
        (3) terminate: terminate until no samples in left and right subtrees of the node.
    pseudo-code: (search)
        input: k-d tree, target sample x.
        output: k nearest neighbours of x. (a list 'k-nn')
        (1) top-down: starting from the root. if the feature value of the splitting axis of x is smaller
            than the splitting threshold (the median of 1st feature) of the root, move it to the left
            child. else to the right child. go down recursively until reach a leaf. append samples of
            the leaf to a list 'k-nn'.
        (2) bottom-up: move to the parent of current node. If the max distance from x to samples in
            'k-nn' is larger than the distance from x to the splitting threshold of the parent, search
            for samples in the right subtree which is closer to x than some samples in 'k-nn'. If
            successfully find some, replace those 'furthest' samples in 'k-nn' with closer samples
            if the size of 'k-nn' > k.
        (3) terminate: terminate if reach the root and finish checking its right subtree.
    """
    def __init__(self):
        self.root = None
    def create(self, X, dimensions=None):
        """
        create a kd-tree on data X.
        :param X: shape is (n_samples, n_features).
        :param dimensions: the max number of features chosen for splitting samples. if None, set to
            be n_features.
        :return: None
        """
        n_samples, n_features = X.shape
        # Keep a reference (not a copy) to the data; searches index into it.
        self.X = X
        # NOTE(review): `if not dimensions` also maps dimensions=0 to
        # n_features — presumably fine since 0 dimensions is meaningless.
        if not dimensions:
            dimensions = n_features
        self.root = KdNode(depth=0,
                           splitting_feature=0,
                           splitting_value=np.median(X[:, 0]),
                           idx=np.arange(n_samples),
                           parent=None)
        # grow the tree by DFS
        stack = [self.root]
        while stack:
            node = stack.pop()
            # splitting samples in the node into two children; ties (== median)
            # stay stored at the node itself.
            sample_values = X[node.idx, node.splitting_feature]
            left_idx = node.idx[sample_values < node.splitting_value]
            right_idx = node.idx[sample_values > node.splitting_value]
            node.idx = node.idx[sample_values == node.splitting_value]
            # since left and right subtrees are divided by the median of their parent,
            # the sizes of the two subtrees are expected to be equal
            # NOTE(review): with duplicated feature values the strict < / >
            # partitions need not be equal-sized; this assert presumably holds
            # for continuous random data — confirm for general inputs.
            assert len(left_idx) == len(right_idx),\
                'left and right subtrees should have the same number of samples'
            # append left and right children
            if len(left_idx):
                child_depth = node.depth + 1
                child_feature = (node.depth + 1) % dimensions
                left_value = np.median(X[left_idx, child_feature])
                node.left = KdNode(depth=child_depth, splitting_feature=child_feature,
                                   splitting_value=left_value, idx=left_idx, parent=node)
                right_value = np.median(X[right_idx, child_feature])
                node.right = KdNode(depth=child_depth, splitting_feature=child_feature,
                                    splitting_value=right_value, idx=right_idx, parent=node)
                stack.append(node.left)
                stack.append(node.right)
    def _search(self, x, k=3):
        """
        :param x: the target sample point. shape is (n_features,)
        :param k: the number of nearest neighbours to find.
        :return: a list of k nearest neighbours.
        """
        # top-down: descend to the leaf region containing x.
        cur_node = self.root
        # kd-tree is actually a full binary tree
        while cur_node.left:
            if x[cur_node.splitting_feature] <= cur_node.splitting_value:
                cur_node = cur_node.left
            else:
                cur_node = cur_node.right
        # append samples in cur_node into k_nn. k_nn is a max heap
        # (distances are negated so heapq's min-heap pops the FARTHEST first).
        k_nn = []
        # bottom-top
        while cur_node:
            for idx in cur_node.idx:
                # Euclidean distance
                dist = np.linalg.norm(self.X[idx] - x)
                # negate the dist to construct a max heap
                heapq.heappush(k_nn, (-dist, idx))
            if abs(x[cur_node.splitting_feature] - cur_node.splitting_value) < -k_nn[0][0] or len(k_nn) < k:
                # the max distance from x to samples in 'k-nn' > the distance from x to the splitting threshold
                # check samples of another child
                if x[cur_node.splitting_feature] <= cur_node.splitting_value:
                    checking_samples = self._samples_of_subtree(cur_node.right, x, k)
                else:
                    checking_samples = self._samples_of_subtree(cur_node.left, x, k)
                k_nn.extend(checking_samples)
                heapq.heapify(k_nn)
                # keep the size of k_nn <= k
                while len(k_nn) > k:
                    heapq.heappop(k_nn)
            cur_node = cur_node.parent
        # sort k_nn: descending on negated distance = ascending on distance.
        k_nn.sort(reverse=True)
        dists, idxs = zip(*k_nn)
        return [-d for d in dists], list(idxs)
    def search(self, X, k=3):
        """
        :param X: the target sample points. shape is (n_samples, n_features)
        :param k: the number of nearest neighbours to find.
        :return: lists of k nearest neighbours for each sample point.
        """
        assert self.root, 'must create a tree before search'
        result = [self._search(x, k) for x in X]
        dists, idxs = zip(*result)
        return np.array(dists), np.array(idxs)
    def _samples_of_subtree(self, root, x, k):
        # get k nearest neighbours from the subtree rooted at root
        # (same negated-distance max-heap convention as _search).
        k_nn = []
        def dfs(node):
            if not node:
                return
            for idx in node.idx:
                dist = np.linalg.norm(x - self.X[idx])
                heapq.heappush(k_nn, (-dist, idx))
            while len(k_nn) > k:
                heapq.heappop(k_nn)
            if len(k_nn) < k or \
                    (0 < len(k_nn) and abs(x[node.splitting_feature] - node.splitting_value) < -k_nn[0][0]):
                # splitting plane is closer than the current farthest candidate:
                # have to search both two children
                dfs(node.left)
                dfs(node.right)
            else:
                if x[node.splitting_feature] <= node.splitting_value:
                    dfs(node.left)
                else:
                    dfs(node.right)
        dfs(root)
        return k_nn
if __name__ == '__main__':
    # Sanity check: compare this kd-tree against sklearn's exact search.
    from sklearn.neighbors import NearestNeighbors
    n_samples, n_features = 2000, 10
    n_test = 100
    K = 5
    X = np.random.random((n_samples, n_features))
    test_X = np.random.random((n_test, n_features))
    nbrs = NearestNeighbors(n_neighbors=K, algorithm='ball_tree').fit(X)
    distances, indices = nbrs.kneighbors(test_X)
    tree = KdTree()
    tree.create(X)
    dists, idxs = tree.search(test_X, k=K)
    # Both lines print True when the kd-tree matches sklearn exactly.
    print(np.all(distances == dists))
    print(np.all(indices == idxs))
|
4,748 | 30d891c18f3635b7419fa0d0539b2665ad60b22c | l = input().split("+")
l.sort()
print('+'.join(l))
|
4,749 | a4f56b1f93f62d80707367eaba0bba7ef4b2caca | import scipy.io as sio
import glob
import numpy as np
import matplotlib.pyplot as plt
import math
import os,sys
BIN = os.path.expanduser("../tools/")
sys.path.append(BIN)
import myfilemanager as mfm
import mystyle as ms
import propsort as ps
from functools import partial
from scipy.ndimage import gaussian_filter1d
from scipy.constants import c as clight
plt.close('all')
# Scan Parameters
fraction_device_quad_vect = [0.07, 0.16, 0.26]
n_slices_vect = np.array([250., 500., 750., 1000.])
betax_vect = [50, 100, 150, 200, 300, 400, 500, 600]
# Simulations Parameters
PyPICmode_tag = 'Tblocked'
# If you want to save the figures with all the scan parameters choose: savefigures = True and mode = 'auto'
savefigure = True
mode = 'auto'
#~ # Comment this part if you want to save the plots. You can choose only some scan parameters
#~ savefigure = False
#~ fraction_device_quad_vect = [0.26]
#~ n_slices_vect = np.array([1000.,])
#~ betax_vect = [100]
#~ mode = 'manual'
#~ turn_obs = 350
betay_vect = betax_vect
folder_plot = 'intrabunch_modes/'
if not os.path.exists(folder_plot) and savefigure:
os.makedirs(folder_plot)
# choice of the window of turns
# import the dictionary elements
dic = sio.loadmat('tt_complete.mat')
tt = np.squeeze(dic['tt_first'])
smooth = partial(gaussian_filter1d, sigma=2, mode='nearest')
n_turns_window = 20
n_sigmaz_sim = 10. #we are simulating 10 long sigmas
i_want_to_count_over = 4.
flag_weighted = True
#Figure parameters
ii_fig = 0
tick_size = 20
axis_font = {'fontname':'Arial', 'size':'24'}
fig_size = (15, 5)
line_width = 3.5
ms.mystyle_arial(16)
# calculate intra-bunch modes
for fraction_device_quad in fraction_device_quad_vect:
kk = np.argmin(np.abs(dic['fraction_device_quad_vect']-fraction_device_quad))
for betax, betay in zip(betax_vect, betay_vect):
jj = np.argmin(np.abs(dic['betax_vect']-betax))
subfolder_plot = folder_plot + 'betaxy_%d_length_%.2f/'%(betax,fraction_device_quad)
if not os.path.exists(subfolder_plot) and savefigure:
os.makedirs(subfolder_plot)
for n_slices in n_slices_vect:
ii = np.argmin(np.abs(dic['n_slices_vect']-n_slices))
if not math.isnan(tt[ii,jj,kk]):
if mode == 'auto':
wind_center = int(tt[ii,jj,kk])
elif mode == 'manual':
wind_center = turn_obs
else:
raise ValueError("I don't understand!?")
start = [wind_center + n_turns_window/2]
if int(tt[ii,jj,kk]) - n_turns_window/2 < 0:
window_min = 1
window = [np.s_[1:s] for s in start]
else:
window_min = wind_center - n_turns_window/2
window = [np.s_[s-n_turns_window:s] for s in start]
window_max = wind_center + n_turns_window/2
folder_curr_sim = '../simulations_PyPARIS/transverse_grid_%s_betaxy_%.0fm_length%.2f_slices_%d'%(PyPICmode_tag, betax,fraction_device_quad,n_slices)
sim_curr_list = ps.sort_properly(glob.glob(folder_curr_sim+'/slice_evolution_*.h5'))
print sim_curr_list[0]
try:
data = mfm.monitorh5list_to_obj(sim_curr_list, key='Slices', flag_transpose=True)
if flag_weighted:
bpm_x = data.mean_x * data.n_macroparticles_per_slice
bpm_y = data.mean_y * data.n_macroparticles_per_slice
else:
bpm_x = data.mean_x
bpm_y = data.mean_y
xfft = np.fft.rfft(bpm_x, axis=0)
yfft = np.fft.rfft(bpm_y, axis=0)
xfft = np.abs(xfft)**2 #Power
yfft = np.abs(yfft)**2 #Power
for wd in window:
print wd
n_slices, n_turns = data.mean_z.shape
zz = np.linspace(-2.5e-9*clight/2, 2.5e-9*clight/2, n_slices)
xx, yy = bpm_x, bpm_y
# Setting to plot the fft
xftt_to_plot = np.log10(xfft.T)
yftt_to_plot = np.log10(yfft.T)
minval_x = np.max([xftt_to_plot])-3
minval_y = np.max([yftt_to_plot])-3
xftt_to_plot[xftt_to_plot<minval_x] = minval_x
yftt_to_plot[yftt_to_plot<minval_y] = minval_y
YY_to_plot, XX_to_plot = xftt_to_plot.shape
XX_to_plot = np.arange(XX_to_plot)
YY_to_plot = np.arange(YY_to_plot)
fig, ((ax1, ax2)) = plt.subplots(1, 2, figsize=fig_size)
fig.patch.set_facecolor('w')
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.3)
xmin, xmax = wd.start, wd.stop
col = plt.cm.rainbow_r(np.linspace(0, 1, xmax-xmin))
for i, t in enumerate(range(n_turns)[wd]):
ax1.plot(zz, smooth(bpm_x[:, t]), c=col[i], linewidth=line_width)
ax2.plot(zz, smooth(bpm_y[:, t]), c=col[i], linewidth=line_width)
ax1.set_xlabel('z [m]')
ax2.set_xlabel('z [m]')
ax1.set_title('Turns %.0f - %.0f'%(window_min, window_max))
ax2.set_title('Turns %.0f - %.0f'%(window_min, window_max))
if flag_weighted:
ax1.set_xlim(-2.5e-9*c/2, 2.5e-9*c/2)
ax2.set_xlim(-2.5e-9*c/2, 2.5e-9*c/2)
ax1.set_ylabel('Charge weighted\nhorizontal signal')
ax2.set_ylabel('Charge weighted\nvertical signal')
else:
ax1.set_xlim(-0.30, 0.30)
ax2.set_xlim(-0.30, 0.30)
#~ ax1.set_ylim(-.0001,.0001)
#~ ax2.set_ylim(-.0001,.0001)
ax1.set_ylabel('Horizontal signal')
ax2.set_ylabel('Vertical signal')
title = fig.suptitle('Beta_xy = %.0f Fraction Device = %.3f Slices = %.0f\n'%(betax, fraction_device_quad, n_slices))
if flag_weighted and savefigure:
plt.savefig(subfolder_plot + 'charge_weighted_betaxy_%d_length_%.2f_slices_%.0f.png'%(betax, fraction_device_quad, n_slices), dpi=300, bbox_inches='tight')
except IOError as goterror:
print 'Skipped. Got:', goterror
plt.show()
|
4,750 | 45d69194e14e8c20161e979d4ff34d0b90df4672 | #!/usr/bin/env python3
import re
import subprocess
# All object files are resolved relative to this directory.
PREFIX = "Enclave/"

# Object files whose disassembly is audited; commented-out entries are
# deliberately excluded (see the inline notes on data-dependent files).
OBJ_FILES = [
    # "Enclave.o",
    "p_block.o",
    # "symbols.o",
    "runtime.o",
    "primitives.o",
    "unary_op.o",
    "unary/isna.o",
    "unary/mathgen.o",
    "unary/mathtrig.o",
    "unary/plusminus.o",
    "unary/summary.o",
    "unary/print.o", # data dependent by design
    "unary/ustats.o", # only the opcode for the dispatch, not the actual.
    "binary_op.o",
    "binary/arith.o",
    "binary/bstats.o", # only the opcode for the dispatch, not the actual.
    "binary/log_bin.o",
    "binary/logic.o",
    "binary/matmul.o",
    "binary/compare.o",
    "binary/pminmax.o",
    "binary/bstats.o",
]

CONDITIONALS = [
]

# Whitelist of mnemonics considered acceptable (the libftfp set).
# LIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete']) - set(['jne', 'je'])
LIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete'])
SKIP = ['nop',]

opcodes = set()       # every mnemonic observed across all object files
cond_results = {}     # object file -> mnemonics found outside the whitelist

# subprocess.run(["make", "-f", "split.makefile", "clean"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# subprocess.run(["make", "-f", "split.makefile", "all"], check=True)

for obj_file in OBJ_FILES:
    cond_results[obj_file] = set()
    # Disassemble with Intel syntax and relocations, then scan each line.
    dump = subprocess.run(["objdump", "-M", "intel", "-dr", PREFIX + obj_file], stdout=subprocess.PIPE, check=True).stdout
    for line in dump.decode("utf-8").split("\n"):
        cols = line.split('\t')
        if len(cols) > 2:
            # The third tab-separated column is "<mnemonic> <operands>";
            # strip everything after the first space to keep the mnemonic.
            new_code = re.sub(' .*', '', cols[2])
            if new_code == '':
                continue
            # if new_code in CONDITIONALS:
            if new_code not in LIBFTFP and new_code not in SKIP:
                cond_results[obj_file].add(new_code)
            opcodes.add(new_code)

# print(sorted(opcodes))
print(sorted(opcodes - LIBFTFP))
for k,v in cond_results.items():
    print(k,sorted(v))

# Emit a LaTeX table of the whitelist plus cmovne; '*' marks additions
# that are not part of the original LIBFTFP set.
combo = LIBFTFP.copy()
# for s in ['ja', 'jae', 'jb', 'je', 'jne', 'jge', 'jle', 'repz', 'cmovne', 'movq', 'jns']:
# combo.add(s)
combo.add("cmovne")
combo = sorted(combo)
for i in range(0, len(combo)):
    print(r'\texttt{' + combo[i] + '}', end='')
    if combo[i] not in LIBFTFP:
        print('*', end='')
    if i % 5 == 4:
        print(r' \\')
    else:
        print(' & ', end='')
4,751 | 0c3947a1699c78080661a55bbaa9215774b4a18e | import argparse
from flower_classifier import FlowerClassifier
from util import *
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", help="path to training images")
parser.add_argument("--save_dir", default=".", help="path where checkpoint is saved")
parser.add_argument("--arch", default="vgg11", help="which pre-trained model to use as a base. vgg11 or alexnet")
parser.add_argument("--learning_rate", type=float, default=0.003, help="learning rate of the model")
parser.add_argument("--hidden_units", type=int, default=1024, help="size of hidden layer")
parser.add_argument("--gpu", default=False, action="store_true", help="size of hidden layer")
parser.add_argument("--epochs", type=int, default=1, help="number of training epochs")
args = parser.parse_args()
print(args)
def main():
f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)
f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate=args.learning_rate)
save_checkpoint(f_class, 'checkpoint.pth')
#print(model.cat_to_name)
top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg', 3, 'cat_to_name.json')
print(top_probs, top_classes)
if __name__ == "__main__": main()
|
4,752 | 6767302869d73d041e2d7061722e05484d19f3e0 | import datetime,os
def GetDatetimeFromMyFormat(l):
# l = "2018-5-17 19:18:45"
l_words = l.split()
l_days = l_words[0].split('-')
l_times = l_words[1].split(':')
out = datetime.datetime(int(l_days[0]),int(l_days[1]),int(l_days[2]),int(l_times[0]),int(l_times[1]),int(l_times[2]))
return out
|
4,753 | 2362c9a12f97f32f6136aaf16a55cf4acbaf9294 | # coding: utf-8
"""
Idomoo API
OpenAPI spec version: 2.0
Contact: dev.support@idomoo.com
"""
import pprint
import six
class GIFOutput(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> swagger type string (used by to_dict()).
    swagger_types = {
        'gif_fps': 'float',
        'color_depth': 'float',
        'gif_loop': 'int',
        'height': 'float',
        'start': 'float',
        'duration': 'float',
        'suffix': 'str',
        'overlay': 'str',
        'overlay_alignment': 'list[str]',
        'overlay_scale': 'str',
        'label': 'str'
    }

    # Maps attribute name -> JSON key in the API payload.
    attribute_map = {
        'gif_fps': 'gif_fps',
        'color_depth': 'color_depth',
        'gif_loop': 'gif_loop',
        'height': 'height',
        'start': 'start',
        'duration': 'duration',
        'suffix': 'suffix',
        'overlay': 'overlay',
        'overlay_alignment': 'overlay_alignment',
        'overlay_scale': 'overlay_scale',
        'label': 'label'
    }

    def __init__(self, gif_fps=None, color_depth=None, gif_loop=None, height=None, start=None, duration=None,
                 suffix=None, overlay=None, overlay_alignment=None, overlay_scale='fit', label=None):
        """GIFOutput - a model defined in Swagger"""
        self._gif_fps = None
        self._color_depth = None
        self._gif_loop = None
        self._height = None
        self._start = None
        self._duration = None
        self._suffix = None
        self._overlay = None
        self._overlay_alignment = None
        self._overlay_scale = None
        self._label = None
        self.discriminator = None
        if gif_fps is not None:
            self.gif_fps = gif_fps
        if color_depth is not None:
            self.color_depth = color_depth
        if gif_loop is not None:
            self.gif_loop = gif_loop
        # height and start are required: their setters raise ValueError on None.
        self.height = height
        self.start = start
        if duration is not None:
            self.duration = duration
        if suffix is not None:
            self.suffix = suffix
        if overlay is not None:
            self.overlay = overlay
        if overlay_alignment is not None:
            self.overlay_alignment = overlay_alignment
        if overlay_scale is not None:
            self.overlay_scale = overlay_scale
        if label is not None:
            self.label = label

    @property
    def gif_fps(self):
        """Gets the gif_fps of this GIFOutput.

        The frame rate of the GIF. Default is the Video frame rate

        :return: The gif_fps of this GIFOutput.
        :rtype: float
        """
        return self._gif_fps

    @gif_fps.setter
    def gif_fps(self, gif_fps):
        """Sets the gif_fps of this GIFOutput.

        The frame rate of the GIF. Default is the Video frame rate

        :param gif_fps: The gif_fps of this GIFOutput.
        :type: float
        """
        # Capped at 30 fps by the API.
        if gif_fps is not None and gif_fps > 30:
            raise ValueError("Invalid value for `gif_fps`, must be a value less than or equal to `30`")
        self._gif_fps = gif_fps

    @property
    def color_depth(self):
        """Gets the color_depth of this GIFOutput.

        Amount of colors in palette

        :return: The color_depth of this GIFOutput.
        :rtype: float
        """
        return self._color_depth

    @color_depth.setter
    def color_depth(self, color_depth):
        """Sets the color_depth of this GIFOutput.

        Amount of colors in palette

        :param color_depth: The color_depth of this GIFOutput.
        :type: float
        """
        self._color_depth = color_depth

    @property
    def gif_loop(self):
        """Gets the gif_loop of this GIFOutput.

        If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.

        :return: The gif_loop of this GIFOutput.
        :rtype: int
        """
        return self._gif_loop

    @gif_loop.setter
    def gif_loop(self, gif_loop):
        """Sets the gif_loop of this GIFOutput.

        If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.

        :param gif_loop: The gif_loop of this GIFOutput.
        :type: int
        """
        if gif_loop is not None and gif_loop < -1:
            raise ValueError("Invalid value for `gif_loop`, must be a value greater than or equal to `-1`")
        self._gif_loop = gif_loop

    @property
    def height(self):
        """Gets the height of this GIFOutput.

        Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller
        resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically
        calculated to keep the aspect ratio.

        :return: The height of this GIFOutput.
        :rtype: float
        """
        return self._height

    @height.setter
    def height(self, height):
        """Sets the height of this GIFOutput.

        Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller
        resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically
        calculated to keep the aspect ratio.

        :param height: The height of this GIFOutput.
        :type: float
        """
        # Required field.
        if height is None:
            raise ValueError("Invalid value for `height`, must not be `None`")
        self._height = height

    @property
    def start(self):
        """Gets the start of this GIFOutput.

        What second of the storyboard timeline to start the GIF.

        :return: The start of this GIFOutput.
        :rtype: float
        """
        return self._start

    @start.setter
    def start(self, start):
        """Sets the start of this GIFOutput.

        What second of the storyboard timeline to start the GIF.

        :param start: The start of this GIFOutput.
        :type: float
        """
        # Required field.
        if start is None:
            raise ValueError("Invalid value for `start`, must not be `None`")
        self._start = start

    @property
    def duration(self):
        """Gets the duration of this GIFOutput.

        Seconds for the duration of the GIF. Can't be longer than the video.

        :return: The duration of this GIFOutput.
        :rtype: float
        """
        return self._duration

    @duration.setter
    def duration(self, duration):
        """Sets the duration of this GIFOutput.

        Seconds for the duration of the GIF. Can't be longer than the video.

        :param duration: The duration of this GIFOutput.
        :type: float
        """
        self._duration = duration

    @property
    def suffix(self):
        """Gets the suffix of this GIFOutput.

        Unique ending of the file name so several outputs can be created then identified. Required if there is more
        then 1 video output.

        :return: The suffix of this GIFOutput.
        :rtype: str
        """
        return self._suffix

    @suffix.setter
    def suffix(self, suffix):
        """Sets the suffix of this GIFOutput.

        Unique ending of the file name so several outputs can be created then identified. Required if there is more
        then 1 video output.

        :param suffix: The suffix of this GIFOutput.
        :type: str
        """
        self._suffix = suffix

    @property
    def overlay(self):
        """Gets the overlay of this GIFOutput.

        Path to overlay image, such as: play button or watermark.

        :return: The overlay of this GIFOutput.
        :rtype: str
        """
        return self._overlay

    @overlay.setter
    def overlay(self, overlay):
        """Sets the overlay of this GIFOutput.

        Path to overlay image, such as: play button or watermark.

        :param overlay: The overlay of this GIFOutput.
        :type: str
        """
        self._overlay = overlay

    @property
    def overlay_alignment(self):
        """Gets the overlay_alignment of this GIFOutput.

        Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is
        X. The second is Y.

        :return: The overlay_alignment of this GIFOutput.
        :rtype: list[str]
        """
        return self._overlay_alignment

    @overlay_alignment.setter
    def overlay_alignment(self, overlay_alignment):
        """Sets the overlay_alignment of this GIFOutput.

        Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is
        X. The second is Y.

        :param overlay_alignment: The overlay_alignment of this GIFOutput.
        :type: list[str]
        """
        allowed_values = ["left", "center", "right", "top", "middle", "bottom"]
        # Every supplied entry must come from the allowed set.
        if not set(overlay_alignment).issubset(set(allowed_values)):
            raise ValueError(
                "Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]"
                .format(", ".join(map(str, set(overlay_alignment) - set(allowed_values))),
                        ", ".join(map(str, allowed_values)))
            )
        self._overlay_alignment = overlay_alignment

    @property
    def overlay_scale(self):
        """Gets the overlay_scale of this GIFOutput.

        Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's
        completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the
        image according to the alignment settings. * Fill: scale the image up or down so it completely fills the
        video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't
        resize the overlay image.

        :return: The overlay_scale of this GIFOutput.
        :rtype: str
        """
        return self._overlay_scale

    @overlay_scale.setter
    def overlay_scale(self, overlay_scale):
        """Sets the overlay_scale of this GIFOutput.

        Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's
        completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the
        image according to the alignment settings. * Fill: scale the image up or down so it completely fills the
        video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't
        resize the overlay image.

        :param overlay_scale: The overlay_scale of this GIFOutput.
        :type: str
        """
        allowed_values = ["fit", "fill", "none"]
        if overlay_scale not in allowed_values:
            raise ValueError(
                "Invalid value for `overlay_scale` ({0}), must be one of {1}"
                .format(overlay_scale, allowed_values)
            )
        self._overlay_scale = overlay_scale

    @property
    def label(self):
        """Gets the label of this GIFOutput.

        This label is another way to identify this specific output. The label is returned in the response,
        but does not appear in the file name.

        :return: The label of this GIFOutput.
        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """Sets the label of this GIFOutput.

        This label is another way to identify this specific output. The label is returned in the response,
        but does not appear in the file name.

        :param label: The label of this GIFOutput.
        :type: str
        """
        self._label = label

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise nested models, lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GIFOutput):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
4,754 | df92166378c8a8cc0ba02d0ba33d75bbd94510a7 | from flask import Flask, render_template , request
import joblib
# importing all the important libraires
import numpy as np
import pandas as pd
import nltk
import string
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
# Download the WordNet corpus needed by the lemmatizer (no-op once cached).
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
# NOTE(review): `lemma` is never used in this module's visible code —
# presumably the same preprocessing was applied at training time; confirm.
lemma = WordNetLemmatizer()
# Initialise the Flask app.
app = Flask(__name__)
# Load the fitted TF-IDF vectoriser and the trained sentiment classifier.
# NOTE(review): both .pkl files must sit next to this script — confirm paths.
tfidf = joblib.load('tfidf_vector_model.pkl')
model = joblib.load('netflix_75.pkl')
@app.route('/')
def hello():
    # Landing page with the review-input form.
    return render_template('form.html')
@app.route('/submit', methods=["POST"])
def form_data():
    """Score the submitted review text and render the verdict page.

    Reads the ``user_data`` field from the POSTed form, vectorises it with
    the fitted TF-IDF transformer and classifies it with the loaded model.
    """
    user_data = request.form.get('user_data')
    # transform() expects an iterable of documents, so wrap the single string.
    vector = tfidf.transform([user_data])
    my_pred = model.predict(vector)
    # Label 1 means positive sentiment.
    if my_pred[0] == 1:
        out = 'positive review'  # fixed user-facing typo: was 'positve review'
    else:
        out = 'negative review'
    return render_template('predict.html', data=f' {out}')
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug = True)
|
4,755 | c0ebf10b8c0cb4af11608cafcdb85dbff4abdf90 | """
Find two distinct numbers in values whose sum is equal to 100.
Assign one of them to value1 and the other one to value2.
If there are several solutions, any one will be marked as correct.
Optional step to check your answer:
Print the value of value1 and value2.
"""
values = [72, 50, 48, 50, 7, 66, 62, 32, 33, 75, 30, 85, 6, 85, 82, 88, 30, 32, 78, 39, 57, 96, 45, 57, 61, 10, 62, 48, 32, 96, 75, 15, 50, 50]
value1 = None
value2 = None
for x in values:
for y in values:
if x + y == 100 and x != y:
value1 = x
value2 = y
print(value1)
print(value2) |
4,756 | 6f53a989ddf179b699186a78b5d8cf6d3d08cbb2 | import os
import urllib.request
import zipfile
import tarfile
import matplotlib.pyplot as plt
# Removed '%matplotlib inline': it is an IPython magic and a SyntaxError in
# a plain .py file; run in Jupyter if inline rendering is needed.
from PIL import Image
import numpy as np

# Create the "data" folder if it does not exist yet.
data_dir = "./data/"
if not os.path.exists(data_dir):
    os.mkdir(data_dir)

# Download/load MNIST (cached under ./data by scikit-learn).
from sklearn.datasets import fetch_openml
mnist = fetch_openml("mnist_784", version = 1, data_home = "./data")

data_dir_path = "./data/img_78/"
if not os.path.exists(data_dir_path):
    os.mkdir(data_dir_path)

# Save the first N images each of digits 7 and 8 into img_78 as 64x64 JPEGs.
count_7 = 0
count_8 = 0
N = 200  # 200 images per digit

X = mnist.data
y = mnist.target
for i in range(len(X)):
    # digit 7 — fixed: compare labels with '==' instead of identity 'is',
    # and increment the counter that is actually declared (count_7).
    if (y[i] == "7") and (count_7 < N):
        file_path = "./data/img_78/img_7_" + str(count_7) + ".jpg"
        im_f = (X[i].reshape(28, 28))            # back to a 28x28 image
        pil_img_f = Image.fromarray(im_f.astype(np.uint8))  # to PIL
        pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC)  # upscale to 64x64
        pil_img_f.save(file_path)
        count_7 += 1
    # digit 8 — fixed: 'count8' and 'max_num' were undefined names
    # (NameError); use the declared count_8 and the shared limit N.
    if (y[i] == "8") and (count_8 < N):
        file_path = "./data/img_78/img_8_" + str(count_8) + ".jpg"
        im_f = (X[i].reshape(28, 28))            # back to a 28x28 image
        pil_img_f = Image.fromarray(im_f.astype(np.uint8))  # to PIL
        pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC)  # upscale to 64x64
        pil_img_f.save(file_path)
        count_8 += 1
4,757 | b0818b545ab47c27c705f2ccfa3b9edb741602f7 | from django.shortcuts import render, render_to_response, get_object_or_404, redirect
from .models import Club
from .forms import InputForm
# Create your views here.
def base(request):
    """Render the app's shared base layout."""
    return render(request, 'VICHealth_app/base.html')

def index(request):
    """Render the landing page."""
    return render(request, 'VICHealth_app/index.html')

def check_activity_level(request):
    """Render the activity-level self-check page."""
    return render(request, 'VICHealth_app/check_activity_level.html')

def health_tips(request):
    """Render the static health-tips page."""
    return render(request, 'VICHealth_app/health_tips.html')

def sub_info(request):
    """Render the subscription page with every club and an empty input form."""
    club=Club.objects.all()
    form=InputForm()
    context = { "club":club, "form":form }
    return render(request, 'VICHealth_app/sub_info.html', context)
4,758 | 8e3b26826752b6b3482e8a29b9b58f5025c7ef58 | """
File: ex17_map_reduce.py
Author: TonyDeep
Date: 2020-07-21
"""
from functools import reduce
print('#1 map')
a_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]
# Double-and-increment each element (comprehension instead of map + list).
new_list = [value * 2 + 1 for value in a_list]
print(new_list)

print('\n#2 reduce')
b_list = [1, 2, 3, 4, 5]
# Fold the list into its sum with functools.reduce.
reduce_data = reduce(lambda acc, item: acc + item, b_list)
print(reduce_data)
|
4,759 | 8f7b1313ba31d761edcadac7b0d04b62f7af8dff | """Sherlock Tests
This package contains various submodules used to run tests.
"""
import sys
import os
import subprocess as sp
from time import sleep
# uncomment this if using nose
# Make the sibling ``sherlock`` package importable when tests run from here.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../sherlock')))
# import sherlock
4,760 | 7247ef463998f6738c21ad8efa988a32f7fb99c0 | from share_settings import Settings
import urllib.request,json
import pprint as p
s = Settings()
# Legacy Google Finance JSON endpoint, queried as <prefix><EXCHANGE>:<SYMBOL>.
# NOTE(review): this API was retired by Google — expect request failures.
prefix = "http://finance.google.com/finance?client=ig&output=json&q="

def get(symbol,exchange):
    """Fetch the latest quote string (the 'l' field) for symbol on exchange."""
    url = prefix+"%s:%s"%(exchange,symbol)
    u = urllib.request.urlopen(url)
    #translates url to string
    c = u.read().decode('utf-8')
    #slices string to remove characters at start/end of string
    con=(c[5:-2])
    #removes '\' from the text
    cont=con.replace("\\","")
    content = json.loads(cont)
    result = (content['l'])
    return result
def get_lp(s):
    """Return the latest LON-exchange price of every symbol in s.symbols as floats."""
    # Each quote comes back as a string like "1,234.50": drop the thousands
    # separator before converting.
    return [float(get(ticker, "LON").replace(',', '')) for ticker in s.symbols]
#print(get_lp(s)) |
4,761 | 35921b081e8e8c4da2b16afc20b27b636e9a6676 | import numpy
from scipy.optimize import OptimizeResult
from logging import getLogger
logger = getLogger(__name__)
def minimize_neldermead(func, x0, args=(), callback=None,
                        maxiter=None, maxfev=None, disp=False,
                        return_all=False, initial_simplex=None,
                        xatol=1e-4, fatol=1e-4, **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter, maxfev : int
        Maximum allowed number of iterations and function evaluations.
        Will default to ``N*200``, where ``N`` is the number of
        variables, if neither `maxiter` or `maxfev` is set. If both
        `maxiter` and `maxfev` are set, minimization will stop at the
        first reached.
    return_all : bool
        If True, the result carries an ``allvecs`` list with the best
        vertex of each iteration.
    initial_simplex : array_like of shape (N + 1, N)
        Initial simplex. If given, overrides `x0`.
        ``initial_simplex[j,:]`` should contain the coordinates of
        the j-th vertex of the ``N+1`` vertices in the simplex, where
        ``N`` is the dimension.
    xatol : float, optional
        Absolute error in xopt between iterations that is acceptable for
        convergence.
    fatol : number, optional
        Absolute error in func(xopt) between iterations that is acceptable for
        convergence.
    """
    maxfun = maxfev
    retall = return_all

    # Standard Nelder-Mead coefficients: reflection, expansion,
    # contraction and shrink factors.
    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    nonzdelt = 0.05
    zdelt = 0.00025

    if initial_simplex is None:
        # Build the initial simplex around x0: perturb one coordinate
        # per extra vertex.
        N = len(x0)
        sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
        sim[0] = x0
        for k in range(N):
            y = numpy.array(x0, copy=True)
            if y[k] != 0:
                y[k] = (1 + nonzdelt) * y[k]
            else:
                y[k] = zdelt
            sim[k + 1] = y
    else:
        # Fixed: this branch previously left `sim` and `N` undefined,
        # so passing initial_simplex raised a NameError.
        sim = numpy.atleast_2d(initial_simplex).copy()
        N = sim.shape[1]

    # Fixed: debug leftovers hard-coded maxiter = maxfun = 10, printed
    # garbage and raised an unconditional Exception, making the function
    # unusable. Restore the documented N*200 defaults instead.
    if maxiter is None:
        maxiter = N * 200
    if maxfun is None:
        maxfun = N * 200

    one2np1 = list(range(1, N + 1))

    # Evaluate the objective at every vertex and sort so sim[0,:] has the
    # lowest function value.
    fsim = numpy.zeros((N + 1,), float)
    for k in range(N + 1):
        fsim[k] = func(sim[k])
    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim, ind, 0)
    sim = numpy.take(sim, ind, 0)

    if retall:
        allvecs = [sim[0]]

    iterations = 1
    while iterations < maxiter:
        # Converged once the simplex is small in both x and f.
        if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and
                numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):
            break

        # Reflect the worst vertex through the centroid of the others.
        xbar = numpy.add.reduce(sim[:-1], 0) / N
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0

        if fxr < fsim[0]:
            # Reflection produced a new best point: try expanding further.
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)
            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)
                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)
                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1
            if doshrink:
                # Shrink every vertex towards the current best one.
                for j in one2np1:
                    sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                    fsim[j] = func(sim[j])

        ind = numpy.argsort(fsim)
        sim = numpy.take(sim, ind, 0)
        fsim = numpy.take(fsim, ind, 0)
        if callback is not None:
            callback(sim[0])
        iterations += 1
        if retall:
            allvecs.append(sim[0])

    x = sim[0]
    fval = numpy.min(fsim)
    warnflag = 0

    # NOTE(review): function evaluations are not counted, so nfev is
    # reported as 0 and `maxfun` is not enforced (as in the original).
    result = OptimizeResult(fun=fval, nit=iterations, nfev=0,
                            status=warnflag, success=(warnflag == 0),
                            message=None, x=x, final_simplex=(sim, fsim))
    if retall:
        result['allvecs'] = allvecs
    return result
|
4,762 | 9d07fd14825ed1e0210fa1f404939f68a3bb039c | import wizard
import report
|
4,763 | ea86a2a9068c316d3efcbcb165a8ef3d3516ba1b | from HurdleRace import hurdleRace
from ddt import ddt, data, unpack
import unittest
class test_AppendAndDelete3(unittest.TestCase):
    # NOTE(review): the class name looks copy-pasted from another exercise —
    # presumably it should reference HurdleRace; confirm before renaming.
    def test_hurdleRace(self):
        """Tallest hurdle is 6 with max jump k = 4, so the expected result is 2."""
        height = [1, 6, 3, 5, 2]
        k = 4
        sum_too_high = hurdleRace(k, height)
        self.assertEqual(2, sum_too_high)
|
4,764 | 52bb10e19c7a5645ca3cf91705b9b0affe75f570 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
class total_land_value_if_in_plan_type_group_SSS(Variable):
    """Sum of land values of locations if in plan_type_group SSS, 0 otherwise."""

    def __init__(self, group):
        # 'group' is the SSS part of the variable name (e.g. 'residential').
        self.group = group
        Variable.__init__(self)

    def dependencies(self):
        # Needs the 0/1 group-membership flag and the total land value.
        return [my_attribute_label("is_in_plan_type_group_%s" % self.group),
                my_attribute_label("total_land_value")]

    def compute(self, dataset_pool):
        # The membership flag is 0/1, so multiplying zeroes out non-members.
        return self.get_dataset().get_attribute("is_in_plan_type_group_%s" % self.group) * \
               self.get_dataset().get_attribute("total_land_value")

    def post_check(self, values, dataset_pool):
        # A product of a 0/1 flag and land values must be non-negative.
        self.do_check("x >= 0", values)
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
    def test_my_inputs(self):
        """Gridcells 1 and 3 are in the group, so only their land values survive."""
        total_land_value = array([100, 200, 300])
        is_in_plan_type_group_residential = array([1, 0, 1])
        tester = VariableTester(
            __file__,
            package_order=['urbansim'],
            test_data={
                "gridcell":{
                    "grid_id":array([1,2,3]),
                    "total_land_value":total_land_value,
                    "is_in_plan_type_group_residential":is_in_plan_type_group_residential
                }
            }
        )
        should_be = array([100, 0, 300])
        instance_name = "urbansim.gridcell.total_land_value_if_in_plan_type_group_residential"
        tester.test_is_equal_for_family_variable(self, should_be, instance_name)

if __name__=='__main__':
    opus_unittest.main()
4,765 | b240e328ee6c5677991d3166c7b00f1b3a51787e | import numpy as np
from matplotlib import pylab as plt
from os import listdir,path
from os.path import isfile,join,isdir
def get_files(directory_path):
    """Return (sorted .npy file names, count) found in *directory_path*.

    Only regular files whose name contains ".npy" are kept; the count is
    also echoed to stdout.
    """
    matches = sorted(
        entry for entry in listdir(directory_path)
        if isfile(join(directory_path, entry)) and ".npy" in entry
    )
    print("number of files=" + str(len(matches)))
    return matches, len(matches)
# Plot pedestal/noise histograms for every .npy capture in the HDR output
# directory.  Top axis: noise frames; bottom axis: signal frames.  Pixels
# are kept only where the matching "<prefix>BPMOnly.npy" bad-pixel map is
# positive.  NOTE(review): assumes the first 9 characters of each file name
# identify its BPM companion -- confirm the naming convention.
fig1,ax1=plt.subplots(2,1,figsize=(5,8))
ax1[0].set_yscale('log')
ax1[1].set_yscale('log')
dirpath="../output_HDR_noMask"
files, n_files=get_files(dirpath)
for j,file_iter in enumerate(files):
    print(j,file_iter)
    noisehist=False
    # Bad-pixel-map files are companions, not data to histogram.
    if("BPM" in file_iter):
        continue
    with open(dirpath+'/'+file_iter,'rb') as f:
        input_data=np.load(f)
    if "noise" in file_iter:
        noisehist=True
        print ("noise")
    print(file_iter[:9]+"BPMOnly.npy")
    with open(dirpath+'/'+file_iter[:9]+"BPMOnly.npy",'rb') as f:
        BPM_data=np.load(f)
    # Histogram only the pixels flagged good (BPM > 0).
    if(noisehist):
        histvals=ax1[0].hist(input_data[BPM_data>0].ravel(),bins=100,histtype='step',label=file_iter[:-4],density=True)
    else:
        histvals=ax1[1].hist(input_data[BPM_data>0].ravel(),bins=100,range=(-10,50),histtype='step',label=file_iter[:-4])
    # maxval= histvals[1][histvals[0].argmax()]
    # input_data=input_data/maxval
    # # plt.clf()
    # _=ax1.hist(input_data,bins=100,range=(-10,40),histtype='step')
ax1[0].set_xlim([-150,400])
# ax1.set_ylim([0.002,0.5])
ax1[0].set_xlabel('Pedestal subtracted ADC')
# ax1.set_xlabel('Sigma')
ax1[0].set_ylabel('Entries')
# ax1[1].set_ylim([1,40000])
# ax1.set_ylim([0.002,0.5])
ax1[1].set_xlabel('Signal/Noise')
# ax1.set_xlabel('Sigma')
ax1[1].set_ylabel('Entries')
# leg = ax1[0].legend(fancybox=True, loc='upper right')
# leg = ax1[1].legend(fancybox=True, loc='upper right')
plt.pause(0.01)
# Keep the figure window open until the user presses Enter.
input("pause")
4,766 | 3f4f396d1d18611e0248a08b42328422ca4b8146 | import copy
from typing import List, Optional, Tuple, NamedTuple, Union, Callable
import torch
from torch import Tensor
from torch_sparse import SparseTensor
import time
import torch_quiver as qv
from torch.distributed import rpc
def subgraph_nodes_n(nodes, i):
    """RPC stub for remote sub-graph extraction.

    Placeholder implementation: always reports an empty result as the
    tuple (row, col, edge_index).
    """
    return None, None, None


class Comm:
    """Minimal descriptor of this worker's position in the RPC group."""

    def __init__(self, rank, world_size):
        self.rank = rank
        self.world_size = world_size


# Short alias under which the stub is invoked via torch.distributed.rpc.
subgraph_nodes = subgraph_nodes_n
class EdgeIndex(NamedTuple):
    """Sampled edge block: the edge_index, originating edge ids, and size."""
    edge_index: Tensor
    e_id: Optional[Tensor]
    size: Tuple[int, int]

    def to(self, *args, **kwargs):
        # Only tensors move; `size` is plain Python data.
        moved_e_id = None if self.e_id is None else self.e_id.to(*args, **kwargs)
        return EdgeIndex(self.edge_index.to(*args, **kwargs), moved_e_id, self.size)
class Adj(NamedTuple):
    """Sampled adjacency block in transposed sparse form, plus edge ids."""
    adj_t: SparseTensor
    e_id: Optional[Tensor]
    size: Tuple[int, int]

    def to(self, *args, **kwargs):
        # Only tensors move; `size` is plain Python data.
        moved_e_id = None if self.e_id is None else self.e_id.to(*args, **kwargs)
        return Adj(self.adj_t.to(*args, **kwargs), moved_e_id, self.size)
class RandomIndexSampler(torch.utils.data.Sampler):
    """Partition node ids into `num_parts` random groups, one group per batch."""

    def __init__(self, num_nodes: int, num_parts: int, shuffle: bool = False):
        self.N = num_nodes
        self.num_parts = num_parts
        self.shuffle = shuffle
        self.n_ids = self.get_node_indices()

    def get_node_indices(self):
        # Give every node a random partition label, then gather the member
        # ids of each partition.
        labels = torch.randint(self.num_parts, (self.N, ), dtype=torch.long)
        return [
            (labels == part).nonzero(as_tuple=False).view(-1)
            for part in range(self.num_parts)
        ]

    def __iter__(self):
        if self.shuffle:
            # Re-randomize the partitioning at the start of each epoch.
            self.n_ids = self.get_node_indices()
        return iter(self.n_ids)

    def __len__(self):
        return self.num_parts
class distributeCudaRandomNodeSampler(torch.utils.data.DataLoader):
    r"""A data loader that randomly samples nodes within a graph and returns
    their induced subgraph.  Nodes owned by this worker are sampled on its
    GPU; nodes owned by other workers are fetched via torch.distributed.rpc.
    .. note::
        For an example of using :obj:`RandomNodeSampler`, see
        `examples/ogbn_proteins_deepgcn.py
        <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/
        ogbn_proteins_deepgcn.py>`_.
    Args:
        comm (Comm): rank / world-size descriptor for this worker.
        graph (tuple): ``(data, local2global, global2local, node2rank)``
            where the last three translate between global and per-worker
            node-id spaces.
        feature_func (callable): maps node ids to feature rows (stored but
            not used in this class).
        device (int): CUDA device ordinal used for local sampling.
        num_parts (int): The number of partitions.
        shuffle (bool, optional): If set to :obj:`True`, the data is reshuffled
            at every epoch (default: :obj:`False`).
        **kwargs (optional): Additional arguments of
            :class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.
    """
    def __init__(self, comm,
                 graph,
                 feature_func,
                 device,
                 num_parts: int,
                 shuffle: bool = False,
                 **kwargs):
        self.comm = comm
        # Unpack the partitioned graph and its id-translation tables.
        data, local2global, global2local, node2rank = graph
        self.local2global = local2global
        self.global2local = global2local
        self.node2rank = node2rank
        self.node_feature = feature_func
        self.cuda_device = torch.device('cuda:' + str(device))
        assert data.edge_index is not None
        self.N = N = data.num_nodes
        self.E = data.num_edges
        # Sparse adjacency whose `value` holds each edge's original index,
        # so sampled sub-edges can be traced back to the full graph.
        self.adj = SparseTensor(
            row=data.edge_index[0], col=data.edge_index[1],
            value=torch.arange(self.E, device=data.edge_index.device),
            sparse_sizes=(N, N)).to(self.cuda_device)
        # Keep a shallow copy without edge_index; edges are re-derived per
        # sampled batch in __collate__.
        self.data = copy.copy(data)
        self.data.edge_index = None
        super(distributeCudaRandomNodeSampler, self).__init__(
            self, batch_size=1,
            sampler=RandomIndexSampler(self.N, num_parts, shuffle),
            collate_fn=self.__collate__, **kwargs)
        # Out-degree per node, used by the SAINT subgraph kernel.
        self.deg_out = self.adj.storage.rowcount()
    def __getitem__(self, idx):
        # The loader iterates over this object itself: each "item" is the
        # sampler-provided index tensor, combined later in __collate__.
        return idx
    def __cuda_saint_subgraph__(
            self, node_idx: torch.Tensor) -> Tuple[SparseTensor, torch.Tensor]:
        """Build the subgraph induced by `node_idx`, merging local GPU
        results with per-rank RPC results.  Returns (SparseTensor, edge ids).
        """
        rows = []
        cols = []
        edge_indices = []
        # splite node idx
        ranks = self.node2rank(node_idx)
        local_nodes = None
        futures = []
        adj_row, adj_col, adj_value = self.adj.coo()
        adj_rowptr = self.adj.storage.rowptr()
        cpu = torch.device('cpu')
        for i in range(self.comm.world_size):
            # for every device check how many nodes on the device
            mask = torch.eq(ranks, i)
            part_nodes = torch.masked_select(node_idx, mask)
            # nodes as the the current, pointer ordered inputs to accumulate the partial nodes
            if part_nodes.size(0) >= 1:
                # if current server then local
                if i == self.comm.rank:
                    local_nodes = part_nodes
                    # Placeholder; replaced with the real local result below.
                    futures.append((torch.LongTensor([]), torch.LongTensor([]), torch.LongTensor([])))
                # remote server
                else:
                    futures.append(
                        rpc.rpc_async(f"worker{i}",
                                      subgraph_nodes,
                                      args=(part_nodes, 1),
                                      kwargs=None,
                                      timeout=-1.0))
            else:
                # Rank owns none of the requested nodes: empty contribution.
                futures.append((torch.LongTensor([]), torch.LongTensor([]), torch.LongTensor([])))
        # local server has nodes
        if local_nodes is not None:
            nodes = self.global2local(local_nodes)
            nodes = nodes.to(self.cuda_device)
            deg = torch.index_select(self.deg_out, 0, nodes)
            row, col, edge_index = qv.saint_subgraph(nodes, adj_rowptr, adj_row, adj_col, deg)
            row = row.to(cpu)
            col = col.to(cpu)
            edge_index = edge_index.to(cpu)
            futures[self.comm.rank] = row, col, edge_index
        # Collect contributions in rank order (tuples are already resolved,
        # Future objects must be waited on).
        for i in range(len(futures)):
            if not isinstance(futures[i], tuple):
                futures[i] = futures[i].wait()
            row, col, edge_index = futures[i]
            rows.append(row)
            cols.append(col)
            edge_indices.append(edge_index)
        ret_row = torch.cat(rows)
        ret_cols = torch.cat(cols)
        ret_edgeindex = torch.cat(edge_indices)
        # NOTE(review): if adj_value were ever None, `ret_vals` below would
        # be unbound (NameError).  Here adj_value is always the arange set
        # in __init__, so the branch is currently always taken.
        if adj_value is not None:
            ret_vals = adj_value[ret_edgeindex].to(cpu)
        out = SparseTensor(row = ret_row,
                           rowptr = None,
                           col= ret_cols,
                           value = ret_vals,
                           sparse_sizes=(node_idx.size(0), node_idx.size(0)),
                           is_sorted=False)
        return out, ret_edgeindex
    def __collate__(self, node_idx):
        """Turn one sampler batch (a node-id tensor) into a Data object
        holding the induced subgraph and the node/edge attribute slices.
        """
        node_idx = node_idx[0]
        data = self.data.__class__()
        data.num_nodes = node_idx.size(0)
        node_idx = node_idx.unique()
        adj, _ = self.__cuda_saint_subgraph__(node_idx)
        row, col, edge_idx = adj.coo()
        data.edge_index = torch.stack([row, col], dim=0)
        data.node_idx = node_idx
        data.train_mask = self.data.train_mask[node_idx]
        # Slice every stored attribute by node or edge dimension; copy
        # everything else through unchanged.
        for key, item in self.data:
            if isinstance(item, Tensor) and item.size(0) == self.N:
                data[key] = item[node_idx]
            elif isinstance(item, Tensor) and item.size(0) == self.E:
                data[key] = item[edge_idx]
            else:
                data[key] = item
        return data
4,767 | e488761c15ee8cddbb7577d5340ee9001193c1a4 | print(10-10)
# Demo of Python arithmetic operators on integers.
print(1000-80)
print(10/5)   # true division always yields a float (2.0)
print(10/6)
print(10//6)  # floor division: drops the decimal places
print(10*800)
print(55*5)
|
4,768 | dc9b5fbe082f7cf6cd0a9cb0d1b5a662cf3496f0 | from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import TemplateView, ListView, DetailView, CreateView, DeleteView, UpdateView
from .models import PayFor, PayItem
from .forms import SignupForm
# Create your views here.
def signup_func(request):
    """Register a new user.

    On POST, rejects the request when the username is already taken,
    otherwise creates the account and redirects to the login page.
    GET simply renders the signup form.
    """
    if request.method == 'POST':
        input_username = request.POST['username']
        input_password = request.POST['password']
        try:
            User.objects.get(username=input_username)
            return render(request, 'registration/signup.html', {'error': 'このユーザーは登録されています'})
        except User.DoesNotExist:
            # Only "user not found" means the name is free.  The previous
            # bare `except:` also swallowed database errors and would have
            # created a user on any failure.
            User.objects.create_user(input_username, '', input_password)
            return redirect('money_easy:login')
    return render(request, 'registration/signup.html', {})
def login_func(request):
    """Authenticate a user and start a session.

    POST with valid credentials redirects to the pay-item list; invalid
    credentials re-render the form with an error message.  GET shows the
    login form.
    """
    if request.method != 'POST':
        return render(request, 'registration/login.html')
    user = authenticate(
        request,
        username=request.POST['username'],
        password=request.POST['password'],
    )
    if user is None:
        return render(request, 'registration/login.html',
                      {'error': 'ユーザー名かパスワードが間違っています。もう一度入力してください。'})
    login(request, user)
    return redirect('money_easy:pay_item_list')
@login_required()
def logout_func(request):
    """End the current session and send the user back to the login page."""
    logout(request)
    return redirect('money_easy:login')
# class SignupView(CreateView):
# form_class = SignupForm
# success_url = reverse_lazy('home')
# template_name = 'registration/signup.html'
#
# def form_valid(self, form):
# # self.objectにsave()されたユーザーオブジェクトを格納
# valid = super().form_valid(form)
# login(self.request, self.object)
# return valid
class IndexView(LoginRequiredMixin, TemplateView):
    """Landing page (login required)."""
    template_name = 'money_easy/index.html'

index = IndexView.as_view()

class PayForList(LoginRequiredMixin,ListView):
    """List all payment purposes."""
    template_name = 'money_easy/payfor_list.html'
    model = PayFor

pay_for_list = PayForList.as_view()

class PayForDetailView(LoginRequiredMixin, DetailView):
    """Show one payment purpose."""
    template_name = 'money_easy/payfor_detail.html'
    model = PayFor

pay_for_detail = PayForDetailView.as_view()

class PayForCreate(LoginRequiredMixin,CreateView):
    """Create a payment purpose."""
    template_name = 'money_easy/payfor_create.html'
    model = PayFor
    fields = ('name', 'description')
    success_url = reverse_lazy('money_easy:pay_item_list')

pay_for_create = PayForCreate.as_view()

class PayForDelete(LoginRequiredMixin, DeleteView):
    """Confirm and delete a payment purpose."""
    template_name = 'money_easy/payfor_delete.html'
    model = PayFor
    success_url = reverse_lazy('money_easy:pay_for_list')

pay_for_delete = PayForDelete.as_view()

class PayForUpdate(LoginRequiredMixin, UpdateView):
    """Edit a payment purpose."""
    template_name = 'money_easy/payfor_update.html'
    model = PayFor
    fields = ('name', 'description')
    success_url = reverse_lazy('money_easy:pay_for_list')

pay_for_update = PayForUpdate.as_view()

class PayItemList(LoginRequiredMixin, ListView):
    """List all pay items."""
    template_name = 'money_easy/payitem_list.html'
    model = PayItem

pay_item_list = PayItemList.as_view()

# NOTE(review): PayForDetailView is defined a second time here, rebinding
# the class name used above.  Both views are identical; `pay_for_detail`
# and `payfor_detail` end up pointing at equivalent views.  Consider
# removing one definition (and updating URLconf references accordingly).
class PayForDetailView(LoginRequiredMixin, DetailView):
    template_name = 'money_easy/payfor_detail.html'
    model = PayFor

payfor_detail = PayForDetailView.as_view()

class PayItemDetailView(LoginRequiredMixin, DetailView):
    """Show one pay item."""
    template_name = 'money_easy/payitem_detail.html'
    model = PayItem
    def get_context_data(self, **kwargs):
        # Currently a pass-through; kept as an extension point.
        context = super().get_context_data(**kwargs)
        # context['priority'] = PayItem.get_priority_display()
        return context

pay_item_detail = PayItemDetailView.as_view()

class PayItemCreate(LoginRequiredMixin,CreateView):
    """Create a pay item."""
    template_name = 'money_easy/payitem_create.html'
    model = PayItem
    fields = ('title', 'payfor', 'money', 'rate', 'priority', 'duedate')
    success_url = reverse_lazy('money_easy:pay_item_list')

pay_item_create = PayItemCreate.as_view()

class PayItemDelete(LoginRequiredMixin, DeleteView):
    """Confirm and delete a pay item."""
    template_name = 'money_easy/payitem_delete.html'
    model = PayItem
    success_url = reverse_lazy('money_easy:pay_item_list')

pay_item_delete = PayItemDelete.as_view()

class PayItemUpdate(LoginRequiredMixin, UpdateView):
    """Edit a pay item."""
    template_name = 'money_easy/payitem_update.html'
    model = PayItem
    fields = ('title', 'payfor', 'money', 'rate', 'priority', 'duedate')
    success_url = reverse_lazy('money_easy:pay_item_list')

pay_item_update = PayItemUpdate.as_view()
# class LoginView(AuthLoginView):
# template_name = 'money_easy/login.html'
#
#
# login = LoginView.as_view()
# def hello(request):
# if request.method == 'GET':
# context = {
# 'message': 'Hello World',
# }
# return render(request, 'hello.html',context)
#
#
# class HelloView(View):
# def get(self, request, *args, **kwargs):
# context = {
# 'message': 'Hello World',
# }
# return render(request,'hello.html',context)
#
#
# hello = HelloView.as_view()
|
4,769 | 9e2485554a5a8de07dd3df39cc255f2a1ea2f164 | import numpy as np
# Set selected positions of a zero vector to one.
x = np.zeros(10)
idx = [1, 4, 5, 9]
x[idx] = 1  # fancy-index assignment, equivalent to np.put(x, ind=idx, v=1)
print(x)
4,770 | c81fde7fb5d63233c633b8e5353fe04477fef2af | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import urllib2
# Es importante agregar la variable de ambiente:
# export PYTHONIOENCODING='UTF-8'
# para redireccionar la salida std a un archivo.
def call(url):
    """GET *url* and decode the body as JSON (Python 2 / urllib2).

    On any failure, prints a diagnostic and terminates the process
    instead of propagating the error.
    """
    try:
        request = urllib2.Request(url)
        response = urllib2.urlopen(request)
        output = response.read()
        jout = json.loads(output, 'utf-8')
        return jout
    except Exception as e:
        # NOTE(review): broad catch plus sys.exit() hides the real cause
        # and never shows `e`; consider logging it and re-raising.
        print "Error en el llamado a la api con los parámetros:\n> url: %s"%(url)
        sys.exit()
def call_api(id, attr=""):
    """Fetch a MercadoLibre category by *id* and attach its attributes.

    Returns the category JSON dict with an extra 'attribute' key holding
    the /attributes payload.  NOTE(review): `attr` is accepted but unused;
    `id` shadows the builtin.
    """
    url = '%s%s'%('https://api.mercadolibre.com/categories/', id)
    response = call(url)
    response['attribute'] = call(url+'/attributes')
    return response
def process(jout):
    """Recursively expand 'children_categories' in place.

    Each child entry is replaced by its fully fetched (and itself
    expanded) category document.  Returns *jout* in both branches.
    """
    categories = jout.get('children_categories', '')
    if categories:
        # NOTE(review): `list` shadows the builtin; harmless here but
        # worth renaming if this block grows.
        list = [ process(call_api(category.get('id'))) for category in categories]
        jout['children_categories'] = list
        return jout
    else:
        return jout
if __name__ == '__main__':
    # Entry point: expand the category id given on the command line and
    # pretty-print the resulting tree as UTF-8 JSON (Python 2 `print`).
    response = call_api(sys.argv[1])
    jout = process(response)
    print json.dumps(jout, sort_keys=False, indent=4, separators=(',', ': '), encoding="utf-8", ensure_ascii=False)
4,771 | 8098b9c27689dd4168ef05c03d4ec00f67f8090e | # using python3
class Rational:
    """An exact fraction numer/denom supporting +, -, * and /.

    Results are intentionally NOT reduced to lowest terms, matching the
    original behaviour of callers that inspect numer/denom directly.
    """

    def __init__(self, numer, denom):
        """Store the fraction.

        Raises:
            ZeroDivisionError: if *denom* is 0.  Failing at construction
                is clearer than producing an invalid value that only
                misbehaves later (e.g. when dividing by a zero fraction).
        """
        if denom == 0:
            raise ZeroDivisionError("denominator must be non-zero")
        self.numer = numer
        self.denom = denom

    def __add__(self, other):
        # a/b + c/d = (a*d + c*b) / (b*d)
        return Rational(
            self.numer * other.denom + other.numer * self.denom,
            self.denom * other.denom
        )

    def __sub__(self, other):
        # a/b - c/d = (a*d - c*b) / (b*d)
        return Rational(
            self.numer * other.denom - other.numer * self.denom,
            self.denom * other.denom
        )

    def __mul__(self, other):
        return Rational(
            self.numer * other.numer,
            self.denom * other.denom
        )

    def __truediv__(self, other):
        # (a/b) / (c/d) = (a*d) / (b*c); the constructor now rejects the
        # resulting zero denominator when `other` is a zero fraction.
        return Rational(
            self.numer * other.denom,
            self.denom * other.numer
        )

    def __str__(self):
        return "{numer}/{denom}".format(
            numer=self.numer, denom=self.denom
        )

    def __repr__(self):
        return "Rational({numer}/{denom})".format(
            numer=self.numer, denom=self.denom
        )
|
4,772 | 66cdfdfa797c9991e5cb169c4b94a1e7041ca458 | from tornado import gen
import rethinkdb as r
from .connection import connection
from .utils import dump_cursor
@gen.coroutine
def get_promotion_keys():
    """Return every document in the 'promotion_keys' table as a list."""
    conn = yield connection()
    result = yield r.table('promotion_keys').run(conn)
    # Drain the RethinkDB cursor into a plain list.
    result = yield dump_cursor(result)
    return result
@gen.coroutine
def pop_promotion_key(promotion_key):
    """Delete *promotion_key* and return the removed document.

    Returns None when the key did not exist (no change was recorded).
    """
    conn = yield connection()
    result = yield r.table('promotion_keys').\
        get(promotion_key).delete(return_changes=True).run(conn)
    if result['changes']:
        return result['changes'][0]['old_val']
    return None
@gen.coroutine
def create_promotion_key(showtime_id):
    """Insert a promotion key for *showtime_id*; return its generated id."""
    conn = yield connection()
    data = {
        'showtime_id': showtime_id
    }
    result = yield r.table('promotion_keys').insert(data).run(conn)
    # RethinkDB returns the server-generated primary key here.
    promotion_key = result['generated_keys'][0]
    return promotion_key
|
4,773 | 2b8f4e0c86adfbf0d4ae57f32fa244eb088f2cee |
from locals import *
from random import choice, randint
import pygame
from gameobjects.vector2 import Vector2
from entity.block import Block
def loadImage(filename):
    """Load *filename* with per-pixel alpha, ready for fast blitting."""
    return pygame.image.load(filename).convert_alpha()
class MapGrid(object):
    """World-sized grid of tree Blocks (Python 2: uses `print` statements,
    `xrange`, and list-returning `map`).

    Edge cells always get a block (a solid border); interior cells get
    one with 5% probability.
    """
    def __init__(self, world):
        self.grid = []
        # Python 2 `map` returns a list, so `choice` below works directly.
        self.images = map(lambda f: loadImage("images/" + f), [
            "tree1.png",
            "tree2.png",
            "tree3.png",
            "tree4.png",
            "tree5.png",
            "tree6.png",
            "tree8.png",
            "tree9.png",
            "tree10.png"])
        print self.images
        for line_num in xrange(WORLD_SIZE[1]):
            line = []
            y = line_num * BLOCK_SIZE
            for cell in xrange(WORLD_SIZE[0]):
                # Border cells are always filled to fence the world in.
                on_edge = False
                if cell==0 or cell==WORLD_SIZE[0]-1:
                    on_edge = True
                if line_num==0 or line_num==WORLD_SIZE[1]-1:
                    on_edge = True
                if on_edge or randint(0, 99) < 5:
                    x = cell * BLOCK_SIZE
                    block = Block(world, choice(self.images))
                    image_size = block.image.get_size()
                    # Anchor the sprite at bottom-centre of the cell.
                    block.location = Vector2(x+image_size[0]/2, y+BLOCK_SIZE)
                    line.append(block)
                else:
                    line.append(None)
            self.grid.append(line)
    def getBlock(self, x, y):
        """Return the Block at grid cell (x, y), or None if empty/out of range."""
        if x<0 or x>=WORLD_SIZE[0] or y<0 or y>=WORLD_SIZE[1]:
            return None
        return self.grid[y][x]
    def render(self, line_num, surface, offset):
        """Draw row *line_num*, culling cells outside the visible window."""
        # Roughly 12 cells fit on screen starting near the camera offset.
        start_index = min(int(offset.x-64) / BLOCK_SIZE, WORLD_SIZE[0])
        start_index = max(0, start_index)
        end_index = min(start_index + 12, WORLD_SIZE[0])
        line = self.grid[line_num]
        for cell in xrange(start_index, end_index):
            if line[cell]:
                line[cell].render(surface, offset)
|
4,774 | ef04e808a2a0e6570b28ef06784322e0b2ca1f8f | import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
from numpy.testing import assert_array_almost_equal
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
def transform(x):
    """Map a seed-variety name to its integer class label."""
    labels = {'Kama': 0, 'Rosa': 1}
    # Anything other than Kama/Rosa (i.e. Canadian) falls into class 2.
    return labels.get(x, 2)
# Load the seeds dataset, project it to 3 principal components, and show
# the class clusters in a 3-D scatter plot.
original = pd.read_csv("seeds.csv")
original["Class"] = original["Class"].apply(lambda x: transform(x))
# `drop("Class", 1)` used the positional `axis` argument, removed in
# pandas 2.0; name the columns explicitly instead.
X = original.drop(columns="Class")
y = original["Class"]
fig = plt.figure(1, figsize=(4,3))
plt.clf()
# NOTE(review): direct Axes3D(fig, ...) construction is deprecated in
# modern matplotlib; fig.add_subplot(projection='3d') is the current API.
ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)
plt.cla()
pca = PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
# Annotate each class cluster at its centroid in PCA space.
for name, label in [('Kama', 0), ('Rosa', 1), ('Canadian', 2)]:
    ax.text3D(X[y == label, 0].mean(),
              X[y == label, 1].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results.
# (`np.float` was removed in NumPy 1.24; plain `float` is the alias.)
y = np.choose(y, [1, 2, 0]).astype(float)
# `plt.cm.spectral` was removed in matplotlib 2.2; 'Spectral' is the
# registered colormap.
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.Spectral,
           edgecolor='k')
# The `w_xaxis` accessors were removed in matplotlib 3.8; use the plain
# axis attributes.
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.zaxis.set_ticklabels([])
plt.show()

# pca = PCA(n_components=3)
# pca.fit(df)
# U, S, VT = np.linalg.svd(df - df.mean(0))
# #assert_array_almost_equal(VT[:6], pca.components_)
# X_train_pca = pca.transform(df)
# X_train_pca2 = (df - pca.mean_).dot(pca.components_.T)
# #assert_array_almost_equal(X_train_pca, X_train_pca2)
# X_projected = pca.inverse_transform(X_train_pca)
# X_projected2 = X_train_pca.dot(pca.components_) + pca.mean_
# #assert_array_almost_equal(X_projected, X_projected2)
# loss = ((df - X_projected) ** 2).mean()
# print(loss)
# sse_loss = np.sum((df-X_projected)**2)
# print(sse_loss)
# print(pca.components_)
# print(pca.explained_variance_ratio_)
# # loadings
# loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
# print(loadings)
# print(X_projected)
# print(len(X_projected))
# print(len(X_projected[0]))
# # We center the data and compute the sample covariance matrix.
# X_centered = df - np.mean(df, axis=0)
# cov_matrix = np.dot(X_centered.T, X_centered) / 569
# eigenvalues = pca.explained_variance_
# for eigenvalue, eigenvector in zip(eigenvalues, pca.components_):
#     print(np.dot(eigenvector.T, np.dot(cov_matrix, eigenvector)))
#     print(eigenvalue)
#np.savetxt("wdbc_ica.csv", X_projected, delimiter=",")
# print(pca)
# print(pca.explained_variance_ratio_)
# print(pca.singular_values_)
# print(len(pca.transform(df)))
# print(len(pca.transform(df)[0]))
4,775 | ee0cf2325c94821fa9f5115e8848c71143eabdbf |
from .plutotv_html import PlutoTV_HTML
class Plugin_OBJ():
    """fHDHR plugin entry point wiring the Pluto TV HTML origin."""

    def __init__(self, fhdhr, plugin_utils):
        # Keep references to the host application and plugin helpers, and
        # build the Pluto TV HTML wrapper they back.
        self.fhdhr = fhdhr
        self.plugin_utils = plugin_utils
        self.plutotv_html = PlutoTV_HTML(fhdhr, plugin_utils)
|
4,776 | 1c8622167240243da05a241e3630f79cdf36d7a8 | import pytest
import sys
sys.path.insert(0, '..')
from task_05 import task5
def test_mults():
    """task5.mults(a, b, limit): sum of multiples of a or b below limit."""
    # 3,5 under 10 -> 3+5+6+9 = 23; argument order must not matter.
    assert task5.mults(3, 5, 10) == 23
    assert task5.mults(5, 3, 10) == 23
    assert task5.mults(3, 2, 10) == 32
    assert task5.mults(7, 8, 50) == 364
|
4,777 | 3d7ca468a1f7aa1602bff22167e9550ad515fa79 | run=[] #Creating a empty list
no_players=int(input("enter the number of the players in the team :"))
# Collect one run total per player (prompts are 1-based for readability).
for i in range (no_players):
    run_score=int(input("Enter the runs scored by the player "+str(i+1)+":"))
    run.append(run_score)
#code for the average score of the team
def average(run):
    """Print the mean of all run totals in *run*."""
    print("____________________________________")
    # Built-in sum() replaces the manual accumulation loop.
    avg = sum(run) / len(run)
    print("Average score of the team is :", avg)
#code for the maximun runs scored by the players in the team
def high(run):
    """Print the highest run total in *run*."""
    print("______________________________________")
    # Built-in max() replaces the hand-rolled scan.
    print("Highest run score by the player is :", max(run))
#code for the minimum runs scored by the players in the team
def low(run):
    """Print the lowest run total in *run*."""
    print("____________________________________")
    # Built-in min() replaces the hand-rolled scan.
    print("Lowest runs scored by the player is :", min(run))
#code for the runs scored more than 50 runs in the the team
def check(run):
    """Print how many players scored 50 runs or more."""
    print("_______________________________________")
    # NOTE: the condition is >= 50 although the message says "more than
    # '50'" -- kept as-is to preserve the original behaviour/output.
    count = sum(1 for score in run if score >= 50)
    print("Count of the player score more than '50' are :", count)
#code for the runs scored for higher number of the frequency
def feq(run):
    """Print the run value occurring most often (first one wins on ties)."""
    print("___________________________________")
    max=0          # NOTE(review): shadows the builtin `max`
    result=run[0]
    for i in run:
        freq=run.count(i)   # O(n^2) overall; fine for small team sizes
        if freq>max:
            max=freq
            result=i
    print(f"run scored with the highest frequncy {result} is",max)
print("-------------'THANKYOU---------------")
# Report every statistic for the collected scores.
average(run)
high(run)
low(run)
check(run)
feq(run)
|
4,778 | 9aecf297ed36784d69e2be6fada31f7c1ac37500 | import nox
@nox.session(python=["3.9", "3.8", "3.7", "3.6"], venv_backend="conda", venv_params=["--use-local"])
def test(session):
    """Run the test suite in a conda venv for each supported Python.

    ``session.install()`` with no arguments raises ``ValueError`` in nox;
    the package under test must be named explicitly.
    """
    session.install(".")  # install the current project into the session venv
    session.run("pytest")
@nox.session(python=["3.9", "3.8", "3.7", "3.6"])
def lint(session):
    """Lint the code with flake8.
    """
    session.install("flake8")
    # The previous `session.run("flake8", "")` asked flake8 to check a
    # path literally named ""; lint the whole project instead.
    session.run("flake8", ".")
|
4,779 | bf7e3ddaf66f4c325d3f36c6b912b47f4ae22cba | """
Exercício 1 - Facebook
Você receberá uma lista de palavras e uma string . Escreva uma função que
decida quais palavras podem ser formadas com os caracteres da string (cada
caractere só pode ser utilizado uma vez). Retorne a soma do comprimento das
palavras escolhidas.
Exemplo 1:
"""
# words = ["cat", "bt", "hat", "tree"], chars = "atach"
# saída: 6
"""Explicação: As palavras que podem ser formadas com os caracteres da string
são "cat" (tamanho 3) e "hat" (tamanho 3)."""
"""Exemplo 2:"""
# words = ["hello", "world", "students"], chars = "welldonehoneyr"
# saída: 10
"""Explicação: As palavras que podem ser formadas com os caracteres da string
são "hello" (tamanho 5) e "world" (tamanho 5)."""
def count_words(words, chars):
    """Sum the lengths of the words buildable from *chars* (each char once)."""
    # Frequency table of the available characters.
    alphabet = {}
    for char in chars:
        alphabet[char] = alphabet.get(char, 0) + 1
    print(f"Montamos o alfabeto: {alphabet}")

    total = 0
    for word in words:
        print(f"Analisando a palavra {word}")
        str_count = {}
        for char in word:
            if char not in alphabet:
                print(f"'{char}' não está no alfabeto. Desconsiderar palavra")
                break
            str_count[char] = str_count.get(char, 0) + 1
            if str_count[char] > alphabet[char]:
                print(
                    f"'{char}' c/ + freq do que alfabeto. Desconsiderar"
                )
                break
        else:
            # Loop finished without break: every char fits the budget.
            print(f"Considerar {word}")
            total += len(word)
    return total
# Example 1: "cat" and "hat" fit -> 3 + 3 = 6 ("caaat" needs three 'a's).
words = ["cat", "bt", "hat", "tree", "caaat"]
chars = "atach"
print(f"Resposta final: {count_words(words, chars)}")
print()
# Example 2: "hello" and "world" fit -> 5 + 5 = 10.
words = ["hello", "world", "students"]
chars = "welldonehoneyr"
print(f"Resposta final: {count_words(words, chars)}")
|
4,780 | 74ad2ec2cd7cd683a773b0affde4ab0b150d74c5 | from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from .serializers import ConcertSerializer
from .models import Concert
from .permissions import IsOwnerOrReadOnly
class ConcertList(ListCreateAPIView):
    """GET: list all concerts; POST: create a new one."""
    queryset = Concert.objects.all()
    serializer_class = ConcertSerializer
class ConcertDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete one concert; writes restricted to its owner."""
    permission_classes = (IsOwnerOrReadOnly,)
    queryset = Concert.objects.all()
    serializer_class = ConcertSerializer
|
4,781 | 0b7e858eb6d4a5f3cf6aca4fea994dae9f889caa | from django.urls import path
from group import views
app_name = 'group'

# Creation endpoints for the three group-related models.
urlpatterns = [
    path('group/',views.CreateGroup.as_view(), name='group_create'),
    path('shift/',views.CreateShift.as_view(), name='shift_create'),
    path('subject/',views.createSubject.as_view(), name='subject_create'),
]
4,782 | 1573af9cdf4817acbe80031e22489386ea7899cf | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-12-01 16:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop `deleted_ad` from cpu/disk/ram and alter server's."""

    dependencies = [
        ('monitor', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='cpu',
            name='deleted_ad',
        ),
        migrations.RemoveField(
            model_name='disk',
            name='deleted_ad',
        ),
        migrations.RemoveField(
            model_name='ram',
            name='deleted_ad',
        ),
        # NOTE(review): DateTimeField(blank=True, default=None) without
        # null=True will fail on save with a NULL value -- confirm the
        # model declares null=True for server.deleted_ad.
        migrations.AlterField(
            model_name='server',
            name='deleted_ad',
            field=models.DateTimeField(blank=True, default=None),
        ),
    ]
4,783 | ca00091b7ebcb9ee45b77c919c458c75e3db5b1e | #!/usr/bin/python3
"""
Test of Rectangle class
"""
from contextlib import redirect_stdout
import io
import unittest
from random import randrange
from models.base import Base
from models.rectangle import Rectangle
from models.square import Square
class TestRectangle(unittest.TestCase):
""" Test Rectangle methods """
def setUp(self):
""" setUp """
Base._Base__nb_objects = 0
def tearDown(self):
""" tearDown destroys any existing objects and processes """
pass
def test_type(self):
""" Test type """
r1 = Rectangle(1, 2)
self.assertTrue(type(r1) is Rectangle)
def test_inheritance(self):
"""Tests if Rectangle inherits Base."""
self.assertTrue(issubclass(Rectangle, Base))
def test_constructor_no_args(self):
"""Tests constructor signature."""
with self.assertRaises(TypeError) as e:
r = Rectangle()
s = "__init__() missing 2 required positional arguments: 'width' \
and 'height'"
self.assertEqual(str(e.exception), s)
def test_constructor_many_args(self):
"""Tests constructor signature."""
with self.assertRaises(TypeError) as e:
r = Rectangle(1, 2, 3, 4, 5, 6)
s = "__init__() takes from 3 to 6 positional arguments but 7 were \
given"
self.assertEqual(str(e.exception), s)
def test_constructor_one_args(self):
"""Tests constructor signature."""
with self.assertRaises(TypeError) as e:
r = Rectangle(1)
s = "__init__() missing 1 required positional argument: 'height'"
self.assertEqual(str(e.exception), s)
def test_instantiation(self):
"""Tests instantiation."""
r = Rectangle(10, 20)
self.assertEqual(str(type(r)), "<class 'models.rectangle.Rectangle'>")
self.assertTrue(isinstance(r, Base))
d = {'_Rectangle__height': 20, '_Rectangle__width': 10,
'_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}
self.assertDictEqual(r.__dict__, d)
with self.assertRaises(TypeError) as e:
r = Rectangle("1", 2)
msg = "width must be an integer"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(TypeError) as e:
r = Rectangle(1, "2")
msg = "height must be an integer"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(TypeError) as e:
r = Rectangle(1, 2, "3")
msg = "x must be an integer"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(TypeError) as e:
r = Rectangle(1, 2, 3, "4")
msg = "y must be an integer"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(ValueError) as e:
r = Rectangle(-1, 2)
msg = "width must be > 0"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(ValueError) as e:
r = Rectangle(1, -2)
msg = "height must be > 0"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(ValueError) as e:
r = Rectangle(0, 2)
msg = "width must be > 0"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(ValueError) as e:
r = Rectangle(1, 0)
msg = "height must be > 0"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(ValueError) as e:
r = Rectangle(1, 2, -3)
msg = "x must be >= 0"
self.assertEqual(str(e.exception), msg)
with self.assertRaises(ValueError) as e:
r = Rectangle(1, 2, 3, -4)
msg = "y must be >= 0"
self.assertEqual(str(e.exception), msg)
def test_id_inherited(self):
"""Tests if id is inherited from Base."""
Base._Base__nb_objects = 98
r = Rectangle(2, 4)
self.assertEqual(r.id, 99)
# -- #
def test_validate_type(self):
"""Tests property validation."""
r = Rectangle(1, 2)
attributes = ["x", "y", "width", "height"]
t = (3.14, -1.1, float('inf'), float('-inf'), True, "str", (2,),
[4], {5}, {6: 7}, None)
for attribute in attributes:
s = "{} must be an integer".format(attribute)
for invalid_type in t:
with self.assertRaises(TypeError) as e:
setattr(r, attribute, invalid_type)
self.assertEqual(str(e.exception), s)
def test_validate_value_negative_gt(self):
"""Tests property validation."""
r = Rectangle(1, 2)
attributes = ["width", "height"]
for attribute in attributes:
s = "{} must be > 0".format(attribute)
with self.assertRaises(ValueError) as e:
setattr(r, attribute, -(randrange(10) + 1))
self.assertEqual(str(e.exception), s)
def test_validate_value_negative_ge(self):
"""Tests property validation."""
r = Rectangle(1, 2)
attributes = ["x", "y"]
for attribute in attributes:
s = "{} must be >= 0".format(attribute)
with self.assertRaises(ValueError) as e:
setattr(r, attribute, -(randrange(10) + 1))
self.assertEqual(str(e.exception), s)
def test_validate_value_zero(self):
"""Tests property validation."""
r = Rectangle(1, 2)
attributes = ["width", "height"]
for attribute in attributes:
s = "{} must be > 0".format(attribute)
with self.assertRaises(ValueError) as e:
setattr(r, attribute, 0)
self.assertEqual(str(e.exception), s)
def test_property(self):
"""Tests property setting/getting."""
r = Rectangle(1, 2)
attributes = ["x", "y", "width", "height"]
for attribute in attributes:
n = randrange(10) + 1
setattr(r, attribute, n)
self.assertEqual(getattr(r, attribute), n)
def test_property_range_zero(self):
"""Tests property setting/getting."""
r = Rectangle(1, 2)
r.x = 0
r.y = 0
self.assertEqual(r.x, 0)
self.assertEqual(r.y, 0)
def test_area_no_args(self):
"""Tests area() method signature."""
r = Rectangle(5, 6)
with self.assertRaises(TypeError) as e:
Rectangle.area()
s = "area() missing 1 required positional argument: 'self'"
self.assertEqual(str(e.exception), s)
def test_area(self):
"""Tests area() method compuation."""
r = Rectangle(5, 6)
self.assertEqual(r.area(), 30)
w = randrange(10) + 1
h = randrange(10) + 1
r.width = w
r.height = h
self.assertEqual(r.area(), w * h)
w = randrange(10) + 1
h = randrange(10) + 1
r = Rectangle(w, h, 7, 8, 9)
self.assertEqual(r.area(), w * h)
w = randrange(10) + 1
h = randrange(10) + 1
r = Rectangle(w, h, y=7, x=8, id=9)
self.assertEqual(r.area(), w * h)
def test_display_no_args(self):
"""Tests display() method signature."""
r = Rectangle(9, 8)
with self.assertRaises(TypeError) as e:
Rectangle.display()
s = "display() missing 1 required positional argument: 'self'"
self.assertEqual(str(e.exception), s)
def test_display_simple(self):
"""Tests display() method output."""
r = Rectangle(1, 1)
f = io.StringIO()
with redirect_stdout(f):
r.display()
s = "#\n"
self.assertEqual(f.getvalue(), s)
r.width = 2
r.height = 2
f = io.StringIO()
with redirect_stdout(f):
r.display()
s = "##\n##\n"
self.assertEqual(f.getvalue(), s)
r = Rectangle(2, 2, 2, 2)
f = io.StringIO()
with redirect_stdout(f):
r.display()
s = "\n\n ##\n ##\n"
self.assertEqual(f.getvalue(), s)
def test_K_str_no_args(self):
"""Tests __str__() method signature."""
r = Rectangle(5, 2)
with self.assertRaises(TypeError) as e:
Rectangle.__str__()
s = "__str__() missing 1 required positional argument: 'self'"
self.assertEqual(str(e.exception), s)
def test_K_str(self):
"""Tests __str__() method return."""
r = Rectangle(5, 2)
s = '[Rectangle] (1) 0/0 - 5/2'
self.assertEqual(str(r), s)
r = Rectangle(1, 1, 1)
s = '[Rectangle] (2) 1/0 - 1/1'
self.assertEqual(str(r), s)
r = Rectangle(3, 4, 5, 6)
s = '[Rectangle] (3) 5/6 - 3/4'
self.assertEqual(str(r), s)
Base._Base__nb_objects = 0
r1 = Rectangle(4, 6, 2, 1, 12)
self.assertEqual(str(r1), "[Rectangle] (12) 2/1 - 4/6")
r2 = Rectangle(5, 5, 1)
self.assertEqual(str(r2), "[Rectangle] (1) 1/0 - 5/5")
    def test_update_no_args(self):
        """Tests update() signature and that a bare update() is a no-op."""
        r = Rectangle(5, 2)
        with self.assertRaises(TypeError) as e:
            Rectangle.update()
        s = "update() missing 1 required positional argument: 'self'"
        self.assertEqual(str(e.exception), s)
        # update() with no arguments must leave all attributes untouched.
        d = r.__dict__.copy()
        r.update()
        self.assertEqual(r.__dict__, d)
    def test_update_args(self):
        """Tests update() positional args: order is id, width, height, x, y."""
        r = Rectangle(5, 2)
        d = r.__dict__.copy()
        r.update(10)
        d["id"] = 10
        self.assertEqual(r.__dict__, d)
        r.update(10, 5)
        d["_Rectangle__width"] = 5
        self.assertEqual(r.__dict__, d)
        r.update(10, 5, 17)
        d["_Rectangle__height"] = 17
        self.assertEqual(r.__dict__, d)
        r.update(10, 5, 17, 20)
        d["_Rectangle__x"] = 20
        self.assertEqual(r.__dict__, d)
        r.update(10, 5, 17, 20, 25)
        d["_Rectangle__y"] = 25
        self.assertEqual(r.__dict__, d)
    def test_update_args_bad(self):
        """Tests update() positional args with invalid values."""
        r = Rectangle(5, 2)
        d = r.__dict__.copy()
        r.update(10)
        d["id"] = 10
        self.assertEqual(r.__dict__, d)
        # Each setter's validation must fire with its documented message.
        with self.assertRaises(ValueError) as e:
            r.update(10, -5)
        s = "width must be > 0"
        self.assertEqual(str(e.exception), s)
        with self.assertRaises(ValueError) as e:
            r.update(10, 5, -17)
        s = "height must be > 0"
        self.assertEqual(str(e.exception), s)
        with self.assertRaises(ValueError) as e:
            r.update(10, 5, 17, -20)
        s = "x must be >= 0"
        self.assertEqual(str(e.exception), s)
        with self.assertRaises(ValueError) as e:
            r.update(10, 5, 17, 20, -25)
        s = "y must be >= 0"
        self.assertEqual(str(e.exception), s)
    def test_update_kwargs(self):
        """Tests update() keyword args, one attribute at a time."""
        r = Rectangle(5, 2)
        d = r.__dict__.copy()
        r.update(id=10)
        d["id"] = 10
        self.assertEqual(r.__dict__, d)
        r.update(width=5)
        d["_Rectangle__width"] = 5
        self.assertEqual(r.__dict__, d)
        r.update(height=17)
        d["_Rectangle__height"] = 17
        self.assertEqual(r.__dict__, d)
        r.update(x=20)
        d["_Rectangle__x"] = 20
        self.assertEqual(r.__dict__, d)
        r.update(y=25)
        d["_Rectangle__y"] = 25
        self.assertEqual(r.__dict__, d)
    def test_update_kwargs_2(self):
        """Tests update() keyword args in combination and in any order."""
        r = Rectangle(5, 2)
        d = r.__dict__.copy()
        r.update(id=10)
        d["id"] = 10
        self.assertEqual(r.__dict__, d)
        r.update(id=10, width=5)
        d["_Rectangle__width"] = 5
        self.assertEqual(r.__dict__, d)
        r.update(id=10, width=5, height=17)
        d["_Rectangle__height"] = 17
        self.assertEqual(r.__dict__, d)
        r.update(id=10, width=5, height=17, x=20)
        d["_Rectangle__x"] = 20
        self.assertEqual(r.__dict__, d)
        r.update(id=10, width=5, height=17, x=20, y=25)
        d["_Rectangle__y"] = 25
        self.assertEqual(r.__dict__, d)
        # Keyword order must not matter.
        r.update(y=25, id=10, height=17, x=20, width=5)
        self.assertEqual(r.__dict__, d)
        # Reset the auto-increment id counter, then verify via __str__.
        Base._Base__nb_objects = 0
        r1 = Rectangle(10, 10, 10, 10)
        self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/10")
        r1.update(height=1)
        self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/1")
        r1.update(width=1, x=2)
        self.assertEqual(str(r1), "[Rectangle] (1) 2/10 - 1/1")
        r1.update(y=1, width=2, x=3, id=89)
        self.assertEqual(str(r1), "[Rectangle] (89) 3/1 - 2/1")
        r1.update(x=1, height=2, y=3, width=4)
        self.assertEqual(str(r1), "[Rectangle] (89) 1/3 - 4/2")
        Base._Base__nb_objects = 0
        r1 = Rectangle(10, 10, 10, 10)
        self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/10")
        # Positional updates accumulate left-to-right: id, w, h, x, y.
        r1.update(89)
        self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 10/10")
        r1.update(89, 2)
        self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 2/10")
        r1.update(89, 2, 3)
        self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 2/3")
        r1.update(89, 2, 3, 4)
        self.assertEqual(str(r1), "[Rectangle] (89) 4/10 - 2/3")
        r1.update(89, 2, 3, 4, 5)
        self.assertEqual(str(r1), "[Rectangle] (89) 4/5 - 2/3")
    def test_to_dictionary(self):
        """Tests to_dictionary(): signature, contents, round-trip via update()."""
        with self.assertRaises(TypeError) as e:
            Rectangle.to_dictionary()
        s = "to_dictionary() missing 1 required positional argument: 'self'"
        self.assertEqual(str(e.exception), s)
        r = Rectangle(1, 2)
        d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}
        self.assertEqual(r.to_dictionary(), d)
        r = Rectangle(1, 2, 3, 4, 5)
        d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}
        self.assertEqual(r.to_dictionary(), d)
        r.x = 10
        r.y = 20
        r.width = 30
        r.height = 40
        d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}
        self.assertEqual(r.to_dictionary(), d)
        # The dict can rebuild an equal-looking (but distinct) instance.
        r1 = Rectangle(10, 2, 1, 9)
        r1_dictionary = r1.to_dictionary()
        r2 = Rectangle(1, 1)
        r2.update(**r1_dictionary)
        self.assertEqual(str(r1), str(r2))
        self.assertNotEqual(r1, r2)
|
4,784 | 80f9c4b7261a894aad2c738d976cfb8efc4d228c | import pyForp
import pprint
# Shared pretty-printer used to render the profiler dump below.
pp = pprint.PrettyPrinter(indent=4)
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Values below 2 are returned unchanged, matching the recursive base case.
    """
    if n < 2:
        return n
    prev, cur = 0, 1
    for _ in range(n - 1):
        prev, cur = cur, prev + cur
    return cur
# NOTE(review): Python 2 syntax (`print` statement) -- this script will not
# run under Python 3 without converting the print below to a function call.
# Profile a tiny recursive fib call and pretty-print the collected stats.
forp = pyForp.pyForp()
forp.start()
print fib(2)
forp.stop()
pp.pprint(forp.dump())
|
4,785 | 919239391c6f74d0d8627d3b851beb374eb11d25 | import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, BatchNormalization, LeakyReLU, Reshape, Conv2DTranspose
import tensorflow_hub as hub
from collections import Counter
import numpy as np
import sys
sys.path.append('../data')
from imageio import imwrite
import os
import argparse
from preprocessing import *
# this time, katherine is here T_TTTT
# Killing optional CPU driver warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
gpu_available = tf.test.is_gpu_available()
print("GPU Available: ", gpu_available)
# Global running score per label, mutated by DeepFont.total_accuracy.
performance_dict = {}
# Command-line flags (argparse); names/defaults inherited from the DCGAN
# skeleton -- several (z-dim, beta1, num-gen-updates) are unused here.
parser = argparse.ArgumentParser(description='DCGAN')
parser.add_argument('--img-dir', type=str, default='./data/celebA',
                    help='Data where training images live')
parser.add_argument('--out-dir', type=str, default='./output',
                    help='Data where sampled output images will be written')
parser.add_argument('--mode', type=str, default='train',
                    help='Can be "train" or "test"')
parser.add_argument('--restore-checkpoint', action='store_true',
                    help='Use this flag if you want to resuming training from a previously-saved checkpoint')
parser.add_argument('--z-dim', type=int, default=100,
                    help='Dimensionality of the latent space')
parser.add_argument('--batch-size', type=int, default=128,
                    help='Sizes of image batches fed through the network')
parser.add_argument('--num-data-threads', type=int, default=2,
                    help='Number of threads to use when loading & pre-processing training images')
parser.add_argument('--num-epochs', type=int, default=10,
                    help='Number of passes through the training data to make before stopping')
parser.add_argument('--learn-rate', type=float, default=0.0002,
                    help='Learning rate for Adam optimizer')
parser.add_argument('--beta1', type=float, default=0.5,
                    help='"beta1" parameter for Adam optimizer')
parser.add_argument('--num-gen-updates', type=int, default=2,
                    help='Number of generator updates per discriminator update')
parser.add_argument('--log-every', type=int, default=7,
                    help='Print losses after every [this many] training iterations')
parser.add_argument('--save-every', type=int, default=500,
                    help='Save the state of the network after every [this many] training iterations')
parser.add_argument('--device', type=str, default='GPU:0' if gpu_available else 'CPU:0',
                    help='specific the device of computation eg. CPU:0, GPU:0, GPU:1, GPU:2, ... ')
args = parser.parse_args()
class DeepFont(tf.keras.Model):
    """CNN font classifier over 96x96 grayscale crops, 150 font classes.

    Two frozen (trainable=False) conv blocks act as a fixed feature
    extractor; three trainable convs plus dense layers produce a softmax
    over the 150 classes.
    """

    def __init__(self):
        super(DeepFont, self).__init__()
        self.batch_size = 128
        self.model = tf.keras.Sequential()
        self.model.add(tf.keras.layers.Reshape((96, 96, 1)))
        self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=64, strides=(2,2), kernel_size=(3,3), padding='same', name='conv_layer1', input_shape=(96, 96,1)))
        self.model.add(tf.keras.layers.BatchNormalization())
        self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same'))
        self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=128, strides=(1,1), kernel_size=(3,3), padding='same', name='conv_layer2'))
        self.model.add(tf.keras.layers.BatchNormalization())
        self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same'))
        self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same'))
        self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same'))
        self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same'))
        self.model.add(tf.keras.layers.Flatten())
        self.model.add(tf.keras.layers.Dense(512, activation='relu'))
        self.model.add(tf.keras.layers.Dense(512, activation='relu'))
        self.model.add(tf.keras.layers.Dense(150, activation='softmax'))
        self.optimizer = tf.keras.optimizers.Adam(learning_rate = 0.01)

    def call(self, inputs):
        """ input: batch of preprocessed 96x96 images
            output: probabilities for each batch image and its classification distribution
            Runs the model on a batch of inputs.
        """
        return self.model(inputs)

    def loss_function(self, probs, labels):
        """ input: probs - probabilities generated by the model
                   labels - true labels for every image
            output: return mean loss of the batch being processed
            Uses sparse categorical crossentropy loss.
        """
        loss = tf.keras.losses.sparse_categorical_crossentropy(labels, probs)
        return tf.reduce_mean(loss)

    def total_accuracy(self, probs, labels):
        """ input: probs - batch of probs (batch size x 150)
                   labels - batch of true labels for images (batch size,)
            output: the fraction of correctly-classified images in the batch
            NOTE: despite the variable name `top_five`, only the single
            highest-probability class is kept ([:, -1:]), so this is
            top-1 accuracy.  Also updates the global `performance_dict`
            running score per label (+1 correct / -1 incorrect).
        """
        acc = 0
        top_five = np.argsort(probs, axis = 1) # ascending; last col = argmax
        top_five = np.array(top_five).reshape((self.batch_size, 150))
        top_five = top_five[:, -1:] # keep only the top-1 prediction
        for i in range (len(labels)):
            if labels[i] not in performance_dict:
                performance_dict[labels[i]] = 0
            if labels[i] in top_five[i]:
                acc += 1
                performance_dict[labels[i]] += 1
            else:
                performance_dict[labels[i]] -= 1
        return (acc / float(self.batch_size))

    def get_top_five(self, predictions):
        """ input: predictions - probs generated by the model for the crops
                   of a single image (num_crops x 150)
            output: names of the 5 font families with the highest summed
                    probability across all crops
        """
        # Local import: `json` is used below but was never imported at the
        # top of this file, which made this method raise NameError.
        import json
        predictions = np.sum(predictions, axis = 0) # sum over crops -> (150,)
        top_five = np.argsort(predictions, axis = 0)
        top_five = np.array(top_five)
        top_five = top_five[-5:]
        with open('150_fonts_backwards.json') as json_file:
            font_subset = json.load(json_file)
        top_five_fonts = []
        for num in top_five:
            top_five_fonts.append(font_subset[str(num)])
        return top_five_fonts
def train(model, train_inputs, train_labels):
    """ input: train_inputs - batch of training images
               train_labels - batch of training labels
        output: none
        Trains the model for one pass over the data, in model.batch_size
        chunks (any trailing partial batch is dropped by the floor division).
    """
    average_loss = 0
    num_batches = len(train_inputs)//model.batch_size
    for i in range(num_batches):
        with tf.GradientTape() as tape:
            temp_inputs = train_inputs[i*model.batch_size:(i+1)*model.batch_size]
            temp_train_labels = train_labels[i*model.batch_size:(i+1)*model.batch_size]
            predictions = model.call(temp_inputs)
            loss = model.loss_function(predictions, temp_train_labels)
            average_loss += loss
            if i % 1000 == 0:
                print("---Batch", i, " Loss: ", loss)
        # Gradient step outside the tape context, once per batch.
        gradients = tape.gradient(loss, model.trainable_variables)
        model.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    print("****AVERAGE LOSS: ", average_loss / float(num_batches))
def test(model, test_inputs, test_labels):
    """ input: test_inputs - batch of testing images
               test_labels - batch of testing labels
        output: accuracy across the entire set of batches
        Tests the training inputs against the model's prediction of what font
        class it thinks each training image belongs to.
    """
    num_batches = len(test_inputs) // (model.batch_size)
    acc = 0
    for i in range(num_batches):
        batch_inputs = test_inputs[i * model.batch_size: (i+1) * model.batch_size]
        batch_labels = test_labels[i * model.batch_size: (i+1) * model.batch_size]
        batch_inputs = np.array(batch_inputs)
        batch_labels = np.array(batch_labels)
        predictions = model.call(batch_inputs) # prediction for a single batch
        batch_accuracy = model.total_accuracy(predictions, batch_labels)
        if i % 100 == 0:
            print("batch accuracy", batch_accuracy)
        acc += batch_accuracy
    # Mean of per-batch accuracies; batches are equal-sized so this equals
    # the overall accuracy over the batches processed.
    average_accuracy = acc / float(num_batches)
    return average_accuracy
def test_single_img(model, image_path):
    """ input: image_path - the image path of whatever image file you would like to test
        output: none
        Prints the top 5 fonts the model predicts for a particular image,
        by averaging predictions over several 96x96 crops of it.
        NOTE(review): relies on alter_image/resize_image/generate_crop from
        `preprocessing` (star import above).
    """
    crops = []
    image = alter_image(image_path)
    image = resize_image(image, 96)
    cropped_images = generate_crop(image, 96, 10)
    for c in cropped_images:
        crops.append(c)
    predictions = model.call(crops) # one prediction row per crop
    print(predictions.shape)
    top_5 = model.get_top_five(predictions)
    print(top_5)
## --------------------------------------------------------------------------------------
def main():
    """Entry point: builds the model, restores weights/checkpoints, then
    dispatches on args.mode ('train', 'test', or 'single_img')."""
    model = DeepFont()
    model.load_weights('weights_leaky_relu.h5', by_name=True)
    # For saving/loading models
    checkpoint_dir = './checkpoints_df'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(model = model)
    manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3)
    # Ensure the output directory exists
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    if args.restore_checkpoint or args.mode == 'test' or args.mode == 'single_img':
        # Restore the latest checkpoint from the manager before evaluating.
        print("Running test mode...")
        checkpoint.restore(manager.latest_checkpoint)
    try:
        # Pin computation to the requested device (e.g. CPU:0 / GPU:0).
        with tf.device('/device:' + args.device):
            if args.mode == 'train':
                train_inputs, train_labels = get_train_df('./shuffled_train_inputs.hdf5', './shuffled_train_labels.hdf5')
                for epoch in range(0, args.num_epochs):
                    print('========================== EPOCH %d ==========================' % epoch)
                    train(model, train_inputs, train_labels)
                    # Save at the end of the epoch, too
                    print("**** SAVING CHECKPOINT AT END OF EPOCH ****")
                    manager.save()
            if args.mode == 'test':
                test_inputs, test_labels = get_test_df("./combined_test_inputs.hdf5", "./combined_test_labels.hdf5")
                print("--test accuracy--", test(model, test_inputs, test_labels))
            if args.mode == "single_img":
                test_single_img(model, './0.png')
    except RuntimeError as e:
        # e.g. an invalid device string.
        print(e)

if __name__ == '__main__':
    main()
|
4,786 | 515c14fcf2c3e9da31f6aba4b49296b18f04f262 |
#! /usr/bin/env python
def see_great_place_about_large_man(str_arg):
    """Echo *str_arg* on one line, then the literal 'own_case' on the next."""
    own_day_and_last_person(str_arg)
    print('own_case')


def own_day_and_last_person(str_arg):
    """Print the given argument to stdout."""
    print('%s' % (str_arg,))
if __name__ == '__main__':
    # Demo entry point: prints the argument and then 'own_case'.
    see_great_place_about_large_man('use_work_of_next_way')
|
4,787 | 496c58e68d3ac78a3eb1272d61ca3603c5d843b6 | """
# listbinmin.py
# Sam Connolly 04/03/2013
#===============================================================================
# bin data according a given column in an ascii file of column data, such that
# each bin has a minimum number of points, giving the bin of each data point as
# a LIST. UNEVEN BINS.
#===============================================================================
"""
# Import packages
import numpy as np
#================ PARAMETERS ===================================================
# read variables
header = 0 # number of header lines to ignore
outdata = [1, 3] # column numbers to output
bincolumn = 3 # column to bin along
errorcolumn = 4
binmin = 18
# File routes
route = "/disks/raid/raid1/xray/raid/sdc1g08/NetData/ngc1365/"\
        + "lightcurve/refinedCounts/"
# file name
infilename = "NGC1365_lcurve_4_0.5-10keV.qdp"
# Save output?
# histogram of binning?
hist = True
# Save output?
save = True
savefile = "binsout.dat"
# NOTE(review): the "spectra/" suffix below is commented out (the backslash
# continuation joins it as a comment), so output goes to the base directory.
outroute = "/disks/raid/raid1/xray/raid/sdc1g08/NetData/ngc1365/"\
#+ "spectra/"
# Highlighted lightcurve?
lc = True
timecolumn = 2
labels = False
#==================== Load data ================================================
# create file routes
location = route+infilename
savelocation = outroute+savefile
# read data intoarray
start = 0
infile= open(location, 'r')
for line in infile:
    linedata = line.split()
    if start == header:
        columns = len(linedata)
        data = [[] for x in range(columns)]
    if start >= header:
        for column in range(columns):
            # Skip short/malformed rows.
            if len(linedata) == columns:
                data[column].append(float(linedata[column]))
    start += 1
infile.close()
# Convert 1-based user column numbers to 0-based indices.
# NOTE(review): `timecolumn` above is NOT decremented -- confirm whether it
# is meant to be 1-based like the others (possible off-by-one in the plots).
outdata = np.array(outdata)
outdata -= 1
bincolumn -= 1
errorcolumn -= 1
#========================= Sort ================================================
# Insertion sort of row indices, ascending by the value in `bincolumn`:
# `sortindex[i]` is the index of the i-th smallest bin-column value.
start = True
for index in range(len(data[0])):
    if start == True:
        sortindex = [index]
        start = False
    else:
        i = 0
        if data[bincolumn][index] < data[bincolumn][sortindex[-1]]:
            # Scan forward to the first element >= the new value.
            while data[bincolumn][index] > data[bincolumn][sortindex[i]]:
                i += 1
            sortindex.insert(i,index)
        else:
            sortindex.append(index)
#======================== Bin ==================================================
# Group the sorted indices into consecutive bins of `binmin` points each
# (the final bin may be smaller).
#
# NOTE(review): the original code here had two defects: the `total += ...`
# line was missing its closing bracket (SyntaxError), and it referenced an
# undefined name `countscolumn` (NameError).  The binned column is used as
# the counts column below -- TODO: confirm which column was intended.
countscolumn = bincolumn
bins = []
for index in np.arange(0, int(len(sortindex)), binmin):
    # this = [member indices, bin min, bin max, bin centre,
    #         sum of squared errors, summed counts]
    this = [[], 0, 0, 0, 0, 0]
    err = []
    total = 0
    for i in range(binmin):
        if index + i <= len(sortindex) - 1:
            this[0].append(sortindex[index + i])
            err.append(data[errorcolumn][sortindex[index + i]])
            total += data[countscolumn][sortindex[index + i]]
    this[1] = data[bincolumn][sortindex[index]]  # bin min
    if index + binmin - 1 <= len(sortindex) - 1:  # bin max
        this[2] = data[bincolumn][sortindex[index + binmin - 1]]
    else:
        this[2] = max(data[bincolumn])
    this[3] = (this[2] + this[1]) / 2.0
    err = np.array(err)
    # Quadrature sum WITHOUT the square root -- presumably the combined
    # error squared; confirm before quoting as an uncertainty.
    this[4] = sum(err**2)
    this[5] = total
    bins.append(this)
# NOTE(review): Python 2 print statements throughout this section.
print bins
#======================== print output =========================================
if save == True:
    out = open(savelocation,'w')
for b in range(len(bins)):
    low = bins[b][1]
    high = bins[b][2]
    mid = bins[b][3]
    errs = bins[b][4]
    print low, " >= x > ", high, " ==> ",mid, ' +/- ', errs , " :\n"
    if save == True:
        out.write(str(low) + " >=x> " + str(high) + " :\n")
    # Tab-separated requested output columns for every point in the bin.
    output = ''
    for index in bins[b][0]:
        for dat in outdata:
            if dat != bincolumn:
                output = output + str(data[dat][index]) + '\t'
    print output, "\n"
    if save == True:
        out.write(output+"\n")
print "number of bins: ", len(bins)
if save == True:
    out.write("number of bins: " + str(len(bins)))
# NOTE(review): unconditional close -- raises NameError if save is False,
# since `out` is only opened when save == True.
out.close()
# plots
# NOTE(review): `plt` is used below but matplotlib.pyplot is never imported
# in this file -- this whole section raises NameError at runtime as-is.
nplots = 0
if hist:
    nplots += 1
if lc:
    nplots += 1
if nplots == 1:
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
if nplots == 2:
    fig = plt.figure()
# histogram
if hist:
    if nplots == 2:
        ax = fig.add_subplot(1,2,1)
    edges = []
    counts = []
    widths = []
    for b in range(len(bins)):
        edges.append(bins[b][1])
        counts.append(bins[b][2])
        # Width from the span of indices; IndexError on the last bin falls
        # back to the bin's own first/last members.
        try:
            widths.append(data[bincolumn][bins[b+1][0][0]]-\
                                data[bincolumn][bins[b][0][0]])
        except IndexError:
            widths.append(data[bincolumn][bins[b][0][-1]]-\
                                data[bincolumn][bins[b][0][0]])
    plt.bar(edges,counts,widths)
# highlighted lightcurve
if lc:
    if nplots == 2:
        ax = fig.add_subplot(1,2,2)
    plt.scatter(data[timecolumn],data[bincolumn])
    for b in range(len(bins)):
        try:
            plt.axhspan(data[bincolumn][bins[b][0][0]], \
                    data[bincolumn][bins[b+1][0][0]],alpha = 0.3)
        except IndexError:
            plt.axhspan(data[bincolumn][bins[b][0][0]], \
                    data[bincolumn][bins[b][0][-1]],alpha = 0.3)
    if labels:
        for index in range(len(data[0])):
            plt.annotate(data[-1][index],
                xy = (data[timecolumn][index],data[bincolumn][index]),
                xytext = (-20,20),
                textcoords = 'offset points', ha = 'right', va = 'bottom',
                bbox = dict(boxstyle = 'round,pad=0.5', fc = 'blue',
                alpha = 0.5), arrowprops = dict(arrowstyle = '->',
                connectionstyle = 'arc3,rad=0'))
if nplots > 0:
    plt.show()
|
4,788 | b6b8dfaa9644fa4f4c250358b89f4a30c26c317f | import sqlite3
if __name__ == '__main__':
    # (Re)create the donations schema from scratch: drop and rebuild the
    # factions -> members -> bank tables (FK chain in that order).
    conn = sqlite3.connect('donations.sqlite')
    c = conn.cursor()
    query = """DROP TABLE IF EXISTS factions;"""
    c.execute(query)
    query = """DROP TABLE IF EXISTS members;"""
    c.execute(query)
    query = """DROP TABLE IF EXISTS bank;"""
    c.execute(query)
    conn.commit()
    query = """CREATE TABLE factions(
            id INTEGER PRIMARY KEY,
            faction INTEGER UNIQUE,
            faction_name TEXT);"""
    c.execute(query)
    conn.commit()
    query = """CREATE TABLE members(
            id INTEGER PRIMARY KEY,
            member INTEGER UNIQUE,
            member_name TEXT,
            faction INTEGER,
            FOREIGN KEY(faction) REFERENCES factions(faction));"""
    c.execute(query)
    conn.commit()
    query = """CREATE TABLE bank(
            id INTEGER PRIMARY KEY,
            stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
            member INTEGER UNIQUE,
            money_balance INTEGER,
            point_balance INTEGER,
            FOREIGN KEY (member) REFERENCES members(member));"""
    c.execute(query)
    conn.commit()
|
4,789 | 576bb15ad081cd368265c98875be5d032cdafd22 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 Matt Menzenski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import unicode_literals
def fuzzy_match_simple(pattern, instring):
    """Return True if each character in pattern is found in order in instring.

    Matching is case-insensitive.  An empty pattern or an empty instring
    never matches.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :return: True if there is a match, False otherwise
    :rtype: ``bool``
    """
    if not pattern or not instring:
        return False
    # Shared iterator: each pattern character consumes instring from where
    # the previous one left off, giving the in-order subsequence test.
    remaining = iter(instring)
    return all(
        any(p_char.lower() == s_char.lower() for s_char in remaining)
        for p_char in pattern
    )
def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,
                lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):
    """Return match boolean and match score.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :param int adj_bonus: bonus for adjacent matches
    :param int sep_bonus: bonus if match occurs after a separator
    :param int camel_bonus: bonus if match is uppercase
    :param int lead_penalty: penalty applied for each letter before 1st match
    :param int max_lead_penalty: maximum total ``lead_penalty``
    :param int unmatched_penalty: penalty for each unmatched letter
    :return: 2-tuple with match truthiness at idx 0 and score at idx 1
    :rtype: ``tuple``
    """
    score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)
    prev_match, prev_lower = False, False
    prev_sep = True                 # so that matching first letter gets sep_bonus
    # "best letter": a tentatively-matched instring char whose score may
    # still be beaten by a later occurrence of the same pattern char.
    best_letter, best_lower, best_letter_idx = None, None, None
    best_letter_score = 0
    matched_indices = []
    while s_idx != s_len:
        p_char = pattern[p_idx] if (p_idx != p_len) else None
        s_char = instring[s_idx]
        p_lower = p_char.lower() if p_char else None
        s_lower, s_upper = s_char.lower(), s_char.upper()
        next_match = p_char and p_lower == s_lower
        rematch = best_letter and best_lower == s_lower
        advanced = next_match and best_letter
        p_repeat = best_letter and p_char and best_lower == p_lower
        if advanced or p_repeat:
            # Commit the pending best letter before moving on.
            score += best_letter_score
            matched_indices.append(best_letter_idx)
            best_letter, best_lower, best_letter_idx = None, None, None
            best_letter_score = 0
        if next_match or rematch:
            new_score = 0
            # apply penalty for each letter before the first match
            # using max because penalties are negative (so max = smallest)
            if p_idx == 0:
                score += max(s_idx * lead_penalty, max_lead_penalty)
            # apply bonus for consecutive matches
            if prev_match:
                new_score += adj_bonus
            # apply bonus for matches after a separator
            if prev_sep:
                new_score += sep_bonus
            # apply bonus across camelCase boundaries
            if prev_lower and s_char == s_upper and s_lower != s_upper:
                new_score += camel_bonus
            # update pattern index iff the next pattern letter was matched
            if next_match:
                p_idx += 1
            # update best letter match (may be next or rematch)
            if new_score >= best_letter_score:
                # apply penalty for now-skipped letter
                if best_letter is not None:
                    score += unmatched_penalty
                best_letter = s_char
                best_lower = best_letter.lower()
                best_letter_idx = s_idx
                best_letter_score = new_score
            prev_match = True
        else:
            score += unmatched_penalty
            prev_match = False
        prev_lower = s_char == s_lower and s_lower != s_upper
        prev_sep = s_char in '_ '
        s_idx += 1
    # Commit any still-pending best letter at the end of the string.
    if best_letter:
        score += best_letter_score
        matched_indices.append(best_letter_idx)
    return p_idx == p_len, score |
4,790 | 47b2857ac20e46897cc1f64371868ce5174799d6 | from flask_wtf import FlaskForm
from wtforms import (
StringField, TextAreaField, PasswordField, HiddenField)
from wtforms.fields.html5 import URLField, EmailField
from flask_wtf.file import FileField
from wtforms.validators import (
InputRequired, Length, Email,
Optional, URL, ValidationError, Regexp)
from models import User
from flask import g
class UserBaseForm(FlaskForm):
    """Shared email + username fields inherited by the user forms below."""
    email = EmailField("Email", validators=[
        InputRequired(message="Email cannot be blank."),
        Length(min=5, max=320),
        Email(check_deliverability=True,
              message="Invalid Email address")])
    username = StringField("Username", validators=[
        InputRequired(message="Username cannot be blank."),
        Length(min=2, max=30)])
class AddUserForm(UserBaseForm):
    """Sign-up form: UserBaseForm plus a validated password and

    uniqueness checks for email and username.
    """
    password = PasswordField("Password", validators=[
        InputRequired(message="Password cannot be blank."),
        Length(min=8, max=60),
        # Raw string: "\d" in a plain string literal is an invalid escape
        # sequence (DeprecationWarning).  NOTE(review): the pattern requires
        # a letter (either case), a digit and a special character -- it does
        # NOT enforce the separate upper- AND lower-case letters promised in
        # the description; confirm which behaviour is intended.
        Regexp(r"^(?=.*[A-Za-z])(?=.*\d)(?=.*[$@$!%*#?&])[A-Za-z\d$@$!%*#?&]{8,}$", message='Please match the given requirements for password.')], # noqa e501
        description="Minimum one each - uppercase letter, lowercase letter, number, special character.") # noqa e501

    def validate_email(form, field):
        """Make sure email not in use."""
        if User.query.filter_by(email=form.email.data).first():
            form.email.errors.append(
                "Email already associated with account!")
            raise ValidationError

    def validate_username(form, field):
        """Make sure username not in use."""
        if User.query.filter_by(username=form.username.data).first():
            form.username.errors.append("Username already taken!")
            raise ValidationError
class EditUserForm(UserBaseForm):
    """Edit User Form: optional profile fields plus email/username checks
    that allow the current user (g.user) to keep their own values."""
    avatar_url = URLField("Avatar Image URL", validators=[
        Length(min=6, max=255), Optional()],
        description="Online image address")
    banner_url = URLField("Banner Image URL", validators=[
        Length(min=6, max=255), Optional()],
        description="Online image address")
    byline = StringField("User Byline", validators=[
        Length(min=2, max=200), Optional()],
        description="A short snippet shown under your username")
    bio = TextAreaField("User Bio", validators=[
        Length(min=2, max=500), Optional()],
        description="500 character max")
    city = StringField("City", validators=[Length(min=2, max=50), Optional()])
    state = StringField("State", validators=[
        Length(min=2, max=50), Optional()])
    country = StringField("Country", validators=[
        Length(min=2, max=50), Optional()])

    def validate_email(form, field):
        """Make sure email is not in use
        unless it's the current user's email."""
        user = User.query.filter_by(email=form.email.data).first()
        if user and not user == g.user:
            # Prepend so this message appears before other field errors.
            form.email.errors = [
                "Email already associated with account!",
                *form.email.errors
            ]
            raise ValidationError

    def validate_username(form, field):
        """Make sure username is not in use
        unless it's the current user's username."""
        user = User.query.filter_by(username=form.username.data).first()
        if user and not user == g.user:
            form.username.errors = [
                "Username already taken!",
                *form.username.errors
            ]
            raise ValidationError
class LoginForm(FlaskForm):
    """Login form: email + password credentials."""
    email = EmailField("Email", validators=[
        InputRequired(message="Email cannot be blank."),
        Length(min=5, max=320),
        Email(check_deliverability=True,
              message="Invalid Email address")])
    password = PasswordField("Password", validators=[
        InputRequired(
            message="Password cannot be blank."),
        Length(min=8, max=60)])
class ReportBaseForm(FlaskForm):
    """Form for adding new report."""
    text = TextAreaField("Report", validators=[
        InputRequired(message="Report cannot be blank."),
        Length(min=2)])
    photo_url = URLField(
        "Photo URL", validators=[URL(), Optional()],
        description="""
        Either enter a photo URL or
        choose an image file to include an image.""")
    photo_file = FileField(
        "Upload Photo", validators=[Optional()],
        description="""
        Either enter a photo URL or
        choose an image file to include an image. 4MB max.""")

    def validate(self):
        """Cross-field check: photo_url and photo_file are mutually
        exclusive; run after the per-field validators succeed."""
        if not super().validate():
            return False
        if self.photo_url.data and self.photo_file.data:
            msg = 'Please specify Photo URL or upload a photo, not both'
            self.photo_url.errors.append(msg)
            self.photo_file.errors.append(msg)
            return False
        return True
class AddReportForm(ReportBaseForm):
    """Form for adding new report."""
    # Identical to ReportBaseForm; kept as a distinct type for routing.
    pass
class EditReportForm(ReportBaseForm):
    """Form for editing a report."""
    # Flag set client-side when the user removes the existing photo.
    cleared_file = HiddenField('cleared_file')
|
4,791 | f3b194bbc3c174549b64d6e6b1a8f4438a0c9d38 | from ctypes import *
class GF_AVCConfigSlot(Structure):
    """ctypes mirror of GPAC's GF_AVCConfigSlot (an AVC parameter-set slot).

    NOTE(review): `data` is declared as a single byte (c_char) here; in the
    native struct it is presumably a char pointer -- confirm against the
    GPAC headers before using this for real marshalling.
    """
    _fields_ = [
        ("size", c_uint16),
        ("data", c_char),
        # Plain Python `int` is not a ctypes type and raises TypeError at
        # class-creation time; c_int is the ctypes equivalent.
        ("id", c_int),
    ]
4,792 | c6b98cf309e2f1a0d279ec8dc728ffd3fe45dfdb | from io import StringIO
from pathlib import Path
from unittest import TestCase
from doculabs.samon import constants
from doculabs.samon.elements import BaseElement, AnonymusElement
from doculabs.samon.expressions import Condition, ForLoop, Bind
class BaseElementTest(TestCase):
    def assertXmlEqual(self, generated_xml: str, xml_benchmark: Path):
        """Compare generated XML against a benchmark file next to this test."""
        xml_benchmark = Path(__file__).parent / xml_benchmark
        with xml_benchmark.open('r', encoding='utf-8') as f:
            xml_benchmark = f.read()
        self.assertEqual(generated_xml, xml_benchmark)
    def test_parse_xml_attributes(self):
        """Namespaced attrs are flattened to Clark notation '{ns}name'."""
        xml_attrs = { # AttributesNSImpl like object
            (None, 'attr1'): 'val1', # NS, attr_name
            ('http://example.org', 'attr2'): 'val2'
        }
        element = BaseElement(xml_tag='tag', xml_attrs=xml_attrs)
        self.assertEqual(element.xml_attrs, {'attr1': 'val1', '{http://example.org}attr2': 'val2'})
    def test_parse_expressions(self):
        """Control/binding namespaces become expression objects."""
        xml_attrs = {
            (constants.XML_NAMESPACE_FLOW_CONTROL, 'if'): 'val == 7',
            (constants.XML_NAMESPACE_FLOW_CONTROL, 'for'): 'a in val',
            (constants.XML_NAMESPACE_DATA_BINDING, 'attr2'): 'val'
        }
        e = BaseElement(xml_tag='tag', xml_attrs=xml_attrs)
        self.assertIsInstance(e.xml_attrs['{https://doculabs.io/2020/xtmpl#control}if'], Condition)
        self.assertIsInstance(e.xml_attrs['{https://doculabs.io/2020/xtmpl#control}for'], ForLoop)
        self.assertIsInstance(e.xml_attrs['{https://doculabs.io/2020/xtmpl#data-binding}attr2'], Bind)
    def test_data_binding(self):
        """Bound attribute values are substituted from the render context."""
        xml_attrs = {
            (constants.XML_NAMESPACE_DATA_BINDING, 'attr2'): 'val'
        }
        e = BaseElement(xml_tag='tag', xml_attrs=xml_attrs)
        xml = e.to_string(context={'val': 'example_value'}, indent=0).getvalue()
        self.assertEqual(xml, '<tag attr2="example_value">\n</tag>\n')
    def test_eval_forloop(self):
        """A c:for attribute repeats the element once per item."""
        xml_attrs = {
            (constants.XML_NAMESPACE_FLOW_CONTROL, 'for'): 'a in val',
            (None, 'class'): 'class_name'
        }
        e = BaseElement(xml_tag='tag', xml_attrs=xml_attrs)
        xml = e.to_string(context={'val': [1, 2, 3]}, indent=0).getvalue()
        self.assertXmlEqual(xml, 'assets/elements/forloop.xml')
    def test_eval_if(self):
        """A false c:if suppresses output entirely; a true one renders."""
        xml_attrs = {
            (constants.XML_NAMESPACE_FLOW_CONTROL, 'if'): 'val == 7',
            (None, 'class'): 'class_name'
        }
        e = BaseElement(xml_tag='tag', xml_attrs=xml_attrs)
        xml = e.to_string(context={'val': 8}, indent=0).getvalue()
        self.assertEqual(xml, '')
        xml = e.to_string(context={'val': 7}, indent=0).getvalue()
        self.assertXmlEqual(xml, 'assets/elements/ifcond.xml')
    def test_if_and_for_precedence(self):
        """for runs first, then if filters each iteration (loop var in scope)."""
        xml_attrs = {
            (constants.XML_NAMESPACE_FLOW_CONTROL, 'if'): 'val > 7',
            (constants.XML_NAMESPACE_FLOW_CONTROL, 'for'): 'val in val2',
            (constants.XML_NAMESPACE_DATA_BINDING, 'attr1'): 'val',
        }
        e = BaseElement(xml_tag='tag', xml_attrs=xml_attrs)
        xml = e.to_string(context={'val2': [7, 8, 9], 'val': 7}, indent=0).getvalue()
        self.assertXmlEqual(xml, 'assets/elements/if_and_for.xml')
    def test_render_anonymuselement(self):
        """Text nodes render as their text plus a trailing newline."""
        e = AnonymusElement(text='example')
        self.assertEqual(e.to_string(context={}).getvalue(), 'example\n')
|
4,793 | 0a6cb6d3fad09ab7f0e19b6c79965315c0e0d634 | import json
import sqlite3
import time
import shelve
import os
from constants import *
# SQL DDL templates for the backing table, indexed by the ``type`` argument
# of Vector(): 0 -> TEXT primary keys, 1 -> INT primary keys.  The "{}" slot
# receives the (quote-escaped) table name.
VEC_TYPES = [
'''
CREATE TABLE "{}"
(ID TEXT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
''',
'''
CREATE TABLE "{}"
(ID INT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
'''
]


class Vector():
    """A persistent key -> string mapping backed by a SQLite table.

    ``name`` is the table name, ``type`` selects the key column type (see
    VEC_TYPES) and ``url_path`` is the SQLite database path (``":memory:"``
    works for testing).  Writes are buffered until save() commits them.
    """

    def __init__(self, name, type, url_path):
        self._name = name
        self._conn = sqlite3.connect(url_path)
        self._cur = self._conn.cursor()
        # Create the backing table only if it does not already exist.
        self._cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = self._cur.fetchall()
        if name not in [row[0] for row in tables]:
            self._conn.execute(VEC_TYPES[type].format(self._name.replace('"', '""')))

    def __setitem__(self, index, edges):
        """Insert or update the value stored under key ``index``.

        Uses INSERT OR REPLACE so assigning to an existing key updates the
        row instead of failing on the PRIMARY KEY constraint (the previous
        plain INSERT made every second assignment print "Update Failed").
        """
        try:
            self._conn.execute(
                """
                INSERT OR REPLACE INTO "{}" (ID, num)
                VALUES (?, ?);
                """.format(self._name.replace('"', '""')), (index, edges)
            )
        except Exception as e:
            # Best-effort semantics preserved: report the failure and continue.
            print(e)
            print("Update Failed")

    def __getitem__(self, index):
        """Return the value stored under ``index``, or None if absent."""
        self._cur.execute(
            """
            SELECT num FROM "{}"
            WHERE ID = ?;
            """.format(self._name.replace('"', '""')), (index,)
        )
        row = self._cur.fetchone()
        return row[0] if row is not None else None

    def get_multiple(self, keys):
        """Return the values for ``keys`` in order; missing keys are skipped.

        NOTE: the previous implementation passed a SELECT to executemany(),
        which sqlite3 forbids (ProgrammingError) -- one execute() per key is
        issued instead.
        """
        query = """
            SELECT num FROM "{}"
            WHERE ID = ?;
            """.format(self._name.replace('"', '""'))
        values = []
        for key in keys:
            self._cur.execute(query, (key,))
            row = self._cur.fetchone()
            if row is not None:
                values.append(row[0])
        return values

    def save(self):
        """Commit pending writes to the database."""
        self._conn.commit()

    def close(self):
        """Close the underlying database connection."""
        self._conn.close()
"""
vec = Vector("yoav_table", 0, EDGES_VECTOR_PATH)
print(vec[0])
vec[0] = "yo"
print(vec[0])
vec.save()
""" |
4,794 | 14e336005da1f1f3f54ea5f2892c27b58f2babf0 | from flask import Flask
import rq
from redis import Redis
from app.lib.job import Job
from app.config import Config
# Module-level Flask application configured from Config, with Redis-backed
# RQ queues for OCR work.
app = Flask(__name__)
app.config.from_object(Config)

# NOTE(review): the Redis URL is hard-coded here; presumably it should come
# from Config -- confirm before deploying.
app.redis = Redis.from_url('redis://')

# 12-hour per-job timeout; .empty() discards any jobs left from a previous
# run, so queued work does NOT survive a restart.
app.task_queue = rq.Queue('ocr-tasks', connection=app.redis, default_timeout=43200)
app.task_queue.empty()
app.eval_queue = rq.Queue('ocr-evals', connection=app.redis, default_timeout=43200)
app.eval_queue.empty()

# In-process bookkeeping: id <-> filename maps and error messages for jobs
# and evaluations.  NOTE(review): per-process state, lost on restart.
app.job_id2file = {}
app.job_file2id = {}
app.job_id2err = {}
app.eval_id2file = {}
app.eval_file2id = {}
app.eval_id2err = {}

# Imported after ``app`` exists (common Flask pattern to avoid a circular
# import with modules that import ``app``).
from app import routes
from app.api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api') |
4,795 | 8fac4571a3a1559e297754e89375be06d6c45c2d | #これは明日20200106に走らせましょう!
import numpy as np
import sys,os
import config2
# Twitter API credentials are kept out of the repo in config2.py.
CONSUMER_KEY = config2.CONSUMER_KEY
CONSUMER_SECRET = config2.CONSUMER_SECRET
ACCESS_TOKEN = config2.ACCESS_TOKEN
ACCESS_TOKEN_SECRET = config2.ACCESS_TOKEN_SECRET
import tweepy
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# wait_on_rate_limit=True blocks (instead of failing) when Twitter's rate
# limit is hit -- this script makes one API call per user.
api = tweepy.API(auth,wait_on_rate_limit = True)#,wait_on_rate_limit_notify= True #PLOT TIME
#user = sys.argv[1]
import pandas as pd
# Load the previously collected user-id matrix; drop the CSV index column
# and the first row, then renumber columns 0..198.
df=pd.read_csv("df_mix2.csv")
df=df.drop("Unnamed: 0",axis=1)[1:]
df=df.reset_index()
df=df.drop("index",axis=1)
df.columns = list(range(199))
df.head()
# Column 0 holds the "core" user ids; keep the first 1440 of them.
list_real=[]
list_real=df[0]
list_real=list_real[:1440]
list_real
# Filled by skip_func(): one row of friend ids (or zeros) per core user.
follower_list=[]
def skip_func(list):
    """Fetch the friend (followed-user) ids for every user id in ``list``.

    Appends one entry per user to the global ``follower_list``: the list of
    friend ids on success, or a zero-filled placeholder of length 200 when
    the Twitter API call fails (protected/suspended account, etc.).

    NOTE(review): the parameter shadows the builtin ``list``; the name is
    kept so existing callers are unaffected.
    """
    cnt = 0
    for i in list:
        padd = [0] * 200  # placeholder row used when the lookup fails
        try:
            got = api.friends_ids(i, count=200)
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C / SystemExit still
            # interrupt the (long-running) crawl.
            print('========NG=============', cnt)
            follower_list.append(padd)
        else:
            print('==========OK==========', cnt)
            follower_list.append(got)
        cnt += 1
# Crawl the friend lists for every core user (fills follower_list).
skip_func(list_real)
pd.set_option("display.max_rows", 250)
# Pad ragged rows with zeros and force an integer dtype.
df=pd.DataFrame(follower_list)
df1=df.fillna(0)
df1=df1.astype(int)
# First column of the output: the core user id each row belongs to.
df0=pd.DataFrame(list_real)
df0.columns=["ID"]
#print(df0.head())# core user's follow user
#pd.options.display.precision = 21
df2=pd.concat([df0, df1],axis=1)
df2=df2.fillna(0)
df2=df2.astype(int)
df2.head()
df2.to_csv("ANetwork_moto_2020.csv")
|
4,796 | cc5ad95419571d3eb2689b428e5805ad69958806 | import os
import sys
from tensor2tensor.bin import t2t_trainer
def problem_args(problem_name):
    """Build the t2t-trainer CLI arguments for ``problem_name``.

    The data, tmp and model directories are derived from the problem name
    so runs for different problems do not collide.
    """
    return [
        '--generate_data',
        '--model=transformer',
        '--hparams_set=transformer_librispeech_v1',
        f'--problem={problem_name}',
        f'--data_dir=/tmp/refactor_test/problems/{problem_name}/data',
        f'--tmp_dir=/tmp/refactor_test/problems/{problem_name}/tmp',
        f'--output_dir=/tmp/refactor_test/models/{problem_name}/data',
        '--hparams=batch_shuffle_size=0,batch_size=1000000'
    ]
def main():
    """Run t2t-trainer on the problem(s) appended below.

    Arguments are injected via sys.argv because t2t_trainer reads its flags
    from the command line.
    """
    sys.argv += problem_args('librispeech_clean_small')
    # sys.argv += problem_args('common_voice')
    t2t_trainer.main(None)
    print('All done.')


if __name__ == '__main__':
    main()
|
4,797 | 64b254db6d8f352b2689385e70f2ea7d972c9191 | # Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random as pyrandom
import warnings
import numpy as np
from hypothesis import given
from hypothesis.strategies import dictionaries, floats, lists, text, tuples
import bayesmark.signatures as ss
from bayesmark.experiment import OBJECTIVE_NAMES
from util import space_configs
# Length of one signature row, mirrored from bayesmark.signatures.
N_SIG = ss.N_SUGGESTIONS


def bsigs():
    """Hypothesis strategy: one signature row of exactly N_SIG finite floats."""
    S = lists(floats(allow_infinity=False, allow_nan=False), min_size=N_SIG, max_size=N_SIG)
    return S
def sigs():
    """Hypothesis strategy: a non-empty list of signature rows (see bsigs)."""
    S = lists(bsigs(), min_size=1)
    return S
def sig_pair():
    """Hypothesis strategy: a ``(signatures, signatures_ref)`` dict pair.

    Each drawn key maps to either a 1-tuple (reference row only) or a
    2-tuple (test row and reference row); ``separate`` splits the dict
    accordingly, converting every row to a numpy array.
    """
    def separate(D):
        signatures, signatures_ref = {}, {}
        for kk in D:
            if len(D[kk]) == 1:
                # Reference-only entry.
                v_ref, = D[kk]
                signatures_ref[kk] = np.asarray(v_ref)
            elif len(D[kk]) == 2:
                v, v_ref = D[kk]
                signatures[kk] = np.asarray(v)
                signatures_ref[kk] = np.asarray(v_ref)
            else:
                # The strategy below only draws 1- or 2-tuples.
                assert False
        return signatures, signatures_ref

    sig_dict = dictionaries(text(), tuples(bsigs()) | tuples(bsigs(), bsigs()))
    S = sig_dict.map(separate)
    return S
def some_mock_f(x):
    """Some arbitrary deterministic test function.

    Seeds a private Random with the canonical JSON form of ``x`` so equal
    inputs always give equal outputs: one Gaussian draw per objective.
    """
    random_stream = pyrandom.Random(json.dumps(x, sort_keys=True))
    y = [random_stream.gauss(0, 1) for _ in OBJECTIVE_NAMES]
    return y
@given(space_configs())
def test_get_func_signature(api_config):
    """Smoke test: get_func_signature runs on any generated space config."""
    api_config, _, _, _ = api_config
    signature_x, signature_y = ss.get_func_signature(some_mock_f, api_config)
@given(dictionaries(text(), sigs()))
def test_analyze_signatures(signatures):
    """Smoke test: analyze_signatures tolerates arbitrary signature dicts."""
    with warnings.catch_warnings():
        # Degenerate draws can trigger numpy RuntimeWarnings (e.g. empty
        # reductions); those are expected here.
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        sig_errs, signatures_median = ss.analyze_signatures(signatures)
@given(sig_pair())
def test_analyze_signature_pair(args):
    """Smoke test: analyze_signature_pair on a test/reference dict pair."""
    signatures, signatures_ref = args
    with warnings.catch_warnings():
        # Degenerate draws can trigger numpy RuntimeWarnings; expected here.
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        sig_errs, signatures_pair = ss.analyze_signature_pair(signatures, signatures_ref)
|
4,798 | dc600763b12edda05820721098e7e5bc80f74c89 | from typing import List
class Solution:
    def grayCode(self, n: int) -> List[int]:
        """Return the n-bit reflected Gray code sequence starting at 0.

        Uses the closed form g(i) = i XOR (i >> 1), which produces exactly
        the same sequence as the iterative reflect-and-prefix construction:
        consecutive values differ in a single bit, and all 2**n values
        appear once.
        """
        return [i ^ (i >> 1) for i in range(1 << n)]
|
4,799 | ee272fe1a023d85d818a8532055dcb5dbcb6a707 | import numpy as np
import tkinter as tk
import time
# Canvas size in pixels.
HEIGHT = 100
WIDTH = 800
# Robot spawn position (right side of the canvas).
ROBOT_START_X = 700
ROBOT_START_Y = 50
# Delays (seconds) used to animate rendering and episode resets.
SLEEP_TIME = 0.00001
SLEEP_TIME_RESET = 0.2
class Environment(tk.Tk, object):
    """1-D braking world rendered with tkinter.

    A robot polygon starts near the right edge of an 800x100 canvas and
    drives left toward a fixed obstacle.  The agent chooses between two
    actions -- drive and break -- and is rewarded for coming to a stop
    15-40 px short of the obstacle (see perform_action).
    """

    def __init__(self):
        super(Environment, self).__init__()
        self.action_space = ['g', 'b']  # go, break
        self.num_actions = len(self.action_space)
        self.title('Environment')
        self.geometry('{0}x{1}'.format(WIDTH, HEIGHT))
        self._build_environment()

    def _create_robot(self):
        """Draw the robot polygon at its start position; return its canvas id.

        Shared by _build_environment() and reset() so the vertex list is
        defined in exactly one place (it was previously duplicated).
        """
        robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])
        return self.canvas.create_polygon([
            robot_center[0] - 25, robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - 10,
            robot_center[0] - 15, robot_center[1] - 10, robot_center[0] - 15, robot_center[1] - 25,
            robot_center[0] + 25, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] + 25,
            robot_center[0] - 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] + 10
        ],
            fill='blue'
        )

    def _build_environment(self):
        """Create the canvas with the obstacle and the robot."""
        self.canvas = tk.Canvas(self, bg='white', height=HEIGHT, width=WIDTH)
        # create obstacle
        obstacle_center = np.array([20, 50])
        self.obstacle = self.canvas.create_rectangle(
            obstacle_center[0] - 10, obstacle_center[1] - 40,
            obstacle_center[0] + 10, obstacle_center[1] + 40,
            fill='black'
        )
        # create robot
        self.robot = self._create_robot()
        # pack
        self.canvas.pack()

    def stop_robot(self):
        """Animate a gradual stop: the robot travels 50 more px, slowing down."""
        # change outline to show the robot slows down
        self.canvas.itemconfig(self.robot, outline='red')
        # each step sleeps a little longer, so the robot visibly decelerates
        for i in range(50):
            self.canvas.move(self.robot, -1, 0)
            time.sleep(SLEEP_TIME * 10 * i)
            self.render()
        # change outline back again
        self.canvas.itemconfig(self.robot, outline='')
        self.render()
        time.sleep(0.2)

    def perform_action(self, action):
        """Apply ``action`` (0 = drive one px left, 1 = break).

        Returns ``(dist, reward, done)`` where ``dist`` is the gap between
        the robot's leftmost point and the obstacle's right edge; the
        reward is +1 for stopping inside the 15-40 px window, -1 for
        stopping too close / too far or for hitting the obstacle, else 0.
        """
        stopped = False
        done = False
        reward = 0
        if action == 0:  # drive
            self.canvas.move(self.robot, -1, 0)
        elif action == 1:  # break
            # if you want to speed up the process comment the next line in
            # and the function stop_robot out
            # self.canvas.move(self.robot, -50, 0)  # move further because of stop distance
            self.stop_robot()
            stopped = True
        nextState = self.canvas.coords(self.robot)
        obstCoords = self.canvas.coords(self.obstacle)
        dist = nextState[0] - obstCoords[2]
        if stopped:
            if (dist >= 15 and dist <= 40):  # if enough space to obstacle
                reward = 1
                done = True
            elif dist < 15:  # if too close to obstacle
                reward = -1
                done = True
            else:  # if too far away to obstacle
                reward = -1
                done = False
        elif nextState[0] <= obstCoords[2]:  # if robot hits obstacle
            reward = -1
            done = True
        return dist, reward, done

    def reset(self):
        """Respawn the robot at the start and return the initial distance."""
        self.update()
        time.sleep(SLEEP_TIME_RESET)
        self.canvas.delete(self.robot)
        self.robot = self._create_robot()
        robotCoords = self.canvas.coords(self.robot)
        obstCoords = self.canvas.coords(self.obstacle)
        dist = robotCoords[0] - obstCoords[2]
        return dist

    def render(self):
        """Refresh the tk window (with a tiny delay to slow the animation)."""
        time.sleep(SLEEP_TIME)
        self.update()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.