text
stringlengths 27
775k
|
|---|
from datetime import datetime, timedelta
from functools import wraps
import logging
import icat.client
from icat.entities import getTypeMap
from icat.exception import (
ICATInternalError,
ICATNoObjectError,
ICATObjectExistsError,
ICATParameterError,
ICATSessionError,
ICATValidationError,
)
from datagateway_api.common.config import config
from datagateway_api.common.date_handler import DateHandler
from datagateway_api.common.exceptions import (
AuthenticationError,
BadRequestError,
MissingRecordError,
PythonICATError,
)
from datagateway_api.common.filter_order_handler import FilterOrderHandler
from datagateway_api.common.icat.filters import (
PythonICATLimitFilter,
PythonICATWhereFilter,
)
from datagateway_api.common.icat.query import ICATQuery
log = logging.getLogger()
def requires_session_id(method):
    """
    Decorator for Python ICAT backend methods that guards API calls against session
    errors (e.g. an expired or invalid session ID raising an ICATSessionError).

    The session ID from the request is assigned here, so a user is not required to hit
    the login endpoint first - any valid session ID works, whether it was created by
    this API or by an alternative such as scigateway-auth.

    This assumes the session ID is the second positional argument of the wrapped
    function, which holds for the current method signatures of all the endpoints.

    :param method: The method for the backend operation
    :raises AuthenticationError: If a valid session_id is not provided with the request
    """

    @wraps(method)
    def wrapper_requires_session(*args, **kwargs):
        try:
            client = create_client()
            client.sessionId = args[1]
            # Expose the client to the wrapped backend function via kwargs
            kwargs["client"] = client

            # A negative remaining time means the session has already expired
            minutes_remaining = client.getRemainingMinutes()
            log.info("Session time: %d", minutes_remaining)
            if minutes_remaining < 0:
                raise AuthenticationError("Forbidden")
            return method(*args, **kwargs)
        except ICATSessionError:
            raise AuthenticationError("Forbidden")

    return wrapper_requires_session
def create_client():
    """
    Build a fresh ICAT client using the service URL and certificate-check setting
    taken from the API's configuration.

    :return: A new, unauthenticated :class:`icat.client.Client`
    """
    return icat.client.Client(
        config.get_icat_url(), checkCert=config.get_icat_check_cert(),
    )
def get_session_details_helper(client):
    """
    Retrieve details regarding the current session within `client`

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :return: Details of the user's session, ready to be converted into a JSON response
        body
    """
    minutes_remaining = client.getRemainingMinutes()
    # Drop microseconds so the expiry timestamp serialises cleanly
    expiry_time = (datetime.now() + timedelta(minutes=minutes_remaining)).replace(
        microsecond=0,
    )
    return {
        "id": client.sessionId,
        "expireDateTime": DateHandler.datetime_object_to_str(expiry_time),
        "username": client.getUserName(),
    }
def logout_icat_client(client):
    """
    End the session of the user currently authenticated within `client`

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    """
    client.logout()
def refresh_client_session(client):
    """
    Extend the session of the user currently authenticated within `client`

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    """
    client.refresh()
def get_icat_entity_name_as_camel_case(client, entity_name):
    """
    From the entity name, this function returns a camelCase version of its input

    Due to the case sensitivity of Python ICAT, a camelCase version of the entity name
    is required for creating ICAT entities in ICAT (e.g. `client.new("parameterType")`).

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_name: Entity name to fetch a camelCase version of
    :type entity_name: :class:`str`
    :return: Entity name (of type string) in the correct casing ready to be passed into
        Python ICAT
    :raises BadRequestError: If the entity cannot be found
    """
    # NOTE: the original implementation reused `entity_name` as the loop variable,
    # shadowing the parameter - the 400 error message then reported the last entity
    # in the type map instead of the name the caller actually asked for.
    lowercase_target = entity_name.lower()
    for icat_entity_name in getTypeMap(client).keys():
        if icat_entity_name.lower() == lowercase_target:
            return icat_entity_name

    # Raise a 400 if a valid entity cannot be found
    raise BadRequestError(
        f"Bad request made, cannot find {entity_name} entity within Python ICAT",
    )
def update_attributes(old_entity, new_entity):
    """
    Updates the attribute(s) of a given object which is a record of an entity from
    Python ICAT

    :param old_entity: An existing entity record from Python ICAT
    :type old_entity: :class:`icat.entities.ENTITY` (implementation of
        :class:`icat.entity.Entity`)
    :param new_entity: Dictionary containing the new data to be modified
    :type new_entity: :class:`dict`
    :raises BadRequestError: If the attribute cannot be found, or if it cannot be edited
        - typically if Python ICAT doesn't allow an attribute to be edited (e.g. modId &
        modTime)
    :return: `old_entity` with the requested attributes modified
    """
    log.debug("Updating entity attributes: %s", list(new_entity.keys()))
    for key in new_entity:
        try:
            existing_value = getattr(old_entity, key)
        except AttributeError:
            raise BadRequestError(
                f"Bad request made, cannot find attribute '{key}' within the"
                f" {old_entity.BeanName} entity",
            )

        # Datetime attributes arrive as strings in the request body, so convert them
        # (in place, mirroring what setattr below consumes) before assignment
        if isinstance(existing_value, datetime):
            new_entity[key] = DateHandler.str_to_datetime_object(new_entity[key])

        try:
            setattr(old_entity, key, new_entity[key])
        except AttributeError:
            raise BadRequestError(
                f"Bad request made, cannot modify attribute '{key}' within the"
                f" {old_entity.BeanName} entity",
            )

    return old_entity
def push_data_updates_to_icat(entity):
    """
    Send the local changes made to `entity` to icatdb

    :param entity: Entity record with locally-modified attributes
    :type entity: :class:`icat.entities.ENTITY` (implementation of
        :class:`icat.entity.Entity`)
    :raises PythonICATError: If ICAT hits an internal error while applying the update
    :raises BadRequestError: If ICAT rejects the update as invalid
    """
    try:
        entity.update()
    except ICATInternalError as error:
        raise PythonICATError(error)
    except ICATValidationError as error:
        raise BadRequestError(error)
def get_entity_by_id(
    client,
    entity_type,
    id_,
    return_json_formattable_data,
    return_related_entities=False,
):
    """
    Gets a record of a given ID from the specified entity

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_type: The type of entity requested to manipulate data with
    :type entity_type: :class:`str`
    :param id_: ID number of the entity to retrieve
    :type id_: :class:`int`
    :param return_json_formattable_data: Flag to determine whether the data should be
        returned as a list of data ready to be converted straight to JSON (i.e. if the
        data will be used as a response for an API call) or whether to leave the data in
        a Python ICAT format
    :type return_json_formattable_data: :class:`bool`
    :param return_related_entities: Flag to determine whether related entities should
        automatically be returned or not. Returning related entities used as a bug fix
        for an `IcatException` where ICAT attempts to set a field to null because said
        field hasn't been included in the updated data
    :type return_related_entities: :class:`bool`
    :return: The record of the specified ID from the given entity
    :raises: MissingRecordError: If Python ICAT cannot find a record of the specified ID
    """
    log.info("Getting %s of the ID %s", entity_type, id_)
    log.debug("Return related entities set to: %s", return_related_entities)

    # Restrict the query to the requested ID; include related entities when asked
    condition = PythonICATWhereFilter.create_condition("id", "=", id_)
    query = ICATQuery(
        client,
        entity_type,
        conditions=condition,
        includes="1" if return_related_entities else None,
    )
    results = query.execute_query(client, return_json_formattable_data)

    if results:
        # IDs are unique, so at most one record can match
        return results[0]
    raise MissingRecordError("No result found")
def delete_entity_by_id(client, entity_type, id_):
    """
    Deletes a record of a given ID of the specified entity

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_type: The type of entity requested to manipulate data with
    :type entity_type: :class:`str`
    :param id_: ID number of the entity to delete
    :type id_: :class:`int`
    """
    log.info("Deleting %s of ID %s", entity_type, id_)
    # Fetch the record in Python ICAT form (not JSON) so it can be passed to delete()
    entity_record = get_entity_by_id(client, entity_type, id_, False)
    client.delete(entity_record)
def update_entity_by_id(client, entity_type, id_, new_data):
    """
    Applies `new_data` to the record of a given ID of the specified entity

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_type: The type of entity requested to manipulate data with
    :type entity_type: :class:`str`
    :param id_: ID number of the entity to retrieve
    :type id_: :class:`int`
    :param new_data: JSON from request body providing new data to update the record with
        the specified ID
    :return: The updated record of the specified ID from the given entity
    """
    log.info("Updating %s of ID %s", entity_type, id_)

    # A missing record is surfaced as a MissingRecordError by get_entity_by_id(); at
    # most one record can ever match a single ID
    existing_record = get_entity_by_id(
        client, entity_type, id_, False, return_related_entities=True,
    )
    push_data_updates_to_icat(update_attributes(existing_record, new_data))

    # Re-fetch from Python ICAT (rather than reusing the local object) so the caller
    # sees whether the change was actually applied
    return get_entity_by_id(client, entity_type, id_, True)
def get_entity_with_filters(client, entity_type, filters):
    """
    Gets all the records of a given entity, based on the filters provided in the request

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_type: The type of entity requested to manipulate data with
    :type entity_type: :class:`str`
    :param filters: The list of filters to be applied to the request
    :type filters: List of specific implementations :class:`QueryFilter`
    :return: The list of records of the given entity, using the filters to restrict the
        result of the query
    """
    log.info("Getting entity using request's filters")
    query = ICATQuery(client, entity_type)
    FilterOrderHandler().manage_icat_filters(filters, query.query)
    return query.execute_query(client, True)
def get_count_with_filters(client, entity_type, filters):
    """
    Get the number of results of a given entity, based on the filters provided in the
    request. This acts very much like `get_entity_with_filters()` but returns the number
    of results, as opposed to a JSON object of data.

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_type: The type of entity requested to manipulate data with
    :type entity_type: :class:`str`
    :param filters: The list of filters to be applied to the request
    :type filters: List of specific implementations :class:`QueryFilter`
    :return: The number of records of the given entity (of type integer), using the
        filters to restrict the result of the query
    """
    log.info(
        "Getting the number of results of %s, also using the request's filters",
        entity_type,
    )
    count_query = ICATQuery(client, entity_type, aggregate="COUNT")
    FilterOrderHandler().manage_icat_filters(filters, count_query.query)
    results = count_query.execute_query(client, True)
    # A COUNT query always yields exactly one element
    return results[0]
def get_first_result_with_filters(client, entity_type, filters):
    """
    Using filters in the request, get results of the given entity, but only show the
    first one to the user

    Since only one result will be outputted, inserting a `PythonICATLimitFilter` in the
    query will make Python ICAT's data fetching more snappy and prevent a 500 being
    caused by trying to fetch over the number of records limited by ICAT (currently
    10000).

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_type: The type of entity requested to manipulate data with
    :type entity_type: :class:`str`
    :param filters: The list of filters to be applied to the request
    :type filters: List of specific implementations :class:`QueryFilter`
    :return: The first record of the given entity, using the filters to restrict the
        result of the query
    :raises MissingRecordError: If the filtered query yields no results
    """
    log.info(
        "Getting only first result of %s, making use of filters in request",
        entity_type,
    )
    # Cap the query at one record so ICAT never over-fetches
    filters.append(PythonICATLimitFilter(1))
    results = get_entity_with_filters(client, entity_type, filters)
    if not results:
        raise MissingRecordError("No results found")
    return results[0]
def update_entities(client, entity_type, data_to_update):
    """
    Update one or more results for the given entity using the JSON provided in
    `data_to_update`

    If an exception occurs while sending data to icatdb, an attempt will be made to
    restore a backup of the data made before making the update.

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_type: The type of entity requested to manipulate data with
    :type entity_type: :class:`str`
    :param data_to_update: The data that to be updated in ICAT
    :type data_to_update: :class:`list` or :class:`dict`
    :return: The updated record(s) of the given entity
    :raises BadRequestError: If any element of `data_to_update` lacks an 'id' key
    :raises PythonICATError: If icatdb rejects an update, or if restoring the backup
        data also fails
    """
    log.info("Updating certain results in %s", entity_type)

    updated_data = []
    if not isinstance(data_to_update, list):
        data_to_update = [data_to_update]

    icat_data_backup = []
    updated_icat_data = []

    for entity_request in data_to_update:
        try:
            entity_data = get_entity_by_id(
                client,
                entity_type,
                entity_request["id"],
                False,
                return_related_entities=True,
            )
            # Keep a pristine copy so icatdb can be restored if a later push fails
            icat_data_backup.append(entity_data.copy())
            updated_icat_data.append(update_attributes(entity_data, entity_request))
        except KeyError:
            raise BadRequestError(
                "The new data in the request body must contain the ID (using the key:"
                " 'id') of the entity you wish to update",
            )

    # This separates the local data updates from pushing these updates to icatdb
    for updated_icat_entity in updated_icat_data:
        try:
            updated_icat_entity.update()
        except (ICATValidationError, ICATInternalError) as update_error:
            # Use `icat_data_backup` to restore the data being updated to its state
            # before this request.
            # FIX: the original rebound the outer exception as `e` in the nested
            # handler below; Python 3 unbinds an `except ... as` target when its
            # handler exits, so reusing the name risks the final raise referencing
            # an unbound/incorrect exception. Distinct names avoid the shadowing.
            for icat_entity_backup in icat_data_backup:
                try:
                    icat_entity_backup.update()
                except (ICATValidationError, ICATInternalError) as restore_error:
                    # If an error occurs while trying to restore backup data, just
                    # throw a 500 immediately
                    raise PythonICATError(restore_error)
            raise PythonICATError(update_error)
        updated_data.append(
            get_entity_by_id(client, entity_type, updated_icat_entity.id, True),
        )

    return updated_data
def create_entities(client, entity_type, data):
    """
    Add one or more results for the given entity using the JSON provided in `data`

    `created_icat_data` is data of `icat.entity.Entity` type that is collated to be
    pushed to ICAT at the end of the function - this avoids confusion over which data
    has/hasn't been created if the request returns an error. When pushing the data to
    ICAT, there is still risk an exception might be caught, so any entities already
    pushed to ICAT will be deleted. Python ICAT doesn't support a database rollback (or
    the concept of transactions) so this is a good alternative.

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param entity_type: The type of entity requested to manipulate data with
    :type entity_type: :class:`str`
    :param data: The data that needs to be created in ICAT
    :type data: :class:`list` or :class:`dict`
    :return: The created record(s) of the given entity
    :raises BadRequestError: If a related entity cannot be found, a value has the wrong
        type, or ICAT rejects the new record as invalid/duplicate
    :raises PythonICATError: If ICAT hits an internal error while creating a record
    """
    log.info("Creating ICAT data for %s", entity_type)
    created_data = []
    created_icat_data = []

    # Allow a single record to be supplied without wrapping it in a list
    if not isinstance(data, list):
        data = [data]

    # Phase 1: build every entity locally before pushing anything to ICAT
    for result in data:
        new_entity = client.new(get_icat_entity_name_as_camel_case(client, entity_type))

        for attribute_name, value in result.items():
            log.debug("Preparing data for %s", attribute_name)
            try:
                entity_info = new_entity.getAttrInfo(client, attribute_name)
                if entity_info.relType.lower() == "attribute":
                    # Short circuiting ensures is_str_date() will only be executed if
                    # value is a string
                    if isinstance(value, str) and DateHandler.is_str_a_date(value):
                        value = DateHandler.str_to_datetime_object(value)
                    setattr(new_entity, attribute_name, value)
                else:
                    # This means the attribute has a relationship with another object
                    try:
                        # `value` is treated as the ID of the related record here
                        related_object = client.get(entity_info.type, value)
                    except ICATNoObjectError as e:
                        raise BadRequestError(e)
                    if entity_info.relType.lower() == "many":
                        # One-to-many relationships expect a list of related objects
                        related_object = [related_object]
                    setattr(new_entity, attribute_name, related_object)
            except ValueError as e:
                raise BadRequestError(e)
        created_icat_data.append(new_entity)

    # Phase 2: push the prepared entities; on failure, delete anything already
    # created so icatdb is left as it was before the request
    for entity in created_icat_data:
        try:
            entity.create()
        except ICATInternalError as e:
            for entity_json in created_data:
                # Delete any data that has been pushed to ICAT before the exception
                delete_entity_by_id(client, entity_type, entity_json["id"])
            raise PythonICATError(e)
        except (ICATObjectExistsError, ICATParameterError, ICATValidationError) as e:
            for entity_json in created_data:
                delete_entity_by_id(client, entity_type, entity_json["id"])
            raise BadRequestError(e)
        created_data.append(get_entity_by_id(client, entity_type, entity.id, True))

    return created_data
def get_facility_cycles_for_instrument(
    client, instrument_id, filters, count_query=False,
):
    """
    Given an Instrument ID, get the Facility Cycles where there are Instruments that
    have investigations occurring within that cycle

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param instrument_id: ID of the instrument from the request
    :type instrument_id: :class:`int`
    :param filters: The list of filters to be applied to the request
    :type filters: List of specific implementations :class:`QueryFilter`
    :param count_query: Flag to determine if the query in this function should be used
        as a count query. Used for `get_facility_cycles_for_instrument_count()`
    :type count_query: :class:`bool`
    :return: A list of Facility Cycles that match the query
    """
    log.info("Getting a list of facility cycles from the specified instrument for ISIS")

    query = ICATQuery(
        client,
        "FacilityCycle",
        aggregate="COUNT:DISTINCT" if count_query else "DISTINCT",
    )

    # Restrict cycles to the given instrument, and to cycles whose date range
    # contains the start date of an investigation on that instrument
    filters.extend(
        [
            PythonICATWhereFilter("facility.instruments.id", instrument_id, "eq"),
            PythonICATWhereFilter(
                "facility.investigations.investigationInstruments.instrument.id",
                instrument_id,
                "eq",
            ),
            PythonICATWhereFilter(
                "facility.investigations.startDate", "o.startDate", "gte",
            ),
            PythonICATWhereFilter(
                "facility.investigations.startDate", "o.endDate", "lte",
            ),
        ],
    )

    FilterOrderHandler().manage_icat_filters(filters, query.query)
    return query.execute_query(client, True)
def get_facility_cycles_for_instrument_count(client, instrument_id, filters):
    """
    Given an Instrument ID, get the number of Facility Cycles where there's Instruments
    that have investigations occurring within that cycle

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param instrument_id: ID of the instrument from the request
    :type instrument_id: :class:`int`
    :param filters: The list of filters to be applied to the request
    :type filters: List of specific implementations :class:`QueryFilter`
    :return: The number of Facility Cycles that match the query
    """
    log.info(
        "Getting the number of facility cycles from the specified instrument for ISIS",
    )
    # Delegate to the list version in count mode; a count query yields one element
    results = get_facility_cycles_for_instrument(
        client, instrument_id, filters, count_query=True,
    )
    return results[0]
def get_investigations_for_instrument_in_facility_cycle(
    client, instrument_id, facilitycycle_id, filters, count_query=False,
):
    """
    Given Instrument and Facility Cycle IDs, get investigations that use the given
    instrument in the given cycle

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param instrument_id: ID of the instrument from the request
    :type instrument_id: :class:`int`
    :param facilitycycle_id: ID of the facilityCycle from the request
    :type facilitycycle_id: :class:`int`
    :param filters: The list of filters to be applied to the request
    :type filters: List of specific implementations :class:`QueryFilter`
    :param count_query: Flag to determine if the query in this function should be used
        as a count query. Used for
        `get_investigations_for_instrument_in_facility_cycle_count()`
    :type count_query: :class:`bool`
    :return: A list of Investigations that match the query
    """
    log.info(
        "Getting a list of investigations from the specified instrument and facility"
        " cycle, for ISIS",
    )

    query = ICATQuery(
        client,
        "Investigation",
        aggregate="COUNT:DISTINCT" if count_query else "DISTINCT",
    )

    # Match investigations to the requested instrument and cycle, and require the
    # investigation's start date to fall within the cycle's date range
    filters.extend(
        [
            PythonICATWhereFilter("facility.instruments.id", instrument_id, "eq"),
            PythonICATWhereFilter(
                "investigationInstruments.instrument.id", instrument_id, "eq",
            ),
            PythonICATWhereFilter(
                "facility.facilityCycles.id", facilitycycle_id, "eq",
            ),
            PythonICATWhereFilter(
                "facility.facilityCycles.startDate", "o.startDate", "lte",
            ),
            PythonICATWhereFilter(
                "facility.facilityCycles.endDate", "o.startDate", "gte",
            ),
        ],
    )

    FilterOrderHandler().manage_icat_filters(filters, query.query)
    return query.execute_query(client, True)
def get_investigations_for_instrument_in_facility_cycle_count(
    client, instrument_id, facilitycycle_id, filters,
):
    """
    Given Instrument and Facility Cycle IDs, get the number of investigations that use
    the given instrument in the given cycle

    :param client: ICAT client containing an authenticated user
    :type client: :class:`icat.client.Client`
    :param instrument_id: ID of the instrument from the request
    :type instrument_id: :class:`int`
    :param facilitycycle_id: ID of the facilityCycle from the request
    :type facilitycycle_id: :class:`int`
    :param filters: The list of filters to be applied to the request
    :type filters: List of specific implementations :class:`QueryFilter`
    :return: The number of Investigations that match the query
    """
    log.info(
        "Getting the number of investigations from the specified instrument and"
        " facility cycle, for ISIS",
    )
    # Delegate to the list version in count mode; a count query yields one element
    results = get_investigations_for_instrument_in_facility_cycle(
        client, instrument_id, facilitycycle_id, filters, count_query=True,
    )
    return results[0]
|
#!/usr/bin/ruby
require 'fileutils'
# Print the command-line help text and terminate with a failure status.
def usage
  script_name = File.basename $0
  puts "usage: #{script_name} <destination-to-update>"
  puts
  puts "<destination-to-update> values:"
  puts
  puts " Tools - Copy the UserInterface files to the Tools directory"
  puts " UserInterface - Copy the Tools files to the UserInterface directory"
  exit 1
end
# Validate the single required argument before doing any work.
usage if ARGV.size != 1

destination = ARGV[0]
usage if destination != "Tools" && destination != "UserInterface"

# Copy the formatter and CodeMirror files from UserInterface to Tools.
USER_INTERFACE_TO_TOOLS_MAP = {
  "UserInterface/CodeMirrorFormatters.js" => "Tools/PrettyPrinting/CodeMirrorFormatters.js",
  "UserInterface/Formatter.js" => "Tools/PrettyPrinting/Formatter.js",
  "UserInterface/FormatterContentBuilder.js" => "Tools/PrettyPrinting/FormatterContentBuilder.js",
  "UserInterface/External/CodeMirror/codemirror.css" => "Tools/PrettyPrinting/codemirror.css",
  "UserInterface/External/CodeMirror/codemirror.js" => "Tools/PrettyPrinting/codemirror.js",
  "UserInterface/External/CodeMirror/javascript.js" => "Tools/PrettyPrinting/javascript.js",
  "UserInterface/External/CodeMirror/css.js" => "Tools/PrettyPrinting/css.js",
}

# Copy only the formatter files from Tools to UserInterface.
TOOLS_TO_USER_INTERFACE_MAP = {
  "Tools/PrettyPrinting/CodeMirrorFormatters.js" => "UserInterface/CodeMirrorFormatters.js",
  "Tools/PrettyPrinting/Formatter.js" => "UserInterface/Formatter.js",
  "Tools/PrettyPrinting/FormatterContentBuilder.js" => "UserInterface/FormatterContentBuilder.js"
}

# Paths in the maps are relative to the WebInspector directory (this script's parent).
web_inspector_path = File.expand_path File.join(File.dirname(__FILE__), "..")

copy_map = destination == "Tools" ? USER_INTERFACE_TO_TOOLS_MAP : TOOLS_TO_USER_INTERFACE_MAP

# Attempt every copy even if one fails, and report overall success via exit code.
all_success = true
copy_map.each do |from, to|
  source_path = File.join web_inspector_path, from
  target_path = File.join web_inspector_path, to
  begin
    puts "Copying #{from} to #{to}..."
    FileUtils.cp source_path, target_path
  rescue Exception => e
    puts "WARNING: #{e}"
    all_success = false
  end
end

exit all_success ? 0 : 1
|
<?php
namespace App\Http\Controllers;
use Tightenco\Ziggy\Ziggy;
/**
 * Serves the application's named routes to the frontend as JSON,
 * using the Ziggy route-sharing package.
 */
class ZiggyController
{
    /**
     * Return every Ziggy-exposed route as a JSON response.
     */
    public function index()
    {
        $routes = new Ziggy();

        return response()->json($routes);
    }
}
|
```jsx
import React from 'react';
import { AmplifyAuthenticator, AmplifySignUp, AmplifySignIn } from '@aws-amplify/ui-react';
const App = () => {
return (
<AmplifyAuthenticator usernameAlias="email">
<AmplifySignUp
slot="sign-up"
usernameAlias="email"
formFields={[
{
type: "email",
label: "Custom email Label",
placeholder: "custom email placeholder",
required: true,
},
{
type: "password",
label: "Custom Password Label",
placeholder: "custom password placeholder",
required: true,
},
{
type: "phone_number",
label: "Custom Phone Label",
placeholder: "custom Phone placeholder",
required: false,
},
]}
/>
<AmplifySignIn slot="sign-in" usernameAlias="email" />
</AmplifyAuthenticator>
);
};
```
|
#include <QApplication>
#include <QQmlApplicationEngine>
#include <QQuickWindow>
#include <QQmlContext>
#include <QtQml>
#include "ganalytics.h"
// Application entry point: set metadata, expose the GAnalytics tracker type to
// QML, then load the main QML window and enter the Qt event loop.
int main(int argc, char* argv[])
{
    // Static setters are safe to call before the QApplication is constructed.
    QApplication::setApplicationName("QtQuick-App");
    QApplication::setApplicationVersion("0.1");
    QApplication app(argc, argv);
    // Register the C++ type so QML can do `import analytics 0.1` and use `Tracker`.
    // Must happen before the engine loads any QML that references it.
    qmlRegisterType<GAnalytics>("analytics", 0, 1, "Tracker");
    QQmlApplicationEngine engine(QUrl("qrc:/qml/MainWindow.qml"));
    // Blocks until the application quits; returns the event loop's exit code.
    return app.exec();
}
|
package com.shibuiwilliam.firebase_tflite_arcore
import android.Manifest
import android.content.pm.PackageManager
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.Matrix
import android.graphics.Rect
import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle
import android.os.Handler
import android.os.HandlerThread
import android.util.Log
import android.util.Rational
import android.util.Size
import android.view.Gravity
import android.view.Surface
import android.view.TextureView
import android.view.ViewGroup
import androidx.camera.core.*
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import android.view.View
import com.shibuiwilliam.firebase_tflite_arcore.common.*
import java.util.*
import android.os.Environment
import android.widget.*
import java.io.File
class SegmentationActivity : AppCompatActivity() {
private val TAG = "SegmentationActivity"
private val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS = arrayOf(
Manifest.permission.CAMERA,
Manifest.permission.INTERNET)
private var globals: Globals? = null
private lateinit var cameraTextureView: TextureView
private lateinit var segmentationView: SegmentationView
private lateinit var alphaSpinner: Spinner
private lateinit var colorSpinner: Spinner
private lateinit var porterduffSpinner: Spinner
private var alpha = 0
private var color = ""
private var porterDuff = ""
private var facingCameraX = Constants.FACING_CAMERAX
private var analyzerThread = HandlerThread("AnalysisThread")
override fun onCreate(savedInstanceState: Bundle?) {
    super.onCreate(savedInstanceState)
    // Bail out early when camera/internet permissions are missing; the system
    // calls onRequestPermissionsResult with the user's answer, and that handler
    // finishes the activity if permissions are still denied.
    if (!allPermissionsGranted()) {
        ActivityCompat.requestPermissions(
            this,
            REQUIRED_PERMISSIONS,
            REQUEST_CODE_PERMISSIONS
        )
        return
    }
    setContentView(R.layout.activity_segmentation)
    initializeGlobals()
    cameraTextureView = findViewById(R.id.cameraTextureView)
    segmentationView = findViewById(R.id.segmentation_view)
    segmentationView.makeVisible()
    // Initial bitmap shown in the segmentation overlay before any camera frame is
    // processed — presumably a placeholder; TODO confirm against SegmentationView.
    segmentationView.displayBitmap = BitmapFactory.decodeResource(resources, R.drawable.saturn)
    configureSpinner()
    // Defer CameraX startup until the TextureView has been laid out, so its
    // width/height (used for the preview resolution) are valid.
    cameraTextureView.post { startCameraX() }
    // Keep the preview transform in sync with layout changes (e.g. rotation).
    cameraTextureView.addOnLayoutChangeListener { _, _, _, _, _, _, _, _, _ ->
        updateTransform()
    }
}
// Lazily cache and initialise the application-wide Globals instance; subsequent
// calls are no-ops.
private fun initializeGlobals(){
    if (globals != null) return
    globals = application as Globals
    globals!!.initialize(this)
}
// Bind the CameraX preview and per-frame analysis use cases to this activity's
// lifecycle. Called once the TextureView has been laid out (see onCreate).
private fun startCameraX() {
    // Drop any previously bound use cases before rebinding.
    CameraX.unbindAll()

    val screenSize = Size(cameraTextureView.width, cameraTextureView.height)
    // Fixed 1:1 aspect ratio for the preview — TODO confirm this matches the
    // layout's expectations rather than the device screen ratio.
    val screenAspectRatio = Rational(1, 1)
    Log.i(TAG, "Screen size: (${screenSize.width}, ${screenSize.height}).")

    val previewConfig = buildPreviewConfig(screenSize,
        screenAspectRatio)
    val preview = Preview(previewConfig)
    preview.setOnPreviewOutputUpdateListener {
        // Detach and re-attach the TextureView so the new SurfaceTexture supplied
        // by CameraX is actually adopted by the view hierarchy.
        val parent = cameraTextureView.parent as ViewGroup
        parent.removeView(cameraTextureView)
        cameraTextureView.surfaceTexture = it.surfaceTexture
        parent.addView(cameraTextureView, 0)
        updateTransform()
    }

    val analyzerConfig = buildAnalyzerConfig()
    val imageAnalysis = ImageAnalysis(analyzerConfig)
    // Run segmentation on each analysed frame; rotationDegrees is unused here.
    imageAnalysis.analyzer = ImageAnalysis.Analyzer {
        image: ImageProxy, rotationDegrees: Int ->
        segmentAwait(image)
    }

    CameraX.bindToLifecycle(this, preview, imageAnalysis)
}
// Counter-rotate the TextureView about its centre so the camera preview stays
// upright for the current display rotation.
private fun updateTransform() {
    val rotationDegrees = when (cameraTextureView.display.rotation) {
        Surface.ROTATION_0 -> 0
        Surface.ROTATION_90 -> 90
        Surface.ROTATION_180 -> 180
        Surface.ROTATION_270 -> 270
        else -> return
    }
    val pivotX = cameraTextureView.width / 2f
    val pivotY = cameraTextureView.height / 2f
    val transform = Matrix().apply {
        postRotate(-rotationDegrees.toFloat(), pivotX, pivotY)
    }
    cameraTextureView.setTransform(transform)
}
// Wire up the three option spinners (alpha, colour, PorterDuff mode). Each one:
// binds an adapter over the corresponding Constants array, seeds the backing
// field with the Constants default, and forwards selections to the shared
// ImageSegmentation instance in Globals.
private fun configureSpinner(){
    // --- Alpha (overlay transparency) spinner ---
    alphaSpinner = findViewById(R.id.alpha_spinner)
    val alphaAdapter = ArrayAdapter(
        applicationContext,
        android.R.layout.simple_spinner_item,
        Constants.IMAGE_SEGMENTATION_ALPHA_ARRAY
    )
    alphaAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
    alphaSpinner.adapter = alphaAdapter
    alpha = Constants.IMAGE_SEGMENTATION_ALPHA
    alphaSpinner.onItemSelectedListener = object : AdapterView.OnItemSelectedListener{
        override fun onItemSelected(parent: AdapterView<*>?,
                                    view: View?,
                                    position: Int,
                                    id: Long) {
            val spinnerParent = parent as Spinner
            alpha = spinnerParent.selectedItem as Int
            // Assumes globals and imageSegmentation are initialised by onCreate
            // before the user can interact with the spinner.
            globals!!.imageSegmentation!!.setAlpha(alpha)
            Log.i(TAG, "Selected alpha ${alpha}")
        }
        override fun onNothingSelected(parent: AdapterView<*>?) {
            // Fall back to the default when the selection disappears.
            alpha = Constants.IMAGE_SEGMENTATION_ALPHA
        }
    }

    // --- Mask colour spinner ---
    colorSpinner = findViewById(R.id.color_spinner)
    val colorAdapter = ArrayAdapter(
        applicationContext,
        android.R.layout.simple_spinner_item,
        Constants.IMAGE_SEGMENTATION_COLOR_ARRAY
    )
    colorAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
    colorSpinner.adapter = colorAdapter
    color = Constants.IMAGE_SEGMENTATION_COLOR
    colorSpinner.onItemSelectedListener = object : AdapterView.OnItemSelectedListener{
        override fun onItemSelected(parent: AdapterView<*>?,
                                    view: View?,
                                    position: Int,
                                    id: Long) {
            val spinnerParent = parent as Spinner
            color = spinnerParent.selectedItem as String
            globals!!.imageSegmentation!!.setColor(color)
            Log.i(TAG, "Selected color config ${color}")
        }
        override fun onNothingSelected(parent: AdapterView<*>?) {
            color = Constants.IMAGE_SEGMENTATION_COLOR
        }
    }

    // --- PorterDuff blend-mode spinner ---
    porterduffSpinner = findViewById(R.id.porterduff_spinner)
    val porterduffAdapter = ArrayAdapter(
        applicationContext,
        android.R.layout.simple_spinner_item,
        Constants.IMAGE_SEGMENTATION_PORTERDUFF_ARRAY
    )
    porterduffAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
    porterduffSpinner.adapter = porterduffAdapter
    porterDuff = Constants.IMAGE_SEGMENTATION_PORTERDUFF
    porterduffSpinner.onItemSelectedListener = object : AdapterView.OnItemSelectedListener{
        override fun onItemSelected(parent: AdapterView<*>?,
                                    view: View?,
                                    position: Int,
                                    id: Long) {
            val spinnerParent = parent as Spinner
            porterDuff = spinnerParent.selectedItem as String
            globals!!.imageSegmentation!!.setPorterDuff(porterDuff)
            Log.i(TAG, "Selected porterduff mode ${porterDuff}")
        }
        override fun onNothingSelected(parent: AdapterView<*>?) {
            porterDuff = Constants.IMAGE_SEGMENTATION_PORTERDUFF
        }
    }
}
private fun buildPreviewConfig(screenSize: Size,
screenAspectRatio: Rational): PreviewConfig {
return PreviewConfig
.Builder()
.apply {
setLensFacing(facingCameraX)
setTargetResolution(screenSize)
setTargetAspectRatio(screenAspectRatio)
setTargetRotation(windowManager.defaultDisplay.rotation)
setTargetRotation(cameraTextureView.display.rotation)
}.build()
}
    /** Builds the CameraX image-analysis configuration. */
    private fun buildAnalyzerConfig(): ImageAnalysisConfig {
        return ImageAnalysisConfig.Builder().apply {
            // NOTE(review): the analyzer thread is started here as a side effect
            // of building the config; calling this method twice would start the
            // thread twice — confirm it is only invoked once per activity.
            analyzerThread.start()
            setCallbackHandler(Handler(analyzerThread.looper))
            // Always analyze the most recent frame, dropping stale ones.
            setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
        }.build()
    }
fun segmentAwait(image: ImageProxy){
if (globals!!.imageSegmentation == null){
return
}
val inputBitmap = ImageUtils.imageToBitmap(image)
Log.i(TAG, "input size: ${inputBitmap.width}, ${inputBitmap.height}")
val scaledInputBitmap = Bitmap.createScaledBitmap(inputBitmap,
Constants.IMAGE_SEGMENTATION_DIM_SIZE,
Constants.IMAGE_SEGMENTATION_DIM_SIZE,
true)
val segmented = globals!!
.imageSegmentation!!
.segmentAwait(scaledInputBitmap)
if(segmented == null){
return
}
val results = globals!!
.imageSegmentation!!
.extractSegmentation(segmented)
val segmentedBitmap = globals!!
.imageSegmentation!!
.postProcess(results)
val output = globals!!
.imageSegmentation!!
.maskWithSegmentation(inputBitmap,
segmentedBitmap)
val imageBitmap = ImageUtils.scaleBitmapWithRotation(output,
segmentationView)
segmentationView.set(imageBitmap)
output.recycle()
segmentedBitmap.recycle()
scaledInputBitmap.recycle()
inputBitmap.recycle()
}
override fun onRequestPermissionsResult(
requestCode: Int,
permissions: Array<String>,
grantResults: IntArray) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (!allPermissionsGranted()) {
Utils.logAndToast(this,
TAG,
"Permissions not granted by the user.",
"e",
Toast.LENGTH_SHORT,
Gravity.TOP)
finish()
}
}
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
for (permission in REQUIRED_PERMISSIONS) {
if (ContextCompat.checkSelfPermission(
this, permission) != PackageManager.PERMISSION_GRANTED) {
Utils.logAndToast(this,
TAG,
"Permissions not granted by the user.",
"e",
Toast.LENGTH_SHORT,
Gravity.TOP)
return false
}
}
Log.i(TAG, "Permitted to use camera")
return true
}
    override fun onStop() {
        super.onStop()
        // Interrupt any in-flight analysis when the activity leaves the foreground.
        analyzerThread.interrupt()
    }
    override fun onDestroy() {
        super.onDestroy()
        // Let the analyzer's looper drain pending messages, then terminate.
        analyzerThread.quitSafely()
    }
}
|
import 'package:asset_repository/asset_repository.dart';
import 'package:flutter/material.dart';
import 'package:polist/detail_page.dart';
/// A tappable card row showing an asset's thumbnail, display name and
/// description; tapping navigates to the asset's detail page.
class HomeItem extends StatelessWidget {
  HomeItem({Key key, @required Asset asset})
      : assert(asset != null),
        _asset = asset,
        super(key: key);

  final Asset _asset;

  @override
  Widget build(BuildContext context) {
    final thumbnail = ClipRRect(
      borderRadius: BorderRadius.circular(8),
      child: Image.network(
        _asset.thumbnailUrl,
        fit: BoxFit.fitHeight,
        width: 56,
        height: 56,
      ),
    );
    final details = Expanded(
      child: ListTile(
        title: Text(_asset.displayName),
        subtitle: Text(_asset.description ?? ""),
      ),
    );
    return Card(
      child: InkWell(
        onTap: () => Navigator.of(context).push(DetailPage.route(_asset)),
        child: Container(
          padding: EdgeInsets.fromLTRB(16, 8, 16, 8),
          child: Row(children: [thumbnail, details]),
        ),
      ),
    );
  }
}
|
// Copyright by Barry G. Becker, 2017. Licensed under MIT License: http://www.opensource.org/licenses/MIT
package com.barrybecker4.simulation.verhulst
/**
* Everything we need to know about a population of creatures.
* @author Barry Becker
*/
abstract class Population() {
  // Current per-step birth rate; mutated by the simulation UI.
  var birthRate = .0
  private var population = .0

  // NOTE(review): reset() is invoked from the constructor and calls the
  // abstract getInitialPopulation/getInitialBirthRate; in Scala this runs
  // before any subclass constructor code, so implementations must not depend
  // on subclass constructor state — confirm subclasses return constants.
  reset()

  /** Restore the population and birth rate to their initial values. */
  def reset(): Unit = {
    population = getInitialPopulation
    birthRate = getInitialBirthRate
  }

  /** Display name for this creature population. */
  def getName: String
  def getPopulation: Double = population
  def setPopulation(value: Double): Unit = { population = value}
  /** Number of creatures at the start of the simulation. */
  def getInitialPopulation: Double
  /** Birth rate at the start of the simulation. */
  def getInitialBirthRate: Double
  /** Upper bound for the birth-rate parameter. */
  def getMaxBirthRate = 3.0
}
|
#!/usr/bin/env bash
# Train the parse generator model.
# Data/dictionary paths are relative to the repo root; run this script there.
# Checkpoints go to ./model, generated samples and logs to ./output_pg.
python train_parse_generator.py \
--model_dir ./model \
--output_dir ./output_pg \
--dictionary_path ./data/dictionary.pkl \
--train_data_path ./data/train_data.h5 \
--valid_data_path ./data/valid_data.h5 \
--max_sent_len 40 \
--max_tmpl_len 100 \
--max_synt_len 160 \
--word_dropout 0.2 \
--n_epoch 5 \
--batch_size 32 \
--lr 1e-4 \
--weight_decay 1e-5 \
--log_interval 250 \
--gen_interval 5000 \
--save_interval 10000 \
--temp 0.5 \
--seed 0
|
import { getSession, withApiAuthRequired } from "@auth0/nextjs-auth0";
import { addUserUserGoal } from "../../utils/fauna";
// POST-only endpoint: creates a user goal for the authenticated Auth0 user.
// withApiAuthRequired guarantees a valid session before handler runs.
export default withApiAuthRequired(async function handler(req, res) {
  // Reject disallowed verbs before doing any session or database work
  // (the original fetched the session even for non-POST requests).
  if (req.method !== "POST") {
    return res
      .status(405)
      .json({ error: { message: "HTTP Method not allowed" } });
  }
  const { user } = getSession(req, res);
  // user.sub is the Auth0 subject identifier for the caller.
  const userGoal = await addUserUserGoal(user.sub, req.body);
  return res.status(200).json(userGoal);
});
|
#!/usr/bin/env ruby
# encoding: utf-8
# frozen_string_literal: true
require 'spec_helper'
# Specs for the Orders API namespace: creating an order against a stubbed
# successful response and checking the attribute mapping onto Order.
describe CloudPayments::Namespaces::Orders do
  subject{ described_class.new(CloudPayments.client) }

  describe '#create' do
    # Full set of order attributes supported by the orders/create endpoint.
    let(:attributes) do
      {
        amount: 10.0,
        currency: 'RUB',
        description: 'Оплата на сайте example.com',
        email: 'client@test.local',
        require_confirmation: true,
        send_email: false,
        invoice_id: 'invoice_100',
        account_id: 'account_200',
        phone: '+7(495)765-4321',
        send_sms: false,
        send_whats_app: false
      }
    end

    context do
      # Stub the HTTP call with the canned successful fixture.
      before{ stub_api_request('orders/create/successful').perform }

      specify{ expect(subject.create(attributes)).to be_instance_of(CloudPayments::Order) }

      context do
        let(:sub){ subject.create(attributes) }

        # Attribute mapping comes from the fixture payload.
        specify{ expect(sub.id).to eq('f2K8LV6reGE9WBFn') }
        specify{ expect(sub.amount).to eq(10.0) }
        specify{ expect(sub.currency).to eq('RUB') }
        specify{ expect(sub.currency_code).to eq(0) }
        specify{ expect(sub.email).to eq('client@test.local') }
        specify{ expect(sub.description).to eq('Оплата на сайте example.com') }
        specify{ expect(sub.require_confirmation).to eq(true) }
        specify{ expect(sub.url).to eq('https://orders.cloudpayments.ru/d/f2K8LV6reGE9WBFn') }
      end
    end
  end
end
|
module I18n
  # Implemented to support method call on translation keys
  #
  # Extends the stock interpolation pattern with a fourth alternative,
  # %{key.method}, which calls +method+ on the looked-up value.
  INTERPOLATION_WITH_METHOD_PATTERN = Regexp.union(
    /%%/,
    /%\{(\w+)\}/,
    /%<(\w+)>(.*?\d*\.?\d*[bBdiouxXeEfgGcps])/,
    /%\{(\w+)\.(\w+)\}/
  )

  class << self
    # Interpolate +values+ into +string+, additionally supporting the
    # %{key.method} form resolved by build_value below.
    #
    # NOTE(review): state flows between these helpers through module-level
    # instance variables (@last_match, @key), so concurrent interpolation
    # from multiple threads can interleave — confirm this only runs in
    # single-threaded contexts.
    def interpolate_hash(string, values)
      string.gsub(INTERPOLATION_WITH_METHOD_PATTERN) do |match|
        if match == '%%'
          '%'
        else
          @last_match = Regexp.last_match
          check_value_valid(string, values)
        end
      end
    end

    # Resolve the matched interpolation key against +values+; raises
    # MissingInterpolationArgument when the key is absent.
    def check_value_valid(string, values)
      # Capture group 1 (%{key}), 2 (%<key>fmt) or 4 (%{key.method}).
      @key = (@last_match[1] || @last_match[2] || @last_match[4]).to_sym
      if values.key?(@key)
        value = values[@key]
        # Allow procs/lambdas as translation values.
        value = value.call(values) if value.respond_to?(:call)
        build_value(value)
      else
        raise(MissingInterpolationArgument.new(values, string, @key))
      end
    end

    # Format or transform the resolved value depending on which
    # interpolation form matched.
    def build_value(value)
      if @last_match[3]
        # %<key>fmt — apply the captured sprintf format string.
        sprintf("%#{@last_match[3]}", value)
      elsif @last_match[5]
        # %{key.method} — invoke the captured method on the value.
        value.send(@last_match[5])
      else
        value
      end
    end
  end
end
|
require 'spec_helper'
# Specs for the top-level Typhoeus module API: configuration, request
# stubbing, global callbacks, connection blocking, and HTTP verb shortcuts.
describe Typhoeus do
  before(:each) do
    # Reset global config so examples don't leak state into each other.
    Typhoeus.configure { |config| config.verbose = false; config.block_connection = false }
  end

  describe ".configure" do
    it "yields config" do
      Typhoeus.configure do |config|
        expect(config).to be_a(Typhoeus::Config)
      end
    end

    it "sets values config" do
      Typhoeus::Config.verbose = true
      expect(Typhoeus::Config.verbose).to be_true
    end
  end

  describe ".stub" do
    let(:base_url) { "www.example.com" }

    shared_examples "lazy response construction" do
      it "calls the block to construct a response when a request matches the stub" do
        expected_response = Typhoeus::Response.new
        Typhoeus.stub(base_url) do |request|
          expected_response
        end
        response = Typhoeus.get(base_url)
        expect(response).to be(expected_response)
      end
    end

    context "when no similar expectation exists" do
      include_examples "lazy response construction"

      it "returns expectation" do
        expect(Typhoeus.stub(base_url)).to be_a(Typhoeus::Expectation)
      end

      it "adds expectation" do
        Typhoeus.stub(:get, "")
        expect(Typhoeus::Expectation.all).to have(1).item
      end
    end

    context "when similar expectation exists" do
      include_examples "lazy response construction"

      let(:expectation) { Typhoeus::Expectation.new(base_url) }
      before { Typhoeus::Expectation.all << expectation }

      it "returns expectation" do
        expect(Typhoeus.stub(base_url)).to be_a(Typhoeus::Expectation)
      end

      it "doesn't add expectation" do
        Typhoeus.stub(base_url)
        expect(Typhoeus::Expectation.all).to have(1).item
      end
    end
  end

  describe ".before" do
    it "adds callback" do
      Typhoeus.before { true }
      expect(Typhoeus.before).to have(1).item
    end
  end

  describe ".with_connection" do
    it "executes block with block connection is false" do
      Typhoeus.with_connection { expect(Typhoeus::Config.block_connection).to be(false) }
    end

    it "sets block connection back to previous value" do
      Typhoeus::Config.block_connection = true
      Typhoeus.with_connection {}
      expect(Typhoeus::Config.block_connection).to be(true)
    end

    it "returns result of block" do
      expect(Typhoeus.with_connection { "123" }).to eq("123")
    end
  end

  # Exercise each HTTP verb shortcut against the local test server.
  [:get, :post, :put, :delete, :head, :patch, :options].each do |name|
    describe ".#{name}" do
      let(:response) { Typhoeus::Request.method(name).call("http://localhost:3001") }

      it "returns ok" do
        expect(response.return_code).to eq(:ok)
      end

      # HEAD responses carry no body to inspect.
      unless name == :head
        it "makes #{name.to_s.upcase} requests" do
          expect(response.response_body).to include("\"REQUEST_METHOD\":\"#{name.to_s.upcase}\"")
        end
      end
    end
  end
end
|
// ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
namespace Microsoft.Azure.Commands.Common.Compute.Tests
{
using Compute.Version2016_04_preview;
using Microsoft.WindowsAzure.Commands.ScenarioTest;
using Xunit;
using System.Linq;
namespace Version2016_04_preview
{
public class ComputeManagementClientShould
{
private IComputeManagementClient Client {get;}
public ComputeManagementClientShould()
{
var credManager = CredentialManager.FromServicePrincipalEnvVariable();
Client = new ComputeManagementClient(credManager.TokenCredentials)
{
SubscriptionId = credManager.SubscriptionId
};
}
[Fact]
[Trait(Category.RunType, Category.LiveOnly)]
public void ListVirtualMachine()
{
var vmClient = Client.VirtualMachines;
var vms = vmClient.ListAll().ToList();
Assert.True(vms.Count > 0);
}
[Fact]
[Trait(Category.RunType, Category.LiveOnly)]
public void ListVirtualMachineSizes()
{
var vmSizeClient = Client.VirtualMachineSizes;
var vmSizes = vmSizeClient.List("WestUs").ToList();
Assert.True(vmSizes.Count > 0);
}
[Fact]
[Trait(Category.RunType, Category.LiveOnly)]
public void ListVirtualMachineImagePublishers()
{
var vmImagesClient = Client.VirtualMachineImages;
var vmImagePublisers = vmImagesClient.ListPublishers("WestUs").ToList();
Assert.True(vmImagePublisers.Count > 0);
}
[Fact]
[Trait(Category.RunType, Category.LiveOnly)]
public void ListDisks()
{
var disksClient = Client.Disks;
var disks = disksClient.List().ToList();
Assert.True(disks.Count > 0);
}
}
}
}
|
<!-- Do not edit this file. It is automatically generated by API Documenter. -->
[Home](./index.md) > [bullmq](./bullmq.md) > [JobsOptions](./bullmq.jobsoptions.md) > [priority](./bullmq.jobsoptions.priority.md)
## JobsOptions.priority property
Ranges from 1 (highest priority) to MAX\_INT (lowest priority). Note that using priorities has a slight impact on performance, so do not use it if not required.
<b>Signature:</b>
```typescript
priority?: number;
```
|
package com.telenor.possumlib.abstractservices;
import android.content.Intent;
import com.telenor.possumlib.functionality.AmazonFunctionality;
import com.telenor.possumlib.interfaces.IAmazonIdentityConfirmed;
/**
* Service dealing with amazon's S3 service
*/
public abstract class AbstractAmazonService extends AbstractBasicService implements IAmazonIdentityConfirmed {
    /** Shared helper for Amazon Cognito/S3 interaction; created in onCreate. */
    protected AmazonFunctionality amazonFunctionality;

    @Override
    public void onCreate() {
        super.onCreate();
        amazonFunctionality = new AmazonFunctionality(this, this);
    }

    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        // Fix: a restarted sticky service can be delivered a null intent; the
        // original NPE'd on getStringExtra. Treat that like a missing pool id
        // so the failure message stays explicit.
        String identityPoolId = intent == null ? null : intent.getStringExtra("identityPoolId");
        if (identityPoolId == null) throw new RuntimeException("Missing identityPoolId on Amazon Service start");
        amazonFunctionality.setCognitoProviderWithIdentityPoolId(identityPoolId);
        return super.onStartCommand(intent, flags, startId);
    }
}
|
const fs = require('fs');
// Load the mock tour fixtures once at module load (synchronous read is fine
// here because it only runs during startup).
const tour = JSON.parse(fs.readFileSync(`${__dirname}/../data/tours.json`, 'utf-8'));
// user相关API模拟
exports.getAllUser = (req, res) => {
console.log('返回所有的数据!');
res.status(200).json({
status: 'success',
nowTime: req.nowTime,
data: 'all user!',
});
};
exports.getItemUser = (req, res) => {
const { id } = req.params;
const tourItem = tour.find((item) => item._id === id);
if (!id || !tourItem) {
res.status(404).json({
status: 'fail',
data: 'Not find !',
});
}
res.status(200).json({
status: 'success',
data: 'user item !',
});
};
exports.createItemUser = (req, res) => {
const { id } = req.params;
const tourItem = tour.find((item) => item._id === id);
if (!id || !tourItem) {
res.status(404).json({
status: 'fail',
data: 'Create fail !',
});
}
res.status(200).json({
status: 'success',
data: {
tour: 'Create user success !',
},
});
};
exports.updateItemUser = (req, res) => {
const { id } = req.params;
const tourItem = tour.find((item) => item._id === id);
if (!id || !tourItem) {
res.status(404).json({
status: 'fail',
data: 'Update fail !',
});
}
res.status(200).json({
status: 'success',
data: {
tour: 'Update user success !',
},
});
};
exports.deleteItemUser = (req, res) => {
const { id } = req.params;
const tourItem = tour.find((item) => item._id === id);
if (!id || !tourItem) {
res.status(404).json({
status: 'fail',
data: 'Delete fail !',
});
}
res.status(204).json({
status: 'success',
data: null,
});
};
|
package com.daasuu.sample;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import com.daasuu.mp4compose.filter.GlOverlayFilter;
/**
* Created by sudamasayuki on 2018/01/07.
*/
/**
 * Overlay filter that draws a fixed bitmap at the top-left corner of every
 * frame. The bitmap is recycled when the filter is released.
 */
public class GlBitmapOverlaySampleFilter extends GlOverlayFilter {

    private Bitmap bitmap;

    public GlBitmapOverlaySampleFilter(Bitmap bitmap) {
        this.bitmap = bitmap;
    }

    @Override
    protected void drawCanvas(Canvas canvas) {
        final Bitmap overlay = bitmap;
        // Skip drawing once the bitmap is gone or recycled.
        if (overlay == null || overlay.isRecycled()) {
            return;
        }
        canvas.drawBitmap(overlay, 0, 0, null);
    }

    @Override
    public void release() {
        final Bitmap overlay = bitmap;
        if (overlay != null && !overlay.isRecycled()) {
            overlay.recycle();
        }
    }
}
|
// Converter for embedded voting widgets, (de)serialized as
// <object type="x-my/voting"> elements. `answers` and `fields` are stored
// as JSON strings in element attributes.
const VotingConverter = {
  type: 'x-my/voting',
  tagName: 'object',

  matchElement: function(el) {
    return el.is('object[type="x-my/voting"]')
  },

  // DOM element -> document node
  import: function(el, node) {
    node.id = el.attr('id')
    node.question = el.attr('question')
    node.answers = JSON.parse(el.attr('answers'))
    node.fields = JSON.parse(el.attr('fields'))
  },

  // document node -> DOM element
  // Fix: removed the unused `const $$ = converter.$$` local; the converter
  // parameter stays for interface compatibility with the framework.
  export: function(node, el, converter) {
    el.attr({
      id: node.id,
      type: 'x-my/voting',
      question: node.question,
      answers: JSON.stringify(node.answers),
      fields: JSON.stringify(node.fields)
    })
  }
}

export {VotingConverter}
|
/*
* Copyright (c) 2018 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of Texas Instruments Incorporated nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* ======== UARTMSP432.syscfg.js ========
*/
"use strict";
/* $super is used to call generic module's methods */
let $super = {};
/* get Common /ti/drivers utility functions */
let Common = system.getScript("/ti/drivers/Common.js");
let Power = system.getScript("/ti/drivers/Power");
let intPriority = Common.newIntPri()[0];
intPriority.name = "interruptPriority";
intPriority.displayName = "Interrupt Priority";
intPriority.description = "UART peripheral interrupt priority";
let logError = Common.logError;
let logInfo = Common.logInfo;
/*
* ======== devSpecific ========
* Device-specific extensions to be added to base UART configuration
*/
let devSpecific = {
config:
[
/*
* This field is inter-related with the baudRates field. Ideally
* the user specifies the baudRates and leaves this field as 'Any',
* which allows the config tool to solve for clock source. However,
* for expert users, we allow clockSource to be locked which will
* 1) disable auto solving based on baudRates, and potentially
* 2) prevent the configuration from migrating to an alternate
* board/device which does not support the locked clockSource
* value.
*/
{
name : "clockSource",
displayName : "Clock Source",
default : "Any",
description : "If 'Any' is selected, an appropriate clock source " +
"will be auto selected. For maximum portability to " +
"other devices, it is recommended that this option " +
"be left in it's default state of 'Any'.",
/* These are ordered by preference. If multiple sources can
* satisfy the baud rate, then the first one in the list
* that can satisfy the baud rate will be selected.
*/
options:
[
{ name: "Any" },
{ name: "ACLK" },
{ name: "SMCLK" }
// { name : "HSMCLK" }, // Not Yet Supported by Driverlib
// { name : "MCLK" }, // Not Yet Supported by Driverlib
]
},
{
name : "ringBufferSize",
displayName : "Ring Buffer Size",
description : "Number of bytes in the ring buffer",
default : 32
},
{
name : "bitOrder",
displayName : "Bit Order",
default : "LSB_FIRST",
description : "Almost always LSB First",
options : [
{
name : "MSB_FIRST",
},
{
name : "LSB_FIRST",
}
]
},
intPriority
],
/* override generic pin requirements */
pinmuxRequirements : pinmuxRequirements,
/* override device-specific templates */
templates: {
boardc : "/ti/drivers/uart/UARTMSP432.Board.c.xdt",
boardh : "/ti/drivers/uart/UART.Board.h.xdt"
},
/* override generic validation with ours */
validate : validate,
filterHardware : filterHardware,
maxInstances : 4,
autoAssignClockSource : autoAssignClockSource,
genBaudRateTable : genBaudRateTable
};
/*
* ======== autoAssignClockSource ========
* Assign a clock source for a UART.
*
* Assign the clock source based on a required baud rate and
* known frequencies of the clock sources from the Power module.
*
* param baudRates - The Baud Rate for the UART
*
* returns clockSourceName - The name of the most appropriate clock source.
*/
function autoAssignClockSource(baudRates)
{
/* Find the clock source options from the module */
let clockSources;
for (let idx = 0; idx < devSpecific.config.length; idx++) {
if (devSpecific.config[idx].name === 'clockSource') {
clockSources = devSpecific.config[idx].options;
}
}
if (clockSources === undefined) {
throw new Error('Uart Metacode is not in sync with Uart Metadata. ');
}
let choices = [];
/* Walk through the clock source options for this uart.
* This walks through the module exports, not the instance
*/
for (let i = 0; i < clockSources.length; i++) {
let optionName = clockSources[i].name;
if (optionName === 'Any') {
continue;
}
let frequencies = Power.getClockFrequencies(optionName);
if (frequencies.length === 0) {
throw new Error(optionName +
' cannot be found in Performance_levels tables. ');
}
let numGoodFreqs = 0;
for (let f = 0; f < frequencies.length; f++) {
for (let bidx = 0; bidx < baudRates.length; bidx++) {
if (frequencies[f] >= baudRates[bidx]) {
numGoodFreqs++;
}
}
}
/* If we have a clock where all freqs cover all baudRates, then done */
if (numGoodFreqs == (frequencies.length * baudRates.length)) {
return optionName;
}
/* else save the coverage factor so that a later choice can be made */
choices.push({
clock: optionName,
quality: (numGoodFreqs / (frequencies.length * baudRates.length))
});
}
let bestIdx = 0;
for (let c = 1; c < choices.length; c++) {
if (choices[c].quality > choices[bestIdx].quality) {
bestIdx = c;
}
}
return choices[bestIdx].clock;
}
/*
 * ======== genBaudRateTable ========
 * Generate a baud rate table for a UART instance
 *
 * Given baud rates and a clock source, generate baud rate table entries for
 * the cross product of baud rates x clock source frequencies.
 *
 * param baudRates - The Baud Rates for the UART
 * param clockSource - The name of the clock source.
 *
 * returns baudRateTable - The baud rate table entries for this UART instance.
 */
function genBaudRateTable(baudRates, clockSource)
{
    let frequencies = Power.getClockFrequencies(clockSource);
    if (frequencies.length === 0) {
        throw new Error(clockSource +
            ' cannot be found in Performance_Levels tables. ');
    }

    let baudRateTable = [];
    /* One divider entry per (baud, source-frequency) pair. */
    for (let b = 0; b < baudRates.length; b++) {
        for (let j = 0; j < frequencies.length; j++) {
            let res = eusci_calcBaudDividers(frequencies[j], baudRates[b]);
            res.baud = baudRates[b];
            res.freq = frequencies[j];
            baudRateTable.push(res);
        }
    }

    return baudRateTable;
}
/*
 * ======== bitPosition ========
 * Return bit `position` of integer `value`, as the number 1 or 0.
 *
 * Fix: the original doc-comment claimed a boolean return, but the function
 * has always returned 1/0 (callers sum the result in
 * eusci_calcBaudDividers), so the numeric contract is kept and documented.
 * The branchy mask test is replaced by an equivalent shift-and-mask.
 *
 * param value - The integer to check
 * param position - The position of the bit to check (0 = LSB).
 *
 * returns 1 if the bit at `position` is set, 0 otherwise.
 */
function bitPosition(value, position)
{
    return ((value >>> position) & 1);
}
/*
 * ======== eusci_calcBaudDividers ========
 * Computes the eUSCI_UART register settings for a given clock and baud rate
 *
 * This function returns a JavaScript object containing the fields:
 *     UCOS16: the oversampling bit (0 or 1)
 *     UCBRx: the Baud Rate Control Word
 *     UCFx: the First modulation stage select (UCBRFx)
 *     UCSx: the Second modulation stage select (UCBRSx)
 *     maxAbsError: the maximum TX error (percent) for the setting above
 *
 * The first four field names match the names used in Table 18-5,
 * "Recommended Settings for Typical Crystals and Baudrates", of the
 * MSP430FR57xx Family User's Guide (SLAU272A).
 *
 * param clockRate - The input clock frequency
 * param baudRate - The desired output baud rate
 *
 * returns baudRateTableEntry - A baud rate table entry
 */
function eusci_calcBaudDividers(clockRate, baudRate)
{
    let result = { UCOS16: 0, UCBRx: 0, UCFx: 0, UCSx: 0, maxAbsError: 0 };
    let N = Math.floor(clockRate / baudRate);
    let baudPeriod = 1 / baudRate;
    let clockPeriod = 1 / clockRate;
    let minAbsError = 100000;

    /* Search all 256 second-stage modulation patterns for the one whose
     * worst-case bit-timing error over an 11-bit frame is smallest.
     */
    for (let jj = 0; jj <= 255; jj++) {
        let maxAbsErrorInByte = 0;
        let count = 0;
        for (let ii = 0; ii <= 10; ii++) {
            /* Modulation adds one extra clock on bits selected by jj. */
            count += N + bitPosition(jj, 7 - (ii % 8));
            let error = (ii + 1) * baudPeriod - count * clockPeriod;
            error = Math.abs(error);
            if (error > maxAbsErrorInByte) {
                maxAbsErrorInByte = error;
            }
        }
        if (maxAbsErrorInByte < minAbsError) {
            minAbsError = maxAbsErrorInByte;
            result.UCSx = jj;
        }
    }

    /* Oversampling mode is only usable when the divider is large enough. */
    if (N < 20) {
        result.UCOS16 = 0;
        result.UCBRx = N;
        result.UCFx = 0;
    }
    else {
        result.UCOS16 = 1;
        result.UCBRx = Math.floor(N / 16);
        result.UCFx = N - (result.UCBRx * 16);
    }
    /* Express the error as a percentage of the bit period. */
    result.maxAbsError = minAbsError * baudRate * 100;
    return (result);
}
/*
 * ======== pinmuxRequirements ========
 * Control RX, TX pin usage by the user specified dataDirection.
 *
 * param inst - UART instance
 *
 * returns req[] - array of requirements needed by inst
 */
function pinmuxRequirements(inst)
{
    const txReq = {
        name          : "txPin",       /* config script name */
        displayName   : "TX Pin",      /* GUI name */
        interfaceNames: ["TXD"]        /* pinmux tool name */
    };
    const rxReq = {
        name          : "rxPin",
        displayName   : "RX Pin",
        interfaceNames: ["RXD"]
    };

    /* A receive-only UART needs no TX pin, and vice versa. */
    const resources = [];
    if (inst.dataDirection != 'Receive Only') {
        resources.push(txReq);
    }
    if (inst.dataDirection != 'Send Only') {
        resources.push(rxReq);
    }

    return [{
        name         : "uart",
        displayName  : "UART Peripheral",
        interfaceName: "UART",
        resources    : resources,
        signalTypes  : {
            txPin: ['UART_TXD'],
            rxPin: ['UART_RXD']
        }
    }];
}
/*
* ======== filterHardware ========
* Check 'component' signals for compatibility with UART
*
* param component - hardware object describing signals and
* resources they're attached to
*
* returns Boolean indicating whether or not to allow the component to
* be assigned to an instance's $hardware config
*/
function filterHardware(component)
{
return (Common.typeMatches(component.type, ["UART"]));
}
/*
* ======== validate ========
* Validate this instance's configuration
*
* param inst - UART instance to be validated
* param validation - object to hold detected validation issues
*/
function validate(inst, validation)
{
let baudRates = inst.baudRates;
let clockName = inst.clockSource;
if (clockName == 'Any') {
clockName = autoAssignClockSource(baudRates);
}
validateBaudRates(inst, validation, clockName);
if (inst.ringBufferSize < 0) {
logError(validation, inst, "ringBufferSize", "value must be positive");
}
/* don't allow an unreasonably large ring buffer size */
if (inst.ringBufferSize > 1024) {
logInfo(validation, inst, "ringBufferSize",
"consider reducing size for space optimization");
}
if ($super.validate) {
$super.validate(inst, validation);
}
}
/*
 * ======== validateBaudRates ========
 * Validate the clockName vs. the selected baud rates.
 *
 * param inst       - UART instance to be validated
 * param validation - Issue reporting object
 * param clockName  - The selected clock source name
 */
function validateBaudRates(inst, validation, clockName)
{
    let frequencies = Power.getClockFrequencies(clockName);
    let baudRates = inst.baudRates;
    let message = '';

    /* Count (frequency, baud) pairs where the source is fast enough. */
    let numGoodFreqs = 0;
    for (let f = 0; f < frequencies.length; f++) {
        for (let b = 0; b < baudRates.length; b++) {
            if (frequencies[f] >= baudRates[b]) {
                numGoodFreqs++;
            }
        }
    }

    /* If clock has all freqs covering all baudRates, then no problems */
    if (numGoodFreqs == (frequencies.length * baudRates.length)) {
        return; // no error or warning
    }

    /* if the clock source cannot support the baudRates at all */
    if (numGoodFreqs == 0) {
        message = "Clock source " + clockName +
            " cannot support any of the specified baud rates";
        logError(validation, inst, ["baudRates","clockSource"], message);
    }

    /* if the clock source can support some baudRates at only some
     * performance levels, but not all of them
     */
    else {
        message = "Clock source " + clockName +
            " can only support some baud rates at some performance levels.";
        logInfo(validation, inst, ["baudRates","clockSource"], message);
    }
}
/*
 * ======== extend ========
 * Merge the device-specific configuration into the generic UART module.
 */
function extend(base)
{
    /* keep a reference to the generic module so overrides can delegate */
    $super = base;

    /* device-specific configs follow the generic ones */
    devSpecific.config = base.config.concat(devSpecific.config);

    /* un-hide the generic baudRates config for this device */
    for (const cfg of devSpecific.config) {
        if (cfg.name == "baudRates") {
            cfg.hidden = false;
            break;
        }
    }

    /* merge and overwrite base module attributes */
    return (Object.assign({}, base, devSpecific));
}
/*
 * ======== exports ========
 * Export device-specific extensions to base exports
 */
exports = {
    /* required function, called by base UART module */
    extend: extend
};
|
/*eslint no-unused-vars: ["error", { "vars": "local" }]*/
//http://stackoverflow.com/a/14853974
// Deep-compares two arrays element by element (loose equality for scalars).
window.arrayEquals = function arrayEquals(a, b) {
    'use strict';
    // if the other array is a falsy value, return
    if (!b)
        return false;

    // compare lengths - can save a lot of time
    if (a.length != b.length)
        return false;

    for (var i = 0, l = a.length; i < l; i++) {
        // Check if we have nested arrays
        if (a[i] instanceof Array && b[i] instanceof Array) {
            // Fix: recurse via the named function expression. The original
            // called a[i].equals(b[i]), but Array.prototype.equals is never
            // defined here, so nested-array comparison threw a TypeError.
            if (!arrayEquals(a[i], b[i]))
                return false;
        }
        else if (a[i] != b[i]) {
            // Warning - two different object instances will never be equal: {x:20} != {x:20}
            return false;
        }
    }
    return true;
};
|
#!/bin/bash
# Run the package's zkg test suite, then install it. On failure, dump the
# stdout/stderr that zkg captured from the test command and exit non-zero.
set -e

function debug_and_die {
    OUTPUT_PATH=$HOME/.zkg/testing/external_dns/clones/external_dns
    # Fix: quote path expansions so the checks survive paths with spaces.
    if [ -s "$OUTPUT_PATH/zkg.test_command.stdout" ]; then
        echo "zkg test command stdout"
        echo "-----------------------"
        cat "$OUTPUT_PATH/zkg.test_command.stdout"
    fi
    if [ -s "$OUTPUT_PATH/zkg.test_command.stderr" ]; then
        echo "zkg test command stderr"
        echo "-----------------------"
        cat "$OUTPUT_PATH/zkg.test_command.stderr"
    fi
    exit 1
}

# Prefer a system zeek installation if one is present.
export PATH=/usr/local/zeek/bin:/opt/zeek/bin:/opt/zeek-nightly/bin:$PATH

echo "Running zkg test..."
zkg test "$PWD" || debug_and_die
echo "Tests succeeded. Running zkg install..."
zkg install --force --skiptests "$PWD" || debug_and_die
echo "Install succeeded."
|
# ImagePlot
[](https://travis-ci.org/rened/ImagePlot.jl)
A very simple package for drawing on top of images, based on work by @cdsousa.
#### Let's draw a sun
```jl
using ImagePlot, TestImages
img = plot(testimage("lighthouse"), 30*randn(2,10000).+[80;600])
img = plot(img, [60 60 90; 580 620 600], radius = 6, fillcolor = "black")
```

|
# encoding: UTF-8
import base64
import hashlib
import hmac
import json
import urllib
import urllib.parse
from collections import OrderedDict
from datetime import datetime

import pandas as pd
import requests
class HuobiAgent:
'''
symbol: btcusdt, bchbtc
period: 1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year
'''
    def __init__(self):
        """Initialize API credentials and endpoint configuration.

        WARNING(review): API credentials are hard-coded in source; they
        should be loaded from the environment or a secrets store and the
        committed keys rotated.
        """
        # Huobi access/secret key pair used to sign private requests.
        self.AccessKeyId = 'b720f47b-8acefaa6-fdd41817-a6dd7'
        self.PrivateKey = 'b78240de-03ade0eb-07c00462-cabe0'
        # Signature scheme expected by the Huobi REST API (v2 signing).
        self.SignatureMethod = 'HmacSHA256'
        self.SignatureVersion = 2
        self.BaseUrl = 'https://api.huobipro.com'
# do rest request
def do_request(self, api, param):
# prepare request data
URL = self.BaseUrl + api
# request header
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64) " \
"AppleWebKit/537.36 (KHTML, like Gecko) " \
"Chrome/57.0.2987.133 Safari/537.36 "
# simulate http request
session = requests.Session()
session.headers['User-Agent'] = USER_AGENT
session.headers['Content-Type'] = 'application/json'
res = session.get(URL, params=param)
if res.status_code != 200:
print("query_error, status_code = ", res.status_code)
return None, res.status_code
# return http response
rsp = res.text
return rsp, ''
def make_param(self, req_api, req_dict):
param = OrderedDict()
param['AccessKeyId'] = self.AccessKeyId
param['SignatureMethod'] = self.SignatureMethod
param['SignatureVersion'] = self.SignatureVersion
param.update(req_dict)
param_encode = urllib.parse.urlencode(param)
# message for digital signature
'''
GET\n
api.huobi.pro\n
api\n
'''
method = 'GET\n'
url = 'api.huobi.pro\n'
api = req_api + '\n'
message = method + url + api + param_encode
# make signature by hashlib and hmac
hmac_value = hmac.new(self.PrivateKey.encode(), message.encode(), digestmod=hashlib.sha256).digest();
signature = base64.encodebytes(hmac_value)
param['Signature'] = signature
return param
# get realtime market quote
def get_quote(self, symbol):
req_api = '/market/detail/merged'
req_dict = {'symbol' : symbol}
param = self.make_param(req_api, req_dict)
response, msg = self.do_request(req_api, param)
if response is None:
return None, msg
# 载入数据并记录
rsp_json = json.loads(response)
status = rsp_json['status']
if status != 'ok':
msg = rsp_json['err-code'] + ":" + rsp_json['err-msg']
return None, msg
raw_records = rsp_json['tick']
raw_records['askprice'] = raw_records['ask'][0]
raw_records['askvolume'] = raw_records['ask'][1]
raw_records['bidprice'] = raw_records['bid'][0]
raw_records['bidvolume'] = raw_records['bid'][1]
raw_records.pop('ask')
raw_records.pop('bid')
df = pd.DataFrame().from_dict(raw_records, orient='index').T
return df, ''
def get_kline(self, symbol, period, size):
req_api = '/market/history/kline'
req_dict = {
'symbol' : symbol,
'period' : period,
'size' : size
}
param = self.make_param(req_api, req_dict)
response, msg = self.do_request(req_api, param)
if response is None:
return None, msg
# 载入数据并记录
rsp_json = json.loads(response)
status = rsp_json['status']
if status != 'ok':
msg = rsp_json['err-code'] + ":" + rsp_json['err-msg']
return None, msg
raw_records = rsp_json['data']
df = pd.DataFrame().from_dict(raw_records)
df['time'] = df['id'].apply(lambda x: datetime.fromtimestamp(x))
df.set_index('time', inplace=True)
df.sort_index(ascending=True, inplace=True)
return df, ''
def get_market_depth(self, symbol, dep_type):
'''
type: step0, step1, step2, step3, step4, step5(合并深度0-5);step0时,不合并深度
'''
req_api = '/market/depth'
req_dict = {
'symbol' : symbol,
'type' : dep_type
}
param = self.make_param(req_api, req_dict)
response = self.do_request(req_api, param)
return response
def get_trade(self, symbol):
req_api = '/market/trade'
req_dict = {
'symbol' : symbol
}
param = self.make_param(req_api, req_dict)
response, msg = self.do_request(req_api, param)
if response is None:
return None, msg
# 载入数据并记录
rsp_json = json.loads(response)
status = rsp_json['status']
if status != 'ok':
msg = rsp_json['err-code'] + ":" + rsp_json['err-msg']
return None, msg
raw_records = rsp_json['tick']['data']
data_records = []
for record in raw_records:
record['id'] = str(record['id'])
data_records.append(record)
df = pd.DataFrame().from_dict(data_records)
df['time'] = df['ts'].apply(lambda x: datetime.fromtimestamp(x/1000))
return df,''
def get_hist_trade(self, symbol, size=2000):
req_api = '/market/history/trade'
req_dict = {
'symbol' : symbol,
'size' : size
}
param = self.make_param(req_api, req_dict)
response, msg = self.do_request(req_api, param)
if response is None:
return None, msg
# 载入数据并记录
rsp_json = json.loads(response)
status = rsp_json['status']
if status != 'ok':
msg = rsp_json['err-code'] + ":" + rsp_json['err-msg']
return None, msg
raw_records = rsp_json['data']
data_records = []
for record in raw_records:
elems = record['data']
for elem in elems:
elem['id'] = str(elem['id'])
data_records.append(elem)
df = pd.DataFrame().from_dict(data_records)
df['time'] = df['ts'].apply(lambda x: datetime.fromtimestamp(x/1000))
df.set_index('time', inplace=True)
df.sort_index(ascending=True, inplace=True)
return df,''
|
HelixToolkit.Extended
=====================
Loading models from ZIP file
----------------------------
using ICSharpCode.SharpZipLib.Zip;
using System.Windows.Threading;
---
using (var zipFile = new ZipFile("Resources.zip"))
{
var reader = new ObjReaderZip(zipFile, Dispatcher.CurrentDispatcher);
var model1 = reader.Read("models\\body.obj");
var model2 = reader.Read("models/head.obj");
}
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
namespace Cosmos.Awaitable
{
/// <summary>
/// Singleton MonoBehaviour that runs awaitable coroutines and marshals
/// work back onto the Unity main thread.
/// </summary>
internal class CoroutineAwaiterMonitor : MonoSingleton<CoroutineAwaiterMonitor>
{
    /// <summary>
    /// Used for thread synchronization: data produced on worker threads is
    /// posted to Unity's main thread through this context.
    /// </summary>
    SynchronizationContext synchronizationContext;
    protected override void Awake()
    {
        base.Awake();
        // Keep the monitor hidden in the hierarchy and alive across scene loads.
        gameObject.hideFlags = UnityEngine.HideFlags.HideInHierarchy;
        DontDestroyOnLoad(gameObject);
        // Captured during Awake so Post() targets the thread Awake ran on
        // (presumably the Unity main thread — confirm call site).
        synchronizationContext = SynchronizationContext.Current;
    }
    /// <summary>Invokes <paramref name="sendOrPostCallback"/> via the captured context.</summary>
    public void PostToMainThread(Action<object> sendOrPostCallback)
    {
        synchronizationContext.Post(state => sendOrPostCallback.Invoke(state), null);
    }
    /// <summary>Starts the coroutine wrapped by <paramref name="awaiterCoroutine"/>.</summary>
    public void StartAwaitableCoroutine<T>(CoroutineAwaiter<T> awaiterCoroutine)
    {
        StartCoroutine(awaiterCoroutine.Coroutine);
    }
    /// <summary>Stops the coroutine wrapped by <paramref name="awaiterCoroutine"/>.</summary>
    public void StopAwaitableCoroutine<T>(CoroutineAwaiter<T> awaiterCoroutine)
    {
        StopCoroutine(awaiterCoroutine.Coroutine);
    }
}
}
|
using NUnit.Framework;
namespace Svg.UnitTests
{
[TestFixture]
public class SvgAttributeCollectionTests
{
    /// <summary>
    /// GetInheritedAttribute: an "inherit" value or a missing attribute falls
    /// back to the parent; the default is returned only when inheritance is
    /// disabled and the owner has no value.
    /// </summary>
    [Test]
    public void TestGetInheritedAttribute()
    {
        var owner = new SvgCircle();
        var parent = new SvgFragment();
        parent.Children.Add(owner);
        parent.Attributes["test"] = "parent";
        owner.Attributes["test"] = "owner";
        // Owner's own value wins regardless of the inherit flag.
        Assert.AreEqual("owner", owner.Attributes.GetInheritedAttribute("test", true, "default"));
        Assert.AreEqual("owner", owner.Attributes.GetInheritedAttribute("test", false, "default"));
        owner.Attributes["test"] = "inherit";
        // Explicit "inherit" delegates to the parent.
        Assert.AreEqual("parent", owner.Attributes.GetInheritedAttribute("test", true, "default"));
        Assert.AreEqual("parent", owner.Attributes.GetInheritedAttribute("test", false, "default"));
        owner.Attributes.Remove("test");
        // With no owner value, the inherit flag decides parent vs default.
        Assert.AreEqual("parent", owner.Attributes.GetInheritedAttribute("test", true, "default"));
        Assert.AreEqual("default", owner.Attributes.GetInheritedAttribute("test", false, "default"));
    }
    /// <summary>
    /// GetAttribute: returns the owner's literal value ("inherit" included)
    /// and null when the attribute is absent — no parent lookup.
    /// </summary>
    [Test]
    public void TestGetAttribute()
    {
        var owner = new SvgCircle();
        var parent = new SvgFragment();
        parent.Children.Add(owner);
        parent.Attributes["test"] = "parent";
        owner.Attributes["test"] = "owner";
        Assert.AreEqual("owner", owner.Attributes.GetAttribute<string>("test"));
        owner.Attributes["test"] = "inherit";
        Assert.AreEqual("inherit", owner.Attributes.GetAttribute<string>("test"));
        owner.Attributes.Remove("test");
        Assert.IsNull(owner.Attributes.GetAttribute<string>("test"));
    }
}
}
|
// Flyway version is supplied via gradle.properties (flyway_version=...).
val flyway_version: String by project
dependencies {
    // Database-migration engine, exported to consumers of this module.
    api("org.flywaydb:flyway-core:$flyway_version")
    // Shared SQL utilities module.
    api(project(":ktor-sql"))
}
|
/*
* Copyright (c) 2017 - 2021 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
* SPDX-License-Identifier: MIT
*/
#include <assert.h>
#include <onyx/limits.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <onyx/log.h>
#include <onyx/pagecache.h>
#include <onyx/utility.hpp>
#include "ext2.h"
/* Store a 64-bit file size into the inode's split lo/hi size fields. */
void ext2_set_inode_size(struct ext2_inode *inode, size_t size)
{
    const auto low_half = size & 0xFFFFFFFF;
    const auto high_half = size >> 32;
    inode->size_lo = low_half;
    inode->size_hi = high_half;
}
/* Classify a file-relative block number as direct, singly-, doubly- or
 * trebly-indirect, based on where the indirection regions begin. */
unsigned int ext2_detect_block_type(uint32_t block, struct ext2_superblock *fs)
{
    /* Number of block pointers that fit in one indirect block. */
    const unsigned int entries = fs->block_size / sizeof(uint32_t);
    const unsigned int singly_start = direct_block_count;
    const unsigned int doubly_start = singly_start + entries;
    const unsigned int trebly_start = doubly_start + entries * entries;

    if (block < singly_start)
        return EXT2_TYPE_DIRECT_BLOCK;
    if (block < doubly_start)
        return EXT2_TYPE_SINGLY_BLOCK;
    if (block < trebly_start)
        return EXT2_TYPE_DOUBLY_BLOCK;
    return EXT2_TYPE_TREBLY_BLOCK;
}
/* Inspired by linux's ext2_block_to_path, essentially does something like it. */
/**
 * Translate a file-relative block number into a path of table indices.
 *
 * @param sb       Superblock (supplies block_size / entry_shift).
 * @param offsets  Out: up to 4 indices. offsets[0] indexes i_data; later
 *                 entries index successive indirection tables.
 * @param block_nr File-relative block number to resolve.
 * @return Number of valid entries written to offsets (1..4).
 */
unsigned int ext2_get_block_path(ext2_superblock *sb, ext2_block_no offsets[4], ext2_block_no block_nr)
{
    unsigned int type = ext2_detect_block_type(block_nr, sb);
    /* Number of block pointers per indirect block. */
    const unsigned int entries = (sb->block_size / sizeof(uint32_t));
    unsigned int min_singly_block = direct_block_count;
    unsigned int min_doubly_block = entries + direct_block_count;
    unsigned int min_trebly_block = entries * entries + entries + direct_block_count;
    unsigned int idx = 0;
    if(type == EXT2_TYPE_DIRECT_BLOCK)
        offsets[idx++] = block_nr;
    else if(type == EXT2_TYPE_SINGLY_BLOCK)
    {
        offsets[idx++] = EXT2_IND_BLOCK;
        offsets[idx++] = block_nr - min_singly_block;
    }
    else if(type == EXT2_TYPE_DOUBLY_BLOCK)
    {
        /* Make block_nr relative to the start of the doubly-indirect region. */
        block_nr -= min_doubly_block;
        unsigned int doubly_table_index = block_nr >> sb->entry_shift;
        unsigned int singly_table_index = block_nr & (entries - 1);
        offsets[idx++] = EXT2_DIND_BLOCK;
        offsets[idx++] = doubly_table_index;
        offsets[idx++] = singly_table_index;
    }
    else if(type == EXT2_TYPE_TREBLY_BLOCK)
    {
        /* Make block_nr relative to the start of the trebly-indirect region. */
        block_nr -= min_trebly_block;
        unsigned int trebly_table_index = block_nr >> (sb->entry_shift * 2);
        unsigned int doubly_table_index = (block_nr >> sb->entry_shift)
            & (entries - 1);
        unsigned int singly_table_index = block_nr & (entries - 1);
        offsets[idx++] = EXT2_TIND_BLOCK;
        offsets[idx++] = trebly_table_index;
        offsets[idx++] = doubly_table_index;
        offsets[idx++] = singly_table_index;
    }
    return idx;
}
/**
 * Resolve a file-relative block number to an on-disk block number by walking
 * the inode's (possibly indirect) block tables.
 *
 * @return The destination block number (EXT2_ERR_INV_BLOCK when an
 *         intermediate table pointer is invalid/absent), or unexpected(-errno)
 *         on a read failure.
 */
expected<ext2_block_no, int> ext2_get_block_from_inode(ext2_inode *ino, ext2_block_no block, ext2_superblock *sb)
{
    ext2_block_no offsets[4];
    unsigned int len = ext2_get_block_path(sb, offsets, block);
    /* Level 0 of the walk is the inode's own block-pointer array. */
    uint32_t *curr_block = ino->i_data;
    auto_block_buf buf;
    ext2_block_no dest_block_nr = 0;
    for(unsigned int i = 0; i < len; i++)
    {
        ext2_block_no off = offsets[i];
        /* We have to check if we're the last level, as to not read the dest block */
        if(i + 1 != len)
        {
            auto b = curr_block[off];
            if(b == EXT2_ERR_INV_BLOCK)
                return EXT2_ERR_INV_BLOCK;
            buf = sb_read_block(sb, b);
            if(!buf)
                return unexpected<int>{-errno};
            /* Descend: the freshly-read table becomes the next level. */
            curr_block = static_cast<uint32_t *>(block_buf_data(buf));
        }
        else
        {
            dest_block_nr = curr_block[off];
        }
    }
    return dest_block_nr;
}
/**
 * Like ext2_get_block_from_inode, but allocates any missing indirect tables
 * and the destination block along the way (filling file holes).
 *
 * Newly allocated table blocks are zeroed; i_blocks accounting and
 * dirty-marking (of either the parent table buffer or the inode) happen at
 * each allocation.
 *
 * @return The (possibly newly allocated) destination block number, or
 *         unexpected(-ENOSPC / -errno) on failure.
 */
expected<ext2_block_no, int> ext2_create_path(struct inode *ino, ext2_block_no block, ext2_superblock *sb)
{
    /* Prefer allocating in the inode's own block group. */
    auto preferred_bg = ext2_inode_number_to_bg(ino->i_inode, sb);
    auto raw_inode = ext2_get_inode_from_node(ino);
    ext2_block_no offsets[4];
    unsigned int len = ext2_get_block_path(sb, offsets, block);
    uint32_t *curr_block = raw_inode->i_data;
    auto_block_buf buf;
    ext2_block_no dest_block_nr = 0;
    for(unsigned int i = 0; i < len; i++)
    {
        ext2_block_no off = offsets[i];
        /* We have to check if we're the last level, as to not read the dest block */
        if(i + 1 != len && len != 1)
        {
            auto b = curr_block[off];
            bool should_zero_block = false;
            if(b == EXT2_ERR_INV_BLOCK)
            {
                /* Missing intermediate table: allocate one. */
                auto block = sb->allocate_block(preferred_bg);
                if(block == EXT2_ERR_INV_BLOCK)
                {
                    return unexpected<int>{-ENOSPC};
                }
                should_zero_block = true;
                b = curr_block[off] = block;
                /* i_blocks is counted in 512-byte sectors. */
                ino->i_blocks += sb->block_size >> 9;
                /* Dirty whatever holds the pointer we just wrote: the parent
                 * table buffer if we have one, otherwise the inode itself. */
                if(buf) block_buf_dirty(buf);
                else
                {
                    inode_update_ctime(ino);
                    inode_mark_dirty(ino);
                }
            }
            buf = sb_read_block(sb, b);
            if(!buf)
                return unexpected<int>{-errno};
            curr_block = static_cast<uint32_t *>(block_buf_data(buf));
            if(should_zero_block) [[unlikely]]
            {
                /* Fresh tables must start with all pointers = 0 (holes). */
                memset(curr_block, 0, sb->block_size);
                block_buf_dirty(buf);
            }
        }
        else
        {
            dest_block_nr = curr_block[off];
            if(dest_block_nr == EXT2_FILE_HOLE_BLOCK)
            {
                /* The destination itself is a hole: allocate it. */
                auto block = sb->allocate_block();
                if(block == EXT2_ERR_INV_BLOCK)
                    return unexpected<int>{-ENOSPC};
                dest_block_nr = curr_block[off] = block;
                ino->i_blocks += sb->block_size >> 9;
                //printk("Block: %u\n", block);
                //printk("Iblocks %lu\n", ino->i_blocks);
                inode_update_ctime(ino);
                inode_mark_dirty(ino);
            }
        }
    }
    return dest_block_nr;
}
/**
 * Prepare a page-cache page for a write of [offset, offset+len): attach
 * block_bufs to the page if it has none yet, and allocate on-disk blocks for
 * any buffer in the written range that is still a file hole.
 *
 * @param ino      Inode being written.
 * @param page     Page backing the write.
 * @param page_off Byte offset of the page within the file.
 * @param offset   Start of the write, relative to the page.
 * @param len      Length of the write in bytes.
 * @return 0 on success, negative errno on failure.
 */
int ext2_prepare_write(inode *ino, struct page *page, size_t page_off, size_t offset, size_t len)
{
    auto end = offset + len;
    auto sb = ext2_superblock_from_inode(ino);
    auto bufs = block_buf_from_page(page);
    auto base_block = page_off / sb->block_size;
    auto nr_blocks = PAGE_SIZE / sb->block_size;
    /* Handle pages that haven't been mapped yet */
    if(!bufs)
    {
        auto curr_off = 0;
        for(size_t i = 0; i < nr_blocks; i++)
        {
            struct block_buf *b = nullptr;
            if(!(b = page_add_blockbuf(page, curr_off)))
            {
                /* Roll back any bufs added so far. */
                page_destroy_block_bufs(page);
                return -ENOMEM;
            }
            //printk("Adding block for page offset %u\n", b->page_off);
            /* Start as a hole; resolved to a real block below if written. */
            b->block_nr = EXT2_FILE_HOLE_BLOCK;
            b->block_size = sb->block_size;
            b->dev = sb->s_bdev;
            curr_off += sb->block_size;
        }
        bufs = block_buf_from_page(page);
    }
    /* Walk the page's buffers and fill holes that intersect the write. */
    while(bufs)
    {
        if(bufs->page_off >= offset && bufs->page_off < end)
        {
            auto relative_block = bufs->page_off / sb->block_size;
            auto block_number = bufs->block_nr;
            if(block_number == EXT2_FILE_HOLE_BLOCK)
            {
                auto res = ext2_create_path(ino, base_block + relative_block, sb);
                //printk("creating path for poff %u file off %lu\n", bufs->page_off, offset);
                if(res.has_error())
                    return res.error();
                bufs->block_nr = res.value();
            }
        }
        bufs = bufs->next;
    }
    return 0;
}
int ext2_truncate(size_t len, inode *ino);
int ext2_free_space(size_t new_len, inode *ino);
/**
 * Free every data block owned by `inode_` (truncate-to-zero).
 *
 * @param inode_ VFS inode whose space is released.
 * @param fs     Superblock; currently unused here — ext2_free_space derives
 *               it from the inode itself.
 */
void ext2_free_inode_space(inode *inode_, ext2_superblock *fs)
{
    ext2_free_space(0, inode_);
    /* After truncating to zero no blocks may remain accounted to the inode. */
    assert(inode_->i_blocks == 0);
}
/**
 * A path of up to four table indices identifying one file block, as produced
 * by ext2_get_block_path, plus the number of valid entries (`size`).
 */
struct ext2_block_coords
{
    ext2_block_no coords[4];
    int size;

    /* BUGFIX: `size` was left uninitialized by the default constructor even
     * though to_offset() branches on it; zero-initialize it. */
    ext2_block_coords() : coords{0, 0, 0, 0}, size{0} {}

    /* NOTE(review): equality compares only the coordinates, not `size`;
     * confirm callers always compare paths of equal depth. */
    bool operator==(const ext2_block_coords &rhs) const
    {
        return coords[0] == rhs.coords[0] && coords[1] == rhs.coords[1]
            && coords[2] == rhs.coords[2] && coords[3] == rhs.coords[3];
    }

    ext2_block_no& operator[](int idx)
    {
        return coords[idx];
    }

    /* Convert the coordinate path back into a byte offset within the file. */
    size_t to_offset(const ext2_superblock *sb) const
    {
        /* Essentially this function mirrors ext2_get_block_path. I hope it's correct. */
        if(size == 1)
            return coords[0] << sb->block_size_shift;
        const unsigned int entries = (sb->block_size / sizeof(uint32_t));
        unsigned int min_singly_block = direct_block_count;
        unsigned int min_doubly_block = entries + direct_block_count;
        unsigned int min_trebly_block = entries * entries + entries + direct_block_count;
        if(size == 2)
        {
            /* NOTE(review): the size==1 branch shifts by block_size_shift but
             * sizes 2-4 shift by entry_shift — verify this asymmetry is
             * intentional. */
            return (coords[1] + min_singly_block) << sb->entry_shift;
        }
        else if(size == 3)
        {
            auto block_number = coords[2] << sb->entry_shift | coords[1];
            return (block_number + min_doubly_block) << sb->entry_shift;
        }
        else if(size == 4)
        {
            return ((coords[3] << (sb->entry_shift * 2) |
                ((coords[2] << sb->entry_shift) & (entries - 1)) | (coords[1] & (entries - 1)))
                + min_trebly_block) << sb->entry_shift;
        }
        else
            __builtin_unreachable();
    }
};
/* Outcome of one step of the recursive truncation walk. */
enum class ext2_trunc_result
{
    continue_trunc = 0, /* keep freeing blocks at this level */
    stop,               /* truncation boundary reached; callers must stop */
};
/**
 * Recursively free blocks reachable through one (indirect) table entry,
 * walking each table from its end backwards until the truncation boundary
 * is hit.
 *
 * @param block             Block number of the table at this level (unused at
 *                          level 0, where the caller's entry is the data block).
 * @param indirection_level 0 = data block, 1..3 = indirect table depth.
 * @param boundary          Coordinates of the first block to keep.
 * @param curr_coords       Current position in the walk (mutated in place).
 * @param ino               Inode being truncated (i_blocks is adjusted).
 * @param sb                Filesystem superblock.
 * @return continue_trunc to keep freeing, stop once the boundary is reached,
 *         or unexpected(-EIO) on a read failure.
 */
expected<ext2_trunc_result, int>
ext2_trunc_indirect_block(ext2_block_no block, unsigned int indirection_level,
                          const ext2_block_coords &boundary, ext2_block_coords &curr_coords,
                          inode *ino, ext2_superblock *sb)
{
    auto block_off = curr_coords.to_offset(sb);
    if(indirection_level == 0)
    {
        /* Leaf case: a single data block. */
        if(curr_coords == boundary)
            return ext2_trunc_result::stop;
#if 0
        printk("Freeing block off %lu\n", block_off);
        printk("coords %u\n", curr_coords.coords[0]);
#endif
        inode_truncate_range(ino, block_off, block_off + sb->block_size);
        //printk("Iblocks %lu\n", ino->i_blocks);
        return ext2_trunc_result::continue_trunc;
    }
    auto_block_buf buf = sb_read_block(sb, block);
    if(!buf)
    {
        sb->error("I/O error");
        return unexpected<int>{-EIO};
    }
    /* Dirty the table on scope exit unless we end up clearing all of it. */
    buf_dirty_trigger dirty_trig{buf};
    uint32_t *blockbuf = (uint32_t *) block_buf_data(buf);
    unsigned int nr_entries = sb->block_size / sizeof(uint32_t);
    /* The basic algorithm for this is: We start from the end of the table,
     * and we keep going backwards until we either reach a stop/error, or we run out of table.
     * If we don't reach a stop, we free the block/block table
     * under us (check the indirection_level check).
     */
    for(int i = nr_entries - 1; i >= 0; i--)
    {
        curr_coords.coords[indirection_level] = i;
        if(curr_coords == boundary)
            return ext2_trunc_result::stop;
        if(blockbuf[i] == EXT2_FILE_HOLE_BLOCK)
            continue;
        if(indirection_level != 1)
        {
            /* Entry points at another table: recurse, then free the table. */
            auto st = ext2_trunc_indirect_block(blockbuf[i],
                indirection_level - 1, boundary, curr_coords, ino, sb);
            if(st.has_error())
                return unexpected<int>{st.error()};
            else if(st.value() == ext2_trunc_result::stop)
                return st;
            sb->free_block(blockbuf[i]);
            ino->i_blocks -= sb->block_size >> 9;
            blockbuf[i] = 0;
        }
        else
        {
            /* Entry points directly at a data block: punch it and free it. */
            inode_truncate_range(ino, block_off, block_off + sb->block_size);
            sb->free_block(blockbuf[i]);
            ino->i_blocks -= sb->block_size >> 9;
            //printk("Iblocks %lu\n", ino->i_blocks);
            blockbuf[i] = 0;
        }
    }
    /* If we got here, we've cleared the whole table and as such we don't need to dirty it
     * since we're going to be free'd anyway by our caller.
     */
    dirty_trig.do_not_dirty();
    return ext2_trunc_result::continue_trunc;
}
/**
 * @brief Checks if the ext2 inode has data blocks.
 * In ext2, several types of inodes (namely, symlinks and devices) can simply only have
 * inline data.
 *
 * @param ino Pointer to the inode struct
 * @param raw_ino Pointer to the ext2 inode
 * @param sb Pointer to the ext2 superblock
 * @return True if it has data blocks, else false.
 */
bool ext2_has_data_blocks(inode *ino, ext2_inode *raw_ino, ext2_superblock *sb)
{
    /* i_blocks also accounts the extended-attribute block, if present;
     * discount it (i_blocks is in 512-byte sectors). */
    const int ea_blocks = raw_ino->i_file_acl ? (sb->block_size >> 9) : 0;
    return (ino->i_blocks - ea_blocks) != 0;
}
/**
 * Free all blocks past `new_len` bytes, walking i_data's direct and indirect
 * entries from the top (trebly-indirect) downwards.
 *
 * @param new_len New file length in bytes (0 frees everything).
 * @param ino     Inode being shrunk.
 * @return 0 on success, negative errno on failure.
 */
int ext2_free_space(size_t new_len, inode *ino)
{
    auto sb = ext2_superblock_from_inode(ino);
    auto raw_inode = ext2_get_inode_from_node(ino);
    // If the inode only has inline data, just return success.
    if (!ext2_has_data_blocks(ino, raw_inode, sb))
    {
        return 0;
    }
    ext2_block_coords curr_coords{};
    ext2_block_coords boundary_coords;
    /* Last block we must keep: the one containing byte new_len - 1. */
    auto boundary_block = cul::align_down2(new_len - 1, sb->block_size) >> sb->block_size_shift;
    /* We don't have a boundary block if we're truncating to zero. See below. */
    if(new_len == 0)
        boundary_block = 0;
    auto len = ext2_get_block_path(sb, boundary_coords.coords, boundary_block);
    boundary_coords.size = len;
    /* Walk i_data backwards: TIND, DIND, IND, then the direct entries. */
    for(int i = EXT2_NR_BLOCKS - 1; i != 0; i--)
    {
        int indirection_level = 3;
        if(i < EXT2_IND_BLOCK)
        {
            indirection_level = 0;
            curr_coords.size = 1;
        }
        else if(i == EXT2_IND_BLOCK)
        {
            indirection_level = 1;
            curr_coords.size = 2;
        }
        else if(i == EXT2_DIND_BLOCK)
        {
            indirection_level = 2;
            curr_coords.size = 3;
        }
        else if(i == EXT2_TIND_BLOCK)
        {
            indirection_level = 3;
            curr_coords.size = 4;
        }
        curr_coords[0] = i;
        curr_coords[1] = curr_coords[2] = curr_coords[3] = 0;
        /* Test this here since the EXT2_FILE_HOLE_BLOCK check may elide the one inside
         * ext2_trunc_indirect_block and because of that we start deleting blocks before the file hole.
         */
        if(curr_coords == boundary_coords)
            break;
        auto block = raw_inode->i_data[i];
        if(block == EXT2_FILE_HOLE_BLOCK)
            continue;
        auto res = ext2_trunc_indirect_block(block, indirection_level,
                                             boundary_coords, curr_coords, ino, sb);
        if(res.has_error())
        {
            ERROR("ext2", "Error truncating file: %d\n", res.error());
            sb->error("Error truncating file");
            return res.error();
        }
        else if(res.value() == ext2_trunc_result::stop)
            break;
        else
        {
            /* If we're told to continue going down the tables, we'll remove this
             * one from i_data since it's been freed.
             */
            sb->free_block(block);
            ino->i_blocks -= sb->block_size >> 9;
            raw_inode->i_data[i] = EXT2_FILE_HOLE_BLOCK;
        }
    }
    if(new_len == 0)
    {
        /* If new_len is zero, we're going to get told to stop at (0, 0, 0, 0) even though
         * we want to delete that block too, so do so right now.
         */
        if(raw_inode->i_data[0])
        {
            sb->free_block(raw_inode->i_data[0]);
            inode_truncate_range(ino, 0, sb->block_size);
            ino->i_blocks = 0;
            //printk("zero Iblocks %lu\n", ino->i_blocks);
            raw_inode->i_data[0] = 0;
        }
    }
    /* Punch the tail of the final, partially-kept block from the page cache. */
    if(new_len & (sb->block_size - 1))
    {
        auto page_off = new_len;
        inode_truncate_range(ino, page_off, ino->i_size);
    }
    return 0;
}
/**
 * Truncate the inode to `len` bytes: frees on-disk space when shrinking,
 * then updates i_size, trims the page cache and marks the inode dirty.
 *
 * @return 0 on success, negative errno from the space-freeing path.
 */
int ext2_truncate(size_t len, inode *ino)
{
    int st = 0;
#if 0
    printk("truncating to %lu\n", len);
#endif
    /* Only shrinking releases blocks; growing just raises i_size. */
    if(ino->i_size > len)
    {
        if((st = ext2_free_space(len, ino)) < 0)
        {
            return st;
        }
    }
    /* **fallthrough**
     * The space freeing code will need this anyway, because you'll need to mark the inode dirty.
     */
    ino->i_size = len;
    /* DONT_PUNCH: ext2_free_space already punched the freed ranges. */
    vmo_truncate(ino->i_pages, len, VMO_TRUNCATE_DONT_PUNCH);
    inode_mark_dirty(ino);
    return st;
}
/* ftruncate entry point: forward to ext2_truncate on the file's inode. */
int ext2_ftruncate(size_t len, file *f)
{
    return ext2_truncate(len, f->f_ino);
}
|
package com.github.pshirshov.izumi.distage.testkit.services
import org.scalatest.exceptions.TestCanceledException
/** Convenience overloads for cancelling ("ignoring") the current test. */
private[testkit] trait IgnoreSupport {
  /** Cancel the current test, attributing it to `cause`. */
  protected final def ignoreThisTest(cause: Throwable): Nothing = {
    ignoreThisTest(None, Some(cause))
  }
  /** Cancel the current test with an explanatory `message`. */
  protected final def ignoreThisTest(message: String): Nothing = {
    ignoreThisTest(Some(message), None)
  }
  /** Cancel the current test with both a message and a cause. */
  protected final def ignoreThisTest(message: String, cause: Throwable): Nothing = {
    ignoreThisTest(Some(message), Some(cause))
  }
  /** Base implementation: throws scalatest's cancellation exception.
    * failedCodeStackDepth = 0 points the report at the caller. */
  protected final def ignoreThisTest(message: Option[String] = None, cause: Option[Throwable] = None): Nothing = {
    throw new TestCanceledException(message, cause, failedCodeStackDepth = 0)
  }
}
|
import React, { useEffect, useState } from 'react';
import { AiOutlineLoading3Quarters } from 'react-icons/ai';
import { Link } from 'react-router-dom';
import { toast } from 'react-toastify';
import { Form } from '@unform/web';
import { InputPrice } from '~/components';
import api from '~/services/api';
import { convertFloatInPrice, convertPrice } from '~/utils/convert';
import documentTitle from '~/utils/documentTitle';
import { Container } from './styles';
export default function Loads() {
documentTitle('Tipos de cargas');
const [loads, setLoads] = useState([]);
const [price, setPrice] = useState('');
const [loading, setLoading] = useState(false);
const [error, setError] = useState(false);
useEffect(() => {
async function getData() {
const response = await api.get('/loads');
setLoads(response.data);
const responseConfig = await api.get('/configurations');
const { price_per_kilometer } = responseConfig.data;
setPrice(convertFloatInPrice(price_per_kilometer));
}
getData();
}, []);
async function handleDelete(id) {
const response = await api.delete(`/loads/${id}`);
if (response.data.error) {
toast.error(response.data.error);
return;
}
const newLoad = loads.filter((item) => item.id !== id);
setLoads(newLoad);
toast.success(response.data.success);
}
async function handleSubmit() {
if (!price) {
setError(true);
return;
}
setError(false);
setLoading(true);
const response = await api.put('/configurations', {
price_per_kilometer: convertPrice(price),
});
const { success } = response.data;
if (success) {
toast.success(success);
} else {
toast.error('Algo deu errado! Tente novamente!');
}
setLoading(false);
}
return (
<Container className="animated fadeIn">
<div className="container">
<div className="d-flex justify-content-between align-items-center">
<h1 className="mb-5">Tipos de carga</h1>
<Link className="btn btn-success" to="/loads/create">
Adicionar
</Link>
</div>
{loads.length > 0 ? (
<div className="row">
{loads.map((item, index) => (
<div className="col-lg-4" key={String(index)}>
<div className="box-load">
<div className="title">{item.name}</div>
<p>{item.description}</p>
<div className="d-flex justify-content-between align-items-center">
<div className="price">
{convertFloatInPrice(item.price)}
</div>
<div className="d-flex">
<Link
className="btn btn-primary mr-3"
to={`/loads/edit/${item.id}`}
>
Editar
</Link>
<button
type="button"
className="btn btn-danger"
onClick={() => handleDelete(item.id)}
>
Deletar
</button>
</div>
</div>
</div>
</div>
))}
</div>
) : (
<p className="mb-5">Não existe tipos de cargas cadastrados!</p>
)}
<Form onSubmit={handleSubmit}>
<div className="d-flex justify-content-between align-items-center">
<h1>Preço por Quilômetro </h1>
<button
type="submit"
className={`btn btn-success ${loading && 'disabled btn-loading'}`}
disabled={loading}
>
{loading ? (
<AiOutlineLoading3Quarters color="#fff" size={14} />
) : (
'Salvar'
)}
</button>
</div>
<InputPrice
id="preco"
name="preco"
value={price}
onChange={(e) => setPrice(e.target.value)}
error={error}
/>
</Form>
</div>
</Container>
);
}
|
# frozen_string_literal: true
require 'test_helper'
# Shared spec fixtures: one parsed class that defines #initialize
# (road_bike.rb) and one parsed empty class that does not.
module SexpExamples
  def self.included(base)
    base.let(:sexp_with_initialize) { parse_file('road_bike.rb') }
    base.let(:sexp_without_initialize) do
      RubyParser.new.parse(<<~EMPTY_CLASS_DEFINITION)
        class Scooter
        end
      EMPTY_CLASS_DEFINITION
    end
  end
end
# Class-level API: satisfy?(sexp, method_name).
describe 'SexpCliTools::Matchers::MethodImplementation.satisfy?' do
  subject { SexpCliTools::Matchers::MethodImplementation }
  include SexpExamples
  it 'is satisfied by a ruby file which implements initialize' do
    _(subject.satisfy?(sexp_with_initialize, 'initialize')).must_equal true
  end
  it 'is not satisfied by a ruby file without an implementation of initialize' do
    _(subject.satisfy?(sexp_without_initialize, 'initialize')).must_equal false
  end
end
# Instance-level API: new(target_method) then satisfy?(sexp).
describe 'SexpCliTools::Matchers::MethodImplementation#satisfy?(sexp)' do
  subject { SexpCliTools::Matchers::MethodImplementation.new(target_method) }
  let(:target_method) { :initialize }
  include SexpExamples
  it 'is satisfied by a ruby file which implements initialize' do
    _(subject).must_be :satisfy?, sexp_with_initialize
  end
  it 'is not satisfied by a ruby file without an implementation of initialize' do
    _(subject).wont_be :satisfy?, sexp_without_initialize
  end
end
|
# Specs for the NSData read/write convenience extensions.
describe 'NSData' do
  before do
    # Same temporary path, exposed both as a string and as a file URL.
    @file_name = 'write_to_file'.temporary_path
    @url = NSURL.alloc.initFileURLWithPath 'write_to_file'.temporary_path
  end
  describe 'writing to a file' do
    it 'should write data to a file' do
      subject = 'data'.dataUsingEncoding(NSUTF8StringEncoding)
      subject.write_to(@file_name).should == true
      @file_name.file_exists?.should == true
    end
  end
  describe 'writing to a url' do
    it 'should write data to a url' do
      subject = 'data'.dataUsingEncoding(NSUTF8StringEncoding)
      subject.write_to(@url).should == true
      # @url points at the same path, so the string check suffices.
      @file_name.file_exists?.should == true
    end
  end
  describe 'reading from a file' do
    it 'should read data from a file' do
      # Round-trip: write first, then read back and compare.
      subject = 'data'.dataUsingEncoding(NSUTF8StringEncoding)
      subject.write_to(@file_name).should == true
      contents = NSData.read_from(@file_name)
      contents.should == subject
    end
    it 'should not have a file called "wtf is this"' do
      'wtf is this'.document_path.file_exists?.should == false
    end
    it 'should return nil for files that don\'t exist' do
      file_name = 'wtf is this'.document_path
      contents = NSData.read_from(file_name)
      contents.should == nil
    end
  end
end
|
using WebGL;
namespace THREE
{
/// <summary>
/// Material for rendering dashed line primitives. Port of three.js's
/// LineDashedMaterial; unset parameters fall back to three.js defaults.
/// </summary>
public class LineDashedMaterial : Material
{
    public Color color;
    public dynamic linewidth;
    public dynamic scale;
    public dynamic dashSize;
    public dynamic gapSize;
    public dynamic vertexColors;
    public dynamic fog;
    /// <summary>
    /// Creates the material from an optional parameter bag; missing entries
    /// get defaults (white color, linewidth 1, scale 1, dashSize 3,
    /// gapSize 1, no vertex colors, fog enabled).
    /// </summary>
    public LineDashedMaterial(dynamic parameters = null)
    {
        parameters = parameters ?? new JSObject();
        color = new Color(parameters.color ?? 0xffffff);
        linewidth = parameters.linewidth ?? 1;
        scale = parameters.scale ?? 1;
        dashSize = parameters.dashSize ?? 3;
        gapSize = parameters.gapSize ?? 1;
        vertexColors = parameters.vertexColors ?? false;
        fog = parameters.fog ?? true;
        setValues(parameters);
    }
    /// <summary>Returns a deep copy of this material.</summary>
    public LineDashedMaterial clone()
    {
        var material = (LineDashedMaterial)clone(new LineDashedMaterial());
        material.color.copy(color);
        material.linewidth = linewidth;
        material.scale = scale;
        material.dashSize = dashSize;
        material.gapSize = gapSize;
        material.vertexColors = vertexColors;
        material.fog = fog;
        return material;
    }
}
}
|
# frozen_string_literal: true
require 'rails_helper'
# Smoke test: the injuries index view renders with two injuries that share a
# regular inspection but belong to different bridge components.
RSpec.describe 'injuries/index', type: :view do
  before(:each) do
    bridge = FactoryBot.create(:bridge)
    @regular_inspection = FactoryBot.create(:regular_inspection, bridge: bridge)
    component1 = FactoryBot.create(:component, bridge: bridge)
    component2 = FactoryBot.create(:component, bridge: bridge)
    assign(:injuries, [
      FactoryBot.create(:injury, regular_inspection: @regular_inspection, component: component1),
      FactoryBot.create(:injury, regular_inspection: @regular_inspection, component: component2)
    ])
  end
  it 'renders a list of injuries' do
    # No content assertions; this only verifies the template renders cleanly.
    render
  end
end
|
import React, { Component } from "react";
import { bindActionCreators } from "redux";
import { connect } from "react-redux";
import { Divider } from "antd";
import PropTypes from "prop-types";
import queryString from "query-string";
import { getMineRegionHash } from "@common/selectors/staticContentSelectors";
import { fetchMineNoticeOfWorkApplications } from "@common/actionCreators/noticeOfWorkActionCreator";
import { getNoticeOfWorkList } from "@common/selectors/noticeOfWorkSelectors";
import { getMineGuid, getMines } from "@common/selectors/mineSelectors";
import { formatQueryListParams } from "@common/utils/helpers";
import * as router from "@/constants/routes";
import AuthorizationWrapper from "@/components/common/wrappers/AuthorizationWrapper";
import * as Permission from "@/constants/permissions";
import AddButton from "@/components/common/AddButton";
import CustomPropTypes from "@/customPropTypes";
import MineNoticeOfWorkTable from "@/components/mine/NoticeOfWork/MineNoticeOfWorkTable";
const propTypes = {
mineGuid: PropTypes.string.isRequired,
mines: PropTypes.objectOf(CustomPropTypes.mine).isRequired,
fetchMineNoticeOfWorkApplications: PropTypes.func.isRequired,
history: PropTypes.shape({ push: PropTypes.func }).isRequired,
location: PropTypes.shape({ search: PropTypes.string }).isRequired,
noticeOfWorkApplications: PropTypes.arrayOf(CustomPropTypes.importedNOWApplication).isRequired,
mineRegionHash: PropTypes.objectOf(PropTypes.string).isRequired,
};
export class MineNOWApplications extends Component {
params = queryString.parse(this.props.location.search);
listQueryParams = [];
splitListParams = formatQueryListParams("split", this.listQueryParams);
state = {
isLoaded: false,
params: {
submissions_only: true,
...this.params,
},
};
componentDidMount() {
const params = this.props.location.search;
const parsedParams = queryString.parse(params);
const {
page = this.state.params.page,
per_page = this.state.params.per_page,
submissions_only = this.state.params.submissions_only,
} = parsedParams;
if (params) {
this.renderDataFromURL();
} else {
this.props.history.push(
router.MINE_NOW_APPLICATIONS.dynamicRoute(this.props.mineGuid, {
page,
per_page,
submissions_only,
})
);
}
}
componentWillReceiveProps(nextProps) {
const locationChanged = nextProps.location !== this.props.location;
if (locationChanged) {
this.renderDataFromURL(nextProps.location.search);
}
}
renderDataFromURL = (queryParams) => {
const params = queryParams || this.props.location.search;
const parsedParams = queryString.parse(params);
this.setState(
{
params: this.splitListParams(parsedParams),
isLoaded: false,
},
() =>
this.props
.fetchMineNoticeOfWorkApplications({ mine_guid: this.props.mineGuid, ...parsedParams })
.then(() => {
this.setState({ isLoaded: true });
})
);
};
handleSearch = (searchParams = {}, clear = false) => {
const persistedParams = clear ? {} : this.state.params;
const updatedParams = {
// Start from existing state
...persistedParams,
// Overwrite prev params with any newly provided search params
...searchParams,
submissions_only: true,
};
this.props.history.push(
router.MINE_NOW_APPLICATIONS.dynamicRoute(this.props.mineGuid, updatedParams)
);
};
render() {
const isMajorMine = this.props.mines[this.props.mineGuid].major_mine_ind;
const title = isMajorMine ? "Permit Applications" : "Notice of Work Applications";
return (
<div className="tab__content">
<div>
<h2>{title}</h2>
<AuthorizationWrapper isMajorMine={isMajorMine} permission={Permission.EDIT_PERMITS}>
<AddButton
onClick={() =>
this.props.history.push(router.CREATE_NOTICE_OF_WORK_APPLICATION.route, {
mineGuid: this.props.mineGuid,
})
}
>
Add a Permit Application
</AddButton>
</AuthorizationWrapper>
</div>
<Divider />
<MineNoticeOfWorkTable
isMajorMine={isMajorMine}
isLoaded={this.state.isLoaded}
handleSearch={this.handleSearch}
noticeOfWorkApplications={this.props.noticeOfWorkApplications}
sortField={this.state.params.sort_field}
sortDir={this.state.params.sort_dir}
searchParams={this.state.params}
mineRegionHash={this.props.mineRegionHash}
/>
</div>
);
}
}
// Map the redux store slices this screen reads into props.
const mapStateToProps = (state) => ({
  mineGuid: getMineGuid(state),
  mines: getMines(state),
  noticeOfWorkApplications: getNoticeOfWorkList(state),
  mineRegionHash: getMineRegionHash(state),
});

// Bind the Notice-of-Work fetch action creator to dispatch.
const mapDispatchToProps = (dispatch) =>
  bindActionCreators(
    {
      fetchMineNoticeOfWorkApplications,
    },
    dispatch
  );

MineNOWApplications.propTypes = propTypes;

export default connect(mapStateToProps, mapDispatchToProps)(MineNOWApplications);
|
package org.fisco.bcos.sdk.amop;
import io.netty.util.Timeout;
/**
 * Callback invoked when an AMOP response arrives (or the request times out).
 * Subclasses implement {@link #onResponse(AmopResponse)}; the associated
 * netty {@link Timeout} is held so the framework can cancel it on reply.
 */
public abstract class AmopResponseCallback {
    private Timeout timeout;

    /** @return the timeout task associated with the outstanding request */
    public Timeout getTimeout() {
        return timeout;
    }

    /** @param timeout the timeout task associated with the outstanding request */
    public void setTimeout(Timeout timeout) {
        this.timeout = timeout;
    }

    /** Handle the AMOP response for the request this callback was registered with. */
    public abstract void onResponse(AmopResponse response);
}
|
import {
IAIREP,
IAVT7Metar,
IAirSigmet,
IAirSigmetOptions,
IAircraftReportsOptions,
IClientOptions,
IDatasourceType,
IGAirMet,
IGAirmetOptions,
IMetaSkyCondition,
IMetar,
IMetarOptions,
IOptions,
IStation,
IStationOptions,
ITaf,
ITafOptions,
} from "./index.js";
import axios from "axios";
import { XMLParser } from "fast-xml-parser";
import { skyConditions } from "./Identifiers.js";
// Shared XML parser for ADDS responses: keep attributes (they carry many of
// the record fields) and expose them without a name prefix.
const parser = new XMLParser({
  ignoreAttributes: false,
  attributeNamePrefix: "",
});
/**
 * HTTP client for public aviation-weather data sources.
 *
 * Two upstreams are wrapped:
 *  - `AW`:   the aviationweather.gov ADDS data server (XML responses,
 *            parsed here into plain objects)
 *  - `AVT7`: the avt7.com airport METAR endpoint (JSON response)
 */
export class Client {
  // Client-wide options; only `debug` is read in this class.
  private options?: IClientOptions;

  // Upstream endpoint URLs, shared by all instances.
  static api = {
    AW: "https://www.aviationweather.gov/adds/dataserver_current/httpparam",
    AVT7: "http://www.avt7.com/Home/AirportMetarInfo",
  };

  constructor(options?: IClientOptions) {
    this.options = options;
  }

  /**
   * Resolve a sky-condition code (e.g. "BKN") to its description.
   * Unknown codes are returned with an "unknown" description instead of
   * throwing.
   */
  getSkyCondition(identifier: string): IMetaSkyCondition {
    const search = skyConditions.find((s) => s.code === identifier);
    if (!search) {
      return { code: identifier, description: "unknown" };
    }
    return search;
  }

  // Map a datasource name to the element name used in the parsed ADDS
  // response payload.
  private selectField = (type: IDatasourceType) => {
    switch (type) {
      case "AIRCRAFTREPORTS":
        return "AircraftReport";
      case "AIRSIGMETS":
        return "AIRSIGMET";
      case "GAIRMETS":
        return "GAIRMET";
      case "METARS":
        return "METAR";
      case "TAFS":
        return "TAF";
      case "STATIONS":
        return "Station";
      default:
        return "METAR";
    }
  };

  // Normalize parsed records in place so fields that may be missing, or
  // scalar when the XML had a single child, are always arrays.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  private FormatOutput = (type: IDatasourceType, data: any[]) => {
    switch (type) {
      case "METARS": {
        return data.map((metar) => {
          // make sure sky_condition exists and is an array
          if (!metar?.sky_condition) {
            metar.sky_condition = [];
          } else if (!(metar?.sky_condition instanceof Array)) {
            metar.sky_condition = [metar?.sky_condition];
          }
          return metar;
        });
      }
      case "TAFS": {
        return data.map((taf) => {
          // make sure forecast exists and is an array
          if (!taf?.forecast) {
            taf.forecast = [];
          } else if (!(taf?.forecast instanceof Array)) {
            taf.forecast = [taf?.forecast];
          }
          // make sure sky_condition exists for each forecast
          for (let index = 0; index < taf.forecast.length; index++) {
            const item = taf.forecast[index];
            if (!item?.sky_condition) {
              item.sky_condition = [];
            } else if (!(item?.sky_condition instanceof Array)) {
              item.sky_condition = [item?.sky_condition];
            }
          }
          return taf;
        });
      }
      case "STATIONS": {
        return data.map((station) => {
          // combination of station type: flatten the nested flags object
          // into a list of its keys
          if (station?.site_type) {
            station.site_type = Object.keys(station.site_type);
          }
          return station;
        });
      }
    }
    // All other datasource types are passed through unmodified.
    return data;
  };

  // Build (but do not send) request URLs — useful for debugging/logging.
  URI = {
    AW: <T extends IOptions>(options: T): string => {
      return axios.getUri({
        url: Client.api.AW,
        params: { ...options, requestType: "retrieve", format: "xml" },
      });
    },
    AVT7: (AirportCode: string): string => {
      return axios.getUri({
        url: Client.api.AVT7,
        params: { airport4Code: AirportCode },
      });
    },
  };

  /**
   * Query the ADDS data server. The result element type is derived from the
   * options type (METAR options -> IMetar[], TAF options -> ITaf[], ...).
   * Always resolves to an array; an empty response yields [].
   */
  async AW<T extends IOptions>(
    options: T
  ): Promise<
    T extends IMetarOptions
      ? IMetar[]
      : T extends ITafOptions
      ? ITaf[]
      : T extends IAircraftReportsOptions
      ? IAIREP[]
      : T extends IAirSigmetOptions
      ? IAirSigmet[]
      : T extends IGAirmetOptions
      ? IGAirMet[]
      : T extends IStationOptions
      ? IStation[]
      : never
  > {
    const res = await axios.get(Client.api.AW, {
      params: { ...options, requestType: "retrieve", format: "xml" },
    });
    if (this.options?.debug) {
      console.log("API Response\n\n", res.data, "\n\n\n");
    }
    // parse xml
    const parsedData = parser.parse(res.data);
    const finalData =
      parsedData?.response?.data?.[this.selectField(options.datasource)];
    if (this.options?.debug) {
      console.log("Parsed Data\n\n", finalData, "\n\n\n");
    }
    // final output
    if (!finalData) {
      return [] as never;
    }
    // A single record is parsed as an object, not a one-element array.
    const output = finalData instanceof Array ? finalData : [finalData];
    return this.FormatOutput(options.datasource, output) as never;
  }

  /** Fetch the AVT7 METAR info for one 4-letter airport code. */
  AVT7 = async (AirportCode: string): Promise<IAVT7Metar> => {
    const res = await axios.get<IAVT7Metar>(Client.api.AVT7, {
      params: {
        airport4Code: AirportCode,
      },
    });
    return res.data;
  };
}
|
import * as Either from 'fp-ts/lib/Either';
import * as Option from 'fp-ts/lib/Option';
import { pipe } from 'fp-ts/lib/pipeable';
import * as TaskEither from 'fp-ts/lib/TaskEither';
import { defaults } from 'lodash';
import { IPrism, IPrismComponents, IPrismConfig, IPrismDiagnostic, IPrismProxyConfig } from './types';
import { validateSecurity } from './utils/security';
import { sequenceT } from 'fp-ts/lib/Apply';
import { getSemigroup } from 'fp-ts/lib/NonEmptyArray';
// Applicative validation that accumulates IPrismDiagnostic failures from all
// checks instead of short-circuiting on the first Left.
const sequenceValidation = sequenceT(Either.getValidation(getSemigroup<IPrismDiagnostic>()));
// Proxying is in effect exactly when mocking is disabled.
function isProxyConfig(p: IPrismConfig): p is IPrismProxyConfig {
  const mockingEnabled = Boolean(p.mock);
  return !mockingEnabled;
}
/**
 * Build a Prism instance from a default configuration and a set of pluggable
 * components (router, validators, mocker, forwarder, logger).
 *
 * The returned `request` function routes the input to a resource, validates
 * it, then either mocks a response or forwards the request upstream, and
 * finally validates the produced output.
 */
export function factory<Resource, Input, Output, Config extends IPrismConfig>(
  defaultConfig: Config,
  components: IPrismComponents<Resource, Input, Output, Config>
): IPrism<Resource, Input, Output, Config> {
  type ResourceAndValidation = {
    resource: Resource;
    inputValidations: IPrismDiagnostic[];
  };

  // Run request validation and the security check together, accumulating
  // diagnostics from both. Failures do NOT abort the request: the Left
  // (diagnostics) is folded into a plain array and carried along.
  const inputValidation = (
    resource: Resource,
    input: Input,
    config: Config
  ): TaskEither.TaskEither<Error, ResourceAndValidation> =>
    pipe(
      sequenceValidation(
        config.validateRequest ? components.validateInput({ resource, element: input }) : Either.right(input),
        config.checkSecurity ? validateSecurity(input, resource) : Either.right(input)
      ),
      Either.fold(inputValidations => inputValidations as IPrismDiagnostic[], () => []),
      inputValidations => TaskEither.right({ resource, inputValidations })
    );

  // Produce the output either by forwarding to the upstream (proxy mode) or
  // by negotiating a mocked response (mock mode).
  const mockOrForward = (
    resource: Resource,
    input: Input,
    config: Config,
    inputValidations: IPrismDiagnostic[]
  ): TaskEither.TaskEither<Error, ResourceAndValidation & { output: Output }> => {
    const produceOutput = isProxyConfig(config)
      ? components.forward(input, config.upstream.href)
      : TaskEither.fromEither(
          components.mock({
            resource,
            input: {
              validations: inputValidations,
              data: input,
            },
            config: config.mock,
          })(components.logger.child({ name: 'NEGOTIATOR' }))
        );

    return pipe(
      produceOutput,
      TaskEither.map(output => ({ output, resource, inputValidations }))
    );
  };

  return {
    request: (input: Input, resources: Resource[], c?: Config) => {
      // build the config for this request (per-call options override defaults)
      const config = defaults<unknown, Config>(c, defaultConfig);

      return pipe(
        TaskEither.fromEither(components.route({ resources, input })),
        TaskEither.chain(resource => inputValidation(resource, input, config)),
        TaskEither.chain(({ resource, inputValidations }) => mockOrForward(resource, input, config, inputValidations)),
        TaskEither.map(({ output, resource, inputValidations }) => {
          // Output validation is advisory: failures become diagnostics,
          // success contributes an empty diagnostics list.
          const outputValidations = config.validateResponse
            ? pipe(
                Option.fromEither(Either.swap(components.validateOutput({ resource, element: output }))),
                Option.getOrElse<IPrismDiagnostic[]>(() => [])
              )
            : [];

          return {
            input,
            output,
            validations: {
              input: inputValidations,
              output: outputValidations,
            },
          };
        })
      )().then(v =>
        // Unwrap the Either: reject (throw) on error, resolve on success.
        pipe(
          v,
          Either.fold(
            e => {
              throw e;
            },
            o => o
          )
        )
      );
    },
  };
}
|
# -*- coding:utf-8 -*-
import sys

# Python 2 only: force UTF-8 as the default string encoding so implicit
# str/unicode conversions elsewhere do not raise UnicodeDecodeError.
# `reload(sys)` restores the `setdefaultencoding` attribute that site.py
# deletes at startup. Python 3 has neither a `reload` builtin nor this hook
# (str is already Unicode), so the shim must be skipped there instead of
# crashing with NameError.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 — `reload` is a builtin on Python 2 only
    sys.setdefaultencoding('utf-8')
class ValidationError(ValueError):
    """Raised when a value fails validation.

    Subclasses ValueError so existing ``except ValueError`` handlers
    also catch it.
    """
    pass
|
package com.fintechplatform.api.profile.models
/**
 * Identity-document record attached to a user profile.
 *
 * @param userId id of the owning user
 * @param documentId id of this document record
 * @param docType kind of document (see [DocType])
 * @param bucketObjectIdPages storage-bucket object ids — presumably one per
 *        document page (confirm against the upload code)
 * @param imagesBase64 optional page images encoded as base64 strings
 */
data class UserDocuments(val userId: String?,
                         val documentId: String?,
                         val docType: DocType?,
                         val bucketObjectIdPages: List<String>?,
                         val imagesBase64: List<String>? = null)
|
import ServiceSettings from "../settings";
// Base URL for all API requests, taken from the service settings.
export const BaseURL = ServiceSettings.apiURL;
|
--
-- this test shows the dependency system in action: prepared statements and
-- open cursors are invalidated and transparently recompiled when the objects
-- they depend on (tables, indexes) are dropped, recreated or rolled back
--
autocommit off;
create table t(i int);
create table s(i int);
-- prepared statements with varying dependencies on t and s
prepare ins as 'insert into t (i) values (1956)';
prepare ins_s as 'insert into s (i) values (1956)';
prepare sel as 'select i from t';
prepare sel2 as 'select i from (select i from t) a';
prepare sel_s as 'select i from s where i = (select i from t)';
prepare upd as 'update t set i = 666 where i = 1956';
prepare del as 'delete from t where i = 666';
prepare ins_sel as 'insert into t select * from s';
execute ins;
execute ins_s;
execute sel;
execute sel2;
execute sel_s;
execute upd;
execute sel;
execute del;
execute sel;
execute ins_sel;
execute sel;
drop table t;
-- these should fail, can't find table
execute ins;
execute sel;
execute sel2;
execute upd;
execute del;
execute sel_s;
execute ins_sel;
create table t(i int);
-- these should recompile and work, table now found
execute ins;
-- expect one row only
execute sel;
execute sel2;
execute sel_s;
execute upd;
-- test update
execute sel;
execute del;
-- test delete
execute sel;
execute ins_sel;
execute sel;
rollback;
-- these should fail, the table will disappear at the rollback
execute ins;
execute sel;
execute sel2;
execute sel_s;
execute upd;
execute del;
-- recreate t again
create table t(i int);
-- these should recompile and work, table now found
execute ins;
-- open a cursor on t
get cursor c1 as 'select * from t';
-- dropping t should fail, due to open cursor
drop table t;
-- insert should still succeed, since table not dropped
execute ins;
-- close cursor
close c1;
-- drop table should succeed
drop table t;
-- verify that invalidate worked this time
execute ins;
execute sel;
execute sel2;
execute upd;
execute del;
execute ins_sel;
-- cleanup, roll everything back to the beginning
rollback;
-- verify that cascading invalidations work
create table t1(c1 int);
insert into t1 values 1, 2;
get cursor c1 as 'select c1 from t1 for update of c1';
-- positioned update dependent on cursor c1
prepare u1 as 'update t1 set c1 = c1 + 1 where current of c1';
next c1;
close c1;
execute u1;
-- cleanup, roll everything back to the beginning
rollback;
-- verify that create index invalidates based on table and
-- drop index invalidates based on the index
create table t1(c1 int, c2 int);
insert into t1 values (1,1), (2, 1), (3,3);
create index i1 on t1(c1);
get cursor c1 as 'select c1 from t1 where c2 = 1 for update of c1';
next c1;
prepare u1 as 'update t1 set c1 = c1 + 1 ';
prepare i1 as 'insert into t1 values (4, 4)';
prepare d1 as 'delete from t1 where c2 = 3';
drop index i1;
-- u1 should be recompiled successfully
execute u1;
select * from t1;
-- recreate index i1, this time on c2
create index i1 on t1(c2);
next c1;
close c1;
-- i1 and d1 should have been invalidated and recompiled
execute i1;
-- check the state of the index
select * from t1 where c2 > 0;
execute d1;
-- check the state of the index
select * from t1 where c2 > 0;
-- cleanup, roll everything back to the beginning
rollback;
|
-- phpMyAdmin SQL Dump
-- version 5.0.2
-- https://www.phpmyadmin.net/
--
-- 主机: localhost
-- 生成日期: 2020-12-22 22:33:28
-- 服务器版本: 8.0.19
-- PHP 版本: 7.4.4
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
START TRANSACTION;
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- 数据库: `course_schedule`
--
-- --------------------------------------------------------
--
-- 表的结构 `courses`
--
-- Course catalogue: one row per subject taught.
CREATE TABLE `courses` (
  `id` tinyint NOT NULL COMMENT '课程id',
  `name` char(6) NOT NULL COMMENT '课程名称',
  `teacher` char(10) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL COMMENT '授课老师',
  `sub_course` tinyint(1) NOT NULL COMMENT '是否为副课'
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- 转存表中的数据 `courses`
--
INSERT INTO `courses` (`id`, `name`, `teacher`, `sub_course`) VALUES
(1, '语文', '语文老师', 1),
(2, '数学', '数学老师', 1),
(3, '外语', '外语老师', 1),
(4, '物理', '物理老师', 0),
(5, '化学', '化学老师', 0),
(6, '生物', '生物老师', 0),
(7, '政治', '政治老师', 0),
(8, '历史', '历史老师', 0),
(9, '地理', '地理老师', 0),
(10, '音乐', '音乐老师', 0),
(11, '美术', '美术老师', 0),
(12, '体育', '体育老师', 0),
(13, '班会', '班主任', 1),
(14, '自习', '无', 0);
-- --------------------------------------------------------
--
-- 表的结构 `timetable`
--
-- Weekly timetable: one row per scheduled lesson slot, keyed by course id.
CREATE TABLE `timetable` (
  `cid` tinyint NOT NULL COMMENT '对应课程id',
  `number` tinyint NOT NULL COMMENT '第几节课',
  `week` tinyint(1) NOT NULL COMMENT '课程星期',
  `round` tinyint(1) NOT NULL COMMENT '第几轮周的课'
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- 转存表中的数据 `timetable`
--
INSERT INTO `timetable` (`cid`, `number`, `week`, `round`) VALUES
(1, 3, 1, 0),
(1, 5, 1, 0),
(1, 1, 2, 0),
(1, 2, 3, 0),
(1, 4, 4, 0),
(1, 5, 4, 0),
(2, 2, 1, 0),
(2, 3, 2, 0),
(2, 1, 3, 0),
(2, 2, 4, 0),
(2, 6, 4, 0),
(2, 2, 5, 0),
(3, 1, 1, 0),
(3, 2, 2, 0),
(3, 4, 3, 0),
(3, 3, 4, 0),
(3, 7, 4, 0),
(3, 7, 5, 0),
(4, 7, 1, 0),
(4, 5, 2, 0),
(4, 1, 5, 0),
(5, 4, 1, 0),
(5, 3, 3, 0),
(5, 3, 5, 0),
(6, 7, 2, 0),
(6, 8, 3, 0),
(6, 9, 3, 0),
(6, 5, 5, 0),
(7, 6, 1, 0),
(7, 5, 3, 0),
(7, 6, 5, 0),
(8, 1, 4, 0),
(9, 6, 2, 0),
(9, 6, 3, 0),
(9, 7, 3, 0),
(9, 4, 5, 0),
(10, 8, 5, 0),
(11, 8, 2, 0),
(12, 8, 1, 0),
(12, 8, 4, 0),
(13, 9, 1, 0),
(14, 4, 2, 0),
(14, 9, 2, 0),
(14, 9, 4, 0),
(3, 10, 1, 0),
(3, 11, 1, 0),
(5, 10, 2, 0),
(1, 11, 2, 0),
(2, 10, 3, 0),
(4, 11, 3, 0),
(9, 10, 4, 0),
(6, 11, 4, 0),
(1, 1, 7, 0),
(1, 2, 7, 0),
(2, 4, 7, 0),
(2, 5, 7, 0),
(3, 6, 7, 1),
(7, 7, 7, 1),
(3, 8, 7, 1),
(2, 9, 7, 1),
(3, 6, 7, 2),
(7, 7, 7, 2),
(3, 8, 7, 2),
(1, 9, 7, 2),
(2, 6, 7, 3),
(7, 7, 7, 3),
(3, 8, 7, 3),
(1, 9, 7, 3),
(10, 11, 7, 0),
(9, 10, 7, 0);
--
-- 转储表的索引
--
--
-- 表的索引 `courses`
--
-- Primary key on the course id.
ALTER TABLE `courses`
  ADD PRIMARY KEY (`id`);
--
-- 在导出的表使用AUTO_INCREMENT
--
--
-- 使用表AUTO_INCREMENT `courses`
--
-- Let MySQL assign course ids automatically; the next id will be 16.
ALTER TABLE `courses`
  MODIFY `id` tinyint NOT NULL AUTO_INCREMENT COMMENT '课程id', AUTO_INCREMENT=16;
COMMIT;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
#!/usr/bin/env python
'''
tags
----
Extract all tags from the file.
'''
import io
import json
import os
from collections import Counter
# CONSTANTS
# ---------
HOME = os.path.dirname(os.path.realpath(__file__))
JSON = os.path.join(HOME, "json")
TAGS = os.path.join(HOME, "tags")
# FUNCTIONS
# ---------
def process(path, tags):
    '''Read the post JSON at *path* and count each of its tags into *tags*.'''
    with open(path) as handle:
        post = json.load(handle)
    for tag in post["tags"]:
        tags[tag] += 1
def main():
    '''Walk over all posts, aggregate tag counts, and write tag reports.

    Reads every post JSON file under JSON/<year>/<month>/, counts tag
    occurrences, then writes the aggregate to TAGS/tags.json and to
    TAGS/tags.csv (tab-separated, UTF-8).
    '''
    # iterate over posts, accumulating tag -> occurrence count
    tags = Counter()
    for year in os.listdir(JSON):
        for month in os.listdir(os.path.join(JSON, year)):
            for name in os.listdir(os.path.join(JSON, year, month)):
                path = os.path.join(JSON, year, month, name)
                process(path, tags)

    # most-frequent-first list of (tag, count) pairs
    common = tags.most_common()
    with open(os.path.join(TAGS, "tags.json"), "w") as f:
        json.dump(common, f)

    # Write the CSV through an encoding-aware text handle instead of calling
    # .encode() per line: writing bytes to a text-mode file raises TypeError
    # on Python 3. io.open behaves the same on Python 2 and 3.
    with io.open(os.path.join(TAGS, "tags.csv"), "w", encoding="utf-8") as f:
        f.write(u"Tag\tCounts\n")
        for tag, count in common:
            f.write(u"{}\t{}\n".format(tag, count))
# Allow the module to be imported without side effects.
if __name__ == '__main__':
    main()
|
/*
Myrtille: A native HTML4/5 Remote Desktop Protocol client.
Copyright(c) 2014-2021 Cedric Coste
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
using System;
using System.Diagnostics;
using System.IO;
using System.Security.Cryptography;
using System.Text;
using System.Text.RegularExpressions;
namespace Myrtille.Helpers
{
/// <summary>
/// Crypto helpers: DPAPI-based hashing of RDP passwords ("password 51:b:"
/// entries in .rdp files) and password-based AES string encryption.
/// </summary>
public static class CryptoHelper
{
    #region RDP

    /*
        Adapted from https://gallery.technet.microsoft.com/scriptcenter/Password-Text-String-34711a5e
        (original script by Ken Sweet). The companion powershell script
        ("password51.ps1", located in the myrtille bin folder at runtime or in
        the Myrtille.Services project under Visual Studio) was modified to use
        "LocalMachine" instead of "CurrentUser" (the powershell user account is
        different from IIS) and unicode (UTF-16LE) instead of UTF-8 in order to
        work with .rdp files ("password 51:b:").

        Notes:
        - the hash is only valid on the machine which generated it (the
          myrtille gateway, DPAPI machine scope); it won't work elsewhere
        - you may need to relax the script execution policy to run the script:
          https://technet.microsoft.com/en-us/library/ee176961.aspx
        - an older method to generate rdp passwords (Delphi/Pascal tool):
          https://www.remkoweijnen.nl/blog/2007/10/18/how-rdp-passwords-are-encrypted/
          and a .NET implementation: https://msdn.microsoft.com/en-us/library/aa302402.aspx
          the method above appears backward compatible with it (its 492-char
          hashes are decrypted by Remko Weijnen's tool and work in .rdp files)
        - mstsc.exe may not save hashed passwords into .rdp files by default:
          https://superuser.com/questions/139665
        - .rdp files with remoteapp: https://technet.microsoft.com/en-us/library/gg674996(v=ws.10).aspx
    */

    /// <summary>
    /// Encrypt an RDP password with DPAPI (machine scope) and return it as an
    /// uppercase hex string suitable for a .rdp "password 51:b:" entry.
    /// Rethrows on failure after tracing the error.
    /// </summary>
    public static string RDP_Encrypt(string password)
    {
        try
        {
            // UTF-16LE bytes, protected for this machine only.
            var bytes = ProtectedData.Protect(Encoding.Unicode.GetBytes(password), null, DataProtectionScope.LocalMachine);
            var hex = new StringBuilder(bytes.Length * 2);
            foreach (var _byte in bytes)
            {
                hex.AppendFormat("{0:X2}", _byte);
            }
            return hex.ToString();
        }
        catch (Exception exc)
        {
            Trace.TraceError("Failed to encrypt rdp password {0} ({1})", password, exc);
            throw;
        }
    }

    /// <summary>
    /// Decrypt a hex-encoded, DPAPI-protected RDP password hash back to the
    /// clear-text password. Only works on the machine that encrypted it.
    /// </summary>
    public static string RDP_Decrypt(string passwordHash)
    {
        try
        {
            // Convert each hex pair back into a byte.
            var bytes = new byte[passwordHash.Length / 2];
            var i = 0;
            foreach (var hex in Regex.Matches(passwordHash, "(..)"))
            {
                bytes[i++] = Convert.ToByte(hex.ToString(), 16);
            }
            return Encoding.Unicode.GetString(ProtectedData.Unprotect(bytes, null, DataProtectionScope.LocalMachine));
        }
        catch (Exception exc)
        {
            Trace.TraceError("Failed to decrypt rdp password hash {0} ({1})", passwordHash, exc);
            throw;
        }
    }

    #endregion

    #region AES

    /// <summary>
    /// AES (Rijndael, CBC, 256-bit key) encrypt a string with a password;
    /// key and IV are derived via PBKDF2 (Rfc2898DeriveBytes, 1000 iterations,
    /// fixed salt). Returns the ciphertext as base64.
    /// </summary>
    public static string AES_Encrypt(string stringToBeEncrypted, string passwordString)
    {
        string encrypted;
        var passwordBytes = Encoding.UTF8.GetBytes(passwordString);
        var bytesToBeEncrypted = Encoding.UTF8.GetBytes(stringToBeEncrypted);
        // NOTE(review): fixed, hard-coded salt — both sides must share it; a
        // random per-message salt would be stronger but changes the format.
        var saltBytes = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 };
        using (var ms = new MemoryStream())
        {
            using (var AES = new RijndaelManaged())
            {
                AES.KeySize = 256;
                AES.BlockSize = 128;
                var key = new Rfc2898DeriveBytes(passwordBytes, saltBytes, 1000);
                AES.Key = key.GetBytes(AES.KeySize / 8);
                AES.IV = key.GetBytes(AES.BlockSize / 8);
                AES.Mode = CipherMode.CBC;
                using (var cs = new CryptoStream(ms, AES.CreateEncryptor(), CryptoStreamMode.Write))
                {
                    cs.Write(bytesToBeEncrypted, 0, bytesToBeEncrypted.Length);
                    cs.Close();
                }
                encrypted = Convert.ToBase64String(ms.ToArray());
            }
        }
        return encrypted;
    }

    /// <summary>
    /// Reverse of <see cref="AES_Encrypt"/>: base64 ciphertext in, clear text
    /// out. The same password (and the same fixed salt) must be used.
    /// </summary>
    public static string AES_Decrypt(string stringToBeDecrypted, string passwordString)
    {
        string decrypted;
        var bytesToBeDecrypted = Convert.FromBase64String(stringToBeDecrypted);
        var passwordBytes = Encoding.UTF8.GetBytes(passwordString);
        // Set your salt here, change it to meet your flavor:
        // The salt bytes must be at least 8 bytes.
        var saltBytes = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 };
        using (var ms = new MemoryStream())
        {
            using (var AES = new RijndaelManaged())
            {
                AES.KeySize = 256;
                AES.BlockSize = 128;
                var key = new Rfc2898DeriveBytes(passwordBytes, saltBytes, 1000);
                AES.Key = key.GetBytes(AES.KeySize / 8);
                AES.IV = key.GetBytes(AES.BlockSize / 8);
                AES.Mode = CipherMode.CBC;
                using (var cs = new CryptoStream(ms, AES.CreateDecryptor(), CryptoStreamMode.Write))
                {
                    cs.Write(bytesToBeDecrypted, 0, bytesToBeDecrypted.Length);
                    cs.Close();
                }
                decrypted = Encoding.UTF8.GetString(ms.ToArray());
            }
        }
        return decrypted;
    }

    #endregion
}
|
#!/bin/bash
# Rebuild the thoughtworks/strongauth-tests docker image from the repo root.

# Absolute path of the directory containing this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

IMAGE="thoughtworks/strongauth-tests"

# Remove any stale image first so the build starts from a clean slate.
# Test the pipeline's exit status directly (grep -q) instead of inspecting
# $? afterwards, which breaks if any command is inserted in between.
if docker images "$IMAGE" | grep -q "$IMAGE"; then
    echo "deleting go image"
    docker rmi -f "$IMAGE"
else
    echo "go image doesn't exist"
fi

# Quote the build context path in case the checkout lives under a path
# containing spaces.
docker build --tag "$IMAGE" "$DIR/.."
|
#include "PolyphonyGroup.h"
#include <absl/algorithm/container.h>
// Reserve capacity for the engine-wide maximum voice count up front —
// presumably so registerVoice() never reallocates later (confirm whether
// registration can happen on the audio thread).
sfz::PolyphonyGroup::PolyphonyGroup()
{
    voices.reserve(config::maxVoices);
}
// Set the maximum number of voices this group may play simultaneously.
// Enforcement happens elsewhere; this only records the limit.
void sfz::PolyphonyGroup::setPolyphonyLimit(unsigned limit) noexcept
{
    polyphonyLimit = limit;
}
// Track `voice` in this group, keeping the list duplicate-free.
void sfz::PolyphonyGroup::registerVoice(Voice* voice) noexcept
{
    const bool alreadyTracked = absl::c_find(voices, voice) != voices.end();
    if (!alreadyTracked)
        voices.push_back(voice);
}
// Stop tracking `voice`, if present. swapAndPopFirst (project helper) —
// by its name, swaps the match with the last element then pops, so removal
// is O(1) but voice ordering is likely not preserved; confirm in its
// definition if order matters.
void sfz::PolyphonyGroup::removeVoice(const Voice* voice) noexcept
{
    swapAndPopFirst(voices, [voice](const Voice* v) { return v == voice; });
}
// Drop every tracked voice; the reserved capacity is retained.
void sfz::PolyphonyGroup::removeAllVoices() noexcept
{
    voices.clear();
}
unsigned sfz::PolyphonyGroup::numPlayingVoices() const noexcept
{
return absl::c_count_if(voices, [](const Voice* v) {
return !v->offedOrFree();
});
}
|
// Developed by Softeq Development Corporation
// http://www.softeq.com
using System;
using System.IO;
using Autofac;
using CorrelationId;
using EnsureThat;
using Softeq.NetKit.Auth.AppServices.Utility;
using Softeq.NetKit.Auth.Common.EmailTemplates;
using Softeq.NetKit.Auth.Common.Utility.Hashing;
using Softeq.NetKit.Auth.Common.Utility.TokenProvider;
using Softeq.NetKit.Auth.Domain.Models.User;
using Softeq.NetKit.Auth.Integration.Edc;
using Softeq.NetKit.Auth.Integration.Edc.Handlers;
using Softeq.NetKit.Auth.Integration.Email;
using Softeq.NetKit.Auth.Web.Middleware;
using Softeq.NetKit.Auth.Web.Utility;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.DataProtection;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Options;
namespace Softeq.NetKit.Auth.Web.DI
{
/// <summary>
/// Autofac module wiring up configuration objects, authorization handlers and
/// correlation-id services for the auth web application.
/// </summary>
public class StartupModule : Module
{
    protected override void Load(ContainerBuilder builder)
    {
        #region IntegrationServices

        // Azure Service Bus (EDC) settings; every value is mandatory and
        // validated eagerly so misconfiguration fails at resolve time.
        builder
            .Register(context =>
            {
                var configuration = context.Resolve<IConfiguration>();
                var connectionString = configuration["AzureServiceBus:ConnectionString"];
                Ensure.That(connectionString).IsNotNullOrWhiteSpace();
                var topicClientName = configuration["AzureServiceBus:TopicName"];
                Ensure.That(topicClientName).IsNotNullOrWhiteSpace();
                var subscriptionClientName = configuration["AzureServiceBus:SubscriptionName"];
                Ensure.That(subscriptionClientName).IsNotNullOrWhiteSpace();
                var callAutoCancelQueue = configuration["AzureServiceBus:QueueName"];
                Ensure.That(callAutoCancelQueue).IsNotNullOrWhiteSpace();
                var messageTimeToLiveInMinutes = configuration["AzureServiceBus:MessageTimeToLiveInMinutes"];
                Ensure.That(messageTimeToLiveInMinutes).IsNotNullOrWhiteSpace();
                var eventPublisherId = configuration["AzureServiceBus:EventPublisherId"];
                Ensure.That(eventPublisherId).IsNotNullOrWhiteSpace();
                return new EdcConfiguration(connectionString, topicClientName, subscriptionClientName,
                    callAutoCancelQueue, Convert.ToInt32(messageTimeToLiveInMinutes), eventPublisherId);
            })
            .As<EdcConfiguration>()
            .SingleInstance();

        // Data-protection token provider and password hasher for User.
        builder.Register(x =>
        {
            var dataProtectorTokenProvider = x.Resolve<IDataProtectionProvider>();
            var options = x.Resolve<IOptions<DefaultDataProtectorTokenProviderOptions>>();
            return new DefaultDataProtectorTokenProvider<User>(dataProtectorTokenProvider, options);
        });
        builder.Register(x =>
        {
            var options = x.Resolve<IOptions<DefaultPasswordHasherOptions>>();
            return new DefaultPasswordHasher<User>(options);
        });

        // URLs used by the auth API (application root, reset-password and
        // confirm-email paths).
        builder.Register(context =>
        {
            var appConfig = context.Resolve<IConfiguration>();
            return new AuthApiUrlConfiguration(
                appConfig[ConfigurationSettings.ApplicationUrl],
                appConfig[ConfigurationSettings.AuthResetPasswordPath],
                appConfig[ConfigurationSettings.ConfirmEmailPath]);
        })
        .As<AuthApiUrlConfiguration>();

        // Password lifetime/history rules.
        builder.Register(context =>
        {
            var appConfig = context.Resolve<IConfiguration>();
            return new PasswordConfiguration
            {
                UniqueCount = Convert.ToInt32(appConfig[ConfigurationSettings.PasswordUniqueCount]),
                ActivePeriodInDays = Convert.ToInt32(appConfig[ConfigurationSettings.PasswordActivePeriodInDays]),
                TokenLifeTimeInMinutes = Convert.ToInt32(appConfig[ConfigurationSettings.DataProtectorProviderTokenLifespan])
            };
        })
        .As<PasswordConfiguration>();

        // Password complexity rules.
        builder.Register(context =>
        {
            var appConfig = context.Resolve<IConfiguration>();
            return new UserPasswordConfiguration
            {
                RequiredLength = Convert.ToInt32(appConfig[ConfigurationSettings.UserPasswordRequiredLength]),
                MaximumLength = Convert.ToInt32(appConfig[ConfigurationSettings.UserPasswordMaximumLength]),
                Regex = appConfig[ConfigurationSettings.UserPasswordRegex]
            };
        })
        .As<UserPasswordConfiguration>();

        builder.Register(context =>
        {
            var appConfig = context.Resolve<IConfiguration>();
            return new UserEmailConfiguration
            {
                MaximumLength = Convert.ToInt32(appConfig[ConfigurationSettings.UserEmailMaximumLength])
            };
        })
        .As<UserEmailConfiguration>();

        // SendGrid mailing settings.
        builder.Register(context =>
        {
            var configuration = context.Resolve<IConfiguration>();
            return new EmailConfiguration(
                configuration[ConfigurationSettings.SendGridApiKey],
                configuration[ConfigurationSettings.SendGridSenderEmail],
                configuration[ConfigurationSettings.SendGridSenderEmailName]);
        })
        .As<EmailConfiguration>()
        .SingleInstance();

        // Email templates live in "Templates" next to the application binaries.
        builder.Register(context =>
        {
            var templatesFolderPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "Templates");
            return new EmailTemplateConfiguration(templatesFolderPath);
        })
        .As<EmailTemplateConfiguration>()
        .SingleInstance();

        builder.Register(context =>
        {
            var configuration = context.Resolve<IConfiguration>();
            return new EmailTemplatesConfiguration(
                configuration[ConfigurationSettings.EmailConfirmationEmailTemplateName],
                configuration[ConfigurationSettings.ResetPasswordEmailTemplateName],
                configuration[ConfigurationSettings.ChangePasswordEmailTemplateName],
                configuration[ConfigurationSettings.PasswordHasExpiredEmailTemplateName]);
        })
        .As<EmailTemplatesConfiguration>()
        .SingleInstance();

        #endregion

        #region Handlers

        // All authorization handlers are registered under IAuthorizationHandler
        // so ASP.NET Core authorization can resolve them collectively.
        builder.RegisterType<ApiKeyAuthorizationHandler>().As<IAuthorizationHandler>();
        builder.RegisterType<UserStatusAuthorizationHandler>().As<IAuthorizationHandler>();
        builder.RegisterType<PendingAllowedAuthorizationRequirementHandler>().As<IAuthorizationHandler>();
        builder.RegisterType<CompletedEventHandler>();

        #endregion

        // Correlation-id plumbing: one accessor per application, a fresh
        // factory per dependency.
        builder.RegisterType<CorrelationContextAccessor>().As<ICorrelationContextAccessor>().SingleInstance();
        builder.RegisterType<CorrelationContextFactory>().As<ICorrelationContextFactory>().InstancePerDependency();
    }
}
}
|
<?php
namespace Nwidart\Themes\Commands;
use Illuminate\Console\Command;
class SetupCommand extends Command
{
    /**
     * The console command name.
     *
     * @var string
     */
    protected $name = 'theme:setup';

    /**
     * The console command description.
     *
     * @var string
     */
    protected $description = 'Setting up themes folders for first use.';

    /**
     * Execute the console command: create both theme-related folders.
     */
    public function handle()
    {
        $this->generateThemesFolder();
        $this->generateAssetsFolder();
    }

    /**
     * Create the directory that holds the themes themselves.
     */
    public function generateThemesFolder()
    {
        $path = $this->laravel['themes']->config('paths.themes');

        $this->generateDirectory(
            $path,
            'Themes directory created successfully',
            'Themes directory already exist'
        );
    }

    /**
     * Create the directory that holds the published theme assets.
     */
    public function generateAssetsFolder()
    {
        $path = $this->laravel['themes']->config('paths.assets');

        $this->generateDirectory(
            $path,
            'Assets directory created successfully',
            'Assets directory already exist'
        );
    }

    /**
     * Create $dir (recursively, mode 0755) unless it already exists,
     * reporting the outcome on the console.
     *
     * @param $dir
     * @param $success message shown when the directory is created
     * @param $error   message shown when it already exists
     */
    protected function generateDirectory($dir, $success, $error)
    {
        $files = $this->laravel['files'];

        if ($files->isDirectory($dir)) {
            $this->error($error);

            return;
        }

        $files->makeDirectory($dir, 0755, true, true);
        $this->info($success);
    }
}
|
## [coolq-cross-bot](https://github.com/kizx/coolq-cross-bot)
基于[酷Q](https://cqp.cc/)和[nonebot](https://nonebot.cqp.moe/)的QQ机器人
## 功能
* handsome时光机功能
> 功能说明见[我的博客](https://www.2bboy.com/archives/124.html)
* 群管理功能
> 目前包括群成员加好友自动同意、入群欢迎和退群提示、清除酷Q图片缓存、智能复读机
|
# CocoaPods specification for the UseDesk Swift SDK.
Pod::Spec.new do |s|
  s.name             = 'UseDesk_SDK_Swift'
  s.version          = '0.3.10'
  # TODO(review): summary/description below are still the pod-template
  # placeholders — replace with a real description before publishing.
  s.summary          = 'A short description of UseDesk.'
  s.description      = <<-DESC
TODO: Add long description of the pod here.
                       DESC
  s.homepage         = 'https://github.com/usedesk/UseDeskSwift'
  s.license          = { :type => 'MIT', :file => 'LICENSE' }
  s.author           = { 'serega@budyakov.com' => 'kon.sergius@gmail.com' }
  s.source           = { :git => 'https://github.com/usedesk/UseDeskSwift.git', :tag => s.version.to_s }
  s.ios.deployment_target = '10.0'
  s.swift_version = '4.0'
  # NOTE(review): built as a static framework — presumably required by one of
  # the dependencies below; confirm before changing.
  s.static_framework = true
  s.ios.source_files = 'UseDesk/Classes/*.{m,h,swift}'
  # Image assets and UI resources shipped in the 'UseDesk' resource bundle.
  s.resource_bundles = {
    'UseDesk' => ['UseDesk/Assets/*.{png,xcassets,imageset,jpeg,jpg}', 'UseDesk/Classes/*.{storyboard,xib,bundle}']
  }
  s.frameworks = 'UIKit', 'MapKit' ,'AVFoundation'
  s.dependency 'MBProgressHUD', '~> 1.0'
  s.dependency 'NYTPhotoViewer', '1.2.0'
  s.dependency 'ProgressHUD'
  s.dependency 'Socket.IO-Client-Swift'
  s.dependency 'Alamofire', '~> 4.0'
  s.dependency 'QBImagePickerController', '~> 3.4'
  s.dependency 'UIAlertController+Blocks'
  s.dependency 'SDWebImage', '~> 4.0'
end
|
using System;
using System.Collections.Generic;
using System.Reflection;
using System.Threading.Tasks;
using System.Web.Cors;
using System.Web.Hosting;
using Autofac;
using Autofac.Extras.FileSystemRegistration;
using Microsoft.Owin;
using Microsoft.Owin.Cors;
using MirrorSharp;
using MirrorSharp.Advanced;
using MirrorSharp.Owin;
using Owin;
using SharpLab.Server;
using SharpLab.Server.Common;
using SharpLab.Server.Monitoring;
[assembly: OwinStartup(typeof(Startup), nameof(Startup.Configuration))]
namespace SharpLab.Server {
/// <summary>
/// OWIN entry point: configures CORS, the MirrorSharp endpoint, a plain-text
/// status endpoint, and startup/shutdown monitoring.
/// </summary>
public class Startup {
    public void Configuration(IAppBuilder app) {
        // One fully-open CORS policy, shared (as an already-completed task)
        // across all requests.
        var corsPolicyTask = Task.FromResult(new CorsPolicy {
            AllowAnyHeader = true,
            AllowAnyMethod = true,
            AllowAnyOrigin = true,
            PreflightMaxAge = 60 * 60 * 1000 // 1 hour, though Chrome would limit to 10 mins I believe
        });
        var corsOptions = new CorsOptions {
            PolicyProvider = new CorsPolicyProvider {
                PolicyResolver = r => corsPolicyTask
            }
        };
        app.UseCors(corsOptions);
        var container = CreateContainer();
        var mirrorSharpOptions = CreateMirrorSharpOptions(container);
        app.UseMirrorSharp(mirrorSharpOptions);
        // Minimal health-check endpoint.
        app.Map("/status", a => a.Use((c, next) => {
            c.Response.ContentType = "text/plain";
            return c.Response.WriteAsync("OK");
        }));
        // Report startup and register to be notified of hosting shutdown.
        var monitor = container.Resolve<IMonitor>();
        monitor.Event("Application Startup", null);
        HostingEnvironment.RegisterObject(new ShutdownMonitor(monitor));
    }

    /// <summary>
    /// Build MirrorSharp options from container-registered extensions, then
    /// let each language adapter contribute its own setup.
    /// </summary>
    public static MirrorSharpOptions CreateMirrorSharpOptions(IContainer container) {
        var options = new MirrorSharpOptions {
            SetOptionsFromClient = container.Resolve<ISetOptionsFromClientExtension>(),
            SlowUpdate = container.Resolve<ISlowUpdateExtension>(),
            IncludeExceptionDetails = true,
            ExceptionLogger = container.Resolve<IExceptionLogger>()
        };
        var languages = container.Resolve<ILanguageAdapter[]>();
        foreach (var language in languages) {
            language.SlowSetup(options);
        }
        return options;
    }

    /// <summary>
    /// Create the Autofac container from all modules found in the directory
    /// of the executing assembly.
    /// </summary>
    public static IContainer CreateContainer() {
        var builder = new ContainerBuilder();
        var assembly = Assembly.GetExecutingAssembly();
        builder.RegisterAssemblyModulesInDirectoryOf(assembly);
        return builder.Build();
    }

    // Reports the hosting environment's shutdown reason to the monitor.
    private class ShutdownMonitor : IRegisteredObject {
        private readonly IMonitor _monitor;
        public ShutdownMonitor(IMonitor monitor) {
            _monitor = monitor;
        }
        public void Stop(bool immediate) {
            // Only report on the first (non-immediate) stop notification.
            if (immediate)
                return;
            try {
                _monitor.Event("Application Shutdown", null, new Dictionary<string, string> {
                    { "Reason", HostingEnvironment.ShutdownReason.ToString() }
                });
            }
            catch (Exception ex) {
                _monitor.Exception(ex, null);
            }
        }
    }
}
}
|
/**
* Copyright (C) 2016-2020 Xilinx, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may
* not use this file except in compliance with the License. A copy of the
* License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include <boost/test/unit_test.hpp>
#include "../test_helpers.h"
#include "xrt/device/hal.h"
#include "xrt/device/hal2.h"
#include <vector>
#include <iostream>
#include <cstring>
#include <list>
#include <future>
#include <thread>
using namespace xrt_xocl::test;
namespace {
// Exercises buffer-object alloc/write/read/sync/map on `mydev`, `count`
// iterations, verifying round-trip data integrity with BOOST_CHECK_EQUAL.
// The fill byte is derived from the thread id so concurrent runs use
// different patterns.
static void
run(xrt_xocl::device* mydev, unsigned count)
{
  std::thread::id tid = std::this_thread::get_id();
  std::cout << "Thread ID: " << tid << "\n";
  std::cout << "Running BO tests ...\n";
  // Derive a per-thread fill byte kept in the printable range [32,126].
  std::hash<std::thread::id> hasher;
  unsigned randomChar = hasher(tid) % 127;
  if (randomChar < 32)
    randomChar += 32;
  const int bufSize = 1024;
  std::vector<xrt_xocl::hal::buffer_object_handle> boArray(count);
  // RAII host buffers instead of raw new/delete: the original leaked both
  // arrays on any early exit, and re-allocated them every iteration.
  std::vector<char> data1(bufSize);
  std::vector<char> data2(bufSize);
  for (unsigned i = 0; i < count; i++) {
    boArray[i] = mydev->alloc(bufSize);
    std::memset(data1.data(), randomChar, bufSize);
    std::memset(data2.data(), 0, bufSize);
    // Write the pattern, read it back, and verify.
    xrt_xocl::event ev1 = mydev->write(boArray[i], data1.data(), bufSize, 0);
    ev1.wait();
    xrt_xocl::event ev2 = mydev->read(boArray[i], data2.data(), bufSize, 0);
    // BUG FIX: the original waited on ev1 a second time here, so the compare
    // below could race the still-in-flight read. Wait on the read event.
    ev2.wait();
    int result = std::memcmp(data1.data(), data2.data(), bufSize);
    BOOST_CHECK_EQUAL(result, 0);
    // Push the pattern to the device, clobber the host copy with zeros,
    // then sync back and verify via map() that the pattern survived.
    std::memset(data2.data(), 0, bufSize);
    xrt_xocl::event ev3 = mydev->sync(boArray[i], bufSize, 0, xrt_xocl::device::direction::HOST2DEVICE);
    ev3.wait();
    xrt_xocl::event ev4 = mydev->write(boArray[i], data2.data(), bufSize, 0);
    ev4.wait();
    ev4 = mydev->sync(boArray[i], bufSize, 0, xrt_xocl::device::direction::DEVICE2HOST);
    ev4.wait();
    void *data3 = mydev->map(boArray[i]);
    result = std::memcmp(data1.data(), data3, bufSize);
    BOOST_CHECK_EQUAL(result, 0);
    // Advance the fill byte, keeping it below 127.
    randomChar++;
    if (randomChar >= 127)
      randomChar /= 2;
    // Write the new pattern through the mapped pointer, sync it to the
    // device, zero the mapping, sync back, and verify both via read() and
    // via the mapping itself.
    std::memset(data3, randomChar, bufSize);
    std::memset(data1.data(), randomChar, bufSize);
    xrt_xocl::event ev5 = mydev->sync(boArray[i], bufSize, 0, xrt_xocl::device::direction::HOST2DEVICE);
    ev5.wait();
    std::memset(data3, 0, bufSize);
    xrt_xocl::event ev6 = mydev->sync(boArray[i], bufSize, 0, xrt_xocl::device::direction::DEVICE2HOST);
    ev6.wait();
    xrt_xocl::event ev7 = mydev->read(boArray[i], data2.data(), bufSize, 0);
    ev7.wait();
    result = std::memcmp(data2.data(), data3, bufSize);
    BOOST_CHECK_EQUAL(result, 0);
    result = std::memcmp(data1.data(), data3, bufSize);
    BOOST_CHECK_EQUAL(result, 0);
  }
  // Release every mapping and buffer object.
  for (xrt_xocl::hal::buffer_object_handle bo : boArray) {
    mydev->unmap(bo);
    mydev->free(bo);
  }
}
// Runs the BO stress test on four threads concurrently against the same
// device, waiting for all of them to finish (get() rethrows any failure).
static void
runThreads(xrt_xocl::device* mydev, unsigned count)
{
  std::cout << "Launching concurrent BO tests ...\n";
  std::vector<std::future<void>> workers;
  for (int i = 0; i < 4; ++i)
    workers.push_back(std::async(std::launch::async, run, mydev, count));
  for (auto& worker : workers)
    worker.get();
}
}
BOOST_AUTO_TEST_SUITE(test_bo_stress)
// Single-threaded stress: 10000 BO round-trips per matching device.
BOOST_AUTO_TEST_CASE(bo1)
{
  // Only exercise devices backed by the "xcldrv" driver library.
  auto driverFilter = [](const xrt_xocl::hal::device& hal) {
    return hal.getDriverLibraryName().find("xcldrv") != std::string::npos;
  };
  for (auto& device : xrt_xocl::test::loadDevices(std::move(driverFilter))) {
    device.open();
    device.setup(); // this creates the worker threads
    device.printDeviceInfo(std::cout) << "\n";
    std::cout << device.getDriverLibraryName() << "\n";
    run(&device, 10000);
    device.close();
  }
}
// Multi-threaded stress: four concurrent workers, 1000 round-trips each.
BOOST_AUTO_TEST_CASE(bo2)
{
  // Only exercise devices backed by the "xcldrv" driver library.
  auto driverFilter = [](const xrt_xocl::hal::device& hal) {
    return hal.getDriverLibraryName().find("xcldrv") != std::string::npos;
  };
  for (auto& device : xrt_xocl::test::loadDevices(std::move(driverFilter))) {
    device.open();
    device.setup(); // this creates the worker threads
    device.printDeviceInfo(std::cout) << "\n";
    std::cout << device.getDriverLibraryName() << "\n";
    runThreads(&device, 1000);
    device.close();
  }
}
BOOST_AUTO_TEST_SUITE_END()
|
---
layout: post
title: "使用Lua管理不同的渠道"
tags: [Lua, cocos]
---
最近做的项目需要接入不同的渠道. 由于各种因素, 逻辑代码里分布了各种 if .. else 去处理不同渠道的需求, 时间一长会使原有的代码逻辑更加混乱.
为了防止之前的代码更加混乱, 所以把不同渠道的逻辑进行了相对统一的处理, 对于游戏逻辑代码, 只需要使用SDKManager:XXX就可自动调用相应的渠道代码, 如:
``` lua
SDKManager:getOpenId()
```
# 分析
和接SDK的妹子聊了聊, 基本的SDK都会有以下的功能
* 获取openid
* 支付
* 向渠道发送玩家信息
* 调用Back的实体键
* etc
# 实现思路
生成一个基本的SDKManager, 游戏启动时可以从Android的层面去读取出相应的渠道号, 再通过setmetatable把SDKManager转成相应的Manager
生成流程如下:

## 关键代码
* 渠道号对应的Manager的代码, 如果没有Manager需要一个默认的渠道,所以就要使用setmetatable在获取相应的key的时候进行判断
``` lua
-- 根据相应的渠道号实例相应的Manager
local ChannelDic = {
["DEV"] = DevManager,
["TEST"] = TestManager,
["UC"] = UCManager
}
ChannelDic = setmetatable(ChannelDic, {
__index = function(t, key)
local f = rawget(ChannelDic, key) -- 为了防止递归读取要使用rawget
return f or DevManager
end})
```
* 生成相应的渠道Manager的代码, 注意: SDKManager里需要做个mock的方法, 防止在游戏逻辑调用时发生错误
``` lua
SDKManager = {
__channel = "DEV"
}
function SDKManager:InitByChannel(channel)
SDKManager = setmetatable(ChannelDic[channel], {__index = self}) -- 更新元表
self.__channel = channel
end
```
* 对于需要分不同渠道实现的游戏逻辑做了一个特殊的方法去进行处理,如果是当前的渠道调用funcSucess, 否则funcFail.
``` lua
-- 处理对应渠道的特殊需求
function SDKManager:handleSpecialFunc(channel, funcSucess, funcFail)
if (channel ~= self.__channel) then
if (type(funcFail) ~= "function") then return nil end
return funcFail()
end
if (type(funcSucess) ~= "function") then return nil end
return funcSucess()
end
```
为什么不用table去做也是为了代码的美观性
``` lua
SDKManager:handleSpecialFunc("DEV", DEVFunc);
SDKManager:handleSpecialFunc("TEST", TESTFunc);
SDKManager:handleSpecialFunc("UC", UCFunc);
```
个人觉得这样看起来比较整齐一些.
# 相关资料
[LUA程序设计](http://book.douban.com/subject/3076942/)
[相关代码](https://github.com/samael65535/toy_code/tree/master/lua/SDKManager)
|
package io.github.danielpeach.plugin
import com.google.protobuf.Empty
import io.github.danielpeach.plugin.grpc.GRPCStdioGrpcKt
import io.grpc.ManagedChannel
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.channels.Channel
import kotlinx.coroutines.channels.consumeEach
import kotlinx.coroutines.flow.collect
import kotlinx.coroutines.launch
import java.lang.IllegalStateException
import io.github.danielpeach.plugin.grpc.GrpcStdio.StdioData.Channel as StdioChannel
/**
 * Streams a plugin process's stdout/stderr over gRPC and dispatches each
 * chunk according to the configured [StdioMode] in [config].
 *
 * [start] launches three coroutines on [scope]: one reads the combined gRPC
 * stdio stream and fans messages out into per-stream channels; one consumer
 * each then handles the stdout and stderr channels.
 */
internal class Stdio(
  private val scope: CoroutineScope,
  private val config: ClientConfig,
  channel: ManagedChannel,
) {
  // Coroutine stub over the plugin's GRPCStdio service.
  private val stub = GRPCStdioGrpcKt.GRPCStdioCoroutineStub(channel)

  /** Launches the streaming producer and the two per-stream consumers. */
  fun start() {
    val stdoutChannel = Channel<String>()
    val stderrChannel = Channel<String>()
    scope.launch { stream(stdoutChannel, stderrChannel) }
    scope.launch { handle(stdoutChannel, StdioChannel.STDOUT, config.stdioMode) }
    scope.launch { handle(stderrChannel, StdioChannel.STDERR, config.stdioMode) }
  }

  // Producer: collects the combined stdio flow and routes each message to
  // the matching channel; throws on INVALID or unrecognized channel tags.
  private suspend fun stream(stdoutChannel: Channel<String>, stderrChannel: Channel<String>) {
    val stream = stub.streamStdio(Empty.getDefaultInstance())
    stream.collect { s ->
      when (s.channel) {
        StdioChannel.INVALID -> throw IllegalStateException("Invalid data")
        StdioChannel.STDOUT -> stdoutChannel.send(s.data.toStringUtf8())
        StdioChannel.STDERR -> stderrChannel.send(s.data.toStringUtf8())
        else -> throw IllegalStateException("Unknown channel: ${s.channel}")
      }
    }
  }

  // Consumer: drains one stream's channel per the configured mode —
  // Drop discards everything, PipeToWriter copies into the caller-supplied
  // writer (closed via use{} when consumption ends), Log forwards each line
  // to logPlugin.
  private suspend fun handle(channel: Channel<String>, type: StdioChannel, mode: StdioMode) {
    when (mode) {
      is Drop -> channel.consumeEach {}
      is PipeToWriter -> {
        val writer = (if (type == StdioChannel.STDOUT) mode.syncStdout else mode.syncStderr)
        writer.use { w -> channel.consumeEach { line -> w.write(line) } }
      }
      is Log -> {
        channel.consumeEach { line -> logPlugin(line) }
      }
    }
  }
}
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is an attempt at an implementation following the ideal
//
// ```
// struct BTreeMap<K, V> {
// height: usize,
// root: Option<Box<Node<K, V, height>>>
// }
//
// struct Node<K, V, height: usize> {
// keys: [K; 2 * B - 1],
// vals: [V; 2 * B - 1],
// edges: if height > 0 {
// [Box<Node<K, V, height - 1>>; 2 * B]
// } else { () },
// parent: *const Node<K, V, height + 1>,
// parent_idx: u16,
// len: u16,
// }
// ```
//
// Since Rust doesn't actually have dependent types and polymorphic recursion,
// we make do with lots of unsafety.
// A major goal of this module is to avoid complexity by treating the tree as a generic (if
// weirdly shaped) container and avoiding dealing with most of the B-Tree invariants. As such,
// this module doesn't care whether the entries are sorted, which nodes can be underfull, or
// even what underfull means. However, we do rely on a few invariants:
//
// - Trees must have uniform depth/height. This means that every path down to a leaf from a
// given node has exactly the same length.
// - A node of length `n` has `n` keys, `n` values, and (in an internal node) `n + 1` edges.
// This implies that even an empty internal node has at least one edge.
use core::marker::PhantomData;
use core::mem;
use core::nonzero::NonZero;
use core::ptr::{self, Unique};
use core::slice;
use boxed::Box;
use heap;
// Branching-factor parameter of the tree: every node holds at most
// `2 * B - 1` keys, and an internal node holds up to `2 * B` edges.
const B: usize = 6;
// Minimum number of keys a (non-root) node is expected to hold.
pub const MIN_LEN: usize = B - 1;
// Maximum number of keys any node can store.
pub const CAPACITY: usize = 2 * B - 1;
/// The underlying representation of leaf nodes. Note that it is often unsafe to actually store
/// these, since only the first `len` keys and values are assumed to be initialized. As such,
/// these should always be put behind pointers, and specifically behind `BoxedNode` in the owned
/// case.
///
/// See also rust-lang/rfcs#197, which would make this structure significantly more safe by
/// avoiding accidentally dropping unused and uninitialized keys and values.
struct LeafNode<K, V> {
    /// The arrays storing the actual data of the node. Only the first `len` elements of each
    /// array are initialized and valid.
    keys: [K; CAPACITY],
    vals: [V; CAPACITY],
    /// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`.
    /// This either points to an actual node or is null.
    parent: *const InternalNode<K, V>,
    /// This node's index into the parent node's `edges` array.
    /// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`.
    /// This is only guaranteed to be initialized when `parent` is nonnull.
    parent_idx: u16,
    /// The number of keys and values this node stores.
    ///
    /// This is at the end of the node's representation and next to `parent_idx` to encourage
    /// the compiler to join `len` and `parent_idx` into the same 32-bit word, reducing space
    /// overhead.
    len: u16,
}

impl<K, V> LeafNode<K, V> {
    /// Creates a new `LeafNode`. Unsafe because all nodes should really be hidden behind
    /// `BoxedNode`, preventing accidental dropping of uninitialized keys and values.
    unsafe fn new() -> Self {
        LeafNode {
            // As a general policy, we leave fields uninitialized if they can be, as this should
            // be both slightly faster and easier to track in Valgrind.
            keys: mem::uninitialized(),
            vals: mem::uninitialized(),
            // A null parent marks this node as having no parent (see `ascend`).
            parent: ptr::null(),
            parent_idx: mem::uninitialized(),
            len: 0
        }
    }
}
/// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden
/// behind `BoxedNode`s to prevent dropping uninitialized keys and values. Any pointer to an
/// `InternalNode` can be directly casted to a pointer to the underlying `LeafNode` portion of the
/// node, allowing code to act on leaf and internal nodes generically without having to even check
/// which of the two a pointer is pointing at. This property is enabled by the use of `repr(C)`.
#[repr(C)]
struct InternalNode<K, V> {
    // `data` must remain the first field so the leaf-prefix cast described
    // above stays valid under `repr(C)`.
    data: LeafNode<K, V>,
    /// The pointers to the children of this node. `len + 1` of these are considered
    /// initialized and valid.
    edges: [BoxedNode<K, V>; 2 * B],
}

impl<K, V> InternalNode<K, V> {
    /// Creates a new `InternalNode`.
    ///
    /// This is unsafe for two reasons. First, it returns an `InternalNode` by value, risking
    /// dropping of uninitialized fields. Second, an invariant of internal nodes is that `len + 1`
    /// edges are initialized and valid, meaning that even when the node is empty (having a
    /// `len` of 0), there must be one initialized and valid edge. This function does not set up
    /// such an edge.
    unsafe fn new() -> Self {
        InternalNode {
            data: LeafNode::new(),
            edges: mem::uninitialized()
        }
    }
}
/// An owned pointer to a node. This basically is either `Box<LeafNode<K, V>>` or
/// `Box<InternalNode<K, V>>`. However, it contains no information as to which of the two types
/// of nodes is acutally behind the box, and, partially due to this lack of information, has no
/// destructor.
struct BoxedNode<K, V> {
    ptr: Unique<LeafNode<K, V>>
}

impl<K, V> BoxedNode<K, V> {
    /// Takes ownership of a boxed leaf node.
    fn from_leaf(node: Box<LeafNode<K, V>>) -> Self {
        unsafe {
            BoxedNode { ptr: Unique::new(Box::into_raw(node)) }
        }
    }

    /// Takes ownership of a boxed internal node, erasing its type by casting
    /// to the `LeafNode` prefix (valid because `InternalNode` is `repr(C)`).
    fn from_internal(node: Box<InternalNode<K, V>>) -> Self {
        unsafe {
            BoxedNode { ptr: Unique::new(Box::into_raw(node) as *mut LeafNode<K, V>) }
        }
    }

    /// Rebuilds an owned node from a raw pointer. Unsafe because it asserts
    /// ownership of the pointed-to allocation.
    unsafe fn from_ptr(ptr: NonZero<*const LeafNode<K, V>>) -> Self {
        BoxedNode { ptr: Unique::new(ptr.get() as *mut LeafNode<K, V>) }
    }

    /// Exposes the raw node pointer without giving up ownership.
    fn as_ptr(&self) -> NonZero<*const LeafNode<K, V>> {
        unsafe {
            NonZero::new(self.ptr.as_ptr())
        }
    }
}
/// An owned tree. Note that despite being owned, this does not have a destructor,
/// and must be cleaned up manually.
pub struct Root<K, V> {
    node: BoxedNode<K, V>,
    height: usize
}

unsafe impl<K: Sync, V: Sync> Sync for Root<K, V> { }
unsafe impl<K: Send, V: Send> Send for Root<K, V> { }

impl<K, V> Root<K, V> {
    /// Creates a tree consisting of a single empty leaf node (height 0).
    pub fn new_leaf() -> Self {
        Root {
            node: BoxedNode::from_leaf(Box::new(unsafe { LeafNode::new() })),
            height: 0
        }
    }

    /// Borrows the root node immutably.
    pub fn as_ref(&self)
            -> NodeRef<marker::Immut, K, V, marker::LeafOrInternal> {
        NodeRef {
            height: self.height,
            node: self.node.as_ptr(),
            root: self as *const _ as *mut _,
            _marker: PhantomData,
        }
    }

    /// Borrows the root node mutably.
    pub fn as_mut(&mut self)
            -> NodeRef<marker::Mut, K, V, marker::LeafOrInternal> {
        NodeRef {
            height: self.height,
            node: self.node.as_ptr(),
            root: self as *mut _,
            _marker: PhantomData,
        }
    }

    /// Consumes the root, yielding an owned reference to the root node.
    pub fn into_ref(self)
            -> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
        NodeRef {
            height: self.height,
            node: self.node.as_ptr(),
            root: ptr::null_mut(), // FIXME: Is there anything better to do here?
            _marker: PhantomData,
        }
    }

    /// Adds a new internal node with a single edge, pointing to the previous root, and make that
    /// new node the root. This increases the height by 1 and is the opposite of `pop_level`.
    pub fn push_level(&mut self)
            -> NodeRef<marker::Mut, K, V, marker::Internal> {
        let mut new_node = Box::new(unsafe { InternalNode::new() });
        // Satisfy the internal-node invariant: edge 0 points at the old root.
        new_node.edges[0] = unsafe { BoxedNode::from_ptr(self.node.as_ptr()) };

        self.node = BoxedNode::from_internal(new_node);
        self.height += 1;

        let mut ret = NodeRef {
            height: self.height,
            node: self.node.as_ptr(),
            root: self as *mut _,
            _marker: PhantomData
        };

        unsafe {
            // Make the old root's parent pointer refer to the new root.
            ret.reborrow_mut().first_edge().correct_parent_link();
        }

        ret
    }

    /// Removes the root node, using its first child as the new root. This cannot be called when
    /// the tree consists only of a leaf node. As it is intended only to be called when the root
    /// has only one edge, no cleanup is done on any of the other children are elements of the root.
    /// This decreases the height by 1 and is the opposite of `push_level`.
    pub fn pop_level(&mut self) {
        debug_assert!(self.height > 0);

        // Remember the old root's allocation so it can be freed afterwards.
        let top = self.node.ptr.as_ptr() as *mut u8;

        self.node = unsafe {
            BoxedNode::from_ptr(self.as_mut()
                                    .cast_unchecked::<marker::Internal>()
                                    .first_edge()
                                    .descend()
                                    .node)
        };
        self.height -= 1;
        // The new root has no parent.
        self.as_mut().as_leaf_mut().parent = ptr::null();

        unsafe {
            heap::deallocate(
                top,
                mem::size_of::<InternalNode<K, V>>(),
                mem::align_of::<InternalNode<K, V>>()
            );
        }
    }
}
// N.B. `NodeRef` is always covariant in `K` and `V`, even when the `BorrowType`
// is `Mut`. This is technically wrong, but cannot result in any unsafety due to
// internal use of `NodeRef` because we stay completely generic over `K` and `V`.
// However, whenever a public type wraps `NodeRef`, make sure that it has the
// correct variance.
/// A reference to a node.
///
/// This type has a number of paramaters that controls how it acts:
/// - `BorrowType`: This can be `Immut<'a>` or `Mut<'a>` for some `'a` or `Owned`.
///    When this is `Immut<'a>`, the `NodeRef` acts roughly like `&'a Node`,
///    when this is `Mut<'a>`, the `NodeRef` acts roughly like `&'a mut Node`,
///    and when this is `Owned`, the `NodeRef` acts roughly like `Box<Node>`.
/// - `K` and `V`: These control what types of things are stored in the nodes.
/// - `Type`: This can be `Leaf`, `Internal`, or `LeafOrInternal`. When this is
///   `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the
///   `NodeRef` points to an internal node, and when this is `LeafOrInternal` the
///   `NodeRef` could be pointing to either type of node.
pub struct NodeRef<BorrowType, K, V, Type> {
    height: usize,
    node: NonZero<*const LeafNode<K, V>>,
    // This is null unless the borrow type is `Mut`
    root: *const Root<K, V>,
    _marker: PhantomData<(BorrowType, Type)>
}

// Immutable `NodeRef`s behave like shared references and may be freely copied.
impl<'a, K: 'a, V: 'a, Type> Copy for NodeRef<marker::Immut<'a>, K, V, Type> { }
impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> {
    fn clone(&self) -> Self {
        *self
    }
}

// Thread-safety mirrors the reference type each `BorrowType` emulates:
// sharing any `NodeRef` is like sharing `&Node`; sending `Immut` is like
// sending `&Node`; sending `Mut`/`Owned` is like sending `&mut Node`/`Box<Node>`.
unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync
    for NodeRef<BorrowType, K, V, Type> { }

unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send
   for NodeRef<marker::Immut<'a>, K, V, Type> { }
unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send
   for NodeRef<marker::Mut<'a>, K, V, Type> { }
unsafe impl<K: Send, V: Send, Type> Send
   for NodeRef<marker::Owned, K, V, Type> { }
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
    /// Views the node as an `InternalNode`. The `marker::Internal` type
    /// parameter is what justifies the pointer cast (see `InternalNode` docs).
    fn as_internal(&self) -> &InternalNode<K, V> {
        unsafe {
            &*(self.node.get() as *const InternalNode<K, V>)
        }
    }
}

impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
    /// Mutable counterpart of `as_internal`.
    fn as_internal_mut(&mut self) -> &mut InternalNode<K, V> {
        unsafe {
            &mut *(self.node.get() as *mut InternalNode<K, V>)
        }
    }
}
impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
    /// Finds the length of the node. This is the number of keys or values. In an
    /// internal node, the number of edges is `len() + 1`.
    pub fn len(&self) -> usize {
        self.as_leaf().len as usize
    }

    /// Returns the height of this node in the whole tree. Zero height denotes the
    /// leaf level.
    pub fn height(&self) -> usize {
        self.height
    }

    /// Removes any static information about whether this node is a `Leaf` or an
    /// `Internal` node.
    pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
        NodeRef {
            height: self.height,
            node: self.node,
            root: self.root,
            _marker: PhantomData
        }
    }

    /// Temporarily takes out another, immutable reference to the same node.
    fn reborrow<'a>(&'a self) -> NodeRef<marker::Immut<'a>, K, V, Type> {
        NodeRef {
            height: self.height,
            node: self.node,
            root: self.root,
            _marker: PhantomData
        }
    }

    /// Views the node through its `LeafNode` prefix (valid for either node kind).
    fn as_leaf(&self) -> &LeafNode<K, V> {
        unsafe {
            &*self.node.get()
        }
    }

    /// Borrows a slice of this node's initialized keys.
    pub fn keys(&self) -> &[K] {
        self.reborrow().into_slices().0
    }

    /// Borrows a slice of this node's initialized values.
    pub fn vals(&self) -> &[V] {
        self.reborrow().into_slices().1
    }

    /// Finds the parent of the current node. Returns `Ok(handle)` if the current
    /// node actually has a parent, where `handle` points to the edge of the parent
    /// that points to the current node. Returns `Err(self)` if the current node has
    /// no parent, giving back the original `NodeRef`.
    ///
    /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
    /// both, upon success, do nothing.
    pub fn ascend(self) -> Result<
        Handle<
            NodeRef<
                BorrowType,
                K, V,
                marker::Internal
            >,
            marker::Edge
        >,
        Self
    > {
        // A null parent pointer means this node is the root.
        if self.as_leaf().parent.is_null() {
            Err(self)
        } else {
            Ok(Handle {
                node: NodeRef {
                    // The parent is one level closer to the root.
                    height: self.height + 1,
                    node: unsafe {
                        NonZero::new(self.as_leaf().parent as *mut LeafNode<K, V>)
                    },
                    root: self.root,
                    _marker: PhantomData
                },
                idx: self.as_leaf().parent_idx as usize,
                _marker: PhantomData
            })
        }
    }

    /// Returns a handle to the edge left of all key/value pairs.
    pub fn first_edge(self) -> Handle<Self, marker::Edge> {
        Handle::new_edge(self, 0)
    }

    /// Returns a handle to the edge right of all key/value pairs.
    pub fn last_edge(self) -> Handle<Self, marker::Edge> {
        let len = self.len();
        Handle::new_edge(self, len)
    }

    /// Note that `self` must be nonempty.
    pub fn first_kv(self) -> Handle<Self, marker::KV> {
        debug_assert!(self.len() > 0);
        Handle::new_kv(self, 0)
    }

    /// Note that `self` must be nonempty.
    pub fn last_kv(self) -> Handle<Self, marker::KV> {
        let len = self.len();
        debug_assert!(len > 0);
        Handle::new_kv(self, len - 1)
    }
}
impl<K, V> NodeRef<marker::Owned, K, V, marker::Leaf> {
    /// Similar to `ascend`, gets a reference to a node's parent node, but also
    /// deallocate the current node in the process. This is unsafe because the
    /// current node will still be accessible despite being deallocated.
    pub unsafe fn deallocate_and_ascend(self) -> Option<
        Handle<
            NodeRef<
                marker::Owned,
                K, V,
                marker::Internal
            >,
            marker::Edge
        >
    > {
        let ptr = self.as_leaf() as *const LeafNode<K, V> as *const u8 as *mut u8;
        // Ascend first: the parent link must be read while the node's memory
        // is still live.
        let ret = self.ascend().ok();
        heap::deallocate(ptr, mem::size_of::<LeafNode<K, V>>(), mem::align_of::<LeafNode<K, V>>());
        ret
    }
}

impl<K, V> NodeRef<marker::Owned, K, V, marker::Internal> {
    /// Similar to `ascend`, gets a reference to a node's parent node, but also
    /// deallocate the current node in the process. This is unsafe because the
    /// current node will still be accessible despite being deallocated.
    pub unsafe fn deallocate_and_ascend(self) -> Option<
        Handle<
            NodeRef<
                marker::Owned,
                K, V,
                marker::Internal
            >,
            marker::Edge
        >
    > {
        let ptr = self.as_internal() as *const InternalNode<K, V> as *const u8 as *mut u8;
        // Ascend first: the parent link must be read while the node's memory
        // is still live.
        let ret = self.ascend().ok();
        heap::deallocate(
            ptr,
            mem::size_of::<InternalNode<K, V>>(),
            mem::align_of::<InternalNode<K, V>>()
        );
        ret
    }
}
impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
    /// Unsafely asserts to the compiler some static information about whether this
    /// node is a `Leaf`.
    unsafe fn cast_unchecked<NewType>(&mut self)
            -> NodeRef<marker::Mut, K, V, NewType> {
        NodeRef {
            height: self.height,
            node: self.node,
            root: self.root,
            _marker: PhantomData
        }
    }

    /// Temporarily takes out another, mutable reference to the same node. Beware, as
    /// this method is very dangerous, doubly so since it may not immediately appear
    /// dangerous.
    ///
    /// Because mutable pointers can roam anywhere around the tree and can even (through
    /// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut`
    /// can easily be used to make the original mutable pointer dangling, or, in the case
    /// of a reborrowed handle, out of bounds.
    // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts
    // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety.
    unsafe fn reborrow_mut(&mut self) -> NodeRef<marker::Mut, K, V, Type> {
        NodeRef {
            height: self.height,
            node: self.node,
            root: self.root,
            _marker: PhantomData
        }
    }

    /// Mutably views the node through its `LeafNode` prefix.
    fn as_leaf_mut(&mut self) -> &mut LeafNode<K, V> {
        unsafe {
            &mut *(self.node.get() as *mut LeafNode<K, V>)
        }
    }

    /// Mutably borrows this node's initialized keys.
    pub fn keys_mut(&mut self) -> &mut [K] {
        unsafe { self.reborrow_mut().into_slices_mut().0 }
    }

    /// Mutably borrows this node's initialized values.
    pub fn vals_mut(&mut self) -> &mut [V] {
        unsafe { self.reborrow_mut().into_slices_mut().1 }
    }
}
impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> {
    /// Splits the reference into slices of the first `len` (i.e. initialized)
    /// keys and values.
    pub fn into_slices(self) -> (&'a [K], &'a [V]) {
        unsafe {
            (
                slice::from_raw_parts(
                    self.as_leaf().keys.as_ptr(),
                    self.len()
                ),
                slice::from_raw_parts(
                    self.as_leaf().vals.as_ptr(),
                    self.len()
                )
            )
        }
    }
}

impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
    /// Gets a mutable reference to the root itself. This is useful primarily when the
    /// height of the tree needs to be adjusted. Never call this on a reborrowed pointer.
    pub fn into_root_mut(self) -> &'a mut Root<K, V> {
        unsafe {
            &mut *(self.root as *mut Root<K, V>)
        }
    }

    /// Mutable counterpart of `into_slices`: slices over the initialized
    /// keys and values.
    pub fn into_slices_mut(mut self) -> (&'a mut [K], &'a mut [V]) {
        unsafe {
            (
                slice::from_raw_parts_mut(
                    &mut self.as_leaf_mut().keys as *mut [K] as *mut K,
                    self.len()
                ),
                slice::from_raw_parts_mut(
                    &mut self.as_leaf_mut().vals as *mut [V] as *mut V,
                    self.len()
                )
            )
        }
    }
}
impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
    /// Adds a key/value pair the end of the node.
    pub fn push(&mut self, key: K, val: V) {
        // Necessary for correctness, but this is an internal module
        debug_assert!(self.len() < CAPACITY);

        let idx = self.len();

        unsafe {
            // `ptr::write` avoids dropping the uninitialized slot contents.
            ptr::write(self.keys_mut().get_unchecked_mut(idx), key);
            ptr::write(self.vals_mut().get_unchecked_mut(idx), val);
        }

        self.as_leaf_mut().len += 1;
    }

    /// Adds a key/value pair to the beginning of the node.
    pub fn push_front(&mut self, key: K, val: V) {
        // Necessary for correctness, but this is an internal module
        debug_assert!(self.len() < CAPACITY);

        unsafe {
            slice_insert(self.keys_mut(), 0, key);
            slice_insert(self.vals_mut(), 0, val);
        }

        self.as_leaf_mut().len += 1;
    }
}

impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
    /// Adds a key/value pair and an edge to go to the right of that pair to
    /// the end of the node.
    pub fn push(&mut self, key: K, val: V, edge: Root<K, V>) {
        // Necessary for correctness, but this is an internal module
        debug_assert!(edge.height == self.height - 1);
        debug_assert!(self.len() < CAPACITY);

        let idx = self.len();

        unsafe {
            ptr::write(self.keys_mut().get_unchecked_mut(idx), key);
            ptr::write(self.vals_mut().get_unchecked_mut(idx), val);
            ptr::write(self.as_internal_mut().edges.get_unchecked_mut(idx + 1), edge.node);

            self.as_leaf_mut().len += 1;

            // Point the freshly written child back at this node.
            Handle::new_edge(self.reborrow_mut(), idx + 1).correct_parent_link();
        }
    }

    /// Fixes the parent links of the children in `first..after_last`.
    fn correct_childrens_parent_links(&mut self, first: usize, after_last: usize) {
        for i in first..after_last {
            Handle::new_edge(unsafe { self.reborrow_mut() }, i).correct_parent_link();
        }
    }

    /// Fixes the parent links of all `len + 1` children.
    fn correct_all_childrens_parent_links(&mut self) {
        let len = self.len();
        self.correct_childrens_parent_links(0, len + 1);
    }

    /// Adds a key/value pair and an edge to go to the left of that pair to
    /// the beginning of the node.
    pub fn push_front(&mut self, key: K, val: V, edge: Root<K, V>) {
        // Necessary for correctness, but this is an internal module
        debug_assert!(edge.height == self.height - 1);
        debug_assert!(self.len() < CAPACITY);

        unsafe {
            slice_insert(self.keys_mut(), 0, key);
            slice_insert(self.vals_mut(), 0, val);
            slice_insert(
                slice::from_raw_parts_mut(
                    self.as_internal_mut().edges.as_mut_ptr(),
                    self.len()+1
                ),
                0,
                edge.node
            );

            self.as_leaf_mut().len += 1;

            // Every child shifted right, so every parent_idx must be fixed.
            self.correct_all_childrens_parent_links();
        }
    }
}
impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
    /// Removes a key/value pair from the end of this node. If this is an internal node,
    /// also removes the edge that was to the right of that pair.
    pub fn pop(&mut self) -> (K, V, Option<Root<K, V>>) {
        // Necessary for correctness, but this is an internal module
        debug_assert!(self.len() > 0);

        let idx = self.len() - 1;

        unsafe {
            let key = ptr::read(self.keys().get_unchecked(idx));
            let val = ptr::read(self.vals().get_unchecked(idx));
            let edge = match self.reborrow_mut().force() {
                ForceResult::Leaf(_) => None,
                ForceResult::Internal(internal) => {
                    let edge = ptr::read(internal.as_internal().edges.get_unchecked(idx + 1));
                    let mut new_root = Root { node: edge, height: internal.height - 1 };
                    // Detach the popped subtree from its old parent.
                    new_root.as_mut().as_leaf_mut().parent = ptr::null();
                    Some(new_root)
                }
            };

            self.as_leaf_mut().len -= 1;
            (key, val, edge)
        }
    }

    /// Removes a key/value pair from the beginning of this node. If this is an internal node,
    /// also removes the edge that was to the left of that pair.
    pub fn pop_front(&mut self) -> (K, V, Option<Root<K, V>>) {
        // Necessary for correctness, but this is an internal module
        debug_assert!(self.len() > 0);

        let old_len = self.len();

        unsafe {
            let key = slice_remove(self.keys_mut(), 0);
            let val = slice_remove(self.vals_mut(), 0);
            let edge = match self.reborrow_mut().force() {
                ForceResult::Leaf(_) => None,
                ForceResult::Internal(mut internal) => {
                    let edge = slice_remove(
                        slice::from_raw_parts_mut(
                            internal.as_internal_mut().edges.as_mut_ptr(),
                            old_len+1
                        ),
                        0
                    );

                    let mut new_root = Root { node: edge, height: internal.height - 1 };
                    // Detach the popped subtree from its old parent.
                    new_root.as_mut().as_leaf_mut().parent = ptr::null();

                    // The remaining children shifted left; fix their parent_idx.
                    for i in 0..old_len {
                        Handle::new_edge(internal.reborrow_mut(), i).correct_parent_link();
                    }

                    Some(new_root)
                }
            };

            self.as_leaf_mut().len -= 1;

            (key, val, edge)
        }
    }

    /// Returns raw pointers to the start of the key and value arrays.
    fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) {
        (
            self.keys_mut().as_mut_ptr(),
            self.vals_mut().as_mut_ptr()
        )
    }
}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
    /// Checks whether a node is an `Internal` node or a `Leaf` node.
    pub fn force(self) -> ForceResult<
        NodeRef<BorrowType, K, V, marker::Leaf>,
        NodeRef<BorrowType, K, V, marker::Internal>
    > {
        // Height zero is the leaf level (see `height`).
        if self.height == 0 {
            ForceResult::Leaf(NodeRef {
                height: self.height,
                node: self.node,
                root: self.root,
                _marker: PhantomData
            })
        } else {
            ForceResult::Internal(NodeRef {
                height: self.height,
                node: self.node,
                root: self.root,
                _marker: PhantomData
            })
        }
    }
}
/// A reference to a specific key/value pair or edge within a node. The `Node` parameter
/// must be a `NodeRef`, while the `Type` can either be `KV` (signifying a handle on a key/value
/// pair) or `Edge` (signifying a handle on an edge).
///
/// Note that even `Leaf` nodes can have `Edge` handles. Instead of representing a pointer to
/// a child node, these represent the spaces where child pointers would go between the key/value
/// pairs. For example, in a node with length 2, there would be 3 possible edge locations - one
/// to the left of the node, one between the two pairs, and one at the right of the node.
pub struct Handle<Node, Type> {
    node: Node,
    idx: usize,
    _marker: PhantomData<Type>
}

impl<Node: Copy, Type> Copy for Handle<Node, Type> { }
// We don't need the full generality of `#[derive(Clone)]`, as the only time `Node` will be
// `Clone`able is when it is an immutable reference and therefore `Copy`.
impl<Node: Copy, Type> Clone for Handle<Node, Type> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<Node, Type> Handle<Node, Type> {
    /// Retrieves the node that contains the edge of key/value pair this handle pointes to.
    pub fn into_node(self) -> Node {
        self.node
    }
}

impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV> {
    /// Creates a new handle to a key/value pair in `node`. `idx` must be less than `node.len()`.
    pub fn new_kv(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self {
        // Necessary for correctness, but in a private module
        debug_assert!(idx < node.len());

        Handle {
            node: node,
            idx: idx,
            _marker: PhantomData
        }
    }

    /// Returns a handle to the edge immediately left of this pair.
    pub fn left_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
        Handle::new_edge(self.node, self.idx)
    }

    /// Returns a handle to the edge immediately right of this pair.
    pub fn right_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
        Handle::new_edge(self.node, self.idx + 1)
    }
}

// Two handles are equal when they point at the same position of the same node.
impl<BorrowType, K, V, NodeType, HandleType> PartialEq
        for Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType> {
    fn eq(&self, other: &Self) -> bool {
        self.node.node == other.node.node && self.idx == other.idx
    }
}
impl<BorrowType, K, V, NodeType, HandleType>
Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType> {
/// Temporarily takes out another, immutable handle on the same location.
pub fn reborrow(&self)
-> Handle<NodeRef<marker::Immut, K, V, NodeType>, HandleType> {
// We can't use Handle::new_kv or Handle::new_edge because we don't know our type
Handle {
node: self.node.reborrow(),
idx: self.idx,
_marker: PhantomData
}
}
}
impl<'a, K, V, NodeType, HandleType>
        Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, HandleType> {

    /// Temporarily takes out another, mutable handle on the same location. Beware, as
    /// this method is very dangerous, doubly so since it may not immediately appear
    /// dangerous.
    ///
    /// Because mutable pointers can roam anywhere around the tree and can even (through
    /// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut`
    /// can easily be used to make the original mutable pointer dangling, or, in the case
    /// of a reborrowed handle, out of bounds.
    // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts
    // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety.
    pub unsafe fn reborrow_mut(&mut self)
            -> Handle<NodeRef<marker::Mut, K, V, NodeType>, HandleType> {

        // We can't use Handle::new_kv or Handle::new_edge because we don't know our type,
        // so build the struct directly from a reborrow of the underlying node.
        Handle {
            node: self.node.reborrow_mut(),
            idx: self.idx,
            _marker: PhantomData
        }
    }
}
impl<BorrowType, K, V, NodeType>
        Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {

    /// Creates a new handle to an edge in `node`. `idx` must be less than or equal to
    /// `node.len()` (a node with `len` pairs has `len + 1` edges).
    pub fn new_edge(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self {
        // Necessary for correctness, but in a private module
        debug_assert!(idx <= node.len());

        Handle {
            node: node,
            idx: idx,
            _marker: PhantomData
        }
    }

    /// Returns a handle to the key/value pair immediately to the left of this edge,
    /// or `Err(self)` if this is the leftmost edge of the node.
    pub fn left_kv(self)
            -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> {

        if self.idx > 0 {
            Ok(Handle::new_kv(self.node, self.idx - 1))
        } else {
            Err(self)
        }
    }

    /// Returns a handle to the key/value pair immediately to the right of this edge,
    /// or `Err(self)` if this is the rightmost edge of the node.
    pub fn right_kv(self)
            -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> {

        if self.idx < self.node.len() {
            Ok(Handle::new_kv(self.node, self.idx))
        } else {
            Err(self)
        }
    }
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
    /// Inserts a new key/value pair between the key/value pairs to the right and left of
    /// this edge. This method assumes that there is enough space in the node for the new
    /// pair to fit.
    ///
    /// The returned pointer points to the inserted value.
    fn insert_fit(&mut self, key: K, val: V) -> *mut V {
        // Necessary for correctness, but in a private module
        debug_assert!(self.node.len() < CAPACITY);

        unsafe {
            // Shift the tails of both arrays right by one and write the new pair
            // into the gap, then bump the length to make the new slot visible.
            slice_insert(self.node.keys_mut(), self.idx, key);
            slice_insert(self.node.vals_mut(), self.idx, val);

            self.node.as_leaf_mut().len += 1;

            self.node.vals_mut().get_unchecked_mut(self.idx)
        }
    }

    /// Inserts a new key/value pair between the key/value pairs to the right and left of
    /// this edge. This method splits the node if there isn't enough room.
    ///
    /// The returned pointer points to the inserted value.
    pub fn insert(mut self, key: K, val: V)
            -> (InsertResult<'a, K, V, marker::Leaf>, *mut V) {

        if self.node.len() < CAPACITY {
            let ptr = self.insert_fit(key, val);
            (InsertResult::Fit(Handle::new_kv(self.node, self.idx)), ptr)
        } else {
            // Node is full: split at the middle pair, then insert into whichever
            // half the original edge index falls in.
            let middle = Handle::new_kv(self.node, B);
            let (mut left, k, v, mut right) = middle.split();
            let ptr = if self.idx <= B {
                unsafe {
                    Handle::new_edge(left.reborrow_mut(), self.idx).insert_fit(key, val)
                }
            } else {
                unsafe {
                    // Indices past the split point are relative to the new right node.
                    Handle::new_edge(
                        right.as_mut().cast_unchecked::<marker::Leaf>(),
                        self.idx - (B + 1)
                    ).insert_fit(key, val)
                }
            };
            (InsertResult::Split(left, k, v, right), ptr)
        }
    }
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
    /// Fixes the parent pointer and index in the child node below this edge. This is useful
    /// when the ordering of edges has been changed, such as in the various `insert` methods.
    fn correct_parent_link(mut self) {
        let idx = self.idx as u16;
        let ptr = self.node.as_internal_mut() as *mut _;
        let mut child = self.descend();
        child.as_leaf_mut().parent = ptr;
        child.as_leaf_mut().parent_idx = idx;
    }

    /// Unsafely asserts to the compiler some static information about whether the underlying
    /// node of this handle is a `Leaf`.
    unsafe fn cast_unchecked<NewType>(&mut self)
            -> Handle<NodeRef<marker::Mut, K, V, NewType>, marker::Edge> {

        Handle::new_edge(self.node.cast_unchecked(), self.idx)
    }

    /// Inserts a new key/value pair and an edge that will go to the right of that new pair
    /// between this edge and the key/value pair to the right of this edge. This method assumes
    /// that there is enough space in the node for the new pair to fit.
    fn insert_fit(&mut self, key: K, val: V, edge: Root<K, V>) {
        // Necessary for correctness, but in an internal module
        debug_assert!(self.node.len() < CAPACITY);
        // The inserted subtree must sit exactly one level below this node.
        debug_assert!(edge.height == self.node.height - 1);

        unsafe {
            // This cast is a lie, but it allows us to reuse the key/value insertion logic.
            self.cast_unchecked::<marker::Leaf>().insert_fit(key, val);

            // Shift the edge array right of the insertion point and drop in the new child.
            slice_insert(
                slice::from_raw_parts_mut(
                    self.node.as_internal_mut().edges.as_mut_ptr(),
                    self.node.len()
                ),
                self.idx + 1,
                edge.node
            );

            // Every shifted child now sits at a new index; repair their parent links.
            for i in (self.idx+1)..(self.node.len()+1) {
                Handle::new_edge(self.node.reborrow_mut(), i).correct_parent_link();
            }
        }
    }

    /// Inserts a new key/value pair and an edge that will go to the right of that new pair
    /// between this edge and the key/value pair to the right of this edge. This method splits
    /// the node if there isn't enough room.
    pub fn insert(mut self, key: K, val: V, edge: Root<K, V>)
            -> InsertResult<'a, K, V, marker::Internal> {

        // Necessary for correctness, but this is an internal module
        debug_assert!(edge.height == self.node.height - 1);

        if self.node.len() < CAPACITY {
            self.insert_fit(key, val, edge);
            InsertResult::Fit(Handle::new_kv(self.node, self.idx))
        } else {
            // Node is full: split at the middle pair, then insert into whichever
            // half the original edge index falls in (mirrors the leaf version).
            let middle = Handle::new_kv(self.node, B);
            let (mut left, k, v, mut right) = middle.split();
            if self.idx <= B {
                unsafe {
                    Handle::new_edge(left.reborrow_mut(), self.idx).insert_fit(key, val, edge);
                }
            } else {
                unsafe {
                    Handle::new_edge(
                        right.as_mut().cast_unchecked::<marker::Internal>(),
                        self.idx - (B + 1)
                    ).insert_fit(key, val, edge);
                }
            }
            InsertResult::Split(left, k, v, right)
        }
    }
}
impl<BorrowType, K, V>
        Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge> {

    /// Finds the node pointed to by this edge.
    ///
    /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
    /// both, upon success, do nothing.
    pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
        NodeRef {
            // The child lives exactly one level below an internal node.
            height: self.node.height - 1,
            node: unsafe { self.node.as_internal().edges.get_unchecked(self.idx).as_ptr() },
            root: self.node.root,
            _marker: PhantomData
        }
    }
}
impl<'a, K: 'a, V: 'a, NodeType>
        Handle<NodeRef<marker::Immut<'a>, K, V, NodeType>, marker::KV> {

    /// Converts this handle into shared references to the key and value it points at,
    /// with the full lifetime of the borrow.
    pub fn into_kv(self) -> (&'a K, &'a V) {
        let (keys, vals) = self.node.into_slices();
        unsafe {
            // In bounds: a KV handle's idx is < node.len() by construction.
            (keys.get_unchecked(self.idx), vals.get_unchecked(self.idx))
        }
    }
}
impl<'a, K: 'a, V: 'a, NodeType>
        Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {

    /// Converts this handle into mutable references to the key and value it points at,
    /// with the full lifetime of the borrow.
    pub fn into_kv_mut(self) -> (&'a mut K, &'a mut V) {
        let (mut keys, mut vals) = self.node.into_slices_mut();
        unsafe {
            // In bounds: a KV handle's idx is < node.len() by construction.
            (keys.get_unchecked_mut(self.idx), vals.get_unchecked_mut(self.idx))
        }
    }
}
impl<'a, K, V, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
    /// Borrows mutable references to the key and value without consuming the handle
    /// (unlike `into_kv_mut`, the returned borrows are tied to `&mut self`).
    pub fn kv_mut(&mut self) -> (&mut K, &mut V) {
        unsafe {
            let (mut keys, mut vals) = self.node.reborrow_mut().into_slices_mut();
            (keys.get_unchecked_mut(self.idx), vals.get_unchecked_mut(self.idx))
        }
    }
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
    /// Splits the underlying node into three parts:
    ///
    /// - The node is truncated to only contain the key/value pairs to the left of
    ///   this handle.
    /// - The key and value pointed to by this handle are extracted.
    /// - All the key/value pairs to the right of this handle are put into a newly
    ///   allocated node.
    pub fn split(mut self)
            -> (NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, K, V, Root<K, V>) {
        unsafe {
            let mut new_node = Box::new(LeafNode::new());

            // Move the middle pair out by value; its slot becomes logically
            // dead once the length is truncated below.
            let k = ptr::read(self.node.keys().get_unchecked(self.idx));
            let v = ptr::read(self.node.vals().get_unchecked(self.idx));

            // Number of pairs that go to the new right-hand node.
            let new_len = self.node.len() - self.idx - 1;

            ptr::copy_nonoverlapping(
                self.node.keys().as_ptr().offset(self.idx as isize + 1),
                new_node.keys.as_mut_ptr(),
                new_len
            );
            ptr::copy_nonoverlapping(
                self.node.vals().as_ptr().offset(self.idx as isize + 1),
                new_node.vals.as_mut_ptr(),
                new_len
            );

            // Truncating the lengths is what actually "removes" the moved pairs.
            self.node.as_leaf_mut().len = self.idx as u16;
            new_node.len = new_len as u16;

            (
                self.node,
                k, v,
                Root {
                    node: BoxedNode::from_leaf(new_node),
                    height: 0
                }
            )
        }
    }

    /// Removes the key/value pair pointed to by this handle, returning the edge between the
    /// now adjacent key/value pairs to the left and right of this handle.
    pub fn remove(mut self)
            -> (Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>, K, V) {
        unsafe {
            let k = slice_remove(self.node.keys_mut(), self.idx);
            let v = slice_remove(self.node.vals_mut(), self.idx);
            self.node.as_leaf_mut().len -= 1;
            (self.left_edge(), k, v)
        }
    }
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
    /// Splits the underlying node into three parts:
    ///
    /// - The node is truncated to only contain the edges and key/value pairs to the
    ///   left of this handle.
    /// - The key and value pointed to by this handle are extracted.
    /// - All the edges and key/value pairs to the right of this handle are put into
    ///   a newly allocated node.
    pub fn split(mut self)
            -> (NodeRef<marker::Mut<'a>, K, V, marker::Internal>, K, V, Root<K, V>) {
        unsafe {
            let mut new_node = Box::new(InternalNode::new());

            // Move the middle pair out by value; its slot becomes logically
            // dead once the length is truncated below.
            let k = ptr::read(self.node.keys().get_unchecked(self.idx));
            let v = ptr::read(self.node.vals().get_unchecked(self.idx));

            let height = self.node.height;
            // Number of pairs that go to the new right-hand node; it carries
            // one more edge than pairs.
            let new_len = self.node.len() - self.idx - 1;

            ptr::copy_nonoverlapping(
                self.node.keys().as_ptr().offset(self.idx as isize + 1),
                new_node.data.keys.as_mut_ptr(),
                new_len
            );
            ptr::copy_nonoverlapping(
                self.node.vals().as_ptr().offset(self.idx as isize + 1),
                new_node.data.vals.as_mut_ptr(),
                new_len
            );
            ptr::copy_nonoverlapping(
                self.node.as_internal().edges.as_ptr().offset(self.idx as isize + 1),
                new_node.edges.as_mut_ptr(),
                new_len + 1
            );

            self.node.as_leaf_mut().len = self.idx as u16;
            new_node.data.len = new_len as u16;

            let mut new_root = Root {
                node: BoxedNode::from_internal(new_node),
                height: height
            };

            // The moved children still point at the old parent; repair them.
            for i in 0..(new_len+1) {
                Handle::new_edge(new_root.as_mut().cast_unchecked(), i).correct_parent_link();
            }

            (
                self.node,
                k, v,
                new_root
            )
        }
    }

    /// Returns whether it is valid to call `.merge()`, i.e., whether there is enough room in
    /// a node to hold the combination of the nodes to the left and right of this handle along
    /// with the key/value pair at this handle.
    pub fn can_merge(&self) -> bool {
        (
            self.reborrow()
                .left_edge()
                .descend()
                .len()
          + self.reborrow()
                .right_edge()
                .descend()
                .len()
          + 1
        ) <= CAPACITY
    }

    /// Combines the node immediately to the left of this handle, the key/value pair pointed
    /// to by this handle, and the node immediately to the right of this handle into one new
    /// child of the underlying node, returning an edge referencing that new child.
    ///
    /// Assumes that this edge `.can_merge()`.
    pub fn merge(mut self)
            -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
        // Duplicate the handle bitwise so we can descend both sides while still
        // holding `self`; sound only because the copies are consumed immediately.
        let self1 = unsafe { ptr::read(&self) };
        let self2 = unsafe { ptr::read(&self) };
        let mut left_node = self1.left_edge().descend();
        let left_len = left_node.len();
        let mut right_node = self2.right_edge().descend();
        let right_len = right_node.len();

        // necessary for correctness, but in a private module
        debug_assert!(left_len + right_len + 1 <= CAPACITY);

        unsafe {
            // Pull the separating pair down from the parent and append the
            // right node's contents after it.
            ptr::write(left_node.keys_mut().get_unchecked_mut(left_len),
                       slice_remove(self.node.keys_mut(), self.idx));
            ptr::copy_nonoverlapping(
                right_node.keys().as_ptr(),
                left_node.keys_mut().as_mut_ptr().offset(left_len as isize + 1),
                right_len
            );
            ptr::write(left_node.vals_mut().get_unchecked_mut(left_len),
                       slice_remove(self.node.vals_mut(), self.idx));
            ptr::copy_nonoverlapping(
                right_node.vals().as_ptr(),
                left_node.vals_mut().as_mut_ptr().offset(left_len as isize + 1),
                right_len
            );

            // Drop the parent's edge to the (soon freed) right node and fix up
            // the parent links of the children that shifted left.
            slice_remove(&mut self.node.as_internal_mut().edges, self.idx + 1);
            for i in self.idx+1..self.node.len() {
                Handle::new_edge(self.node.reborrow_mut(), i).correct_parent_link();
            }
            self.node.as_leaf_mut().len -= 1;

            left_node.as_leaf_mut().len += right_len as u16 + 1;

            if self.node.height > 1 {
                // Children are internal nodes: also move the right node's edges
                // across and re-parent them into the merged left node.
                ptr::copy_nonoverlapping(
                    right_node.cast_unchecked().as_internal().edges.as_ptr(),
                    left_node.cast_unchecked()
                             .as_internal_mut()
                             .edges
                             .as_mut_ptr()
                             .offset(left_len as isize + 1),
                    right_len + 1
                );

                for i in left_len+1..left_len+right_len+2 {
                    Handle::new_edge(
                        left_node.cast_unchecked().reborrow_mut(),
                        i
                    ).correct_parent_link();
                }

                heap::deallocate(
                    right_node.node.get() as *mut u8,
                    mem::size_of::<InternalNode<K, V>>(),
                    mem::align_of::<InternalNode<K, V>>()
                );
            } else {
                heap::deallocate(
                    right_node.node.get() as *mut u8,
                    mem::size_of::<LeafNode<K, V>>(),
                    mem::align_of::<LeafNode<K, V>>()
                );
            }

            Handle::new_edge(self.node, self.idx)
        }
    }

    /// This removes a key/value pair from the left child and replaces it with the key/value pair
    /// pointed to by this handle while pushing the old key/value pair of this handle into the right
    /// child.
    pub fn steal_left(&mut self) {
        unsafe {
            let (k, v, edge) = self.reborrow_mut().left_edge().descend().pop();

            // Rotate: left child's last pair goes up into the parent slot,
            // the parent pair goes down into the right child's front.
            let k = mem::replace(self.reborrow_mut().into_kv_mut().0, k);
            let v = mem::replace(self.reborrow_mut().into_kv_mut().1, v);

            match self.reborrow_mut().right_edge().descend().force() {
                ForceResult::Leaf(mut leaf) => leaf.push_front(k, v),
                ForceResult::Internal(mut internal) => internal.push_front(k, v, edge.unwrap())
            }
        }
    }

    /// This removes a key/value pair from the right child and replaces it with the key/value pair
    /// pointed to by this handle while pushing the old key/value pair of this handle into the left
    /// child.
    pub fn steal_right(&mut self) {
        unsafe {
            let (k, v, edge) = self.reborrow_mut().right_edge().descend().pop_front();

            // Rotate: right child's first pair goes up into the parent slot,
            // the parent pair goes down onto the left child's back.
            let k = mem::replace(self.reborrow_mut().into_kv_mut().0, k);
            let v = mem::replace(self.reborrow_mut().into_kv_mut().1, v);

            match self.reborrow_mut().left_edge().descend().force() {
                ForceResult::Leaf(mut leaf) => leaf.push(k, v),
                ForceResult::Internal(mut internal) => internal.push(k, v, edge.unwrap())
            }
        }
    }

    /// This does stealing similar to `steal_left` but steals multiple elements at once.
    pub fn bulk_steal_left(&mut self, count: usize) {
        unsafe {
            let mut left_node = ptr::read(self).left_edge().descend();
            let left_len = left_node.len();
            let mut right_node = ptr::read(self).right_edge().descend();
            let right_len = right_node.len();

            // Make sure that we may steal safely.
            debug_assert!(right_len + count <= CAPACITY);
            debug_assert!(left_len >= count);

            let new_left_len = left_len - count;

            // Move data.
            {
                let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
                let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
                let parent_kv = {
                    let kv = self.reborrow_mut().into_kv_mut();
                    (kv.0 as *mut K, kv.1 as *mut V)
                };

                // Make room for stolen elements in the right child.
                // Overlapping copy, hence ptr::copy rather than copy_nonoverlapping.
                ptr::copy(right_kv.0,
                          right_kv.0.offset(count as isize),
                          right_len);
                ptr::copy(right_kv.1,
                          right_kv.1.offset(count as isize),
                          right_len);

                // Move elements from the left child to the right one.
                move_kv(left_kv, new_left_len + 1, right_kv, 0, count - 1);

                // Move parent's key/value pair to the right child.
                move_kv(parent_kv, 0, right_kv, count - 1, 1);

                // Move the left-most stolen pair to the parent.
                move_kv(left_kv, new_left_len, parent_kv, 0, 1);
            }

            left_node.reborrow_mut().as_leaf_mut().len -= count as u16;
            right_node.reborrow_mut().as_leaf_mut().len += count as u16;

            match (left_node.force(), right_node.force()) {
                (ForceResult::Internal(left), ForceResult::Internal(mut right)) => {
                    // Make room for stolen edges.
                    let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
                    ptr::copy(right_edges,
                              right_edges.offset(count as isize),
                              right_len + 1);
                    right.correct_childrens_parent_links(count, count + right_len + 1);

                    move_edges(left, new_left_len + 1, right, 0, count);
                },
                (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { }
                _ => { unreachable!(); }
            }
        }
    }

    /// The symmetric clone of `bulk_steal_left`.
    pub fn bulk_steal_right(&mut self, count: usize) {
        unsafe {
            let mut left_node = ptr::read(self).left_edge().descend();
            let left_len = left_node.len();
            let mut right_node = ptr::read(self).right_edge().descend();
            let right_len = right_node.len();

            // Make sure that we may steal safely.
            debug_assert!(left_len + count <= CAPACITY);
            debug_assert!(right_len >= count);

            let new_right_len = right_len - count;

            // Move data.
            {
                let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
                let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
                let parent_kv = {
                    let kv = self.reborrow_mut().into_kv_mut();
                    (kv.0 as *mut K, kv.1 as *mut V)
                };

                // Move parent's key/value pair to the left child.
                move_kv(parent_kv, 0, left_kv, left_len, 1);

                // Move elements from the right child to the left one.
                move_kv(right_kv, 0, left_kv, left_len + 1, count - 1);

                // Move the right-most stolen pair to the parent.
                move_kv(right_kv, count - 1, parent_kv, 0, 1);

                // Fix right indexing (overlapping copy, hence ptr::copy).
                ptr::copy(right_kv.0.offset(count as isize),
                          right_kv.0,
                          new_right_len);
                ptr::copy(right_kv.1.offset(count as isize),
                          right_kv.1,
                          new_right_len);
            }

            left_node.reborrow_mut().as_leaf_mut().len += count as u16;
            right_node.reborrow_mut().as_leaf_mut().len -= count as u16;

            match (left_node.force(), right_node.force()) {
                (ForceResult::Internal(left), ForceResult::Internal(mut right)) => {
                    move_edges(right.reborrow_mut(), 0, left, left_len + 1, count);

                    // Fix right indexing.
                    let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
                    ptr::copy(right_edges.offset(count as isize),
                              right_edges,
                              new_right_len + 1);
                    right.correct_childrens_parent_links(0, new_right_len + 1);
                },
                (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { }
                _ => { unreachable!(); }
            }
        }
    }
}
/// Moves `count` key/value pairs from `source[source_offset..]` to
/// `dest[dest_offset..]`. The ranges must not overlap.
unsafe fn move_kv<K, V>(
    source: (*mut K, *mut V), source_offset: usize,
    dest: (*mut K, *mut V), dest_offset: usize,
    count: usize)
{
    ptr::copy_nonoverlapping(source.0.offset(source_offset as isize),
                             dest.0.offset(dest_offset as isize),
                             count);
    ptr::copy_nonoverlapping(source.1.offset(source_offset as isize),
                             dest.1.offset(dest_offset as isize),
                             count);
}
/// Moves `count` edges between two internal nodes and re-parents the moved
/// children. Source and destination must have the same height.
unsafe fn move_edges<K, V>(
    mut source: NodeRef<marker::Mut, K, V, marker::Internal>, source_offset: usize,
    mut dest: NodeRef<marker::Mut, K, V, marker::Internal>, dest_offset: usize,
    count: usize)
{
    let source_ptr = source.as_internal_mut().edges.as_mut_ptr();
    let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr();
    ptr::copy_nonoverlapping(source_ptr.offset(source_offset as isize),
                             dest_ptr.offset(dest_offset as isize),
                             count);

    // The moved children still point at their old parent; fix their links.
    dest.correct_childrens_parent_links(dest_offset, dest_offset + count);
}
impl<BorrowType, K, V, HandleType>
        Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, HandleType> {

    /// Check whether the underlying node is an `Internal` node or a `Leaf` node.
    pub fn force(self) -> ForceResult<
        Handle<NodeRef<BorrowType, K, V, marker::Leaf>, HandleType>,
        Handle<NodeRef<BorrowType, K, V, marker::Internal>, HandleType>
    > {
        // Rewrap the forced node with the same index; only the type changes.
        match self.node.force() {
            ForceResult::Leaf(node) => ForceResult::Leaf(Handle {
                node: node,
                idx: self.idx,
                _marker: PhantomData
            }),
            ForceResult::Internal(node) => ForceResult::Internal(Handle {
                node: node,
                idx: self.idx,
                _marker: PhantomData
            })
        }
    }
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
    /// Move the suffix after `self` from one node to another one. `right` must be empty.
    /// The first edge of `right` remains unchanged.
    pub fn move_suffix(&mut self,
            right: &mut NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>) {
        unsafe {
            let left_new_len = self.idx;
            let mut left_node = self.reborrow_mut().into_node();

            let right_new_len = left_node.len() - left_new_len;
            let mut right_node = right.reborrow_mut();

            // Preconditions: destination is empty and at the same tree level.
            debug_assert!(right_node.len() == 0);
            debug_assert!(left_node.height == right_node.height);

            let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
            let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();

            move_kv(left_kv, left_new_len, right_kv, 0, right_new_len);

            left_node.reborrow_mut().as_leaf_mut().len = left_new_len as u16;
            right_node.reborrow_mut().as_leaf_mut().len = right_new_len as u16;

            match (left_node.force(), right_node.force()) {
                (ForceResult::Internal(left), ForceResult::Internal(right)) => {
                    // Edge 0 of `right` is left untouched by contract, so the
                    // moved edges start at destination offset 1.
                    move_edges(left, left_new_len + 1, right, 1, right_new_len);
                },
                (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { }
                _ => { unreachable!(); }
            }
        }
    }
}
/// The result of statically resolving whether a node is a leaf or internal node.
pub enum ForceResult<Leaf, Internal> {
    Leaf(Leaf),
    Internal(Internal)
}
/// The result of inserting into a node: either the pair fit (`Fit`, with a handle
/// to it), or the node had to be split (`Split`, carrying the left node, the
/// middle pair that must move up, and the new right subtree).
pub enum InsertResult<'a, K, V, Type> {
    Fit(Handle<NodeRef<marker::Mut<'a>, K, V, Type>, marker::KV>),
    Split(NodeRef<marker::Mut<'a>, K, V, Type>, K, V, Root<K, V>)
}
/// Zero-sized marker types used as type-level tags on `NodeRef` and `Handle`:
/// borrow kind (`Owned`/`Immut`/`Mut`), node kind (`Leaf`/`Internal`/
/// `LeafOrInternal`), and handle kind (`KV`/`Edge`).
pub mod marker {
    use core::marker::PhantomData;

    pub enum Leaf { }
    pub enum Internal { }
    pub enum LeafOrInternal { }

    pub enum Owned { }
    pub struct Immut<'a>(PhantomData<&'a ()>);
    pub struct Mut<'a>(PhantomData<&'a mut ()>);

    pub enum KV { }
    pub enum Edge { }
}
/// Inserts `val` at position `idx`, shifting the tail of the slice right by one.
/// The caller must guarantee there is spare capacity one past the slice's end.
unsafe fn slice_insert<T>(slice: &mut [T], idx: usize, val: T) {
    // Overlapping shift, hence ptr::copy; the last element is written one
    // past `slice.len()`, into capacity the caller vouches for.
    ptr::copy(
        slice.as_ptr().offset(idx as isize),
        slice.as_mut_ptr().offset(idx as isize + 1),
        slice.len() - idx
    );
    ptr::write(slice.get_unchecked_mut(idx), val);
}
/// Removes and returns the element at `idx`, shifting the tail left by one.
/// The caller is responsible for shrinking the logical length afterwards.
unsafe fn slice_remove<T>(slice: &mut [T], idx: usize) -> T {
    let ret = ptr::read(slice.get_unchecked(idx));
    // Overlapping shift, hence ptr::copy.
    ptr::copy(
        slice.as_ptr().offset(idx as isize + 1),
        slice.as_mut_ptr().offset(idx as isize),
        slice.len() - idx - 1
    );
    ret
}
|
#!/bin/bash
# Build a Debian package for presi-aoke.
# Expects the project to already be built in ../build; run from this directory.
set -e

# Read the current version from the file VERSION (strip any whitespace)
VERSION=$(cat ../VERSION | tr -d '[[:space:]]')
APP=presi-aoke

# Quote the variable: an unquoted empty $QMAKE makes `[ -z ]` misbehave.
[ -z "$QMAKE" ] && QMAKE=qmake

umask 0022

# Put all of the files in the right places for the Debian package
INSTALL_ROOT=../pkg-debian make -C ../build install

# Installed-Size is expressed in kilobytes.
APP_SIZE=$(du -k usr/bin/presi-aoke | cut -f 1)

# Use the Debian architecture name (e.g. amd64) so the control file and the
# package filename agree. `uname -p` returns kernel names such as x86_64,
# which do not match the previously hard-coded "amd64" Architecture field.
ARCH=$(dpkg --print-architecture)

mkdir DEBIAN
cat > DEBIAN/control <<EOF
Package: presi-aoke
Version: $VERSION
Section: games
Priority: optional
Architecture: $ARCH
Depends: libqt5widgets5, libc6
Installed-Size: $APP_SIZE
Maintainer: Frank Hunleth <fhunleth@troodon-software.com>
Description: Presentation Karaoke Player
 Take turns presenting randomized slide decks.
EOF

mkdir -p usr/share/doc/presi-aoke
cat > usr/share/doc/presi-aoke/copyright <<'EOF'
Format: http://dep.debian.net/deps/dep5/
Upstream-Name: Presi-aoke
Upstream-Contact: Frank Hunleth <fhunleth@troodon-software.com>
Source: https://github.com/fhunleth/presi-aoke

Files: *
Copyright: 2015 Frank Hunleth
License: Apache-2.0
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 .
 http://www.apache.org/licenses/LICENSE-2.0
 .
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 .
 On Debian systems, the full text of the Apache License can be found in the file
 `/usr/share/common-licenses/Apache-2.0'.
EOF

# Compute all of the checksums; NUL-delimit so paths with spaces survive.
find usr -type f -print0 | xargs -0 md5sum > DEBIAN/md5sums

# Build the package
dpkg -b . ../presi-aoke_${VERSION}_${ARCH}.deb
|
---
title: "About"
permalink: "/about.html"
image: "/assets/images/myAvatar.svg"
---
I'm nine and I like playing Fashion Famous and Royal High for fun. I also like
playing outside and hanging out with my friends. I like playing with American
Girl dolls. My favorite food is pepperoni pizza.
Made with <i class="fa fa-heart text-danger"></i>.
|
<?php
// Handles a professor's file upload for one of their turmas (classes).
// Expects POST field 'select' (turma id) and a multipart 'file' upload.
require_once("../config/header.php");

$professorDAO = new ProfessorDAO($con);
$turmas = $professorDAO->getTurmas(getUserId());

// Find the turma the professor selected. Compare as strings: $_POST values
// are always strings, so a strict === against an integer id from the DAO
// would never match and $selected would stay undefined.
$selected = null;
foreach ($turmas as $turma) {
    if ((string)$turma->getId() === (string)$_POST['select']) {
        $selected = $turma;
    }
}

// Reject ids that do not belong to one of this professor's turmas
// (previously $selected was used without being checked).
if ($selected === null) {
    die("Turma invalida");
}

$target_dir = "../arquivos/".$selected->getId()."/".$_SESSION['id'];
if (!file_exists($target_dir)) {
    mkdir($target_dir, 0777, true);
}

// basename() strips any directory components from the client-supplied name.
$target_file = $target_dir."/" . basename($_FILES["file"]["name"]);
if (file_exists($target_file)) {
    die("Arquivo ja existe");
}

echo "<br />".$target_file;
if (move_uploaded_file($_FILES["file"]["tmp_name"], $target_file)) {
    echo "</br> Arquivo ". basename( $_FILES["file"]["name"]). " has been uploaded.";
} else {
    echo "Sorry, there was an error uploading your file.";
}
?>
|
package com.purbon.kafka.connect.smt;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import java.util.Map;
/**
 * Configuration holder for the Debezium time-transform SMT.
 *
 * Thin subclass of {@link AbstractConnectConfig}: it reuses the parent's
 * {@code ConfigDef} unchanged and simply binds the supplied originals to it.
 */
public class DebeziumTimeTransformConfig extends AbstractConnectConfig {

  /**
   * @param config    the definition of the accepted configuration keys
   * @param originals the raw configuration values supplied by the connector
   */
  public DebeziumTimeTransformConfig(ConfigDef config, Map<?, ?> originals) {
    super(config, originals);
  }

  /** Convenience constructor using the default {@link ConfigDef}. */
  public DebeziumTimeTransformConfig(Map<?, ?> originals) {
    this(config(), originals);
  }

  /** Returns the shared config definition; no extra keys are added here. */
  static ConfigDef config() {
    return AbstractConnectConfig.config();
  }
}
|
package main
import (
"fmt"
"github.com/dynport/gocli"
)
// regionsList is the CLI action that lists all available regions.
type regionsList struct {
}
// Run fetches the region list from the API and prints it as a table
// with one row per region: slug, human-readable name, availability.
func (r *regionsList) Run() error {
	cli, err := client()
	if err != nil {
		return err
	}

	resp, err := cli.Regions()
	if err != nil {
		return err
	}

	table := gocli.NewTable()
	for _, region := range resp.Regions {
		table.Add(region.Slug, region.Name, region.Available)
	}
	fmt.Println(table)

	return nil
}
|
/****************************************************************************
*
* Copyright (c) 2013-2017 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/**
* @file spinner.hpp
* Controller prototype for single rotor drone
*
* @author Evandro Bernardes <evandro.bernardes@univ-amu.fr>
*/
#ifndef SPINNER_H
#define SPINNER_H
#include "params.h"
#include <poll.h>
#include <drivers/drv_hrt.h>
// #include <lib/ecl/geo/geo.h>
#include <matrix/math.hpp>
#include <matrix/matrix/math.hpp>
#include <px4_platform_common/px4_config.h>
#include <px4_platform_common/tasks.h>
#include <systemlib/err.h>
#include <parameters/param.h>
#include <perf/perf_counter.h>
#include <uORB/Subscription.hpp>
#include <uORB/topics/actuator_controls.h>
#include <uORB/topics/vehicle_attitude.h>
#include <uORB/topics/vehicle_angular_velocity.h>
#include <uORB/topics/manual_control_setpoint.h>
#include <uORB/topics/parameter_update.h>
#include <uORB/topics/position_setpoint_triplet.h>
#include <uORB/topics/vehicle_attitude_setpoint.h>
#include <uORB/topics/vehicle_global_position.h>
#include <uORB/topics/vehicle_rates_setpoint.h>
#include <uORB/topics/vehicle_status.h>
#include <uORB/uORB.h>
#include <uORB/topics/vehicle_odometry.h>
#include "spinner_parameters.hpp"
/* Attitude-quaternion source selection. */
#define QUAT_EKF 0      // use the EKF attitude estimate
#define QUAT_VISUAL 1   // use visual odometry
#define QUAT_NULL -1    // no quaternion source

/* Thrust mode selection. */
#define THRUST_SPIN 0
#define THRUST_CONSTANT 1

/* Goal/setpoint source selection. */
#define GOAL_MANUAL 2
#define GOAL_SETPOINT 5

/* Prototypes */
extern "C" int parameters_init(struct param_handles *h);
extern "C" int parameters_update(const struct param_handles *h, struct params *p);
extern "C" __EXPORT int spinner_main(int argc, char *argv[]);
extern int parameter_load(char *param_name, double *param_value);
static void usage(const char *reason);
static int spinner_thread_main(int argc, char *argv[]);
static int init_system();
static int publish_actuators();
static int read_topics();
static matrix::Vector<float, 3> get_torque();
static void control_attitude();

/* Daemon lifecycle flags shared between the shell command and the worker thread. */
static bool thread_should_exit = false; /**< Daemon exit flag */
static bool thread_running = false; /**< Daemon status flag */
static int deamon_task; /**< Handle of deamon task / thread */

/* Last-received copies of the subscribed uORB topics. */
static struct actuator_controls_s actuator_manual;
static struct actuator_controls_s actuator_control;
static struct vehicle_attitude_s att;
static struct vehicle_odometry_s visual_odom;
static struct vehicle_angular_velocity_s ang_vel;
static struct vehicle_attitude_setpoint_s att_sp;

/* uORB publication handle and subscription file descriptors (polled via fds). */
static orb_advert_t actuator_pub;
static int att_sub_fd;
static int ang_vel_sub_fd;
static int ctrl_sub_fd;
static int att_sp_sub_fd;
static int visual_odom_sub_fd;
static px4_pollfd_struct_t fds[5];

matrix::Quaternionf Q_a(matrix::Vector3f v);
matrix::Quaternionf Q_a(matrix::Quaternionf q);
// matrix::SquareMatrix<float, 3> get_B_inv(matrix::Quaternionf q, matrix::Vector3f w);
float wrapTo180(float angle_deg);

/* Controller state: torque/thrust outputs, current and desired attitude
 * (q/qd), angular velocity (w/wd), body axis (v/vd) and position (p/pd). */
static matrix::Vector3f tau(0.0f,0.0f,0.0f);
static matrix::Vector3f thrust(0.0f,0.0f,0.0f);
static matrix::Vector3f e(0.0f,0.0f,1.0f);
static matrix::Quatf eq(0.0, e(0), e(1), e(2));
static matrix::Quatf q(1,0,0,0);
static matrix::Quatf qd(1,0,0,0);
static matrix::Vector3f w(e*0);
static matrix::Vector3f v(e);
static matrix::Vector3f vdot(0, 0, 0);
static matrix::Vector3f p(e);
static matrix::Vector3f wd(0, 0, 0);
static matrix::Vector3f vd(e);
static matrix::Vector3f pd(e);

/* 3x3 identity, row-major. */
float Id_data[3*3] = {
	1.0, 0.0, 0.0,
	0.0, 1.0, 0.0,
	0.0, 0.0, 1.0};
static matrix::SquareMatrix<float, 3> Id(Id_data);

/* Fixed inverse mixing matrix mapping torques to actuator commands. */
float B_inv_data[3*3] = {
	+0.5, 0.5, 0.0,
	-0.5, 0.5, 0.0,
	+0.0, 0.0, 1.0};
static matrix::SquareMatrix<float, 3>
	Binv(B_inv_data);
// static matrix::SquareMatrix<float, 3> B_;
// static matrix::SquareMatrix<float, 3> J_;

/* Runtime mode switches (see the #defines above). */
short int quat_mode = QUAT_EKF;
short int thrust_mode = THRUST_SPIN;
short int goal_mode = GOAL_SETPOINT;
double rotor_velocity = 0.0f;

#endif /* SPINNER_H */
|
import React from "react"
import PropTypes from "prop-types"
import { Box, ChakraProvider, useColorMode } from "@chakra-ui/core"
// import { merge } from "@chakra-ui/utils"
import { SlideFade } from "@chakra-ui/transition"
import theme from "../theme"
import { Global } from "./global"
import SEO from "../../components/seo"
import {
Header,
Footer,
ModeToggle,
Sidebar,
PreFooter,
} from "../../components"
import useSiteMetadata from "../../hooks/use-site-metadata"
// import { isDev } from "../../utils"
import { PanelContext, PanelProvider } from "../../contexts/panel-context"
import { isClient } from "../../utils/tools/tools"
import "../../styles/main.scss"
const DefaultLayout = ({
header = true,
prefooter = true,
seo,
allowPanelUpdate = false,
children,
...rest
}) => {
const { colorMode } = useColorMode()
const site = useSiteMetadata()
const {
title,
// tagline,
brandColors,
} = site
const themeUpdate = {
...theme,
colors: {
...theme.colors,
brand: {
...theme.colors.brand,
...brandColors, // : { primary, secondary }
},
},
}
isClient && console.log("⭕️themeUpdate", themeUpdate)
return (
<>
<Global />
<SEO seo={{ siteTitle: title, ...seo }} />
<ChakraProvider resetCSS theme={themeUpdate}>
<ModeToggle />
<PanelProvider {...{ allowPanelUpdate }}>
<Sidebar context={PanelContext} />
<Box
id="content-wrapper"
color={`mode.${colorMode}.text`}
fontFamily="body"
>
{header ? (
<SlideFade
// placement="top"
initialOffset="-20px"
timeout={800}
in={true}
>
{styles => (
<Header
siteTitle={title}
siteTagline={seo?.siteTagline}
sx={styles}
/>
)}
</SlideFade>
) : null}
<Box as="main" {...rest} mt={10}>
{children}
</Box>
</Box>
</PanelProvider>
{prefooter ? <PreFooter /> : null}
<SlideFade initialOffset="20px" timeout={2000} in={true}>
{styles => (
<Footer
siteTitle={title}
siteTagline={seo?.siteTagline}
sx={styles}
/>
)}
</SlideFade>
</ChakraProvider>
</>
)
}
// Declare every prop the component actually accepts (prefooter, seo and
// allowPanelUpdate were previously undeclared).
DefaultLayout.propTypes = {
  header: PropTypes.bool,
  prefooter: PropTypes.bool,
  seo: PropTypes.object,
  allowPanelUpdate: PropTypes.bool,
  pageTagline: PropTypes.string,
  children: PropTypes.node.isRequired,
}

export default DefaultLayout
|
# Week09
## 基于 react & react-dnd & monaco 实现的基础版low-code 工具
### 实现方式
* 借助 react-dnd 实现拖拽
* 使用 monaco 显示 json
### 启动方式
* 启动应用
```code
yarn dev
```
* 可用鼠标拖拽配置的工具
* 拖动左侧列表的组件至中间 放置部分
* 显示对应组件
* 实时生成对应的配置json
* 一个能渲染 JSON 配置的 form 组件
* 复制以下 json 粘贴至 monaco 编辑器中
* 显示对应组件
* 删除部分json 中的顶级 key 会让 form 回显部分变化
```json
{
"1631125534465": {
"type": "button",
"props": {}
},
"1631125536069": {
"type": "select",
"props": {
"options": [
{
"title": "jack",
"key": "select-0",
"value": "jack"
},
{
"title": "rose",
"key": "select-1",
"value": "rose"
},
{
"title": "tom",
"key": "select-2",
"value": "tom"
}
]
}
}
}
```
|
import { createServiceFactory, SpectatorService } from '@ngneat/spectator';
import { MockMaster } from '@wa/app/common/mock-master';
import { ComponentService } from './component.service';
describe('ComponentService', () => {
	// Base path prepended by the service when composing localization keys.
	const localizationBasePath = 'foo.bar';
	const createService = createServiceFactory(ComponentService);
	let spectator: SpectatorService<ComponentService>;

	beforeEach(() => {
		// Each test gets a fresh service instance wired with a mocked config.
		const mocks = new MockMaster().mockConfig();
		spectator = createService({ providers: [mocks.configServiceProvider] });
		spectator.service.init({ localizationBasePath });
	});

	it('should be defined', () => {
		expect(spectator.service).toBeDefined();
	});

	describe('getLocalizationPath', () => {
		it('should compose expected localization path ', () => {
			const end = 'foo';
			const expected = `${localizationBasePath}.${end}`;

			expect(spectator.service.getLocalizationPath(end)).toEqual(expected);
		});
	});
});
|
require 'faker'
product_types = ["Product", "Component", "Sub component"]
FactoryBot.define do
factory :product, class: Product do
name { Faker::Commerce.product_name }
product_type { product_types[Faker::Number.between(0, 2)] }
is_assessed {false}
factory :product_with_tags, class: Product do
transient do
tag_count { Faker::Number.between(1, 10) }
end
# the after(:create) yields two values; the user instance itself and the
# evaluator, which stores all values from the factory, including transient
# attributes; `create_list`'s second argument is the number of records
# to create and we make sure the user is associated properly to the post
after(:create) do |product, evaluator|
create_list(:tag, evaluator.tag_count, product: product)
end
end
end
end
|
import { Module } from '@nestjs/common';
import { RequerimientosService } from './requerimientos.service';
import { RequerimientosController } from './requerimientos.controller';
import { RequerimientosProvider } from './requerimientos.provider';
/**
 * Feature module for the "requerimientos" (requirements) slice: exposes
 * RequerimientosController and registers the service it delegates to,
 * plus the provider the service depends on.
 */
@Module({
  providers: [RequerimientosProvider, RequerimientosService],
  controllers: [RequerimientosController]
})
export class RequerimientosModule {}
|
#!/usr/bin/perl
# Test "sha1sum".
# Copyright (C) 2000-2015 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
use strict;

my $prog = 'sha1sum';

# Turn off localization of executable's output.
@ENV{qw(LANGUAGE LANG LC_ALL)} = ('C') x 3;

# SHA-1 digest of the empty input; reused both as an expected output
# (test 's1') and as digest data inside the --check test fixtures.
my $sha_degenerate = "da39a3ee5e6b4b0d3255bfef95601890afd80709";

# Each entry: [name, optional extra args, {IN=> input file(s)},
# {OUT=>/{ERR=> expected output}, optional {AUX=> side files}, {EXIT=> status}].
# The 's*' entries are the FIPS-180-1 reference vectors.
my @Tests =
  (
   ['s1', {IN=> {f=> ''}},
    {OUT=>"$sha_degenerate f\n"}],
   ['s2', {IN=> {f=> 'a'}},
    {OUT=>"86f7e437faa5a7fce15d1ddcb9eaeaea377667b8 f\n"}],
   ['s3', {IN=> {f=> 'abc'}},
    {OUT=>"a9993e364706816aba3e25717850c26c9cd0d89d f\n"}],
   ['s4',
    {IN=> {f=> 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'}},
    {OUT=>"84983e441c3bd26ebaae4aa1f95129e5e54670f1 f\n"}],
   ['s5', {IN=> {f=> 'abcdefghijklmnopqrstuvwxyz'}},
    {OUT=>"32d10c7b8cf96570ca04ce37f2a19d84240d3a89 f\n"}],
   ['s6', {IN=> {f=> join ('', 'A'..'Z', 'a'..'z', '0'..'9')}},
    {OUT=>"761c457bf73b14d27e9e9265c46f4b4dda11f940 f\n"}],
   ['s7', {IN=> {f=> '1234567890' x 8}},
    {OUT=>"50abf5706a150990a08b2c5ea40fa0e585554732 f\n"}],
   ['million-a', {IN=> {f=> 'a' x 1000000}},
    {OUT=>"34aa973cd4c4daa4f61eeb2bdbad27316534016f f\n"}],
   # File names containing newline or backslash get a leading backslash
   # in the output line and escaped characters in the name.
   ['bs-sha-1', {IN=> {".\nfoo"=> ''}},
    {OUT=>"\\$sha_degenerate .\\nfoo\n"}],
   ['bs-sha-2', {IN=> {".\\foo"=> ''}},
    {OUT=>"\\$sha_degenerate .\\\\foo\n"}],
   # The sha1sum and md5sum drivers share a lot of code.
   # Ensure that sha1sum does *not* share the part that makes
   # md5sum accept BSD format.
   ['check-bsd', '--check', {IN=> {'f.md5' => "MD5 (f) = $sha_degenerate\n"}},
    {AUX=> {f=> ''}},
    {ERR=>"sha1sum: f.md5: no properly formatted "
     . "SHA1 checksum lines found\n"},
    {EXIT=> 1}],
   ['check-bsd2', '--check',
    {IN=> {'f.sha1' => "SHA1 (f) = $sha_degenerate\n"}},
    {AUX=> {f=> ''}}, {OUT=>"f: OK\n"}],
   ['check-bsd3', '--check', '--status',
    {IN=> {'f.sha1' => "SHA1 (f) = $sha_degenerate\n"}},
    {AUX=> {f=> 'bar'}}, {EXIT=> 1}],
   # Same as the BSD cases above, but for the OpenSSL "SHA1(f)= d" format.
   ['check-openssl', '--check',
    {IN=> {'f.md5' => "MD5(f)= $sha_degenerate\n"}},
    {AUX=> {f=> ''}},
    {ERR=>"sha1sum: f.md5: no properly formatted "
     . "SHA1 checksum lines found\n"},
    {EXIT=> 1}],
   ['check-openssl2', '--check',
    {IN=> {'f.sha1' => "SHA1(f)= $sha_degenerate\n"}},
    {AUX=> {f=> ''}}, {OUT=>"f: OK\n"}],
   ['check-openssl3', '--check', '--status',
    {IN=> {'f.sha1' => "SHA1(f)= $sha_degenerate\n"}},
    {AUX=> {f=> 'bar'}}, {EXIT=> 1}],
   # Regression test: a truncated BSD-style line must not crash --check.
   ['bsd-segv', '--check', {IN=> {'z' => "SHA1 ("}}, {EXIT=> 1},
    {ERR=> "$prog: z: no properly formatted SHA1 checksum lines found\n"}],
  );

# Insert the '--text' argument for each test.
my $t;
foreach $t (@Tests)
  {
    splice @$t, 1, 0, '--text' unless @$t[1] =~ /--check/;
  }

# DEBUG keeps temporary files around for post-mortem inspection.
my $save_temps = $ENV{DEBUG};
my $verbose = $ENV{VERBOSE};

my $fail = run_tests ($prog, $prog, \@Tests, $save_temps, $verbose);
exit $fail;
|
# archer
Written in 2018, this is my first "real" program. While I've come a long way
since this, it's fun to look back! archer is built on PyGame, and is an
arcade-style game where the objective is to dodge the projectiles shot at you
by enemy Cubes while also destroying as many Cubes as you can.
### Installation
To play the game:
- Download the newest installer from the [releases page](https://github.com/brycenaddison/archer/releases/)
- Just run the installer! All dependencies are included and installed for you.
To run the code in Python:
- Make sure you have a version of Python 3.6 or newer installed
- Make sure you have pipenv installed
- Navigate to the main folder and type `pipenv install --python [YOUR_VERSION]`
- Type `pipenv run archer` to open the program
### Controls
`esc`: Quit
`tab`: Pause
`←/→ or a/d`: Move left/right
`Space`: Shoot (capped at 7.5 shots/sec)
`Shift`: Dash (1.5 second cooldown)
`Enter`: Start
### Enemies
**Red** 1 hit to kill, 1 shot per 2 seconds
**Green** 1 hit to kill, 1 shot per 2.5 seconds, gives 1 health on kill
**Yellow** 3 hits to kill, 1 shot per 1.5 seconds (turns orange at 2 hits and red at 1 hit)
**Purple** 1 hit to kill, 1 shot per second, moves from side to side
**Blinking** If not killed in time, take 4 damage
**Yellow/Gray** Dodge the red line!
|
import 'package:flutter/material.dart';
/// Fluent visibility toggling: wraps any [Widget] in a [Visibility]
/// widget, so callers can write `myWidget.setVisible(show)`.
extension VisibilitySpreadExtension on Widget {
  Visibility setVisible(bool visible) {
    return Visibility(visible: visible, child: this);
  }
}
|
<?php
namespace App\Http\Controllers;
use Illuminate\Http\Request;
use App\data;
use App\category;
use App\client;
use App\jurnal_masuk;
use App\jurnal_keluar;
use Alert;
use Excel;
class jurnalController extends Controller
{
    // versi 2.0
    public function __construct(){
        // All journal endpoints require an authenticated user.
        $this->middleware('auth');
    }

    /**
     * Insert a new client from the posted form fields (minus the CSRF token).
     */
    public function addclient(Request $req){
        $client=$req->except('_token');
        client::insert($client);
        return redirect('home');
    }

    /**
     * Normalise a thousands-separated money string (e.g. "1.000.000") to int.
     * Null is passed through unchanged so the stored column stays NULL,
     * exactly as before.
     *
     * @param string|null $value
     * @return int|null
     */
    private function parseAmount($value){
        if ($value === null) {
            return null;
        }
        return (int) str_replace('.', '', $value);
    }

    /**
     * Build the journal-row payload shared by jurnal_masuk()/jurnal_keluar().
     * Reverses the posted date's part order (dd-mm-yyyy <-> yyyy-mm-dd) and
     * computes saldo = debet - kredit (NULL amounts count as 0).
     *
     * @return array<string, mixed>
     */
    private function buildJurnalData(Request $req){
        $debet  = $this->parseAmount($req->debet);
        $kredit = $this->parseAmount($req->kredit);

        // Reverse the date parts to match the storage format.
        $tr = explode('-', $req->tgl_transaksi);
        $tgl_transaksi = $tr[2].'-'.$tr[1].'-'.$tr[0];

        // Explicit null-coalescing keeps the arithmetic well defined
        // (and avoids the PHP 8 "null used in arithmetic" deprecation).
        $saldo = ($debet ?? 0) - ($kredit ?? 0);

        return [
            'no_cek'=>$req->no_cek,
            'tgl_transaksi'=>$tgl_transaksi,
            'kode_client'=>$req->kode_client,
            'debet'=>$debet,
            'kredit'=>$kredit,
            'saldo'=>$saldo,
            'perkiraan'=>$req->perkiraan,
            'nama_perkiraan'=>$req->nama_perkiraan,
            'id_jurnal'=>$req->id_categories,
            'uraian'=>$req->uraian,
        ];
    }

    /**
     * Record an incoming journal entry.
     */
    public function jurnal_masuk(Request $req){
        jurnal_masuk::create($this->buildJurnalData($req));
        return redirect('/home');
    }

    /**
     * Record an outgoing journal entry. Shares its payload construction
     * with jurnal_masuk() (previously duplicated verbatim).
     */
    public function jurnal_keluar(Request $req){
        jurnal_keluar::create($this->buildJurnalData($req));
        return redirect('/home');
    }
}
|
use std::f32;
struct Solution {}

impl Solution {
    /// LeetCode 319 "Bulb Switcher": after n rounds of toggling, bulb i
    /// stays on iff i has an odd number of divisors, i.e. i is a perfect
    /// square — so the answer is floor(sqrt(n)).
    pub fn bulb_switch(n: i32) -> i32 {
        // Use f64, not f32: an f32 mantissa (24 bits) cannot represent
        // every i32 exactly, so for large n near a perfect square the
        // truncated sqrt could be off by one. f64 holds all i32 exactly.
        (n as f64).sqrt() as i32
    }
}
fn main() {
    // Demo driver: print one sample input and its answer.
    let n = 16;
    println!("Input: {}", n);
    println!("Output: {}", Solution::bulb_switch(n));
}
|
package tests
import testkit.{TestFile, Specs}
import syntactic.Checker
import syntactic.CheckResult
import syntactic.Violation
import scala.meta.io.AbsolutePath
import scala.meta.Dialect
// Base class for syntactic-checker test suites: resolves a test resource,
// runs a Checker over it and compares the (merged) violations against the
// expectations recorded for that file.
abstract class SyntacticSuite extends munit.FunSuite {
  // Resolve a named .scala test resource under ../input/res/.
  protected def getTestFile(name: String): TestFile = {
    val path = AbsolutePath(s"../input/res/$name.scala")
    new TestFile(name, path)
  }
  // Collapse violations that start on the same line: within each start-line
  // group (processed in start-column order), a violation whose end position
  // is at or before the previous one's end is dropped as subsumed. The
  // surviving violations are returned sorted by (startLine, startColumn).
  private def mergeViolations(violations: List[Violation]): List[Violation] = {
    def merge(vs: List[Violation], acc: List[Violation]): List[Violation] =
      vs match {
        // va reaches at least as far as vb => vb is covered, drop it.
        case va :: vb :: tail if
          va.endLine > vb.endLine ||
          va.endLine == vb.endLine &&
          va.endColumn >= vb.endColumn =>
          merge(va :: tail, acc)
        case va :: vb :: tail =>
          merge(vb :: tail, va :: acc)
        case va :: Nil =>
          va :: acc
        case Nil => acc
      }
    violations
      .groupBy(v => v.startLine)
      .flatMap {
        case (_, vs) =>
          merge(vs.sortBy(_.startColumn), List.empty)
      }
      .toList
      .sortBy(v => (v.startLine, v.startColumn))
  }
  // Check one file and assert the rendered violations equal the spec's
  // expectations. Parsing errors are rethrown so the test fails loudly;
  // a Valid result requires the spec to expect no violations at all.
  protected def checkFile(checker: Checker, file: TestFile, dialect: Dialect): Unit = {
    val expectedViolations = Specs.load(file, dialect)
    checker.checkFile(dialect, file.toString) match {
      case CheckResult.ParsingError(e) => throw e
      case CheckResult.Valid =>
        assertEquals(List.empty[String], expectedViolations)
      case CheckResult.Invalid(violations) =>
        assertEquals(
          mergeViolations(violations).map(v => s"${v.startLine}:${v.startColumn}: ${v.msg}"),
          expectedViolations
        )
    }
  }
}
|
import React from "react";
import Button from "components/Button";
import MoreVertIcon from "@material-ui/icons/MoreVert";
import { Card, makeStyles, Theme } from "@material-ui/core";
import { LoansAvailableProps } from "screens/LoanConfirmation/LoanConfirmation";
export type CardProps = {
  // Loan offer to render; shape comes from the LoanConfirmation screen.
  data: LoansAvailableProps;
};

/**
 * Card presenting a single available loan offer: lender + monthly payment
 * header, the financed vehicle, APR / remaining-term details and a
 * call-to-action button.
 */
const CardComponent = ({ data }: CardProps) => {
  const classes = useStyles();
  const {
    lender,
    monthlyPayments,
    automobile,
    originalAmount,
    apr,
    remainingMonths
  } = data;
  return (
    <Card elevation={1} className={classes.card} data-testid="custom-card">
      <div>
        <div className={classes.headerWrapper}>
          <p className={classes.paragraph}>{lender}</p>
          <p className={classes.paragraph}>${monthlyPayments}/month</p>
        </div>
        <div className={classes.divider} />
        <div className={classes.container}>
          <div className={classes.box}>
            <img
              className={classes.image}
              src={automobile.imageSource}
              alt={`${automobile.make} ${automobile.model} ${automobile.year}`}
            />
            <div className={classes.informationWrapper}>
              <p className={classes.informationText}>
                <b>
                  {automobile.year} {automobile.make.toLowerCase()}{" "}
                  {automobile.model.toLowerCase()}
                </b>
              </p>
              {/* NOTE(review): renders originalAmount as a mileage figure —
                  confirm this is the intended field. */}
              <p className={`${classes.text} ${classes.margin}`}>
                estimated <b>{originalAmount} mi</b>
              </p>
            </div>
          </div>
          <MoreVertIcon />
        </div>
        <div className={classes.divider} />
        <div className={classes.contentWrapper}>
          <div className={classes.content}>
            <p className={classes.text}>
              APR <span className={classes.value}>{apr}%</span>
            </p>
            <p className={`${classes.text} ${classes.borderTop}`}>
              Time remaining
              <span className={classes.value}>{remainingMonths} mo</span>
            </p>
          </div>
        </div>
      </div>
      <div className={classes.buttonWrapper}>
        <Button fullWidth borderRadius="8px">
          Start Saving
        </Button>
      </div>
    </Card>
  );
};
export default CardComponent;
// JSS styles for CardComponent. Secondary palette color with a "30" hex
// alpha suffix is used throughout for subtle dividers/borders.
const useStyles = makeStyles<Theme>((theme) => ({
  // Column flex layout keeps the CTA button pinned to the card bottom.
  card: {
    borderRadius: "8px",
    minHeight: "350px",
    display: "flex",
    flexDirection: "column",
    justifyContent: "space-between"
  },
  paragraph: {
    textAlign: "center",
    margin: 0,
    fontSize: "1rem",
    letterSpacing: "1px"
  },
  value: {
    float: "right",
    color: "black",
    fontWeight: "bold"
  },
  text: {
    color: theme.palette.secondary.main,
    fontSize: ".9rem",
    margin: ".5rem 0"
  },
  informationText: {
    fontSize: ".9rem",
    margin: 0,
    textTransform: "capitalize"
  },
  borderTop: {
    paddingTop: "10px",
    borderTop: `1px solid ${theme.palette.secondary.main}30`
  },
  divider: {
    height: "1px",
    width: "100%",
    backgroundColor: `${theme.palette.secondary.main}30`
  },
  headerWrapper: {
    padding: "20px"
  },
  informationWrapper: {
    display: "flex",
    alignItems: "flex-start",
    flexDirection: "column"
  },
  contentWrapper: {
    padding: "5px 20px"
  },
  buttonWrapper: {
    padding: "20px 15px"
  },
  margin: {
    margin: 0
  },
  image: {
    height: "3rem",
    width: "3rem",
    borderRadius: "8px",
    objectFit: "cover",
    marginRight: "10px"
  },
  container: {
    display: "flex",
    justifyContent: "space-between",
    alignItems: "center",
    padding: "20px"
  },
  box: {
    display: "flex",
    alignItems: "center"
  }
}));
|
package main
import (
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
// Command-line state: bound by NewFlagSet and read by InstallMain.
var helpFlag bool
var rootDir string
// NewFlagSet builds the flag set for the initialize_toybox command and
// binds the -help and -s options to the package-level variables above.
func NewFlagSet() *flag.FlagSet {
	fs := flag.NewFlagSet("initialize_toybox", flag.ExitOnError)
	fs.Usage = func() {
		fmt.Println("initialize_toybox")
		fs.PrintDefaults()
	}
	fs.BoolVar(&helpFlag, "help", false, "show this message")
	fs.StringVar(&rootDir, "s", "/", "install target root")
	return fs
}
// InstallMain is the command entry point: it parses args, prints usage
// and exits early when -help is given, and otherwise installs the toybox
// symlink tree under the configured root directory.
// NOTE(review): the stdout parameter is currently unused — confirm
// whether command output should be routed through it.
func InstallMain(stdout io.Writer, args []string) error {
	flagSet := NewFlagSet()
	// flag.ExitOnError means Parse never returns a non-nil error.
	flagSet.Parse(args)
	if helpFlag {
		flagSet.Usage()
		return nil
	}
	return install_toybox(rootDir)
}
// install_toybox creates usr/bin and usr/sbin under root and populates
// usr/bin with one symlink per applet, all pointing at the toybox
// multi-call binary at usr/sbin/toybox. Already-existing links are left
// untouched.
func install_toybox(root string) error {
	dirs := []string{
		"/usr/bin",
		"/usr/sbin",
	}
	// Resolve a relative root against the executable's directory so the
	// tool behaves the same regardless of the caller's working directory.
	if !filepath.IsAbs(root) {
		base, err := filepath.Abs(filepath.Dir(os.Args[0]))
		if err != nil {
			return err
		}
		root = filepath.Join(base, root)
	}
	for _, dir := range dirs {
		dir = filepath.Join(root, dir)
		if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) {
			return err
		}
	}
	// Collect applet names, skipping internal "--" variants.
	bins := make([]string, 0, len(Applets))
	for name := range Applets {
		if strings.Contains(name, "--") {
			continue
		}
		bins = append(bins, name)
	}
	toypath := filepath.Join(root, "usr", "sbin", "toybox")
	for _, bin := range bins {
		// link is the symlink path; toypath is its target.
		link := filepath.Join(root, "usr", "bin", bin)
		if err := os.Symlink(toypath, link); err != nil {
			if os.IsExist(err) {
				continue
			}
			return err
		}
	}
	return nil
}
|
/**
* Date: 11/05/2018
* Time: 03:54
*/
/**
 * Parameterised test fixture pairing a persistence-unit name with the
 * entity class under test (defaults to Book). toString() returns the
 * persistence unit so test runners show a readable parameter label.
 */
class TestSubject(
    var persistenceUnit: String
) {
    var clazz: Class<*> = Book::class.java

    override fun toString(): String = persistenceUnit
}
|
<?php declare(strict_types = 1);
namespace Contributte\Psr7\Extra;
use Contributte\Psr7\Exception\Logical\InvalidStateException;
trait ExtraServerRequestTrait
{

    use ExtraRequestTrait;

    /**
     * QUERY PARAM *************************************************************
     */

    /**
     * Whether the query string carries a parameter named $name
     * (array_key_exists, so a null value still counts as present).
     */
    public function hasQueryParam(string $name): bool
    {
        $params = $this->getQueryParams();

        return array_key_exists($name, $params);
    }

    /**
     * Fetch a single query parameter. With only one argument a missing
     * parameter is an error; passing any second argument (even null)
     * turns it into a fallback value instead.
     *
     * @param mixed $default
     * @return mixed
     */
    public function getQueryParam(string $name, $default = null)
    {
        if ($this->hasQueryParam($name)) {
            return $this->getQueryParams()[$name];
        }

        if (func_num_args() >= 2) {
            return $default;
        }

        throw new InvalidStateException(sprintf('No query parameter "%s" found', $name));
    }

}
|
package es.fernandopal.yato.commands.music;
import es.fernandopal.yato.Main;
import es.fernandopal.yato.commands.CommandContext;
import es.fernandopal.yato.commands.CommandType;
import es.fernandopal.yato.commands.ICommand;
import es.fernandopal.yato.commands.PermLevel;
import es.fernandopal.yato.core.audio.AudioManagerController;
import es.fernandopal.yato.core.audio.GuildAudioManager;
import es.fernandopal.yato.util.Emoji;
import es.fernandopal.yato.util.MessageUtil;
import net.dv8tion.jda.api.entities.Guild;
import net.dv8tion.jda.api.entities.TextChannel;
import java.util.List;
public class VolumeCommand implements ICommand {
@Override
public void handle(CommandContext ctx) {
final Guild guild = ctx.getGuild();
final TextChannel tc = ctx.getChannel();
final MessageUtil msg = Main.getBotManager().getMsgu();
final GuildAudioManager manager = AudioManagerController.getGuildAudioManager(guild);
TextChannel musicChannel = Main.getTextChannelById(Main.getDb().getMusicChannel(guild.getIdLong()));
final List<String> args = ctx.getArgs();
if(musicChannel == null) { musicChannel = tc; }
final int cVol = manager.getPlayer().getVolume();
if(args.size() < 1) {
msg.sendOk(musicChannel, Emoji.VOLUME_HIGH + "Player volume: " + cVol + "%");
} else {
try {
final int nVol = Integer.parseInt(args.get(0));
if(nVol > 100 || nVol < 0) {
msg.sendError(musicChannel, Emoji.ERROR + " The number cant be higher than 100 or smaller than 0");
return;
}
manager.getPlayer().setVolume(nVol);
String vEmoji = (nVol < cVol) ? Emoji.VOLUME_LOW : Emoji.VOLUME_HIGH;
msg.sendOk(musicChannel, vEmoji + " Player volume has been set to " + nVol + "%");
} catch(NumberFormatException e) {
msg.sendError(musicChannel, Emoji.WARNING + " You must insert a number between 0 and 100");
}
}
}
@Override public String name() {
return "volume";
}
@Override public String usage() {
return "{prefix}:volume";
}
@Override public String description() {
return "Change the global volume of the player for your server";
}
@Override public PermLevel permLevel() {
return PermLevel.GUILD_DJ;
}
@Override public CommandType commandType() {
return CommandType.MUSIC;
}
@Override public List<String> getAliases() {
return List.of("v", "vol");
}
}
|
package com.knight.kotlin.library_widget
import android.content.Context
import android.util.AttributeSet
import android.widget.TextView
/**
* Author:Knight
* Time:2021/12/24 16:51
* Description:MarqueeTextView
*/
/**
 * TextView that keeps its marquee animation running: the marquee effect
 * only scrolls while the view is focused, so isFocused() always reports
 * true regardless of actual focus state.
 */
class MarqueeTextView @JvmOverloads constructor(
    context: Context,
    attributeSet: AttributeSet? = null,
    defAttrStyle: Int = 0
) : TextView(context, attributeSet, defAttrStyle) {

    override fun isFocused(): Boolean = true
}
|
package com.lany192.box.sample.data.bean
import com.google.gson.annotations.SerializedName
/**
 * Generic envelope for API responses: numeric status code, optional
 * message and optional typed payload. The @SerializedName alternates let
 * one model parse two backend naming schemes ("code"/"errorCode", etc.).
 */
class ApiResult<T> {
    // Status code; initialised to 0 — presumably the success value,
    // TODO confirm against the backend contract.
    @SerializedName(value = "code", alternate = ["errorCode"])
    var code = 0
    // Human-readable message accompanying the code, when provided.
    @SerializedName(value = "msg", alternate = ["errorMsg"])
    var msg: String? = null
    // Response payload, when present.
    @SerializedName(value = "data", alternate = ["result"])
    var data: T? = null
}
|
# Exercise
In this exercise we will implement the following computations as recursive functions:
- sum of the numbers between (lower_value, upper_value)
- product of the numbers between (lower_value, upper_value)
E.g.
recursive_sum(1, 10) => 45
recursive_product(1, 10) => 3628800
## Main Function
```cpp
#include <stdint.h>
#include <stdio.h>
uint32_t recursive_sum(uint32_t lower_value, uint32_t upper_value);
uint32_t recursive_product(uint32_t lower_value, uint32_t upper_value);
int main()
{
uint32_t lower_value = 1;
uint32_t upper_value = 10;
printf("recursive_sum: %u\n", recursive_sum(lower_value, upper_value));
printf("recursive_product: %u\n", recursive_product(lower_value, upper_value));
return 0;
}
```
|
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"testing"
"code.gitea.io/gitea/modules/setting"
"github.com/stretchr/testify/assert"
)
// TestIssueList_LoadRepositories checks that LoadRepositories resolves
// each issue's Repo and returns the deduplicated repository set.
func TestIssueList_LoadRepositories(t *testing.T) {
	assert.NoError(t, PrepareTestDatabase())

	issues := IssueList{
		AssertExistsAndLoadBean(t, &Issue{ID: 1}).(*Issue),
		AssertExistsAndLoadBean(t, &Issue{ID: 2}).(*Issue),
		AssertExistsAndLoadBean(t, &Issue{ID: 4}).(*Issue),
	}

	repositories, err := issues.LoadRepositories()
	assert.NoError(t, err)
	assert.Len(t, repositories, 2)
	for _, issue := range issues {
		assert.EqualValues(t, issue.RepoID, issue.Repo.ID)
	}
}
// TestIssueList_LoadAttributes verifies that LoadAttributes populates
// every relation on each issue (repo, labels, poster, assignee,
// milestone, pull request, attachments, comments, tracked time) with
// values consistent with the issue's own foreign keys.
func TestIssueList_LoadAttributes(t *testing.T) {
	assert.NoError(t, PrepareTestDatabase())
	// Time tracking must be on for TotalTrackedTime to be loaded.
	setting.Service.EnableTimetracking = true
	issueList := IssueList{
		AssertExistsAndLoadBean(t, &Issue{ID: 1}).(*Issue),
		AssertExistsAndLoadBean(t, &Issue{ID: 2}).(*Issue),
		AssertExistsAndLoadBean(t, &Issue{ID: 4}).(*Issue),
	}
	assert.NoError(t, issueList.LoadAttributes())
	for _, issue := range issueList {
		assert.EqualValues(t, issue.RepoID, issue.Repo.ID)
		for _, label := range issue.Labels {
			assert.EqualValues(t, issue.RepoID, label.RepoID)
			AssertExistsAndLoadBean(t, &IssueLabel{IssueID: issue.ID, LabelID: label.ID})
		}
		if issue.PosterID > 0 {
			assert.EqualValues(t, issue.PosterID, issue.Poster.ID)
		}
		if issue.AssigneeID > 0 {
			assert.EqualValues(t, issue.AssigneeID, issue.Assignee.ID)
		}
		if issue.MilestoneID > 0 {
			assert.EqualValues(t, issue.MilestoneID, issue.Milestone.ID)
		}
		if issue.IsPull {
			assert.EqualValues(t, issue.ID, issue.PullRequest.IssueID)
		}
		for _, attachment := range issue.Attachments {
			assert.EqualValues(t, issue.ID, attachment.IssueID)
		}
		for _, comment := range issue.Comments {
			assert.EqualValues(t, issue.ID, comment.IssueID)
		}
		// Expected totals come from the tracked-time test fixtures.
		if issue.ID == int64(1) {
			assert.Equal(t, int64(400), issue.TotalTrackedTime)
		} else if issue.ID == int64(2) {
			assert.Equal(t, int64(3662), issue.TotalTrackedTime)
		}
	}
}
|
# README
*forked from [jmfederico/run-xtrabackup.sh](https://gist.github.com/jmfederico/1495347)*
Note: have tested on Ubuntu 18.04 with MariaDB 10.3
## Links
[Full Backup and Restore with Mariabackup](https://mariadb.com/kb/en/library/full-backup-and-restore-with-mariabackup/)
[Incremental Backup and Restore with Mariabackup](https://mariadb.com/kb/en/library/incremental-backup-and-restore-with-mariabackup/)
---
## Install mariabackup
sudo apt install mariadb-backup
## Create a backup user
```sql
-- See https://mariadb.com/kb/en/mariabackup-overview/#authentication-and-privileges
CREATE USER 'backup'@'localhost' IDENTIFIED BY 'YourPassword';
-- MariaDB < 10.5:
GRANT RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT ON *.* TO 'backup'@'localhost';
-- MariaDB >= 10.5:
GRANT RELOAD, PROCESS, LOCK TABLES, BINLOG MONITOR ON *.* TO 'backup'@'localhost';
FLUSH PRIVILEGES;
```
## Usage
MYSQL_PASSWORD=YourPassword bash run-mariabackup.sh
## Crontab
#MySQL Backup
30 2 * * * MYSQL_PASSWORD=YourPassword bash /data/script/run-mariabackup.sh > /data/script/logs/run-mariabackup.sh.out 2>&1
---
## Restore Example
tree /data/mysql_backup/
/data/mysql_backup/
├── base
│ └── 2018-10-23_10-07-31
│ ├── backup.stream.gz
│ └── xtrabackup_checkpoints
└── incr
└── 2018-10-23_10-07-31
├── 2018-10-23_10-08-49
│ ├── backup.stream.gz
│ └── xtrabackup_checkpoints
└── 2018-10-23_10-13-58
├── backup.stream.gz
└── xtrabackup_checkpoints
```bash
# decompress
cd /data/mysql_backup/
for i in $(find . -name backup.stream.gz | grep '2018-10-23_10-07-31' | xargs dirname); \
do \
mkdir -p $i/backup; \
zcat $i/backup.stream.gz | mbstream -x -C $i/backup/; \
done
# prepare
mariabackup --prepare --target-dir base/2018-10-23_10-07-31/backup/ --user backup --password "YourPassword" --apply-log-only
mariabackup --prepare --target-dir base/2018-10-23_10-07-31/backup/ --user backup --password "YourPassword" --apply-log-only --incremental-dir incr/2018-10-23_10-07-31/2018-10-23_10-08-49/backup/
mariabackup --prepare --target-dir base/2018-10-23_10-07-31/backup/ --user backup --password "YourPassword" --apply-log-only --incremental-dir incr/2018-10-23_10-07-31/2018-10-23_10-13-58/backup/
# stop mariadb
service mariadb stop
# empty datadir
mv /data/mysql/ /data/mysql_bak/
# copy-back
mariabackup --copy-back --target-dir base/2018-10-23_10-07-31/backup/ --user backup --password "YourPassword" --datadir /data/mysql/
# fix privileges
chown -R mysql:mysql /data/mysql/
# start mariadb
service mariadb start
# done!
```
|
package com.braffdev.steganofy.lib.domain
/**
 * Payload carrying a plain text message, serialized via the Kotlin
 * default charset for String.toByteArray() (UTF-8).
 */
data class PlainTextPayload(val plaintext: String) : Payload {

    override fun getType(): Type {
        return Type.PLAINTEXT
    }

    override fun getBytes(): ByteArray {
        return plaintext.toByteArray()
    }

    override fun getLengthInBytes(): Int {
        // Delegate to getBytes() so the length and the emitted bytes can
        // never disagree (the encoding is defined in exactly one place).
        return getBytes().size
    }

    companion object {
        /** Inverse of getBytes(): rebuild the payload from raw bytes. */
        fun of(bytes: ByteArray): PlainTextPayload {
            return PlainTextPayload(String(bytes))
        }
    }
}
|
<?php
// Language strings for the statistics component templates.
// Full "<?php" open tag replaces the short "<?" tag, which breaks when
// the short_open_tag ini setting is disabled.
$MESS ['STAT_TEMPLATE_SECTION_NAME'] = "Statistics";
$MESS ['STAT_TABLE_DEFAULT_TEMPLATE_NAME'] = "Statistics table";
$MESS ['STAT_TABLE_DEFAULT_TEMPLATE_DESCRIPTION'] = "Displays a table with basic statistical parameters of the site";
?>
|
nescalante.github.io
====================
nescalante github page
|
package AdventOfCode2019
// Base contract for digit-validation rules: apply() makes a rule
// directly callable and delegates to the subclass's validate().
abstract class Rule {
  def apply(number: Int): Boolean = validate(number)

  protected def validate(number: Int): Boolean
}
// Rule: the number renders as exactly six decimal digits.
object IsSixDigit extends Rule {
  override def validate(number: Int): Boolean = number.toString.length == 6
}
// Rule: at least one pair of equal adjacent digits exists.
object AtLeastTwoAdjacentDigitsTheSame extends Rule {
  override def validate(number: Int): Boolean = validate(number.toString.toList)

  @scala.annotation.tailrec
  private def validate(number: List[Char]): Boolean = number match {
    case x :: y :: tail => if (x == y) true else validate(y :: tail)
    case _ :: Nil       => false
    // An empty digit list has no adjacent pair. Also makes the match
    // exhaustive — the original threw MatchError when given Nil.
    case Nil            => false
  }
}
// Rule: reading left to right, digits never decrease.
object DigitsNeverDecrease extends Rule {
  override def validate(number: Int): Boolean = validate(number.toString.toList)

  @scala.annotation.tailrec
  private def validate(number: List[Char]): Boolean = number match {
    case x :: y :: tail => if (x > y) false else validate(y :: tail)
    case _ :: Nil       => true
    // Vacuously true for no digits. Also makes the match exhaustive —
    // the original threw MatchError when given Nil.
    case Nil            => true
  }
}
// Rule (part 2): there is at least one run of EXACTLY two equal adjacent
// digits, i.e. a pair that is not part of a larger group such as "111".
object AtLeastTwoAdjacentDigitsTheSameAndNotPartOfLargerGroup extends Rule {
  override def validate(number: Int): Boolean = validate(number.toString.toList)

  private def validate(number: List[Char]): Boolean = {
    if (number.isEmpty) return false
    val head = number.head
    // Length of the leading run of identical digits.
    val numberOfSameAdjacentDigits = number.takeWhile(_ == head).length
    if (numberOfSameAdjacentDigits == 2) true
    // Skip the whole run (whatever its length) and examine the rest.
    else validate(number.drop(numberOfSameAdjacentDigits))
  }
}
// Advent of Code 2019, day 4: count the numbers in the puzzle range that
// satisfy all password rules; part 2 replaces the adjacent-pair rule
// with the stricter "not part of a larger group" variant.
object Puzzle4 extends App {
  val input = 246515 to 739105
  val resultPart1 = input.count { n =>
    IsSixDigit(n) &&
    AtLeastTwoAdjacentDigitsTheSame(n) &&
    DigitsNeverDecrease(n)
  }
  println(resultPart1)
  val resultPart2 = input.count { n =>
    IsSixDigit(n) &&
    AtLeastTwoAdjacentDigitsTheSameAndNotPartOfLargerGroup(n) &&
    DigitsNeverDecrease(n)
  }
  println(resultPart2)
}
|
namespace MAVN.Service.SmartVouchers.Client.Models.Responses.Enums
{
    /// <summary>
    /// Voucher campaign update error codes
    /// </summary>
    /// <remarks>
    /// NOTE(review): these values appear to cross the client API boundary —
    /// append new members at the end so existing numeric values stay stable.
    /// </remarks>
    public enum UpdateVoucherCampaignErrorCodes
    {
        /// <summary>No error code</summary>
        None = 0,
        /// <summary>Campaign not found</summary>
        VoucherCampaignNotFound,
        /// <summary>Total count must be greater than bought vouchers count</summary>
        TotalCountMustBeGreaterThanBoughtVouchersCount,
    }
}
|
package message
import (
"io"
"github.com/pkg/errors"
)
// ConnAck represents an MQTT CONNACK packet: the broker's reply to a
// CONNECT, carrying the session-present flag, a reason code and an
// optional property block.
type ConnAck struct {
	*Frame
	Property           *ConnAckProperty
	SessionPresentFlag bool
	ReasonCode         ReasonCode
}
// ConnAckProperty holds the MQTT v5 properties a CONNACK may carry.
// NOTE(review): several field names are misspelled (RetainAvalilable,
// SubscrptionIdentifierAvailable, SharedSubscriptionsAvaliable); they
// mirror identical spellings on Property (see ToProp/ToConnAck), so any
// rename must be coordinated across both types.
type ConnAckProperty struct {
	SessionExpiryInterval          uint32
	AssignedClientIdentifier       string
	ServerKeepAlive                uint16
	AuthenticationMethod           string
	AuthenticationData             []byte
	ResponseInformation            string
	ServerReference                string
	ReasonString                   string
	ReceiveMaximum                 uint16
	TopicAliasMaximum              uint16
	MaximumQoS                     uint8
	RetainAvalilable               bool
	UserProperty                   map[string]string
	MaximumPacketSize              uint32
	WildcardSubscriptionAvailable  bool
	SubscrptionIdentifierAvailable bool
	SharedSubscriptionsAvaliable   bool
}
// ToProp copies the CONNACK-specific property set into the generic
// Property structure consumed by the wire encoder (field-for-field).
func (c *ConnAckProperty) ToProp() *Property {
	return &Property{
		SessionExpiryInterval:          c.SessionExpiryInterval,
		AssignedClientIdentifier:       c.AssignedClientIdentifier,
		ServerKeepAlive:                c.ServerKeepAlive,
		AuthenticationMethod:           c.AuthenticationMethod,
		AuthenticationData:             c.AuthenticationData,
		ResponseInformation:            c.ResponseInformation,
		ServerReference:                c.ServerReference,
		ReasonString:                   c.ReasonString,
		ReceiveMaximum:                 c.ReceiveMaximum,
		TopicAliasMaximum:              c.TopicAliasMaximum,
		MaximumQoS:                     c.MaximumQoS,
		RetainAvalilable:               c.RetainAvalilable,
		UserProperty:                   c.UserProperty,
		MaximumPacketSize:              c.MaximumPacketSize,
		WildcardSubscriptionAvailable:  c.WildcardSubscriptionAvailable,
		SubscrptionIdentifierAvailable: c.SubscrptionIdentifierAvailable,
		SharedSubscriptionsAvaliable:   c.SharedSubscriptionsAvaliable,
	}
}
// ParseConnAck decodes a CONNACK variable header from p: one byte of
// acknowledge flags (bit 0 = session present), one reason code byte,
// then an optional property block. CONNACK carries no payload.
// Restructured from if/else chains into linear guard clauses (idiomatic
// Go: no else after return); behavior is unchanged, including treating
// io.EOF from the property decoder as "no properties".
func ParseConnAck(f *Frame, p []byte) (c *ConnAck, err error) {
	c = &ConnAck{
		Frame: f,
	}
	dec := newDecoder(p)

	flags, err := dec.Int()
	if err != nil {
		return nil, errors.Wrap(err, "failed to decode as int")
	}
	c.SessionPresentFlag = (flags & 0x01) > 0

	rc, err := dec.Uint()
	if err != nil {
		return nil, errors.Wrap(err, "failed to decode as uint")
	}
	if !IsReasonCodeAvailable(rc) {
		return nil, errors.New("unexpected reason code supplied")
	}
	c.ReasonCode = ReasonCode(rc)

	prop, err := dec.Property()
	if err != nil && err != io.EOF {
		return nil, errors.Wrap(err, "failed to decode property")
	}
	if err == nil && prop != nil {
		c.Property = prop.ToConnAck()
	}
	// no payload
	return c, nil
}
// NewConnAck builds a CONNACK frame with the given reason code; opts
// apply frame-level options (see newFrame).
func NewConnAck(code ReasonCode, opts ...option) *ConnAck {
	return &ConnAck{
		Frame:      newFrame(CONNACK, opts...),
		ReasonCode: code,
	}
}
// Validate reports an error when the CONNACK carries a reason code that
// is not in the known set.
func (c *ConnAck) Validate() error {
	if IsReasonCodeAvailable(uint8(c.ReasonCode)) {
		return nil
	}
	return errors.New("Invalid reason code")
}
// Encode serialises the CONNACK into wire format: acknowledge flags,
// reason code, then the property block (a zero length marker when none).
func (c *ConnAck) Encode() ([]byte, error) {
	if err := c.Validate(); err != nil {
		return nil, errors.Wrap(err, "CONNACK validation error")
	}
	enc := newEncoder()
	// Bit 0 of the acknowledge-flags byte is the session-present flag.
	flags := 0
	if c.SessionPresentFlag {
		flags = 1
	}
	enc.Int(flags)
	enc.Byte(c.ReasonCode.Byte())
	if c.Property == nil {
		enc.Uint(0)
	} else {
		enc.Property(c.Property.ToProp())
	}
	return c.Frame.Encode(enc.Get()), nil
}
|
%% =====================================================================
%% @doc An abstraction library providing an interface to the possible
%% options supported by hugin. The values returned from the functions
%% in this library can be returned in the hugin init/0 callback
%% function, or be used in the hugin API function set_option/1 and
%% set_options/1. Calling the functions in this library DOES NOTHING
%% MORE THAN RETURNING VALUES, so don't try to use them to directly
%% influence the behavior of the hugin server.
%%
%% If you want to directly influence the behavior of the server you can
%% use the corresponding functions in the hugin module. See
%% {@link hugin}.
%% @copyright 2015 Magnus Kronqvist
%% @author Magnus Kronqvist <magnus.kronqvist@gmail.com>
%% @version {@version}
%% @end
%% =====================================================================
-module(hugin_opts).
-export([max_freq/2, max_freq/3,
max_par/1]).
-opaque opt() :: {atom(), any()}.
-type time_unit() ::
ms | millisecond | milliseconds
| s | sec | second | seconds
| m | min | minute | minutes
| h | hour | hours
| d | day | days
| w | week | weeks.
-export_type([opt/0, time_unit/0]).
%% API
%% @doc An option to limit the amount of calls that hugin makes per
%% time unit. The default option is to have no limits. However, notice
%% that hugin still limits the amount of parallel connections to five
%% by default. See {@link max_par/1}.
%%
%% @equiv max_freq(Amount, 1, Unit)
-spec max_freq(Amount :: integer(), Unit :: time_unit()) -> opt().
%% Delegates to max_freq/3 with N = 1 (Amount calls per single Unit).
max_freq(A, U) ->
    max_freq(A, 1, U).
%% @doc Same as max_freq/2 but allows one more argument to specify how many
%% calls per N time units.
-spec max_freq(Amount :: integer(), N :: integer(), Unit :: time_unit())
	      -> opt().
max_freq(A, N, U) when is_integer(A), is_integer(N), is_atom(U) ->
    %% Pre-compute the window length in milliseconds so the option value
    %% is {Amount, WindowMs} regardless of the unit the caller chose.
    Ms = milliseconds(U) * N,
    {max_freq, {A, Ms}}.
%% @doc An option to limit the amount of parallel connections allowed
%% by hugin.
-spec max_par(N :: integer()) -> opt().
%% Wraps N into the {max_par, N} option tuple; the guard is the only
%% validation performed here.
max_par(N) when is_integer(N) ->
    {max_par, N}.
%% internal functions
%% Convert one of the accepted time-unit atoms to its length in
%% milliseconds. Aliases delegate to their canonical unit; any other
%% argument raises badarg, as before.
milliseconds(ms) -> 1;
milliseconds(millisecond) -> milliseconds(ms);
milliseconds(milliseconds) -> milliseconds(ms);
milliseconds(s) -> 1000 * milliseconds(ms);
milliseconds(sec) -> milliseconds(s);
milliseconds(second) -> milliseconds(s);
milliseconds(seconds) -> milliseconds(s);
milliseconds(m) -> 60 * milliseconds(s);
milliseconds(min) -> milliseconds(m);
milliseconds(minute) -> milliseconds(m);
milliseconds(minutes) -> milliseconds(m);
milliseconds(h) -> 60 * milliseconds(m);
milliseconds(hour) -> milliseconds(h);
milliseconds(hours) -> milliseconds(h);
milliseconds(d) -> 24 * milliseconds(h);
milliseconds(day) -> milliseconds(d);
milliseconds(days) -> milliseconds(d);
milliseconds(w) -> 7 * milliseconds(d);
milliseconds(week) -> milliseconds(w);
milliseconds(weeks) -> milliseconds(w);
milliseconds(_) -> erlang:error(badarg).
|
require 'bundler/setup'
require 'mini_magick'

# Slice the input image (ARGV[0]) into a grid of 128x128 tiles named
# "<prefix>_<row-letter>_<col-number>.jpg", where the prefix is ARGV[1],
# rows count a, b, c, ... downward and columns 00, 01, 02, ... rightward.
cursor_x = 0
cursor_y = 0
increment_x = 0
increment_y = 'a'

main_image = MiniMagick::Image.open(ARGV[0])

while cursor_y < main_image.dimensions[1] do
  while cursor_x < main_image.dimensions[0] do
    # Re-open the source for each tile: crop mutates the image in place.
    image = MiniMagick::Image.open(ARGV[0])
    image.crop "128x128+#{cursor_x}+#{cursor_y}"
    filename = "#{ARGV[1]}_#{increment_y}_#{increment_x.to_s.rjust(2, '0')}"
    # Fixed: the output name and the progress marker were corrupted
    # literal placeholders ("#(unknown)"), so every tile overwrote the
    # same file and the computed `filename` went unused. Restore the
    # intended string interpolation.
    image.write "#{filename}.jpg"
    print ":#{filename}:"
    cursor_x += 128
    increment_x += 1
  end
  cursor_x = 0
  cursor_y += 128
  increment_x = 0
  increment_y = increment_y.next
  print "\n"
end
|
// Copyright (c) 2013 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
// Manages head sector of btree index-file. Head sector contains the following
// items,
// rootFileposition int64
// timestamp int64
// sectorsize int64
// flistsize int64
// blocksize int64
// maxkeys int64
// pick int64
// crc uint32
package btree
import (
"bytes"
"encoding/binary"
"os"
)
// Structure to manage the head sector. Two copies of the head are kept
// on disk (at fpos_head1 and fpos_head2) so a torn write can be detected
// by comparing them; see fetch() and flush().
type Head struct {
	wstore     *WStore
	dirty      bool  // tells whether `root` has side-effects
	fpos_head1 int64 // file-offset into index file where 1st-head is
	fpos_head2 int64 // file-offset into index file where 2nd-head is
	// Following fields are persisted on disk.
	root       int64  // file-offset into index file that has root block
	timestamp  int64  // snapshot's timestamp is synced with disk as well
	sectorsize int64  // head sector-size in bytes
	flistsize  int64  // free-list size in bytes
	blocksize  int64  // btree block size in bytes
	maxkeys    int64  // maximum number of keys a btree block can store
	pick       int64  // either 0 or 1, which freelist to pick. NOT USED !!
	crc        uint32 // CRC value for head sector + freelist block
}
// newHead builds a Head for the given write-store. Sizes are copied from
// the store's configuration; the first head copy sits at offset 0 and the
// second immediately after it, one sector in.
func newHead(wstore *WStore) *Head {
	return &Head{
		wstore:     wstore,
		dirty:      false,
		root:       0,
		pick:       0,
		sectorsize: wstore.Sectorsize,
		flistsize:  wstore.Flistsize,
		blocksize:  wstore.Blocksize,
		fpos_head1: 0,
		fpos_head2: wstore.Sectorsize,
	}
}
// clone returns a fresh Head sharing hd's write-store, carrying over the
// mutable state (pick, dirty, root, timestamp); the size fields come from
// the store via newHead.
func (hd *Head) clone() *Head {
	c := newHead(hd.wstore)
	c.pick, c.dirty = hd.pick, hd.dirty
	c.root, c.timestamp = hd.root, hd.timestamp
	return c
}
// fetch reads the head sector from the index file and populates the
// persisted fields (root, timestamp, sectorsize, flistsize, blocksize,
// maxkeys, pick, crc) from the first head copy.
//
// Returns true when the two on-disk head copies differ and false when
// they are byte-identical. NOTE(review): given the original doc comment
// ("check whether head1 and head2 copies are consistent") this polarity
// looks inverted, but callers may depend on it, so it is preserved.
//
// Panics if the in-memory copy is dirty, the index file cannot be
// opened, a sector read fails, or any field fails to decode.
func (hd *Head) fetch() bool {
	LittleEndian := binary.LittleEndian
	if hd.dirty {
		panic("Cannot read index head when in-memory copy is dirty")
	}
	// BUG FIX: the open error was silently discarded and the descriptor
	// was never closed (fd leak; a failed open panicked later on a nil
	// file). Check the error and defer the close.
	rfd, err := os.Open(hd.wstore.Idxfile)
	if err != nil {
		panic(err)
	}
	defer rfd.Close()
	data1 := make([]byte, hd.sectorsize) // Read from first sector
	data2 := make([]byte, hd.sectorsize) // Read from second sector
	if _, err := rfd.ReadAt(data1, hd.fpos_head1); err != nil {
		panic(err)
	}
	if _, err := rfd.ReadAt(data2, hd.fpos_head2); err != nil {
		panic(err)
	}
	// Decode fields in the same order flush() writes them.
	buf := bytes.NewBuffer(data1)
	if err := binary.Read(buf, LittleEndian, &hd.root); err != nil {
		panic("Unable to read root from first head sector")
	}
	// BUG FIX: this message previously said "root" for the timestamp field.
	if err := binary.Read(buf, LittleEndian, &hd.timestamp); err != nil {
		panic("Unable to read timestamp from first head sector")
	}
	if err := binary.Read(buf, LittleEndian, &hd.sectorsize); err != nil {
		panic("Unable to read sectorsize from first head sector")
	}
	if err := binary.Read(buf, LittleEndian, &hd.flistsize); err != nil {
		panic("Unable to read flistsize from first head sector")
	}
	if err := binary.Read(buf, LittleEndian, &hd.blocksize); err != nil {
		panic("Unable to read blocksize from first head sector")
	}
	if err := binary.Read(buf, LittleEndian, &hd.maxkeys); err != nil {
		panic("Unable to read maxkeys from first head sector")
	}
	if err := binary.Read(buf, LittleEndian, &hd.pick); err != nil {
		panic("Unable to read pick from first head sector")
	}
	if err := binary.Read(buf, LittleEndian, &hd.crc); err != nil {
		panic("Unable to read crc from first head sector")
	}
	// Report whether the two head copies diverge.
	return !bytes.Equal(data1, data2)
}
// setRoot records fpos as the new root block and stamps the snapshot's
// timestamp, marking the head dirty so the change is flushed. Whenever an
// entry/block is updated the entire chain has to be re-added. Returns hd
// for chaining.
func (hd *Head) setRoot(fpos int64, timestamp int64) *Head {
	hd.root, hd.timestamp = fpos, timestamp
	hd.dirty = true
	return hd
}
// flush head-structure to index-file. Updates CRC for freelist.
//
// Serialises the persisted fields in the same order fetch() decodes them,
// then writes the encoded bytes to BOTH head copies (sector2 first, then
// sector1), clears the dirty flag and bumps the store's flush counter.
//
// NOTE(review): errors from binary.Write and wfd.WriteAt are ignored
// here, unlike fetch() which panics on read failures — confirm this is
// intentional.
func (hd *Head) flush(crc uint32) *Head {
	wfd := hd.wstore.idxWfd // write handle onto the index file
	LittleEndian := binary.LittleEndian
	hd.crc = crc // CRC covers head sector + freelist block
	buf := bytes.NewBuffer([]byte{})
	binary.Write(buf, LittleEndian, &hd.root)
	binary.Write(buf, LittleEndian, &hd.timestamp)
	binary.Write(buf, LittleEndian, &hd.sectorsize)
	binary.Write(buf, LittleEndian, &hd.flistsize)
	binary.Write(buf, LittleEndian, &hd.blocksize)
	binary.Write(buf, LittleEndian, &hd.maxkeys)
	binary.Write(buf, LittleEndian, &hd.pick)
	binary.Write(buf, LittleEndian, &hd.crc)
	valb := buf.Bytes()
	wfd.WriteAt(valb, hd.fpos_head2) // Write into head sector2
	wfd.WriteAt(valb, hd.fpos_head1) // Write into head sector1
	hd.dirty = false
	hd.wstore.flushHeads += 1
	return hd
}
|
import 'package:TOrder/db/models/visitor.dart';
import '../dao.dart';
/// Data-access object for the `visitors` table.
class VisitorDao implements Dao<Visitor> {
  final tableName = 'visitors';

  // Column names as they appear in the sqlite schema.
  final columnVisitorCode = 'Visitor_Code';
  final _columnVisitorName = 'Visitor_Name';
  final _columnMobileNo = 'MobileNo';
  final _columnPocketPassword = 'PocketPassword';
  final _columnIsYou = 'IsYou';

  @override
  String get createTableQuery =>
      "CREATE TABLE $tableName($columnVisitorCode INTEGER PRIMARY KEY,"
      " $_columnVisitorName TEXT,"
      " $_columnMobileNo TEXT,"
      " $_columnPocketPassword TEXT,"
      " $_columnIsYou INTEGER)";

  /// Builds a [Visitor] from a single result-set row.
  @override
  Visitor fromMap(Map<String, dynamic> query) {
    Visitor visitor = Visitor(
        query[columnVisitorCode],
        query[_columnVisitorName],
        query[_columnMobileNo],
        query[_columnPocketPassword],
        query[_columnIsYou]);
    return visitor;
  }

  /// Serialises [object] for insert/update.
  ///
  /// NOTE(review): the primary key column is omitted — presumably so
  /// sqlite assigns it on insert; confirm against the callers.
  @override
  Map<String, dynamic> toMap(Visitor object) {
    return <String, dynamic>{
      _columnVisitorName: object.visitorName,
      _columnMobileNo: object.mobileNo,
      _columnPocketPassword: object.pocketPassword,
      _columnIsYou: object.isYou
    };
  }

  /// Maps a whole result set onto a list of [Visitor]s.
  @override
  List<Visitor> fromList(List<Map<String, dynamic>> query) =>
      // FIX (idiom): `List<Visitor>()` is a deprecated constructor that is
      // removed under null safety; build the list with map/toList instead.
      query.map(fromMap).toList();
}
|
$(document).ready(function(){
    $(".imgup").hide();

    // BUG FIX: the change handler used to be (re)bound inside the click
    // handler, so every click on #pic stacked another duplicate listener
    // and the preview callback fired multiple times. Bind it once here.
    $("#img").on("change", function(){
        var objUrl = getObjectURL(this.files[0]); // temporary blob URL, not a local path
        if (objUrl) {
            $("#pic").attr("src", objUrl); // show the chosen image as the avatar preview
        }
    });

    // The file input is hidden for styling; clicking the avatar opens the picker.
    $("#pic").click(function () {
        $("#img").click();
    });

    // Overlay a message box, then let delayre() (defined elsewhere —
    // TODO confirm) dismiss it.
    // BUG FIX: `data` was referenced but never defined (ReferenceError at
    // call time); it is now a parameter, mirroring baomsg(data).
    // Also closed the outer <div> that was left unbalanced.
    // NOTE(review): 'marker-top' is not a CSS property (margin-top?) —
    // kept verbatim pending confirmation.
    function baomsgjq(data){
        var msg="<div style='z-index:998; position: absolute;width: 100%;marker-top:200px;top:300px;''>"+
            "<div style='z-index:998; margin: auto;width: 500px;height: 200px;background-color: rgba(0,0,0,0.5);'>"+
            "<p id='msg' style='color: #ffffff;text-align: center;padding: 50px;font-size: 20px;'>"+data+"</p></div></div>";
        $("body").append(msg);
        delayre();
    };
});
// Build a temporary object URL for a local File so it can be previewed
// before upload. Tries the legacy API first, then the standard and
// vendor-prefixed ones; returns null when none is available.
function getObjectURL(file) {
    var provider = null;
    if (window.createObjectURL != undefined) { // basic (legacy, non-standard)
        provider = window;
    } else if (window.URL != undefined) { // mozilla(firefox) / standard
        provider = window.URL;
    } else if (window.webkitURL != undefined) { // webkit or chrome
        provider = window.webkitURL;
    }
    return provider ? provider.createObjectURL(file) : null;
}
// Overlay `data` in a centered message box, then reload the page after
// 800ms so the updated state is rendered.
// NOTE(review): 'marker-top' is not a CSS property (margin-top?) and the
// stray '' after top:300px looks like a typo — kept verbatim pending confirm.
function baomsg(data){
    // BUG FIX: the outer <div> was never closed; appended the missing </div>.
    var msg="<div style='z-index:998; position: absolute;width: 100%;marker-top:200px;top:300px;''>"+
        "<div style='z-index:998; margin: auto;width: 500px;height: 200px;background-color: rgba(0,0,0,0.5);'>"+
        "<p id='msg' style='color: #ffffff;text-align: center;padding: 50px;font-size: 20px;'>"+data+"</p></div></div>";
    $("body").append(msg);
    // BUG FIX: pass a function to setTimeout rather than a string
    // (the string form is an implicit eval).
    setTimeout(function(){ location.reload(); }, 800);
}
|
#!/usr/bin/ruby
# Control structures
# Teaching script: each construct below prints (or evaluates to) a value
# so the behaviour can be observed when run.

# `if` is an expression in Ruby — it evaluates to the chosen branch.
if true
  'if statement'
elsif false
  'else if, optional'
else
  'else, also optional'
end

# C-style `for` over an inclusive range (1..5).
for counter in 1..5
  puts "iteration #{counter}"
end
#=> iteration 1
#=> iteration 2
#=> iteration 3
#=> iteration 4
#=> iteration 5

# HOWEVER
# No-one uses for loops
# Use `each` instead, like this:
(1..5).each do |counter|
  puts "iteration #{counter}"
end
#=> iteration 1
#=> iteration 2
#=> iteration 3
#=> iteration 4
#=> iteration 5

# `while` repeats as long as the condition is truthy.
counter = 1
while counter <= 5
  puts "iteration #{counter}"
  counter += 1
end
#=> iteration 1
#=> iteration 2
#=> iteration 3
#=> iteration 4
#=> iteration 5

# `case` compares `grade` against each `when` value using ===.
grade = 'B'
case grade
when 'A'
  puts 'Way to go kiddo'
when 'B'
  puts 'Better luck next time'
when 'C'
  puts 'You can do better'
when 'D'
  puts 'Scraping through'
when 'F'
  puts 'You failed!'
else
  puts 'Alternative grading system, eh?'
end
|
package com.example.movieshowstracker.base
import androidx.multidex.MultiDexApplication
import com.example.movieshowstracker.di.appComponent
import net.danlew.android.joda.JodaTimeAndroid
import org.koin.android.ext.koin.androidContext
import org.koin.core.context.startKoin
/**
 * Application entry point. Starts the Koin dependency graph and then
 * initialises Joda-Time for Android before any component is created.
 */
class MainApp : MultiDexApplication() {

    override fun onCreate() {
        super.onCreate()
        initiateKoin()
        JodaTimeAndroid.init(this)
    }

    // Boot Koin with the application context and the app's module list.
    private fun initiateKoin() = startKoin {
        androidContext(this@MainApp)
        modules(provideDependency())
    }

    // Open so subclasses can supply alternative modules — presumably for
    // test builds; confirm against any subclassing code.
    open fun provideDependency() = appComponent
}
|
#! /bin/bash

# Print a greeting that identifies this library file as its source.
say_world() {
    printf '%s\n' "World (from lib.sh)"
}
|
#!/bin/bash
# Provision a GCP network (TCP) load balancer in us-east1: an instance
# template, two backend VMs, a health check, a target pool, a static IP
# and a forwarding rule. Requires an authenticated gcloud CLI.
set -e
set -x

# Create an HTTP server instance template
gcloud compute instance-templates create simple-http-server-v1 \
  --machine-type f1-micro \
  --region us-east1 \
  --tags http-server \
  --metadata-from-file startup-script=./startup-script.sh

# Create two instances using the instance template, one in
# us-east1-b and one in us-east1-c
gcloud compute instances create simple-http-server-1 \
  --source-instance-template simple-http-server-v1 \
  --zone us-east1-b
gcloud compute instances create simple-http-server-2 \
  --source-instance-template simple-http-server-v1 \
  --zone us-east1-c

# Create an HTTP health check to use on the new instances
gcloud compute http-health-checks create simple-http-get

# Create a target pool and attach the instances
gcloud compute target-pools create us-east-tcp-unmanaged \
  --region us-east1 \
  --http-health-check simple-http-get
# BUG FIX: the first add-instances command ended with a stray trailing
# backslash, splicing the second gcloud invocation into its argument
# list — neither instance was actually added to the pool.
gcloud compute target-pools add-instances us-east-tcp-unmanaged \
  --region us-east1 \
  --instances simple-http-server-1 \
  --instances-zone us-east1-b
gcloud compute target-pools add-instances us-east-tcp-unmanaged \
  --region us-east1 \
  --instances simple-http-server-2 \
  --instances-zone us-east1-c

# Create a regional static IP
gcloud compute addresses create us-east-tcp-ip \
  --region us-east1

# Create a regional forwarding rule using the regional IP and target pool
gcloud compute forwarding-rules create us-east1-tcp-http \
  --region us-east1 \
  --target-pool us-east-tcp-unmanaged \
  --target-pool-region us-east1 \
  --ports 80 \
  --address us-east-tcp-ip \
  --address-region us-east1

# Show load balancer public IP
public_ip=$(gcloud compute forwarding-rules describe us-east1-tcp-http \
  --region us-east1 --format="value(IPAddress)")
echo "Setup complete. Network LB IP is: $public_ip"
|
<?php
namespace App\Http\Controllers;
use Illuminate\Http\Request;
/**
 * Demo controller for album-related routes.
 */
class AlbumController extends Controller
{
    /**
     * Plain-text response for the album index route.
     */
    public function index()
    {
        return 'Hello Album!';
    }

    /**
     * Redirect to the named 'album' route.
     */
    public function test()
    {
        // $url = route('profile');
        return redirect()->route('album');
    }

    /**
     * Returns a confirmation string — presumably reached only after some
     * token-validating middleware; confirm against the route definitions.
     */
    public function token()
    {
        return 'Token is valid!';
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.