hexsha stringlengths 40 40 | size int64 7 1.05M | ext stringclasses 13
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 269 | max_stars_repo_name stringlengths 5 108 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 9 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 269 | max_issues_repo_name stringlengths 5 116 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 9 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 269 | max_forks_repo_name stringlengths 5 116 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 9 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 7 1.05M | avg_line_length float64 1.21 330k | max_line_length int64 6 990k | alphanum_fraction float64 0.01 0.99 | author_id stringlengths 2 40 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38b3d591571417f9228d4f060a5c25dc6ff02d0f | 1,327 | cc | C++ | leet_code/Letter_Combinations_of_a_Phone_Number/solve.cc | ldy121/algorithm | 7939cb4c15e2bc655219c934f00c2bb74ddb4eec | [
"Apache-2.0"
] | 1 | 2020-04-11T22:04:23.000Z | 2020-04-11T22:04:23.000Z | leet_code/Letter_Combinations_of_a_Phone_Number/solve.cc | ldy121/algorithm | 7939cb4c15e2bc655219c934f00c2bb74ddb4eec | [
"Apache-2.0"
] | null | null | null | leet_code/Letter_Combinations_of_a_Phone_Number/solve.cc | ldy121/algorithm | 7939cb4c15e2bc655219c934f00c2bb74ddb4eec | [
"Apache-2.0"
class Solution {
private:
    std::vector<std::string> answer;      // combinations collected for the current query
    std::vector<std::vector<char>> phone; // keypad map: digit value -> letters

    // Depth-first expansion of the remaining digits.
    // `buffer` holds the letters already chosen for digits[0..idx).
    // When all digits are consumed, a non-empty buffer is recorded as one
    // combination (the empty buffer from an empty input is discarded).
    void getAnswer(const std::string &digits, std::size_t idx, const std::string &buffer) {
        if (idx == digits.size()) {
            if (!buffer.empty()) {
                answer.push_back(buffer);
            }
            return;
        }
        if (digits[idx] == '1') {
            // '1' maps to no letters on the keypad; skip it.
            getAnswer(digits, idx + 1, buffer);
            return;
        }
        for (char ch : phone[digits[idx] - '0']) {
            getAnswer(digits, idx + 1, buffer + ch);
        }
    }
public:
    // Builds the digit-to-letters keypad table once per instance.
    Solution() {
        phone.push_back({' '});                // '0' -> space
        phone.push_back({});                   // '1' -> nothing (handled in getAnswer)
        phone.push_back({'a', 'b', 'c'});
        phone.push_back({'d', 'e', 'f'});
        phone.push_back({'g', 'h', 'i'});
        phone.push_back({'j', 'k', 'l'});
        phone.push_back({'m', 'n', 'o'});
        phone.push_back({'p', 'q', 'r', 's'});
        phone.push_back({'t', 'u', 'v'});
        phone.push_back({'w', 'x', 'y', 'z'});
    }
    // Returns all letter combinations the digit string could represent.
    // Fix: clear any result from a previous call so repeated calls on the
    // same Solution instance do not accumulate stale combinations.
    std::vector<std::string> letterCombinations(std::string digits) {
        answer.clear();
        getAnswer(digits, 0, std::string());
        return answer;
    }
};
| 31.595238 | 124 | 0.494348 | ldy121 |
38b7a8929dce7b091bbe16ab65b400df913a7066 | 12,494 | cpp | C++ | cpp/opendnp3/src/opendnp3/outstation/Outstation.cpp | tarm/dnp3_orig | 87c639b3462c980fba255e85793f6ec663abe981 | [
"Apache-2.0"
] | null | null | null | cpp/opendnp3/src/opendnp3/outstation/Outstation.cpp | tarm/dnp3_orig | 87c639b3462c980fba255e85793f6ec663abe981 | [
"Apache-2.0"
] | null | null | null | cpp/opendnp3/src/opendnp3/outstation/Outstation.cpp | tarm/dnp3_orig | 87c639b3462c980fba255e85793f6ec663abe981 | [
"Apache-2.0"
] | 3 | 2016-07-13T18:54:13.000Z | 2021-04-12T13:30:39.000Z | /**
* Licensed to Green Energy Corp (www.greenenergycorp.com) under one or
* more contributor license agreements. See the NOTICE file distributed
* with this work for additional information regarding copyright ownership.
* Green Energy Corp licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This project was forked on 01/01/2013 by Automatak, LLC and modifications
* may have been made to this file. Automatak, LLC licenses these modifications
* to you under the terms of the License.
*/
#include "Outstation.h"
#include "opendnp3/app/AnalogOutput.h"
#include "opendnp3/app/ControlRelayOutputBlock.h"
#include "opendnp3/app/APDUParser.h"
#include "opendnp3/DNPErrorCodes.h"
#include "opendnp3/LogLevels.h"
#include <openpal/LogMacros.h>
#include <openpal/IExecutor.h>
#include "opendnp3/outstation/OutstationStates.h"
#include "opendnp3/outstation/Database.h"
#include "opendnp3/outstation/IINHelpers.h"
#include "opendnp3/outstation/WriteHandler.h"
#include "opendnp3/outstation/ReadHandler.h"
#include "opendnp3/outstation/CommandActionAdapter.h"
#include "opendnp3/outstation/ConstantCommandAction.h"
#include "opendnp3/outstation/CommandResponseHandler.h"
using namespace openpal;
namespace opendnp3
{
// Builds an outstation bound to its application layer, executor, database,
// event buffers and command handler. The state machine starts in the
// closed state (AS_Closed); the initial null unsolicited response is
// deferred unless unsolicited messaging is disabled in the config.
Outstation::Outstation(openpal::LogRoot& root,
                       IAppLayer* pAppLayer,
                       IExecutor* pExecutor,
                       ITimeWriteHandler* pTimeWriteHandler,
                       Database* pDatabase,
                       const EventBufferFacade& buffers,
                       ICommandHandler* pCmdHandler,
                       const OutstationConfig& config) :
	IAppUser(root),
	selectBuffer(pExecutor, config.mSelectTimeout),
	lastResponse(responseBuffer.GetWriteBuffer()),
	pExecutor(pExecutor),
	mpAppLayer(pAppLayer),
	mpDatabase(pDatabase),
	mpCmdHandler(pCmdHandler),
	mpState(AS_Closed::Inst()),
	mConfig(config),
	mpUnsolTimer(nullptr),
	mpTimeWriteHandler(pTimeWriteHandler),
	eventBuffer(buffers),
	rspContext(pDatabase, &eventBuffer, StaticResponseTypes(config)),
	mDeferredUnsol(false),
	mStartupNullUnsol(false),
	mpTimeTimer(nullptr)
{
	// Link the event buffer to the database
	mpDatabase->SetEventBuffer(eventBuffer);

	staticIIN.Set(IINBit::DEVICE_RESTART); // Always set on restart

	/* Cause the Outstation to go through the null-unsol startup sequence */
	if (!mConfig.mDisableUnsol)
	{
		mDeferredUnsol = true;
	}
}
// Cancels any pending timers so their callbacks cannot fire into a
// destroyed outstation.
Outstation::~Outstation()
{
	if (mpUnsolTimer != nullptr)
	{
		mpUnsolTimer->Cancel();
	}

	if (mpTimeTimer != nullptr)
	{
		mpTimeTimer->Cancel();
	}
}
// Latches the NEED_TIME IIN bit so the master is told a time sync is needed.
void Outstation::SetNeedTimeIIN()
{
	staticIIN.Set(IINBit::NEED_TIME);
}

/* Implement IAppUser - external callbacks from the app layer */
// All of these forward to the current state object (state pattern);
// the state decides how (or whether) to react.

void Outstation::OnLowerLayerUp()
{
	mpState->OnLowerLayerUp(this);
}

void Outstation::OnLowerLayerDown()
{
	mpState->OnLowerLayerDown(this);
}

void Outstation::OnSolSendSuccess()
{
	mpState->OnSolSendSuccess(this);
}

void Outstation::OnSolFailure()
{
	mpState->OnSolFailure(this);
	SIMPLE_LOG_BLOCK(logger, flags::WARN, "Response failure");
}

void Outstation::OnUnsolSendSuccess()
{
	mpState->OnUnsolSendSuccess(this);
}

void Outstation::OnUnsolFailure()
{
	mpState->OnUnsolFailure(this);
	SIMPLE_LOG_BLOCK(logger, flags::WARN, "Unsol response failure");
}

// A complete APDU request has been received from the master.
void Outstation::OnRequest(const APDURecord& record, SequenceInfo aSeqInfo)
{
	mpState->OnRequest(this, record, aSeqInfo);
}

/* Internally generated events */

// Measurement/event data has been updated by the application.
void Outstation::OnDataUpdate()
{
	// let the current state decide how to handle the change buffer
	mpState->OnDataUpdate(this);
}

// The unsolicited-response timer fired; the handle is cleared before
// dispatching so the state may re-arm it.
void Outstation::OnUnsolTimerExpiration()
{
	// let the current state decide how to handle the timer expiration
	mpUnsolTimer = nullptr;
	mpState->OnUnsolExpiration(this);
}
// Transitions the state machine, logging the edge and running the new
// state's entry action.
void Outstation::ChangeState(OutstationStateBase* apState)
{
	FORMAT_LOG_BLOCK(logger, flags::DBG, "State changed from %s to %s", mpState->Name(), apState->Name());
	mpState = apState;
	mpState->Enter(this);
}
// Builds and sends the solicited response to a fully received request.
// Any request other than SELECT/OPERATE invalidates a pending selection.
void Outstation::RespondToRequest(const APDURecord& record, SequenceInfo sequence)
{
	if (!(record.function == FunctionCode::SELECT || record.function == FunctionCode::OPERATE))
	{
		selectBuffer.Clear();
	}

	APDUResponse response(responseBuffer.GetWriteBuffer(mConfig.mMaxFragSize));
	response.SetFunction(FunctionCode::RESPONSE);
	response.SetControl(AppControlField::DEFAULT.ToByte());
	auto indications = ConfigureResponse(record, sequence, response);
	// Kept so a retransmitted OPERATE can be answered with the same bytes.
	lastResponse = response;
	this->SendResponse(response, indications);
}
// Dispatches the request to the handler for its function code, filling the
// response in place. Returns the IIN bits to merge into the response;
// unknown function codes yield FUNC_NOT_SUPPORTED.
IINField Outstation::ConfigureResponse(const APDURecord& request, SequenceInfo sequence, APDUResponse& response)
{
	switch(request.function)
	{
	case(FunctionCode::READ) :
		return HandleRead(request, sequence, response);
	case(FunctionCode::WRITE):
		return HandleWrite(request, sequence);
	case(FunctionCode::SELECT) :
		return HandleSelect(request, sequence, response);
	case(FunctionCode::OPERATE) :
		return HandleOperate(request, sequence, response);
	case(FunctionCode::DIRECT_OPERATE) :
		return HandleDirectOperate(request, sequence, response);
	case(FunctionCode::DELAY_MEASURE):
		return HandleDelayMeasure(request, sequence, response);
	default:
		FORMAT_LOG_BLOCK_WITH_CODE(logger, flags::WARN, SERR_FUNC_NOT_SUPPORTED, "Function not supported: %s", FunctionCodeToString(request.function));
		return IINField(IINBit::FUNC_NOT_SUPPORTED);
	}
}
// Handles a WRITE request (e.g. time writes, clearing IIN bits) by parsing
// the object headers through a WriteHandler. Returns the handler's
// accumulated per-object errors, or the IIN translation of a parse failure.
IINField Outstation::HandleWrite(const APDURecord& request, SequenceInfo sequence)
{
	WriteHandler handler(logger, mpTimeWriteHandler, &staticIIN);
	auto result = APDUParser::ParseTwoPass(request.objects, &handler, &logger);
	if (result == APDUParser::Result::OK)
	{
		return handler.Errors();
	}
	else
	{
		return IINFromParseResult(result);
	}
}
// Handles a READ request: resets the multi-fragment response context,
// parses the requested object headers, then loads as much selected data as
// fits into this fragment. On parse failure the context is reset again so
// no partial selection leaks into a later request.
IINField Outstation::HandleRead(const APDURecord& request, SequenceInfo sequence, APDUResponse& response)
{
	rspContext.Reset();
	ReadHandler handler(logger, rspContext);
	auto result = APDUParser::ParseTwoPass(request.objects, &handler, &logger, APDUParser::Context(false)); // don't expect range/count context on a READ
	if(result == APDUParser::Result::OK)
	{
		// Do a transaction on the database (lock) for multi-threaded environments
		// if the request contained static variations, we double buffer (copy) the entire static database.
		// this ensures that an multi-fragmented responses see a consistent snapshot
		openpal::Transaction tx(mpDatabase);
		mpDatabase->DoubleBuffer(); // todo, make this optional?
		auto control = rspContext.Load(response);
		response.SetControl(control.ToByte());
		return handler.Errors();
	}
	else
	{
		rspContext.Reset();
		return IINFromParseResult(result);
	}
}
// Handles a SELECT request (first half of select-before-operate).
// Commands are validated (not executed); on success the request is latched
// in the select buffer so a following OPERATE with the matching sequence
// number and identical object data can be executed.
IINField Outstation::HandleSelect(const APDURecord& request, SequenceInfo sequence, APDUResponse& response)
{
	// Outstations require 2 extra bytes for IIN bits. Therefore it's possible
	// there are some requests you cannot possibly answer since responses are
	// an exact echo of the requests with status fields changed.
	if(request.objects.Size() > response.Remaining())
	{
		// typo fix: "Igonring" -> "Ignoring"
		FORMAT_LOG_BLOCK(logger, flags::WARN, "Ignoring command request due to payload size of %i", request.objects.Size());
		selectBuffer.Clear();
		return IINField(IINBit::PARAM_ERROR);
	}
	else
	{
		// 'true' => adapter runs the commands in select (validate-only) mode
		CommandActionAdapter adapter(this->mpCmdHandler, true);
		CommandResponseHandler handler(logger, mConfig.mMaxControls, &adapter, response);
		auto result = APDUParser::ParseTwoPass(request.objects, &handler, &logger);
		if (result == APDUParser::Result::OK)
		{
			if(handler.AllCommandsSuccessful())
			{
				// remember the selection so OPERATE can verify it matches
				// (renamed from 'result' to avoid shadowing the parse result)
				auto selectResult = selectBuffer.Select(request.control.SEQ, request.objects);
				switch (selectResult)
				{
				case(SelectBuffer::SelectResult::OK) :
				case(SelectBuffer::SelectResult::REPEAT):
					return IINField::Empty;
				default:
					return IINField(IINBit::PARAM_ERROR);
				}
			}
			else
			{
				// per-command status codes were already written into the echo
				return IINField::Empty;
			}
		}
		else return IINFromParseResult(result);
	}
}
// Handles an OPERATE request (second half of select-before-operate).
// The request must match a prior SELECT (same sequence number and object
// data) and arrive before the select timeout; otherwise the commands are
// echoed back with TIMEOUT or NO_SELECT status instead of being executed.
IINField Outstation::HandleOperate(const APDURecord& request, SequenceInfo sequence, APDUResponse& response)
{
	if (request.objects.Size() > response.Remaining())
	{
		// Response must echo the request plus 2 IIN bytes, so an oversized
		// request can never be answered.
		// typo fix: "Igonring" -> "Ignoring"
		FORMAT_LOG_BLOCK(logger, flags::WARN, "Ignoring command request due to payload size of %i", request.objects.Size());
		selectBuffer.Clear();
		return IINField(IINBit::PARAM_ERROR);
	}
	else
	{
		auto result = selectBuffer.Operate(request.control.SEQ, request.objects);
		switch (result)
		{
		case(SelectBuffer::OperateResult::TIMEOUT):
			// selection expired: echo the commands back with TIMEOUT status
			return HandleCommandWithConstant(request, response, CommandStatus::TIMEOUT);
		case(SelectBuffer::OperateResult::REPEAT):
		{
			// retransmitted OPERATE: repeat the previous response verbatim
			response = lastResponse;
			return lastResponse.GetIIN();
		}
		case(SelectBuffer::OperateResult::OK) :
		{
			// valid selection: 'false' => actually execute the commands
			CommandActionAdapter adapter(this->mpCmdHandler, false);
			CommandResponseHandler handler(logger, mConfig.mMaxControls, &adapter, response);
			// renamed from 'result' to avoid shadowing the outer variable
			auto parseResult = APDUParser::ParseTwoPass(request.objects, &handler, &logger);
			return IINFromParseResult(parseResult);
		}
		default:
			// no matching SELECT: echo the commands back with NO_SELECT status
			return HandleCommandWithConstant(request, response, CommandStatus::NO_SELECT);
		}
	}
}
// Handles a DIRECT_OPERATE request: executes the commands immediately,
// without requiring a prior SELECT.
IINField Outstation::HandleDirectOperate(const APDURecord& request, SequenceInfo sequence, APDUResponse& response)
{
	if (request.objects.Size() > response.Remaining())
	{
		// Response must echo the request plus 2 IIN bytes, so an oversized
		// request can never be answered.
		// typo fix: "Igonring" -> "Ignoring"
		FORMAT_LOG_BLOCK(logger, flags::WARN, "Ignoring command request due to payload size of %i", request.objects.Size());
		return IINField(IINBit::PARAM_ERROR);
	}
	else
	{
		CommandActionAdapter adapter(this->mpCmdHandler, false); // do the operation
		CommandResponseHandler handler(logger, mConfig.mMaxControls, &adapter, response);
		auto result = APDUParser::ParseTwoPass(request.objects, &handler, &logger);
		return IINFromParseResult(result);
	}
}
// Sends the next fragment of a multi-fragment response, using the
// selection state already accumulated in rspContext by a prior READ.
void Outstation::ContinueResponse()
{
	APDUResponse response(responseBuffer.GetWriteBuffer(mConfig.mMaxFragSize));
	response.SetFunction(FunctionCode::RESPONSE);

	{
		// perform a transaction (lock) the database
		openpal::Transaction tx(mpDatabase);
		auto control = this->rspContext.Load(response);
		response.SetControl(control.ToByte());
	}

	this->SendResponse(response);
}
// Handles DELAY_MEASURE: replies with a single Group52Var2 object carrying
// a zero delay. A delay-measure request carrying any object headers is
// rejected as unsupported.
IINField Outstation::HandleDelayMeasure(const APDURecord& request, SequenceInfo sequence, APDUResponse& response)
{
	if(request.objects.IsEmpty())
	{
		auto writer = response.GetWriter();
		Group52Var2 value = { 0 }; // respond with 0 time delay
		writer.WriteSingleValue<UInt8, Group52Var2>(QualifierCode::UINT8_CNT, value);
		return IINField::Empty;
	}
	else
	{
		// there shouldn't be any trailing headers in delay measure request, no need to even parse
		return IINField(IINBit::FUNC_NOT_SUPPORTED);
	}
}
// Echoes every command in the request back with a fixed status code
// (e.g. TIMEOUT, NO_SELECT) instead of executing it.
IINField Outstation::HandleCommandWithConstant(const APDURecord& request, APDUResponse& response, CommandStatus status)
{
	ConstantCommandAction constant(status);
	CommandResponseHandler handler(logger, mConfig.mMaxControls, &constant, response);
	auto result = APDUParser::ParseTwoPass(request.objects, &handler, &logger);
	return IINFromParseResult(result);
}
// Merges the static, dynamic, and request-specific IIN bits into the
// response and hands it to the application layer for transmission.
void Outstation::SendResponse(APDUResponse& response, const IINField& indications)
{
	IINField responseIIN(staticIIN | GetDynamicIIN() | indications);
	response.SetIIN(responseIIN);
	mpAppLayer->SendResponse(response);
}
// Computes the IIN bits that vary with runtime state: pending class 1/2/3
// events and event-buffer overflow.
IINField Outstation::GetDynamicIIN()
{
	IINField field;
	auto unselected = eventBuffer.UnselectedEvents();

	if (unselected.class1.HasEvents())
	{
		field.Set(IINBit::CLASS1_EVENTS);
	}

	if (unselected.class2.HasEvents())
	{
		field.Set(IINBit::CLASS2_EVENTS);
	}

	if (unselected.class3.HasEvents())
	{
		field.Set(IINBit::CLASS3_EVENTS);
	}

	if (eventBuffer.IsOverflown())
	{
		field.Set(IINBit::EVENT_BUFFER_OVERFLOW);
	}

	return field;
}
// Arms the unsolicited-response timer; must not already be running.
void Outstation::StartUnsolTimer(openpal::TimeDuration aTimeout)
{
	assert(mpUnsolTimer == nullptr);
	auto lambda = [this]() { this->OnUnsolTimerExpiration(); };
	mpUnsolTimer = pExecutor->Start(aTimeout, Bind(lambda));
}

// Re-asserts the NEED_TIME IIN bit and reschedules itself every
// mTimeSyncPeriod, periodically prompting the master to resynchronize time.
void Outstation::ResetTimeIIN()
{
	mpTimeTimer = nullptr;
	staticIIN.Set(IINBit::NEED_TIME);
	auto lambda = [this]() { this->ResetTimeIIN(); };
	mpTimeTimer = pExecutor->Start(mConfig.mTimeSyncPeriod, Bind(lambda));
}
} //end ns
| 30.325243 | 150 | 0.74956 | tarm |
38b8c0ee00901e25876500ed31aeb70e384d808f | 1,681 | cpp | C++ | source/bezier_debug.cpp | Sankhma/AutismoSimulator | ac0cd2c321929b92cadfed9eb6c96faa9e60e18e | [
"MIT"
] | null | null | null | source/bezier_debug.cpp | Sankhma/AutismoSimulator | ac0cd2c321929b92cadfed9eb6c96faa9e60e18e | [
"MIT"
] | 38 | 2020-11-08T21:54:57.000Z | 2020-12-01T09:33:11.000Z | source/bezier_debug.cpp | Sankhma/AutismoSimulator | ac0cd2c321929b92cadfed9eb6c96faa9e60e18e | [
"MIT"
] | 1 | 2020-11-04T22:17:11.000Z | 2020-11-04T22:17:11.000Z | #include <iostream>
#include "Bezier.h"
#include "Vector.h"
// and this
// Debug harness for the Bezier curve class: builds curves from a point
// vector and from variadic arguments, appends extra control points, and
// samples each curve at evenly spaced parameter values t in [0, 1].
int main(){
	Vector2<double> vec0 = Vector2<double>(1, 2);
	Vector2<double> vec1 = Vector2<double>(2, 3);
	Vector2<double> vec2 = Vector2<double>(7, 4);
	Vector2<double> vec3 = Vector2<double>(133, 1123);

	std::vector<Vector2<double>> points;
	points.push_back(vec0);
	points.push_back(vec1);
	points.push_back(vec2);
	points.push_back(vec3);

	// Curve built from a std::vector of control points.
	Bezier<Vector2<double>> bez0 = Bezier<Vector2<double>>(points);
	int steps = 5;

	std::cout << "Bezier2 has 4 points in total" << std::endl;
	for(int i=0; i <= steps; i++){
		double t = double(i) / steps;
		Bezier<Vector2<double>>::GenerateVertex(bez0, t);
	}

	bez0.addPoint(Vector2<double>(1, 2));
	bez0.addPoint(Vector2<double>(420, 69));

	std::cout << "Added 2 more points, Bezier2 has 6 points in total" << std::endl;
	for(int i=0; i <= steps; i++){
		double t = double(i) / steps;
		Bezier<Vector2<double>>::GenerateVertex(bez0, t);
	}

	// Curve built via the variadic-argument constructor (count + pointers).
	Bezier<Vector2<double>> bez1 = Bezier<Vector2<double>>(3, &vec0, &vec1, &vec2);

	std::cout << "Bezier2 (initialized using variadic arguments) has 3 points in total" << std::endl;
	for(int i=0; i <= steps; i++){
		double t = double(i) / steps;
		Bezier<Vector2<double>>::GenerateVertex(bez1, t);
	}

	bez1.addPoint(Vector2<double>(1, 2));
	bez1.addPoint(Vector2<double>(1, 10));

	// typo fix in output message: "point" -> "points"
	std::cout << "Added 2 more points, Bezier2 has 5 points in total" << std::endl;
	for(int i=0; i <= steps; i++){
		double t = double(i) / steps;
		Bezier<Vector2<double>>::GenerateVertex(bez1, t);
	}
}
38bd0b1ecf9dfade373e61c58b230de2b2bcb28f | 8,093 | cpp | C++ | deps/libgeos/geos/src/operation/polygonize/Polygonizer.cpp | AmristarSolutions/node-gdal-next | 8c0a7d9b26c240bf04abbf1b1de312b0691b3d88 | [
"Apache-2.0"
] | 57 | 2020-02-08T17:52:17.000Z | 2021-10-14T03:45:09.000Z | deps/libgeos/geos/src/operation/polygonize/Polygonizer.cpp | AmristarSolutions/node-gdal-next | 8c0a7d9b26c240bf04abbf1b1de312b0691b3d88 | [
"Apache-2.0"
] | 47 | 2020-02-12T16:41:40.000Z | 2021-09-28T22:27:56.000Z | deps/libgeos/geos/src/operation/polygonize/Polygonizer.cpp | AmristarSolutions/node-gdal-next | 8c0a7d9b26c240bf04abbf1b1de312b0691b3d88 | [
"Apache-2.0"
] | 8 | 2020-03-17T11:18:07.000Z | 2021-10-14T03:45:15.000Z | /**********************************************************************
*
* GEOS - Geometry Engine Open Source
* http://geos.osgeo.org
*
* Copyright (C) 2010 Sandro Santilli <strk@kbt.io>
* Copyright (C) 2005-2006 Refractions Research Inc.
* Copyright (C) 2001-2002 Vivid Solutions Inc.
*
* This is free software; you can redistribute and/or modify it under
* the terms of the GNU Lesser General Public Licence as published
* by the Free Software Foundation.
* See the COPYING file for more information.
*
**********************************************************************
*
* Last port: operation/polygonize/Polygonizer.java 0b3c7e3eb0d3e
*
**********************************************************************/
#include <geos/operation/polygonize/Polygonizer.h>
#include <geos/operation/polygonize/PolygonizeGraph.h>
#include <geos/operation/polygonize/EdgeRing.h>
#include <geos/operation/polygonize/HoleAssigner.h>
#include <geos/geom/LineString.h>
#include <geos/geom/Geometry.h>
#include <geos/geom/Polygon.h>
#include <geos/geom/CoordinateArraySequence.h>
#include <geos/util/Interrupt.h>
#include <geos/index/strtree/STRtree.h>
// std
#include <vector>
#ifdef _MSC_VER
#pragma warning(disable:4355)
#endif
#ifndef GEOS_DEBUG
#define GEOS_DEBUG 0
#endif
using namespace std;
using namespace geos::geom;
namespace geos {
namespace operation { // geos.operation
namespace polygonize { // geos.operation.polygonize
// Binds the filter to the polygonizer that will receive extracted linework.
Polygonizer::LineStringAdder::LineStringAdder(Polygonizer* p):
    pol(p)
{
}
void
Polygonizer::LineStringAdder::filter_ro(const Geometry* g)
{
auto ls = dynamic_cast<const LineString*>(g);
if(ls) {
pol->add(ls);
}
}
// Creates a polygonizer. When onlyPolygonal is true, getPolygons() returns
// only the shells kept by findDisjointShells(); otherwise every valid ring
// polygon is returned. The graph is created lazily on the first add().
Polygonizer::Polygonizer(bool onlyPolygonal):
    lineStringAdder(this),
    extractOnlyPolygonal(onlyPolygonal),
    graph(nullptr),
    dangles(),
    cutEdges(),
    invalidRingLines(),
    holeList(),
    shellList(),
    polyList(nullptr)
{
}
/*
 * Adds a collection of geometries to be polygonized.
 * May be called multiple times. Any dimension of Geometry may be added;
 * the constituent linework will be extracted and used.
 *
 * @param geomList a list of {@link Geometry}s with linework to be polygonized
 */
void
Polygonizer::add(vector<Geometry*>* geomList)
{
    for(auto& g : (*geomList)) {
        add(g);
    }
}

/*
 * Adds a collection of (const) geometries to be polygonized.
 * May be called multiple times. Any dimension of Geometry may be added;
 * the constituent linework will be extracted and used.
 *
 * @param geomList a list of {@link Geometry}s with linework to be polygonized
 */
void
Polygonizer::add(vector<const Geometry*>* geomList)
{
    for(auto& g : (*geomList)) {
        add(g);
    }
}

/*
 * Adds a single geometry to the linework to be polygonized.
 * May be called multiple times. Any dimension of Geometry may be added;
 * the constituent linework will be extracted and used.
 *
 * @param g a Geometry with linework to be polygonized
 */
void
Polygonizer::add(Geometry* g)
{
    g->apply_ro(&lineStringAdder);
}

/*
 * Adds a single (const) geometry to the linework to be polygonized.
 * May be called multiple times. Any dimension of Geometry may be added;
 * the constituent linework will be extracted and used.
 *
 * @param g a Geometry with linework to be polygonized
 */
void
Polygonizer::add(const Geometry* g)
{
    g->apply_ro(&lineStringAdder);
}

/*
 * Adds a linestring to the graph of polygon edges.
 * The graph is created lazily with the factory of the first line added.
 *
 * @param line the LineString to add
 */
void
Polygonizer::add(const LineString* line)
{
    // create a new graph using the factory from the input Geometry
    if(graph == nullptr) {
        graph.reset(new PolygonizeGraph(line->getFactory()));
    }
    graph->addEdge(line);
}
/*
 * Gets the list of polygons formed by the polygonization.
 * NOTE(review): this moves the cached list out of the polygonizer, leaving
 * polyList null — a later accessor call re-runs polygonize() on the
 * already-consumed graph. Looks intended for single use; verify with callers.
 * @return a collection of Polygons
 */
unique_ptr<vector<unique_ptr<Polygon>>>
Polygonizer::getPolygons()
{
    polygonize();
    return std::move(polyList);
}

/* public */
// Line strings classified as dangles during polygonization.
const vector<const LineString*>&
Polygonizer::getDangles()
{
    polygonize();
    return dangles;
}

bool
Polygonizer::hasDangles() {
    polygonize();
    return !dangles.empty();
}

/* public */
// Line strings classified as cut edges during polygonization.
const vector<const LineString*>&
Polygonizer::getCutEdges()
{
    polygonize();
    return cutEdges;
}

bool
Polygonizer::hasCutEdges()
{
    polygonize();
    return !cutEdges.empty();
}

/* public */
// Rings that failed validity checks, returned as line strings.
const std::vector<std::unique_ptr<LineString>>&
Polygonizer::getInvalidRingLines()
{
    polygonize();
    return invalidRingLines;
}

bool
Polygonizer::hasInvalidRingLines()
{
    polygonize();
    return !invalidRingLines.empty();
}

// True when every input line ended up in a polygon: no dangles, no cut
// edges, and no invalid rings.
bool
Polygonizer::allInputsFormPolygons()
{
    polygonize();
    return !hasCutEdges() && !hasDangles() &&!hasInvalidRingLines();
}
/* public */
// Performs the actual polygonization once; subsequent calls are no-ops.
// Pipeline: delete dangles -> delete cut edges -> build edge rings ->
// split valid/invalid rings -> classify shells vs holes -> assign holes
// to shells -> (optionally) keep only disjoint shells -> extract polygons.
void
Polygonizer::polygonize()
{
    // check if already computed
    if(polyList != nullptr) {
        return;
    }

    // if no geometries were supplied it's possible graph could be null
    if(graph == nullptr) {
        polyList.reset(new std::vector<std::unique_ptr<Polygon>>());
        return;
    }

    graph->deleteDangles(dangles);

    graph->deleteCutEdges(cutEdges);

    vector<EdgeRing*> edgeRingList;
    graph->getEdgeRings(edgeRingList);
#if GEOS_DEBUG
    cerr << "Polygonizer::polygonize(): " << edgeRingList.size() << " edgeRings in graph" << endl;
#endif
    vector<EdgeRing*> validEdgeRingList;
    invalidRingLines.clear(); /* what if it was populated already ? we should clean ! */
    findValidRings(edgeRingList, validEdgeRingList, invalidRingLines);
#if GEOS_DEBUG
    cerr << "                           " << validEdgeRingList.size() << " valid" << endl;
    cerr << "                           " << invalidRingLines.size() << " invalid" << endl;
#endif

    findShellsAndHoles(validEdgeRingList);
#if GEOS_DEBUG
    cerr << "                           " << holeList.size() << " holes" << endl;
    cerr << "                           " << shellList.size() << " shells" << endl;
#endif

    HoleAssigner::assignHolesToShells(holeList, shellList);

    bool includeAll = true;
    if (extractOnlyPolygonal) {
        findDisjointShells();
        includeAll = false;
    }
    polyList = extractPolygons(shellList, includeAll);
}
/* private */
void
Polygonizer::findValidRings(const vector<EdgeRing*>& edgeRingList,
vector<EdgeRing*>& validEdgeRingList,
vector<std::unique_ptr<LineString>>& invalidRingList)
{
for(const auto& er : edgeRingList) {
if(er->isValid()) {
validEdgeRingList.push_back(er);
}
else {
invalidRingList.push_back(er->getLineString());
}
GEOS_CHECK_FOR_INTERRUPTS();
}
}
/* private */
void
Polygonizer::findShellsAndHoles(const vector<EdgeRing*>& edgeRingList)
{
holeList.clear();
shellList.clear();
for(auto& er : edgeRingList) {
er->computeHole();
if(er->isHole()) {
holeList.push_back(er);
}
else {
shellList.push_back(er);
}
GEOS_CHECK_FOR_INTERRUPTS();
}
}
void
Polygonizer::findDisjointShells() {
findOuterShells(shellList);
for (EdgeRing *er : shellList) {
if (!er->isIncludedSet()) {
er->updateIncludedRecursive();
}
}
return;
}
// Includes every shell whose outer hole exists and has not been processed
// yet, marking that outer hole as processed.
void
Polygonizer::findOuterShells(vector<EdgeRing*>& shells)
{
    for (EdgeRing* shell : shells) {
        auto outerHole = shell->getOuterHole();
        if (outerHole == nullptr || outerHole->isProcessed()) {
            continue;
        }
        shell->setIncluded(true);
        outerHole->setProcessed(true);
    }
}
// Builds the final polygon list from the shells; when includeAll is false
// only shells flagged as included are kept.
std::unique_ptr<std::vector<std::unique_ptr<Polygon>>>
Polygonizer::extractPolygons(vector<EdgeRing*>& shells, bool includeAll)
{
    std::unique_ptr<std::vector<std::unique_ptr<Polygon>>> polygons(
        new std::vector<std::unique_ptr<Polygon>>());

    for (EdgeRing* shell : shells) {
        if (includeAll || shell->isIncluded()) {
            polygons->emplace_back(shell->getPolygon());
        }
    }

    return polygons;
}
} // namespace geos.operation.polygonize
} // namespace geos.operation
} // namespace geos
| 23.802941 | 108 | 0.640306 | AmristarSolutions |
38bef76989da03cdba92d7e5d6ceeedb90466644 | 6,662 | cpp | C++ | src/drv/hd44780/hd44780.cpp | ghsecuritylab/omef | a6b2dec8d57545c3804174883e582080ef6f3af9 | [
"MIT"
] | null | null | null | src/drv/hd44780/hd44780.cpp | ghsecuritylab/omef | a6b2dec8d57545c3804174883e582080ef6f3af9 | [
"MIT"
] | null | null | null | src/drv/hd44780/hd44780.cpp | ghsecuritylab/omef | a6b2dec8d57545c3804174883e582080ef6f3af9 | [
"MIT"
] | null | null | null | #include <stddef.h>
#include "common/assert.h"
#include "third_party/printf/printf.h"
#include "hd44780.hpp"
using namespace drv;
using namespace hal;
#define DDRAM1_MIN_ADDR 0
#define DDRAM1_MAX_ADDR 39
#define DDRAM2_MIN_ADDR 64
#define DDRAM2_MAX_ADDR 103
#define STROB_DELAY 37 // us
enum cmd_t
{
CLEAR_DISPLAY = 1 << 0,
RETURN_HOME = 1 << 1,
ENTRY_MODE_SET = 1 << 2,
DISPLAY_ON_OFF_CONTROL = 1 << 3,
CURSOR_OR_DISPLAY_SHIFT = 1 << 4,
FUNCTION_SET = 1 << 5,
SET_CGRAM_ADDRESS = 1 << 6,
SET_DDRAM_ADDRESS = 1 << 7
};
// Bits for ENTRY_MODE_SET command
enum entry_mode_set_bits_t
{
I_D_BIT = 1 << 1, // Increment/Decrement DDRAM address (cursor position):
// 0 - decrement, 1 - increment
S_BIT = 1 << 0 // Shift the dispaly with each new character
};
// Bits for DISPLAY_ON_OFF_CONTROL command
enum display_on_off_control_bits_t
{
D_BIT = 1 << 2, // On/off entire display
C_BIT = 1 << 1, // On/off cursor
B_BIT = 1 << 0 // On/off blinking cursor position
};
// Bits for CURSOR_OR_DISPLAY_SHIFT command
enum cursor_or_display_shift_bits_t
{
S_C_BIT = 1 << 3, // Shift display or cursor: 0 - cursor, 1 - display
R_L_BIT = 1 << 2 // Direction of shift: 0 - to the left, 1 - to the right
};
// Bits for FUNCTION_SET command
enum function_set_bits_t
{
DL_BIT = 1 << 4, // Interface data length: 0 - 4 bit, 1 - 8 bit
N_BIT = 1 << 3, // Number of display lines: 0 - one line, 1 - two line
F_BIT = 1 << 2, // Character font: 0 - 5x8, 1 - 5x10
FT1_BIT = 1 << 1, // Font table: (FT1:FT0)
FT0_BIT = 1 << 0, // 00 - ENGLISH_JAPANESE
// 01 - WESTERN EUROPEAN
// 10 - ENGLISH_RUSSIAN
// 11 - N/A
};
// Constructs the driver for a display wired in 4-bit mode.
// rs/rw/e are the control lines; db4..db7 carry the high data nibble; tim
// is a hardware timer used for the microsecond delays the controller
// requires. All pins must already be configured as digital outputs; they
// are driven high (idle) here.
hd44780::hd44780(gpio &rs, gpio &rw, gpio &e, gpio &db4, gpio &db5, gpio &db6,
	gpio &db7, tim &tim):
	_rs(rs),
	_rw(rw),
	_e(e),
	_db{&db4, &db5, &db6, &db7},
	_tim(tim)
{
	ASSERT(_rs.mode() == gpio::MODE_DO);
	ASSERT(_rw.mode() == gpio::MODE_DO);
	ASSERT(_e.mode() == gpio::MODE_DO);

	_rs.set(1);
	_rw.set(1);
	_e.set(1);
	for(uint8_t i = 0; i < (sizeof(_db) / sizeof(_db[0])); i++)
	{
		ASSERT(_db[i]->mode() == gpio::MODE_DO);
		_db[i]->set(1);
	}

	// Timer interrupt wakes the task blocked inside delay()
	_tim.cb(tim_cb, &task);

	// Mutex serializing all public API calls
	ASSERT(api_lock = xSemaphoreCreateMutex());
}

hd44780::~hd44780()
{
	vSemaphoreDelete(api_lock);
}
// Runs the 4-bit initialization sequence: three FUNCTION_SET writes (with
// the datasheet-prescribed delays) force the controller into a known state,
// then 4-bit two-line mode is selected, the display is turned on, cleared,
// and left-to-right entry mode is set.
// NOTE(review): timing follows the reset-by-instruction procedure; the
// 6.2 ms after CLEAR_DISPLAY accommodates OLED variants — confirm against
// the datasheet of the attached module.
void hd44780::init()
{
	xSemaphoreTake(api_lock, portMAX_DELAY);

	_rw.set(0);
	_rs.set(0);

	write_4bit(FUNCTION_SET >> 4);
	delay(4100);
	write_4bit(FUNCTION_SET >> 4);
	delay(100);
	write_4bit(FUNCTION_SET >> 4);
	write_4bit((FUNCTION_SET | N_BIT) >> 4);

	write(CMD, FUNCTION_SET | N_BIT);
	write(CMD, DISPLAY_ON_OFF_CONTROL | D_BIT);
	write(CMD, CLEAR_DISPLAY);
	delay(6200); // OLED display requires 6,2 ms rather than 1,53 ms
	write(CMD, ENTRY_MODE_SET | I_D_BIT);

	xSemaphoreGive(api_lock);
}
// printf-style output starting at the given DDRAM (cursor) address.
// Valid addresses: 0..39 (line 1) and 64..103 (line 2).
// Returns the DDRAM address immediately after the last written character
// (start address + formatted length).
uint8_t hd44780::print(uint8_t ddram_addr, const char *format, ...)
{
	ASSERT(format);
	ASSERT((ddram_addr >= DDRAM1_MIN_ADDR && ddram_addr <= DDRAM1_MAX_ADDR) ||
		(ddram_addr >= DDRAM2_MIN_ADDR && ddram_addr <= DDRAM2_MAX_ADDR));

	xSemaphoreTake(api_lock, portMAX_DELAY);

	write(CMD, SET_DDRAM_ADDRESS | ddram_addr);

	va_list args;
	va_start(args, format);
	char message[DDRAM2_MAX_ADDR] = {};
	// vsnprintf_ returns the formatted length; the cursor advances by one
	// per character written.
	uint8_t new_ddram_addr = vsnprintf_(message, sizeof(message) - 1,
		format, args) + ddram_addr;
	for(uint8_t i = 0; message[i] != '\0'; i++)
		write(DATA, message[i]);
	va_end(args);

	xSemaphoreGive(api_lock);
	return new_ddram_addr;
}
// Writes a single character at the given DDRAM address.
// Returns the DDRAM address following the written character, consistent
// with the formatted print() overload (which returns start + length).
// Fix: the original "return ddram_addr++;" post-increment was dead — it
// returned the OLD address; the function's own TODO flagged this.
uint8_t hd44780::print(uint8_t ddram_addr, char byte)
{
	ASSERT((ddram_addr >= DDRAM1_MIN_ADDR && ddram_addr <= DDRAM1_MAX_ADDR) ||
		(ddram_addr >= DDRAM2_MIN_ADDR && ddram_addr <= DDRAM2_MAX_ADDR));

	xSemaphoreTake(api_lock, portMAX_DELAY);

	write(CMD, SET_DDRAM_ADDRESS | ddram_addr);
	write(DATA, byte);

	xSemaphoreGive(api_lock);

	return ddram_addr + 1;
}
// Reads back the current DDRAM (cursor) address from the controller.
uint8_t hd44780::ddram_addr()
{
	xSemaphoreTake(api_lock, portMAX_DELAY);

	// 7 bit - busy flag
	// 0:6 bits - ddram/cgram address
	uint8_t addr = read_bf_and_ddram_addr() & 0b01111111;

	xSemaphoreGive(api_lock);
	return addr;
}

// Clears the entire display and returns the cursor to address 0.
void hd44780::clear()
{
	xSemaphoreTake(api_lock, portMAX_DELAY);

	write(CMD, CLEAR_DISPLAY);
	delay(6200); // OLED display requires 6,2 ms unlike LCD (1,53 ms)

	xSemaphoreGive(api_lock);
}
// Uploads 8 user-defined glyphs (8 bytes each) into CGRAM.
// The DDRAM address that was active before the call is restored afterwards.
void hd44780::write_cgram(uint8_t buff[8][8])
{
	xSemaphoreTake(api_lock, portMAX_DELAY);

	// Save the cursor position so it can be restored after the CGRAM write.
	uint8_t old_addr = read_bf_and_ddram_addr() & 0b01111111;
	write(CMD, SET_CGRAM_ADDRESS);

	uint8_t *p = &buff[0][0];
	for(uint8_t i = 0; i < 64; i++)
	{
		write(DATA, p[i]);
	}
	write(CMD, SET_DDRAM_ADDRESS | old_addr);

	xSemaphoreGive(api_lock);
}

// Reads the 8 user-defined glyphs back out of CGRAM.
// The DDRAM address that was active before the call is restored afterwards.
void hd44780::read_cgram(uint8_t buff[8][8])
{
	xSemaphoreTake(api_lock, portMAX_DELAY);

	uint8_t old_addr = read_bf_and_ddram_addr() & 0b01111111;
	write(CMD, SET_CGRAM_ADDRESS);

	// Switch the data pins to inputs for the read-back.
	for(uint8_t i = 0; i < (sizeof(_db) / sizeof(_db[0])); i++)
		_db[i]->mode(gpio::MODE_DI);
	_rw.set(1);
	_rs.set(1);

	// Each CGRAM byte arrives as two nibbles, high nibble first.
	uint8_t *p = &buff[0][0];
	for(uint8_t i = 0; i < 64; i++)
	{
		p[i] = read_4bit() << 4;
		p[i] |= read_4bit();
	}

	// Restore the data pins to outputs and the previous cursor position.
	for(uint8_t i = 0; i < (sizeof(_db) / sizeof(_db[0])); i++)
		_db[i]->mode(gpio::MODE_DO);
	write(CMD, SET_DDRAM_ADDRESS | old_addr);

	xSemaphoreGive(api_lock);
}
// Reads the busy-flag/address byte from the controller
// (bit 7 = busy flag, bits 0..6 = current DDRAM/CGRAM address).
// Temporarily switches the data pins to inputs.
uint8_t hd44780::read_bf_and_ddram_addr()
{
	for(uint8_t i = 0; i < (sizeof(_db) / sizeof(_db[0])); i++)
		_db[i]->mode(gpio::MODE_DI);
	_rw.set(1);
	_rs.set(0);

	uint8_t byte = read_4bit() << 4;
	byte |= read_4bit();

	for(uint8_t i = 0; i < (sizeof(_db) / sizeof(_db[0])); i++)
		_db[i]->mode(gpio::MODE_DO);

	return byte;
}

// Clocks one nibble (low 4 bits of half_byte) out to the controller with
// an E strobe, waiting STROB_DELAY us around the edge.
void hd44780::write_4bit(uint8_t half_byte)
{
	for(uint8_t i = 0; i < 4; i++)
		_db[i]->set((half_byte >> i) & 1);

	_e.set(1);
	delay(STROB_DELAY);
	_e.set(0);
	delay(STROB_DELAY);
}

// Writes a full byte as two nibbles (high first); `type` selects the
// command register (RS=0) or the data register (RS=1).
void hd44780::write(write_t type, uint8_t byte)
{
	_rw.set(0);
	_rs.set(type == CMD ? 0 : 1);

	write_4bit(byte >> 4);
	write_4bit(byte);
}

// Clocks one nibble in from the controller; the data pins must already be
// configured as inputs by the caller.
uint8_t hd44780::read_4bit()
{
	uint8_t half_byte = 0;

	_e.set(1);
	delay(STROB_DELAY);
	for(uint8_t i = 0; i < 4; i++)
	{
		if(_db[i]->get())
			half_byte |= 1 << i;
	}
	_e.set(0);
	delay(STROB_DELAY);

	return half_byte;
}
void hd44780::tim_cb(tim *tim, void *ctx)
{
TaskHandle_t *_task = (TaskHandle_t *)ctx;
BaseType_t hi_task_woken = 0;
vTaskNotifyGiveFromISR(*_task, &hi_task_woken);
portYIELD_FROM_ISR(hi_task_woken);
}
// Busy-free microsecond delay: arms a one-shot hardware timer and
// blocks the calling task until tim_cb() notifies it from the ISR.
void hd44780::delay(uint32_t us)
{
	// Remember which task to wake; tim_cb() receives it via its ctx pointer.
	task = xTaskGetCurrentTaskHandle();
	_tim.us(us);
	_tim.start();
	ulTaskNotifyTake(true, portMAX_DELAY);
}
| 21.700326 | 81 | 0.644401 | ghsecuritylab |
38c0a940ef7fac4fe1b38094bc3cda98e082d9fa | 324 | cpp | C++ | elastic-circuits/examples/string_match.cpp | minseongg/dynamatic | 268d97690f128569da46e4f39a99346e93ee9d4e | [
"MIT"
] | 46 | 2019-11-16T13:44:07.000Z | 2022-03-12T14:28:44.000Z | elastic-circuits/examples/string_match.cpp | minseongg/dynamatic | 268d97690f128569da46e4f39a99346e93ee9d4e | [
"MIT"
] | 11 | 2020-05-12T17:20:51.000Z | 2022-02-04T10:04:59.000Z | elastic-circuits/examples/string_match.cpp | minseongg/dynamatic | 268d97690f128569da46e4f39a99346e93ee9d4e | [
"MIT"
] | 22 | 2020-02-21T21:33:40.000Z | 2022-02-24T06:50:41.000Z | // Finds an occurrence of x in y
// n is the length of x, m is the length of y
// Naive string matching: returns the index of the first occurrence of
// pattern x (length n) inside text y (length m), or -1 if absent.
int substring(char x[], char y[], int n, int m) {
    for (int start = 0; start + n <= m; ++start) {
        int matched = 0;
        for (; matched < n; ++matched) {
            if (x[matched] != y[start + matched])
                break;
        }
        if (matched == n)
            return start;  // full pattern matched at this offset
    }
    return -1;  // no occurrence
}
| 24.923077 | 50 | 0.41358 | minseongg |
38c1132d12c4d5dc2cd9f95704f1968988993dc8 | 693 | cpp | C++ | code/K-th Smallest Prime Fraction.cpp | htfy96/leetcode-solutions | 4736e87958d7e5aea3cbd999f88c7a86de13205a | [
"Apache-2.0"
] | 1 | 2021-02-21T15:43:13.000Z | 2021-02-21T15:43:13.000Z | code/K-th Smallest Prime Fraction.cpp | htfy96/leetcode-solutions | 4736e87958d7e5aea3cbd999f88c7a86de13205a | [
"Apache-2.0"
] | null | null | null | code/K-th Smallest Prime Fraction.cpp | htfy96/leetcode-solutions | 4736e87958d7e5aea3cbd999f88c7a86de13205a | [
"Apache-2.0"
] | 1 | 2018-12-13T07:14:09.000Z | 2018-12-13T07:14:09.000Z | class Solution {
    // Counts the fractions A[j] / A[i] (j < i) whose value is strictly
    // below x. Relies on A being sorted ascending: for each denominator
    // A[i], the numerators A[j] < x * A[i] form a prefix, found with
    // lower_bound on A[0..i).
    static int count_smaller(double x, const vector<int>& A) {
        int ans = 0;
        for (int i=0; i<A.size(); ++i)
            ans += lower_bound(A.begin(), A.begin() + i, x * A[i]) - A.begin();
        return ans;
    }
public:
vector<int> kthSmallestPrimeFraction(vector<int>& A, int K) {
double l = 0.0, r = 1.0;
while (r-l > 1e-14) {
//cout << "l=" << l << " r=" << r << endl;
double mid = (l+r) / 2, result = count_smaller(mid, A);
if (result < K) l = mid; else r = mid;
}
for (int v: A)
if (fabs(round(r * v) - r * v) < 1e-5)
return {r * v + 0.9999, v};
}
}; | 34.65 | 79 | 0.444444 | htfy96 |
38c26fcac0c58012256f14c76372a4f4358826a9 | 3,244 | cpp | C++ | labs/functions/pointDistance.cpp | rambasnet/CPP-Fundamentals | bc2fa9fddac95ffaca56d3251842f35c52eb76b8 | [
"MIT"
] | 6 | 2021-03-12T10:02:23.000Z | 2022-01-11T12:27:41.000Z | labs/functions/pointDistance.cpp | rambasnet/CPPFundamentals | d0dabf1969b2a084b15e8c0bbba3f70045b263f5 | [
"MIT"
] | 1 | 2021-03-13T18:07:37.000Z | 2021-05-12T09:09:17.000Z | labs/functions/pointDistance.cpp | rambasnet/CPPFundamentals | d0dabf1969b2a084b15e8c0bbba3f70045b263f5 | [
"MIT"
] | 10 | 2021-03-12T10:02:33.000Z | 2022-03-07T23:20:39.000Z | /*
Functions Lab
Updated By: FIXME1
CSCI 111
Date: FIXME2
Program prompts the user to enter two points in the form (x1, y1) and (x2, y2) and finds the distance between the two points using a function.
Algorithm steps:
1. Define a function called findDistance(…) that takes four parameters x1, y1 and x2, y2 as two points
a. finds the distance between them using the equation: √((x2-x1)^2 + (y2-y1)^2)
b. returns the calculated distance value
2. Prompt user to enter two points in (x, y) format
3. Store them into 4 individual variables
4. Call function getDistance by passing 4 entered numbers as arguments
5. Display results with proper description. Format output numbers to 2 decimal points.
6. Test and validate that program output is correct for a given set of input points.
7. BONUS - (10 points) Using a loop repeat step 2-6 until the user wants to quit.
*/
#include <iostream>
#include <cstdio>
#include <cassert>
#include <cmath>
using namespace std;
const float epsilon = 1e-5; // 0.00001 accuracy upto 5 decimal points; error of margin
// function prototypes
// Function that calculates the distance between two points
// x1, y1 and x2, y2 and returns the calculated value
double findDistance(int, int, int, int);
// test function that runs automated testing
void test();
// function clears the screen system call
// NOTE: system call is not a security best pracice!
// Clears the terminal via a shell command ("cls" on Windows,
// "clear" on Mac/Linux).
// NOTE: system call is not a security best practice!
// Fix: the Windows branch passed "clS"; the command happens to work
// only because cmd.exe is case-insensitive - normalized to "cls" to
// match the comment and convention.
void clearScreen() {
    // use "cls" in windows and "clear" command in Mac and Linux
    #ifdef _WIN32
        system("cls");
    #else
        system("clear");
    #endif
}
// Lab driver: reads two points in "(x, y)" form, then (once the FIXMEs
// are completed by the student) computes and prints their distance.
// The FIXME markers are part of the assignment and are intentionally
// left in place.
int main()
{
    int x1, y1, x2, y2; // variables to store two points (x1, y1) and (x2, y2)
    char ch;            // scratch variable that swallows '(', ',' and ')' while parsing

    //FIXME-bonus - 10 bonus points - add loop until user wants to quit
    // the loop will execute the following block of code
    {
        clearScreen();
        cout << "Program calculates distance between 2 points on a 2D coordinate." << endl;
        cout << "Enter a point in the form (x, y): ";
        // parse the input stream
        cin >> ch >> x1 >> ch >> y1 >> ch; // value stored in ch is ignored
        printf("(x1, y1) = (%d, %d)\n", x1, y1);
        cout << "Enter a second point in the form (x, y): ";
        //FIXME3 - Read/parse the second point and store data into variables x2 and y2

        //FIXME4 - Call test function

        //FIXME5 - call findDistance function passing proper arguments

        //FIXME6 – Using printf function display the returned distance with proper description

    }
    // drain the rest of the input line before the final "Enter to quit" pause
    cin.ignore(1000, '\n');
    cout << "Enter to quit the program: ";
    cin.get();
    cout << "Good bye..." << endl;
    return 0;
}
// Returns the Euclidean distance between points (x1, y1) and (x2, y2)
// using sqrt((x2-x1)^2 + (y2-y1)^2).
// Resolves FIXME7: the previous stub returned 0.0, which fails the
// file's own test() expectation of ~2.236067 for (4,3)-(5,1).
double findDistance(int x1, int y1, int x2, int y2)
{
    double dx = x2 - x1;
    double dy = y2 - y1;
    return sqrt(dx * dx + dy * dy);
}
// test function that test findDistance function with 3 test cases
// Automated self-test for findDistance (resolves FIXME8 by adding two
// more cases: the classic 3-4-5 triangle and identical points).
void test()
{
    float result = findDistance(4, 3, 5, 1);
    float expected = 2.236067f;
    assert( fabs(result - expected) <= epsilon); //accept the result if it's less than the error of margin
    // FIXME8 resolved - two additional test cases:
    assert( fabs(findDistance(0, 0, 3, 4) - 5.0f) <= epsilon);  // 3-4-5 right triangle
    assert( fabs(findDistance(2, 2, 2, 2) - 0.0f) <= epsilon);  // identical points -> 0
    cerr << "all tests passed..." << endl;
}
| 34.147368 | 142 | 0.665845 | rambasnet |
38c5698195e603bcb0e4e82ab49a08cfac92fe4a | 894 | hpp | C++ | modules/boost/simd/swar/include/boost/simd/swar/functions/details/random_permute.hpp | pbrunet/nt2 | 2aeca0f6a315725b335efd5d9dc95d72e10a7fb7 | [
"BSL-1.0"
] | null | null | null | modules/boost/simd/swar/include/boost/simd/swar/functions/details/random_permute.hpp | pbrunet/nt2 | 2aeca0f6a315725b335efd5d9dc95d72e10a7fb7 | [
"BSL-1.0"
] | null | null | null | modules/boost/simd/swar/include/boost/simd/swar/functions/details/random_permute.hpp | pbrunet/nt2 | 2aeca0f6a315725b335efd5d9dc95d72e10a7fb7 | [
"BSL-1.0"
] | null | null | null | //==============================================================================
// Copyright 2003 - 2012 LASMEA UMR 6602 CNRS/Univ. Clermont II
// Copyright 2009 - 2012 LRI UMR 8623 CNRS/Univ Paris Sud XI
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
#ifndef BOOST_SIMD_SWAR_FUNCTIONS_DETAILS_RANDOM_PERMUTE_HPP_INCLUDED
#define BOOST_SIMD_SWAR_FUNCTIONS_DETAILS_RANDOM_PERMUTE_HPP_INCLUDED
#include <boost/mpl/at.hpp>
namespace boost{ namespace simd{ namespace details
{
  // Metafunction describing an arbitrary lane permutation: lane `Index`
  // of the result is read from position at<IndexMap, Index> of the input.
  // IndexMap is a Boost.MPL sequence of index constants.
  template<class IndexMap>
  struct random_permute
  {
    // Cardinal (the vector width) is part of the permutation protocol
    // but unused by this particular permutation.
    template<class Index, class Cardinal>
    struct apply : mpl::at<IndexMap,Index>
    {};
  };
} } }
} } }
#endif
| 34.384615 | 80 | 0.57047 | pbrunet |
38c8116387f0f017c919e518abe6b6d3a6d25320 | 1,162 | cpp | C++ | src/core/test/test_musher_utils.cpp | jmaldon1/Musher | 58f4c8bde4c314821f15bce27555896a00935c1c | [
"MIT"
] | 4 | 2019-11-11T22:57:33.000Z | 2020-11-30T03:12:44.000Z | src/core/test/test_musher_utils.cpp | jmaldon1/Musher | 58f4c8bde4c314821f15bce27555896a00935c1c | [
"MIT"
] | null | null | null | src/core/test/test_musher_utils.cpp | jmaldon1/Musher | 58f4c8bde4c314821f15bce27555896a00935c1c | [
"MIT"
] | 1 | 2019-11-13T16:45:30.000Z | 2019-11-13T16:45:30.000Z | #include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "src/core/test/gtest_extras.h"
#include "src/core/test/utils.h"
#include "src/core/utils.h"
using namespace musher::core;
using namespace musher::core::test;
TEST(TestUtils, Uint8_tVectorToHexString) {
  // A byte vector must serialize to its concatenated, zero-padded hex form.
  std::vector<uint8_t> input_bytes = { 1, 2, 3, 4, 5 };
  std::string converted = Uint8VectorToHexString(input_bytes);
  EXPECT_EQ(converted, std::string("0102030405"));
}
TEST(TestUtils, GetStringBetweenSingleQuotes) {
  // Only the text enclosed in single quotes should be extracted.
  std::string quoted_input = "Hello This is a 'Test'";
  std::string extracted = StrBetweenSQuotes(quoted_input);
  EXPECT_EQ(extracted, std::string("Test"));
}
TEST(TestUtils, Deinterweave) {
  // De-interleaving must split alternating samples into two channels.
  std::vector<double> interleaved({ 1., 9., 2., 8., 3., 7., 4., 6. });
  std::vector<std::vector<double>> channels = Deinterweave(interleaved);

  std::vector<std::vector<double>> expected_channels({
      { 1., 2., 3., 4. },
      { 9., 8., 7., 6. },
  });
  EXPECT_MATRIX_EQ(expected_channels, channels)
}
| 29.794872 | 96 | 0.713425 | jmaldon1 |
38c95e37da5fde4525978b6627c8e2429888881b | 8,581 | cpp | C++ | branches/g3d-8.0-64ffmpeg-win/G3D.lib/source/GImage_png.cpp | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | [
"BSD-3-Clause"
] | null | null | null | branches/g3d-8.0-64ffmpeg-win/G3D.lib/source/GImage_png.cpp | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | [
"BSD-3-Clause"
] | null | null | null | branches/g3d-8.0-64ffmpeg-win/G3D.lib/source/GImage_png.cpp | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | [
"BSD-3-Clause"
] | null | null | null | /**
@file GImage_png.cpp
@author Morgan McGuire, http://graphics.cs.williams.edu
@created 2002-05-27
@edited 2009-04-20
*/
#include "G3D/platform.h"
#include "G3D/GImage.h"
#include "G3D/BinaryInput.h"
#include "G3D/BinaryOutput.h"
#include "G3D/Log.h"

#include <png.h>

#include <vector>
namespace G3D {
//libpng required function signature
// libpng read callback: pulls `length` bytes from the BinaryInput that
// was registered via png_set_read_fn (libpng-required signature).
// Fix: dropped `debugAssert(length >= 0)` - png_size_t is unsigned, so
// the comparison was a tautology (dead assert / -Wtype-limits warning).
static void png_read_data(
    png_structp png_ptr,
    png_bytep data,
    png_size_t length) {

    debugAssert( png_get_io_ptr(png_ptr) != NULL );
    debugAssert( data != NULL );

    ((BinaryInput*)png_get_io_ptr(png_ptr))->readBytes(data, length);
}
//libpng required function signature
// libpng write callback: pushes `length` bytes into the BinaryOutput
// registered via png_set_write_fn (libpng-required signature).
static void png_write_data(png_structp png_ptr,
                           png_bytep data,
                           png_size_t length) {
    debugAssert( png_get_io_ptr(png_ptr) != NULL );
    debugAssert( data != NULL );

    ((BinaryOutput*)png_get_io_ptr(png_ptr))->writeBytes(data, length);
}
//libpng required function signature
// libpng flush callback: intentionally a no-op, since BinaryOutput
// buffers in memory and needs no flushing (libpng-required signature).
static void png_flush_data(
    png_structp png_ptr) {
    (void)png_ptr;
    //Do nothing.
}
//libpng required function signature
// libpng fatal-error callback: converts libpng's error into a C++
// GImage::Error exception instead of the default setjmp/longjmp path.
// Note: this callback never returns to libpng (it throws).
static void png_error(
    png_structp png_ptr,
    png_const_charp error_msg) {
    (void)png_ptr;
    debugAssert( error_msg != NULL );

    throw GImage::Error(error_msg, "PNG");
}
//libpng required function signature
// libpng warning callback: routes non-fatal libpng warnings to the log.
// Fix: declared `static` like the other callbacks in this file - with
// external linkage this definition could collide with libpng's own
// exported png_warning symbol at link time.
static void png_warning(
    png_structp png_ptr,
    png_const_charp warning_msg) {
    (void)png_ptr;
    debugAssert( warning_msg != NULL );

    Log::common()->println(warning_msg);
}
// Encodes this image (1, 3 or 4 channels, 8 bits per channel) as PNG
// into `out`. Throws GImage::Error on invalid dimensions/channels or
// on libpng failure (the png_error callback throws).
// Fix: the raw `new png_bytep[]` row-pointer array leaked whenever
// png_write_image/png_write_end threw through the png_error callback;
// std::vector releases it during unwinding.
void GImage::encodePNG(
    BinaryOutput& out) const {

    if (! (m_channels == 1 || m_channels == 3 || m_channels == 4)) {
        throw GImage::Error(format("Illegal channels for PNG: %d", m_channels), out.getFilename());
    }
    if (m_width <= 0) {
        throw GImage::Error(format("Illegal width for PNG: %d", m_width), out.getFilename());
    }
    if (m_height <= 0) {
        throw GImage::Error(format("Illegal height for PNG: %d", m_height), out.getFilename());
    }
    // PNG library requires that the height * pointer size fit within an int
    if (png_uint_32(m_height) * png_sizeof(png_bytep) > PNG_UINT_32_MAX) {
        throw GImage::Error("Unsupported PNG height.", out.getFilename());
    }

    // PNG data is stored little-endian on disk.
    out.setEndian(G3D_LITTLE_ENDIAN);

    png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, png_error, png_warning);
    if (! png_ptr) {
        throw GImage::Error("Unable to initialize PNG encoder.", out.getFilename());
    }

    png_infop info_ptr = png_create_info_struct(png_ptr);
    if (! info_ptr) {
        png_destroy_write_struct(&png_ptr, &info_ptr);
        throw GImage::Error("Unable to initialize PNG encoder.", out.getFilename());
    }

    //setup libpng write handler so can use BinaryOutput
    png_set_write_fn(png_ptr, (void*)&out, png_write_data, png_flush_data);

    // Significant-bit info: 8 bits per present channel, 0 for absent ones.
    png_color_8_struct sig_bit;

    switch (m_channels) {
    case 1:
        png_set_IHDR(png_ptr, info_ptr, m_width, m_height, 8, PNG_COLOR_TYPE_GRAY,
                    PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
        sig_bit.red = 0;
        sig_bit.green = 0;
        sig_bit.blue = 0;
        sig_bit.alpha = 0;
        sig_bit.gray = 8;
        break;

    case 3:
        png_set_IHDR(png_ptr, info_ptr, m_width, m_height, 8, PNG_COLOR_TYPE_RGB,
                    PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
        sig_bit.red = 8;
        sig_bit.green = 8;
        sig_bit.blue = 8;
        sig_bit.alpha = 0;
        sig_bit.gray = 0;
        break;

    case 4:
        png_set_IHDR(png_ptr, info_ptr, m_width, m_height, 8, PNG_COLOR_TYPE_RGBA,
                    PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
        sig_bit.red = 8;
        sig_bit.green = 8;
        sig_bit.blue = 8;
        sig_bit.alpha = 8;
        sig_bit.gray = 0;
        break;

    default:
        png_destroy_write_struct(&png_ptr, &info_ptr);
        throw GImage::Error("Unsupported number of channels for PNG.", out.getFilename());
    }

    png_set_sBIT(png_ptr, info_ptr, &sig_bit);

    //write the png header
    png_write_info(png_ptr, info_ptr);

    // Row pointers into the (tightly packed) pixel buffer. A vector is
    // used so the array is freed even if libpng throws below.
    std::vector<png_bytep> row_pointers(m_height);
    for (int i=0; i < m_height; ++i) {
        row_pointers[i] = (png_bytep)&m_byte[m_width * m_channels * i];
    }

    png_write_image(png_ptr, row_pointers.data());
    png_write_end(png_ptr, info_ptr);

    png_destroy_write_struct(&png_ptr, &info_ptr);
}
// Decodes a PNG from `input` into this image's pixel buffer, expanding
// palettes / sub-8-bit depths to 8 bits per channel and stripping
// 16-bit samples down to 8. Throws GImage::Error on failure.
// Fix: the gray-channel buffer rounding used `iCeil(m_height / 8) * 8`,
// but m_height / 8 is INTEGER division, so iCeil was a no-op and the
// expression rounded DOWN - under-allocating the buffer for any height
// not divisible by 8, contrary to its own "round up" comment.
void GImage::decodePNG(
    BinaryInput& input) {

    png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, png_error, png_warning);
    if (png_ptr == NULL) {
        throw GImage::Error("Unable to initialize PNG decoder.", input.getFilename());
    }

    png_infop info_ptr = png_create_info_struct(png_ptr);
    if (info_ptr == NULL) {
        png_destroy_read_struct(&png_ptr, (png_infopp)NULL, (png_infopp)NULL);
        throw GImage::Error("Unable to initialize PNG decoder.", input.getFilename());
    }

    png_infop end_info = png_create_info_struct(png_ptr);
    if (end_info == NULL) {
        png_destroy_read_struct(&png_ptr, &info_ptr, (png_infopp)NULL);
        throw GImage::Error("Unable to initialize PNG decoder.", input.getFilename());
    }

    // now that the libpng structures are setup, change the error handlers and read routines
    // to use G3D functions so that BinaryInput can be used.

    png_set_read_fn(png_ptr, (png_voidp)&input, png_read_data);

    // read in sequentially so that three copies of the file are not in memory at once
    png_read_info(png_ptr, info_ptr);

    png_uint_32 png_width, png_height;
    int bit_depth, color_type, interlace_type;
    // this will validate the data it extracts from info_ptr
    png_get_IHDR(png_ptr, info_ptr, &png_width, &png_height, &bit_depth, &color_type,
        &interlace_type, NULL, NULL);

    if (color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
        png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
        throw GImage::Error("Unsupported PNG color type - PNG_COLOR_TYPE_GRAY_ALPHA.", input.getFilename());
    }

    m_width = static_cast<uint32>(png_width);
    m_height = static_cast<uint32>(png_height);

    //swap bytes of 16 bit files to least significant byte first
    png_set_swap(png_ptr);
    png_set_strip_16(png_ptr);

    //Expand paletted colors into true RGB triplets
    if (color_type == PNG_COLOR_TYPE_PALETTE) {
        png_set_palette_to_rgb(png_ptr);
    }

    //Expand grayscale images to the full 8 bits from 1, 2, or 4 bits/pixel
    if (color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8) {
        png_set_expand(png_ptr);
    }

    //Expand paletted or RGB images with transparency to full alpha channels
    //so the data will be available as RGBA quartets.
    if (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS)) {
        png_set_tRNS_to_alpha(png_ptr);
    }

    // Fix sub-8 bit_depth to 8bit
    if (bit_depth < 8) {
        png_set_packing(png_ptr);
    }

    png_bytep trans_alpha; // unused
    int num_trans; // used.
    png_color_16p trans_color;
    png_get_tRNS(png_ptr, info_ptr, &trans_alpha, &num_trans, &trans_color);

    // Choose the channel count from the (post-transform) color type and
    // allocate the destination buffer.
    if ((color_type == PNG_COLOR_TYPE_RGBA) ||
        ((color_type == PNG_COLOR_TYPE_PALETTE) && (num_trans > 0)) ) {
        m_channels = 4;
        m_byte = (uint8*)m_memMan->alloc(m_width * m_height * 4);
    } else if ((color_type == PNG_COLOR_TYPE_RGB) ||
               (color_type == PNG_COLOR_TYPE_PALETTE)) {
        m_channels = 3;
        // NOTE(review): this branch uses System::malloc while the others
        // use m_memMan->alloc - confirm the free path handles both, or
        // unify the allocators.
        m_byte = (uint8*)System::malloc(m_width * m_height * 3);
    } else if (color_type == PNG_COLOR_TYPE_GRAY) {
        m_channels = 1;
        // Round up to the nearest 8 rows to avoid a bug in the PNG decoder
        // (integer arithmetic: (h + 7) / 8 rounds the row count UP).
        int h = ((m_height + 7) / 8) * 8;
        int sz = m_width * h;
        m_byte = (uint8*)m_memMan->alloc(sz);
    } else {
        throw GImage::Error("Unsupported PNG bit-depth or type.", input.getFilename());
    }

    //since we are reading row by row, required to handle interlacing
    uint32 number_passes = png_set_interlace_handling(png_ptr);
    png_read_update_info(png_ptr, info_ptr);

    for (uint32 pass = 0; pass < number_passes; ++pass) {
        for (uint32 y = 0; y < (uint32)m_height; ++y) {
            png_bytep rowPointer = &m_byte[m_width * m_channels * y];
            png_read_rows(png_ptr, &rowPointer, NULL, 1);
        }
    }

    //  png_read_image(png_ptr, &_byte);

    png_read_end(png_ptr, info_ptr);
    png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
}
| 30.429078 | 108 | 0.663326 | brown-ccv |
38ca6e7461ceb6687bc335e05c40db489d7ccd06 | 3,550 | hxx | C++ | src/engine/ivp/ivp_utility/ivu_float.hxx | cstom4994/SourceEngineRebuild | edfd7f8ce8af13e9d23586318350319a2e193c08 | [
"MIT"
] | 6 | 2022-01-23T09:40:33.000Z | 2022-03-20T20:53:25.000Z | src/engine/ivp/ivp_utility/ivu_float.hxx | cstom4994/SourceEngineRebuild | edfd7f8ce8af13e9d23586318350319a2e193c08 | [
"MIT"
] | null | null | null | src/engine/ivp/ivp_utility/ivu_float.hxx | cstom4994/SourceEngineRebuild | edfd7f8ce8af13e9d23586318350319a2e193c08 | [
"MIT"
] | 1 | 2022-02-06T21:05:23.000Z | 2022-02-06T21:05:23.000Z | #ifdef WIN32
#include <float.h>
#endif
// IEEE-754 single-precision overlay used to read the exponent field
// directly (fast integer log2). Bitfield order depends on endianness.
// Fix: the inner endianness #if/#else had no matching #endif, leaving
// the conditional block unbalanced (the outer #else below could not
// pair up); also removed a stray ';' empty declaration inside the
// big-endian bitfield struct.
#if defined(IVP_NO_DOUBLE) && !defined(SUN)
#	include <math.h>
#	if defined(WIN32) || defined(PSXII) || defined(LINUX)
// Little-endian layout: mantissa bits first, sign bit last.
union p_float_ieee { IVP_FLOAT val;
		     struct {
			 unsigned int valh:23; unsigned int exp:8; unsigned int signum:1;
		     } ln; };
#	else
// Big-endian layout: sign bit first.
union p_float_ieee { IVP_FLOAT val;
		     struct {
			 unsigned int signum:1; unsigned int exp:8; unsigned int valh:23;
		     } ln; };
#	endif
// Exponent bias of IEEE-754 single precision.
#define IVP_EXP_FOR_ONE 0x7f
// Fast floor(log2(|a|)) read straight from the raw exponent field.
inline int PFM_LD(float a){ return ((p_float_ieee *)&(a))->ln.exp - IVP_EXP_FOR_ONE; };
#else
#	if defined(LINUX) || defined(WIN32)
// IEEE-754 double-precision overlay, little-endian layout: the low
// 32 mantissa bits come first, then 20 high mantissa bits, the 11-bit
// exponent and the sign. ln2 exposes the raw low/high 32-bit halves.
union p_double_ieee {
    IVP_DOUBLE val;
    struct {
        int val;
        unsigned int valh: 20;
        unsigned int exp: 11;
        unsigned int signum: 1;
    } ln;
    struct {
        int l;
        int h;
    } ln2;
};

// Exponent bias of IEEE-754 double precision.
#define IVP_EXP_FOR_ONE 0x3ff

// Fast floor(log2(|a|)) read straight from the raw exponent field.
inline int PFM_LD(double a) { return ((p_double_ieee *) &(a))->ln.exp - IVP_EXP_FOR_ONE; };

#	endif
#	if defined(SUN) || defined(SUN4) || defined(__POWERPC__) || defined(GEKKO)
// Big-endian layout: sign / exponent / mantissa high bits first.
union p_double_ieee {
    double val;
    struct {
        unsigned int signum:1;
        unsigned int exp:11;
        unsigned int valh:20;
        int val;
    } ln;
    struct {
        int h;
        int l;
    } ln2;
};

#	define P_EXP_FOR_ONE 0x3ff
// Fast floor(log2(|a|)) via the raw exponent field (big-endian variant).
inline int PFM_LD(double a){ return ((p_double_ieee *)&(a))->ln.exp - P_EXP_FOR_ONE; };
#	endif
#endif
// Fast square-root / reciprocal-square-root helpers with
// platform-specific implementations: PS2 hardware instructions,
// float-only sqrtf, or a bit-twiddled Newton iteration on doubles.
class IVP_Fast_Math {
public:
#if defined(PSXII)
    /// Fast 1/sqrt(x) using the PS2 rsqrt.s instruction
    /// (resolution_steps is ignored on this platform).
    inline static IVP_DOUBLE isqrt(IVP_DOUBLE x, int /*resolution_steps*/)
	{
		float u = 1.0f;
		__asm__ __volatile__ ("
		.set noreorder
		rsqrt.s %0, %1, %0
		.set reorder
		" : "+f" (x) : "f" (u));
		return x;
	}
    /// sqrt(x) using the PS2 sqrt.s instruction.
    inline static IVP_DOUBLE sqrt(IVP_DOUBLE x)
	{
		__asm__ __volatile__ ("
		.set noreorder
		sqrt.s %0, %0
		.set reorder
		" : "+f" (x) :);
		return x;
	}
#elif defined(IVP_NO_DOUBLE)
    // Single-precision fallback: plain sqrtf, resolution_steps ignored.
    static IVP_DOUBLE isqrt(IVP_DOUBLE square, int /*resolution_steps*/){
	return 1.0f/IVP_Inline_Math::ivp_sqrtf(square);
    }
    static IVP_DOUBLE sqrt(IVP_DOUBLE x){
	return IVP_Inline_Math::ivp_sqrtf(x);
    }
#else
    // fast 1/sqrt(x),
    // resolution for resolution_steps
    // 0 -> 1e-3
    // 1 -> 1e-7
    // 2 -> 1e-14
    // 3 -> 1e-16
    static double isqrt(double square, int resolution_steps) {
	// Build an initial guess by halving/negating the raw exponent bits
	// (classic fast inverse-sqrt seed), then refine with Newton steps:
	// y' = y + y * (0.5 - y*y*x/2). Requires square != 0.
	p_double_ieee *ie = (p_double_ieee *) &square;
	IVP_ASSERT(IVP_Inline_Math::fabsd(square) > 0.0f);
	p_double_ieee h;
	h.val = 1.0f;
	h.ln2.h = ((0x07ff00000 - ie->ln2.h) >> 1) + 0x1ff00000;
	IVP_DOUBLE squareh = square * 0.5f;
	IVP_DOUBLE inv_sqrt = h.val;
	// Two unconditional Newton iterations, then up to three more
	// depending on the requested resolution.
	inv_sqrt += inv_sqrt * (0.5f - inv_sqrt * inv_sqrt * squareh);
	inv_sqrt += inv_sqrt * (0.5f - inv_sqrt * inv_sqrt * squareh);
	if (resolution_steps > 0) inv_sqrt += inv_sqrt * (0.5f - (inv_sqrt * inv_sqrt * squareh));
	if (resolution_steps > 1) inv_sqrt += inv_sqrt * (0.5f - (inv_sqrt * inv_sqrt * squareh));
	if (resolution_steps > 2) inv_sqrt += inv_sqrt * (0.5f - (inv_sqrt * inv_sqrt * squareh));
	IVP_ASSERT(IVP_Inline_Math::fabsd(1.0f - inv_sqrt * inv_sqrt * square) < 0.001f);
	return inv_sqrt;
    }
    // Exact sqrt via the C runtime.
    static IVP_DOUBLE sqrt(IVP_DOUBLE x) {
	return ::sqrt(x);
    }
#endif
};
| 26.102941 | 98 | 0.587606 | cstom4994 |
38ca766d5e6de6ec96fdbd66c9960d4178cf2c54 | 737 | cpp | C++ | 33_FileDialog/mainwindow.cpp | mongobaba/learn_qt | eac25c34f0104e229afe9e5408ea89a1eab9aefa | [
"MIT"
] | 1 | 2022-01-16T03:51:50.000Z | 2022-01-16T03:51:50.000Z | 33_FileDialog/mainwindow.cpp | mongobaba/learn_qt | eac25c34f0104e229afe9e5408ea89a1eab9aefa | [
"MIT"
] | null | null | null | 33_FileDialog/mainwindow.cpp | mongobaba/learn_qt | eac25c34f0104e229afe9e5408ea89a1eab9aefa | [
"MIT"
] | null | null | null | #include "mainwindow.h"
#include "ui_mainwindow.h"
#include <QFileDialog>
// Builds the main window: a line edit showing the chosen file path and
// a browse button (label "浏览" = "Browse") that opens the file dialog.
// Both child widgets are parented to this window, so Qt deletes them
// automatically.
MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);
    fileName = new QLineEdit(this);
    fileName->setGeometry(QRect(50, 50, 450, 25));
    button = new QPushButton(tr("浏览"), this);
    button->setGeometry(QRect(520, 50, 80, 25));
    // Open the file dialog when the browse button is clicked.
    connect(button, SIGNAL(clicked()), this, SLOT(showFiles()));
}
// Releases the designer UI; the child widgets are freed by Qt's
// parent-child ownership.
MainWindow::~MainWindow()
{
    delete ui;
}
// Opens a file-selection dialog and puts the chosen path into the
// line edit. (The filter string offers text files, C/C++ sources,
// or all files; the dialog title means "Open file".)
void MainWindow::showFiles()
{
    QString filter(tr("文本文件(*.txt);;C/C++文件(*.cpp);;所有文件(*.*)"));
    // On Windows the "/" start directory resolves to the root of a
    // drive (e.g. D:\).
    QString str = QFileDialog::getOpenFileName(this, tr("打开文件"), "/", filter);
    fileName->setText(str);
}
| 24.566667 | 78 | 0.645862 | mongobaba |
38cb0edf44d87a3a9a36ca82f4c06e941e8d6a97 | 5,997 | cpp | C++ | meeting-qt/setup/src/dui/Box/TileBox.cpp | GrowthEase/- | 5cc7cab95fc309049de8023ff618219dff22d773 | [
"MIT"
] | 48 | 2022-03-02T07:15:08.000Z | 2022-03-31T08:37:33.000Z | meeting-qt/setup/src/dui/Box/TileBox.cpp | chandarlee/Meeting | 9350fdea97eb2cdda28b8bffd9c4199de15460d9 | [
"MIT"
] | 1 | 2022-02-16T01:54:05.000Z | 2022-02-16T01:54:05.000Z | meeting-qt/setup/src/dui/Box/TileBox.cpp | chandarlee/Meeting | 9350fdea97eb2cdda28b8bffd9c4199de15460d9 | [
"MIT"
] | 9 | 2022-03-01T13:41:37.000Z | 2022-03-10T06:05:23.000Z | /**
* @copyright Copyright (c) 2021 NetEase, Inc. All rights reserved.
* Use of this source code is governed by a MIT license that can be found in the LICENSE file.
*/
#include "stdafx.h"
#include "TileBox.h"
namespace ui
{
// Item size starts at 0x0, meaning "no fixed tile size"; the column
// count is then derived from the available width in ArrangeChild.
TileLayout::TileLayout()
{
	m_szItem.cx = m_szItem.cy = 0;
}
// Lays the visible, non-floating children out as a grid of m_nColumns
// tiles inside rc. Row height is the tallest estimated child of that
// row; leftover width (deviation) is distributed one pixel at a time to
// the leading columns. Returns {width, total height needed}.
CSize TileLayout::ArrangeChild(const std::vector<Control*>& m_items, UiRect rc)
{
	// Position the elements
	// Derive the column count from a fixed item width, if one was set.
	if( m_szItem.cx > 0 ) m_nColumns = (rc.right - rc.left) / m_szItem.cx;
	if( m_nColumns == 0 ) m_nColumns = 1;

	int cyNeeded = 0;
	int cxWidth = rc.GetWidth() / m_nColumns;
	// Pixels left over by the integer division, handed out below.
	int deviation = rc.GetWidth() - cxWidth * m_nColumns;
	int cyHeight = 0;   // height of the row currently being filled
	int iCount = 0;     // index among arranged (visible, non-float) children
	POINT ptTile = { rc.left, rc.top };   // top-left corner of the next tile
	int iPosX = rc.left;
	for( auto it = m_items.begin(); it != m_items.end(); it++ ) {
		auto pControl = *it;
		if( !pControl->IsVisible() ) continue;
		// Floating children are positioned independently of the grid.
		if( pControl->IsFloat() ) {
			SetFloatPos(pControl, rc);
			continue;
		}

		// Determine size
		UiRect rcTile(ptTile.x, ptTile.y, ptTile.x + cxWidth, ptTile.y);
		// Give one leftover pixel to this column while any remain.
		if (deviation > 0) {
			rcTile.right += 1;
			deviation--;
		}

		// At the start of each row: pre-scan the row's children to find
		// the tallest one, which defines this row's height (cyHeight).
		if( (iCount % m_nColumns) == 0 )
		{
			int iIndex = iCount;
			for( auto it = m_items.begin(); it != m_items.end(); it++ ) {
				auto pLineControl = *it;
				if( !pLineControl->IsVisible() ) continue;
				if( pLineControl->IsFloat() ) continue;

				UiRect rcMargin = pLineControl->GetMargin();
				CSize szAvailable = { rcTile.right - rcTile.left - rcMargin.left - rcMargin.right, 9999 };

				// Edge columns only lose half the child margin; inner
				// columns lose a full margin (half on each side).
				if( iIndex == iCount || (iIndex + 1) % m_nColumns == 0 ) {
					szAvailable.cx -= m_iChildMargin / 2;
				}
				else {
					szAvailable.cx -= m_iChildMargin;
				}

				// Clamp the offered width to the child's min/max width.
				if( szAvailable.cx < pControl->GetMinWidth() ) szAvailable.cx = pControl->GetMinWidth();
				if( pControl->GetMaxWidth() >= 0 && szAvailable.cx > pControl->GetMaxWidth() ) szAvailable.cx = pControl->GetMaxWidth();

				CSize szTile = pLineControl->EstimateSize(szAvailable);

				// Clamp the estimate to the child's min/max constraints.
				if( szTile.cx < pControl->GetMinWidth() ) szTile.cx = pControl->GetMinWidth();
				if( pControl->GetMaxWidth() >= 0 && szTile.cx > pControl->GetMaxWidth() ) szTile.cx = pControl->GetMaxWidth();
				if( szTile.cy < pControl->GetMinHeight() ) szTile.cy = pControl->GetMinHeight();
				if( szTile.cy > pControl->GetMaxHeight() ) szTile.cy = pControl->GetMaxHeight();

				cyHeight = MAX(cyHeight, szTile.cy + rcMargin.top + rcMargin.bottom);
				// Stop once one full row has been scanned.
				if( (++iIndex % m_nColumns) == 0) break;
			}
		}

		UiRect rcMargin = pControl->GetMargin();

		// Shrink the tile by the child's margins plus the inter-child
		// margin (edge columns keep their outer half).
		rcTile.left += rcMargin.left + m_iChildMargin / 2;
		rcTile.right -= rcMargin.right + m_iChildMargin / 2;
		if( (iCount % m_nColumns) == 0 ) {
			rcTile.left -= m_iChildMargin / 2;
		}
		if( ( (iCount + 1) % m_nColumns) == 0 ) {
			rcTile.right += m_iChildMargin / 2;
		}

		// Set position
		rcTile.top = ptTile.y + rcMargin.top;
		rcTile.bottom = ptTile.y + cyHeight;

		CSize szAvailable = { rcTile.right - rcTile.left, rcTile.bottom - rcTile.top };
		CSize szTile = pControl->EstimateSize(szAvailable);
		// DUI_LENGTH_STRETCH means "fill whatever is offered".
		if( szTile.cx == DUI_LENGTH_STRETCH ) szTile.cx = szAvailable.cx;
		if( szTile.cy == DUI_LENGTH_STRETCH ) szTile.cy = szAvailable.cy;
		if( szTile.cx < pControl->GetMinWidth() ) szTile.cx = pControl->GetMinWidth();
		if( pControl->GetMaxWidth() >= 0 && szTile.cx > pControl->GetMaxWidth() ) szTile.cx = pControl->GetMaxWidth();
		if( szTile.cy < pControl->GetMinHeight() ) szTile.cy = pControl->GetMinHeight();
		if( szTile.cy > pControl->GetMaxHeight() ) szTile.cy = pControl->GetMaxHeight();

		// Center the child inside its tile.
		UiRect rcPos((rcTile.left + rcTile.right - szTile.cx) / 2, (rcTile.top + rcTile.bottom - szTile.cy) / 2,
			(rcTile.left + rcTile.right - szTile.cx) / 2 + szTile.cx, (rcTile.top + rcTile.bottom - szTile.cy) / 2 + szTile.cy);
		pControl->SetPos(rcPos);

		// Advance to the next tile: wrap to a new row after m_nColumns.
		if( (++iCount % m_nColumns) == 0 ) {
			ptTile.x = iPosX;
			ptTile.y += cyHeight + m_iChildMargin;
			cyHeight = 0;
		}
		else {
			ptTile.x += rcTile.GetWidth();
		}
		cyNeeded = rcTile.bottom - rc.top;
	}

	CSize size = {rc.right - rc.left, cyNeeded};
	return size;
}
// Estimates the layout's total size from its children: width comes from
// the owner's own estimate, height is rows * first-child height plus
// padding and the inter-row child margins.
// NOTE(review): assumes all tiles share the first child's fixed height.
CSize TileLayout::AjustSizeByChild(const std::vector<Control*>& m_items, CSize szAvailable)
{
	CSize size = m_pOwner->Control::EstimateSize(szAvailable);
	size.cy = 0;
	// Derive the column count the same way ArrangeChild does.
	if( m_szItem.cx > 0 ) m_nColumns = m_pOwner->GetFixedWidth() / m_szItem.cx;
	if( m_nColumns == 0 ) m_nColumns = 1;
	// Number of rows, rounded up.
	int rows = m_pOwner->GetCount() / m_nColumns;
	if (m_pOwner->GetCount() % m_nColumns != 0)
	{
		rows += 1;
	}
	if (m_items.size() > 0)
	{
		// One child margin between consecutive rows: (rows - 1) margins
		// when the last row is full, otherwise floor(count/columns).
		int childMarginTotal;
		if (m_items.size() % m_nColumns == 0)
		{
			childMarginTotal = (m_items.size() / m_nColumns - 1) * m_iChildMargin;
		}
		else
		{
			childMarginTotal = (m_items.size() / m_nColumns) * m_iChildMargin;
		}
		Control* pControl = static_cast<Control*>(m_items[0]);
		size.cy += pControl->GetFixedHeight() * rows + m_rcPadding.top + m_rcPadding.bottom + childMarginTotal;
	}
	return size;
}
// Parses XML layout attributes: "itemsize" ("cx,cy" pair) and
// "columns"; anything else is forwarded to the base Layout.
// Returns true when the attribute was recognized by this class chain.
bool TileLayout::SetAttribute(const std::wstring& pstrName, const std::wstring& pstrValue)
{
	bool hasAttribute = true;
	if( pstrName == _T("itemsize") ) {
		// Value format: "<cx>,<cy>" - parse the two integers in sequence.
		CSize szItem;
		LPTSTR pstr = NULL;
		szItem.cx = _tcstol(pstrValue.c_str(), &pstr, 10);  ASSERT(pstr);    
		szItem.cy = _tcstol(pstr + 1, &pstr, 10);    ASSERT(pstr); 
		SetItemSize(szItem);
	}
	else if( pstrName == _T("columns")) 
	{
		SetColumns(_ttoi(pstrValue.c_str()));
	}
	else 
	{
		// Unknown here - let the base class try.
		hasAttribute = Layout::SetAttribute(pstrName, pstrValue);
	}

	return hasAttribute;
}
// Returns the fixed tile size (0x0 means "derive from layout width").
CSize TileLayout::GetItemSize() const
{
	return m_szItem;
}
// Sets the fixed tile size and re-arranges the owner, skipping the
// re-layout when the size did not actually change.
void TileLayout::SetItemSize(CSize szItem)
{
	if( m_szItem.cx == szItem.cx && m_szItem.cy == szItem.cy ) return;

	m_szItem = szItem;
	m_pOwner->Arrange();
}
// Returns the current number of grid columns.
int TileLayout::GetColumns() const
{
	return m_nColumns;
}
// Sets the grid column count and re-arranges the owner; values below 1
// are silently ignored.
void TileLayout::SetColumns(int nCols)
{
	if( nCols >= 1 ) {
		m_nColumns = nCols;
		m_pOwner->Arrange();
	}
}
// A Box whose children are arranged by a TileLayout grid.
TileBox::TileBox() :
	Box(new TileLayout())
{
}
}
| 29.541872 | 125 | 0.637986 | GrowthEase |
38d059c88a8cdda0cb6ba2169db771c76242e513 | 1,483 | cpp | C++ | src/Aplicatii Vectori/ex 9/main.cpp | andrew-miroiu/Cpp-projects | d0917a7f78aef929c25dc9b019e910951c2050ac | [
"MIT"
] | 2 | 2021-11-27T18:29:32.000Z | 2021-11-28T14:35:47.000Z | src/Aplicatii Vectori/ex 9/main.cpp | andrew-miroiu/Cpp-projects | d0917a7f78aef929c25dc9b019e910951c2050ac | [
"MIT"
] | null | null | null | src/Aplicatii Vectori/ex 9/main.cpp | andrew-miroiu/Cpp-projects | d0917a7f78aef929c25dc9b019e910951c2050ac | [
"MIT"
] | null | null | null | #include <iostream>
//9. Se citesc elementele unui tablou v unidimensional cu n (n<=100) componente, numere întregi din cel
//mult 4 cifre fiecare. Sa se realizeze următoarele prelucrări: a. Să se afişeze valorile prime. b. Să se afişeze
//numerele prime a căror invers este tot un număr prim.
using namespace std;
// Returns true when x has no divisor d with 2 <= d*d <= x (note: like
// the original loop bound, 0 and 1 also pass this check).
// Fix: the original reset its `prime` counter INSIDE the divisor loop,
// so only the LAST divisor tested counted - e.g. 21 was reported prime
// (21 % 4 != 0 on the final iteration).
static bool fara_divizori(int x)
{
    for(int d=2; d*d<=x; d++)
    {
        if(x%d==0)
        {
            return false;
        }
    }
    return true;
}

// Returns the decimal reversal of x (e.g. 123 -> 321).
// Fix: the original accumulated the reversal into a single `ogl`
// variable that was never reset between numbers, corrupting every
// reversal after the first.
static int rasturnat(int x)
{
    int ogl=0;
    while(x)
    {
        ogl=ogl*10+x%10;
        x=x/10;
    }
    return ogl;
}

int main()
{
    int n, v[100], i;
    cout<<"n= ";
    cin>>n;
    cout<<"Scrie numerele: ";
    for(i=0; i<n; i++)
    {
        cin>>v[i];
    }
    // a. Print the prime values.
    cout<<"Numerele prime sunt: ";
    for(i=0; i<n; i++)
    {
        if(fara_divizori(v[i]))
        {
            cout<<v[i]<<" ,";
        }
    }
    cout<<"\b "<<endl;
    // b. Print the primes whose decimal reversal is also prime.
    cout<<"Numerele care sunt prime si rasturnatul lor este tot prim sunt:";
    for(i=0; i<n; i++)
    {
        if(fara_divizori(v[i]) && fara_divizori(rasturnat(v[i])))
        {
            cout<<v[i]<<" ,";
        }
    }
    cout<<"\b ";
    return 0;
}
| 19.25974 | 113 | 0.401214 | andrew-miroiu |
38d1097ac49d5678b32384644edd0219f7fcd9f5 | 4,679 | cpp | C++ | Engine/source/platform/platformAssert.cpp | fr1tz/alux3d | 249a3b51751ce3184d52879b481f83eabe89e7e3 | [
"MIT"
] | null | null | null | Engine/source/platform/platformAssert.cpp | fr1tz/alux3d | 249a3b51751ce3184d52879b481f83eabe89e7e3 | [
"MIT"
] | null | null | null | Engine/source/platform/platformAssert.cpp | fr1tz/alux3d | 249a3b51751ce3184d52879b481f83eabe89e7e3 | [
"MIT"
] | 1 | 2018-10-26T03:18:22.000Z | 2018-10-26T03:18:22.000Z | // Copyright information can be found in the file named COPYING
// located in the root directory of this distribution.
#include <stdarg.h>
#include "core/strings/stringFunctions.h"
#include "console/console.h"
//-------------------------------------- STATIC Declaration
PlatformAssert *PlatformAssert::platformAssert = NULL;
//--------------------------------------
//--------------------------------------
// The `processing` flag guards against re-entrant assert handling
// (see process()).
PlatformAssert::PlatformAssert()
{
   processing = false;
}
//--------------------------------------
//--------------------------------------
// No owned resources; kept virtual-dispatch-friendly for subclasses.
PlatformAssert::~PlatformAssert()
{
}
//--------------------------------------
//--------------------------------------
// Installs the singleton assert handler. A caller-supplied subclass is
// used when given; otherwise a default PlatformAssert is allocated.
// Does nothing if a handler is already installed.
void PlatformAssert::create( PlatformAssert* newAssertClass )
{
   if (platformAssert)
      return;

   if (newAssertClass)
      platformAssert = newAssertClass;
   else
      platformAssert = new PlatformAssert;
}
//--------------------------------------
//--------------------------------------
// Tears down the singleton assert handler (delete on NULL is a no-op).
void PlatformAssert::destroy()
{
   delete platformAssert;
   platformAssert = NULL;
}
//--------------------------------------
//--------------------------------------
// Shows the assert dialog. With `retry` true the user may choose to
// continue (returns their choice); otherwise an OK-only alert is shown
// and false is returned.
bool PlatformAssert::displayMessageBox(const char *title, const char *message, bool retry)
{
   if (!retry)
   {
      Platform::AlertOK(title, message);
      return false;
   }

   return Platform::AlertRetry(title, message);
}
static const char *typeName[] = { "Unknown", "Fatal-ISV", "Fatal", "Warning" };
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Asks (once per run) whether asserts should break into the debugger.
// The answer is cached in function-local statics, so the dialog is
// shown at most once until the process restarts.
static bool askToEnterDebugger(const char* message )
{
   static bool haveAsked = false;
   static bool useDebugger = true;
   if(!haveAsked )
   {
      static char tempBuff[1024];
      dSprintf( tempBuff, 1024, "Torque has encountered an assertion with message\n\n"
         "%s\n\n"
         "Would you like to use the debugger? If you cancel, you won't be asked"
         " again until you restart Torque.", message);

      useDebugger = Platform::AlertOKCancel("Use debugger?", tempBuff );
      haveAsked = true;
   }
   return useDebugger;
}
//--------------------------------------
//--------------------------------------
// Central assert handler: logs to the console, and for non-warnings
// pops a dialog that may shut the process down or offer the debugger.
// Returns whether the caller should break into the debugger.
// `processing` makes re-entrant asserts fatal immediately.
bool PlatformAssert::process(Type assertType,
                             const char* filename,
                             U32 lineNumber,
                             const char* message)
{
   // If we're somehow recursing, just die.
   if(processing)
      Platform::debugBreak();

   processing = true;
   bool ret = true;

   // always dump to the Assert to the Console
   if (Con::isActive())
   {
      if (assertType == Warning)
	      Con::warnf(ConsoleLogEntry::Assert, "%s(%ld) : %s - %s", filename, lineNumber, typeName[assertType], message);
      else
	      Con::errorf(ConsoleLogEntry::Assert, "%s(%ld) : %s - %s", filename, lineNumber, typeName[assertType], message);
   }

   // if not a WARNING pop-up a dialog box
   if (assertType != Warning)
   {
      // used for processing navGraphs (an assert won't botch the whole build)
      if(Con::getBoolVariable("$FP::DisableAsserts", false) == true)
         Platform::forceShutdown(1);

      char buffer[2048];
      dSprintf(buffer, 2048, "%s(%ld) : %s", filename, lineNumber, typeName[assertType] );
#ifdef TORQUE_DEBUG
      // In debug versions, allow a retry even for ISVs...
      bool retry = displayMessageBox(buffer, message, true);
#else
      // Release builds: only plain Fatal asserts are retryable.
      bool retry = displayMessageBox(buffer, message, ((assertType == Fatal) ? true : false) );
#endif
      if(!retry)
         Platform::forceShutdown(1);

      ret = askToEnterDebugger(message);
   }

   processing = false;
   return ret;
}
bool PlatformAssert::processingAssert()
{
return platformAssert ? platformAssert->processing : false;
}
//--------------------------------------
// Static entry point used by the assert macros. Routes to the singleton
// handler when one exists; before create()/after destroy() it falls back
// to raw printf output and returns true so the caller still breaks.
bool PlatformAssert::processAssert(Type        assertType,
                                   const char  *filename,
                                   U32         lineNumber,
                                   const char  *message)
{
   if (platformAssert)
      return platformAssert->process(assertType, filename, lineNumber, message);
   else // when platAssert NULL (during _start/_exit) try direct output...
      dPrintf("\n%s: (%s @ %ld) %s\n", typeName[assertType], filename, lineNumber, message);
   // this could also be platform-specific: OutputDebugString on PC, DebugStr on Mac.
   // Will raw printfs do the job? In the worst case, it's a break-pointable line of code.
   // would have preferred Con but due to race conditions, it might not be around...
   // Con::errorf(ConsoleLogEntry::Assert, "%s: (%s @ %ld) %s", typeName[assertType], filename, lineNumber, message);
   return true;
}
//--------------------------------------
//--------------------------------------
// Format a printf-style message into a shared static buffer and return
// a pointer to it.
//
// NOTE: the result lives in function-local static storage — it is only
// valid until the next call and this function is not thread safe;
// callers must consume the string immediately.
const char* avar(const char *message, ...)
{
   static char buffer[4096];
   va_list args;
   va_start(args, message);
   dVsprintf(buffer, sizeof(buffer), message, args);
   va_end(args);   // Fix: every va_start must be paired with va_end (C/C++ standard).
   return( buffer );
}
| 30.383117 | 119 | 0.582176 | fr1tz |
38d15c721c10b88000fc8841d3021692d002b1b5 | 2,097 | cpp | C++ | src/Saurobyte/SystemPool.cpp | Symphonym/Saurobyte | c4bc5afd4ac4353ed6cd9a201454fd14aa3aced2 | [
"MIT"
] | 17 | 2015-01-26T19:46:42.000Z | 2021-10-04T15:30:32.000Z | src/Saurobyte/SystemPool.cpp | Symphonym/Saurobyte | c4bc5afd4ac4353ed6cd9a201454fd14aa3aced2 | [
"MIT"
] | 1 | 2021-04-06T01:12:03.000Z | 2021-04-06T01:12:03.000Z | src/Saurobyte/SystemPool.cpp | Symphonym/Saurobyte | c4bc5afd4ac4353ed6cd9a201454fd14aa3aced2 | [
"MIT"
] | 2 | 2015-02-03T21:23:49.000Z | 2021-05-02T14:52:52.000Z | #include <Saurobyte/SystemPool.hpp>
#include <Saurobyte/System.hpp>
namespace Saurobyte
{
SystemPool::SystemPool(Engine *engine)
:
m_engine(engine)
{
}
SystemPool::~SystemPool()
{
frameCleanup();
m_systemPool.clear();
}
void SystemPool::addSystem(BaseSystem *newSystem)
{
// Make sure the system doesn't exist, then add it
auto iter = m_systemPool.find(newSystem->getTypeID());
if(iter == m_systemPool.end())
m_systemPool[newSystem->getTypeID()] = SystemPtr(newSystem);
}
void SystemPool::removeSystem(TypeID id)
{
auto iter = m_systemPool.find(id);
// Delete the system from the map, but the actual memory deletion is done at the start
// of the next frame. This is done so any calls to hasSystem after the remove call will
// return false.
if(iter != m_systemPool.end())
{
m_pendingDeletes.push_back(std::move(iter->second));
m_systemPool.erase(iter);
}
}
BaseSystem* SystemPool::getSystem(TypeID id)
{
auto iter = m_systemPool.find(id);
if(iter == m_systemPool.end())
return nullptr;
else
return iter->second.get();
}
bool SystemPool::hasSystem(TypeID id)
{
auto iter = m_systemPool.find(id);
return iter != m_systemPool.end();
}
void SystemPool::emptySystems()
{
for(auto itr = m_systemPool.begin(); itr != m_systemPool.end(); itr++)
{
if(itr->second->isActive())
itr->second->clearSystem();
}
}
void SystemPool::processSystems()
{
for(auto itr = m_systemPool.begin(); itr != m_systemPool.end(); itr++)
{
if(itr->second->isActive())
{
itr->second->preProcess();
itr->second->processEntities();
itr->second->postProcess();
}
}
}
void SystemPool::removeEntityFromSystems(Entity &entity, bool wasKilled)
{
for(auto itr = m_systemPool.begin(); itr != m_systemPool.end(); itr++)
itr->second->removeEntity(entity, wasKilled);
}
void SystemPool::refreshEntity(Entity &entity)
{
for(auto itr = m_systemPool.begin(); itr != m_systemPool.end(); itr++)
itr->second->refreshEntity(entity);
}
void SystemPool::frameCleanup()
{
m_pendingDeletes.clear();
}
}; | 22.793478 | 89 | 0.680496 | Symphonym |
38d1ea2ff8ef4143ec19cb5090cc2eee413cbcf4 | 539,849 | cpp | C++ | com/netfx/src/clr/vm/class.cpp | npocmaka/Windows-Server-2003 | 5c6fe3db626b63a384230a1aa6b92ac416b0765f | [
"Unlicense"
] | 17 | 2020-11-13T13:42:52.000Z | 2021-09-16T09:13:13.000Z | com/netfx/src/clr/vm/class.cpp | sancho1952007/Windows-Server-2003 | 5c6fe3db626b63a384230a1aa6b92ac416b0765f | [
"Unlicense"
] | 2 | 2020-10-19T08:02:06.000Z | 2020-10-19T08:23:18.000Z | com/netfx/src/clr/vm/class.cpp | sancho1952007/Windows-Server-2003 | 5c6fe3db626b63a384230a1aa6b92ac416b0765f | [
"Unlicense"
] | 14 | 2020-11-14T09:43:20.000Z | 2021-08-28T08:59:57.000Z | // ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
// ===========================================================================
// File: CLASS.CPP
//
// ===========================================================================
// This file contains CreateClass() which will return a EEClass*.
// Calling create class is the ONLY way a EEClass should be allocated.
// ===========================================================================
//
#include "common.h"
#include "clsload.hpp"
#include "method.hpp"
#include "class.h"
#include "object.h"
#include "field.h"
#include "util.hpp"
#include "excep.h"
#include "siginfo.hpp"
#include "threads.h"
#include "stublink.h"
#include "ecall.h"
#include "COMPlusWrapper.h"
#include "ComClass.h"
#include "ndirect.h"
#include "gcdesc.h"
#include "verifier.hpp"
#include "JITInterface.h"
#include "ComCallWrapper.h"
#include "EEConfig.h"
#include "log.h"
#include "nstruct.h"
#include "cgensys.h"
#include "gc.h"
#include "ReflectUtil.h"
#include "security.h"
#include "COMStringBuffer.h"
#include "DbgInterface.h"
#include "COMDelegate.h"
#include "COMMember.h"
#include "SigFormat.h"
#include "compluscall.h"
#include "remoting.h"
#include "icecap.h"
#include "EEProfInterfaces.h"
#include "nexport.h"
#include "nstruct.h"
#include "wsperf.h"
#include "ListLock.h"
#include "MethodImpl.h"
#include "jumptargettable.h"
#include "utsem.h"
#include "GuidFromName.h"
#include "stackprobe.h"
#include "EnCEE.h"
#include "EnCEE.h"
#include "ComSynchronizable.h"
#include "CustomAttribute.h"
#include "listlock.inl"
// this file handles string conversion errors for itself
#undef MAKE_TRANSLATIONFAILED
// Helper functions to sort GCdescs by offset (decending order)
//
// qsort-style comparator ordering CGCDescSeries entries by descending
// GetSeriesOffset(). Uses explicit three-way comparison instead of
// subtracting the offsets: the difference of two large offsets
// truncated to int can flip sign and corrupt the sort order.
int __cdecl compareCGCDescSeries(const void *arg1, const void *arg2)
{
    CGCDescSeries* gcInfo1 = (CGCDescSeries*) arg1;
    CGCDescSeries* gcInfo2 = (CGCDescSeries*) arg2;

    if (gcInfo2->GetSeriesOffset() > gcInfo1->GetSeriesOffset())
        return 1;
    if (gcInfo2->GetSeriesOffset() < gcInfo1->GetSeriesOffset())
        return -1;
    return 0;
}
//-----------------------------------------------------------------------------------
// The following is needed to monitor RVA fields overlapping in InitializeFieldDescs
//
#define RVA_FIELD_VALIDATION_ENABLED
//#define RVA_FIELD_OVERLAPPING_VALIDATION_ENABLED
#include "..\ildasm\DynamicArray.h"
struct RVAFSE // RVA Field Start & End
{
BYTE* pbStart;
BYTE* pbEnd;
};
DynamicArray<RVAFSE> *g_drRVAField = NULL;
ULONG g_ulNumRVAFields=0;
//-----------------------------------------------------------------------------------
// utsem.h defines UNLOCK which conflicts with UNLOCK in ComPlusWrapperCache
#undef UNLOCK
#define UNPLACED_NONVTABLE_SLOT_NUMBER ((WORD) -2)
#include "Assembly.hpp"
extern "C" void* GetGp(void);
// Typedef for string comparition functions.
typedef int (__cdecl *UTF8StringCompareFuncPtr)(const char *, const char *);
char* FormatSig(MethodDesc* pMD);
// Cache the MethodDesc where the Finalize method was placed into Object's MethodTable.
MethodDesc *MethodTable::s_FinalizerMD;
MetaSig *EEClass::s_cctorSig;
#ifdef _DEBUG
// Debug-only sanity check for a TypeHandle. Null handles are trivially
// valid; array handles defer to the ArrayTypeDesc's own Verify.
BOOL TypeHandle::Verify()
{
    if (IsNull())
        return(true);
    if (IsUnsharedMT()) {
        // **********TEMOPORARILY commented out. TarunA to fix it.
        //_ASSERTE(m_asMT->GetClass()->GetMethodTable() == m_asMT); // Sane method table
        // This assert really should be here, but at the moment it is violated
        // (benignly), in JitInterface when you ask for a class when all you have
        // is a methodDesc of an array method.
        // _ASSERTE(!m_asMT->IsArray());
    }
    else {
        if (IsArray())
            AsArray()->Verify();
    }
    return(true);
}

// Debug-only invariants for a parameterized TypeDesc (PTR/BYREF/array):
// the template method table (if present) must be canonical, the element
// type must be a valid non-array-MT handle, and the kind must be a
// modifier element type. Recurses into the element type.
BOOL ParamTypeDesc::Verify() {
    _ASSERTE(m_TemplateMT == 0 || m_TemplateMT == m_TemplateMT->GetClass()->GetMethodTable());
    _ASSERTE(!GetTypeParam().IsNull());
    _ASSERTE(!(GetTypeParam().IsUnsharedMT() && GetTypeParam().AsMethodTable()->IsArray()));
    _ASSERTE(CorTypeInfo::IsModifier(m_Type));
    GetTypeParam().Verify();
    return(true);
}

// Debug-only invariants for an array TypeDesc, layered on the
// ParamTypeDesc checks.
BOOL ArrayTypeDesc::Verify() {
    _ASSERTE(m_TemplateMT->IsArray());
    _ASSERTE(CorTypeInfo::IsArray(m_Type));
    ParamTypeDesc::Verify();
    return(true);
}
#endif
// Size in bytes of an instance of this type: value classes report their
// instance-field size, everything else the canonical size of its
// element type.
unsigned TypeHandle::GetSize() {
    CorElementType type = GetNormCorElementType();
    if (type == ELEMENT_TYPE_VALUETYPE)
        return(AsClass()->GetNumInstanceFieldBytes());
    return(GetSizeForCorElementType(type));
}

// Module that owns this type (delegates to the TypeDesc for
// constructed types).
Module* TypeHandle::GetModule() {
    if (IsTypeDesc())
        return AsTypeDesc()->GetModule();
    return(AsMethodTable()->GetModule());
}

// Assembly that owns this type.
Assembly* TypeHandle::GetAssembly() {
    if (IsTypeDesc())
        return AsTypeDesc()->GetAssembly();
    return(AsMethodTable()->GetAssembly());
}

// True for SZARRAY/ARRAY handles; arrays are represented as TypeDescs.
BOOL TypeHandle::IsArray() {
    return(IsTypeDesc() && CorTypeInfo::IsArray(AsTypeDesc()->GetNormCorElementType()));
}

// Castability test. Identical handles always succeed; TypeDescs apply
// their own rules; otherwise this is a class/interface cast check.
BOOL TypeHandle::CanCastTo(TypeHandle type) {
    if (*this == type)
        return(true);
    if (IsTypeDesc())
        return AsTypeDesc()->CanCastTo(type);
    if (!type.IsUnsharedMT())
        return(false);
    return ClassLoader::StaticCanCastToClassOrInterface(AsClass(), type.AsClass()) != 0;
}

// Write the fully qualified type name into buff (capacity buffLen) and
// return the resulting string length.
unsigned TypeHandle::GetName(char* buff, unsigned buffLen) {
    if (IsTypeDesc())
        return(AsTypeDesc()->GetName(buff, buffLen));
    AsMethodTable()->GetClass()->_GetFullyQualifiedNameForClass(buff, buffLen);
    _ASSERTE(strlen(buff) < buffLen-1);
    return((unsigned)strlen(buff));
}

// Parent type: TypeDescs compute their own; classes walk the EEClass
// parent chain; System.Object (no parent) yields a null handle.
TypeHandle TypeHandle::GetParent() {
    if (IsTypeDesc())
        return(AsTypeDesc()->GetParent());
    EEClass* parentClass = AsMethodTable()->GetClass()->GetParentClass();
    if (parentClass == 0)
        return(TypeHandle());
    return TypeHandle(parentClass->GetMethodTable());
}
// Module that owns this TypeDesc. Modifier types (PTR/BYREF/array) are
// assumed to live with their element type's loader; function pointers
// use the module recorded in their signature.
Module* TypeDesc::GetModule() {
    // Note here we are making the assumption that a typeDesc lives in
    // the classloader of its element type.
    if (CorTypeInfo::IsModifier(m_Type)) {
        TypeHandle param = GetTypeParam();
        _ASSERTE(!param.IsNull());
        return(param.GetModule());
    }
    _ASSERTE(m_Type == ELEMENT_TYPE_FNPTR);
    FunctionTypeDesc* asFtn = (FunctionTypeDesc*) this;
    return(asFtn->GetSig()->GetModule());
}

// Assembly that owns this TypeDesc; same element-type assumption as
// GetModule(). Note: no FNPTR path here — only modifier types expected.
Assembly* TypeDesc::GetAssembly() {
    // Note here we are making the assumption that a typeDesc lives in
    // the classloader of its element type.
    TypeHandle param = GetTypeParam();
    _ASSERTE(!param.IsNull());
    return(param.GetAssembly());
}

// Format this TypeDesc's name into buff; returns the length written.
// Rank is only meaningful for general (multi-dim) arrays.
unsigned TypeDesc::GetName(char* buff, unsigned buffLen)
{
    CorElementType kind = GetNormCorElementType();
    return ConstructName(kind,
        CorTypeInfo::IsModifier(kind) ? GetTypeParam() : TypeHandle(),
        kind == ELEMENT_TYPE_ARRAY ? ((ArrayTypeDesc*) this)->GetRank() : 0,
        buff, buffLen);
}
// Build the textual name for a (possibly constructed) type into
// buff/buffLen: element-type name plus '&', '*', "[]" or "[,,]" array
// suffixes. Returns the number of characters produced (excluding the
// terminator). Output is silently truncated if the buffer is too small;
// NOTE(review): when the buffer fills exactly, no null terminator is
// written — callers appear to size generously, confirm before reuse.
unsigned TypeDesc::ConstructName(CorElementType kind, TypeHandle param, int rank,
                                 char* buff, unsigned buffLen)
{
    char* origBuff = buff;
    char* endBuff = &buff[buffLen];
    if (CorTypeInfo::IsModifier(kind))
    {
        // Modifier names begin with the element type's own name.
        buff += param.GetName(buff, buffLen);
    }
    switch(kind) {
    case ELEMENT_TYPE_BYREF:
        if (buff < endBuff)
            *buff++ = '&';
        break;
    case ELEMENT_TYPE_PTR:
        if (buff < endBuff)
            *buff++ = '*';
        break;
    case ELEMENT_TYPE_SZARRAY:
        if (&buff[2] <= endBuff) {
            *buff++ = '[';
            *buff++ = ']';
        }
        break;
    case ELEMENT_TYPE_ARRAY: {
        if (&buff[rank+2] <= endBuff) {
            *buff++ = '[';
            // Rank-1 general arrays print as [*] to distinguish them
            // from SZARRAY's [].
            if (rank == 1)
                *buff++ = '*';
            else {
                while(--rank > 0)
                    *buff++ = ',';
            }
            *buff++ = ']';
        }
        break;
        }
    case ELEMENT_TYPE_FNPTR:
    default:
        // Non-constructed kinds: copy the canonical full name.
        const char* name = CorTypeInfo::GetFullName(kind);
        _ASSERTE(name != 0);
        unsigned len = (unsigned)strlen(name);
        if (buff + len < endBuff) {
            strcpy(buff, name);
            buff += len;
        }
    }
    if (buff < endBuff)
        *buff = 0;
    _ASSERTE(buff <= endBuff);
    return buff - origBuff;
}
// Cast rules for TypeDescs. Casting to a real class only succeeds via
// the underlying method table (this covers System.Array/Object/... for
// arrays). Between TypeDescs the element kinds must agree (SZARRAY also
// matches one-dimensional ARRAY); for constructed types the element
// types must match exactly unless they are object refs (inheritance
// allowed) or same-underlying-type primitives/enums (interchangeable).
BOOL TypeDesc::CanCastTo(TypeHandle toType) {
    if (!toType.IsTypeDesc()) {
        if (GetMethodTable() == 0) // I don't have an underlying method table, I am not an object.
            return(false);
        // This does the right thing if 'type' == System.Array or System.Object, System.Clonable ...
        return(ClassLoader::StaticCanCastToClassOrInterface(GetMethodTable()->GetClass(), toType.AsClass()) != 0);
    }
    TypeDesc* toTypeDesc = toType.AsTypeDesc();
    CorElementType toKind = toTypeDesc->GetNormCorElementType();
    CorElementType fromKind = GetNormCorElementType();
    // The element kinds must match, only exception is that SZARRAY matches a one dimension ARRAY
    if (!(toKind == fromKind || (CorTypeInfo::IsArray(toKind) && fromKind == ELEMENT_TYPE_SZARRAY)))
        return(false);
    // Is it a parameterized type?
    if (CorTypeInfo::IsModifier(toKind)) {
        if (toKind == ELEMENT_TYPE_ARRAY) {
            // General arrays must also agree on rank.
            ArrayTypeDesc* fromArray = (ArrayTypeDesc*) this;
            ArrayTypeDesc* toArray = (ArrayTypeDesc*) toTypeDesc;
            if (fromArray->GetRank() != toArray->GetRank())
                return(false);
        }
        // While boxed value classes inherit from object their
        // unboxed versions do not. Parameterized types have the
        // unboxed version, thus, if the from type parameter is value
        // class then only an exact match works.
        TypeHandle fromParam = GetTypeParam();
        TypeHandle toParam = toTypeDesc->GetTypeParam();
        if (fromParam == toParam)
            return(true);
        // Object parameters dont need an exact match but only inheritance, check for that
        CorElementType fromParamCorType = fromParam.GetNormCorElementType();
        if (CorTypeInfo::IsObjRef(fromParamCorType))
            return(fromParam.CanCastTo(toParam));
        // Enums with the same underlying type are interchangable
        if (CorTypeInfo::IsPrimitiveType(fromParamCorType) &&
            fromParamCorType == toParam.GetNormCorElementType()) {
            EEClass* pFromClass = fromParam.GetClass();
            EEClass* pToClass = toParam.GetClass();
            if (pFromClass && (pFromClass->IsEnum() || pFromClass->IsTruePrimitive()) &&
                pToClass && (pToClass->IsEnum() || pToClass->IsTruePrimitive())) {
                return(true);
            }
        }
        // Anything else is not a match.
        return(false);
    }
    _ASSERTE(toKind == ELEMENT_TYPE_TYPEDBYREF || CorTypeInfo::IsPrimitiveType(toKind));
    return(true);
}
// Parent type of a TypeDesc: arrays derive from System.Array,
// primitives from System.Object; everything else (pointers, byrefs,
// function pointers, ...) has no parent.
TypeHandle TypeDesc::GetParent() {
    CorElementType elemKind = GetNormCorElementType();
    if (CorTypeInfo::IsArray(elemKind)) {
        _ASSERTE(elemKind == ELEMENT_TYPE_SZARRAY || elemKind == ELEMENT_TYPE_ARRAY);
        return g_pArrayClass;
    }
    if (!CorTypeInfo::IsPrimitiveType(elemKind))
        return TypeHandle();
    return g_pObjectClass;
}
// Lazily create (and cache) the managed reflection object for this
// constructed type. Multiple threads may race to build it; the first
// InterlockedCompareExchange wins, and a loser's allocation is simply
// abandoned in the loader heap (it cannot be freed).
OBJECTREF ParamTypeDesc::CreateClassObj()
{
    THROWSCOMPLUSEXCEPTION();
    if (!m_ReflectClassObject) {
        COMClass::EnsureReflectionInitialized();
        BaseDomain *pBaseDomain = GetDomain();
        switch(GetNormCorElementType()) {
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_SZARRAY:
            {
                // Lookup the array to see if we have already built it.
                ReflectArrayClass *newArray = new (pBaseDomain) ReflectArrayClass();
                if (!newArray)
                    COMPlusThrowOM();
                newArray->Init((ArrayTypeDesc*)this);
                // Let all threads fight over who wins using InterlockedCompareExchange.
                // Only the winner can set m_ReflectClassObject from NULL.
                // Because memory is coming out of the LoaderHeap we do not delete it .. ;^(
                FastInterlockCompareExchange ((void**)&m_ReflectClassObject, newArray, NULL);
            }
            break;
        case ELEMENT_TYPE_BYREF:
        case ELEMENT_TYPE_PTR:
            {
                ReflectTypeDescClass *newTD = new (pBaseDomain) ReflectTypeDescClass();
                if (!newTD)
                    COMPlusThrowOM();
                newTD->Init(this);
                // Let all threads fight over who wins using InterlockedCompareExchange.
                // Only the winner can set m_ReflectClassObject from NULL.
                // Because memory is coming out of the LoaderHeap we do not delete it .. ;^(
                FastInterlockCompareExchange ((void**)&m_ReflectClassObject, newTD, NULL);
            }
            break;
        default:
            // Only array/ptr/byref TypeDescs should ever reach here.
            _ASSERTE(!"We should never be here");
            return NULL;
        }
    }
    return m_ReflectClassObject->GetClassObject();
}
//
// The MethodNameHash is a temporary loader structure which may be allocated if there are a large number of
// methods in a class, to quickly get from a method name to a MethodDesc (potentially a chain of MethodDescs).
//
// Returns TRUE for success, FALSE for failure
// One-shot initialization: sizes the bucket count from the expected
// entry total and allocates buckets plus all entries in a single block.
// Returns TRUE for success, FALSE for failure
BOOL MethodNameHash::Init(DWORD dwMaxEntries)
{
    // Given dwMaxEntries, determine a good value for the number of hash buckets
    m_dwNumBuckets = (dwMaxEntries / 10);
    if (m_dwNumBuckets < 4)
        m_dwNumBuckets = 4;
    WS_PERF_SET_HEAP(SYSTEM_HEAP);
    // We're given the number of hash table entries we're going to insert, so we can allocate the appropriate size
    m_pMemoryStart = new BYTE[dwMaxEntries*sizeof(MethodHashEntry) + m_dwNumBuckets*sizeof(MethodHashEntry*)];
    if (m_pMemoryStart == NULL)
        return FALSE;
    WS_PERF_UPDATE("MethodNameHash:Init", dwMaxEntries*sizeof(MethodHashEntry) + m_dwNumBuckets*sizeof(MethodHashEntry*), m_pMemoryStart);
#ifdef _DEBUG
    m_pDebugEndMemory = m_pMemoryStart + dwMaxEntries*sizeof(MethodHashEntry) + m_dwNumBuckets*sizeof(MethodHashEntry*);
#endif
    // Current alloc ptr
    m_pMemory = m_pMemoryStart;
    // Allocate the buckets out of the alloc ptr
    m_pBuckets = (MethodHashEntry**) m_pMemory;
    m_pMemory += sizeof(MethodHashEntry*)*m_dwNumBuckets;
    // Buckets all point to empty lists to begin with
    memset(m_pBuckets, 0, sizeof(MethodHashEntry*)*m_dwNumBuckets);
    return TRUE;
}

// Insert new entry at head of list
// Entries are carved sequentially out of the preallocated block; the
// caller promised at most dwMaxEntries inserts at Init time (checked
// only in debug builds).
void MethodNameHash::Insert(LPCUTF8 pszName, MethodDesc *pDesc)
{
    DWORD           dwHash = HashStringA(pszName);
    DWORD           dwBucket = dwHash % m_dwNumBuckets;
    MethodHashEntry*pNewEntry;
    pNewEntry = (MethodHashEntry *) m_pMemory;
    m_pMemory += sizeof(MethodHashEntry);
#ifdef _DEBUG
    _ASSERTE(m_pMemory <= m_pDebugEndMemory);
#endif
    // Insert at head of bucket chain
    pNewEntry->m_pNext = m_pBuckets[dwBucket];
    pNewEntry->m_pDesc = pDesc;
    pNewEntry->m_dwHashValue = dwHash;
    pNewEntry->m_pKey = pszName;
    m_pBuckets[dwBucket] = pNewEntry;
}

// Return the first MethodHashEntry with this name, or NULL if there is no such entry
// A dwHash of 0 means "compute it here"; chain entries compare hashes
// first so most non-matches skip the strcmp.
MethodHashEntry *MethodNameHash::Lookup(LPCUTF8 pszName, DWORD dwHash)
{
    if (!dwHash)
        dwHash = HashStringA(pszName);
    DWORD           dwBucket = dwHash % m_dwNumBuckets;
    MethodHashEntry*pSearch;
    for (pSearch = m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->m_pNext)
    {
        if (pSearch->m_dwHashValue == dwHash && !strcmp(pSearch->m_pKey, pszName))
            return pSearch;
    }
    return NULL;
}
// Look up (or build and possibly cache) the method-name hash for a
// parent class. The cache holds METH_NAME_CACHE_SIZE entries with usage
// weights; a freshly built hash replaces the lightest entry when it is
// heavy enough, or after too many consecutive misses. Returns NULL only
// when CreateMethodChainHash fails (out of memory).
MethodNameHash *MethodNameCache::GetMethodNameHash(EEClass *pParentClass)
{
    MethodNameHash *pMethodHash = NULL;
    for (DWORD i = 0; i < METH_NAME_CACHE_SIZE; i++)
    {
        if (pParentClass == m_pParentClass[i])
        {
            // Cache hit: bump this entry's weight and, if it was the
            // lightest entry, elect a new lightest.
            pMethodHash = m_pMethodNameHash[i];
            m_dwNumConsecutiveMisses = 0;
            m_dwWeights[i]++;
            if (m_dwLightWeight == i)
            {
                for (DWORD j = 0; j < METH_NAME_CACHE_SIZE; j++)
                    if (j != i && m_dwWeights[j] < m_dwWeights[i])
                    {
                        m_dwLightWeight = j;
                        break;
                    }
            }
        }
        if (pMethodHash)
            break;
    }
    if (!pMethodHash)
    {
        m_dwNumConsecutiveMisses++;
        // There may be such a method, so we will now create a hash table to reduce the pain for
        // further lookups
        pMethodHash = pParentClass->CreateMethodChainHash();
        if (pMethodHash == NULL)
            return NULL;
        // Weight scales with vtable size so larger classes are stickier.
        DWORD dwWeightOfNewClass = 1 + (pParentClass->GetNumVtableSlots() / 50);
        if (m_dwWeights[m_dwLightWeight] < dwWeightOfNewClass || m_dwNumConsecutiveMisses > MAX_MISSES)
        {
            DWORD index = m_dwLightWeight;
            DWORD oldWeight = m_dwWeights[m_dwLightWeight];
            m_dwWeights[index] = dwWeightOfNewClass;
            if (oldWeight == 0 && m_dwLightWeight < (METH_NAME_CACHE_SIZE - 1))
                m_dwLightWeight++;
            else
                for (DWORD j = 0; j < METH_NAME_CACHE_SIZE; j++)
                    if (j != index && m_dwWeights[j] < dwWeightOfNewClass)
                    {
                        m_dwLightWeight = j;
                        break;
                    }
            if (m_dwNumConsecutiveMisses > MAX_MISSES)
                m_dwNumConsecutiveMisses = 0;
            // Evict the old entry; NOTE(review): the evicted hash is
            // deleted here, so callers must not hold a previously
            // returned hash across calls — confirm with call sites.
            if (m_pMethodNameHash[index])
                delete m_pMethodNameHash[index];
            m_pMethodNameHash[index] = pMethodHash;
            m_pParentClass[index] = pParentClass;
        }
    }
    return pMethodHash;
}
//
// For each method in Object, we set the bit corresponding to Hash(MethodName). This allows us to determine
// very easily whether a method definitely does not override something in Object.
//
#define OBJ_CLASS_METHOD_HASH_BITMAP_BITS 103
DWORD g_ObjectClassMethodHashBitmap[(OBJ_CLASS_METHOD_HASH_BITMAP_BITS/8)+4];
BOOL g_ObjectClassMethodHashBitmapInited = FALSE;
//@TODO why isn't this defined anywhere?
#define MAX(a,b) (((a)>(b))?(a):(b))
// Log (base 2) of the size of a pointer on this platform....
#ifndef _WIN64
#define LOG2PTR 2
#else
//#error 64 Bit - Study this carefully before enabling
#define LOG2PTR 3
#endif
#ifdef _DEBUG
static unsigned g_dupMethods = 0;
#endif
// Define this to cause all vtable and field information to be dumped to the screen
//#define FULL_DEBUG
// mark the class as having its <clinit> run. (Or it has none)
// mark the class as having its <clinit> run. (Or it has none)
// Sets the inited bit on both the method table and the EEClass using
// interlocked ORs, since other threads may update flag words concurrently.
void MethodTable::SetClassInited()
{
    _ASSERTE(!IsShared()
             || GetClass()->GetNumStaticFields() == 0
             || g_Mscorlib.IsClass(this, CLASS__SHARED_STATICS));
    FastInterlockOr(&m_wFlags, enum_flag_ClassInited);
    FastInterlockOr(m_pEEClass->GetVMFlagsPtr(), VMFLAG_INITED);
}

// mark the class as having been restored.
// Clears the "unrestored"/"restoring" bits on both structures.
void MethodTable::SetClassRestored()
{
    FastInterlockAnd(&m_wFlags, ~enum_flag_Unrestored);
    FastInterlockAnd(m_pEEClass->GetVMFlagsPtr(), ~(VMFLAG_UNRESTORED | VMFLAG_RESTORING));
}

// Mark this type as a COM (RCW) object type. Types with no interfaces
// fall back to the current domain's shared interface vtable map.
void MethodTable::SetComObjectType()
{
    m_wFlags |= enum_ComObjectMask;
    if (m_wNumInterface == 0)
    {
        // if you got zero interfaces you better use the
        // default interface map
        m_pInterfaceVTableMap = GetThread()->GetDomain()->GetInterfaceVTableMapMgr().GetAddrOfGlobalTableForComWrappers();
    }
}

// mark as transparent proxy type
// Transparent proxies always use the domain's global interface vtable map.
void MethodTable::SetTransparentProxyType()
{
    m_wFlags |= enum_TransparentProxy;
    m_pInterfaceVTableMap = GetThread()->GetDomain()->GetInterfaceVTableMapMgr().GetAddrOfGlobalTableForComWrappers();
}
// Thin forwarders: the COM call wrapper template and COM class factory
// are stored on the EEClass, not on the method table itself.
LPVOID MethodTable::GetComCallWrapperTemplate()
{
    return GetClass()->GetComCallWrapperTemplate();
}

void MethodTable::SetComCallWrapperTemplate(LPVOID pv)
{
    GetClass()->SetComCallWrapperTemplate(pv);
}

LPVOID MethodTable::GetComClassFactory()
{
    return GetClass()->GetComClassFactory();
}

void MethodTable::SetComClassFactory(LPVOID pv)
{
    GetClass()->SetComClassFactory(pv);
}

// Whether the EEClass behind this method table is an interface.
BOOL MethodTable::IsInterface()
{
    return GetClass()->IsInterface();
}

// Index of this shared class within the shared-class table: module's
// base index plus the metadata RID minus one. Valid only for shared MTs.
SIZE_T MethodTable::GetSharedClassIndex()
{
    _ASSERTE(IsShared());
    return GetModule()->GetBaseClassIndex() + RidFromToken(GetClass()->GetCl()) - 1;
}

// Forward slot -> MethodDesc resolution to the EEClass.
MethodDesc* MethodTable::GetMethodDescForSlot(DWORD slot)
{
    return GetClass()->GetMethodDescForSlot(slot);
}

MethodDesc* MethodTable::GetUnboxingMethodDescForValueClassMethod(MethodDesc *pMD)
{
    return GetClass()->GetUnboxingMethodDescForValueClassMethod(pMD);
}

// Method table of the parent class, or NULL for System.Object.
MethodTable * MethodTable::GetParentMethodTable()
{
    EEClass* pClass = GetClass()->GetParentClass();
    return (pClass != NULL) ? pClass->GetMethodTable() : NULL;
}

// helper to get parent class skipping over COM class in
// the hierarchy
MethodTable * MethodTable::GetComPlusParentMethodTable()
{
    EEClass* pClass = GetClass()->GetParentComPlusClass();
    return (pClass != NULL) ? pClass->GetMethodTable() : NULL;
}

// An interface counts as "shared" when it lives in the shared domain.
BOOL EEClass::IsSharedInterface()
{
    // all shared interfaces in shared domain
    return (IsInterface() && (GetModule()->GetDomain() == SharedDomain::GetDomain()));
}

// Address of the vtable slot for the given method, after applying any
// lazy fixup pending for that slot.
SLOT* EEClass::GetMethodSlot(MethodDesc* method)
{
    _ASSERTE(m_pMethodTable != NULL);
    DWORD slot = method->GetSlot();
    //
    // Fixup the slot address if necessary
    //
    GetFixedUpSlot(slot);
    //
    // Return the slot
    //
    return(&GetVtable()[slot]);
}
// Get Dispatch vtable for interface
// returns NULL if interface not found.
// For pure COM+ types this is a section of our own vtable. For
// __ComObject-derived types (RCWs), interfaces not in the static map
// may resolve to the interface's own vtable instead.
LPVOID MethodTable::GetDispatchVtableForInterface(MethodTable* pMTIntfClass)
{
    _ASSERTE(!IsThunking());
    DWORD StartSlot;
    // Start by handling pure COM+ objects.
    if (!IsComObjectType())
    {
        StartSlot = GetStartSlotForInterface(pMTIntfClass);
        return StartSlot != -1 ? (LPVOID) &GetVtable()[StartSlot] : NULL;
    }
    // We now handle __ComObject class that doesn't have Dynamic Interface Map
    if (!HasDynamicInterfaceMap())
    {
        // parent should be Object for __COmObject
        _ASSERTE(GetParentMethodTable() == g_pObjectClass);
        // Com objects are special, just return the interface vtable.
        return (LPVOID)pMTIntfClass->GetVtable();
    }
    // Now we handle the more complex extensible RCW's. The first thing to do is check
    // to see if the static definition of the extensible RCW specifies that the class
    // implements the interface.
    StartSlot = GetStartSlotForInterface(pMTIntfClass);
    if (StartSlot != -1)
        return (LPVOID) &GetVtable()[StartSlot];
    // The interface is not in the static class definition so we need to look at the
    // dynamic interfaces.
    if (FindDynamicallyAddedInterface(pMTIntfClass))
    {
        // This interface was added to the class dynamically so it is implemented
        // by the COM object. We treat this dynamically added interfaces the same
        // way we treat COM objects. That is by using the interface vtable.
        return (LPVOID)pMTIntfClass->GetVtable();
    }
    // The interface is not implemented by this class.
    return NULL;
}
// get start slot for interface
// returns -1 if interface not found
DWORD MethodTable::GetStartSlotForInterface(MethodTable* pMTIntfClass)
{
    InterfaceInfo_t* pInfo = FindInterface(pMTIntfClass);
    if (pInfo != NULL)
    {
        DWORD startSlot = pInfo->m_wStartSlot;
        _ASSERTE(startSlot != -1);
        return startSlot;
    }
    return -1;
}

// get start slot for interface.
// This does no lookup. You better know that this MethodTable has an interface
// in its map at that index -- or else you are reading garbage and will die.
DWORD MethodTable::GetStartSlotForInterface(DWORD index)
{
    _ASSERTE(index < m_wNumInterface);
    InterfaceInfo_t* pInfo = &m_pIMap[index];
    _ASSERTE(pInfo != NULL);
    DWORD startSlot = pInfo->m_wStartSlot;
    _ASSERTE(startSlot != -1);
    return startSlot;
}

// Linear scan of the interface map for the entry whose vtable section
// contains the given slot number; NULL when the slot belongs to none.
InterfaceInfo_t *MethodTable::GetInterfaceForSlot(DWORD slotNumber)
{
    InterfaceInfo_t *pInterfaces = m_pIMap;
    InterfaceInfo_t *pInterfacesEnd = m_pIMap + m_wNumInterface;
    while (pInterfaces < pInterfacesEnd)
    {
        DWORD startSlot = pInterfaces->m_wStartSlot;
        if (slotNumber >= startSlot)
        {
            MethodTable *pMT = pInterfaces->m_pMethodTable;
            // Make sure that all interfaces have no nonvirtual slots - otherwise
            // we need to touch the class object to get the vtable section size
            _ASSERTE(pMT->GetTotalSlots() == pMT->GetClass()->GetNumVtableSlots());
            if (slotNumber - startSlot < pMT->GetTotalSlots())
                return pInterfaces;
        }
        pInterfaces++;
    }
    return NULL;
}
// get the method desc given the interface method desc
// Resolves which MethodDesc on pServer's actual type implements the
// given interface method. For COM objects the interface's own
// MethodDesc may be returned (the call marshals through COM). Returns
// NULL when the class does not implement the interface.
MethodDesc *MethodTable::GetMethodDescForInterfaceMethod(MethodDesc *pItfMD, OBJECTREF pServer)
{
    MethodTable * pItfMT = pItfMD->GetMethodTable();
    _ASSERTE(pItfMT->IsInterface());
    MethodTable *pServerMT = pServer->GetMethodTable()->AdjustForThunking(pServer);
    MethodDesc *pMD = NULL;
    // First handle pure COM+ types
    if(!IsComObjectType())
    {
        // Get the start slot using the interface class
        DWORD start = pServerMT->GetStartSlotForInterface(pItfMT);
        if(-1 != start)
        {
            pMD = pServerMT->GetMethodDescForSlot(start + pItfMD->GetSlot());
        }
    }
    else
    {
        _ASSERTE(pServerMT == this);
        // We now handle __ComObject class that doesn't have Dynamic Interface Map
        if (!HasDynamicInterfaceMap())
        {
            pMD = pItfMD;
        }
        else
        {
            // Now we handle the more complex extensible RCW's. The first thing to do is check
            // to see if the static definition of the extensible RCW specifies that the class
            // implements the interface.
            DWORD start = GetStartSlotForInterface(pItfMT);
            if (-1 != start)
            {
                pMD = GetMethodDescForSlot(start + pItfMD->GetSlot());
            }
            // The interface is not in the static class definition so we need to look at the
            // dynamic interfaces.
            else if (FindDynamicallyAddedInterface(pItfMT))
            {
                // This interface was added to the class dynamically so it is implemented
                // by the COM object. We treat this dynamically added interfaces the same
                // way we treat COM objects. That is by using the interface vtable.
                pMD = pItfMD;
            }
        }
    }
    return pMD;
}
// This is a helper routine to get the address of code from the server and method descriptor
// It is used by remoting to figure out the address to which the method call needs to be
// dispatched.
// May trigger GC (interface resolution) and throws
// MissingMethodException when the server does not implement the
// requested interface method.
const BYTE *MethodTable::GetTargetFromMethodDescAndServer(MethodDesc *pMD, OBJECTREF *ppServer, BOOL fContext)
{
    THROWSCOMPLUSEXCEPTION();
    TRIGGERSGC();
    if(pMD->GetMethodTable()->IsInterface())
    {
        _ASSERTE(*ppServer != NULL);
        MethodDesc* pMDTemp = pMD;
        // NOTE: This method can trigger GC
        pMD = (*ppServer)->GetMethodTable()->GetMethodDescForInterfaceMethod(pMD, *ppServer);
        if(NULL == pMD)
        {
            // Build the class and method names for the exception text.
            LPCWSTR szClassName;
            DefineFullyQualifiedNameForClassW();
            szClassName = GetFullyQualifiedNameForClassW(pMDTemp->GetClass());
#define MAKE_TRANSLATIONFAILED szMethodName=L""
            MAKE_WIDEPTR_FROMUTF8_FORPRINT(szMethodName, pMDTemp->GetName());
#undef MAKE_TRANSLATIONFAILED
            COMPlusThrow(kMissingMethodException, IDS_EE_MISSING_METHOD, szClassName, szMethodName);
        }
    }
    // get the target depending on whether the method is virtual or non-virtual
    // like a constructor, private or final method
    const BYTE* pTarget = NULL;
    if (pMD->GetMethodTable()->IsInterface())
    {
        // Handle the special cases where the invoke is happening through an interface class
        // (typically for COM interop).
        pTarget = pMD->GetUnsafeAddrofCode();
    }
    else
    {
        //if(!fContext)
        //{
            pTarget = (pMD->DontVirtualize() ? pMD->GetPreStubAddr() : pMD->GetAddrofCode(*ppServer));
        //}
        /*else
        {
            // This is the case where we are forcing the execution of the call in the current
            // context. We have to infer the actual address of code from either the stub or
            // the vtable.
            if(pMD->DontVirtualize())
            {
                pTarget = NULL;
            }
            else
            {
                MethodTable *pServerMT = (*ppServer)->GetMethodTable()->AdjustForThunking(*ppServer);
                pTarget = (BYTE *)*(pServerMT->GetClass()->GetMethodSlot(pMD));
            }
        }*/
    }
    _ASSERTE(NULL != pTarget);
    return pTarget;
}
// Class-specific placement allocator: EEClass instances live in the
// loader's low-frequency heap (never individually freed), with
// per-loader bookkeeping in debug builds.
void *EEClass::operator new(size_t size, ClassLoader *pLoader)
{
#ifdef _DEBUG
    pLoader->m_dwEEClassData += size;
#endif
    void *pTmp;
    WS_PERF_SET_HEAP(LOW_FREQ_HEAP);
    pTmp = pLoader->GetLowFrequencyHeap()->AllocMem(size);
    WS_PERF_UPDATE_DETAIL("EEClass new LowFreq", size, pTmp);
    return pTmp;
}
// Static helper to create a new method table. This is the only
// way to allocate a new MT. Don't try calling new / ctor.
// Everything comes out of a single high-frequency-heap allocation laid
// out as: [GC series (dwGCSize)][MethodTable][vtable slots][statics]
// [optional dynamic-interface count DWORD][interface map].
// Returns NULL on out-of-memory.
MethodTable * MethodTable::AllocateNewMT(
    DWORD dwVtableSlots,
    DWORD dwStaticFieldBytes,
    DWORD dwGCSize,
    DWORD dwNumInterfaces,
    ClassLoader *pLoader,
    BOOL isIFace,
    BOOL bHasDynamicInterfaceMap
)
{
    // GCSize must be aligned
    _ASSERTE((dwGCSize & 3) == 0);
    size_t size = sizeof(MethodTable);
#ifdef _DEBUG
    BOOL bEmptyIMap = FALSE;
    // Add an extra slot if the table is empty.
    if (dwNumInterfaces == 0)
    {
        dwNumInterfaces++;
        bEmptyIMap = TRUE;
    }
    // interface map is placed at the end of the vtable,
    // in the debug build, make sure it is not getting trashed
    dwNumInterfaces++;
#endif
    // size without the interface map
    DWORD cbTotalSize = (DWORD)size + dwVtableSlots * sizeof(SLOT) + dwStaticFieldBytes + dwGCSize;
    // size with the interface map. DynamicInterfaceMap have an extra DWORD added to the end of the normal interface
    // map. This will be used to store the count of dynamically added interfaces (the ones that are not in
    // the metadata but are QI'ed for at runtime).
    DWORD newSize = cbTotalSize + (bHasDynamicInterfaceMap ? sizeof(DWORD) : 0) + dwNumInterfaces * sizeof(InterfaceInfo_t);
    WS_PERF_SET_HEAP(HIGH_FREQ_HEAP);
    BYTE *pData = (BYTE *) pLoader->GetHighFrequencyHeap()->AllocMem(newSize);
    if (pData == NULL)
        return NULL;
    WS_PERF_UPDATE_DETAIL("MethodTable:new:HighFreq", newSize, pData);
    // The MethodTable proper starts just past the GC description block.
    MethodTable* pMT = (MethodTable*)(pData + dwGCSize);
#ifdef _DEBUG
    pLoader->m_dwGCSize += dwGCSize;
    pLoader->m_dwInterfaceMapSize += (dwNumInterfaces * sizeof(InterfaceInfo_t));
    pLoader->m_dwMethodTableSize += (DWORD)size;
    pLoader->m_dwVtableData += (dwVtableSlots * sizeof(SLOT));
    pLoader->m_dwStaticFieldData += dwStaticFieldBytes;
#endif
    // initialize the total number of slots
    pMT->m_cbSlots = dwVtableSlots;
    // interface map is at the end of the vtable
    pMT->m_pIMap = (InterfaceInfo_t *)(pData+cbTotalSize); // pointer interface map
    pMT->m_pInterfaceVTableMap = NULL;
    _ASSERTE(((WORD) dwNumInterfaces) == dwNumInterfaces);
    // in the debug build, keep a dummmy slot just above the IMAP to
    // make sure it is not getting trashed.
#ifdef _DEBUG
    pMT->m_pIMap->m_wStartSlot = 0xCDCD;
    pMT->m_pIMap->m_wFlags = 0xCDCD;
    pMT->m_pIMap->m_pMethodTable = (MethodTable*)((sizeof(int *) == 4)?0xCDCDCDCDL:0xCDCDCDCDCDCDCDCD);
    pMT->m_wNumInterface = (WORD) (dwNumInterfaces-1);
    pMT->m_pIMap = (InterfaceInfo_t*)(((BYTE*)pMT->m_pIMap) + sizeof(InterfaceInfo_t));
    // Readjust the IMap size because we added an extra one above.
    if (bEmptyIMap)
        pMT->m_wNumInterface = 0;
#else
    pMT->m_wNumInterface = (WORD) dwNumInterfaces;
#endif
    // Extensible RCW's are prefixed with the count of dynamic interfaces.
    if (bHasDynamicInterfaceMap)
    {
        pMT->m_pIMap = (InterfaceInfo_t*)(((BYTE*)pMT->m_pIMap) + sizeof(DWORD));
        *(((DWORD *)pMT->m_pIMap) - 1) = 0;
    }
    WS_PERF_UPDATE_COUNTER(METHOD_TABLE, HIGH_FREQ_HEAP, 1);
    WS_PERF_UPDATE_COUNTER(VTABLES, HIGH_FREQ_HEAP, dwVtableSlots * sizeof(SLOT));
    WS_PERF_UPDATE_COUNTER(GCINFO, HIGH_FREQ_HEAP, dwGCSize);
    WS_PERF_UPDATE_COUNTER(INTERFACE_MAPS, HIGH_FREQ_HEAP, dwNumInterfaces*sizeof(InterfaceInfo_t));
    WS_PERF_UPDATE_COUNTER(STATIC_FIELDS, HIGH_FREQ_HEAP, dwStaticFieldBytes);
    return pMT;
}
// Tear down this EEClass at unload time: notify the profiler (start/finish
// bracket), release COM interop data, destruct every MethodDesc in the chunk
// list, and free auxiliary structures. No-op for classes never restored.
void EEClass::destruct()
{
// If we haven't been restored, we can ignore the class
if (!IsRestored())
return;
// we can't count on the parent class still being around. If it lives in another module that
// module may have already been unloaded. So nuke it here and catch any references to parent
// later.
SetParentClass (NULL);
if (IsInterface() && m_dwInterfaceId != ((UINT32)(-1)))
{
// Mark our entry in the global interface map vtable so it can be reclaimed.
SystemDomain::GetAddressOfGlobalInterfaceVTableMap()[m_dwInterfaceId] = (LPVOID)(-2);
}
#ifdef PROFILING_SUPPORTED
// If profiling, then notify the class is getting unloaded.
ClassID clsId = NULL;
if (CORProfilerTrackClasses() && !IsArrayClass())
g_profControlBlock.pProfInterface->ClassUnloadStarted(
(ThreadID) GetThread(), clsId = (ClassID) TypeHandle(this).AsPtr());
#endif // PROFILING_SUPPORTED
// clean up any COM Data
if (m_pccwTemplate)
CleanupCCWTemplate(m_pccwTemplate);
m_pccwTemplate = NULL;
if (m_pComclassfac)
CleanupComclassfac(m_pComclassfac);
m_pComclassfac = NULL;
// Delegate classes carry extra thunk/marshaling state that must be released.
if (IsAnyDelegateClass()) {
if ( ((DelegateEEClass*)this)->m_pStaticShuffleThunk ) {
((DelegateEEClass*)this)->m_pStaticShuffleThunk->DecRef();
}
delete ((DelegateEEClass*)this)->m_pUMThunkMarshInfo;
}
// The following is rather questionable. If we are destructing the context
// proxy class, we don't want it asserting everywhere that its vtable is
// strange. So lose the flag to suppress the asserts. We're unloading the
// class anyway.
m_pMethodTable->MarkAsNotThunking();
// Destruct the method descs by walking the chunks.
DWORD i, n;
MethodDescChunk *pChunk = m_pChunks;
while (pChunk != NULL)
{
n = pChunk->GetCount();
for (i = 0; i < n; i++)
{
MethodDesc *pMD = pChunk->GetMethodDescAt(i);
pMD->destruct();
}
pChunk = pChunk->GetNextChunk();
}
// Destroy the reflection StaticFinalField stuff
// @TODO: How should we clean this up. We are failing because
// this stuff is run way after the VM is working.
//if (*m_ExposedClassObject != NULL) {
// REFLECTCLASSBASEREF pRefClass;
// pRefClass = (REFLECTCLASSBASEREF) GetExposedClassObject();
// FieldDesc* fld = (FieldDesc*) pRefClass->GetData();
// if (fld)
// delete fld;
//}
// Preloaded objects live in the prejit image and must not be deleted.
if (m_pSparseVTableMap != NULL && !GetModule()->IsPreloadedObject(this))
delete m_pSparseVTableMap;
#ifdef PROFILING_SUPPORTED
// If profiling, then notify the class is getting unloaded.
if (CORProfilerTrackClasses() && !IsArrayClass())
g_profControlBlock.pProfInterface->ClassUnloadFinished((ThreadID) GetThread(), clsId, S_OK);
#endif // PROFILING_SUPPORTED
}
// Subtypes are recorded in a chain from the super, so that we can e.g. backpatch
// up & down the hierarchy.
//
// Lock-free insertion of pSub at the head of this class's children chain,
// using interlocked compare-exchange; removal only happens during appdomain
// unload when the EE is suspended (hence the cooperative-GC bracket).
void EEClass::NoticeSubtype(EEClass *pSub)
{
// We have no locks around ourselves. To avoid heavy-weight locking and the
// potential for deadlocks, all insertions happen with interlocked
// instructions. But, during appdomain unloading, the teardown relies on the fact
// that the EE is suspended and only one thread is active. Therefore we must be in
// cooperative mode now to ensure that we are prevented from interfering with an
// unload.
BEGIN_ENSURE_COOPERATIVE_GC();
// Only attempt to be the first child if it looks like no others are present,
// to avoid excessive LOCK prefixes on MP machines.
if (m_ChildrenChain == NULL)
if (FastInterlockCompareExchange((void **) &m_ChildrenChain,
pSub,
NULL) == NULL)
{
goto done;
}
// We have to add ourselves to the sibling chain. Add at the head.
while (TRUE)
{
// Grab atomically each time through
EEClass *pOldHead = m_ChildrenChain;
_ASSERTE(pOldHead && "How did a remove happen while we are in cooperative mode?");
// Point the new node at the current head before publishing it.
pSub->m_SiblingsChain = pOldHead;
if (FastInterlockCompareExchange((void **) &m_ChildrenChain,
pSub,
pOldHead) == pOldHead)
{
break;
}
// someone raced to add a sibling. Skip over all newly added siblings and
// keep trying.
}
done:
END_ENSURE_COOPERATIVE_GC();
}
/* static */
// Compute a common supertype of ta and tb (used when merging types, e.g. at
// control-flow join points):
// - array/array pairs delegate to MergeArrayTypeHandlesToCommonParent;
//   an array merged with a non-array decays to System.Array first
// - interface pairs check whether one extends the other, else look for any
//   shared interface, else fall back to Object
// - class/interface pairs check whether the class implements the interface
// - class pairs walk both parent chains up to equal depth, then in lockstep
//   until they meet; if they only meet at Object, shared interfaces are tried
TypeHandle TypeHandle::MergeTypeHandlesToCommonParent(TypeHandle ta, TypeHandle tb)
{
_ASSERTE(!ta.IsNull() && !tb.IsNull());
if (ta == tb)
return ta;
// Handle the array case
if (ta.IsArray())
{
if (tb.IsArray())
return MergeArrayTypeHandlesToCommonParent(ta, tb);
ta = TypeHandle(g_pArrayClass); // keep merging from here.
}
else if (tb.IsArray())
tb = TypeHandle(g_pArrayClass);
_ASSERTE(ta.IsUnsharedMT() && tb.IsUnsharedMT());
MethodTable *pMTa = ta.AsMethodTable();
MethodTable *pMTb = tb.AsMethodTable();
InterfaceInfo_t *pBInterfaceMap;
InterfaceInfo_t *pAInterfaceMap;
DWORD i;
if (pMTb->IsInterface())
{
if (pMTa->IsInterface())
{
//
// Both classes are interfaces. Check that if one
// interface extends the other.
//
// Does tb extend ta ?
//
pBInterfaceMap = pMTb->GetInterfaceMap();
for (i = 0; i < pMTb->GetNumInterfaces(); i++)
{
if (TypeHandle(pBInterfaceMap[i].m_pMethodTable) == ta)
{
// tb extends ta, so our merged state should be ta
return ta;
}
}
//
// Does ta extend tb ?
//
pAInterfaceMap = pMTa->GetInterfaceMap();
for (i = 0; i < pMTa->GetNumInterfaces(); i++)
{
if (TypeHandle(pAInterfaceMap[i].m_pMethodTable) == tb)
{
// ta extends tb, so our merged state should be tb
return tb;
}
}
// NOTE: also reached via goto from the class/class path below when the
// only common ancestor is Object; pA/pBInterfaceMap are set before the jump.
InterfaceMerge:
//@TODO: HACK - An incredibly slow work around for the @todo below that
// allows WFCSelfhost to verify.
for (i = 0; i < pMTb->GetNumInterfaces(); i++)
{
for (DWORD j = 0; j < pMTa->GetNumInterfaces(); j++)
{
if (TypeHandle(pAInterfaceMap[j].m_pMethodTable) == TypeHandle(pBInterfaceMap[i].m_pMethodTable))
{
return TypeHandle(pAInterfaceMap[j].m_pMethodTable);
}
}
}
// @TODO: Create a temp interface which is the intersection of the two interfaces.
// No compatible merge found - using Object
return TypeHandle(g_pObjectClass);
}
else
{
//
// tb is an interface, but ta is not - check that ta
// implements tb
//
// @TODO: Is a class-interface merge legal?
//
InterfaceInfo_t *pAInterfaceMap = pMTa->GetInterfaceMap();
for (i = 0; i < pMTa->GetNumInterfaces(); i++)
{
if (TypeHandle(pAInterfaceMap[i].m_pMethodTable) == tb)
{
// It does implement it, so our merged state should be tb
return tb;
}
}
// No compatible merge found - using Object
return TypeHandle(g_pObjectClass);
}
}
else if (pMTa->IsInterface())
{
//
// ta is an interface, but tb is not - therefore check that
// tb implements ta
//
InterfaceInfo_t *pBInterfaceMap = pMTb->GetInterfaceMap();
for (i = 0; i < pMTb->GetNumInterfaces(); i++)
{
if (TypeHandle(pBInterfaceMap[i].m_pMethodTable) == ta)
{
// It does implement it, so our merged state should be ta
return ta;
}
}
// No compatible merge found - using Object
return TypeHandle(g_pObjectClass);
}
DWORD aDepth = 0;
DWORD bDepth = 0;
TypeHandle tSearch;
// find the depth in the class hierarchy for each class
for (tSearch = ta; (!tSearch.IsNull()); tSearch = tSearch.GetParent())
aDepth++;
for (tSearch = tb; (!tSearch.IsNull()); tSearch = tSearch.GetParent())
bDepth++;
// for whichever class is lower down in the hierarchy, walk up the superclass chain
// to the same level as the other class
while (aDepth > bDepth)
{
ta = ta.GetParent();
aDepth--;
}
while (bDepth > aDepth)
{
tb = tb.GetParent();
bDepth--;
}
// Now both are at the same depth; walk up in lockstep until they meet.
while (ta != tb)
{
ta = ta.GetParent();
tb = tb.GetParent();
}
// If the classes only meet at Object, try to find a shared interface instead.
if (ta == TypeHandle(g_pObjectClass))
{
pBInterfaceMap = pMTb->GetInterfaceMap();
pAInterfaceMap = pMTa->GetInterfaceMap();
goto InterfaceMerge;
}
// If no compatible merge is found, we end up using Object
_ASSERTE(!ta.IsNull());
return ta;
}
/* static */
// Merge two array type handles to a common parent type.
// Arrays merge only if their ranks match (with SZARRAY/ARRAY of rank 1
// treated as compatible); element types are then merged recursively for
// nested arrays and object-ref elements. Anything else decays to System.Array.
TypeHandle TypeHandle::MergeArrayTypeHandlesToCommonParent(TypeHandle ta, TypeHandle tb)
{
TypeHandle taElem;
TypeHandle tMergeElem;
// If they match we are good to go.
if (ta == tb)
return ta;
// System.Array itself absorbs any other array type.
if (ta == TypeHandle(g_pArrayClass))
return ta;
else if (tb == TypeHandle(g_pArrayClass))
return tb;
// Get the rank and kind of the first array
DWORD rank = ta.AsArray()->GetRank();
CorElementType taKind = ta.GetNormCorElementType();
CorElementType mergeKind = taKind;
// if no match on the rank the common ancestor is System.Array
if (rank != tb.AsArray()->GetRank())
return TypeHandle(g_pArrayClass);
CorElementType tbKind = tb.GetNormCorElementType();
if (tbKind != taKind)
{
// SZARRAY and rank-1 ARRAY differ only in representation; merge as SZARRAY.
if (CorTypeInfo::IsArray(tbKind) &&
CorTypeInfo::IsArray(taKind) && rank == 1)
mergeKind = ELEMENT_TYPE_SZARRAY;
else
return TypeHandle(g_pArrayClass);
}
// If both are arrays of reference types, return an array of the common
// ancestor.
taElem = ta.AsArray()->GetElementTypeHandle();
if (taElem == tb.AsArray()->GetElementTypeHandle())
{
// The element types match, so we are good to go.
tMergeElem = taElem;
}
else if (taElem.IsArray() && tb.AsArray()->GetElementTypeHandle().IsArray())
{
// Arrays - Find the common ancestor of the element types.
tMergeElem = MergeArrayTypeHandlesToCommonParent(taElem, tb.AsArray()->GetElementTypeHandle());
}
else if (CorTypeInfo::IsObjRef(taElem.GetSigCorElementType()) &&
CorTypeInfo::IsObjRef(tb.AsArray()->GetElementTypeHandle().GetSigCorElementType()))
{
// Find the common ancestor of the element types.
tMergeElem = MergeTypeHandlesToCommonParent(taElem, tb.AsArray()->GetElementTypeHandle());
}
else
{
// The element types have nothing in common.
return TypeHandle(g_pArrayClass);
}
// Load the array of the merged element type.
return tMergeElem.GetModule()->GetClassLoader()->FindArrayForElem(tMergeElem, mergeKind, rank);
}
// Return the layout descriptor for a class with explicit/sequential layout.
// Only valid when HasLayout() is true, i.e. this is really a LayoutEEClass.
EEClassLayoutInfo *EEClass::GetLayoutInfo()
{
    _ASSERTE(HasLayout());
    LayoutEEClass *pLayoutClass = (LayoutEEClass *) this;
    return &pLayoutClass->m_LayoutInfo;
}
// Allocate and record an interface ID for this interface class.
// Must only be called once (m_dwInterfaceId starts out as -1).
UINT32 EEClass::AssignInterfaceId()
{
    THROWSCOMPLUSEXCEPTION();
    _ASSERTE(IsInterface());
    _ASSERTE(m_dwInterfaceId == -1);
    // !!! HACK COUGH UGGH
    // Only one "shared" vtable map manager is currently supported, so every
    // shared class draws its ID from the system domain's manager instead.
    BaseDomain *pIdDomain = GetModule()->GetDomain();
    if (pIdDomain == SharedDomain::GetDomain())
        pIdDomain = SystemDomain::System();
    m_dwInterfaceId = pIdDomain->GetInterfaceVTableMapMgr().AllocInterfaceId();
    return m_dwInterfaceId;
}
// Retrieve the COM GUID for this type, caching the result on interfaces.
// Lookup order:
//   1. the interface's cached GuidInfo, if present
//   2. an explicit GUID in the metadata (absence is remembered via VMFLAG_NO_GUID)
//   3. if bGenerateIfNotFound: a GUID generated deterministically from the
//      stringized interface definition, or from the class name plus the
//      assembly's typelib guid string.
// *pGuid receives GUID_NULL when no GUID is available.
void EEClass::GetGuid(GUID *pGuid, BOOL bGenerateIfNotFound)
{
THROWSCOMPLUSEXCEPTION();
SIZE_T cchName; // Length of the name (possibly after decoration).
CQuickArray<BYTE> rName; // Buffer to accumulate signatures.
SIZE_T cbCur; // Current offset.
HRESULT hr = S_OK; // A result.
LPWSTR szName; // Name to turn to a guid.
MethodTable*pMT = GetMethodTable(); // This classes method table.
BOOL bGenerated = FALSE; // A flag indicating if we generated the GUID from name.
_ASSERTE(pGuid != NULL);
// First check to see if we have already cached the guid for this type.
// We currently only cache guids on interfaces.
if (IsInterface() && pMT->GetGuidInfo())
{
if (pMT->GetGuidInfo()->m_bGeneratedFromName)
{
// If the GUID was generated from the name then only return it
// if bGenerateIfNotFound is set.
if (bGenerateIfNotFound)
*pGuid = pMT->GetGuidInfo()->m_Guid;
else
*pGuid = GUID_NULL;
}
else
{
*pGuid = pMT->GetGuidInfo()->m_Guid;
}
return;
}
if (m_VMFlags & VMFLAG_NO_GUID)
*pGuid = GUID_NULL;
else
{
// If there is a GUID in the metadata then return that.
GetMDImport()->GetItemGuid(GetCl(), pGuid);
if (*pGuid == GUID_NULL)
{
// Remember that we didn't find the GUID, so we can skip looking during
// future checks. (Note that this is a very important optimization in the
// prejit case.)
FastInterlockOr(&m_VMFlags, VMFLAG_NO_GUID);
}
}
if (*pGuid == GUID_NULL && bGenerateIfNotFound)
{
// For interfaces, concatenate the signatures of the methods and fields.
if (!IsNilToken(GetCl()) && IsInterface())
{
// Retrieve the stringized interface definition.
cbCur = GetStringizedItfDef(TypeHandle(GetMethodTable()), rName);
// Pad up to a whole WCHAR.
if (cbCur % sizeof(WCHAR))
{
SIZE_T cbDelta = sizeof(WCHAR) - (cbCur % sizeof(WCHAR));
IfFailThrow(rName.ReSize(cbCur + cbDelta));
memset(rName.Ptr() + cbCur, 0, cbDelta);
cbCur += cbDelta;
}
// Point to the new buffer.
cchName = cbCur / sizeof(WCHAR);
szName = reinterpret_cast<LPWSTR>(rName.Ptr());
}
else
{
// Get the name of the class.
DefineFullyQualifiedNameForClassW();
szName = GetFullyQualifiedNameForClassNestedAwareW(this);
if (szName == NULL)
return;
cchName = wcslen(szName);
// Enlarge buffer for class name.
cbCur = cchName * sizeof(WCHAR);
IfFailThrow(rName.ReSize(cbCur+ sizeof(WCHAR) ));
wcscpy(reinterpret_cast<LPWSTR>(rName.Ptr()), szName);
// Add the assembly guid string to the class name.
IfFailThrow(GetStringizedTypeLibGuidForAssembly(GetAssembly(), rName, cbCur, &cbCur));
// Pad to a whole WCHAR.
if (cbCur % sizeof(WCHAR))
{
IfFailThrow(rName.ReSize(cbCur + sizeof(WCHAR)-(cbCur%sizeof(WCHAR))));
while (cbCur % sizeof(WCHAR))
rName[cbCur++] = 0;
}
// Point to the new buffer.
szName = reinterpret_cast<LPWSTR>(rName.Ptr());
cchName = cbCur / sizeof(WCHAR);
// Don't want to have to pad.
_ASSERTE((sizeof(GUID) % sizeof(WCHAR)) == 0);
}
// Generate guid from name.
CorGuidFromNameW(pGuid, szName, cchName);
// Remember we generated the guid from the type name.
bGenerated = TRUE;
}
// Cache the guid in the type, if not already cached.
// We currently only do this for interfaces.
if (IsInterface() && !pMT->GetGuidInfo() && *pGuid != GUID_NULL)
{
// Allocate the guid information.
GuidInfo *pInfo =
(GuidInfo*)GetClassLoader()->GetHighFrequencyHeap()->AllocMem(sizeof(GuidInfo), TRUE);
pInfo->m_Guid = *pGuid;
pInfo->m_bGeneratedFromName = bGenerated;
// Set it in the interface method table.
pMT->m_pGuidInfo = pInfo;
}
}
//==========================================================================
// This function is very specific about how it constructs a EEClass. It first
// determines the necessary size of the vtable and the number of statics that
// this class requires. The necessary memory is then allocated for a EEClass
// and its vtable and statics. The class members are then initialized and
// the memory is then returned to the caller
//
// LPEEClass CreateClass()
//
// Parameters :
// [in] scope - scope of the current class not the one requested to be opened
// [in] cl - class token of the class to be created.
// [out] ppEEClass - pointer to pointer to hold the address of the EEClass
// allocated in this function.
// Return : returns an HRESULT indicating the success of this function.
//
// This parameter has been removed but might need to be reinstated if the
// global for the metadata loader is removed.
// [in] pIMLoad - MetaDataLoader class/object for the current scope.
//==========================================================================
// Allocate and minimally initialize an EEClass (or a Layout/Delegate/Enum
// subclass) for the given typedef token. Reads the typedef's attributes and
// extends token, validates layout/interface metadata, and records security
// declaration flags. On failure *ppEEClass is NULLed and an error HRESULT
// is returned; the vtable/statics are set up later by BuildMethodTable.
HRESULT EEClass::CreateClass(Module *pModule, mdTypeDef cl, BOOL fHasLayout, BOOL fDelegate, BOOL fIsBlob, BOOL fIsEnum, LPEEClass* ppEEClass)
{
// A type cannot be both a delegate and have explicit layout.
_ASSERTE(!(fHasLayout && fDelegate));
HRESULT hr = S_OK;
EEClass *pEEClass = NULL;
IMDInternalImport *pInternalImport;
ClassLoader *pLoader;
if (!ppEEClass)
return E_FAIL;
//============================================================================
// @TODO - LBS!
// vtabsize and static size need to be converted from pointer sizes to #'s
// of bytes this will be very important for 64 bit NT!
// We will need to call on IMetaDataLoad to get these sizes and fill out the
// tables
// From the classref call on metadata to resolve the classref and check scope
// to make sure that this class is in the same scope otherwise we need to open
// a new scope and possibly file.
// if the scopes are different call the code to load a new file and get the new scope
// scopes are the same so we can use the existing scope to get the class info
// This method needs to be fleshed out more; it currently just returns enough
// space for the defined EEClass and the vtable and statics are not set.
//=============================================================================
pLoader = pModule->GetClassLoader();
// Pick the concrete EEClass subclass based on what kind of type this is.
if (fHasLayout)
{
pEEClass = new (pLoader) LayoutEEClass(pLoader);
}
else if (fDelegate)
{
pEEClass = new (pLoader) DelegateEEClass(pLoader);
}
else if (fIsEnum)
{
pEEClass = new (pLoader) EnumEEClass(pLoader);
}
else
{
pEEClass = new (pLoader) EEClass(pLoader);
}
if (pEEClass == NULL)
{
hr = E_OUTOFMEMORY;
goto exit;
}
pEEClass->m_cl = cl;
pInternalImport = pModule->GetMDImport();
mdToken tkExtends = mdTokenNil;
pInternalImport->GetTypeDefProps(
cl,
&pEEClass->m_dwAttrClass,
&tkExtends
);
DWORD dwAttrClass = pEEClass->m_dwAttrClass; //cache the value to avoid multiple dereferencing
// MDVal check: can't be both tdSequentialLayout and tdExplicitLayout
if((dwAttrClass & tdLayoutMask) == tdLayoutMask)
{
hr = E_FAIL;
goto exit;
}
if (IsTdInterface(dwAttrClass))
{
// MDVal check: must have nil tkExtends and must be tdAbstract
if((tkExtends & 0x00FFFFFF)||(!IsTdAbstract(dwAttrClass))) { hr = E_FAIL; goto exit; }
// Set the interface ID to -1 to indicate it hasn't been set yet.
pEEClass->m_dwInterfaceId = -1;
}
//
// Initialize SecurityProperties structure
//
if (Security::IsSecurityOn() && IsTdHasSecurity(dwAttrClass))
{
DWORD dwSecFlags;
DWORD dwNullDeclFlags;
hr = Security::GetDeclarationFlags(pInternalImport, cl, &dwSecFlags, &dwNullDeclFlags);
if (FAILED(hr))
goto exit;
pEEClass->m_SecProps.SetFlags(dwSecFlags, dwNullDeclFlags);
}
if (fIsBlob)
pEEClass->m_VMFlags |= VMFLAG_ISBLOBCLASS;
if (pModule->GetAssembly()->IsShared())
pEEClass->m_VMFlags |= VMFLAG_SHARED;
if (fHasLayout)
pEEClass->SetHasLayout();
#ifdef _DEBUG
pModule->GetClassLoader()->m_dwDebugClasses++;
#endif
exit:
if (FAILED(hr))
{
// @TODO delete pEEClass
*ppEEClass = NULL;
}
else
{
*ppEEClass = pEEClass;
}
return hr;
}
//
// @TODO: Would be nice to not add Object's ctor method
//
/* static */ void EEClass::CreateObjectClassMethodHashBitmap(EEClass *pObjectClass)
{
    // Build a bloom-filter-style bitmap of the hashes of every method name on
    // Object, so CouldMethodExistInClass can quickly rule names out.
    for (DWORD dwSlot = 0; dwSlot < pObjectClass->GetNumVtableSlots(); dwSlot++)
    {
        MethodDesc *pSlotMethod = pObjectClass->GetUnknownMethodDescForSlot(dwSlot);
        LPCUTF8 pszName = pSlotMethod->GetNameOnNonArrayClass();
        _ASSERTE(pszName != NULL);
        // Hash the name into a bit index and set that bit.
        DWORD dwBit = HashStringA(pszName) % OBJ_CLASS_METHOD_HASH_BITMAP_BITS;
        g_ObjectClassMethodHashBitmap[dwBit >> 3] |= (1 << (dwBit & 7));
    }
    g_ObjectClassMethodHashBitmapInited = TRUE;
}
//
// Look at this method carefully before using.
//
// Returns whether this method could exist in this class or its superclasses. However, constructors
// and clinits are never added to the hash table, so it won't find them.
//
// If this returns 0, the method definitely does NOT exist. If it returns non-zero, it may exist.
//
// Probe the per-class method-name hash bitmaps to decide whether a method
// named pszMethodName could exist on pClass or (for non-interfaces) any of
// its superclasses. dwHashName may pass in a precomputed HashStringA value,
// or 0 to have it computed here. Returns 0 only when the method definitely
// does not exist; non-zero means "maybe".
/* static */ DWORD EEClass::CouldMethodExistInClass(EEClass *pClass, LPCUTF8 pszMethodName, DWORD dwHashName)
{
if (dwHashName == 0)
dwHashName = HashStringA(pszMethodName);
DWORD dwMethodHashBit = dwHashName % METHOD_HASH_BITS;
_ASSERTE(pClass != NULL);
if (pClass->IsInterface())
{
// If it's an interface, we search only one node - we do not recurse into the parent, Object
return (pClass->m_MethodHash[dwMethodHashBit >> 3] & (1 << (dwMethodHashBit & 7)));
}
else
{
// Walk up the superclass chain, testing each class's bitmap.
do
{
if (pClass->m_MethodHash[dwMethodHashBit >> 3] & (1 << (dwMethodHashBit & 7)))
{
// This class may have a method by this name
// If it's the Object class, we have a second hash bitmap, so if the second hash bitmap says "no",
// then we're ok
if (pClass->GetMethodTable() == g_pObjectClass && g_ObjectClassMethodHashBitmapInited)
{
DWORD dwObjBitNum = dwHashName % OBJ_CLASS_METHOD_HASH_BITMAP_BITS;
if (g_ObjectClassMethodHashBitmap[dwObjBitNum >> 3] & (1 << (dwObjBitNum & 7)))
return TRUE;
}
else
{
// Lazily build Object's second-level bitmap for future queries.
if (!g_ObjectClassMethodHashBitmapInited)
CreateObjectClassMethodHashBitmap(g_pObjectClass->GetClass());
return TRUE;
}
}
pClass = pClass->GetParentClass();
} while (pClass != NULL);
}
return FALSE;
}
//
// Create a hash of all methods in this class. The hash is from method name to MethodDesc.
//
// Build a hash table mapping method name -> MethodDesc for every vtable slot
// of this class. Used during method table construction to resolve overrides.
// Returns NULL on allocation/initialization failure.
MethodNameHash *EEClass::CreateMethodChainHash()
{
MethodNameHash * pHash = new MethodNameHash();
DWORD i;
WS_PERF_SET_HEAP(SYSTEM_HEAP);
WS_PERF_UPDATE("EEClass:MethodHash", 0, pHash);
if (pHash == NULL)
goto failure;
// Size the hash by the number of vtable slots we'll insert.
if (pHash->Init(GetNumVtableSlots()) == FALSE)
goto failure;
for (i = 0; i < GetNumVtableSlots(); i++)
{
MethodDesc *pCurMethod = GetUnknownMethodDescForSlot(i);
MethodDesc *pRealDesc;
// Resolve method impls so the hash is keyed by the body's name.
if(SUCCEEDED(GetRealMethodImpl(pCurMethod, i, &pRealDesc)))
{
if (pRealDesc != NULL)
{
// We use only method names on this class or a base
// class. If the method impl points to a method
// defined on the interface then we use the bodies
// name.
if(pRealDesc->IsInterface())
pRealDesc = pCurMethod;
LPCUTF8 pszName = pRealDesc->GetNameOnNonArrayClass();
pHash->Insert(pszName, pCurMethod); // We keep the body alias'd with the derived
}
}
}
// success
return pHash;
failure:
if (pHash != NULL)
delete pHash;
return NULL;
}
// Return the EEClass that lexically encloses this nested type,
// or NULL when this type is not nested.
EEClass *EEClass::GetEnclosingClass()
{
    if (!IsNested())
        return NULL;
    // Ask the metadata for the enclosing typedef token.
    mdTypeDef tdEnclosing = mdTypeDefNil;
    HRESULT hr = GetModule()->GetMDImport()->GetNestedClassProps(GetCl(), &tdEnclosing);
    _ASSERTE(SUCCEEDED(hr));
    // Fast path: the enclosing type may already be loaded in this module.
    MethodTable *pEnclosingMT = GetModule()->LookupTypeDef(tdEnclosing).AsMethodTable();
    if (pEnclosingMT)
        return pEnclosingMT->GetClass();
    // Slow path: load it through the class loader.
    NameHandle name(GetModule(), tdEnclosing);
    return GetClassLoader()->LoadTypeHandle(&name).GetClass();
}
// Prepend a MethodDescChunk onto this class's singly-linked chunk list.
void EEClass::AddChunk(MethodDescChunk *chunk)
{
    // Standard head insertion: new chunk points at the old head, then
    // becomes the head itself.
    chunk->SetNextChunk(m_pChunks);
    m_pChunks = chunk;
}
#ifdef EnC_SUPPORTED
// (Edit and Continue) Fix up a field desc for a field added via EnC by
// running it through the same InitializeFieldDescs path that
// BuildMethodTable uses, with one-element bmt structures built on the stack.
// Field counts the init path clobbers are saved and restored around the call.
HRESULT EEClass::FixupFieldDescForEnC(EnCFieldDesc *pFD, mdFieldDef fieldDef)
{
LOG((LF_ENC, LL_INFO100, "EEClass::InitializeFieldDescForEnC %s\n", GetMDImport()->GetNameOfFieldDef(fieldDef)));
#ifdef _DEBUG
// Optional debugger break, controlled by the EncFixupFieldBreak config value.
BOOL shouldBreak = g_pConfig->GetConfigDWORD(L"EncFixupFieldBreak", 0);
if (shouldBreak > 0) {
_ASSERTE(!"EncFixupFieldBreak");
}
#endif
// Build a one-field metadata description on the stack.
bmtMetaDataInfo bmtMetaData;
bmtMetaData.cFields = 1;
bmtMetaData.pFields = (mdToken*)_alloca(sizeof(mdToken));
bmtMetaData.pFields[0] = fieldDef;
bmtMetaData.pFieldAttrs = (DWORD*)_alloca(sizeof(DWORD));
bmtMetaData.pFieldAttrs[0] = GetModule()->GetMDImport()->GetFieldDefProps(fieldDef);
bmtMethAndFieldDescs bmtMFDescs;
// We need to alloc the memory, but don't have to fill it in. InitializeFieldDescs
// will copy pFD (1st arg) into here.
bmtMFDescs.ppFieldDescList = (FieldDesc**)_alloca(sizeof(FieldDesc*));
bmtEnumMethAndFields bmtEnumMF;
bmtFieldPlacement bmtFP;
// We don't have to fill this in - it'll be filled in if something goes wrong.
// We'll ignore it, but a bad HRESULT will be returned, so it's ok.
bmtErrorInfo bmtError;
bmtInternalInfo bmtInternal;
bmtInternal.pInternalImport = GetModule()->GetMDImport();
bmtInternal.pModule = GetModule();
bmtInternal.cl = m_cl; // This isn't actually used by InitializeFieldDescs right now,
// but it seems too fragile to not fill this in.
// We shouldn't have to fill this in b/c we're not allowed to EnC value classes, or
// anything else with layout info associated with it.
LayoutRawFieldInfo *pLayoutRawFieldInfos = (LayoutRawFieldInfo*)_alloca((2) * sizeof(LayoutRawFieldInfo));
// If not NULL, it means there are some by-value fields, and this contains an entry for each instance or static field,
// which is NULL if not a by value field, and points to the EEClass of the field if a by value field. Instance fields
// come first, statics come second.
EEClass **pByValueClassCache = NULL;
// InitializeFieldDescs are going to change these numbers to something wrong,
// even though we already have the right numbers. Save & restore after.
WORD wNumInstanceFields = m_wNumInstanceFields;
WORD wNumStaticFields = m_wNumStaticFields;
unsigned totalDeclaredFieldSize = 0;
HRESULT hr = InitializeFieldDescs(pFD,
pLayoutRawFieldInfos,
&bmtInternal,
&bmtMetaData,
&bmtEnumMF,
&bmtError,
&pByValueClassCache,
&bmtMFDescs,
&bmtFP,
&totalDeclaredFieldSize);
// Restore now
m_wNumInstanceFields = wNumInstanceFields;
m_wNumStaticFields = wNumStaticFields;
// PERF: For now, we turn off the fast equality check for valuetypes when a
// a field is modified by EnC. Consider doing a check and setting the bit only when
// necessary.
if (IsValueClass())
{
GetMethodTable()->SetNotTightlyPacked();
}
// even if InitializeFieldDesc fails, we want to know which field we're looking at.
pFD->SetMemberDef(fieldDef);
if (! SUCCEEDED(hr))
return hr;
if (pByValueClassCache) {
pFD->SetByValueClass(*pByValueClassCache);
}
pFD->SetMethodTable(GetMethodTable());
// Mark the field as added by Edit and Continue.
pFD->SetEnCNew();
return S_OK;
}
// (Edit and Continue) Add a new field to this class. Allocates an
// EnCAddedFieldElement from the high-frequency heap, bumps the appropriate
// field count, records the element with the module's EnC data, and attempts
// an immediate fixup of the new FieldDesc (which may be deferred if class
// loading would be required on the debugger thread).
// Returns E_OUTOFMEMORY on allocation failure, E_FAIL when the module has
// no EnC class data, S_OK otherwise.
HRESULT EEClass::AddField(mdFieldDef fieldDef)
{
    LOG((LF_ENC, LL_INFO100, "EEClass::AddField %s\n", GetMDImport()->GetNameOfFieldDef(fieldDef)));
    // Here we allocate a FieldDesc and set just enough info to be able to fix it up later
    // when we're running in managed code.
    EnCAddedFieldElement *pAddedField = (EnCAddedFieldElement *) GetClassLoader()->GetHighFrequencyHeap()->AllocMem(sizeof(EnCAddedFieldElement));
    // BUGFIX: AllocMem can return NULL under memory pressure; report OOM
    // instead of dereferencing a NULL pointer below (AllocateNewMT performs
    // the same check on its allocation).
    if (pAddedField == NULL)
        return E_OUTOFMEMORY;
    DWORD dwFieldAttrs = GetMDImport()->GetFieldDefProps(fieldDef);
    pAddedField->Init(IsFdStatic(dwFieldAttrs));
    EnCFieldDesc *pNewFD = &pAddedField->m_fieldDesc;
    // Keep the class's field counts in sync with the addition.
    if (pNewFD->IsStatic())
        ++m_wNumStaticFields;
    else
        ++m_wNumInstanceFields;
    EnCEEClassData *pEnCClass = ((EditAndContinueModule*)GetModule())->GetEnCEEClassData(this);
    if (! pEnCClass)
        return E_FAIL;
    pEnCClass->AddField(pAddedField);
    // Make the new FieldDesc reachable from its metadata token.
    GetModule()->StoreFieldDef(fieldDef, pNewFD);
    pNewFD->SetMethodTable(GetMethodTable());
    // try to fixup the field desc, will fail if have to load classes as are currently running
    // on the debugger thread and then the fd will be marked as needing fixup
    pNewFD->Fixup(fieldDef);
    return S_OK;
}
//
// Add a new method to a class. This could be static, virtual or non-virtual and could override
// an existing virtual.
//
// First we want to do some checks (eg, we've got an RVA or else we're not expecting an RVA).
// Then we classify it based on the type of MethodDesc that we'll create.
// We create a new MethodDescChunk that contains just 1 MethodDesc, which we then initialize.
// Note that this is intended for use in Edit and Continue only - the regular code path
// to follow is through BuildMethodTable.
//
#define MAX_DIST_FROM_VTABLE 0xFFFF
HRESULT EEClass::AddMethod(mdMethodDef methodDef, COR_ILMETHOD *pNewCode)
{
LOG((LF_ENC, LL_INFO100, "EEClass::AddMethod %s\n", GetMDImport()->GetNameOfMethodDef(methodDef)));
DWORD dwDescrOffset;
DWORD dwImplFlags;
MethodClassification Classification;
HRESULT hr = S_OK;
GetMDImport()->GetMethodImplProps(methodDef, &dwDescrOffset, &dwImplFlags);
DWORD dwMemberAttrs = GetMDImport()->GetMethodDefProps(methodDef);
if (IsMdAbstract(dwMemberAttrs))
{
LOG((LF_ENC, LL_INFO100, "**Error** EEClass::AddMethod abstract methods not supported\n"));
return E_FAIL;
}
// for now start with static methods
// need to add to our linked list
// then change findmethod to find it here
// add to rid table
IMDInternalImport *pImport = GetMDImport();
#ifdef _DEBUG
mdTypeDef parentTypeDef;
hr = pImport->GetParentToken(methodDef, &parentTypeDef);
_ASSERTE(!FAILED(hr)); // If this fails, we'll know why the debug
// build is behaving differently.
if (FAILED(hr))
return E_FAIL;
DWORD dwParentAttrs;
pImport->GetTypeDefProps(parentTypeDef, &dwParentAttrs, 0);
RVA_OR_SHOULD_BE_ZERO(pNewCode, dwParentAttrs, dwMemberAttrs, dwImplFlags, pImport, methodDef);
#endif //_DEBUG
// Determine the classification of the method being added.
if (pNewCode == 0 && ((IsReallyMdPinvokeImpl(dwMemberAttrs) || IsMiInternalCall(dwImplFlags)) && NDirect::HasNAT_LAttribute(pImport, methodDef)==S_OK))
{
Classification = mcNDirect;
}
else if (IsInterface() && !IsMdStatic(dwMemberAttrs))
{
Classification = mcComInterop;
}
else if (IsMiRuntime(dwImplFlags))
{
Classification = mcEEImpl;
}
else
{
Classification = mcIL;
}
// Create the chunk somewhere we'll know is within range of the VTable
MethodDescChunk *pChunk = MethodDescChunk::CreateChunk(GetClassLoader()->GetHighFrequencyHeap(),
1,
Classification,
::GetTokenRange(methodDef));
if (pChunk == NULL)
return E_OUTOFMEMORY;
pChunk->SetMethodTable(GetMethodTable());
MethodDesc *pNewMD = pChunk->GetFirstMethodDesc();
memset(pNewMD, 0, sizeof(MethodDesc));
// Set the method desc's classification and chunk index.
pNewMD->SetChunkIndex(0, Classification);
LPCSTR pName = NULL;
if (Classification == mcEEImpl)
{
pName = pImport->GetNameOfMethodDef(methodDef);
}
hr = InitMethodDesc(pNewMD,
Classification,
methodDef,
dwImplFlags,
dwMemberAttrs,
TRUE,
// subtract base because code expects an RVA and will add base back to get actual address
(DWORD)((Classification == mcNDirect ||
Classification == mcEEImpl)
? 0 : (BYTE *)pNewCode - GetModule()->GetILBase()),
GetModule()->GetILBase(),
pImport,
pName
#ifdef _DEBUG
, pImport->GetNameOfMethodDef(methodDef),
m_szDebugClassName,
NULL
#endif //_DEBUG
);
if (FAILED(hr))
{
return hr;
}
SLOT *pSlotMemory;
MethodDesc *pParentMD = NULL;
// need to check if we are overriding an pre-existing virtual function, in which case want to just
// update this class' slot rather than create a new one. We start searching at one above this class as
// will always find it in current class (because has been added to the delta PE). If no parent, then
// couldn't be overriding an inherited slot. If are overriding, make sure isn't an override of a function
// added via EnC, in which case don't have a slot to update
if (pNewMD->IsVirtual() &&
this->GetParentClass() != NULL &&
(pParentMD = ((EditAndContinueModule*)GetModule())->FindVirtualFunction(this->GetParentClass(), methodDef)) != NULL &&
! pParentMD->IsEnCNewVirtual())
{
// just write over the existing slot where we are overriding
pSlotMemory = pParentMD->GetSlot() + GetVtable();
pNewMD->SetSlot(pParentMD->GetSlot()); // use same slot as parent
LOG((LF_ENC, LL_INFO100, " EEClass::AddMethod using existing slot %d\n", pParentMD->GetSlot()/sizeof(SLOT) ));
}
else
{
// Method not overriding an existing slot, so need new slot
// The new slot needs to be located appropriately.
const BYTE *pVTable = (const BYTE *)GetVtable();
const BYTE *pLowerBound = pVTable;
const BYTE *pUpperBound = pVTable + MAX_DIST_FROM_VTABLE;
LoaderHeap *pHeap = GetClassLoader()->GetHighFrequencyHeap();
// Can we find acceptable memory within the heap? I hope so!
if ( pHeap->CanAllocMemWithinRange(sizeof(SLOT),
(BYTE *)pLowerBound,
(BYTE *)pUpperBound,
TRUE))
{
// method not overriding an existing slot, so need new slot
pSlotMemory = (SLOT *) pHeap->AllocMem(sizeof(SLOT));
// If we couldn't get something, well, we're screwed, so give up
if (!pSlotMemory)
{
return CORDBG_E_ENC_INTERNAL_ERROR;
}
}
else
{
// Guess not - but we've got a backup!! Go look for some of that
// memory that we squirreled away immediately following the methodtables
// in memory.
// @todo Win64 different alignment for win64?
// This needs to be DWORD aligned, if not drop the first couple bytes
if( (WORD)pLowerBound % 4 != 0)
pLowerBound += (WORD)pLowerBound % 4;
// This needs to be DWORD aligned, if not drop the last couple bytes
if( (WORD)pUpperBound % 4 != 0)
pUpperBound -= (WORD)pUpperBound % 4;
_ASSERTE((WORD)pLowerBound % 4 == 0);
_ASSERTE((WORD)pUpperBound % 4 == 0);
// In SetupMethodTables, we added extra slots to be used here. Go get one that's
// in range.
EditAndContinueModule *pEACM = (EditAndContinueModule*)GetModule();
_ASSERTE(pEACM!=NULL);
pSlotMemory = (SLOT *)pEACM->m_pRangeList->FindIdWithinRange(pLowerBound, pUpperBound);
// If we couldn't get something, well, we're screwed, so give up
if (!pSlotMemory)
{
return CORDBG_E_ENC_INTERNAL_ERROR;
}
// Now remove the slot we just used
SLOT *pEnd = pSlotMemory;
// First we should figure out if there was any more space in the range we got.
// Shouldn't be a lot of work, since ENC_EXTRA_SLOT_COUNT should be small
for(int i = 0; i < ENC_EXTRA_SLOT_COUNT; i++)
{
if(!pEACM->m_pRangeList->IsInRange((const BYTE *)pEnd))
{
_ASSERTE(pEnd > pSlotMemory);
break;
}
// See if the next element is here.
pEnd++;
}
// Yank the range.
pEACM->m_pRangeList->RemoveRanges(pSlotMemory);
LOG((LF_CORDB, LL_INFO10000, "EEC:AM: removed range (0x%x, 0x%x) from SLOT pool\n",
(const BYTE *)pSlotMemory, (const BYTE *)pEnd));
// If there's still space, add the remaining space back in.
if (pEnd > pSlotMemory)
{
LOG((LF_CORDB, LL_INFO10000, "EEC:AM: Re-added range (0x%x, 0x%x) to SLOT pool\n",
(const BYTE *)(pSlotMemory+1), (const BYTE *)pEnd));
// Note that just like in SetupMethodTable, pEnd will point to memory that's
// NOT valid - it's one beyond the range.
BOOL fAdded = pEACM->m_pRangeList->AddRange((const BYTE *)(pSlotMemory+1),
(const BYTE *)pEnd,
(pSlotMemory+1));
// We'll re-use the space we just freed in RemoveRanges, above
_ASSERTE(fAdded);
}
}
pNewMD->SetSlot((WORD)(pSlotMemory - GetVtable())); // this makes the slot index refer to our new slot @TODO - LBS pointer math
if (pNewMD->IsVirtual())
pNewMD->SetEnCNewVirtual();
LOG((LF_ENC, LL_INFO100, " EEClass::AddMethod adding new slot\n"));
}
*pSlotMemory = (SLOT) pNewMD->GetPreStubAddr();
_ASSERTE(((BYTE*)pSlotMemory - (BYTE*)GetVtable()) % 4 == 0); // should always be 4-byte aligned, but just in case
_ASSERTE(((pSlotMemory - GetVtable()) >= -32768) &&
((pSlotMemory - GetVtable()) <= 32767)); // Slot number is only 16 bits.
if ((pSlotMemory - GetVtable()) < -32768 || ((pSlotMemory - GetVtable()) > 32767))
return E_OUTOFMEMORY;
GetModule()->StoreMethodDef(methodDef, pNewMD);
return S_OK;
}
#endif // EnC_SUPPORTED
//
// Find a method in this class hierarchy - used ONLY by the loader during layout. Do not use at runtime.
//
// *ppMethodHash may be NULL - if so, a MethodNameHash may be created.
// *ppMemberSignature must be NULL on entry - it and *pcMemberSignature may or may not be filled out
//
// ppMethodDesc will be filled out with NULL if no matching method in the hierarchy is found.
//
// Returns a failing HRESULT if there was an error of some kind.
//
HRESULT EEClass::LoaderFindMethodInClass(
    MethodNameHash ** ppMethodHash,
    LPCUTF8 pszMemberName,
    Module* pModule,
    mdMethodDef mdToken,
    MethodDesc ** ppMethodDesc,
    PCCOR_SIGNATURE * ppMemberSignature,
    DWORD * pcMemberSignature,
    DWORD dwHashName
    )
{
    // Searches the parent class hierarchy for a method matching both the name
    // (pszMemberName, pre-hashed into dwHashName) and the signature of mdToken.
    // Used ONLY by the loader during layout.
    //
    // ppMethodHash    - [in/out] lazily-created hash of all methods in the parent chain;
    //                   may point to NULL on entry, in which case the hash is built here.
    // ppMemberSignature/pcMemberSignature - [out] filled in with mdToken's signature if
    //                   the name-hash lookup succeeds (must point to NULL on entry).
    // ppMethodDesc    - [out] the matching MethodDesc, or NULL if no match exists.
    //
    // Returns S_OK whether or not a match was found; E_OUTOFMEMORY if the
    // method-chain hash could not be allocated.
    MethodHashEntry *pEntry;
    DWORD dwNameHashValue;
    _ASSERTE(pModule);
    _ASSERTE(*ppMemberSignature == NULL);
    // No method found yet
    *ppMethodDesc = NULL;
    // Use the hash bitmap to exclude the easy cases
    if (CouldMethodExistInClass(GetParentClass(), pszMemberName, dwHashName) == 0)
        return S_OK; // No such method by this name exists in the hierarchy
    // Have we created a hash of all the methods in the class chain?
    if (*ppMethodHash == NULL)
    {
        // There may be such a method, so we will now create a hash table to reduce the pain for
        // further lookups
        // Optimization disabled until synchronization issues sorted out
        //*ppMethodHash = g_pMethodNameCache->GetMethodNameHash(GetParentClass());
        *ppMethodHash = GetParentClass()->CreateMethodChainHash();
        // Fix: test the newly allocated hash (*ppMethodHash), not the
        // parameter itself - the old check could never fire, and a failed
        // allocation would have been dereferenced below.
        if (*ppMethodHash == NULL)
            return E_OUTOFMEMORY;
    }
    // We have a hash table, so use it
    pEntry = (*ppMethodHash)->Lookup(pszMemberName, dwHashName);
    if (pEntry == NULL)
        return S_OK; // No method by this name exists in the hierarchy
    // Get signature of the method we're searching for - we will need this to verify an exact name-signature match
    *ppMemberSignature = pModule->GetMDImport()->GetSigOfMethodDef(
        mdToken,
        pcMemberSignature
    );
    // Hash value we are looking for in the chain
    dwNameHashValue = pEntry->m_dwHashValue;
    // We've found a method with the same name, but the signature may be different
    // Traverse the chain of all methods with this name
    while (1)
    {
        PCCOR_SIGNATURE pHashMethodSig;
        DWORD cHashMethodSig;
        // Get sig of entry in hash chain
        pEntry->m_pDesc->GetSig(&pHashMethodSig, &cHashMethodSig);
        if (MetaSig::CompareMethodSigs(*ppMemberSignature, *pcMemberSignature, pModule,
                                       pHashMethodSig, cHashMethodSig, pEntry->m_pDesc->GetModule()))
        {
            // Found a match
            *ppMethodDesc = pEntry->m_pDesc;
            return S_OK;
        }
        // Advance to next item in the hash chain which has the same name
        do
        {
            pEntry = pEntry->m_pNext; // Next entry in the hash chain
            if (pEntry == NULL)
                return S_OK; // End of hash chain, no match found
        } while ((pEntry->m_dwHashValue != dwNameHashValue) || (strcmp(pEntry->m_pKey, pszMemberName) != 0));
    }
    return S_OK; // not reached; kept to satisfy compilers that require a return
}
//
// Given an interface map to fill out, expand pNewInterface (and its sub-interfaces) into it, increasing
// pdwInterfaceListSize as appropriate, and avoiding duplicates.
//
BOOL EEClass::ExpandInterface(InterfaceInfo_t *pInterfaceMap,
                              EEClass *pNewInterface,
                              DWORD *pdwInterfaceListSize,
                              DWORD *pdwMaxInterfaceMethods,
                              BOOL fDirect)
{
    // Adds pNewInterface (and, recursively, everything it extends) to the
    // interface map, growing *pdwInterfaceListSize as entries are appended and
    // keeping *pdwMaxInterfaceMethods at the largest interface vtable seen.
    // Duplicates are never appended twice, but a duplicate that is directly
    // declared on this class still needs its "declared" flag set.
    // Parent-inherited entries precede the declared ones, so scanning from the
    // front finds any earlier occurrence.
    for (DWORD dwScan = 0; dwScan < (*pdwInterfaceListSize); dwScan++)
    {
        if (pInterfaceMap[dwScan].m_pMethodTable != pNewInterface->m_pMethodTable)
            continue;

        if (fDirect)
            pInterfaceMap[dwScan].m_wFlags |= InterfaceInfo_t::interface_declared_on_class;
        return TRUE; // already present - do not add again
    }

    // Track the interface with the most vtable slots.
    if (*pdwMaxInterfaceMethods < pNewInterface->GetNumVtableSlots())
        *pdwMaxInterfaceMethods = pNewInterface->GetNumVtableSlots();

    // Append a fresh entry; the start slot is assigned later during placement.
    InterfaceInfo_t *pNewEntry = &pInterfaceMap[*pdwInterfaceListSize];
    pNewEntry->m_pMethodTable = pNewInterface->m_pMethodTable;
    pNewEntry->m_wStartSlot = (WORD) -1;
    pNewEntry->m_wFlags = 0;
    if (fDirect)
        pNewEntry->m_wFlags |= InterfaceInfo_t::interface_declared_on_class;
    (*pdwInterfaceListSize)++;

    // Recursively pull in each interface this one itself extends; those are
    // never "directly declared" on the class being built.
    InterfaceInfo_t *pSubMap = pNewInterface->m_pMethodTable->GetInterfaceMap();
    for (DWORD dwSub = 0; dwSub < pNewInterface->m_wNumInterfaces; dwSub++)
    {
        if (ExpandInterface(pInterfaceMap, pSubMap[dwSub].m_pMethodTable->GetClass(),
                            pdwInterfaceListSize, pdwMaxInterfaceMethods, FALSE) == FALSE)
            return FALSE;
    }
    return TRUE;
}
//
// Fill out a fully expanded interface map, such that if we are declared to implement I3, and I3 extends I1,I2,
// then I1,I2 are added to our list if they are not already present.
//
// Returns FALSE for failure. Currently we don't fail, but @TODO perhaps we should fail if we recurse
// too much.
//
BOOL EEClass::CreateInterfaceMap(BuildingInterfaceInfo_t *pBuildingInterfaceList, InterfaceInfo_t *pInterfaceMap, DWORD *pdwInterfaceListSize, DWORD *pdwMaxInterfaceMethods)
{
    // Builds the fully expanded interface map for this class: first the
    // parent's (already fully expanded) interfaces, in the parent's exact
    // order, then every directly declared interface plus its sub-interfaces.
    // *pdwMaxInterfaceMethods tracks the largest interface vtable encountered.
    // Returns FALSE only if ExpandInterface fails.
    *pdwInterfaceListSize = 0;

    EEClass *pParent = GetParentClass();
    if (pParent != NULL)
    {
        // Copy the parent map verbatim - our map must list these interfaces
        // in identical order so placement data can be inherited later.
        InterfaceInfo_t *pParentMap = pParent->GetInterfaceMap();
        for (WORD wIdx = 0; wIdx < pParent->m_wNumInterfaces; wIdx++)
        {
            // Keep track of the interface with the largest number of methods.
            DWORD dwSlots = pParentMap[wIdx].m_pMethodTable->GetClass()->GetNumVtableSlots();
            if (dwSlots > *pdwMaxInterfaceMethods)
                *pdwMaxInterfaceMethods = dwSlots;

            InterfaceInfo_t *pDest = &pInterfaceMap[*pdwInterfaceListSize];
            pDest->m_pMethodTable = pParentMap[wIdx].m_pMethodTable;
            pDest->m_wStartSlot = (WORD) -1;
            pDest->m_wFlags = 0;
            (*pdwInterfaceListSize)++;
        }
    }

    // Now expand each interface we explicitly implement (class) or extend
    // (interface) - fDirect=TRUE marks these as declared on this class.
    for (WORD wIdx = 0; wIdx < m_wNumInterfaces; wIdx++)
    {
        if (ExpandInterface(pInterfaceMap, pBuildingInterfaceList[wIdx].m_pClass,
                            pdwInterfaceListSize, pdwMaxInterfaceMethods, TRUE) == FALSE)
            return FALSE;
    }
    return TRUE;
}
// Test the exception to see if it is set. This routine assumes
// that the throwable has been protected. It also disables GC in debug to
// keep the ASSERTs quiet. This is not necessary in retail because we
// are just checking for non-null, not a specific value (which may change
// during GC)
BOOL EEClass::TestThrowable(OBJECTREF* pThrowable)
{
    // Returns TRUE iff a throwable object has actually been set in the
    // (GC-protected) slot pointed to by pThrowable.

    // No usable throwable slot supplied - nothing can be set.
    if (!pThrowableAvailable(pThrowable))
        return FALSE;

    // The caller must have GC-protected the slot before calling us.
    _ASSERTE(IsProtectedByGCFrame(pThrowable));

    BOOL fIsSet;
#ifdef _DEBUG
    // Debug-only: enter cooperative mode purely to quiet GC-mode asserts.
    // Retail skips this because a non-NULL test stays valid even if a GC
    // relocates the object.
    BEGIN_ENSURE_COOPERATIVE_GC();
#endif
    fIsSet = (*pThrowable != NULL);
#ifdef _DEBUG
    END_ENSURE_COOPERATIVE_GC();
#endif
    return fIsSet;
}
//
// Builds the method table, allocates MethodDesc, handles overloaded members, attempts to compress
// interface storage. All dependent classes must already be resolved!
//
// Interface compression strategy:
//
// (NOTE: We do not build interface maps for interfaces - we do have an interface map structure,
// but this simply lists all the interfaces - the slot number is set to -1).
//
// Stage 1: An interface map is created. The interface map is a list of ALL interfaces which this
// class implements, whether they were declared explicitly, or were inherited from the
// parent class, or through interface inheritance.
//
// First, the parent's interface map is copied (the parent's interface map is guaranteed
// to be fully expanded). Then new interfaces are added to it - for each interface which
// this class explicitly implements, that interface and all of its sub-interfaces are
// added to the interface map (duplicates are not added).
//
// Example: Parent class's interface map is { I1 }
// Derived class extends Parent, implements I2
// Interface I2 extends I3, I4
//
// Then the Derived class's interface map will be: { I1, I2, I3, I4 }
//
// Stage 2: We enumerate all the methods in our class. Methods which are "other" methods
// (i.e. non-vtable methods, such as statics and privates) are handled separately, and
// will not be discussed further.
//
// Each vtable method (i.e. non-private and non-static methods) is then enumerated
// and then designated as placed (and given a vtable slot number) or unplaced (given a
// -1 vtable slot number).
//
// If it overrides a parent method, then it is automatically placed - it must use the
// same slot.
//
// If it is not an interface method -that is, no interface implemented by this class has
// such a method, then it is placed in the first available vtable slot.
//
// Otherwise, if it is an interface method, then is set to be unplaced (given slot -1).
//
// Stage 3: Interface placement.
//
// Stage 3A)Inherited placement. We attempt to copy as much as we can from the parent's interface
// map. The parent's interface map is guaranteed to list interfaces in the same order as
// our own interface map.
//
// We can steal interface placement information from the parent only if the interface in
// question lies entirely within the parent's class vtable methods (i.e. does not extend
// into the duplicated vtable slot area). That is, the Interface.VtableStartSlot +
// Interface.NumMethods < ParentClass.VtableSize.
//
// Stage 3B)By this point, we know how many vtable slots are required for the class, since we
// know how many methods the parent had, how many were overridden, and how many are new.
// If we need to duplicate some vtable slots to create interface lists, these duplications will
// occur starting at this point in the vtable (dwCurrentDuplicateVtableSlot).
//
// For each interface in our interface map, we look at all methods in that interface.
//
// a) If NONE of those methods have been placed, then we place them all, in the order
// given by the interface, starting at the first available vtable slot. We update the
// placed slot number for each placed method. The interface map entry for this interface
// is updated to point at the correct starting vtable slot.
//
// b) If ALL of the methods were already placed, but they were all placed in consecutive
// vtable slots, then we simply point the interface map entry for this interface at the
// appropriate slot. Just because their placement slot numbers weren't consecutive,
// it doesn't mean that these methods don't exist somewhere consecutively. For example,
// they could exist in the vtable at dwCurrentDuplicateVtableSlot or later (being
// duplicated in the correct order for some other interface). So we look there also,
// to see if we can find all of our interface methods laid out in the correct order,
// anywhere in the entire vtable.
//
// Failing a) and b), we create a vtable slot for each interface method, starting at
// dwCurrentDuplicateVtableSlot (the value of this variable is advanced as we add more
// duplicate slots). Some of the methods we are creating duplicate slots for may be
// class methods which have never been placed, so if they haven't, they are placed at
// the first available vtable slot.
//
// @FUTURE: If a derived class declares that it implements I1, I2, but I2 extends I1, then it
// would be advantageous to rearrange the order of declared interfaces. If we place
// I1 first, there is no guarantee that we can gain any compression from placing I2
// after it - we may need to create some duplicate slots. On the other hand, if we
// place I2 then I1, I1 will fit entirely inside I2.
//
/****************************************************************************************
IMPORTANT NOTE:
The following is the new version of BuildMethodTable. It has been factored into
smaller functions so that it is easier to manage. The old version is located at the
bottom of this file for reference purposes. It has been commented out.
@TODO: remove the old version of BuildMethodTable near the end of M10, or when we
are comfortable with the new version.
*****************************************************************************************/
HRESULT EEClass::BuildMethodTable(Module *pModule,
mdToken cl,
BuildingInterfaceInfo_t *pBuildingInterfaceList,
const LayoutRawFieldInfo *pLayoutRawFieldInfos,
OBJECTREF *pThrowable)
{
HRESULT hr = S_OK;
// The following structs, defined as private members of EEClass, contain the necessary local
// parameters needed for BuildMethodTable
// Look at the struct definitions for a detailed list of all parameters available
// to BuildMethodTable.
bmtErrorInfo bmtError;
bmtProperties bmtProp;
bmtVtable bmtVT;
bmtParentInfo bmtParent;
bmtInterfaceInfo bmtInterface;
bmtEnumMethAndFields bmtEnumMF;
bmtMetaDataInfo bmtMetaData;
bmtMethAndFieldDescs bmtMFDescs;
bmtFieldPlacement bmtFP;
bmtInternalInfo bmtInternal;
bmtGCSeries bmtGCSeries;
bmtMethodImplInfo bmtMethodImpl;
//Initialize structs
bmtError.resIDWhy = IDS_CLASSLOAD_GENERIC; // Set the reason and the offending method def. If the method information
bmtError.pThrowable = pThrowable;
bmtInternal.pInternalImport = pModule->GetMDImport();
bmtInternal.pModule = pModule;
bmtInternal.cl = cl;
// If not NULL, it means there are some by-value fields, and this contains an entry for each instance or static field,
// which is NULL if not a by value field, and points to the EEClass of the field if a by value field. Instance fields
// come first, statics come second.
EEClass **pByValueClassCache = NULL;
// If not NULL, it means there are some by-value fields, and this contains an entry for each inst
#ifdef _DEBUG
LPCUTF8 className;
LPCUTF8 nameSpace;
bmtInternal.pInternalImport->GetNameOfTypeDef(cl, &className, &nameSpace);
unsigned fileNameSize = 0;
LPCWSTR fileName = NULL;
if (pModule->IsPEFile()) {
fileName = pModule->GetPEFile()->GetLeafFileName();
if (fileName != 0)
fileNameSize = (unsigned int) wcslen(fileName) + 2;
}
m_szDebugClassName = (char*) GetClassLoader()->GetHighFrequencyHeap()->AllocMem(sizeof(char)*(strlen(className) + strlen(nameSpace) + fileNameSize + 2));
_ASSERTE(m_szDebugClassName);
strcpy(m_szDebugClassName, nameSpace);
if (strlen(nameSpace) > 0) {
m_szDebugClassName[strlen(nameSpace)] = '.';
m_szDebugClassName[strlen(nameSpace) + 1] = '\0';
}
strcat(m_szDebugClassName, className);
if (fileNameSize != 0) {
char* ptr = m_szDebugClassName + strlen(m_szDebugClassName);
*ptr++ = '[';
while(*fileName != 0)
*ptr++ = char(*fileName++);
*ptr++ = ']';
*ptr++ = 0;
}
if (g_pConfig->ShouldBreakOnClassBuild(className)) {
_ASSERTE(!"BreakOnClassBuild");
m_fDebuggingClass = TRUE;
}
#endif // _DEBUG
DWORD i;
COMPLUS_TRY
{
//Get Check Point for the thread-based allocator
Thread *pThread = GetThread();
void* checkPointMarker = pThread->m_MarshalAlloc.GetCheckpoint();
// this class must not already be resolved
_ASSERTE(IsResolved() == FALSE);
// If this is mscorlib, then don't perform some sanity checks on the layout
bmtProp.fNoSanityChecks = ((g_pObjectClass != NULL) && pModule == g_pObjectClass->GetModule());
#ifdef _DEBUG
LPCUTF8 pszDebugName,pszDebugNamespace;
pModule->GetMDImport()->GetNameOfTypeDef(GetCl(), &pszDebugName, &pszDebugNamespace);
LOG((LF_CLASSLOADER, LL_INFO1000, "Loading class \"%s%s%s\" from module \"%ws\" in domain 0x%x %s\n",
*pszDebugNamespace ? pszDebugNamespace : "",
*pszDebugNamespace ? NAMESPACE_SEPARATOR_STR : "",
pszDebugName,
pModule->GetFileName(),
pModule->GetDomain(),
(pModule->IsSystem()) ? "System Domain" : ""
));
#endif
// Interfaces have a parent class of Object, but we don't really want to inherit all of
// Object's virtual methods, so pretend we don't have a parent class - at the bottom of this
// function we reset GetParentClass()
if (IsInterface())
{
SetParentClass (NULL);
}
// Check to see if the class is an valuetype
hr = CheckForValueType(&bmtError);
IfFailGoto(hr, exit);
// Check to see if the class is an enumeration
hr = CheckForEnumType(&bmtError);
IfFailGoto(hr, exit);
// Com Import classes are special
if (IsComImport() && IsClass())
{
if(GetParentClass() != g_pObjectClass->GetClass())
{
// ComImport classes can't extend from any other class
bmtError.resIDWhy = IDS_CLASSLOAD_CANTEXTEND;
IfFailGoto(COR_E_TYPELOAD, exit);
}
if(HasLayout())
{
// ComImport classes cannot have layout information.
bmtError.resIDWhy = IDS_CLASSLOAD_COMIMPCANNOTHAVELAYOUT;
IfFailGoto(COR_E_TYPELOAD, exit);
}
// ComImport classes extend from our _ComObject Class
MethodTable *pCOMMT = SystemDomain::GetDefaultComObject();
_ASSERTE(pCOMMT);
SetParentClass (pCOMMT->GetClass());
// if the current class is imported
bmtProp.fIsComObjectType = TRUE;
}
if (GetParentClass())
{
// parent class must already be resolved
_ASSERTE(GetParentClass()->IsResolved());
if (GetParentClass()->GetMethodTable()->IsComObjectType())
{
// if the parent class is of ComObectType
// so is the child
bmtProp.fIsComObjectType = TRUE;
}
}
else if (! (IsInterface() ) ) {
if(g_pObjectClass != NULL) {
BYTE* base = NULL;
Assembly* pAssembly = pModule->GetAssembly();
if(pAssembly && pAssembly->GetManifestFile())
base = pAssembly->GetManifestFile()->GetBase();
if(base != g_pObjectClass->GetAssembly()->GetManifestFile()->GetBase() &&
GetCl() != COR_GLOBAL_PARENT_TOKEN)
{
bmtError.resIDWhy = IDS_CLASSLOAD_PARENTNULL;
IfFailGoto(COR_E_TYPELOAD, exit);
}
}
}
// Check for special types.
hr = CheckForSpecialTypes(&bmtInternal, &bmtProp);
IfFailGoto(hr, exit);
// Set the contextful or marshalbyref flag if necessary
hr = SetContextfulOrByRef(&bmtInternal);
IfFailGoto(hr, exit);
// resolve unresolved interfaces, determine an upper bound on the size of the interface map,
// and determine the size of the largest interface (in # slots)
hr = ResolveInterfaces(pBuildingInterfaceList, &bmtInterface, &bmtProp, &bmtVT, &bmtParent);
IfFailGoto(hr, exit);
// Enumerate this class's members
hr = EnumerateMethodImpls(&bmtInternal, &bmtEnumMF, &bmtMetaData, &bmtMethodImpl, &bmtError);
IfFailGoto(hr, exit);
// Enumerate this class's members
hr = EnumerateClassMembers(&bmtInternal,
&bmtEnumMF,
&bmtMFDescs,
&bmtProp,
&bmtMetaData,
&bmtVT,
&bmtError);
IfFailGoto(hr, exit);
WS_PERF_SET_HEAP(SYSTEM_HEAP);
// Allocate a MethodDesc* for each method (needed later when doing interfaces), and a FieldDesc* for each field
hr = AllocateMethodFieldDescs(&bmtProp, &bmtMFDescs, &bmtMetaData, &bmtVT,
&bmtEnumMF, &bmtInterface, &bmtFP, &bmtParent);
IfFailGoto(hr, exit);
unsigned totalDeclaredFieldSize=0;
// Go thru all fields and initialize their FieldDescs.
hr = InitializeFieldDescs(m_pFieldDescList, pLayoutRawFieldInfos, &bmtInternal,
&bmtMetaData, &bmtEnumMF, &bmtError,
&pByValueClassCache, &bmtMFDescs, &bmtFP,
&totalDeclaredFieldSize);
IfFailGoto(hr, exit);
// Determine vtable placement for each member in this class
hr = PlaceMembers(&bmtInternal, &bmtMetaData, &bmtError,
&bmtProp, &bmtParent, &bmtInterface,
&bmtMFDescs, &bmtEnumMF,
&bmtMethodImpl, &bmtVT);
IfFailGoto(hr, exit);
// First copy what we can leverage from the parent's interface map.
// The parent's interface map will be identical to the beginning of this class's interface map (i.e.
// the interfaces will be listed in the identical order).
if (bmtParent.dwNumParentInterfaces > 0)
{
InterfaceInfo_t *pParentInterfaceList = GetParentClass()->GetInterfaceMap();
#ifdef _DEBUG
// Check that the parent's interface map is identical to the beginning of this
// class's interface map
for (i = 0; i < bmtParent.dwNumParentInterfaces; i++)
_ASSERTE(pParentInterfaceList[i].m_pMethodTable == bmtInterface.pInterfaceMap[i].m_pMethodTable);
#endif
for (i = 0; i < bmtParent.dwNumParentInterfaces; i++)
{
#ifdef _DEBUG
MethodTable *pMT = pParentInterfaceList[i].m_pMethodTable;
EEClass* pClass = pMT->GetClass();
// If the interface resides entirely inside the parent's class methods (i.e. no duplicate
// slots), then we can place this interface in an identical spot to in the parent.
//
// Note carefully: the vtable for this interface could start within the first GetNumVtableSlots()
// entries, but could actually extend beyond it, if we were particularly efficient at placing
// this interface, so check that the end of the interface vtable is before
// GetParentClass()->GetNumVtableSlots().
_ASSERTE(pParentInterfaceList[i].m_wStartSlot + pClass->GetNumVtableSlots() <=
GetParentClass()->GetNumVtableSlots());
#endif
// Interface lies inside parent's methods, so we can place it
bmtInterface.pInterfaceMap[i].m_wStartSlot = pParentInterfaceList[i].m_wStartSlot;
}
}
//
// If we are a class, then there may be some unplaced vtable methods (which are by definition
// interface methods, otherwise they'd already have been placed). Place as many unplaced methods
// as possible, in the order preferred by interfaces. However, do not allow any duplicates - once
// a method has been placed, it cannot be placed again - if we are unable to neatly place an interface,
// create duplicate slots for it starting at dwCurrentDuplicateVtableSlot. Fill out the interface
// map for all interfaces as they are placed.
//
// If we are an interface, then all methods are already placed. Fill out the interface map for
// interfaces as they are placed.
//
if (!IsInterface())
{
hr = PlaceVtableMethods(&bmtInterface, &bmtVT, &bmtMetaData, &bmtInternal, &bmtError, &bmtProp, &bmtMFDescs);
IfFailGoto(hr, exit);
hr = PlaceMethodImpls(&bmtInternal, &bmtMethodImpl, &bmtError, &bmtInterface, &bmtVT);
IfFailGoto(hr, exit);
}
// If we're a value class, we want to create duplicate slots and MethodDescs for all methods in the vtable
// section (i.e. not privates or statics).
hr = DuplicateValueClassSlots(&bmtMetaData, &bmtMFDescs,
&bmtInternal, &bmtVT);
IfFailGoto(hr, exit);
// ensure we filled out all vtable slots
_ASSERTE(bmtVT.dwCurrentVtableSlot == GetNumVtableSlots());
#ifdef _DEBUG
if (IsInterface() == FALSE)
{
for (i = 0; i < m_wNumInterfaces; i++)
_ASSERTE(bmtInterface.pInterfaceMap[i].m_wStartSlot != (WORD) -1);
}
#endif
// Place all non vtable methods
for (i = 0; i < bmtVT.dwCurrentNonVtableSlot; i++)
{
MethodDesc *pMD = (MethodDesc *) bmtVT.pNonVtable[i];
_ASSERTE(pMD->m_wSlotNumber == i);
pMD->m_wSlotNumber += (WORD) bmtVT.dwCurrentVtableSlot;
bmtVT.pVtable[pMD->m_wSlotNumber] = (SLOT) pMD->GetPreStubAddr();
}
if (bmtVT.wDefaultCtorSlot != MethodTable::NO_SLOT)
bmtVT.wDefaultCtorSlot += (WORD) bmtVT.dwCurrentVtableSlot;
if (bmtVT.wCCtorSlot != MethodTable::NO_SLOT)
bmtVT.wCCtorSlot += (WORD) bmtVT.dwCurrentVtableSlot;
bmtVT.dwCurrentNonVtableSlot += bmtVT.dwCurrentVtableSlot;
// ensure we didn't overflow the temporary vtable
_ASSERTE(bmtVT.dwCurrentNonVtableSlot <= bmtVT.dwMaxVtableSize);
m_wNumMethodSlots = (WORD) bmtVT.dwCurrentNonVtableSlot;
// Place static fields
hr = PlaceStaticFields(&bmtVT, &bmtFP, &bmtEnumMF);
IfFailGoto(hr, exit);
#if _DEBUG
if (m_wNumStaticFields > 0)
{
LOG((LF_CODESHARING,
LL_INFO10000,
"Placing %d %sshared statics (%d handles) for class %s.\n",
m_wNumStaticFields, IsShared() ? "" : "un", m_wNumHandleStatics,
pszDebugName));
}
#endif
//#define NumStaticFieldsOfSize $$$$$
//#define StaticFieldStart $$$$$
if (IsBlittable())
{
m_wNumGCPointerSeries = 0;
bmtFP.NumInstanceGCPointerFields = 0;
#if 0
// If an explicit size is specified in the metadata, this represents
// a C-style struct with no EE-recognizable fields.
ULONG cbTotalSize = 0;
if (SUCCEEDED(pModule->GetMDImport()->GetClassTotalSize(cl, &cbTotalSize)) && cbTotalSize)
{
m_dwNumInstanceFieldBytes = cbTotalSize;
}
else
#endif
{
_ASSERTE(HasLayout());
m_dwNumInstanceFieldBytes = ((LayoutEEClass*)this)->GetLayoutInfo()->m_cbNativeSize;
}
}
else
{
_ASSERTE(!IsBlittable());
if (HasExplicitFieldOffsetLayout())
{
hr = HandleExplicitLayout(&bmtMetaData, &bmtMFDescs, pByValueClassCache, &bmtInternal, &bmtGCSeries, &bmtError);
}
else
{
// Place instance fields
hr = PlaceInstanceFields(&bmtFP, &bmtEnumMF, &bmtParent, &bmtError, &pByValueClassCache);
}
IfFailGoto(hr, exit);
}
// We enforce that all value classes have non-zero size
if (IsValueClass() && m_dwNumInstanceFieldBytes == 0)
{
bmtError.resIDWhy = IDS_CLASSLOAD_ZEROSIZE;
hr = COR_E_TYPELOAD;
goto exit;
}
// Now setup the method table
hr = SetupMethodTable(&bmtVT,
&bmtInterface,
&bmtInternal,
&bmtProp,
&bmtMFDescs,
&bmtEnumMF,
&bmtError,
&bmtMetaData,
&bmtParent);
IfFailGoto(hr, exit);
if (IsValueClass() && (m_dwNumInstanceFieldBytes != totalDeclaredFieldSize || HasOverLayedField()))
{
GetMethodTable()->SetNotTightlyPacked();
}
// If this is an interface then assign the interface ID.
if (IsInterface())
{
// Assign the interface ID.
AssignInterfaceId();
GetCoClassAttribInfo();
#ifdef _DEBUG
LPCUTF8 pszDebugName,pszDebugNamespace;
pModule->GetMDImport()->GetNameOfTypeDef(cl, &pszDebugName, &pszDebugNamespace);
LOG((LF_CLASSLOADER, LL_INFO1000, "Interface class \"%s%s%s\" given Interface ID 0x%x by AppDomain 0x%x %s\n",
*pszDebugNamespace ? pszDebugNamespace : "",
*pszDebugNamespace ? "." : "",
pszDebugName,
m_dwInterfaceId,
pModule->GetDomain(),
(pModule->IsSystem()) ? "System Domain" : ""
));
#endif
}
if (IsSharedInterface())
// need to copy this to all the appdomains interface managers
SystemDomain::PropogateSharedInterface(GetInterfaceId(), GetMethodTable()->GetVtable());
else if (IsInterface())
// it's an interface but not shared, so just save it in our own interface manager
(GetModule()->GetDomain()->GetInterfaceVTableMapMgr().GetAddrOfGlobalTableForComWrappers())[GetInterfaceId()] = (LPVOID)(GetMethodTable()->GetVtable());
if (HasExplicitFieldOffsetLayout())
// Perform relevant GC calculations for tdexplicit
hr = HandleGCForExplicitLayout(&bmtGCSeries);
else
// Perform relevant GC calculations for value classes
hr = HandleGCForValueClasses(&bmtFP, &bmtEnumMF, &pByValueClassCache);
IfFailGoto(hr, exit);
// GC reqires the series to be sorted.
// TODO: fix it so that we emit them in the correct order in the first place.
if (GetMethodTable()->ContainsPointers())
{
CGCDesc* gcDesc = CGCDesc::GetCGCDescFromMT(GetMethodTable());
qsort(gcDesc->GetLowestSeries(), (int)gcDesc->GetNumSeries(), sizeof(CGCDescSeries), compareCGCDescSeries);
}
if (!GetMethodTable()->HasClassConstructor()
&& (!IsShared() || bmtEnumMF.dwNumStaticFields == 0))
{
// Mark the class as needing no static initialization
SetInited();
}
// Notice whether this class requires finalization
GetMethodTable()->MaybeSetHasFinalizer();
#if CHECK_APP_DOMAIN_LEAKS
// Figure out if we're domain agile..
// Note that this checks a bunch of field directly on the class & method table,
// so it needs to come late in the game.
hr = SetAppDomainAgileAttribute();
IfFailGoto(hr, exit);
#endif
// Figure out if CCW's created to expose this type to COM need to be agile.
SetCCWAppDomainAgileAttribute();
// Create handles for the static fields that contain object references
// and allocate the ones that are value classes.
hr = CreateHandlesForStaticFields(&bmtEnumMF, &bmtInternal, &pByValueClassCache, &bmtVT, &bmtError);
IfFailGoto(hr, exit);
// If we have a non-interface class, then do inheritance security
// checks on it. The check starts by checking for inheritance
// permission demands on the current class. If these first checks
// succeeded, then the cached declared method list is scanned for
// methods that have inheritance permission demands.
hr = VerifyInheritanceSecurity(&bmtInternal, &bmtError, &bmtParent, &bmtEnumMF);
IfFailGoto(hr, exit);
// We need to populate our com map with an system ids. They are globally unique and
// fit into our table.
hr = MapSystemInterfaces();
IfFailGoto(hr, exit);
// Check for the RemotingProxy Attribute
if (IsContextful())
{
_ASSERTE(g_pObjectClass);
// Skip mscorlib marshal-by-ref classes since they all
// are assumed to have the default proxy attribute
if (!(pModule == g_pObjectClass->GetModule()))
{
hr = CheckForRemotingProxyAttrib(&bmtInternal,&bmtProp);
IfFailGoto(hr, exit);
}
}
_ASSERTE(SUCCEEDED(hr));
// structs with GC poitners MUST be pointer sized aligned because the GC assumes it
if (IsValueClass() && GetMethodTable()->ContainsPointers() && m_dwNumInstanceFieldBytes % sizeof(void*) != 0)
{
bmtError.resIDWhy = IDS_CLASSLOAD_BADFORMAT;
hr = COR_E_TYPELOAD;
goto exit;
}
exit:
if (SUCCEEDED(hr))
{
if (g_pObjectClass == NULL)
{
// Create a hash of all Object's method names in a special bitmap
LPCUTF8 pszName;
LPCUTF8 pszNamespace;
// First determine whether we are Object
GetMDImport()->GetNameOfTypeDef(GetCl(), &pszName, &pszNamespace);
if (!strcmp(pszName, "Object") && !strcmp(pszNamespace, g_SystemNS))
CreateObjectClassMethodHashBitmap(this);
}
if (IsInterface())
{
// Reset parent class
SetParentClass (g_pObjectClass->GetClass());
}
SetResolved();
// NOTE. NOTE!! the EEclass can now be accessed by other threads.
// Do NOT place any initialization after this pointer
#ifdef _DEBUG
NameHandle name(pModule, cl);
_ASSERTE (pModule->GetClassLoader()->LookupInModule(&name).IsNull()
&& "RID map already has this MethodTable");
#endif
// !!! JIT can get to a MT through FieldDesc.
// !!! We need to publish MT before FieldDesc's.
if (!pModule->StoreTypeDef(cl, TypeHandle(GetMethodTable())))
hr = E_OUTOFMEMORY;
else
{
// Now that the class is ready, fill out the RID maps
hr = FillRIDMaps(&bmtMFDescs, &bmtMetaData, &bmtInternal);
// Okay the EEClass is all set to go, insert the class into our clsid hash table
// Note: Only insert the type if it isn't a value class.
if (!IsValueClass())
GetClassLoader()->InsertClassForCLSID(this);
}
} else {
LPCUTF8 pszClassName, pszNameSpace;
pModule->GetMDImport()->GetNameOfTypeDef(GetCl(), &pszClassName, &pszNameSpace);
if ((! bmtError.dMethodDefInError || bmtError.dMethodDefInError == mdMethodDefNil) &&
bmtError.szMethodNameForError == NULL) {
if (hr == E_OUTOFMEMORY)
PostOutOfMemoryException(pThrowable);
else
pModule->GetAssembly()->PostTypeLoadException(pszNameSpace, pszClassName,
bmtError.resIDWhy, pThrowable);
}
else {
LPCUTF8 szMethodName;
if(bmtError.szMethodNameForError == NULL)
szMethodName = (bmtInternal.pInternalImport)->GetNameOfMethodDef(bmtError.dMethodDefInError);
else
szMethodName = bmtError.szMethodNameForError;
pModule->GetAssembly()->PostTypeLoadException(pszNameSpace, pszClassName,
szMethodName, bmtError.resIDWhy, pThrowable);
}
}
#ifdef _DEBUG
if (g_pConfig->ShouldDumpOnClassLoad(pszDebugName))
{
LOG((LF_ALWAYS, LL_ALWAYS, "Method table summary for '%s':\n", pszDebugName));
LOG((LF_ALWAYS, LL_ALWAYS, "Number of static fields: %d\n", bmtEnumMF.dwNumStaticFields));
LOG((LF_ALWAYS, LL_ALWAYS, "Number of instance fields: %d\n", bmtEnumMF.dwNumInstanceFields));
LOG((LF_ALWAYS, LL_ALWAYS, "Number of static obj ref fields: %d\n", bmtEnumMF.dwNumStaticObjRefFields));
LOG((LF_ALWAYS, LL_ALWAYS, "Number of declared fields: %d\n", bmtEnumMF.dwNumDeclaredFields));
LOG((LF_ALWAYS, LL_ALWAYS, "Number of declared methods: %d\n", bmtEnumMF.dwNumDeclaredMethods));
DebugDumpVtable(pszDebugName, false);
DebugDumpFieldLayout(pszDebugName, false);
DebugDumpGCDesc(pszDebugName, false);
}
#endif
STRESS_LOG3(LF_CLASSLOADER, LL_INFO1000, "BuildMethodTable: finished method table for module %p token %x = %pT \n",
pModule, cl, GetMethodTable());
//deallocate space allocated by the thread-based allocator
pThread->m_MarshalAlloc.Collapse(checkPointMarker);
if (bmtParent.pParentMethodHash != NULL)
delete(bmtParent.pParentMethodHash);
WS_PERF_UPDATE_DETAIL("BuildMethodTable:DELETE", 0, bmtParent.pParentMethodHash);
if (bmtMFDescs.ppUnboxMethodDescList != NULL)
delete[] bmtMFDescs.ppUnboxMethodDescList;
WS_PERF_UPDATE_DETAIL("BuildMethodTable:DELETE []", 0, bmtMFDescs.ppUnboxMethodDescList);
if (bmtMFDescs.ppMethodAndFieldDescList != NULL)
delete[] bmtMFDescs.ppMethodAndFieldDescList;
WS_PERF_UPDATE_DETAIL("BuildMethodTable:DELETE []", 0, bmtMFDescs.ppMethodAndFieldDescList);
// delete our temporary vtable
if (bmtVT.pVtable != NULL)
delete[] bmtVT.pVtable;
WS_PERF_UPDATE_DETAIL("BuildMethodTable:DELETE []", 0, bmtVT.pVtable);
// pFields and pMethods are allocated on the stack so we don't need to delete them.
if (pByValueClassCache != NULL)
HeapFree(GetProcessHeap(), 0, pByValueClassCache);
WS_PERF_UPDATE_DETAIL("BuildMethodTable:DELETE []", 0, pByValueClassCache);
if (bmtEnumMF.fNeedToCloseEnumField)
(bmtInternal.pInternalImport)->EnumClose(&bmtEnumMF.hEnumField);
if (bmtEnumMF.fNeedToCloseEnumMethod)
(bmtInternal.pInternalImport)->EnumClose(&bmtEnumMF.hEnumMethod);
if (bmtEnumMF.fNeedToCloseEnumMethodImpl) {
(bmtInternal.pInternalImport)->EnumMethodImplClose(&bmtEnumMF.hEnumBody,
&bmtEnumMF.hEnumDecl);
}
#ifdef _DEBUG
if (FAILED(hr))
{
// This totally junk code allows setting a breakpoint on this line
hr = hr;
}
#endif
}
COMPLUS_CATCH
{
hr = COR_E_TYPELOAD;
}
COMPLUS_END_CATCH
return hr;
}
// Ensure that all the system interfaces implemented by this class are
// registered in the current domain's COM interface vtable map. System
// interface ids are globally unique, so they fit directly into the table.
//
// Returns S_OK (propagated from MapSystemInterfacesToDomain).
HRESULT EEClass::MapSystemInterfaces()
{
    // Removed dead local: the previous code fetched GetAssembly() into an
    // unused variable; the domain is all that is needed here.
    AppDomain* pDomain = SystemDomain::GetCurrentDomain();
    return MapSystemInterfacesToDomain(pDomain);
}
// Copy this class's system-owned interface entries into the interface
// vtable map of the given target domain. A no-op when the target is the
// system domain itself (that is the table being copied from).
HRESULT EEClass::MapSystemInterfacesToDomain(AppDomain* pDomain)
{
    // The system domain already owns the canonical entries; nothing to do.
    if (pDomain == (AppDomain*) SystemDomain::System())
        return S_OK;

    // If this class is itself an interface, propagate its own entry first.
    if (IsInterface()) {
        _ASSERTE(GetMethodTable());
        MapInterfaceFromSystem(pDomain, GetMethodTable());
    }

    // Walk the implemented-interface map and propagate each entry.
    InterfaceInfo_t *pIntfMap = GetInterfaceMap();
    DWORD cInterfaces = GetMethodTable()->GetNumInterfaces();
    for (DWORD ix = 0; ix < cInterfaces; ix++) {
        MethodTable* pIntfMT = pIntfMap[ix].m_pMethodTable;
        MapInterfaceFromSystem(pDomain, pIntfMT);
    }

    return S_OK;
}
/* static */
// If the interface described by pTable is owned by the system domain (or by
// the shared domain, which is folded into the system domain below), publish
// its vtable into pDomain's COM-wrapper interface table under its global id.
HRESULT EEClass::MapInterfaceFromSystem(AppDomain* pDomain, MethodTable* pTable)
{
    // Determine which domain actually owns this interface's method table.
    BaseDomain* pOwningDomain = pTable->GetModule()->GetDomain();

    // !!! HACK COUGH UGGH
    // We currently can only have one "shared" vtable map mgr
    // - so use the system domain for all shared classes
    if (pOwningDomain == SharedDomain::GetDomain())
        pOwningDomain = SystemDomain::System();

    // Only system-domain interfaces need to be mirrored into the target domain.
    if (pOwningDomain == SystemDomain::System()) {
        DWORD dwId = pTable->GetClass()->GetInterfaceId();
        pDomain->GetInterfaceVTableMapMgr().EnsureInterfaceId(dwId);
        (pDomain->GetInterfaceVTableMapMgr().GetAddrOfGlobalTableForComWrappers())[dwId] =
            (LPVOID)(pTable->GetVtable());
    }
    return S_OK;
}
//
// Used by BuildMethodTable
//
// Resolve unresolved interfaces, determine an upper bound on the size of the interface map,
// and determine the size of the largest interface (in # slots)
//
// Used by BuildMethodTable.
//
// Resolve unresolved interfaces, determine an upper bound on the size of the
// interface map, and determine the size of the largest interface (in # slots).
// Also seeds bmtVT/bmtParent with slot and GC-series counts inherited from the
// parent class, and allocates the scratch buffers (from the thread's
// checkpointed allocator) used later during vtable construction.
HRESULT EEClass::ResolveInterfaces(BuildingInterfaceInfo_t *pBuildingInterfaceList, bmtInterfaceInfo* bmtInterface, bmtProperties* bmtProp, bmtVtable* bmtVT, bmtParentInfo* bmtParent)
{
    HRESULT hr = S_OK;
    DWORD i;
    Thread *pThread = GetThread();
    // resolve unresolved interfaces, determine an upper bound on the size of the interface map,
    // and determine the size of the largest interface (in # slots)
    bmtInterface->dwMaxExpandedInterfaces = 0; // upper bound on max # interfaces implemented by this class
    // First look through the interfaces explicitly declared by this class
    for (i = 0; i < m_wNumInterfaces; i++)
    {
        EEClass *pInterface = pBuildingInterfaceList[i].m_pClass;
        _ASSERTE(pInterface->IsResolved());
        // Worst case: the interface itself plus everything it implements.
        bmtInterface->dwMaxExpandedInterfaces += (1+ pInterface->m_wNumInterfaces);
    }
    // Now look at interfaces inherited from the parent
    if (GetParentClass() != NULL)
    {
        InterfaceInfo_t *pParentInterfaceMap = GetParentClass()->GetInterfaceMap();
        for (i = 0; i < GetParentClass()->m_wNumInterfaces; i++)
        {
            MethodTable *pMT = pParentInterfaceMap[i].m_pMethodTable;
            EEClass *pClass = pMT->GetClass();
            bmtInterface->dwMaxExpandedInterfaces += (1+pClass->m_wNumInterfaces);
        }
    }
    // Create a fully expanded map of all interfaces we implement
    bmtInterface->pInterfaceMap = (InterfaceInfo_t *) pThread->m_MarshalAlloc.Alloc(sizeof(InterfaceInfo_t) * bmtInterface->dwMaxExpandedInterfaces);
    if (bmtInterface->pInterfaceMap == NULL)
    {
        IfFailRet(E_OUTOFMEMORY);
    }
    // # slots of largest interface
    bmtInterface->dwLargestInterfaceSize = 0;
    // CreateInterfaceMap fills pInterfaceMap and reports the actual map size,
    // which may be smaller than the upper bound computed above (duplicates).
    if (CreateInterfaceMap(pBuildingInterfaceList, bmtInterface->pInterfaceMap, &bmtInterface->dwInterfaceMapSize, &bmtInterface->dwLargestInterfaceSize) == FALSE)
    {
        IfFailRet(COR_E_TYPELOAD);
    }
    _ASSERTE(bmtInterface->dwInterfaceMapSize <= bmtInterface->dwMaxExpandedInterfaces);
    if (bmtInterface->dwLargestInterfaceSize > 0)
    {
        // This is needed later - for each interface, we get the MethodDesc pointer for each
        // method. We need to be able to persist at most one interface at a time, so we
        // need enough memory for the largest interface.
        bmtInterface->ppInterfaceMethodDescList = (MethodDesc**)
            pThread->m_MarshalAlloc.Alloc(bmtInterface->dwLargestInterfaceSize * sizeof(MethodDesc*));
        if (bmtInterface->ppInterfaceMethodDescList == NULL)
        {
            IfFailRet(E_OUTOFMEMORY);
        }
    }
    // For all the new interfaces we bring in (those beyond the parent's map),
    // sum the methods.
    bmtInterface->dwTotalNewInterfaceMethods = 0;
    if (GetParentClass() != NULL)
    {
        for (i = GetParentClass()->m_wNumInterfaces; i < (bmtInterface->dwInterfaceMapSize); i++)
            bmtInterface->dwTotalNewInterfaceMethods +=
                bmtInterface->pInterfaceMap[i].m_pMethodTable->GetClass()->GetNumVtableSlots();
    }
    // The interface map is probably smaller than dwMaxExpandedInterfaces, so we'll copy the
    // appropriate number of bytes when we allocate the real thing later.
    // Update m_wNumInterfaces to be for the fully expanded interface list
    m_wNumInterfaces = (WORD) bmtInterface->dwInterfaceMapSize;
    // Inherit parental slot counts
    if (GetParentClass() != NULL)
    {
        bmtVT->dwCurrentVtableSlot = GetParentClass()->GetNumVtableSlots();
        bmtParent->dwNumParentInterfaces = GetParentClass()->m_wNumInterfaces;
        bmtParent->NumParentPointerSeries = GetParentClass()->m_wNumGCPointerSeries;
        // Propagate the must-init flag down the hierarchy.
        if (GetParentClass()->HasFieldsWhichMustBeInited())
            m_VMFlags |= VMFLAG_HAS_FIELDS_WHICH_MUST_BE_INITED;
    }
    else
    {
        // No parent: start all inherited counts at zero.
        bmtVT->dwCurrentVtableSlot = 0;
        bmtParent->dwNumParentInterfaces = 0;
        bmtParent->NumParentPointerSeries = 0;
    }
    // Clear the method-name hash bitmap before it is populated later.
    memset(m_MethodHash, 0, METHOD_HASH_BYTES);
    bmtVT->dwCurrentNonVtableSlot = 0;
    // Init the currently number of vtable slots to the number that our parent has - we inc
    // this as we find non-overloaded instance methods.
    SetNumVtableSlots ((WORD) bmtVT->dwCurrentVtableSlot);
    // Scratch array: one MethodDesc* list per (potential) interface, used when
    // matching interface methods to implementing methods.
    bmtInterface->pppInterfaceImplementingMD = (MethodDesc ***) pThread->m_MarshalAlloc.Alloc(sizeof(MethodDesc *) * bmtInterface->dwMaxExpandedInterfaces);
    memset(bmtInterface->pppInterfaceImplementingMD, 0, sizeof(MethodDesc *) * bmtInterface->dwMaxExpandedInterfaces);
    return hr;
}
// Used by BuildMethodTable.
//
// Enumerate the MethodImpl (override) records declared on this type, resolve
// each body token to a MethodDef on this class, and validate the Decl/Body
// pair against the metadata rules (virtual, non-final, accessibility, matching
// signatures). On success the token arrays in bmtMetaData and the desc arrays
// in bmtMethodImpl are allocated (from the thread's checkpointed allocator)
// and the body/decl token lists are populated. On validation failure, sets
// bmtError->resIDWhy and returns a failure HRESULT.
HRESULT EEClass::EnumerateMethodImpls(bmtInternalInfo* bmtInternal,
                                      bmtEnumMethAndFields* bmtEnumMF,
                                      bmtMetaDataInfo* bmtMetaData,
                                      bmtMethodImplInfo* bmtMethodImpl,
                                      bmtErrorInfo* bmtError)
{
    HRESULT hr = S_OK;
    IMDInternalImport *pMDInternalImport = bmtInternal->pInternalImport;
    DWORD rid, attr, maxRidMD, maxRidMR;
    mdToken tkParent, tkGrandparent;
    PCCOR_SIGNATURE pSigDecl=NULL,pSigBody = NULL;
    ULONG cbSigDecl, cbSigBody;
    // Open the MethodImpl enumeration for this type.
    hr = pMDInternalImport->EnumMethodImplInit(m_cl,
                                               &(bmtEnumMF->hEnumBody),
                                               &(bmtEnumMF->hEnumDecl));
    if (SUCCEEDED(hr)) {
        // Mark the enums so BuildMethodTable's exit path closes them.
        bmtEnumMF->fNeedToCloseEnumMethodImpl = true;
        bmtEnumMF->dwNumberMethodImpls = pMDInternalImport->EnumMethodImplGetCount(&(bmtEnumMF->hEnumBody),
                                                                                   &(bmtEnumMF->hEnumDecl));
        if(bmtEnumMF->dwNumberMethodImpls) {
            // Parallel arrays, one slot per MethodImpl record. Allocated from
            // the thread allocator; freed when the checkpoint is collapsed.
            bmtMetaData->pMethodBody = (mdToken*) GetThread()->m_MarshalAlloc.Alloc(bmtEnumMF->dwNumberMethodImpls *
                                                                                    sizeof(mdToken));
            bmtMetaData->pMethodDecl = (mdToken*) GetThread()->m_MarshalAlloc.Alloc(bmtEnumMF->dwNumberMethodImpls *
                                                                                    sizeof(mdToken));
            bmtMethodImpl->pBodyDesc = (MethodDesc**) GetThread()->m_MarshalAlloc.Alloc(bmtEnumMF->dwNumberMethodImpls *
                                                                                        sizeof(MethodDesc*));
            bmtMethodImpl->pDeclDesc = (MethodDesc**) GetThread()->m_MarshalAlloc.Alloc(bmtEnumMF->dwNumberMethodImpls *
                                                                                        sizeof(MethodDesc*));
            bmtMethodImpl->pDeclToken = (mdToken*) GetThread()->m_MarshalAlloc.Alloc(bmtEnumMF->dwNumberMethodImpls *
                                                                                     sizeof(mdToken));
            mdToken theBody,theDecl;
            mdToken* pBody = bmtMetaData->pMethodBody;
            mdToken* pDecl = bmtMetaData->pMethodDecl;
            // Highest valid RIDs, used for token range validation below.
            maxRidMD = pMDInternalImport->GetCountWithTokenKind(mdtMethodDef);
            maxRidMR = pMDInternalImport->GetCountWithTokenKind(mdtMemberRef);
            for(DWORD i = 0; i < bmtEnumMF->dwNumberMethodImpls; i++) {
                if(!pMDInternalImport->EnumMethodImplNext(&(bmtEnumMF->hEnumBody),
                                                          &(bmtEnumMF->hEnumDecl),
                                                          &theBody,
                                                          pDecl))
                    break;
                // If the body is not already a MethodDef, resolve it to a
                // MethodDef declared on this class.
                if(TypeFromToken(theBody) != mdtMethodDef) {
                    Module* pModule;
                    hr = FindMethodDeclaration(bmtInternal,
                                               theBody,
                                               pBody,
                                               TRUE,
                                               &pModule,
                                               bmtError);
                    if(FAILED(hr)) {
                        //_ASSERTE(SUCCEEDED(hr) && "MethodImpl Body: FindMethodDeclaration failed");
                        bmtError->resIDWhy = IDS_CLASSLOAD_MI_ILLEGAL_BODY;
                        IfFailRet(hr);
                    }
                    _ASSERTE(pModule == bmtInternal->pModule);
                    theBody = *pBody;
                }
                else
                    *pBody = theBody;
                // Now that the tokens of Decl and Body are obtained, do the MD validation
                // Decl may be a MemberRef
                theDecl = *pDecl;
                rid = RidFromToken(theDecl);
                if(TypeFromToken(theDecl) == mdtMethodDef)
                {
                    // Decl must be valid token
                    if ((rid == 0)||(rid > maxRidMD))
                    {
                        //_ASSERTE(!"MethodImpl Decl token out of range");
                        bmtError->resIDWhy = IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_DECL;
                        IfFailRet(COR_E_TYPELOAD);
                    }
                    // Decl must be mdVirtual
                    attr = pMDInternalImport->GetMethodDefProps(theDecl);
                    if(!IsMdVirtual(attr))
                    {
                        //_ASSERTE(!"MethodImpl Decl method not virtual");
                        bmtError->resIDWhy = IDS_CLASSLOAD_MI_NONVIRTUAL_DECL;
                        IfFailRet(COR_E_TYPELOAD);
                    }
                    // Decl must not be final
                    if(IsMdFinal(attr))
                    {
                        //_ASSERTE(!"MethodImpl Decl method final");
                        bmtError->resIDWhy = IDS_CLASSLOAD_MI_FINAL_DECL;
                        IfFailRet(COR_E_TYPELOAD);
                    }
                    // If Decl's parent is other than this class, Decl must not be private
                    hr = pMDInternalImport->GetParentToken(theDecl,&tkParent);
                    IfFailRet(hr);
                    if((m_cl != tkParent)&&IsMdPrivate(attr))
                    {
                        //_ASSERTE(!"MethodImpl Decl method private");
                        bmtError->resIDWhy = IDS_CLASSLOAD_MI_PRIVATE_DECL;
                        IfFailRet(COR_E_TYPELOAD);
                    }
                    // Decl's parent must not be tdSealed
                    pMDInternalImport->GetTypeDefProps(tkParent,&attr,&tkGrandparent);
                    if(IsTdSealed(attr))
                    {
                        //_ASSERTE(!"MethodImpl Decl's parent class sealed");
                        bmtError->resIDWhy = IDS_CLASSLOAD_MI_SEALED_DECL;
                        IfFailRet(COR_E_TYPELOAD);
                    }
                    // Get signature and length
                    pSigDecl = pMDInternalImport->GetSigOfMethodDef(theDecl,&cbSigDecl);
                }
                else
                {
                    // Decl is a MemberRef.
                    // Decl must be valid token
                    if ((rid == 0)||(rid > maxRidMR))
                    {
                        //_ASSERTE(!"MethodImpl Decl token out of range");
                        bmtError->resIDWhy = IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_DECL;
                        IfFailRet(COR_E_TYPELOAD);
                    }
                    // Get signature and length
                    pMDInternalImport->GetNameAndSigOfMemberRef(theDecl,&pSigDecl,&cbSigDecl);
                }
                // Body must be valid token
                rid = RidFromToken(theBody);
                if ((rid == 0)||(rid > maxRidMD))
                {
                    //_ASSERTE(!"MethodImpl Body token out of range");
                    bmtError->resIDWhy = IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_BODY;
                    IfFailRet(COR_E_TYPELOAD);
                }
                // Body must not be static
                attr = pMDInternalImport->GetMethodDefProps(theBody);
                if(IsMdStatic(attr))
                {
                    //_ASSERTE(!"MethodImpl Body method static");
                    bmtError->resIDWhy = IDS_CLASSLOAD_MI_ILLEGAL_STATIC;
                    IfFailRet(COR_E_TYPELOAD);
                }
                // Body's parent must be this class
                hr = pMDInternalImport->GetParentToken(theBody,&tkParent);
                IfFailRet(hr);
                if(tkParent != m_cl)
                {
                    //_ASSERTE(!"MethodImpl Body's parent class different");
                    bmtError->resIDWhy = IDS_CLASSLOAD_MI_ILLEGAL_BODY;
                    IfFailRet(COR_E_TYPELOAD);
                }
                // Decl's and Body's signatures must match
                if(pSigDecl && cbSigDecl)
                {
                    if((pSigBody = pMDInternalImport->GetSigOfMethodDef(theBody,&cbSigBody)) != NULL && cbSigBody)
                    {
                        // Can't use memcmp because there may be two AssemblyRefs
                        // in this scope, pointing to the same assembly, etc.).
                        if (!MetaSig::CompareMethodSigs(pSigDecl,
                                                        cbSigDecl,
                                                        bmtInternal->pModule,
                                                        pSigBody,
                                                        cbSigBody,
                                                        bmtInternal->pModule))
                        {
                            //_ASSERTE(!"MethodImpl Decl's and Body's signatures mismatch");
                            bmtError->resIDWhy = IDS_CLASSLOAD_MI_BODY_DECL_MISMATCH;
                            IfFailRet(COR_E_TYPELOAD);
                        }
                    }
                    else
                    {
                        //_ASSERTE(!"MethodImpl Body's signature unavailable");
                        bmtError->resIDWhy = IDS_CLASSLOAD_MI_MISSING_SIG_BODY;
                        IfFailRet(COR_E_TYPELOAD);
                    }
                }
                else
                {
                    //_ASSERTE(!"MethodImpl Decl's signature unavailable");
                    bmtError->resIDWhy = IDS_CLASSLOAD_MI_MISSING_SIG_DECL;
                    IfFailRet(COR_E_TYPELOAD);
                }
                // Advance to the next parallel-array slot.
                pBody++;
                pDecl++;
            }
        }
    }
    return hr;
}
//
// Used by BuildMethodTable
//
// Retrieve or add the TokenRange node for a particular token and nodelist.
// Retrieve or add the TokenRange node for a particular token and nodelist.
// Nodes partition method tokens by the high byte of their RID; the list is
// searched linearly and new nodes are pushed on the front.
/*static*/ EEClass::bmtTokenRangeNode *EEClass::GetTokenRange(mdToken tok, bmtTokenRangeNode **ppHead)
{
    // High byte of the token selects the range bucket.
    BYTE hiByte = ::GetTokenRange(tok);

    // Linear search for an existing node covering this range.
    for (bmtTokenRangeNode *pNode = *ppHead; pNode != NULL; pNode = pNode->pNext)
    {
        if (pNode->tokenHiByte == hiByte)
            return pNode;
    }

    // First time we've seen this token range: allocate a fresh node from the
    // thread's checkpointed allocator and link it at the head of the list.
    bmtTokenRangeNode *pFresh = (bmtTokenRangeNode*)(GetThread()->m_MarshalAlloc.Alloc(sizeof(bmtTokenRangeNode)));
    pFresh->tokenHiByte = hiByte;
    pFresh->cMethods = 0;
    pFresh->dwCurrentChunk = 0;
    pFresh->dwCurrentIndex = 0;
    pFresh->pNext = *ppHead;
    *ppHead = pFresh;
    return pFresh;
}
// Argument bundle threaded through the opaque void* parameter of SigCompare:
// carries the modules needed to interpret tokens embedded in each signature.
typedef struct _SigArguments
{
    Module* pScopeModule;    // module that scopes the first signature
    Module* pExternalModule; // module that scopes the second signature
} CompareSigArguments;
// Callback-shaped trampoline: unpack the module pair from the opaque context
// and defer to the metadata signature comparer, which resolves tokens
// relative to each signature's own module.
static BOOL SigCompare(PCCOR_SIGNATURE pvScopeSignature, DWORD cbScopeSignature,
                       PCCOR_SIGNATURE pvExternalSignature, DWORD cbExternalSignature,
                       void* pSigArgs)
{
    CompareSigArguments *pModules = (CompareSigArguments*) pSigArgs;
    BOOL fMatch = MetaSig::CompareMethodSigs(pvScopeSignature, cbScopeSignature,
                                             pModules->pScopeModule,
                                             pvExternalSignature, cbExternalSignature,
                                             pModules->pExternalModule);
    return fMatch;
}
//
//
// Find a method declaration that must reside in the scope passed in. This method cannot be called if
// the reference travels to another scope.
//
// Protect against finding a declaration that lives within
// us (the type being created)
//
// Find a method declaration that must reside in the scope passed in. This
// method cannot be called if the reference travels to another scope.
//
// pToken       - MemberRef or MethodDef being located.
// pDeclaration - [out] resolved MethodDef token for the member.
// fSameClass   - if TRUE, the declaration must live on this class.
// pModule      - [out] module the MethodDef belongs to (always this module).
// Returns S_OK on success, COR_E_TYPELOAD (or a metadata HRESULT) on failure.
HRESULT EEClass::FindMethodDeclaration(bmtInternalInfo* bmtInternal,
                                       mdToken pToken, // Token that is being located (MemberRef or MemberDef)
                                       mdToken* pDeclaration, // Method definition for Member
                                       BOOL fSameClass, // Does the declaration need to be in this class
                                       Module** pModule, // Module that the Method Definitions is part of
                                       bmtErrorInfo* bmtError)
{
    HRESULT hr = S_OK;
    IMDInternalImport *pMDInternalImport = bmtInternal->pInternalImport;
    MethodDesc* pMethod = NULL;
    //   // We are currently assumming that most MethodImpls will be used
    //   // to define implementation for methods defined on an interface
    //   // or base type. Therefore, we try to load entry first. If that
    //   // indicates the member is on our type then we check meta data.
    //   hr = GetDescFromMemberRef(bmtInternal->pModule,
    //                             pToken,
    //                             GetCl(),
    //                             (void**) (&pMethod),
    //                             bmtError->pThrowable);
    //   if(FAILED(hr) && !pThrowableAvailable(bmtError->pThrowable)) { // it was us we were find
    *pModule = bmtInternal->pModule;
    PCCOR_SIGNATURE pSig;  // Signature of Member
    DWORD           cSig;
    LPCUTF8         szMember = NULL;
    // The token should be a member ref or def. If it is a ref then we need to travel
    // back to us hopefully.
    if(TypeFromToken(pToken) == mdtMemberRef) {
        // Get the parent
        mdToken typeref = pMDInternalImport->GetParentOfMemberRef(pToken);
        // If parent is a method def then this is a varargs method
        if (TypeFromToken(typeref) == mdtMethodDef) {
            mdTypeDef typeDef;
            hr = pMDInternalImport->GetParentToken(typeref, &typeDef);
            // FIX: previously typeDef was consumed without checking hr.
            IfFailRet(hr);
            // Make sure it is a typedef
            if (TypeFromToken(typeDef) != mdtTypeDef) {
                _ASSERTE(!"MethodDef without TypeDef as Parent");
                IfFailRet(COR_E_TYPELOAD);
            }
            _ASSERTE(typeDef == GetCl());
            // This is the real method we are overriding
            // @TODO: CTS this may be illegal and we could throw an error
            // FIX: return the actual MethodDef token (the MemberRef's parent),
            // not the token-type constant mdtMethodDef (0x06000000), which is
            // not a valid method token.
            *pDeclaration = typeref;
        }
        else if (TypeFromToken(typeref) == mdtTypeSpec) {
            // TypeSpec parents (e.g. instantiated types) are not supported here.
            _ASSERTE(!"Method impls cannot override a member parented to a TypeSpec");
            IfFailRet(COR_E_TYPELOAD);
        }
        else {
            // Verify that the ref points back to us
            mdToken tkDef;
            // We only get here when we know the token does not reference a type
            // in a different scope.
            if(TypeFromToken(typeref) == mdtTypeRef) {
                LPCUTF8 pszNameSpace;
                LPCUTF8 pszClassName;
                pMDInternalImport->GetNameOfTypeRef(typeref, &pszNameSpace, &pszClassName);
                mdToken tkRes = pMDInternalImport->GetResolutionScopeOfTypeRef(typeref);
                hr = pMDInternalImport->FindTypeDef(pszNameSpace,
                                                    pszClassName,
                                                    (TypeFromToken(tkRes) == mdtTypeRef) ? tkRes : mdTokenNil,
                                                    &tkDef);
                // When the caller requires the declaration on this class, any
                // resolution to a different TypeDef is an error.
                if(fSameClass && tkDef != GetCl())
                {
                    IfFailRet(COR_E_TYPELOAD);
                }
            }
            else
                tkDef = GetCl();
            // Look up the MethodDef by name and signature on the resolved type.
            szMember = pMDInternalImport->GetNameAndSigOfMemberRef(pToken,
                                                                   &pSig,
                                                                   &cSig);
            // Field references are never valid method declarations.
            if(isCallConv(MetaSig::GetCallingConventionInfo(*pModule, pSig),
                          IMAGE_CEE_CS_CALLCONV_FIELD)) {
                return VLDTR_E_MR_BADCALLINGCONV;
            }
            hr = pMDInternalImport->FindMethodDef(tkDef,
                                                  szMember,
                                                  pSig,
                                                  cSig,
                                                  pDeclaration);
            IfFailRet(hr);
        }
    }
    else if(TypeFromToken(pToken) == mdtMethodDef) {
        mdTypeDef typeDef;
        // Verify that we are the parent
        hr = pMDInternalImport->GetParentToken(pToken, &typeDef);
        IfFailRet(hr);
        if(typeDef != GetCl())
        {
            IfFailRet(COR_E_TYPELOAD);
        }
        *pDeclaration = pToken;
    }
    else {
        // Any other token kind is malformed metadata.
        IfFailRet(COR_E_TYPELOAD);
    }
    return hr;
}
//
// Used by BuildMethodTable
//
// Enumerate this class's members
//
HRESULT EEClass::EnumerateClassMembers(bmtInternalInfo* bmtInternal,
bmtEnumMethAndFields* bmtEnumMF,
bmtMethAndFieldDescs* bmtMF,
bmtProperties* bmtProp,
bmtMetaDataInfo* bmtMetaData,
bmtVtable* bmtVT,
bmtErrorInfo* bmtError)
{
HRESULT hr = S_OK;
DWORD i;
DWORD dwNumECallMethodDescs = 0;
Thread *pThread = GetThread();
IMDInternalImport *pMDInternalImport = bmtInternal->pInternalImport;
mdToken tok;
DWORD dwMemberAttrs;
BOOL fIsClassEnum = IsEnum();
BOOL fIsClassInterface = IsInterface();
BOOL fIsClassValueType = IsValueClass();
BOOL fIsClassComImport = IsComImport();
BOOL fIsClassNotAbstract = (IsTdAbstract(m_dwAttrClass) == 0);
PCCOR_SIGNATURE pMemberSignature;
ULONG cMemberSignature;
//
// Run through the method list and calculate the following:
// # methods.
// # "other" methods (i.e. static or private)
// # non-other methods
//
bmtVT->dwMaxVtableSize = 0; // we'll fix this later to be the real upper bound on vtable size
bmtMetaData->cMethods = 0;
hr = pMDInternalImport->EnumInit(mdtMethodDef, m_cl, &(bmtEnumMF->hEnumMethod));
if (FAILED(hr))
{
_ASSERTE(!"Cannot count memberdefs");
IfFailRet(hr);
}
bmtEnumMF->fNeedToCloseEnumMethod = true;
// Allocate an array to contain the method tokens as well as information about the methods.
bmtMetaData->cMethAndGaps = pMDInternalImport->EnumGetCount(&(bmtEnumMF->hEnumMethod));
bmtMetaData->pMethods = (mdToken*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cMethAndGaps * sizeof(mdToken));
bmtMetaData->pMethodAttrs = (DWORD*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cMethAndGaps * sizeof(DWORD));
bmtMetaData->pMethodRVA = (ULONG*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cMethAndGaps * sizeof(ULONG));
bmtMetaData->pMethodImplFlags = (DWORD*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cMethAndGaps * sizeof(DWORD));
bmtMetaData->pMethodClassifications = (DWORD*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cMethAndGaps * sizeof(DWORD));
bmtMetaData->pstrMethodName = (LPSTR*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cMethAndGaps * sizeof(LPSTR));
bmtMetaData->pMethodImpl = (BYTE*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cMethAndGaps * sizeof(BYTE));
bmtMetaData->pMethodType = (BYTE*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cMethAndGaps * sizeof(BYTE));
enum { SeenInvoke = 1, SeenBeginInvoke = 2, SeenEndInvoke = 4, SeenCtor = 8 };
unsigned delegateMethods = 0;
for (i = 0; i < bmtMetaData->cMethAndGaps; i++)
{
ULONG dwMethodRVA;
DWORD dwImplFlags;
DWORD Classification;
LPSTR strMethodName;
//
// Go to the next method and retrieve its attributes.
//
pMDInternalImport->EnumNext(&(bmtEnumMF->hEnumMethod), &tok);
DWORD rid = RidFromToken(tok);
if ((rid == 0)||(rid > pMDInternalImport->GetCountWithTokenKind(mdtMethodDef)))
{
_ASSERTE(!"Method token out of range");
IfFailRet(COR_E_TYPELOAD);
}
dwMemberAttrs = pMDInternalImport->GetMethodDefProps(tok);
if (IsMdRTSpecialName(dwMemberAttrs) || IsMdVirtual(dwMemberAttrs) || IsAnyDelegateClass())
{
strMethodName = (LPSTR)pMDInternalImport->GetNameOfMethodDef(tok);
if(IsStrLongerThan(strMethodName,MAX_CLASS_NAME))
{
_ASSERTE(!"Method Name Too Long");
IfFailRet(COR_E_TYPELOAD);
}
}
else
strMethodName = NULL;
//
// We need to check if there are any gaps in the vtable. These are
// represented by methods with the mdSpecial flag and a name of the form
// _VTblGap_nnn (to represent nnn empty slots) or _VTblGap (to represent a
// single empty slot).
//
if (IsMdRTSpecialName(dwMemberAttrs))
{
// The slot is special, but it might not be a vtable spacer. To
// determine that we must look at the name.
if (strncmp(strMethodName, "_VtblGap", 8) == 0)
{
//
// This slot doesn't really exist, don't add it to the method
// table. Instead it represents one or more empty slots, encoded
// in the method name. Locate the beginning of the count in the
// name. There are these points to consider:
// There may be no count present at all (in which case the
// count is taken as one).
// There may be an additional count just after Gap but before
// the '_'. We ignore this.
//
LPCSTR pos = strMethodName + 8;
// Skip optional number.
while ((*pos >= '0') && (*pos <= '9'))
pos++;
WORD n = 0;
// Check for presence of count.
if (*pos == '\0')
n = 1;
else
{
// Skip '_'.
_ASSERTE(*pos == '_');
if (*pos != '_')
{
bmtMetaData->cMethods++;
continue;
}
pos++;
// Read count.
while ((*pos >= '0') && (*pos <= '9'))
{
_ASSERTE(n < 6552);
n *= 10;
n += *pos - '0';
pos++;
}
// Check for end of name.
_ASSERTE(*pos == '\0');
if (*pos != '\0')
{
bmtMetaData->cMethods++;
continue;
}
}
// Record vtable gap in mapping list.
if (m_pSparseVTableMap == NULL)
m_pSparseVTableMap = new SparseVTableMap();
if (!m_pSparseVTableMap->RecordGap((WORD)bmtMetaData->cMethods, n))
{
IfFailRet(E_OUTOFMEMORY);
}
bmtProp->fSparse = true;
continue;
}
}
//
// This is a real method so add it to the enumeration of methods. We now need to retrieve
// information on the method and store it for later use.
//
int CurMethod = bmtMetaData->cMethods++;
pMDInternalImport->GetMethodImplProps(tok, &dwMethodRVA, &dwImplFlags);
//
// But first - minimal flags validity checks
//
// No methods in Enums!
if(fIsClassEnum)
{
BAD_FORMAT_ASSERT(!"Method in an Enum");
IfFailRet(COR_E_TYPELOAD);
}
// RVA : 0
if(dwMethodRVA != 0)
{
if(fIsClassComImport)
{
BAD_FORMAT_ASSERT(!"Method with RVA!=0 in an Import");
IfFailRet(COR_E_TYPELOAD);
}
if(IsMdAbstract(dwMemberAttrs))
{
BAD_FORMAT_ASSERT(!"Abstract Method with RVA!=0");
IfFailRet(COR_E_TYPELOAD);
}
if(IsMiRuntime(dwImplFlags))
{
BAD_FORMAT_ASSERT(!"Runtime-Implemented Method with RVA!=0");
IfFailRet(COR_E_TYPELOAD);
}
if(IsMiInternalCall(dwImplFlags))
{
BAD_FORMAT_ASSERT(!"Internal Call Method with RVA!=0");
IfFailRet(COR_E_TYPELOAD);
}
}
// Abstract / not abstract
if(IsMdAbstract(dwMemberAttrs))
{
if(fIsClassNotAbstract)
{
BAD_FORMAT_ASSERT(!"Abstract Method in Non-Abstract Class");
IfFailRet(COR_E_TYPELOAD);
}
if(!IsMdVirtual(dwMemberAttrs))
{
BAD_FORMAT_ASSERT(!"Non-Vitrual Abstract Method");
IfFailRet(COR_E_TYPELOAD);
}
}
else if(fIsClassInterface && strMethodName &&
(strcmp(strMethodName, COR_CCTOR_METHOD_NAME)))
{
BAD_FORMAT_ASSERT(!"Non-abstract, non-cctor Method in an Interface");
IfFailRet(COR_E_TYPELOAD);
}
// Virtual / not virtual
if(IsMdVirtual(dwMemberAttrs))
{
if(IsMdPinvokeImpl(dwMemberAttrs))
{
BAD_FORMAT_ASSERT(!"Virtual PInvoke Implemented Method");
IfFailRet(COR_E_TYPELOAD);
}
if(IsMdStatic(dwMemberAttrs))
{
BAD_FORMAT_ASSERT(!"Virtual Static Method");
IfFailRet(COR_E_TYPELOAD);
}
if(strMethodName && (0==strcmp(strMethodName, COR_CTOR_METHOD_NAME)))
{
BAD_FORMAT_ASSERT(!"Virtual Instance Constructor");
IfFailRet(COR_E_TYPELOAD);
}
}
// No synchronized methods in ValueTypes
if(fIsClassValueType && IsMiSynchronized(dwImplFlags))
{
BAD_FORMAT_ASSERT(!"Synchronized Method in Value Type");
IfFailRet(COR_E_TYPELOAD);
}
// Global methods:
if(m_cl == COR_GLOBAL_PARENT_TOKEN)
{
if(!IsMdStatic(dwMemberAttrs))
{
BAD_FORMAT_ASSERT(!"Non-Static Global Method");
IfFailRet(COR_E_TYPELOAD);
}
if (strMethodName) //@todo: investigate mc++ generating null name
{
if(0==strcmp(strMethodName, COR_CTOR_METHOD_NAME))
{
BAD_FORMAT_ASSERT(!"Global Instance Constructor");
IfFailRet(COR_E_TYPELOAD);
}
}
}
// Signature validation
pMemberSignature = pMDInternalImport->GetSigOfMethodDef(tok,&cMemberSignature);
hr = validateTokenSig(tok,pMemberSignature,cMemberSignature,dwMemberAttrs,pMDInternalImport);
if (FAILED(hr))
{
//_ASSERTE(!"Invalid Signature");
bmtError->resIDWhy = hr;
bmtError->dMethodDefInError = tok;
IfFailRet(hr);
}
//
// Determine the method's classification.
//
if (IsReallyMdPinvokeImpl(dwMemberAttrs) || IsMiInternalCall(dwImplFlags))
{
hr = NDirect::HasNAT_LAttribute(pMDInternalImport, tok);
if (FAILED(hr))
{
bmtError->resIDWhy = IDS_CLASSLOAD_BADPINVOKE;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
IfFailRet(hr);
}
if (hr == S_FALSE)
{
if (fIsClassComImport || bmtProp->fComEventItfType)
{
// tlbimported component
if (IsMdRTSpecialName(dwMemberAttrs))
{
// constructor is special
Classification = mcECall;
}
else
{
// Tlbimported components we have some
// method descs in the call which are just used
// for handling methodimpls of all interface methods
Classification = mcComInterop;
}
}
else if (dwMethodRVA == 0)
Classification = mcECall;
else
Classification = mcNDirect;
}
else
Classification = mcNDirect;
}
else if (IsMiRuntime(dwImplFlags))
{
// currently the only runtime implemented functions are delegate instance methods
if (!IsAnyDelegateClass() || IsMdStatic(dwMemberAttrs) || IsMdAbstract(dwMemberAttrs))
{
BAD_FORMAT_ASSERT(!"Bad used of Runtime Impl attribute");
IfFailRet(COR_E_TYPELOAD);
}
if (IsMdRTSpecialName(dwMemberAttrs)) // .ctor
{
if (strcmp(strMethodName, COR_CTOR_METHOD_NAME) != 0 || IsMdVirtual(dwMemberAttrs) || (delegateMethods & SeenCtor))
{
BAD_FORMAT_ASSERT(!"Bad flags on delegate constructor");
IfFailRet(COR_E_TYPELOAD);
}
delegateMethods |= SeenCtor;
Classification = mcECall;
}
else
{
if (strcmp(strMethodName, "Invoke") == 0 && !(delegateMethods & SeenInvoke))
delegateMethods |= SeenInvoke;
else if (strcmp(strMethodName, "BeginInvoke") == 0 && !(delegateMethods & SeenBeginInvoke))
delegateMethods |= SeenBeginInvoke;
else if (strcmp(strMethodName, "EndInvoke") == 0 && !(delegateMethods & SeenEndInvoke))
delegateMethods |= SeenEndInvoke;
else
{
BAD_FORMAT_ASSERT(!"unknown delegate method");
IfFailRet(COR_E_TYPELOAD);
}
Classification = mcEEImpl;
}
}
else
{
if (fIsClassInterface && !IsMdStatic(dwMemberAttrs))
{
// If the interface is a standard managed interface then allocate space for an ECall method desc.
// Otherwise allocate space for an interface call method desc.
if (bmtProp->fIsMngStandardItf)
Classification = mcECall;
else
Classification = mcComInterop;
}
else
{
Classification = mcIL;
}
}
#ifdef _DEBUG
// We don't allow stack based declarative security on ecalls, fcalls and
// other special purpose methods implemented by the EE (the interceptor
// we use doesn't play well with non-jitted stubs).
if ((Classification == mcECall || Classification == mcEEImpl) &&
(IsMdHasSecurity(dwMemberAttrs) || IsTdHasSecurity(m_dwAttrClass)))
{
DWORD dwSecFlags;
DWORD dwNullDeclFlags;
LPSTR szMethodName = (LPSTR)pMDInternalImport->GetNameOfMethodDef(tok);
if (IsTdHasSecurity(m_dwAttrClass) &&
SUCCEEDED(Security::GetDeclarationFlags(pMDInternalImport, GetCl(), &dwSecFlags, &dwNullDeclFlags)))
{
if (dwSecFlags & ~dwNullDeclFlags & DECLSEC_RUNTIME_ACTIONS)
_ASSERTE(!"Cannot add stack based declarative security to a class containing an ecall/fcall/special method.");
}
if (IsMdHasSecurity(dwMemberAttrs) &&
SUCCEEDED(Security::GetDeclarationFlags(pMDInternalImport, tok, &dwSecFlags, &dwNullDeclFlags)))
{
if (dwSecFlags & ~dwNullDeclFlags & DECLSEC_RUNTIME_ACTIONS)
_ASSERTE(!"Cannot add stack based declarative security to an ecall/fcall/special method.");
}
}
#endif
// count how many overrides this method does All methods bodies are defined
// on this type so we can just compare the tok with the body token found
// from the overrides.
for(DWORD impls = 0; impls < bmtEnumMF->dwNumberMethodImpls; impls++) {
if(bmtMetaData->pMethodBody[impls] == tok) {
Classification |= mdcMethodImpl;
break;
}
}
//
// Compute the type & other info
//
// Set the index into the storage locations
BYTE impl;
if (Classification & mdcMethodImpl)
impl = METHOD_IMPL;
else
impl = METHOD_IMPL_NOT;
BYTE type;
if ((Classification & mdcClassification) == mcNDirect)
{
type = METHOD_TYPE_NDIRECT;
}
else if ((Classification & mdcClassification) == mcECall
|| (Classification & mdcClassification) == mcEEImpl)
{
type = METHOD_TYPE_ECALL;
}
else if ((Classification & mdcClassification) == mcComInterop)
{
type = METHOD_TYPE_INTEROP;
}
else
{
type = METHOD_TYPE_NORMAL;
}
//
// Store the method and the information we have gathered on it in the metadata info structure.
//
bmtMetaData->pMethods[CurMethod] = tok;
bmtMetaData->pMethodAttrs[CurMethod] = dwMemberAttrs;
bmtMetaData->pMethodRVA[CurMethod] = dwMethodRVA;
bmtMetaData->pMethodImplFlags[CurMethod] = dwImplFlags;
bmtMetaData->pMethodClassifications[CurMethod] = Classification;
bmtMetaData->pstrMethodName[CurMethod] = strMethodName;
bmtMetaData->pMethodImpl[CurMethod] = impl;
bmtMetaData->pMethodType[CurMethod] = type;
//
// Update the count of the various types of methods.
//
bmtVT->dwMaxVtableSize++;
bmtEnumMF->dwNumDeclaredMethods++;
BOOL hasUnboxing = (IsValueClass()
&& !IsMdStatic(dwMemberAttrs)
&& IsMdVirtual(dwMemberAttrs)
&& !IsMdRTSpecialName(dwMemberAttrs));
if (hasUnboxing)
bmtEnumMF->dwNumUnboxingMethods++;
bmtMF->sets[type][impl].dwNumMethodDescs++;
if (hasUnboxing)
bmtMF->sets[type][impl].dwNumUnboxingMethodDescs++;
GetTokenRange(tok, &(bmtMetaData->ranges[type][impl]))->cMethods
+= (hasUnboxing ? 2 : 1);
}
_ASSERTE(i == bmtMetaData->cMethAndGaps);
pMDInternalImport->EnumReset(&(bmtEnumMF->hEnumMethod));
//
// If the interface is sparse, we need to finalize the mapping list by
// telling it how many real methods we found.
//
if (bmtProp->fSparse)
{
if (!m_pSparseVTableMap->FinalizeMapping((WORD)bmtMetaData->cMethods))
{
return(E_OUTOFMEMORY);
}
}
//
// Run through the field list and calculate the following:
// # static fields
// # static fields that contain object refs.
// # instance fields
//
bmtEnumMF->dwNumStaticFields = 0;
bmtEnumMF->dwNumStaticObjRefFields = 0;
bmtEnumMF->dwNumInstanceFields = 0;
hr = pMDInternalImport->EnumInit(mdtFieldDef, m_cl, &(bmtEnumMF->hEnumField));
if (FAILED(hr))
{
_ASSERTE(!"Cannot count memberdefs");
IfFailRet(hr);
}
bmtMetaData->cFields = pMDInternalImport->EnumGetCount(&(bmtEnumMF->hEnumField));
bmtEnumMF->fNeedToCloseEnumField = true;
// Retrieve the fields and store them in a temp array.
bmtMetaData->pFields = (mdToken*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cFields * sizeof(mdToken));
bmtMetaData->pFieldAttrs = (DWORD*)pThread->m_MarshalAlloc.Alloc(bmtMetaData->cFields * sizeof(DWORD));
DWORD dwFieldLiteralInitOnly = fdLiteral | fdInitOnly;
for (i = 0; pMDInternalImport->EnumNext(&(bmtEnumMF->hEnumField), &tok); i++)
{
//
// Retrieve the attributes of the field.
//
DWORD rid = tok & 0x00FFFFFF;
if ((rid == 0)||(rid > pMDInternalImport->GetCountWithTokenKind(mdtFieldDef)))
{
BAD_FORMAT_ASSERT(!"Field token out of range");
IfFailRet(COR_E_TYPELOAD);
}
dwMemberAttrs = pMDInternalImport->GetFieldDefProps(tok);
//
// Store the field and its attributes in the bmtMetaData structure for later use.
//
bmtMetaData->pFields[i] = tok;
bmtMetaData->pFieldAttrs[i] = dwMemberAttrs;
if((dwMemberAttrs & fdFieldAccessMask)==fdFieldAccessMask)
{
BAD_FORMAT_ASSERT(!"Invalid Field Acess Flags");
IfFailRet(COR_E_TYPELOAD);
}
if((dwMemberAttrs & dwFieldLiteralInitOnly)==dwFieldLiteralInitOnly)
{
BAD_FORMAT_ASSERT(!"Field is Literal and InitOnly");
IfFailRet(COR_E_TYPELOAD);
}
// can only have static global fields
if(m_cl == COR_GLOBAL_PARENT_TOKEN)
{
if(!IsMdStatic(dwMemberAttrs))
{
BAD_FORMAT_ASSERT(!"Non-Static Global Field");
IfFailRet(COR_E_TYPELOAD);
}
}
//
// Update the count of the various types of fields.
//
if (IsFdStatic(dwMemberAttrs))
{
if (!IsFdLiteral(dwMemberAttrs))
{
bmtEnumMF->dwNumStaticFields++;
}
}
else
{
bmtEnumMF->dwNumInstanceFields++;
if(fIsClassInterface)
{
BAD_FORMAT_ASSERT(!"Instance Field in an Interface");
IfFailRet(COR_E_TYPELOAD);
}
}
}
_ASSERTE(i == bmtMetaData->cFields);
if(fIsClassEnum && (bmtEnumMF->dwNumInstanceFields==0))
{
// Commented out because Reflection Emit doesn't check for this.
_ASSERTE(!"No Instance Field in an Enum");
IfFailRet(COR_E_TYPELOAD);
}
bmtEnumMF->dwNumDeclaredFields = bmtEnumMF->dwNumStaticFields + bmtEnumMF->dwNumInstanceFields;
return hr;
}
//
// Used by AllocateMethodFieldDescs
//
// Allocates the chunks used to contain the method descs.
//
// Allocates the MethodDescChunks that will hold the MethodDescs for one
// (type, impl) bucket of this class's methods.
//
// pTokenRanges     - linked list of token-range nodes (walked via pNext); each
//                    node carries the number of methods (cMethods) in its range.
// type             - METHOD_TYPE_* index selecting the method kind (IL, ECall, ...).
// impl             - METHOD_IMPL / METHOD_IMPL_NOT index (whether the methods
//                    are MethodImpl overrides).
// pNumChunks       - [out] total number of chunks allocated across all ranges.
// ppItfMDChunkList - [out] receives an array of chunk pointers allocated from the
//                    thread's marshalling allocator; must be NULL on entry.
HRESULT EEClass::AllocateMDChunks(bmtTokenRangeNode *pTokenRanges, DWORD type, DWORD impl, DWORD *pNumChunks, MethodDescChunk ***ppItfMDChunkList)
{
    HRESULT hr = S_OK;
    _ASSERTE(*ppItfMDChunkList == NULL);
    // Maps (type, impl) to the mdc classification flags used to size/create chunks.
    static DWORD classifications[METHOD_TYPE_COUNT][METHOD_IMPL_COUNT] =
    {
        { mcIL, mcIL | mdcMethodImpl },
        { mcComInterop, mcComInterop | mdcMethodImpl },
        { mcECall, mcECall | mdcMethodImpl },
        { mcNDirect, mcNDirect | mdcMethodImpl }
    };
    // Working-set perf counter category per method type; used only for accounting below.
    static CounterTypeEnum dataStructureTypes[METHOD_TYPE_COUNT] =
    {
        METHOD_DESC,
        COMPLUS_METHOD_DESC,
        NDIRECT_METHOD_DESC, // @nice: add new value here
        NDIRECT_METHOD_DESC
    };
    DWORD Classification = classifications[type][impl];
    bmtTokenRangeNode *pTR = pTokenRanges;
    *pNumChunks = 0;
    // First pass: count the chunks needed for each token range.
    while (pTR)
    {
        // Note: Since dwCurrentChunk isn't being used at this stage, we'll steal it to store
        // away the chunk count.
        // After this function, we'll set it to its intended value.
        pTR->dwCurrentChunk = MethodDescChunk::GetChunkCount(pTR->cMethods, Classification);
        (*pNumChunks) += pTR->dwCurrentChunk;
        pTR = pTR->pNext;
    }
    *ppItfMDChunkList = (MethodDescChunk**)GetThread()->m_MarshalAlloc.Alloc((*pNumChunks) * sizeof(MethodDescChunk*));
    // @TODO: CTS. update profiling to handle the new types of method descs
    // Determine which data structure type will be created.
    CounterTypeEnum DataStructureType = dataStructureTypes[type];
    // Allocate the chunks for the method descs.
    // Second pass: actually allocate each range's chunks, re-purposing
    // dwCurrentChunk to record the range's starting index into the chunk list.
    pTR = pTokenRanges;
    DWORD chunkIdx = 0;
    while (pTR)
    {
        DWORD NumChunks = pTR->dwCurrentChunk; // count stashed by the first pass
        DWORD dwMDAllocs = pTR->cMethods;      // method descs still to place in this range
        pTR->dwCurrentChunk = chunkIdx;
        for (DWORD i = 0; i < NumChunks; i++)
        {
            // Each chunk holds at most GetMaxMethodDescs() descs for this classification.
            DWORD dwElems = min(dwMDAllocs, MethodDescChunk::GetMaxMethodDescs(Classification));
            MethodDescChunk *pChunk = MethodDescChunk::CreateChunk(GetClassLoader()->GetHighFrequencyHeap(),
                                                                   dwElems,
                                                                   Classification,
                                                                   pTR->tokenHiByte);
            if (pChunk == NULL)
            {
                IfFailRet(E_OUTOFMEMORY);
            }
            (*ppItfMDChunkList)[chunkIdx++] = pChunk;
            dwMDAllocs -= dwElems;
            WS_PERF_UPDATE_COUNTER(DataStructureType, HIGH_FREQ_HEAP, dwElems);
        }
        pTR = pTR->pNext;
    }
    return hr;
}
//
// Used by BuildMethodTable
//
// Allocate a MethodDesc* for each method (needed later when doing interfaces), and a FieldDesc* for each field
//
// Allocates the per-class bookkeeping used during method-table construction:
// a combined MethodDesc*/FieldDesc* pointer list, the unboxing-stub list for
// value classes, the temporary (upper-bound sized) vtable, the FieldDesc block,
// the MethodDescChunks (via AllocateMDChunks), and the parent-method scratch buffer.
//
// All bmt* parameters are scratch structures owned by BuildMethodTable; this
// function both reads counts gathered earlier (bmtMetaData, bmtEnumMF) and
// fills in the allocation results (bmtMFDescs, bmtVT, bmtParent).
HRESULT EEClass::AllocateMethodFieldDescs(bmtProperties* bmtProp,
                                          bmtMethAndFieldDescs* bmtMFDescs,
                                          bmtMetaDataInfo* bmtMetaData,
                                          bmtVtable* bmtVT,
                                          bmtEnumMethAndFields* bmtEnumMF,
                                          bmtInterfaceInfo* bmtInterface,
                                          bmtFieldPlacement* bmtFP,
                                          bmtParentInfo* bmtParent)
{
    HRESULT hr = S_OK;
    DWORD i;
    Thread *pThread = GetThread();
    // Allocate a MethodDesc* for each method (needed later when doing interfaces), and a FieldDesc* for each field
    // (one contiguous array: methods first, then fields — see the two views below).
    bmtMFDescs->ppMethodAndFieldDescList = new void* [bmtMetaData->cMethods + bmtMetaData->cFields];
    if (bmtMFDescs->ppMethodAndFieldDescList == NULL)
    {
        IfFailRet(E_OUTOFMEMORY);
    }
    WS_PERF_UPDATE("EEClass:BuildMethodTable, POINTERS to methoddesc,fielddesc",
                   sizeof(void *)*(bmtMetaData->cMethods+bmtMetaData->cFields),
                   bmtMFDescs->ppMethodAndFieldDescList);
    // Two typed views into the single allocation above.
    bmtMFDescs->ppMethodDescList = (MethodDesc**) bmtMFDescs->ppMethodAndFieldDescList;
    bmtMFDescs->ppFieldDescList = (FieldDesc**) &(bmtMFDescs->ppMethodAndFieldDescList[bmtMetaData->cMethods]);
    // Init the list
    for (i = 0; i < (bmtMetaData->cMethods+bmtMetaData->cFields); i++)
        bmtMFDescs->ppMethodAndFieldDescList[i] = NULL;
    // Create a temporary function table (we don't know how large the vtable will be until the very end,
    // since duplicated interfaces are stored at the end of it). Calculate an upper bound.
    //
    // Upper bound is: The parent's class vtable size, plus every method declared in
    // this class, plus the size of every interface we implement
    //
    // In the case of value classes, we add # InstanceMethods again, since we have boxed and unboxed versions
    // of every vtable method.
    //
    if (IsValueClass())
    {
        bmtVT->dwMaxVtableSize += bmtEnumMF->dwNumDeclaredMethods;
        WS_PERF_SET_HEAP(SYSTEM_HEAP);
        // Parallel array of unboxing-stub MethodDescs, indexed like ppMethodDescList.
        bmtMFDescs->ppUnboxMethodDescList = new MethodDesc* [bmtMetaData->cMethods];
        if (bmtMFDescs->ppUnboxMethodDescList == NULL)
        {
            IfFailRet(E_OUTOFMEMORY);
        }
        memset(bmtMFDescs->ppUnboxMethodDescList, 0, sizeof(MethodDesc*)*bmtMetaData->cMethods);
        WS_PERF_UPDATE("EEClass:BuildMethodTable, for valuclasses", sizeof(MethodDesc*)*bmtMetaData->cMethods, bmtMFDescs->ppMethodAndFieldDescList);
    }
    // sanity check
    _ASSERTE(!GetParentClass() || (bmtInterface->dwInterfaceMapSize - GetParentClass()->m_wNumInterfaces) >= 0);
    // add parent vtable size
    bmtVT->dwMaxVtableSize += bmtVT->dwCurrentVtableSlot;
    for (i = 0; i < m_wNumInterfaces; i++)
    {
        // We double the interface size because we may end up duplicating the Interface for MethodImpls
        bmtVT->dwMaxVtableSize += (bmtInterface->pInterfaceMap[i].m_pMethodTable->GetClass()->GetNumVtableSlots() * 2);
    }
    WS_PERF_SET_HEAP(SYSTEM_HEAP);
    // Allocate the temporary vtable
    bmtVT->pVtable = new SLOT[bmtVT->dwMaxVtableSize];
    if (bmtVT->pVtable == NULL)
    {
        IfFailRet(E_OUTOFMEMORY);
    }
#ifdef _DEBUG
    memset(bmtVT->pVtable, 0, sizeof(SLOT)*bmtVT->dwMaxVtableSize);
#endif
    WS_PERF_UPDATE("EEClass:BuildMethodTable, tempVtable", sizeof(SLOT)*bmtVT->dwMaxVtableSize, bmtVT->pVtable);
    // Slot array for methods that do not go in the vtable (sized for every declared method).
    bmtVT->pNonVtable = (SLOT *) pThread->m_MarshalAlloc.Alloc(sizeof(SLOT)*bmtMetaData->cMethods);
    memset(bmtVT->pNonVtable, 0, sizeof(SLOT)*bmtMetaData->cMethods);
    if (GetParentClass() != NULL)
    {
        if (GetParentClass()->GetModule()->IsPreload())
        {
            //
            // Make sure all parent slots are fixed up before we copy the vtable,
            // since the fixup rules don't work if we copy down fixup addresses.
            //
            // NOTE: this int i intentionally shadows the outer DWORD i.
            for (int i=0; i<GetParentClass()->GetNumVtableSlots(); i++)
                GetParentClass()->GetFixedUpSlot(i);
        }
        // Copy parent's vtable into our "temp" vtable
        memcpy(
            bmtVT->pVtable,
            GetParentClass()->GetVtable(),
            GetParentClass()->GetNumVtableSlots() * sizeof(SLOT)
        );
#if 0
        // @todo: Figure out the right way to override Equals for value
        // types only.
        //
        // This is broken because
        // (a) g_pObjectClass->FindMethod("Equals", &gsig_IM_Obj_RetBool); will return
        // the EqualsValue method
        // (b) When mscorlib has been preloaded (and thus the munge already done
        // ahead of time), we cannot easily find both methods
        // to compute EqualsAddr & EqualsSlot
        //
        // For now, the Equals method has a runtime check to see if it's
        // comparing value types.
        //
        // If it is a value type, over ride a few of the base class methods.
        if (IsValueClass())
        {
            static ULONG EqualsAddr = 0;
            static WORD EqualsSlot;
            // If we haven't been through here yet, get some stuff from the Object class definition.
            if (EqualsAddr == 0)
            {
                // Get the slot of the Equals method.
                MethodDesc *pEqualsMD = g_pObjectClass->FindMethod("Equals", &gsig_IM_Obj_RetBool);
                _ASSERTE(pEqualsMD != NULL);
                EqualsSlot = pEqualsMD->GetSlot();
                // Get the address of the EqualsValue method.
                MethodDesc *pEqualsValueMD = g_pObjectClass->FindMethod("EqualsValue", &gsig_IM_Obj_RetBool);
                _ASSERTE(pEqualsValueMD != NULL);
                EqualsAddr = (ULONG) pEqualsValueMD->GetPreStubAddr();
                _ASSERTE(EqualsAddr != 0);
                // Patch the EqualsValue method desc in a dangerous way to
                // look like the Equals method desc.
                pEqualsValueMD->SetSlot(EqualsSlot);
                pEqualsValueMD->SetMemberDef(pEqualsMD->GetMemberDef());
            }
            // Override the valuetype "Equals" with "EqualsValue".
            bmtVT->pVtable[EqualsSlot] = EqualsAddr;
        }
#endif
    }
    // We'll be counting the # fields of each size as we go along
    for (i = 0; i <= MAX_LOG2_PRIMITIVE_FIELD_SIZE; i++)
    {
        bmtFP->NumStaticFieldsOfSize[i] = 0;
        bmtFP->NumInstanceFieldsOfSize[i] = 0;
    }
    // Allocate blocks of MethodDescs and FieldDescs for all declared methods and fields
    if ((bmtEnumMF->dwNumDeclaredMethods + bmtEnumMF->dwNumDeclaredFields) > 0)
    {
        // In order to avoid allocating a field pointing back to the method
        // table in every single method desc, we allocate memory in the
        // following manner:
        // o Field descs get a single contiguous block.
        // o Method descs of different sizes (normal vs NDirect) are
        // allocated in different MethodDescChunks.
        // o Each method desc chunk starts with a header, and has
        // at most MAX_ method descs (if there are more
        // method descs of a given size, multiple chunks are allocated).
        // This way method descs can use an 8-bit offset field to locate the
        // pointer to their method table.
        WS_PERF_SET_HEAP(HIGH_FREQ_HEAP);
        // Allocate fields first.
        if (bmtEnumMF->dwNumDeclaredFields > 0)
        {
            m_pFieldDescList = (FieldDesc *)
                GetClassLoader()->GetHighFrequencyHeap()->AllocMem(bmtEnumMF->dwNumDeclaredFields *
                                                                   sizeof(FieldDesc));
            if (m_pFieldDescList == NULL)
            {
                IfFailRet(E_OUTOFMEMORY);
            }
            WS_PERF_UPDATE_DETAIL("BuildMethodTable:bmtEnumMF->dwNumDeclaredFields*sizeof(FieldDesc)",
                                  bmtEnumMF->dwNumDeclaredFields * sizeof(FieldDesc), m_pFieldDescList);
            WS_PERF_UPDATE_COUNTER(FIELD_DESC, HIGH_FREQ_HEAP, bmtEnumMF->dwNumDeclaredFields);
        }
#ifdef _DEBUG
        GetClassLoader()->m_dwDebugFieldDescs += bmtEnumMF->dwNumDeclaredFields;
        GetClassLoader()->m_dwFieldDescData += (bmtEnumMF->dwNumDeclaredFields * sizeof(FieldDesc));
#endif
        // Allocate the MethodDescChunks for every non-empty (type, impl) bucket.
        for (DWORD impl=0; impl<METHOD_IMPL_COUNT; impl++)
            for (DWORD type=0; type<METHOD_TYPE_COUNT; type++)
            {
                bmtMethodDescSet *set = &bmtMFDescs->sets[type][impl];
                DWORD dwAllocs = set->dwNumMethodDescs + set->dwNumUnboxingMethodDescs;
                if (dwAllocs > 0)
                {
                    IfFailRet(AllocateMDChunks(bmtMetaData->ranges[type][impl],
                                               type, impl,
                                               &set->dwChunks, &set->pChunkList));
                }
#ifdef _DEBUG
                GetClassLoader()->m_dwDebugMethods += dwAllocs;
                for (UINT i=0; i<set->dwChunks; i++)
                    GetClassLoader()->m_dwMethodDescData +=
                        set->pChunkList[i]->Sizeof();
#endif
            }
        // Scratch buffer used while matching this class's methods against the parent's
        // (2x the declared method count).
        bmtParent->ppParentMethodDescBuf = (MethodDesc **)
            pThread->m_MarshalAlloc.Alloc(2 * bmtEnumMF->dwNumDeclaredMethods *
                                          sizeof(MethodDesc*));
        if (bmtParent->ppParentMethodDescBuf == NULL)
        {
            IfFailRet(E_OUTOFMEMORY);
        }
        bmtParent->ppParentMethodDescBufPtr = bmtParent->ppParentMethodDescBuf;
    }
    else
    {
        // No fields or methods
        m_pFieldDescList = NULL;
    }
    return hr;
}
//
// Heuristic to determine if we should have instances of this class 8 byte aligned
//
// Returns TRUE when the class should be 8-byte aligned: it must have at least
// two R8 (double) fields, and those must account for more than half of all fields.
BOOL EEClass::ShouldAlign8(DWORD dwR8Fields, DWORD dwTotalFields)
{
    if (dwR8Fields < 2)
        return FALSE;
    // Doubles make up the majority of the fields.
    return (2 * dwR8Fields > dwTotalFields);
}
//
// Used by BuildMethodTable
//
// Go thru all fields and initialize their FieldDescs.
//
// Walks every field gathered into bmtMetaData and initializes its FieldDesc:
// decodes the field signature into an element type and log2 size, classifies
// statics (RVA-mapped, [ThreadStatic], [ContextStatic], by-value handles),
// resolves by-value (valuetype) field classes (caching them in
// *pByValueClassCache), hooks up field marshalers for classes with layout,
// assigns provisional offsets, and accumulates the per-size field counts in
// bmtFP plus the total declared instance size in *totalDeclaredSize.
//
// pFieldDescList        - destination FieldDesc array (instance fields first,
//                         then statics — see the pFD selection below).
// pLayoutRawFieldInfos  - marshaling layout info (only consulted if HasLayout()).
// pByValueClassCache    - [in/out] lazily-created cache of by-value field classes;
//                         intentionally HeapAlloc'd and leaked (see comment below).
// totalDeclaredSize     - [out, accumulated] sum of declared instance field sizes.
HRESULT EEClass::InitializeFieldDescs(FieldDesc *pFieldDescList,
                                      const LayoutRawFieldInfo* pLayoutRawFieldInfos,
                                      bmtInternalInfo* bmtInternal,
                                      bmtMetaDataInfo* bmtMetaData,
                                      bmtEnumMethAndFields* bmtEnumMF,
                                      bmtErrorInfo* bmtError,
                                      EEClass*** pByValueClassCache,
                                      bmtMethAndFieldDescs* bmtMFDescs,
                                      bmtFieldPlacement* bmtFP,
                                      unsigned* totalDeclaredSize)
{
    HRESULT hr = S_OK;
    DWORD i;
    IMDInternalImport *pInternalImport = bmtInternal->pInternalImport; // to avoid multiple dereferencings
    FieldMarshaler *pNextFieldMarshaler = NULL;
    if (HasLayout())
    {
        pNextFieldMarshaler = (FieldMarshaler*)(GetLayoutInfo()->GetFieldMarshalers());
    }
    //========================================================================
    // BEGIN:
    // Go thru all fields and initialize their FieldDescs.
    //========================================================================
    DWORD dwCurrentDeclaredField = 0;
    DWORD dwCurrentStaticField = 0;
    // Running byte offsets handed out to thread/context-local statics
    // (separate counters for shared vs. unshared domains).
    DWORD dwSharedThreadStatic = 0;
    DWORD dwUnsharedThreadStatic = 0;
    DWORD dwSharedContextStatic = 0;
    DWORD dwUnsharedContextStatic = 0;
    BOOL fSetThreadStaticOffset = FALSE; // Do we have thread local static fields ?
    BOOL fSetContextStaticOffset = FALSE; // Do we have context local static fields ?
    DWORD dwR8Fields = 0; // Number of R8's the class has
#ifdef RVA_FIELD_VALIDATION_ENABLED
    Module* pMod = bmtInternal->pModule;
#endif
    for (i = 0; i < bmtMetaData->cFields; i++)
    {
        PCCOR_SIGNATURE pMemberSignature;
        DWORD cMemberSignature;
        DWORD dwMemberAttrs;
        dwMemberAttrs = bmtMetaData->pFieldAttrs[i];
        // We don't store static final primitive fields in the class layout
        if (IsFdLiteral(dwMemberAttrs))
            continue;
        if(!IsFdPublic(dwMemberAttrs)) m_VMFlags |= VMFLAG_HASNONPUBLICFIELDS;
        pMemberSignature = pInternalImport->GetSigOfFieldDef(bmtMetaData->pFields[i], &cMemberSignature);
        // Signature validation
        IfFailRet(validateTokenSig(bmtMetaData->pFields[i],pMemberSignature,cMemberSignature,dwMemberAttrs,pInternalImport));
        FieldDesc * pFD;
        DWORD dwLog2FieldSize = 0;
        BOOL bCurrentFieldIsGCPointer = FALSE;
        PCCOR_SIGNATURE pFieldSig = pMemberSignature;
        CorElementType ElementType, FieldDescElementType;
        mdToken dwByValueClassToken = 0;
        EEClass * pByValueClass = NULL;
        BOOL fIsByValue = FALSE;
        BOOL fIsRVA = FALSE;
        BOOL fIsThreadStatic = FALSE;
        BOOL fIsContextStatic = FALSE;
        BOOL fHasRVA = FALSE;
        // Get type
        if (!isCallConv(*pFieldSig++, IMAGE_CEE_CS_CALLCONV_FIELD))
        {
            IfFailRet(COR_E_TYPELOAD);
        }
        // Determine if a static field is special i.e. RVA based, local to
        // a thread or a context
        if(IsFdStatic(dwMemberAttrs))
        {
            if(IsFdHasFieldRVA(dwMemberAttrs))
            {
                fHasRVA = TRUE;
            }
            if(S_OK == pInternalImport->GetCustomAttributeByName(bmtMetaData->pFields[i],
                                                                 "System.ThreadStaticAttribute",
                                                                 NULL,
                                                                 NULL))
            {
                fIsThreadStatic = TRUE;
                fSetThreadStaticOffset = TRUE;
            }
            if(S_OK == pInternalImport->GetCustomAttributeByName(bmtMetaData->pFields[i],
                                                                 "System.ContextStaticAttribute",
                                                                 NULL,
                                                                 NULL))
            {
                fIsContextStatic = TRUE;
                fSetContextStaticOffset = TRUE;
            }
            // Do some sanity checks that we are not mixing context and thread
            // relative statics.
            if (fIsThreadStatic && fIsContextStatic)
            {
                //@TODO TarunA Define a hresult for this failure.
                IfFailRet(COR_E_TYPELOAD);
            }
        }
        // Small state machine over the signature bytes: SET_ELEMENT_TYPE reads
        // the next element type (used to skip custom modifiers), GOT_ELEMENT_TYPE
        // re-dispatches after ElementType is rewritten (I/U normalization, enums).
SET_ELEMENT_TYPE:
        ElementType = (CorElementType) *pFieldSig++;
GOT_ELEMENT_TYPE:
        // Type to store in FieldDesc - we don't want to have extra case statements for
        // ELEMENT_TYPE_STRING, SDARRAY etc., so we convert all object types to CLASS.
        // Also, BOOLEAN, CHAR are converted to U1, I2.
        FieldDescElementType = ElementType;
        switch (ElementType)
        {
        case ELEMENT_TYPE_I1:
        case ELEMENT_TYPE_U1:
            {
                dwLog2FieldSize = 0;
                break;
            }
        case ELEMENT_TYPE_I2:
        case ELEMENT_TYPE_U2:
            {
                dwLog2FieldSize = 1;
                break;
            }
        case ELEMENT_TYPE_I:
            ElementType = ELEMENT_TYPE_I4;
            goto GOT_ELEMENT_TYPE;
        case ELEMENT_TYPE_U:
            ElementType = ELEMENT_TYPE_U4;
            goto GOT_ELEMENT_TYPE;
        case ELEMENT_TYPE_I4:
        case ELEMENT_TYPE_U4:
        case ELEMENT_TYPE_R4:
            {
                dwLog2FieldSize = 2;
                break;
            }
        case ELEMENT_TYPE_BOOLEAN:
            {
                // FieldDescElementType = ELEMENT_TYPE_U1;
                dwLog2FieldSize = 0;
                break;
            }
        case ELEMENT_TYPE_CHAR:
            {
                // FieldDescElementType = ELEMENT_TYPE_U2;
                dwLog2FieldSize = 1;
                break;
            }
        case ELEMENT_TYPE_R8:
            dwR8Fields++;
            // Fall through
        case ELEMENT_TYPE_I8:
        case ELEMENT_TYPE_U8:
            {
                dwLog2FieldSize = 3;
                break;
            }
        case ELEMENT_TYPE_FNPTR:
        case ELEMENT_TYPE_PTR: // ptrs are unmanaged scalars, for layout
            {
                // 64 bit stuff
                dwLog2FieldSize = ((sizeof(SLOT) == 4) ? 2 : 3);
                break;
            }
        case ELEMENT_TYPE_STRING:
        case ELEMENT_TYPE_SZARRAY: // single dim, zero
        case ELEMENT_TYPE_ARRAY: // all other arrays
        case ELEMENT_TYPE_CLASS: // objectrefs
        case ELEMENT_TYPE_OBJECT:
        case ELEMENT_TYPE_VAR:
            {
                // 64 bit stuff
                dwLog2FieldSize = ((sizeof(SLOT) == 4) ? 2 : 3);
                bCurrentFieldIsGCPointer = TRUE;
                FieldDescElementType = ELEMENT_TYPE_CLASS;
                if (IsFdStatic(dwMemberAttrs) == 0)
                {
                    m_VMFlags |= VMFLAG_HAS_FIELDS_WHICH_MUST_BE_INITED;
                }
                else
                {
                    // Increment the number of static fields that contain object references.
                    bmtEnumMF->dwNumStaticObjRefFields++;
                }
                break;
            }
        case ELEMENT_TYPE_VALUETYPE: // a byvalue class field
            {
                // Need to check whether we have an instance of a by-value class
                CorSigUncompressToken(pFieldSig, &dwByValueClassToken);
                fIsByValue = TRUE;
                // By-value class
                _ASSERTE(dwByValueClassToken != 0);
#ifndef RVA_FIELD_VALIDATION_ENABLED
                if (fHasRVA)
                    break;
#endif
                // It's possible a value class X can have a static field of type X, so we have to catch this
                // special case.
                //
                // We want to avoid calling LoadClass() and having it fail, since that causes all sorts of things
                // (like the converter module) to get loaded.
                if (this->IsValueClass())
                {
                    if (dwByValueClassToken == this->GetCl())
                    {
                        // TypeDef token
                        if (!IsFdStatic(dwMemberAttrs))
                        {
                            bmtError->resIDWhy = IDS_CLASSLOAD_VALUEINSTANCEFIELD;
                            return COR_E_TYPELOAD;
                        }
                        pByValueClass = this;
                    }
                    else
                    {
                        if (IsFdStatic(dwMemberAttrs) && (TypeFromToken(dwByValueClassToken) == mdtTypeRef))
                        {
                            // It's a typeref - check if it's a class that has a static field of itself
                            mdTypeDef ValueCL;
                            // @TODO: It would be nice if we didn't have to do this. Right now every time there is a
                            // static value class, we're going to take this longer code path.
                            LPCUTF8 pszNameSpace;
                            LPCUTF8 pszClassName;
                            pInternalImport->GetNameOfTypeRef(dwByValueClassToken, &pszNameSpace, &pszClassName);
                            if(IsStrLongerThan((char*)pszClassName,MAX_CLASS_NAME)
                               || IsStrLongerThan((char*)pszNameSpace,MAX_CLASS_NAME)
                               || (strlen(pszClassName)+strlen(pszNameSpace)+1 >= MAX_CLASS_NAME))
                            {
                                _ASSERTE(!"Full Name ofTypeRef Too Long");
                                return (COR_E_TYPELOAD);
                            }
                            mdToken tkRes = pInternalImport->GetResolutionScopeOfTypeRef(dwByValueClassToken);
                            if(TypeFromToken(tkRes) == mdtTypeRef)
                            {
                                DWORD rid = RidFromToken(tkRes);
                                if((rid==0)||(rid > pInternalImport->GetCountWithTokenKind(mdtTypeRef)))
                                {
                                    _ASSERTE(!"TypeRef Token Out of Range");
                                    return(COR_E_TYPELOAD);
                                }
                            }
                            else tkRes = mdTokenNil;
                            if (SUCCEEDED(pInternalImport->FindTypeDef(pszNameSpace,
                                                                       pszClassName,
                                                                       tkRes,
                                                                       &ValueCL)))
                            {
                                if (ValueCL == this->GetCl())
                                    pByValueClass = this;
                            }
                        } // If field is static typeref
                    } // If field is self-referencing
                } // If 'this' is a value class
                // Not self-referential: actually load the field's class.
                if (!pByValueClass) {
                    NameHandle name(bmtInternal->pModule, dwByValueClassToken);
                    if (bmtInternal->pModule->IsEditAndContinue() && GetThread() == NULL)
                        name.SetTokenNotToLoad(tdAllTypes);
                    pByValueClass = GetClassLoader()->LoadTypeHandle(&name, bmtError->pThrowable).GetClass();
                    if(! pByValueClass) {
                        IfFailRet(COR_E_TYPELOAD);
                    }
                }
                // IF it is an enum, strip it down to its underlying type
                if (pByValueClass->IsEnum()) {
                    _ASSERTE((pByValueClass == this && bmtEnumMF->dwNumInstanceFields == 1)
                             || pByValueClass->GetNumInstanceFields() == 1); // enums must have exactly one field
                    FieldDesc* enumField = pByValueClass->m_pFieldDescList;
                    _ASSERTE(!enumField->IsStatic()); // no real static fields on enums
                    ElementType = enumField->GetFieldType();
                    _ASSERTE(ElementType != ELEMENT_TYPE_VALUETYPE);
                    fIsByValue = FALSE; // we're going to treat it as the underlying type now
                    goto GOT_ELEMENT_TYPE;
                }
                else if ( (pByValueClass->IsValueClass() == FALSE) &&
                          (pByValueClass != g_pEnumClass->GetClass()) ) {
                    _ASSERTE(!"Class must be declared to be by value to use as by value");
                    return hr;
                }
                // If it is an illegal type, say so
                if (pByValueClass->ContainsStackPtr())
                    goto BAD_FIELD;
                // If a class has a field of type ValueType with non-public fields in it,
                // the class must "inherit" this characteristic
                if (pByValueClass->HasNonPublicFields())
                {
                    m_VMFlags |= VMFLAG_HASNONPUBLICFIELDS;
                }
#ifdef RVA_FIELD_VALIDATION_ENABLED
                if (fHasRVA)
                {
                    dwLog2FieldSize = IsFdStatic(dwMemberAttrs) ? LOG2PTR : 0;
                    break;
                }
#endif
                if (IsFdStatic(dwMemberAttrs) == 0)
                {
                    if (pByValueClass->HasFieldsWhichMustBeInited())
                        m_VMFlags |= VMFLAG_HAS_FIELDS_WHICH_MUST_BE_INITED;
                }
                else
                {
                    // Increment the number of static fields that contain object references.
                    if (!IsFdHasFieldRVA(dwMemberAttrs))
                        bmtEnumMF->dwNumStaticObjRefFields++;
                }
                // Need to create by value class cache. For E&C, this pointer will get
                // cached indefinately and not cleaned up as the parent descriptors are
                // in the low frequency heap. Use HeapAlloc with the intent of leaking
                // this pointer and avoiding the assert (jlz, bug 41344).
                if (*pByValueClassCache == NULL)
                {
                    WS_PERF_SET_HEAP(SYSTEM_HEAP);
                    *pByValueClassCache = (EEClass **) HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, (bmtEnumMF->dwNumInstanceFields + bmtEnumMF->dwNumStaticFields) * sizeof(EEClass **));
                    if (*pByValueClassCache == NULL)
                    {
                        FailFast(GetThread(), FatalOutOfMemory);
                    }
                    WS_PERF_UPDATE("EEClass:BuildMethodTable, by valueclasscache", sizeof(EEClass*)*(bmtEnumMF->dwNumInstanceFields + bmtEnumMF->dwNumStaticFields), *pByValueClassCache);
                }
                // Static fields come after instance fields in this list
                if (IsFdStatic(dwMemberAttrs))
                {
                    (*pByValueClassCache)[bmtEnumMF->dwNumInstanceFields + dwCurrentStaticField] = pByValueClass;
                    dwLog2FieldSize = LOG2PTR; // handle
                }
                else
                {
                    (*pByValueClassCache)[dwCurrentDeclaredField] = pByValueClass;
                    dwLog2FieldSize = 0; // unused
                }
                break;
            }
        case ELEMENT_TYPE_CMOD_REQD:
        case ELEMENT_TYPE_CMOD_OPT:
            // Just skip the custom modifier token.
            CorSigUncompressToken(pFieldSig);
            goto SET_ELEMENT_TYPE;
        default:
            {
BAD_FIELD:
                bmtError->resIDWhy = IDS_CLASSLOAD_BAD_FIELD;
                IfFailRet(COR_E_TYPELOAD);
            }
        }
        // Static fields are not packed
        if (IsFdStatic(dwMemberAttrs) && (dwLog2FieldSize < 2))
            dwLog2FieldSize = 2;
        // Pick the destination FieldDesc: instance fields occupy the front of
        // pFieldDescList, statics follow after all instance fields.
        if (!IsFdStatic(dwMemberAttrs))
        {
            pFD = &pFieldDescList[dwCurrentDeclaredField];
            *totalDeclaredSize += (1 << dwLog2FieldSize);
        }
        else /* (dwMemberAttrs & mdStatic) */
        {
            pFD = &pFieldDescList[bmtEnumMF->dwNumInstanceFields + dwCurrentStaticField];
        }
        bmtMFDescs->ppFieldDescList[i] = pFD;
        // For classes with layout, find this field's marshaling info and copy
        // its FieldMarshaler into the next slot of the class's marshaler array.
        const LayoutRawFieldInfo *pLayoutFieldInfo;
        pLayoutFieldInfo = NULL;
        if (HasLayout())
        {
            const LayoutRawFieldInfo *pwalk = pLayoutRawFieldInfos;
            while (pwalk->m_MD != mdFieldDefNil)
            {
                if (pwalk->m_MD == bmtMetaData->pFields[i])
                {
                    pLayoutFieldInfo = pwalk;
                    CopyMemory(pNextFieldMarshaler,
                               &(pwalk->m_FieldMarshaler),
                               MAXFIELDMARSHALERSIZE);
                    pNextFieldMarshaler->m_pFD = pFD;
                    pNextFieldMarshaler->m_dwExternalOffset = pwalk->m_offset;
                    ((BYTE*&)pNextFieldMarshaler) += MAXFIELDMARSHALERSIZE;
                    break;
                }
                pwalk++;
            }
        }
        LPCSTR pszFieldName = NULL;
#ifdef _DEBUG
        pszFieldName = pInternalImport->GetNameOfFieldDef(bmtMetaData->pFields[i]);
#endif
        // Initialize contents
        pFD->Init(
                  bmtMetaData->pFields[i],
                  FieldDescElementType,
                  dwMemberAttrs,
                  IsFdStatic(dwMemberAttrs),
                  fHasRVA,
                  fIsThreadStatic,
                  fIsContextStatic,
                  pszFieldName
                  );
        // Check if the ValueType field containing non-publics is overlapped
        if(HasExplicitFieldOffsetLayout()
           && pLayoutFieldInfo
           && pLayoutFieldInfo->m_fIsOverlapped
           && pByValueClass
           && pByValueClass->HasNonPublicFields())
        {
            if (!Security::CanSkipVerification(GetAssembly()))
            {
                bmtError->resIDWhy = IDS_CLASSLOAD_BADOVERLAP;
                IfFailRet(COR_E_TYPELOAD);
            }
        }
        if (fIsByValue)
        {
            if (!IsFdStatic(dwMemberAttrs) &&
                (IsBlittable() || HasExplicitFieldOffsetLayout()))
            {
                pFD->m_pMTOfEnclosingClass =
                    (MethodTable *)(size_t)((*pByValueClassCache)[dwCurrentDeclaredField]->GetNumInstanceFieldBytes()); // @todo WIN64 - conversion from DWORD to MethodTable * of greater size (GetNumInstanceFieldBytes)
                if (pLayoutFieldInfo)
                    IfFailRet(pFD->SetOffset(pLayoutFieldInfo->m_offset));
                else
                    pFD->SetOffset(FIELD_OFFSET_VALUE_CLASS);
            }
            else
            {
                // static value class fields hold a handle, which is ptr sized
                // (instance field layout ignores this value)
                pFD->m_pMTOfEnclosingClass = (MethodTable *) LOG2PTR;
                pFD->SetOffset(FIELD_OFFSET_VALUE_CLASS);
            }
        }
        else
        {
            // Use the field's MethodTable to temporarily store the field's size
            pFD->m_pMTOfEnclosingClass = (MethodTable *)(size_t)dwLog2FieldSize;
            // -1 means that this field has not yet been placed
            // -2 means that this is a GC Pointer field not yet places
            if ((IsBlittable() || HasExplicitFieldOffsetLayout()) && !(IsFdStatic(dwMemberAttrs)))
                IfFailRet(pFD->SetOffset(pLayoutFieldInfo->m_offset));
            else if (bCurrentFieldIsGCPointer)
                pFD->SetOffset(FIELD_OFFSET_UNPLACED_GC_PTR);
            else
                pFD->SetOffset(FIELD_OFFSET_UNPLACED);
        }
        if (!IsFdStatic(dwMemberAttrs))
        {
            if (!fIsByValue)
            {
                if (++bmtFP->NumInstanceFieldsOfSize[dwLog2FieldSize] == 1)
                    bmtFP->FirstInstanceFieldOfSize[dwLog2FieldSize] = dwCurrentDeclaredField;
            }
            dwCurrentDeclaredField++;
            if (bCurrentFieldIsGCPointer)
                bmtFP->NumInstanceGCPointerFields++;
        }
        else /* static fields */
        {
            // Static fields are stored in the vtable after the vtable and interface slots. We don't
            // know how large the vtable will be, so we will have to fixup the slot number by
            // <vtable + interface size> later.
            dwCurrentStaticField++;
            if(fHasRVA)
            {
#ifdef RVA_FIELD_VALIDATION_ENABLED
                // Check if we place ObjectRefs into RVA field
                if((FieldDescElementType==ELEMENT_TYPE_CLASS)
                   ||((FieldDescElementType==ELEMENT_TYPE_VALUETYPE)
                      &&pByValueClass->HasFieldsWhichMustBeInited()))
                {
                    _ASSERTE(!"ObjectRef in an RVA field");
                    bmtError->resIDWhy = IDS_CLASSLOAD_BAD_FIELD;
                    IfFailRet(COR_E_TYPELOAD);
                }
                // Check if we place ValueType with non-public fields into RVA field
                if((FieldDescElementType==ELEMENT_TYPE_VALUETYPE)
                   &&pByValueClass->HasNonPublicFields())
                {
                    if (!Security::CanSkipVerification(GetAssembly()))
                    {
                        _ASSERTE(!"ValueType with non-public fields as a type of an RVA field");
                        bmtError->resIDWhy = IDS_CLASSLOAD_BAD_FIELD;
                        IfFailRet(COR_E_TYPELOAD);
                    }
                }
#endif
                // Set the field offset
                DWORD rva;
                IfFailRet(pInternalImport->GetFieldRVA(pFD->GetMemberDef(), &rva));
#ifdef RVA_FIELD_VALIDATION_ENABLED
                // Validate that the whole field image [rva, rva_end) fits inside a
                // single PE section (virtual size rounded up to SectionAlignment).
                if(pMod->IsPEFile())
                {
                    IMAGE_NT_HEADERS *NtHeaders = pMod->GetPEFile()->GetNTHeader();
                    ULONG i, Nsect = NtHeaders->FileHeader.NumberOfSections;
                    PIMAGE_SECTION_HEADER NtSection = IMAGE_FIRST_SECTION( NtHeaders );
                    DWORD rva_end = rva + (FieldDescElementType==ELEMENT_TYPE_VALUETYPE ?
                                           pByValueClass->GetNumInstanceFieldBytes()
                                           : GetSizeForCorElementType(FieldDescElementType));
                    DWORD sec_start,sec_end,filler,roundup = NtHeaders->OptionalHeader.SectionAlignment;
                    for (i=0; i<Nsect; i++, NtSection++)
                    {
                        sec_start = NtSection->VirtualAddress;
                        sec_end = NtSection->Misc.VirtualSize;
                        filler = sec_end & (roundup-1);
                        if(filler) filler = roundup-filler;
                        sec_end += sec_start+filler;
                        if ((rva >= sec_start) && (rva < sec_end))
                        {
                            // Field starts here; i = Nsect flags "spills past section end".
                            if ((rva_end < sec_start) || (rva_end > sec_end)) i = Nsect;
                            break;
                        }
                    }
                    if(i >= Nsect)
                    {
                        if (!Security::CanSkipVerification(GetAssembly()))
                        {
                            _ASSERTE(!"Illegal RVA of a mapped field");
                            bmtError->resIDWhy = IDS_CLASSLOAD_BAD_FIELD;
                            IfFailRet(COR_E_TYPELOAD);
                        }
                    }
                }
#endif
                IfFailRet(pFD->SetOffsetRVA(rva));
#ifdef RVA_FIELD_OVERLAPPING_VALIDATION_ENABLED
                // Check if the field overlaps with known RVA fields
                BYTE* pbModuleBase = pMod->GetILBase();
                DWORD dwSizeOfThisField = FieldDescElementType==ELEMENT_TYPE_VALUETYPE ?
                    pByValueClass->GetNumInstanceFieldBytes() : GetSizeForCorElementType(FieldDescElementType);
                BYTE* FDfrom = pbModuleBase + pFD->GetOffset();
                BYTE* FDto = FDfrom + dwSizeOfThisField;
                ULONG j;
                if(g_drRVAField)
                {
                    for(j=1; j < g_ulNumRVAFields; j++)
                    {
                        if((*g_drRVAField)[j].pbStart >= FDto) continue;
                        if((*g_drRVAField)[j].pbEnd <= FDfrom) continue;
                        /*
                        _ASSERTE(!"Overlapping RVA fields");
                        bmtError->resIDWhy = IDS_CLASSLOAD_BAD_FIELD;
                        IfFailRet(COR_E_TYPELOAD);
                        */
                    }
                }
                else
                    g_drRVAField = new DynamicArray<RVAFSE>;
                (*g_drRVAField)[g_ulNumRVAFields].pbStart = FDfrom;
                (*g_drRVAField)[g_ulNumRVAFields].pbEnd = FDto;
                g_ulNumRVAFields++;
#endif
                ;
            }
            else if (fIsThreadStatic)
            {
                // Thread-local static: the "offset" is a byte offset into the
                // per-thread static block, handed out sequentially.
                DWORD size = 1 << dwLog2FieldSize;
                if(IsShared())
                {
                    IfFailRet(pFD->SetOffset(dwSharedThreadStatic));
                    dwSharedThreadStatic += size;
                }
                else
                {
                    IfFailRet(pFD->SetOffset(dwUnsharedThreadStatic));
                    dwUnsharedThreadStatic += size;
                }
            }
            else if (fIsContextStatic)
            {
                // Context-local static: same scheme as thread statics above.
                DWORD size = 1 << dwLog2FieldSize;
                if(IsShared())
                {
                    IfFailRet(pFD->SetOffset(dwSharedContextStatic));
                    dwSharedContextStatic += size;
                }
                else
                {
                    IfFailRet(pFD->SetOffset(dwUnsharedContextStatic));
                    dwUnsharedContextStatic += size;
                }
            }
            else
            {
                bmtFP->NumStaticFieldsOfSize[dwLog2FieldSize]++;
                if (bCurrentFieldIsGCPointer || fIsByValue)
                    bmtFP->NumStaticGCPointerFields++;
            }
        }
    }
    // Finalize per-class field counts and thread/context static block sizes.
    m_wNumStaticFields = (WORD) bmtEnumMF->dwNumStaticFields;
    m_wNumInstanceFields = (WORD) (dwCurrentDeclaredField + (GetParentClass() ? GetParentClass()->m_wNumInstanceFields : 0));
    if (ShouldAlign8(dwR8Fields, m_wNumInstanceFields))
    {
        SetAlign8Candidate();
    }
    if(fSetThreadStaticOffset)
    {
        if(IsShared())
        {
            SetThreadStaticOffset ((WORD)BaseDomain::IncSharedTLSOffset());
            m_wThreadStaticsSize = (WORD)dwSharedThreadStatic;
        }
        else
        {
            SetThreadStaticOffset ((WORD)GetDomain()->IncUnsharedTLSOffset());
            m_wThreadStaticsSize = (WORD)dwUnsharedThreadStatic;
        }
    }
    if(fSetContextStaticOffset)
    {
        if(IsShared())
        {
            SetContextStaticOffset ((WORD)BaseDomain::IncSharedCLSOffset());
            m_wContextStaticsSize = (WORD)dwSharedContextStatic;
        }
        else
        {
            SetContextStaticOffset ((WORD)GetDomain()->IncUnsharedCLSOffset());
            m_wContextStaticsSize = (WORD)dwUnsharedContextStatic;
        }
    }
    //========================================================================
    // END:
    // Go thru all fields and initialize their FieldDescs.
    //========================================================================
    return hr;
}
// Validates that a method with attributes dwMemberAttrs may legally override a
// parent method with attributes dwParentAttrs.
//
// dwParentAttrs  - mdMethodDef flags of the overridden (parent) method.
// dwMemberAttrs  - mdMethodDef flags of the overriding method.
// isSameAssembly - TRUE when override and parent are declared in the same assembly.
// bmtError       - receives the resource ID explaining any failure.
//
// Returns S_OK when the override is legal, COR_E_TYPELOAD otherwise.
HRESULT EEClass::TestOverRide(DWORD dwParentAttrs, DWORD dwMemberAttrs, BOOL isSameAssembly, bmtErrorInfo* bmtError)
{
    HRESULT hr = COR_E_TYPELOAD;
    // Virtual methods cannot be static
    if (IsMdStatic(dwMemberAttrs)) {
        //_ASSERTE(!"A method cannot be both static and virtual");
        bmtError->resIDWhy = IDS_CLASSLOAD_STATICVIRTUAL;
        IfFailRet(hr);
    }
    // If the parent method marks itself CheckAccessOnOverride, then a
    // cross-assembly override is only allowed when the parent method is
    // public, FamORAssem, or Family (i.e. at least mdFamily in the access lattice).
    if(!isSameAssembly &&
       IsMdCheckAccessOnOverride(dwParentAttrs) &&
       ((dwParentAttrs & mdMemberAccessMask) < mdFamily)) {
        bmtError->resIDWhy = IDS_CLASSLOAD_MI_ACCESS_FAILURE;
        IfFailRet(hr);
    }
    // Check that we are not attempting to reduce the access level of a method
    // (public -> FamORAssem -> family -> FamANDAssem -> default(package) -> private -> PrivateScope)
    // (public -> FamORAssem -> assem -> FamANDAssem -> default(package) -> private -> PrivateScope)
    if (IsMdAssem(dwParentAttrs)) {
        // Assembly-visible parent: Family does not dominate Assem, so it is
        // also rejected explicitly in addition to the numeric lattice compare.
        if (IsMdFamily(dwMemberAttrs) ||
            (dwMemberAttrs & mdMemberAccessMask) < (mdMemberAccessMask & dwParentAttrs) ) {
            bmtError->resIDWhy = IDS_CLASSLOAD_REDUCEACCESS;
            IfFailRet(hr);
        }
    }
    else {
        if((dwMemberAttrs & mdMemberAccessMask) < (dwParentAttrs & mdMemberAccessMask)) {
            // bug fix 31375: we will allow derived method to be Family if the base method is FamOrAssem and derived
            // and base class are not from the same assembly.
            //
            if (!(IsMdFamORAssem(dwParentAttrs) && IsMdFamily(dwMemberAttrs) && isSameAssembly == FALSE)) {
                bmtError->resIDWhy = IDS_CLASSLOAD_REDUCEACCESS;
                IfFailRet(hr);
            }
        }
    }
    return S_OK;
}
//
// Used by BuildMethodTable
//
// Determine vtable placement for each member in this class
//
// Walks every method declared on this type, allocates and initializes its
// MethodDesc, performs per-method validity checks (RVA, special-name
// signatures, override legality), records declarative-security requirements,
// and assigns each method a vtable or non-vtable slot. Methods implementing
// interfaces and method-impl bodies are deferred for later placement.
HRESULT EEClass::PlaceMembers(bmtInternalInfo* bmtInternal,
bmtMetaDataInfo* bmtMetaData,
bmtErrorInfo* bmtError,
bmtProperties* bmtProp,
bmtParentInfo* bmtParent,
bmtInterfaceInfo* bmtInterface,
bmtMethAndFieldDescs* bmtMFDescs,
bmtEnumMethAndFields* bmtEnumMF,
bmtMethodImplInfo* bmtMethodImpl,
bmtVtable* bmtVT)
{
#ifdef _DEBUG
LPCUTF8 pszDebugName,pszDebugNamespace;
bmtInternal->pModule->GetMDImport()->GetNameOfTypeDef(GetCl(), &pszDebugName, &pszDebugNamespace);
#endif
HRESULT hr = S_OK;
DWORD i, j;
// Class-level declarative security flags, lazily resolved by the first call
// to GetSecurityFlags below (0xffffffff == not yet computed).
DWORD dwClassDeclFlags = 0xffffffff;
DWORD dwClassNullDeclFlags = 0xffffffff;
// NT headers are used solely to sanity-check that each method's RVA falls
// inside some section of the PE image.
IMAGE_NT_HEADERS *pNT = bmtInternal->pModule->IsPEFile() ?
bmtInternal->pModule->GetPEFile()->GetNTHeader() : NULL;
ULONG Nsections = pNT ? pNT->FileHeader.NumberOfSections : 0;
bmtVT->wCCtorSlot = MethodTable::NO_SLOT;
bmtVT->wDefaultCtorSlot = MethodTable::NO_SLOT;
for (i = 0; i < bmtMetaData->cMethods; i++)
{
LPCUTF8 szMemberName = NULL;
// Signature is fetched lazily; NULL means "not retrieved yet".
PCCOR_SIGNATURE pMemberSignature = NULL;
DWORD cMemberSignature = 0;
DWORD dwMemberAttrs;
DWORD dwDescrOffset;
DWORD dwImplFlags;
BOOL fMethodImplementsInterface = FALSE;
DWORD dwMDImplementsInterfaceNum = 0;
DWORD dwMDImplementsSlotNum = 0;
DWORD dwMethodHashBit;
DWORD dwParentAttrs;
dwMemberAttrs = bmtMetaData->pMethodAttrs[i];
dwDescrOffset = bmtMetaData->pMethodRVA[i];
dwImplFlags = bmtMetaData->pMethodImplFlags[i];
DWORD Classification = bmtMetaData->pMethodClassifications[i];
DWORD type = bmtMetaData->pMethodType[i];
DWORD impl = bmtMetaData->pMethodImpl[i];
// IL code that is implemented here must have a valid code RVA.
// This came up due to a linker bug where the ImplFlags/DescrOffset were
// being set to null and we weren't coping with it.
if (dwDescrOffset == 0)
{
// A zero RVA is only legal for abstract, runtime-supplied, P/Invoke,
// internal-call, reflection-emitted or non-static-interface methods.
if((dwImplFlags == 0 || IsMiIL(dwImplFlags) || IsMiOPTIL(dwImplFlags)) &&
!IsMiRuntime(dwImplFlags) &&
!IsMdAbstract(dwMemberAttrs) &&
!IsReallyMdPinvokeImpl(dwMemberAttrs) &&
!IsMiInternalCall(dwImplFlags) &&
!(bmtInternal->pModule)->IsReflection() &&
!(IsInterface() && !IsMdStatic(dwMemberAttrs)) &&
bmtInternal->pModule->GetAssembly()->GetDomain()->IsExecutable())
{
bmtError->resIDWhy = IDS_CLASSLOAD_MISSINGMETHODRVA;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
IfFailRet(COR_E_TYPELOAD);
}
}
else if(Nsections)
{
// A non-zero RVA must land inside one of the image's sections.
IMAGE_SECTION_HEADER *pSecHdr = IMAGE_FIRST_SECTION(pNT);
for(j = 0; j < Nsections; j++,pSecHdr++)
{
if((dwDescrOffset >= pSecHdr->VirtualAddress)&&
(dwDescrOffset < pSecHdr->VirtualAddress+pSecHdr->Misc.VirtualSize)) break;
}
if(j >= Nsections)
{
bmtError->resIDWhy = IDS_CLASSLOAD_MISSINGMETHODRVA;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
IfFailRet(COR_E_TYPELOAD);
}
}
// If this member is a method which overrides a parent method, it will be set to non-NULL
MethodDesc *pParentMethodDesc = NULL;
BOOL fIsInitMethod = FALSE;
BOOL fIsCCtor = FALSE;
BOOL fIsDefaultCtor = FALSE;
szMemberName = bmtMetaData->pstrMethodName[i];
#ifdef _DEBUG
if(m_fDebuggingClass && g_pConfig->ShouldBreakOnMethod(szMemberName))
_ASSERTE(!"BreakOnMethodName");
#endif
// Constructors and class initialisers are special: their names and
// signatures are validated against the exact shapes the runtime expects.
if (IsMdRTSpecialName(dwMemberAttrs))
{
{
if (IsMdStatic(dwMemberAttrs)) {
// Verify the name for the class constructor.
if(strcmp(szMemberName, COR_CCTOR_METHOD_NAME))
hr = COR_E_TYPELOAD;
else {
// Validate that we have the correct signature for the .cctor
pMemberSignature = bmtInternal->pInternalImport->GetSigOfMethodDef(bmtMetaData->pMethods[i],
&cMemberSignature
);
PCCOR_SIGNATURE pbBinarySig;
ULONG cbBinarySig;
// .cctor must return void, have default call conv, and have no args
unsigned cconv,nargs;
pbBinarySig = pMemberSignature;
cconv = CorSigUncompressData(pbBinarySig);
nargs = CorSigUncompressData(pbBinarySig);
// TODO: comparisons of return type and call convention are unnecessary as CompareMethodSigs does that
if((*pbBinarySig != ELEMENT_TYPE_VOID)||(nargs!=0)||(cconv != IMAGE_CEE_CS_CALLCONV_DEFAULT))
hr = COR_E_TYPELOAD;
else {
if(FAILED(gsig_SM_RetVoid.GetBinaryForm(&pbBinarySig, &cbBinarySig)))
hr = COR_E_EXECUTIONENGINE;
else {
// Final check: the whole signature must match "static void ()".
if (MetaSig::CompareMethodSigs(pbBinarySig, cbBinarySig,
SystemDomain::SystemModule(),
pMemberSignature, cMemberSignature, bmtInternal->pModule))
fIsCCtor = TRUE;
else
hr = COR_E_TYPELOAD;
}
}
}
}
else {
// Verify the name for a constructor.
if(strcmp(szMemberName, COR_CTOR_METHOD_NAME) != 0)
{
hr = COR_E_TYPELOAD;
}
else
{
// See if this is a default constructor. If so, remember it for later.
pMemberSignature = bmtInternal->pInternalImport->GetSigOfMethodDef(bmtMetaData->pMethods[i],
&cMemberSignature
);
PCCOR_SIGNATURE pbBinarySig;
ULONG cbBinarySig;
// .ctor must return void
pbBinarySig = pMemberSignature;
CorSigUncompressData(pbBinarySig); // get call conv out of the way
CorSigUncompressData(pbBinarySig); // get num args out of the way
// TODO: explicit check for return type unnecessary, done in CompareMethodSigs
if(*pbBinarySig != ELEMENT_TYPE_VOID)
hr = COR_E_TYPELOAD;
else {
if(FAILED(gsig_IM_RetVoid.GetBinaryForm(&pbBinarySig, &cbBinarySig)))
hr = COR_E_EXECUTIONENGINE;
else {
// A .ctor whose full signature is "instance void ()" is the
// default constructor; a non-match is NOT an error here.
if (MetaSig::CompareMethodSigs(pbBinarySig, cbBinarySig,
SystemDomain::SystemModule(),
pMemberSignature, cMemberSignature, bmtInternal->pModule))
fIsDefaultCtor = TRUE;
}
}
fIsInitMethod = TRUE;
}
}
}
// We have a specially marked member, verify that it has a legitimate signature
if(FAILED(hr)) {
bmtError->resIDWhy = IDS_CLASSLOAD_BADSPECIALMETHOD;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
IfFailRet(hr);
}
} else { // The method does not have the special marking
if (IsMdVirtual(dwMemberAttrs))
{
// Hash that a method with this name exists in this class
// Note that ctors and static ctors are not added to the table
DWORD dwHashName = HashStringA(szMemberName);
dwMethodHashBit = dwHashName % METHOD_HASH_BITS;
m_MethodHash[dwMethodHashBit >> 3] |= (1 << (dwMethodHashBit & 7));
// If the member is marked with a new slot we do not need to find it
// in the parent
if (!IsMdNewSlot(dwMemberAttrs))
{
// If we're not doing sanity checks, then assume that any method declared static
// does not attempt to override some virtual parent.
if (!IsMdStatic(dwMemberAttrs) &&
GetParentClass() != NULL) {
// Attempt to find the method with this name and signature in the parent class.
// This method may or may not create pParentMethodHash (if it does not already exist).
// It also may or may not fill in pMemberSignature/cMemberSignature.
// An error is only returned when we can not create the hash.
IfFailRet(LoaderFindMethodInClass(&(bmtParent->pParentMethodHash),
szMemberName,
bmtInternal->pModule,
bmtMetaData->pMethods[i],
&pParentMethodDesc,
&pMemberSignature, &cMemberSignature,
dwHashName));
if (pParentMethodDesc != NULL) {
dwParentAttrs = pParentMethodDesc->GetAttrs();
_ASSERTE(IsMdVirtual(dwParentAttrs) && "Non virtual methods should not be searched");
_ASSERTE(fIsInitMethod == FALSE);
// if we end up pointing at a slot that is final we are not allowed to override it.
if(IsMdFinal(dwParentAttrs)) {
bmtError->resIDWhy = IDS_CLASSLOAD_MI_FINAL_DECL;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
else if(!bmtProp->fNoSanityChecks) {
// Enforce the accessibility rules for overrides (see TestOverRide).
BOOL isSameAssembly = (pParentMethodDesc->GetClass()->GetClassLoader()->GetAssembly() ==
GetClassLoader()->GetAssembly());
hr = TestOverRide(dwParentAttrs, dwMemberAttrs, isSameAssembly, bmtError);
if(FAILED(hr)) {
//_ASSERTE(!"Attempting to reduce access of public method");
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
return hr;
}
}
}
}
}
}
}
if(pParentMethodDesc == NULL) {
// This method does not exist in the parent. If we are a class, check whether this
// method implements any interface. If true, we can't place this method now.
if ((IsInterface() == FALSE) &&
( IsMdPublic(dwMemberAttrs) &&
IsMdVirtual(dwMemberAttrs) &&
!IsMdStatic(dwMemberAttrs) &&
!IsMdRTSpecialName(dwMemberAttrs))) {
// Don't check parent class interfaces - if the parent class had to implement an interface,
// then it is already guaranteed that we inherited that method.
for (j = (GetParentClass() ? GetParentClass()->m_wNumInterfaces : 0);
j < bmtInterface->dwInterfaceMapSize;
j++)
{
EEClass *pInterface;
pInterface = bmtInterface->pInterfaceMap[j].m_pMethodTable->GetClass();
// Cheap name-hash rejection before the expensive signature search.
if (CouldMethodExistInClass(pInterface, szMemberName, 0) == 0)
continue;
// We've been trying to avoid asking for the signature - now we need it
if (pMemberSignature == NULL) {
pMemberSignature = bmtInternal->pInternalImport->GetSigOfMethodDef(
bmtMetaData->pMethods[i],
&cMemberSignature
);
}
DWORD slotNum = -1;
if (pInterface->InterfaceFindMethod(szMemberName,
pMemberSignature, cMemberSignature,
bmtInternal->pModule, &slotNum)) {
// This method implements an interface - don't place it
fMethodImplementsInterface = TRUE;
// Keep track of this fact and use it while placing the interface
_ASSERTE(slotNum != -1);
// Lazily allocate the per-interface array mapping itf slot -> implementing MD.
if (bmtInterface->pppInterfaceImplementingMD[j] == NULL)
{
bmtInterface->pppInterfaceImplementingMD[j] = (MethodDesc**)GetThread()->m_MarshalAlloc.Alloc(sizeof(MethodDesc *) * pInterface->GetNumVtableSlots());
memset(bmtInterface->pppInterfaceImplementingMD[j], 0, sizeof(MethodDesc *) * pInterface->GetNumVtableSlots());
}
dwMDImplementsInterfaceNum = j;
dwMDImplementsSlotNum = slotNum;
break;
}
}
}
}
// Now we know the classification we can allocate the correct type of
// method desc and perform any classification specific initialization.
bmtTokenRangeNode *pTR = GetTokenRange(bmtMetaData->pMethods[i],
&(bmtMetaData->ranges[type][impl]));
_ASSERTE(pTR->cMethods != 0);
bmtMethodDescSet *set = &bmtMFDescs->sets[type][impl];
// The MethodDesc we allocate for this method
MethodDesc *pNewMD = set->pChunkList[pTR->dwCurrentChunk]->GetMethodDescAt(pTR->dwCurrentIndex);
LPCSTR pName = bmtMetaData->pstrMethodName[i];
if (pName == NULL)
pName = bmtInternal->pInternalImport->GetNameOfMethodDef(bmtMetaData->pMethods[i]);
// Write offset into the chunk back into the method desc. This
// allows us to calculate the location of (and thus the value of)
// the method table pointer for this method desc.
pNewMD->SetChunkIndex(pTR->dwCurrentIndex, Classification);
// Update counters to prepare for next method desc allocation.
pTR->dwCurrentIndex++;
if (pTR->dwCurrentIndex == MethodDescChunk::GetMaxMethodDescs(Classification))
{
pTR->dwCurrentChunk++;
pTR->dwCurrentIndex = 0;
}
#ifdef _DEBUG
LPCUTF8 pszDebugMethodName = bmtInternal->pInternalImport->GetNameOfMethodDef(bmtMetaData->pMethods[i]);
#endif //_DEBUG
// Do the init specific to each classification of MethodDesc & assign some common fields
hr = InitMethodDesc(pNewMD,
Classification,
bmtMetaData->pMethods[i],
dwImplFlags,
dwMemberAttrs,
FALSE,
dwDescrOffset,
bmtInternal->pModule->GetILBase(),
bmtInternal->pInternalImport,
pName
#ifdef _DEBUG
, pszDebugMethodName,
pszDebugName,
"" // FIX this happens on global methods, give better info
#endif // _DEBUG
);
if (FAILED(hr))
{
return hr;
}
_ASSERTE(bmtParent->ppParentMethodDescBufPtr != NULL);
// NOTE(review): ppParentMethodDescBufPtr is a MethodDesc**, so the pointer
// subtraction below already yields an element count; dividing again by
// sizeof(MethodDesc*) appears to over-weaken this assert - confirm intent.
_ASSERTE(((bmtParent->ppParentMethodDescBufPtr - bmtParent->ppParentMethodDescBuf) / sizeof(MethodDesc*))
< bmtEnumMF->dwNumDeclaredMethods);
// The buffer records (parent MD, new MD) pairs - two writes per method.
*(bmtParent->ppParentMethodDescBufPtr++) = pParentMethodDesc;
*(bmtParent->ppParentMethodDescBufPtr++) = pNewMD;
if (fMethodImplementsInterface && IsMdVirtual(dwMemberAttrs))
bmtInterface->pppInterfaceImplementingMD[dwMDImplementsInterfaceNum][dwMDImplementsSlotNum] = pNewMD;
// ----- Declarative security processing for this method -----
DWORD dwMethDeclFlags = 0;
DWORD dwMethNullDeclFlags = 0;
if (Security::IsSecurityOn())
{
if ( IsMdHasSecurity(dwMemberAttrs) || IsTdHasSecurity(m_dwAttrClass) )
{
// Disable inlining for any function which does runtime declarative
// security actions.
if (pNewMD->GetSecurityFlags(bmtInternal->pInternalImport,
bmtMetaData->pMethods[i],
GetCl(),
&dwClassDeclFlags,
&dwClassNullDeclFlags,
&dwMethDeclFlags,
&dwMethNullDeclFlags) & DECLSEC_RUNTIME_ACTIONS)
{
pNewMD->SetNotInline(true);
// Speculatively mark intercepted here, we may revert
// this if we optimize a demand out at jit time, but at
// worst we'll cause a racing thread to indirect through
// the pre stub needlessly.
pNewMD->SetIntercepted(true);
}
}
if ( IsMdHasSecurity(dwMemberAttrs) )
{
// We only care about checks that are not empty...
dwMethDeclFlags &= ~dwMethNullDeclFlags;
if ( dwMethDeclFlags & (DECLSEC_LINK_CHECKS|DECLSEC_NONCAS_LINK_DEMANDS) )
{
pNewMD->SetRequiresLinktimeCheck();
}
if ( dwMethDeclFlags & (DECLSEC_INHERIT_CHECKS|DECLSEC_NONCAS_INHERITANCE) )
{
pNewMD->SetRequiresInheritanceCheck();
}
}
// Linktime checks on a method override those on a class.
// If the method has an empty set of linktime checks,
// then don't require linktime checking for this method.
if ( this->RequiresLinktimeCheck() && !(dwMethNullDeclFlags & DECLSEC_LINK_CHECKS) )
{
pNewMD->SetRequiresLinktimeCheck();
}
if ( pParentMethodDesc != NULL &&
(pParentMethodDesc->RequiresInheritanceCheck() ||
pParentMethodDesc->ParentRequiresInheritanceCheck()) )
{
pNewMD->SetParentRequiresInheritanceCheck();
}
// Methods on an interface that includes an UnmanagedCode check
// suppression attribute are assumed to be interop methods. We ask
// for linktime checks on these.
// Also place linktime checks on all P/Invoke calls.
if ((IsInterface() &&
bmtInternal->pInternalImport->GetCustomAttributeByName(GetCl(),
COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
NULL,
NULL) == S_OK) ||
pNewMD->IsNDirect() ||
(pNewMD->IsComPlusCall() && !IsInterface()))
{
pNewMD->SetRequiresLinktimeCheck();
}
// All public methods on public types will do a link demand of
// full trust, unless AllowUntrustedCaller attribute is set
if (
#ifdef _DEBUG
g_pConfig->Do_AllowUntrustedCaller_Checks() &&
#endif
!pNewMD->RequiresLinktimeCheck())
{
// If the method is public (visible outside it's assembly),
// and the type is public and the assembly
// is not marked with AllowUntrustedCaller attribute, do
// a link demand for full trust on all callers note that
// this won't be effective on virtual overrides. The caller
// can always do a virtual call on the base type / interface
if (Security::MethodIsVisibleOutsideItsAssembly(
dwMemberAttrs, m_dwAttrClass))
{
_ASSERTE(m_pLoader);
_ASSERTE(GetAssembly());
// See if the Assembly has AllowUntrustedCallerChecks CA
// Pull this page in last
if (!GetAssembly()->AllowUntrustedCaller())
pNewMD->SetRequiresLinktimeCheck();
}
}
}
if (IsMdHasSecurity(dwMemberAttrs))
pNewMD->SetHasSecurity();
bmtMFDescs->ppMethodDescList[i] = pNewMD;
// Make sure that ecalls have a 0 rva. This is assumed by the prejit fixup logic
_ASSERTE(((Classification & ~mdcMethodImpl) != mcECall) || dwDescrOffset == 0);
// ----- Slot placement: static, non-virtual and special-name methods get
// non-vtable slots; everything else goes into the vtable. -----
if (IsMdStatic(dwMemberAttrs) ||
!IsMdVirtual(dwMemberAttrs) ||
IsMdRTSpecialName(dwMemberAttrs))
{
// non-vtable method
_ASSERTE( bmtVT->pNonVtable[ bmtVT->dwCurrentNonVtableSlot ] == NULL);
bmtVT->pNonVtable[ bmtVT->dwCurrentNonVtableSlot ] = (SLOT) pNewMD; // Not prestub addr
pNewMD->m_wSlotNumber = (WORD) bmtVT->dwCurrentNonVtableSlot;
if (fIsDefaultCtor)
bmtVT->wDefaultCtorSlot = (WORD) bmtVT->dwCurrentNonVtableSlot;
else if (fIsCCtor)
bmtVT->wCCtorSlot = (WORD) bmtVT->dwCurrentNonVtableSlot;
bmtVT->dwCurrentNonVtableSlot++;
}
else
{
pNewMD->m_wSlotNumber = -1; // mark it initially as unplaced
// vtable method
if (IsInterface())
{
// if we're an interface, our slot number is fixed
IncrementNumVtableSlots();
_ASSERTE( bmtVT->pVtable[ bmtVT->dwCurrentVtableSlot ] == NULL);
bmtVT->pVtable[ bmtVT->dwCurrentVtableSlot ] = (SLOT) pNewMD->GetPreStubAddr();
pNewMD->m_wSlotNumber = (WORD) bmtVT->dwCurrentVtableSlot;
bmtVT->dwCurrentVtableSlot++;
}
else if (pParentMethodDesc != NULL)
{
WORD slotNumber = pParentMethodDesc->m_wSlotNumber;
// If the MethodDesc was inherited by an interface but not implemented,
// then the interface's MethodDesc is sitting in the slot and will not reflect
// the true slot number. Need to find the starting slot of the interface in
// the parent class to figure out the true slot (starting slot + itf slot)
if (pParentMethodDesc->IsInterface())
{
_ASSERTE(GetParentClass() != NULL);
MethodTable *pItfMT = pParentMethodDesc->GetMethodTable();
InterfaceInfo_t *pItfMap = GetParentClass()->GetInterfaceMap();
InterfaceInfo_t *pItfMapEnd = pItfMap + GetParentClass()->GetNumInterfaces();
while (pItfMap < pItfMapEnd)
{
if (pItfMap->m_pMethodTable == pItfMT)
{
slotNumber += pItfMap->m_wStartSlot;
break;
}
pItfMap++;
}
_ASSERTE(pItfMap < pItfMapEnd);
}
// we are overriding a parent method, so place this method now
bmtVT->pVtable[slotNumber] = (SLOT) pNewMD->GetPreStubAddr();
pNewMD->m_wSlotNumber = slotNumber;
if (pParentMethodDesc->IsDuplicate())
{
pNewMD->SetDuplicate();
}
}
// Place it unless we will do it when laying out an interface or it is a body to
// a method impl. If it is an impl then we will use the slots used by the definition.
else if (!fMethodImplementsInterface)
{
IncrementNumVtableSlots();
bmtVT->pVtable[ bmtVT->dwCurrentVtableSlot ] = (SLOT) pNewMD->GetPreStubAddr();
pNewMD->m_wSlotNumber = (WORD) bmtVT->dwCurrentVtableSlot;
bmtVT->dwCurrentVtableSlot++;
}
}
// If the method desc is a Method Impl then fill in the Array of bodies. Since
// this Method desc can be used more then once fill all the instances of the
// body. Go and find the declarations, if the declaration is in this type
// then store the Token.
if(Classification & mdcMethodImpl) {
for(DWORD m = 0; m < bmtEnumMF->dwNumberMethodImpls; m++) {
if(bmtMetaData->pMethods[i] == bmtMetaData->pMethodBody[m]) {
MethodDesc* desc = NULL;
BOOL fIsMethod;
mdToken mdDecl = bmtMetaData->pMethodDecl[m];
hr = GetDescFromMemberRef(bmtInternal->pModule,
mdDecl,
m_cl,
(void**) &desc,
&fIsMethod,
bmtError->pThrowable);
if(SUCCEEDED(hr) && desc != NULL && !TestThrowable(bmtError->pThrowable)) {
// We found an external member reference
_ASSERTE(fIsMethod);
mdDecl = mdTokenNil;
// Make sure the body is virtual
if(!IsMdVirtual(dwMemberAttrs)) {
bmtError->resIDWhy = IDS_CLASSLOAD_MI_MUSTBEVIRTUAL;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
}
else {
// Resolution failed: the declaration is local to this type, so keep
// the token instead of a MethodDesc (desc stays NULL).
if(pThrowableAvailable(bmtError->pThrowable)) *(bmtError->pThrowable) = NULL;
hr = S_OK;
desc = NULL;
if(TypeFromToken(mdDecl) != mdtMethodDef) {
Module* pModule;
hr = FindMethodDeclaration(bmtInternal,
mdDecl,
&mdDecl,
FALSE,
&pModule,
bmtError);
IfFailRet(hr);
_ASSERTE(pModule == bmtInternal->pModule);
// Make sure the virtual states are the same
DWORD dwDescAttrs = bmtInternal->pInternalImport->GetMethodDefProps(mdDecl);
if(IsMdVirtual(dwMemberAttrs) != IsMdVirtual(dwDescAttrs)) {
bmtError->resIDWhy = IDS_CLASSLOAD_MI_VIRTUALMISMATCH;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
}
}
// Record (body, declaration) for PlaceMethodImpls to resolve later.
bmtMethodImpl->AddMethod(pNewMD,
desc,
mdDecl);
}
}
}
// check for proper use of the Managed and Native flags
if (IsMiManaged(dwImplFlags)) {
if (IsMiIL(dwImplFlags) || IsMiRuntime(dwImplFlags)) // IsMiOPTIL(dwImplFlags) no longer supported
{
// No need to set code address, pre stub used automatically.
}
else
{
if (IsMiNative(dwImplFlags))
{
// For now simply disallow managed native code if you turn this on you have to at least
// insure that we have SkipVerificationPermission or equivalent
BAD_FORMAT_ASSERT(!"Managed native not presently supported");
// if (!IsMDAbstract()) pNewMD->SetAddrofCode((BYTE*) (bmtInternal->pModule)->GetILBase() + pNewMD->GetRVA());
}
// TODO this should really say bad implementation flags
bmtError->resIDWhy = IDS_CLASSLOAD_BAD_MANAGED_RVA;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
}
else {
if (IsMiNative(dwImplFlags) && (GetCl() == COR_GLOBAL_PARENT_TOKEN))
{
// global function unmanaged entrypoint via IJW thunk was handled
// above.
}
else
{
bmtError->resIDWhy = IDS_CLASSLOAD_BAD_UNMANAGED_RVA;
bmtError->dMethodDefInError = bmtMetaData->pMethods[i];
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
if (Classification != mcNDirect)
{
BAD_FORMAT_ASSERT(!"Bad unmanaged code entry point");
IfFailRet(COR_E_TYPELOAD);
}
}
// Turn off inlining for contextful and marshalbyref classes
// so that we can intercept calls for remoting. Also, any calls
// that are marked in the metadata as not being inlineable.
if(IsMarshaledByRef() || IsMiNoInlining(dwImplFlags))
{
// Contextful classes imply marshal by ref but not vice versa
_ASSERTE(!IsContextful() || IsMarshaledByRef());
pNewMD->SetNotInline(true);
}
} /* end ... for each member */
return hr;
}
// InitMethodDesc takes a pointer to space that's already allocated for the
// particular type of MethodDesc, and initializes based on the other info.
// This factors logic between PlaceMembers (the regular code path) & AddMethod
// (Edit & Continue (EnC) code path) so we don't have to maintain separate copies.
HRESULT EEClass::InitMethodDesc(MethodDesc *pNewMD, // This is should actually be of the correct
// sub-type, based on Classification
DWORD Classification,
mdToken tok,
DWORD dwImplFlags,
DWORD dwMemberAttrs,
BOOL fEnC,
DWORD RVA, // Only needed for NDirect case
BYTE *ilBase, // Only needed for NDirect case
IMDInternalImport *pIMDII, // Needed for NDirect, EEImpl(Delegate) cases
LPCSTR pMethodName // Only needed for mcEEImpl (Delegate) case
#ifdef _DEBUG
, LPCUTF8 pszDebugMethodName,
LPCUTF8 pszDebugClassName,
LPUTF8 pszDebugMethodSignature
#endif //_DEBUG //@todo Is it bad to have a diff sig in debug/retail?
)
{
LOG((LF_CORDB, LL_EVERYTHING, "EEC::IMD: pNewMD:0x%x for tok:0x%x (%s::%s)\n",
pNewMD, tok, pszDebugClassName, pszDebugMethodName));
HRESULT hr = S_OK;
// Now we know the classification we can allocate the correct type of
// method desc and perform any classification specific initialization.
NDirectMethodDesc *pNewNMD;
// Each case first zero-inits exactly the concrete sub-type's size (the
// MI_* variants carry extra method-impl data), then performs any
// classification-specific setup.
switch (Classification & mdcClassification)
{
case mcNDirect:
// Zero init the method desc. Should go away once all the fields are
// initialized manually.
if(Classification & mdcMethodImpl)
memset(pNewMD, 0, sizeof(MI_NDirectMethodDesc));
else {
memset(pNewMD, 0, sizeof(NDirectMethodDesc));
}
// NDirect specific initialization.
pNewNMD = (NDirectMethodDesc*)pNewMD;
// Early-bound when the target RVA is known and the method is truly
// unmanaged native; otherwise route through the late-bound import thunk.
if (RVA != 0 &&
IsMiUnmanaged(dwImplFlags) && IsMiNative(dwImplFlags) //@todo: why is Banjara emitting a method RVA of 0x1050 for their bjlib.dll P/Invokes?
// Need this clause here to prevent us from treating their P/Invokes as earlybounds.
)
{
pNewNMD->InitSubClassification(pNewNMD->kEarlyBound);
pNewNMD->InitEarlyBoundNDirectTarget(ilBase, RVA);
}
else
{
// regular sysimport
pNewNMD->InitSubClassification(pNewNMD->kLateBound);
pNewNMD->ndirect.m_pNDirectTarget = (LPVOID) pNewNMD->ndirect.m_ImportThunkGlue;
}
pNewNMD->ndirect.m_pMLHeader = 0;
emitCall( pNewNMD->ndirect.m_ImportThunkGlue, NDirectImportThunk );
pNewNMD->InitMarshCategory();
break;
case mcECall:
case mcEEImpl:
// Zero init the method desc. Should go away once all the fields are
// initialized manually.
if(Classification & mdcMethodImpl)
memset(pNewMD, 0, sizeof(MI_ECallMethodDesc));
else {
memset(pNewMD, 0, sizeof(ECallMethodDesc));
}
// EEImpl specific initialization.
if ((Classification & mdcClassification) == mcEEImpl)
{
// For the Invoke method we will set a standard invoke method.
_ASSERTE(IsAnyDelegateClass());
// For the asserts, either the pointer is NULL (since the class hasn't
// been constructed yet), or we're in EnC mode, meaning that the class
// does exist, but we may be re-assigning the field to point to an
// updated MethodDesc
// TODO: I am amazed that replacing the invoke method with EnC works.
// For example: delegate d of type D(int) points at function f(int). Now
// you change D's invoke method from Invoke(int) to Invoke(int, int)
// Thus you can now do d.invoke(2,3), but that will try to call a
// function f(int)!. Seems like we have an AV for sure.
// Anyway, this and EnC, which we punted for V1. - Vancem
if (strcmp(pMethodName, "Invoke") == 0)
{
_ASSERTE(fEnC || NULL == ((DelegateEEClass*)this)->m_pInvokeMethod);
((DelegateEEClass*)this)->m_pInvokeMethod = pNewMD;
}
else if (strcmp(pMethodName, "BeginInvoke") == 0)
{
_ASSERTE(fEnC || NULL == ((DelegateEEClass*)this)->m_pBeginInvokeMethod);
((DelegateEEClass*)this)->m_pBeginInvokeMethod = pNewMD;
}
else if (strcmp(pMethodName, "EndInvoke") == 0)
{
_ASSERTE(fEnC || NULL == ((DelegateEEClass*)this)->m_pEndInvokeMethod);
((DelegateEEClass*)this)->m_pEndInvokeMethod = pNewMD;
}
else
{
// A delegate class may only declare Invoke/BeginInvoke/EndInvoke
// (plus the ctor, which is not an mcEEImpl method).
hr = E_FAIL;
return hr;
}
}
// StoredSig specific initialization: both ECall and EEImpl descs keep a
// pointer to the raw metadata signature.
{
StoredSigMethodDesc *pNewSMD = (StoredSigMethodDesc*) pNewMD;;
DWORD cSig;
PCCOR_SIGNATURE pSig = pIMDII->GetSigOfMethodDef(tok, &cSig);
pNewSMD->m_pSig = pSig;
pNewSMD->m_cSig = cSig;
}
break;
case mcIL:
// Zero init the method desc. Should go away once all the fields are
// initialized manually.
if(Classification & mdcMethodImpl)
memset(pNewMD, 0, sizeof(MI_MethodDesc));
else {
memset(pNewMD, 0, sizeof(MethodDesc));
}
break;
case mcComInterop:
// Zero init the method desc. Should go away once all the fields are
// initialized manually.
if(Classification & mdcMethodImpl)
memset(pNewMD, 0, sizeof(MI_ComPlusCallMethodDesc));
else
memset(pNewMD, 0, sizeof(ComPlusCallMethodDesc));
break;
default:
_ASSERTE(!"Failed to set a method desc classification");
}
// Set the method desc's classification.
pNewMD->SetClassification(Classification & mdcClassification);
pNewMD->SetMethodImpl((Classification & mdcMethodImpl) ? TRUE : FALSE);
// pNewMD->SetLivePointerMapIndex(-1);
#ifdef _IA64_
#ifdef _DEBUG
//
// assert that the gp of the target is the same as the current gp
//
ULONG64 gp = *(((ULONG64*)PreStubWorker)+1);
_ASSERTE((void*)gp == GetGp());
#endif // _DEBUG
//
// @TODO_IA64: make this go through a stub?
//
//
// grab the fn pointer out of the function descriptor
//
void* pActualCode = *((void**)PreStubWorker);
emitStubCall(pNewMD, (UINT64)pActualCode);
#else
// Point the method at the prestub; real code is hooked up at first call.
emitStubCall(pNewMD, (BYTE*)(ThePreStub()->GetEntryPoint()));
#endif // !_IA64_
// Common fields shared by every classification.
pNewMD->SetMemberDef(tok);
if (IsMdStatic(dwMemberAttrs))
pNewMD->SetStatic();
if (IsMiSynchronized(dwImplFlags))
pNewMD->SetSynchronized();
pNewMD->SetRVA(RVA);
#ifdef _DEBUG
pNewMD->m_pszDebugMethodName = (LPUTF8)pszDebugMethodName;
pNewMD->m_pszDebugClassName = (LPUTF8)pszDebugClassName;
pNewMD->m_pDebugEEClass = this;
pNewMD->m_pDebugMethodTable = GetMethodTable();
if (pszDebugMethodSignature == NULL)
pNewMD->m_pszDebugMethodSignature = FormatSig(pNewMD);
else
pNewMD->m_pszDebugMethodSignature = pszDebugMethodSignature;
#endif
return hr;
}
//
// Used by BuildMethodTable
//
// We should have collected all the method impls. Cycle through them creating the method impl
// structure that holds the information about which slots are overridden.
HRESULT EEClass::PlaceMethodImpls(bmtInternalInfo* bmtInternal,
bmtMethodImplInfo* bmtMethodImpl,
bmtErrorInfo* bmtError,
bmtInterfaceInfo* bmtInterface,
bmtVtable* bmtVT)
{
HRESULT hr = S_OK;
// Nothing to do if the type declared no method impls.
if(bmtMethodImpl->pIndex == 0)
return hr;
DWORD pIndex = 0;
MethodDesc* next = bmtMethodImpl->GetBodyMethodDesc(pIndex);
// Allocate some temporary storage. The number of overrides for a single method impl
// cannot be greater then the number of vtable slots.
DWORD* slots = (DWORD*) GetThread()->m_MarshalAlloc.Alloc((bmtVT->dwCurrentVtableSlot) * sizeof(DWORD));
MethodDesc **replaced = (MethodDesc**) GetThread()->m_MarshalAlloc.Alloc((bmtVT->dwCurrentVtableSlot) * sizeof(MethodDesc*));
// Outer loop: one iteration per distinct body MethodDesc; the list is
// assumed grouped so equal bodies are adjacent (see the do/while below).
while(next != NULL) {
DWORD slotIndex = 0;
MethodDesc* body;
// The signature for the body of the method impl. We cache the signature until all
// the method impl's using the same body are done.
PCCOR_SIGNATURE pBodySignature = NULL;
DWORD cBodySignature = 0;
// Get the MethodImpl storage
_ASSERTE(next->IsMethodImpl());
MethodImpl* pImpl = MethodImpl::GetMethodImplData(next);
// The impls are sorted according to the method descs for the body of the method impl.
// Loop through the impls until the next body is found. When a single body
// has been done move the slots implemented and method descs replaced into the storage
// found on the body method desc.
do { // collect information until we reach the next body
body = next;
// Get the declaration part of the method impl. It will either be a token
// (declaration is on this type) or a method desc.
MethodDesc* pDecl = bmtMethodImpl->GetDeclarationMethodDesc(pIndex);
if(pDecl == NULL) {
// The declaration is on this type to get the token.
mdMethodDef mdef = bmtMethodImpl->GetDeclarationToken(pIndex);
hr = PlaceLocalDeclaration(mdef,
body,
bmtInternal,
bmtError,
bmtVT,
slots, // Adds override to the slot and replaced arrays.
replaced,
&slotIndex, // Increments count
&pBodySignature, // Fills in the signature
&cBodySignature);
IfFailRet(hr);
}
else {
// Declaration lives on an interface or a parent class; each case
// resolves the slot differently.
if(pDecl->GetClass()->IsInterface()) {
hr = PlaceInterfaceDeclaration(pDecl,
body,
bmtInternal,
bmtInterface,
bmtError,
bmtVT,
slots,
replaced,
&slotIndex, // Increments count
&pBodySignature, // Fills in the signature
&cBodySignature);
IfFailRet(hr);
}
else {
hr = PlaceParentDeclaration(pDecl, body,
bmtInternal,
bmtError,
bmtVT,
slots,
replaced,
&slotIndex, // Increments count
&pBodySignature, // Fills in the signature
&cBodySignature);
IfFailRet(hr);
}
}
pIndex++;
// we hit the end of the list so leave
if(pIndex == bmtMethodImpl->pIndex)
next = NULL;
else
next = bmtMethodImpl->GetBodyMethodDesc(pIndex);
} while(next == body) ;
// Use the number of overrides to
// push information on to the method desc. We store the slots that
// are overridden and the method desc that is replaced. That way
// when derived classes need to determine if the method is to be
// overridden then it can check the name against the replaced
// method desc not the bodies name.
if(slotIndex == 0) {
// The body matched no declaration at all - invalid metadata.
bmtError->resIDWhy = IDS_CLASSLOAD_MI_DECLARATIONNOTFOUND;
bmtError->dMethodDefInError = body->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
else {
hr = pImpl->SetSize(GetClassLoader()->GetHighFrequencyHeap(), slotIndex);
IfFailRet(hr);
// Gasp we do a bubble sort. Should change this to a qsort..
// (slotIndex is bounded by the vtable size and typically tiny, so the
// O(n^2) sort of the parallel slots/replaced arrays is acceptable.)
for (DWORD i = 0; i < slotIndex; i++) {
for (DWORD j = i+1; j < slotIndex; j++)
{
if (slots[j] < slots[i])
{
MethodDesc* mTmp = replaced[i];
replaced[i] = replaced[j];
replaced[j] = mTmp;
DWORD sTmp = slots[i];
slots[i] = slots[j];
slots[j] = sTmp;
}
}
}
// Go and set the method impl
hr = pImpl->SetData(slots, replaced);
}
} // while(next != NULL)
return hr;
}
// Used by BuildMethodTable's method-impl processing.
//
// Places a method impl whose *declaration* token (mdef) refers to a method
// declared on this class itself (a "local" declaration). Every vtable slot
// whose original (pre-methodimpl) MethodDesc carries the token 'mdef' is
// overridden with 'body'.
//
// For each overridden slot: the slot number and the displaced MethodDesc are
// appended to the caller's parallel 'slots'/'replaced' arrays (advancing
// *pSlotIndex), and the body's prestub address is patched into the vtable.
// The body's signature is compared against the declaration's signature once,
// on the first match (fVerifySignature); a mismatch fails the type load with
// IDS_CLASSLOAD_MI_BADSIGNATURE.
//
// *ppBodySignature / *pcBodySignature cache the body's metadata signature;
// they are fetched lazily here and reused by the caller across declarations.
//
// Returns S_OK, or COR_E_TYPELOAD (with bmtError filled in) on failure.
HRESULT EEClass::PlaceLocalDeclaration(mdMethodDef mdef,
MethodDesc* body,
bmtInternalInfo* bmtInternal,
bmtErrorInfo* bmtError,
bmtVtable* bmtVT,
DWORD* slots,
MethodDesc** replaced,
DWORD* pSlotIndex,
PCCOR_SIGNATURE* ppBodySignature,
DWORD* pcBodySignature)
{
HRESULT hr = S_OK;
BOOL fVerifySignature = TRUE; // we only need to verify the signature once.
// we search on the token and m_cl
for(USHORT i = 0; i < bmtVT->dwCurrentVtableSlot; i++) {
// We get the current slot. Since we are looking for a method declaration
// that is on our class we would never match up with a method obtained from
// one of our parents or an Interface.
MethodDesc* pMD = GetUnknownMethodDescForSlotAddress(bmtVT->pVtable[i]);
// This entry may have been replaced in a base class so get the original
// method desc for this location.
// NOTE(review): GetRealMethodImpl never yields NULL in *ppResult, so no
// null check is needed on pRealDesc here.
MethodDesc* pRealDesc;
GetRealMethodImpl(pMD, i, &pRealDesc);
// Does the original occupant of this slot match the declaration token?
if(pRealDesc->GetMemberDef() == mdef)
{
// Make sure we are not overriding another method impl.
// (pMD->GetMethodTable() == NULL means the methodimpl was introduced by
// this very class, i.e. its method table has not been finalized yet.)
if(pMD != body && pMD->IsMethodImpl() && pMD->GetMethodTable() == NULL) {
bmtError->resIDWhy = IDS_CLASSLOAD_MI_MULTIPLEOVERRIDES;
bmtError->dMethodDefInError = pMD->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// We are not allowed to implement another method impl
if(pRealDesc->IsMethodImpl()) {
bmtError->resIDWhy = IDS_CLASSLOAD_MI_OVERRIDEIMPL;
bmtError->dMethodDefInError = pMD->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// Compare the signature for the token in the specified scope
if(fVerifySignature) {
// If we have not got the method impl signature go get it now
if(*ppBodySignature == NULL) {
*ppBodySignature =
bmtInternal->pInternalImport->GetSigOfMethodDef(body->GetMemberDef(),
pcBodySignature);
}
PCCOR_SIGNATURE pMethodDefSignature = NULL;
DWORD cMethodDefSignature = 0;
pMethodDefSignature =
bmtInternal->pInternalImport->GetSigOfMethodDef(mdef,
&cMethodDefSignature);
// If they do not match then we are trying to implement
// a method with a body where the signatures do not match
if(!MetaSig::CompareMethodSigs(*ppBodySignature,
*pcBodySignature,
bmtInternal->pModule,
pMethodDefSignature,
cMethodDefSignature,
bmtInternal->pModule))
{
bmtError->resIDWhy = IDS_CLASSLOAD_MI_BADSIGNATURE;
bmtError->dMethodDefInError = mdef;
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// Both scopes are this module, so one comparison covers all matches.
fVerifySignature = FALSE;
}
// If the body has not been placed then place it here. We do not
// place bodies for method impl's until we find a spot for them.
if(body->GetSlot() == (USHORT) -1) {
body->SetSlot(i);
}
// We implement this slot, record it
slots[*pSlotIndex] = i;
replaced[*pSlotIndex] = pRealDesc;
bmtVT->pVtable[i] = (SLOT) body->GetPreStubAddr();
// increment the counter
(*pSlotIndex)++;
}
// Reset the hr from the GetRealMethodImpl() (which returns S_FALSE for
// slots that were already overridden by a methodimpl).
hr = S_OK;
}
return hr;
}
// Used by BuildMethodTable's method-impl processing.
//
// Places a method impl whose declaration (pDecl) lives on an *interface*
// implemented by this class. Locates that interface in the interface map,
// "unfolds" the interface (duplicates its vtable span at the end of the
// vtable) if any of its slots are currently shared with a class method, and
// then overrides the declaration's slot with pImplBody.
//
// Outputs mirror PlaceLocalDeclaration: the overridden slot number and the
// displaced MethodDesc are appended to 'slots'/'replaced' (advancing
// *pSlotIndex), and *ppBodySignature/*pcBodySignature lazily cache the
// body's signature for reuse by the caller.
//
// Returns S_OK, or COR_E_TYPELOAD (with bmtError filled in) if the
// interface is not implemented, signatures mismatch, or the target slot is
// already claimed by another methodimpl.
HRESULT EEClass::PlaceInterfaceDeclaration(MethodDesc* pDecl,
MethodDesc* pImplBody,
bmtInternalInfo* bmtInternal,
bmtInterfaceInfo* bmtInterface,
bmtErrorInfo* bmtError,
bmtVtable* bmtVT,
DWORD* slots,
MethodDesc** replaced,
DWORD* pSlotIndex,
PCCOR_SIGNATURE* ppBodySignature,
DWORD* pcBodySignature)
{
HRESULT hr = S_OK;
// the fact that an interface only shows up once in the vtable
// When we are looking for a method desc then the declaration is on
// some class or interface that this class implements. The declaration
// will either be to an interface or to a class. If it is to a
// interface then we need to search for that interface. From that
// slot number of the method in the interface we can calculate the offset
// into our vtable. If it is to a class it must be a subclass. This uses
// the fact that an interface only shows up once in the vtable.
EEClass* declClass = pDecl->GetClass();
BOOL fInterfaceFound = FALSE;
// Check our vtable for entries that we are suppose to override.
// Since this is an external method we must also check the inteface map.
// We want to replace any interface methods even if they have been replaced
// by a base class.
for(USHORT i = 0; i < m_wNumInterfaces; i++)
{
MethodTable* pMT;
EEClass * pInterface;
pMT = bmtInterface->pInterfaceMap[i].m_pMethodTable;
pInterface = pMT->GetClass();
// If this is the same interface
if(pInterface == declClass)
{
// We found an interface so no error
fInterfaceFound = TRUE;
// Find out where the interface map is set on our vtable
USHORT dwStartingSlot = (USHORT) bmtInterface->pInterfaceMap[i].m_wStartSlot;
// We need to duplicate the interface to avoid copies. Currently, interfaces
// do not overlap so we just need to check to see if there is a non-duplicated
// MD. If there is then the interface shares it with the class which means
// we need to copy the whole interface
WORD wSlot;
for(wSlot = dwStartingSlot; wSlot < pInterface->GetNumVtableSlots()+dwStartingSlot; wSlot++) {
MethodDesc* pMD = GetUnknownMethodDescForSlotAddress(bmtVT->pVtable[wSlot]);
if(pMD->GetSlot() == wSlot)
break;
}
// wSlot stopped early => at least one slot is shared with the class
// proper, so the whole interface span must be duplicated ("unfolded")
// to the end of the vtable before we can safely override it.
if(wSlot < pInterface->GetNumVtableSlots()+dwStartingSlot) {
// Check to see if we have allocated the temporary array of starting values.
// This array is used to backpatch entries to the original location. These
// values are never used but will cause problems later when we finish
// laying out the method table.
if(bmtInterface->pdwOriginalStart == NULL) {
Thread *pThread = GetThread();
_ASSERTE(pThread != NULL && "We could never have gotten this far without GetThread() returning a thread");
bmtInterface->pdwOriginalStart = (DWORD*) pThread->m_MarshalAlloc.Alloc(sizeof(DWORD) * bmtInterface->dwMaxExpandedInterfaces);
memset(bmtInterface->pdwOriginalStart, 0, sizeof(DWORD)*bmtInterface->dwMaxExpandedInterfaces);
}
_ASSERTE(bmtInterface->pInterfaceMap[i].m_wStartSlot != (WORD) 0 && "We assume that an interface does not start at position 0");
_ASSERTE(bmtInterface->pdwOriginalStart[i] == 0 && "We should not move an interface twice");
bmtInterface->pdwOriginalStart[i] = bmtInterface->pInterfaceMap[i].m_wStartSlot;
// The interface now starts at the end of the map.
bmtInterface->pInterfaceMap[i].m_wStartSlot = (WORD) bmtVT->dwCurrentVtableSlot;
for(WORD d = dwStartingSlot; d < pInterface->GetNumVtableSlots()+dwStartingSlot; d++) {
// Copy the MD
MethodDesc* pMDCopy = GetUnknownMethodDescForSlotAddress(bmtVT->pVtable[d]);
bmtVT->pVtable[bmtVT->dwCurrentVtableSlot++] = (SLOT) pMDCopy->GetPreStubAddr();
#ifdef _DEBUG
g_dupMethods++;
#endif
pMDCopy->SetDuplicate();
IncrementNumVtableSlots();
}
// Reset the starting slot to the known value
dwStartingSlot = (USHORT) bmtInterface->pInterfaceMap[i].m_wStartSlot;
}
// We found an interface so no error
fInterfaceFound = TRUE;
// Make sure we have placed the interface map.
_ASSERTE(dwStartingSlot != -1);
// Get the Slot location of the method desc.
USHORT dwMySlot = pDecl->GetSlot() + dwStartingSlot;
_ASSERTE(dwMySlot < bmtVT->dwCurrentVtableSlot);
// Get our current method desc for this slot
MethodDesc* pMD = GetUnknownMethodDescForSlotAddress(bmtVT->pVtable[dwMySlot]);
#if 0 //@todo CTS : need to check for multiple methodimpls to the same methoddef scenario
// Make sure we are not overridding another method impl
if(pMD != pImplBody && pMD->IsMethodImpl() && pMD->GetMethodTable() == NULL) {
bmtError->resIDWhy = IDS_CLASSLOAD_MI_MULTIPLEOVERRIDES;
bmtError->dMethodDefInError = pMD->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
#endif
// Get the real method desc. This method may have been overridden
// by another method impl higher up the class hierarchy.
MethodDesc* pRealDesc;
pInterface->GetRealMethodImpl(pDecl, dwMySlot, &pRealDesc);
// Make sure we have not overridden this entry
if(pRealDesc->IsMethodImpl()) {
bmtError->resIDWhy = IDS_CLASSLOAD_MI_OVERRIDEIMPL;
bmtError->dMethodDefInError = pMD->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// If we have not got the method impl signature go get it now. It is cached
// in our caller
if(*ppBodySignature == NULL) {
*ppBodySignature =
bmtInternal->pInternalImport->GetSigOfMethodDef(pImplBody->GetMemberDef(),
pcBodySignature);
}
// Verify the signatures match
PCCOR_SIGNATURE pDeclarationSignature = NULL;
DWORD cDeclarationSignature = 0;
pRealDesc->GetSig(&pDeclarationSignature,
&cDeclarationSignature);
// If they do not match then we are trying to implement
// a method with a body where the signatures do not match
if(!MetaSig::CompareMethodSigs(*ppBodySignature,
*pcBodySignature,
bmtInternal->pModule,
pDeclarationSignature,
cDeclarationSignature,
pRealDesc->GetModule()))
{
bmtError->resIDWhy = IDS_CLASSLOAD_MI_BADSIGNATURE;
bmtError->dMethodDefInError = pImplBody->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// If the body has not been placed then place it now.
if(pImplBody->GetSlot() == (USHORT) -1) {
pImplBody->SetSlot(dwMySlot);
}
// Store away the values
slots[*pSlotIndex] = dwMySlot;
replaced[*pSlotIndex] = pRealDesc;
bmtVT->pVtable[dwMySlot] = (SLOT) pImplBody->GetPreStubAddr();
// We are now a duplicate in an interface
pImplBody->SetDuplicate();
// increment the counter
(*pSlotIndex)++;
// if we have moved the interface we need to back patch the original location
// if we had left an interface place holder.
if(bmtInterface->pdwOriginalStart && bmtInterface->pdwOriginalStart[i] != 0) {
USHORT slot = (USHORT) bmtInterface->pdwOriginalStart[i] + pDecl->GetSlot();
MethodDesc* pMD = GetUnknownMethodDescForSlotAddress(bmtVT->pVtable[slot]);
if(pMD->GetMethodTable() && pMD->IsInterface())
bmtVT->pVtable[slot] = (SLOT) pImplBody->GetPreStubAddr();
}
break;
}
}
// The declaring interface was not in our interface map: the type claims to
// implement a method of an interface it does not implement.
if(fInterfaceFound == FALSE)
{
bmtError->resIDWhy = IDS_CLASSLOAD_MI_NOTIMPLEMENTED;
bmtError->dMethodDefInError = NULL;
bmtError->szMethodNameForError = pDecl->GetName();
IfFailRet(COR_E_TYPELOAD);
}
return hr;
}
// Used by BuildMethodTable's method-impl processing.
//
// Places a method impl whose declaration (pDecl) lives on a *parent class*
// of this class. Verifies the declaring class really is an ancestor,
// verifies the body's signature matches the declaration's, then overrides
// the declaration's slot (and every duplicate vtable slot that resolves to
// the same original MethodDesc) with pImplBody.
//
// Outputs mirror PlaceLocalDeclaration: each overridden slot number and the
// MethodDesc it displaces are appended to 'slots'/'replaced' (advancing
// *pSlotIndex), and *ppBodySignature/*pcBodySignature lazily cache the
// body's signature for reuse by the caller.
//
// Returns S_OK, or COR_E_TYPELOAD (with bmtError filled in) if the
// declaration is not on an ancestor, signatures mismatch, the declaration
// was sealed (final), or a methodimpl conflict is detected.
//
// Fix: removed the local 'fVerifySignature' that was declared and
// initialized but never read in this function (unlike its sibling
// PlaceLocalDeclaration, which does use it); the signature here is
// verified exactly once, up front, so no flag is needed.
HRESULT EEClass::PlaceParentDeclaration(MethodDesc* pDecl,
MethodDesc* pImplBody,
bmtInternalInfo* bmtInternal,
bmtErrorInfo* bmtError,
bmtVtable* bmtVT,
DWORD* slots,
MethodDesc** replaced,
DWORD* pSlotIndex,
PCCOR_SIGNATURE* ppBodySignature,
DWORD* pcBodySignature)
{
HRESULT hr = S_OK;
// Verify that the class of the declaration is in our hierarchy
EEClass* declType = pDecl->GetClass();
EEClass* pParent = GetParentClass();
while(pParent != NULL) {
if(declType == pParent)
break;
pParent = pParent->GetParentClass();
}
if(pParent == NULL) {
bmtError->resIDWhy = IDS_CLASSLOAD_MI_NOTIMPLEMENTED;
bmtError->dMethodDefInError = NULL;
bmtError->szMethodNameForError = pDecl->GetName();
IfFailRet(COR_E_TYPELOAD);
}
// Compare the signature for the token in the specified scope
// If we have not got the method impl signature go get it now
if(*ppBodySignature == NULL) {
*ppBodySignature =
bmtInternal->pInternalImport->GetSigOfMethodDef(pImplBody->GetMemberDef(),
pcBodySignature);
}
PCCOR_SIGNATURE pDeclarationSignature = NULL;
DWORD cDeclarationSignature = 0;
pDecl->GetSig(&pDeclarationSignature,
&cDeclarationSignature);
// If they do not match then we are trying to implement
// a method with a body where the signatures do not match
if(!MetaSig::CompareMethodSigs(*ppBodySignature,
*pcBodySignature,
bmtInternal->pModule,
pDeclarationSignature,
cDeclarationSignature,
pDecl->GetModule()))
{
bmtError->resIDWhy = IDS_CLASSLOAD_MI_BADSIGNATURE;
bmtError->dMethodDefInError = pImplBody->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// We get the method from the parents slot. We will replace the method that is currently
// defined in that slot and any duplicates for that method desc.
USHORT dwSlot = pDecl->GetSlot();
MethodDesc* pMD = GetUnknownMethodDescForSlotAddress(bmtVT->pVtable[dwSlot]);
// Make sure we are not overriding another method impl
// (GetMethodTable() == NULL means the methodimpl was introduced by this
// very class, whose method table is still under construction).
if(pMD != pImplBody && pMD->IsMethodImpl() && pMD->GetMethodTable() == NULL)
{
bmtError->resIDWhy = IDS_CLASSLOAD_MI_MULTIPLEOVERRIDES;
bmtError->dMethodDefInError = pMD->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// Get the real method desc (a base class may have overridden the method
// with a method impl)
MethodDesc* pReplaceDesc;
GetRealMethodImpl(pMD, dwSlot, &pReplaceDesc);
// Make sure we have not overridden this entry if it was declared within our own
// class. It is perfectly legitimate to override an inherited method.
if(pReplaceDesc->IsMethodImpl() && pReplaceDesc->GetMethodTable() == NULL)
{
bmtError->resIDWhy = IDS_CLASSLOAD_MI_OVERRIDEIMPL;
bmtError->dMethodDefInError = pMD->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// A final (sealed) declaration may not be re-implemented by a methodimpl.
DWORD dwAttr = pReplaceDesc->GetAttrs();
if(IsMdFinal(dwAttr))
{
//_ASSERTE(!"MethodImpl Decl may have been overridden by a final method");
bmtError->resIDWhy = IDS_CLASSLOAD_MI_FINAL_DECL;
bmtError->dMethodDefInError = pReplaceDesc->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// If the body has not been placed then place it here
if(pImplBody->GetSlot() == (USHORT) -1)
pImplBody->SetSlot(dwSlot);
slots[*pSlotIndex] = dwSlot;
replaced[*pSlotIndex] = pReplaceDesc;
bmtVT->pVtable[dwSlot] = (SLOT) pImplBody->GetPreStubAddr();
// increment the counter
(*pSlotIndex)++;
// we search for all duplicates
for(USHORT i = dwSlot+1; i < bmtVT->dwCurrentVtableSlot; i++)
{
pMD = GetUnknownMethodDescForSlotAddress(bmtVT->pVtable[i]);
MethodDesc* pRealDesc;
hr = GetRealMethodImpl(pMD, i, &pRealDesc);
if(pRealDesc == pReplaceDesc)
{
// We do not want to override a body to another method impl
if(pRealDesc->IsMethodImpl())
{
bmtError->resIDWhy = IDS_CLASSLOAD_MI_OVERRIDEIMPL;
bmtError->dMethodDefInError = pMD->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
// Make sure we are not overriding another method impl
if(pMD != pImplBody && pMD->IsMethodImpl() && pMD->GetMethodTable() == NULL)
{
bmtError->resIDWhy = IDS_CLASSLOAD_MI_MULTIPLEOVERRIDES;
bmtError->dMethodDefInError = pMD->GetMemberDef();
bmtError->szMethodNameForError = NULL;
IfFailRet(COR_E_TYPELOAD);
}
slots[*pSlotIndex] = i;
replaced[*pSlotIndex] = pRealDesc;
bmtVT->pVtable[i] = (SLOT) pImplBody->GetPreStubAddr();
// increment the counter
(*pSlotIndex)++;
}
// Clean up possible S_FALSE from GetRealMethodImpl
hr = S_OK;
}
return hr;
}
// Resolves the MethodDesc that *originally* occupied a vtable slot, seeing
// through any methodimpl override currently sitting in it.
//
// Parameters:
//   pMD          - the MethodDesc currently found in the slot
//   dwVtableSlot - the slot number being inspected
//   ppResult     - [out] receives the original (pre-methodimpl) MethodDesc,
//                  or pMD itself if the slot was never overridden
//
// Returns S_OK when pMD is not a methodimpl (slot untouched), S_FALSE when
// an override was unwound. Callers that thread the HRESULT through other
// logic must reset S_FALSE back to S_OK themselves.
HRESULT EEClass::GetRealMethodImpl(MethodDesc* pMD,
DWORD dwVtableSlot,
MethodDesc** ppResult)
{
_ASSERTE(ppResult);

// Common case: an ordinary method occupies the slot, so it is its own
// "real" descriptor.
if (!pMD->IsMethodImpl())
{
*ppResult = pMD;
return S_OK;
}

// The slot holds a methodimpl body; look up the MethodDesc it displaced
// from this particular slot.
MethodImpl* pImplData = MethodImpl::GetMethodImplData(pMD);
_ASSERTE(pImplData && "This method should be a method impl");
*ppResult = pImplData->FindMethodDesc(dwVtableSlot, pMD);
return S_FALSE;
}
//
// Used by BuildMethodTable
//
// If we're a value class, we want to create duplicate slots and MethodDescs for all methods in the vtable
// section (i.e. not privates or statics).
//
// Used by BuildMethodTable.
//
// For value classes only: manufactures an "unboxing" duplicate MethodDesc
// for every virtual, non-static, non-special-name instance method. The
// duplicate is given a non-vtable slot and is recorded in
// bmtMFDescs->ppUnboxMethodDescList; the original MethodDesc is tagged
// (via SetRVA(METHOD_MAX_RVA)) as one that takes a BOXed 'this'.
//
// The duplicate is created by a raw memcpy of the original descriptor into
// the next free cell of the appropriate MethodDescChunk (the chunk/index
// cursors in the bmtTokenRangeNode are advanced as allocation proceeds),
// after which only the fields that must differ - chunk index, member def,
// slot number, prestub - are re-initialized.
//
// No-op (returns S_OK) for non-value classes.
HRESULT EEClass::DuplicateValueClassSlots(bmtMetaDataInfo* bmtMetaData, bmtMethAndFieldDescs* bmtMFDescs, bmtInternalInfo* bmtInternal, bmtVtable* bmtVT)
{
HRESULT hr = S_OK;
DWORD i;
// If we're a value class, we want to create duplicate slots and MethodDescs for all methods in the vtable
// section (i.e. not privates or statics).
// TODO: we duplicate every instance method. The Vtable is really not used (except for inherited and
// interface part), so we could shrink the table substantially.
if (IsValueClass())
{
for (i = 0; i < bmtMetaData->cMethods; i++)
{
MethodDesc *pMD;
MethodDesc *pNewMD;
DWORD dwAttrs;
DWORD Classification;
pMD = bmtMFDescs->ppMethodDescList[i];
if (pMD == NULL)
continue;
dwAttrs = bmtMetaData->pMethodAttrs[i];
Classification = bmtMetaData->pMethodClassifications[i];
DWORD type = bmtMetaData->pMethodType[i];
DWORD impl = bmtMetaData->pMethodImpl[i];
// Skip methods that never go through the vtable: statics,
// non-virtuals, and special-name methods (e.g. ctors).
if (IsMdStatic(dwAttrs) ||
!IsMdVirtual(dwAttrs) ||
IsMdRTSpecialName(dwAttrs))
continue;
// Locate the pre-reserved MethodDescChunk cell for this duplicate.
bmtTokenRangeNode *pTR = GetTokenRange(bmtMetaData->pMethods[i],
&(bmtMetaData->ranges[type][impl]));
_ASSERTE(pTR->cMethods != 0);;
bmtMethodDescSet *set = &bmtMFDescs->sets[type][impl];
pNewMD = set->pChunkList[pTR->dwCurrentChunk]->GetMethodDescAt(pTR->dwCurrentIndex);
// Raw copy of the descriptor body (the METHOD_PREPAD stub area is
// excluded and re-emitted below).
memcpy(pNewMD, pMD,
set->pChunkList[pTR->dwCurrentChunk]->GetMethodDescSize()
- METHOD_PREPAD);
pNewMD->SetChunkIndex(pTR->dwCurrentIndex, Classification);
pNewMD->SetMemberDef(pMD->GetMemberDef());
// Update counters to prepare for next method desc allocation.
pTR->dwCurrentIndex++;
if (pTR->dwCurrentIndex == MethodDescChunk::GetMaxMethodDescs(Classification))
{
pTR->dwCurrentChunk++;
pTR->dwCurrentIndex = 0;
}
bmtMFDescs->ppUnboxMethodDescList[i] = pNewMD;
// The duplicate lives in the non-vtable section of the method table.
pNewMD->m_wSlotNumber = (WORD) bmtVT->dwCurrentNonVtableSlot;
emitStubCall(pNewMD, (BYTE*)(ThePreStub()->GetEntryPoint()));
// Indicate that this method takes a BOXed this pointer.
pMD->SetRVA(METHOD_MAX_RVA);
bmtVT->pNonVtable[ bmtVT->dwCurrentNonVtableSlot ] = (SLOT) pNewMD; // not pre-stub addr, refer to statics above
bmtVT->dwCurrentNonVtableSlot++;
}
}
return hr;
}
//
// Used by BuildMethodTable
//
//
// If we are a class, then there may be some unplaced vtable methods (which are by definition
// interface methods, otherwise they'd already have been placed). Place as many unplaced methods
// as possible, in the order preferred by interfaces. However, do not allow any duplicates - once
// a method has been placed, it cannot be placed again - if we are unable to neatly place an interface,
// create duplicate slots for it starting at dwCurrentDuplicateVtableSlot. Fill out the interface
// map for all interfaces as they are placed.
//
// If we are an interface, then all methods are already placed. Fill out the interface map for
// interfaces as they are placed.
//
// Places interface methods into the vtable and fills out the interface map
// (see the block comment above for the class/interface split). For each
// implemented interface: resolve every interface method to the MethodDesc
// that will implement it (explicit class method, inherited parent method,
// or the interface's own descriptor as a placeholder), then append any
// newly-required slots to the end of the vtable.
HRESULT EEClass::PlaceVtableMethods(bmtInterfaceInfo* bmtInterface,
bmtVtable* bmtVT,
bmtMetaDataInfo* bmtMetaData,
bmtInternalInfo* bmtInternal,
bmtErrorInfo* bmtError,
bmtProperties* bmtProp,
bmtMethAndFieldDescs* bmtMFDescs)
{
HRESULT hr = S_OK;
DWORD i;
BOOL fParentInterface;
for (bmtInterface->dwCurInterface = 0;
bmtInterface->dwCurInterface < m_wNumInterfaces;
bmtInterface->dwCurInterface++)
{
MethodTable* pMT;
EEClass * pInterface;
DWORD dwCurInterfaceMethod;
fParentInterface = FALSE;
// The interface we are attempting to place
pMT = bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_pMethodTable;
pInterface = pMT->GetClass();
// Security check: implementing a non-public interface from another
// assembly requires skip-verification permission.
if((bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_wFlags &
InterfaceInfo_t::interface_declared_on_class) &&
!pInterface->IsExternallyVisible() &&
pInterface->GetAssembly() != bmtInternal->pModule->GetAssembly())
{
if (!Security::CanSkipVerification(GetAssembly())) {
bmtError->resIDWhy = IDS_CLASSLOAD_GENERIC;
IfFailRet(COR_E_TYPELOAD);
}
}
// Did we place this interface already due to the parent class's interface placement?
if (bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_wStartSlot != (WORD) -1) {
// If we have declared it then we re-lay it out
if(bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_wFlags &
InterfaceInfo_t::interface_declared_on_class)
{
fParentInterface = TRUE;
// If the interface has a folded method from a base class we need to unfold the
// interface
WORD wSlot = bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_wStartSlot;
for(WORD j = 0; j < pInterface->GetNumVtableSlots(); j++) {
MethodDesc* pMD = GetUnknownMethodDescForSlotAddress(bmtVT->pVtable[j+wSlot]);
if(pMD->GetSlot() == j+wSlot) {
// A non-duplicate MD means this span is shared with the
// class itself; reset and re-place the interface fresh.
bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_wStartSlot = (WORD) -1;
fParentInterface = FALSE;
break;
}
}
}
else
continue;
}
if (pInterface->GetNumVtableSlots() == 0)
{
// no calls can be made to this interface anyway
// so initialize the slot number to 0
bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_wStartSlot = (WORD) 0;
continue;
}
// If this interface has not been given a starting position do that now.
if(!fParentInterface)
bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_wStartSlot = (WORD) bmtVT->dwCurrentVtableSlot;
// For each method declared in this interface
for (dwCurInterfaceMethod = 0; dwCurInterfaceMethod < pInterface->GetNumVtableSlots(); dwCurInterfaceMethod++)
{
DWORD dwMemberAttrs;
// See if we have info gathered while placing members
if (bmtInterface->pppInterfaceImplementingMD[bmtInterface->dwCurInterface] && bmtInterface->pppInterfaceImplementingMD[bmtInterface->dwCurInterface][dwCurInterfaceMethod] != NULL)
{
bmtInterface->ppInterfaceMethodDescList[dwCurInterfaceMethod] = bmtInterface->pppInterfaceImplementingMD[bmtInterface->dwCurInterface][dwCurInterfaceMethod];
continue;
}
MethodDesc *pInterfaceMD = pMT->GetClass()->GetMethodDescForSlot(dwCurInterfaceMethod);
_ASSERTE(pInterfaceMD != NULL);
LPCUTF8 pszInterfaceMethodName = pInterfaceMD->GetNameOnNonArrayClass();
PCCOR_SIGNATURE pInterfaceMethodSig;
DWORD cInterfaceMethodSig;
pInterfaceMD->GetSig(&pInterfaceMethodSig, &cInterfaceMethodSig);
// Try to find the method explicitly declared in our class
// (match by name, then by full signature comparison).
for (i = 0; i < bmtMetaData->cMethods; i++)
{
// look for interface method candidates only
dwMemberAttrs = bmtMetaData->pMethodAttrs[i];
if (IsMdVirtual(dwMemberAttrs) && IsMdPublic(dwMemberAttrs))
{
LPCUTF8 pszMemberName;
pszMemberName = bmtMetaData->pstrMethodName[i];
#ifdef _DEBUG
if(m_fDebuggingClass && g_pConfig->ShouldBreakOnMethod(pszMemberName))
_ASSERTE(!"BreakOnMethodName");
#endif
if (pszMemberName == NULL)
{
IfFailRet(COR_E_TYPELOAD);
}
if (strcmp(pszMemberName,pszInterfaceMethodName) == 0)
{
PCCOR_SIGNATURE pMemberSignature;
DWORD cMemberSignature;
_ASSERTE(TypeFromToken(bmtMetaData->pMethods[i]) == mdtMethodDef);
pMemberSignature = bmtInternal->pInternalImport->GetSigOfMethodDef(
bmtMetaData->pMethods[i],
&cMemberSignature
);
if (MetaSig::CompareMethodSigs(
pMemberSignature,
cMemberSignature,
bmtInternal->pModule,
pInterfaceMethodSig,
cInterfaceMethodSig,
pInterfaceMD->GetModule()))
{
break;
}
}
}
} // end ... try to find method
_ASSERTE(dwCurInterfaceMethod < bmtInterface->dwLargestInterfaceSize);
DWORD dwHashName = HashStringA(pszInterfaceMethodName);
// i >= cMethods means the name/signature scan above found no explicit
// implementation declared on this class.
if (i >= bmtMetaData->cMethods)
{
// if this interface has been laid out by our parent then
// we do not need to define a new method desc for it
if(fParentInterface)
{
bmtInterface->ppInterfaceMethodDescList[dwCurInterfaceMethod] = NULL;
}
else
{
// We will use the interface implementation if we do not find one in the
// parent. It will have to be overridden by a method impl unless the
// class is abstract or it is a special COM type class.
MethodDesc* pParentMD = NULL;
if(GetParentClass())
{
// Check the parent class
if (CouldMethodExistInClass(GetParentClass(), pszInterfaceMethodName, dwHashName)) {
#ifdef _DEBUG
if(m_fDebuggingClass && g_pConfig->ShouldBreakOnMethod(pszInterfaceMethodName))
_ASSERTE(!"BreakOnMethodName");
#endif
pParentMD =
GetParentClass()->FindMethod(pszInterfaceMethodName,
pInterfaceMethodSig,
cInterfaceMethodSig,
pInterfaceMD->GetModule(),
mdPublic | mdVirtual);
}
}
// make sure we do a better back patching for these methods
if(pParentMD) {
//_ASSERTE(IsMdVirtual(pParentMD->GetAttrs()));
bmtInterface->ppInterfaceMethodDescList[dwCurInterfaceMethod] = pParentMD;
}
else {
bmtInterface->ppInterfaceMethodDescList[dwCurInterfaceMethod] = pInterfaceMD;
// Hash that a method with this name exists in this class
// Note that ctors and static ctors are not added to the table
// (this inner dwHashName intentionally shadows the outer one).
DWORD dwHashName = HashStringA(pInterfaceMD->GetNameOnNonArrayClass());
DWORD dwMethodHashBit = dwHashName % METHOD_HASH_BITS;
m_MethodHash[dwMethodHashBit >> 3] |= (1 << (dwMethodHashBit & 7));
}
}
}
else
{
// Found as declared method in class. If the interface was laid out by the parent we
// will be overriding their slot so our method counts do not increase. We will fold
// our method into our parent's interface if we have not been placed.
if(fParentInterface)
{
WORD dwSlot = (WORD) (bmtInterface->pInterfaceMap[bmtInterface->dwCurInterface].m_wStartSlot + dwCurInterfaceMethod);
_ASSERTE(bmtVT->dwCurrentVtableSlot > dwSlot);
MethodDesc *pMD = bmtMFDescs->ppMethodDescList[i];
_ASSERTE(pMD && "Missing MethodDesc for declared method in class.");
if(pMD->m_wSlotNumber == (WORD) -1)
{
pMD->m_wSlotNumber = dwSlot;
}
else
{
pMD->SetDuplicate();
#ifdef _DEBUG
g_dupMethods++;
#endif
}
bmtVT->pVtable[dwSlot] = (SLOT) pMD->GetPreStubAddr();
_ASSERTE( bmtVT->pVtable[dwSlot] != NULL);
bmtInterface->ppInterfaceMethodDescList[dwCurInterfaceMethod] = NULL;
}
else {
bmtInterface->ppInterfaceMethodDescList[dwCurInterfaceMethod] = (MethodDesc*)(bmtMFDescs->ppMethodDescList[i]);
}
}
}
// Second pass: append a vtable slot for every method resolved above that
// was not folded into a parent-placed interface span.
for (i = 0; i < pInterface->GetNumVtableSlots(); i++)
{
// The entry can be null if the interface was previously
// laid out by a parent and we did not have a method
// that subclassed the interface.
if(bmtInterface->ppInterfaceMethodDescList[i] != NULL)
{
// Get the MethodDesc which was allocated for the method
MethodDesc *pMD;
pMD = bmtInterface->ppInterfaceMethodDescList[i];
if (pMD->m_wSlotNumber == (WORD) -1)
{
pMD->m_wSlotNumber = (WORD) bmtVT->dwCurrentVtableSlot;
}
else
{
// duplicate method, mark the method as so
pMD->SetDuplicate();
#ifdef _DEBUG
g_dupMethods++;
#endif
}
_ASSERTE( bmtVT->pVtable[ bmtVT->dwCurrentVtableSlot ] == NULL);
bmtVT->pVtable[bmtVT->dwCurrentVtableSlot++] = (SLOT) pMD->GetPreStubAddr();
_ASSERTE( bmtVT->pVtable[(bmtVT->dwCurrentVtableSlot - 1)] != NULL);
IncrementNumVtableSlots();
}
}
}
return hr;
}
//
// Used by BuildMethodTable
//
// Place static fields
//
// Lays out all static fields of the class.
//
// Statics are placed in two regions:
//   1. GC refs and (boxed) value-type statics first, so their handles can
//      be created as one contiguous run when the class is restored or new
//      domain-local storage is initialized.
//   2. Primitive statics after that, bucketed by size, largest bucket
//      first, which keeps every field naturally aligned without padding.
//
// For non-shared classes the statics follow the non-vtable slots of the
// method table, so offsets are biased by dwCurrentNonVtableSlot*sizeof(SLOT);
// for shared classes offsets are relative to a separate block, starting at 0.
//
// Outputs: each static FieldDesc's offset (SetOffset), m_wNumHandleStatics
// (count of handle-requiring statics), and bmtVT->dwStaticFieldBytes /
// dwStaticGCFieldBytes (region sizes, de-biased for non-shared classes).
//
// Returns COR_E_TYPELOAD if the statics exceed the largest representable
// real field offset; S_OK otherwise.
//
// Fix: the FIELD_OFFSET_UNPLACED case previously fell through into the
// (empty) default case with no break; made the break explicit so a future
// edit to 'default' cannot silently change placement of unplaced statics.
HRESULT EEClass::PlaceStaticFields(bmtVtable* bmtVT, bmtFieldPlacement* bmtFP, bmtEnumMethAndFields* bmtEnumMF)
{
HRESULT hr = S_OK;
DWORD i;
//===============================================================
// BEGIN: Place static fields
//===============================================================
BOOL shared = IsShared();
DWORD dwCumulativeStaticFieldPos;
// If stored in the method table, static fields start after the end of the vtable
if (shared)
dwCumulativeStaticFieldPos = 0;
else
dwCumulativeStaticFieldPos = bmtVT->dwCurrentNonVtableSlot*sizeof(SLOT);
//
// Place gc refs and value types first, as they need to have handles created for them.
// (Placing them together allows us to easily create the handles when Restoring the class,
// and when initializing new DLS for the class.)
//
DWORD dwCumulativeStaticGCFieldPos;
dwCumulativeStaticGCFieldPos = dwCumulativeStaticFieldPos;
dwCumulativeStaticFieldPos += bmtFP->NumStaticGCPointerFields << LOG2PTR;
bmtFP->NumStaticFieldsOfSize[LOG2PTR] -= bmtFP->NumStaticGCPointerFields;
// @todo: pad to alignment, if we other fields which are > ptr size.
// Place fields, largest first
for (i = MAX_LOG2_PRIMITIVE_FIELD_SIZE; (signed long) i >= 0; i--)
{
DWORD dwFieldSize = (1 << i);
// Fields of this size start at the next available location
bmtFP->StaticFieldStart[i] = dwCumulativeStaticFieldPos;
dwCumulativeStaticFieldPos += (bmtFP->NumStaticFieldsOfSize[i] << i);
// Reset counters for the loop after this one
bmtFP->NumStaticFieldsOfSize[i] = 0;
}
if (dwCumulativeStaticFieldPos > FIELD_OFFSET_LAST_REAL_OFFSET)
IfFailRet(COR_E_TYPELOAD);
m_wNumHandleStatics = 0;
// Place static fields
for (i = 0; i < bmtEnumMF->dwNumStaticFields; i++)
{
DWORD dwIndex = bmtEnumMF->dwNumInstanceFields+i; // index in the FieldDesc list
DWORD dwFieldSize = (DWORD)(size_t)m_pFieldDescList[dwIndex].m_pMTOfEnclosingClass; // log2(field size)
DWORD dwOffset = (DWORD) m_pFieldDescList[dwIndex].m_dwOffset; // offset or type of field
switch (dwOffset)
{
case FIELD_OFFSET_UNPLACED_GC_PTR:
case FIELD_OFFSET_VALUE_CLASS:
// GC ref or boxed value-type static: next pointer-sized cell in
// the handle region.
m_pFieldDescList[dwIndex].SetOffset(dwCumulativeStaticGCFieldPos);
dwCumulativeStaticGCFieldPos += 1<<LOG2PTR;
m_wNumHandleStatics++;
break;
case FIELD_OFFSET_UNPLACED:
// Primitive static: next cell in this field-size's bucket.
m_pFieldDescList[dwIndex].SetOffset(bmtFP->StaticFieldStart[dwFieldSize] + (bmtFP->NumStaticFieldsOfSize[dwFieldSize] << dwFieldSize));
bmtFP->NumStaticFieldsOfSize[dwFieldSize]++;
break; // previously fell through into the empty default; now explicit
default:
// RVA field - offset was already assigned from metadata
break;
}
}
if (shared)
{
bmtVT->dwStaticFieldBytes = dwCumulativeStaticFieldPos;
bmtVT->dwStaticGCFieldBytes = dwCumulativeStaticGCFieldPos;
}
else
{
bmtVT->dwStaticFieldBytes = dwCumulativeStaticFieldPos - bmtVT->dwCurrentNonVtableSlot*sizeof(SLOT);
bmtVT->dwStaticGCFieldBytes = dwCumulativeStaticGCFieldPos - bmtVT->dwCurrentNonVtableSlot*sizeof(SLOT);
}
//===============================================================
// END: Place static fields
//===============================================================
return hr;
}
//
// Used by BuildMethodTable
//
// Place instance fields
//
HRESULT EEClass::PlaceInstanceFields(bmtFieldPlacement* bmtFP, bmtEnumMethAndFields* bmtEnumMF,
bmtParentInfo* bmtParent, bmtErrorInfo *bmtError,
EEClass*** pByValueClassCache)
{
HRESULT hr = S_OK;
DWORD i;
//===============================================================
// BEGIN: Place instance fields
//===============================================================
DWORD dwCumulativeInstanceFieldPos;
// Instance fields start right after the parent
dwCumulativeInstanceFieldPos = (GetParentClass() != NULL) ? GetParentClass()->m_dwNumInstanceFieldBytes : 0;
// The parent may have a number of field bytes that is not DWORD aligned, so use 2 byte and 1 byte
// fields to pad out to the next DWORD if we can.
// @TODO: When GC pads on QWORD boundaries, look at doing the same
if (dwCumulativeInstanceFieldPos & 1)
{
if (bmtFP->NumInstanceFieldsOfSize[0] > 0)
{
// Place the first field of size 1
m_pFieldDescList[ bmtFP->FirstInstanceFieldOfSize[0] ].SetOffset(dwCumulativeInstanceFieldPos);
dwCumulativeInstanceFieldPos++;
// We've placed this field now, so there is now one less of this size field to place
// Update FirstInstanceFieldOfSize[0] to point to the next such field
if (--bmtFP->NumInstanceFieldsOfSize[0] > 0)
{
// Search for next field of size 1
for (i = bmtFP->FirstInstanceFieldOfSize[0]+1; i < bmtEnumMF->dwNumInstanceFields; i++)
{
// The log of the field size is stored in the method table
if (m_pFieldDescList[i].m_pMTOfEnclosingClass == (MethodTable *) 0)
{
bmtFP->FirstInstanceFieldOfSize[0] = i;
break;
}
}
_ASSERTE(i < bmtEnumMF->dwNumInstanceFields);
}
}
}
// If we're word aligned, but not dword aligned
if ((dwCumulativeInstanceFieldPos & 3) == 2)
{
// Try to place a WORD field
// @TODO: Better to place a USHORT field or two BYTE fields?
if (bmtFP->NumInstanceFieldsOfSize[1] > 0)
{
// Place the first field of size 2
m_pFieldDescList[ bmtFP->FirstInstanceFieldOfSize[1] ].SetOffset(dwCumulativeInstanceFieldPos);
dwCumulativeInstanceFieldPos += 2;
// We've placed this field now, so there is now one less of this size field to place
// Don't bother to update FirstInstanceOfFieldSize[1], since we don't use that array any
// more - we're done aligning
bmtFP->NumInstanceFieldsOfSize[1]--;
}
else if (bmtFP->NumInstanceFieldsOfSize[0] >= 2)
{
// Place two fields of size 1
m_pFieldDescList[ bmtFP->FirstInstanceFieldOfSize[0] ].SetOffset(dwCumulativeInstanceFieldPos);
dwCumulativeInstanceFieldPos++;
// We've placed this field now, so there is now one less of this size field to place
bmtFP->NumInstanceFieldsOfSize[0]--;
// Find next field of this size
// Don't bother to update FirstInstanceOfFieldSize[0], since we don't use that array any
// more - we're done aligning
for (i = bmtFP->FirstInstanceFieldOfSize[0]+1; i < bmtEnumMF->dwNumInstanceFields; i++)
{
// The log of the field size is stored in the method table
// Since we're continuing a progressive search through the list, we know we won't
// be placing an already-placed field
if (m_pFieldDescList[i].m_pMTOfEnclosingClass == (MethodTable *) 0)
{
// Place field #2
m_pFieldDescList[ i ].SetOffset(dwCumulativeInstanceFieldPos);
dwCumulativeInstanceFieldPos++;
bmtFP->NumInstanceFieldsOfSize[0]--;
break;
}
}
_ASSERTE(i < bmtEnumMF->dwNumInstanceFields);
}
}
// Align instance fields on a DWORD boundary if we aren't already
// Static fields are auto-aligned, since they appear after the vtable
if (dwCumulativeInstanceFieldPos & 3)
dwCumulativeInstanceFieldPos = (dwCumulativeInstanceFieldPos+3) & (~3);
// Place fields, largest first
for (i = MAX_LOG2_PRIMITIVE_FIELD_SIZE; (signed long) i >= 0; i--)
{
DWORD dwFieldSize = (1 << i);
// Fields of this size start at the next available location
bmtFP->InstanceFieldStart[i] = dwCumulativeInstanceFieldPos;
dwCumulativeInstanceFieldPos += (bmtFP->NumInstanceFieldsOfSize[i] << i);
// Reset counters for the loop after this one
bmtFP->NumInstanceFieldsOfSize[i] = 0;
}
// Make corrections to reserve space for GC Pointer Fields
//
// The GC Pointers simply take up the top part of the region associated
// with fields of that size (GC pointers can be 64 bit on certain systems)
if (bmtFP->NumInstanceGCPointerFields)
{
bmtFP->GCPointerFieldStart = bmtFP->InstanceFieldStart[LOG2SLOT];
bmtFP->InstanceFieldStart[LOG2SLOT] = bmtFP->InstanceFieldStart[LOG2SLOT] + (bmtFP->NumInstanceGCPointerFields << LOG2SLOT);
bmtFP->NumInstanceGCPointerFields = 0; // reset to zero here, counts up as pointer slots are assigned below
}
// Place instance fields - be careful not to place any already-placed fields
for (i = 0; i < bmtEnumMF->dwNumInstanceFields; i++)
{
DWORD dwFieldSize = (DWORD)(size_t)m_pFieldDescList[i].m_pMTOfEnclosingClass;
DWORD dwOffset;
dwOffset = m_pFieldDescList[i].GetOffset();
// Don't place already-placed fields
if ((dwOffset == FIELD_OFFSET_UNPLACED || dwOffset == FIELD_OFFSET_UNPLACED_GC_PTR || dwOffset == FIELD_OFFSET_VALUE_CLASS))
{
if (dwOffset == FIELD_OFFSET_UNPLACED_GC_PTR)
{
m_pFieldDescList[i].SetOffset(bmtFP->GCPointerFieldStart + (bmtFP->NumInstanceGCPointerFields << LOG2SLOT));
bmtFP->NumInstanceGCPointerFields++;
}
else if (m_pFieldDescList[i].IsByValue() == FALSE) // it's a regular field
{
m_pFieldDescList[i].SetOffset(bmtFP->InstanceFieldStart[dwFieldSize] + (bmtFP->NumInstanceFieldsOfSize[dwFieldSize] << dwFieldSize));
bmtFP->NumInstanceFieldsOfSize[dwFieldSize]++;
}
}
}
// Save Number of pointer series
if (bmtFP->NumInstanceGCPointerFields)
m_wNumGCPointerSeries = bmtParent->NumParentPointerSeries + 1;
else
m_wNumGCPointerSeries = bmtParent->NumParentPointerSeries;
// Place by value class fields last
// Update the number of GC pointer series
for (i = 0; i < bmtEnumMF->dwNumInstanceFields; i++)
{
if (m_pFieldDescList[i].IsByValue())
{
_ASSERTE(*pByValueClassCache != NULL);
EEClass *pByValueClass = (*pByValueClassCache)[i];
// value classes could have GC pointers in them, which need to be DWORD aligned
// so do this if it has not been done already
if (dwCumulativeInstanceFieldPos & 3)
dwCumulativeInstanceFieldPos = (dwCumulativeInstanceFieldPos+3) & (~3);
m_pFieldDescList[i].SetOffset(dwCumulativeInstanceFieldPos);
dwCumulativeInstanceFieldPos += pByValueClass->GetAlignedNumInstanceFieldBytes();
// Add pointer series for by-value classes
m_wNumGCPointerSeries += pByValueClass->m_wNumGCPointerSeries;
}
}
// Can be unaligned
m_dwNumInstanceFieldBytes = dwCumulativeInstanceFieldPos;
if (IsValueClass())
{
// The JITs like to copy full machine words, so if the size bigger
// than a void* round it up
if(m_dwNumInstanceFieldBytes > sizeof(void*) / 2)
m_dwNumInstanceFieldBytes = (m_dwNumInstanceFieldBytes + sizeof(void*)-1) & ~(sizeof(void*)-1);
// Like C++ we enforce that there can be no 0 length structures.
// Thus for a value class with no fields, we 'pad' the length to be 1
else if (m_dwNumInstanceFieldBytes == 0)
m_dwNumInstanceFieldBytes++;
}
if (m_dwNumInstanceFieldBytes > FIELD_OFFSET_LAST_REAL_OFFSET) {
bmtError->resIDWhy = IDS_CLASSLOAD_FIELDTOOLARGE;
IfFailRet(COR_E_TYPELOAD);
}
//===============================================================
// END: Place instance fields
//===============================================================
return hr;
}
// This accesses the field size, which is temporarily stored in m_pMTOfEnclosingClass
// during class loading. Do not use it at any other time.
DWORD EEClass::GetFieldSize(FieldDesc *pFD)
{
    // Valid only while this class is still being built: until the real
    // MethodTable exists, the field's size information is stashed in the
    // FieldDesc's m_pMTOfEnclosingClass slot.
    _ASSERTE(m_pMethodTable == 0);
    _ASSERTE(! pFD->IsByValue() || HasExplicitFieldOffsetLayout());

    DWORD dwStashed = (DWORD)(size_t)(pFD->m_pMTOfEnclosingClass);

    // By-value fields stash the byte size directly; primitive fields stash
    // log2 of the size.
    if (pFD->IsByValue())
        return dwStashed;

    return (1 << dwStashed);
}
// make sure that no object fields are overlapped incorrectly and define the
// GC pointer series for the class. We are assuming that this class will always be laid out within
// its enclosing class by the compiler in such a way that offset 0 will be the correct alignment
// for object ref fields so we don't need to try to align it
HRESULT EEClass::HandleExplicitLayout(bmtMetaDataInfo *bmtMetaData, bmtMethAndFieldDescs *bmtMFDescs, EEClass **pByValueClassCache, bmtInternalInfo* bmtInternal, bmtGCSeries *pGCSeries, bmtErrorInfo *bmtError)
{
    // need to calculate instance size as can't use nativeSize or anything else that
    // has been previously calculated.
    UINT instanceSliceSize = 0;
    BOOL fVerifiable = TRUE;   // cleared when the layout is legal but not verifiable
    BOOL fOverLayed = FALSE;   // set when any two fields overlap
    HRESULT hr = S_OK;
    // Pass 1: the instance slice extends to the largest field end-offset seen.
    for (UINT i=0; i < bmtMetaData->cFields; i++) {
        FieldDesc *pFD = bmtMFDescs->ppFieldDescList[i];
        if (!pFD)
            continue;
        if (pFD->IsStatic())
            continue;
        UINT fieldExtent = pFD->GetOffset() + GetFieldSize(pFD);
        if (fieldExtent > instanceSliceSize)
            instanceSliceSize = fieldExtent;
    }
    // Build a per-byte tag map of the slice: empty / oref / nonoref.
    // NOTE: 'i' is reused below; this code relies on legacy (pre-conformant)
    // for-init scoping rules where 'i' remains in scope after the loop.
    char *pFieldLayout = (char*)alloca(instanceSliceSize);
    for (i=0; i < instanceSliceSize; i++)
        pFieldLayout[i] = empty;
    // go through each field and look for invalid layout
    // verify that every OREF is on a valid alignment
    // verify that only OREFs overlap
    char emptyObject[4] = {empty, empty, empty, empty};
    char isObject[4] = {oref, oref, oref, oref};
    UINT badOffset = 0;        // offset reported if we break out with an error
    int firstOverlay = -1;     // offset of the first unverifiable overlay, for error reporting
    FieldDesc *pFD = NULL;
    for (i=0; i < bmtMetaData->cFields; i++) {
        pFD = bmtMFDescs->ppFieldDescList[i];
        if (!pFD)
            continue;
        if (pFD->IsStatic())
            continue;
        if (CorTypeInfo::IsObjRef(pFD->GetFieldType())) {
            // Object refs must be pointer-size aligned.
            if (pFD->GetOffset() & ((ULONG)sizeof(OBJECTREF) - 1)) {
                badOffset = pFD->GetOffset();
                break;
            }
            // check if overlaps another object
            if (memcmp((void *)&pFieldLayout[pFD->GetOffset()], (void *)&isObject, sizeof(isObject)) == 0) {
                // oref laid exactly over another oref: legal, but unverifiable.
                fVerifiable = FALSE;
                fOverLayed = TRUE;
                if(firstOverlay == -1) firstOverlay = pFD->GetOffset();
                continue;
            }
            // check if is empty at this point
            if (memcmp((void *)&pFieldLayout[pFD->GetOffset()], (void *)&emptyObject, sizeof(emptyObject)) == 0) {
                memset((void *)&pFieldLayout[pFD->GetOffset()], oref, sizeof(isObject));
                continue;
            }
            badOffset = pFD->GetOffset();
            break;
            // anything else is an error
        } else {
            UINT fieldSize;
            if (pFD->IsByValue()) {
                EEClass *pByValue = pByValueClassCache[i];
                if (pByValue->GetMethodTable()->ContainsPointers()) {
                    // Embedded value class with GC pointers: must be pointer-aligned,
                    // and its internal oref/nonoref pattern must be compatible with
                    // whatever is already at this offset.
                    if ((pFD->GetOffset() & ((ULONG)sizeof(void*) - 1)) == 0)
                    {
                        hr = pByValue->CheckValueClassLayout(&pFieldLayout[pFD->GetOffset()], pFD->GetOffset(), &fVerifiable);
                        if(SUCCEEDED(hr)) {
                            if(hr == S_FALSE)
                                fOverLayed = TRUE;
                            // see if this overlays other
                            continue;
                        }
                    }
                    // anything else is an error
                    badOffset = pFD->GetOffset();
                    break;
                }
                // no pointers so fall through to do standard checking
                fieldSize = pByValue->m_dwNumInstanceFieldBytes;
            } else {
                // field size temporarily stored in pMT field
                fieldSize = GetFieldSize(pFD);
            }
            // look for any orefs under this field
            char *loc;
            if ((loc = (char*)memchr((void*)&pFieldLayout[pFD->GetOffset()], oref, fieldSize)) == NULL) {
                // If we have a nonoref in the range then we are doing an overlay
                if( memchr((void*)&pFieldLayout[pFD->GetOffset()], nonoref, fieldSize))
                    fOverLayed = TRUE;
                memset((void*)&pFieldLayout[pFD->GetOffset()], nonoref, fieldSize);
                continue;
            }
            // Non-oref data laid over an oref: illegal.
            badOffset = (UINT)(loc - pFieldLayout);
            break;
            // anything else is an error
        }
    }
    // If the scan broke out early, 'i' still indexes the offending field.
    if (i < bmtMetaData->cFields) {
        IfFailRet(PostFieldLayoutError(GetCl(),
                                       bmtInternal->pModule,
                                       badOffset,
                                       IDS_CLASSLOAD_EXPLICIT_LAYOUT,
                                       bmtError->pThrowable));
    }
    if(!fVerifiable) {
        BEGIN_ENSURE_COOPERATIVE_GC();
        // An unverifiable layout is only tolerated when the assembly may skip verification.
        if (!Security::CanSkipVerification(GetAssembly())) {
            hr = PostFieldLayoutError(GetCl(),
                                      bmtInternal->pModule,
                                      (DWORD) firstOverlay,
                                      IDS_CLASSLOAD_UNVERIFIABLE_FIELD_LAYOUT,
                                      bmtError->pThrowable);
        }
        END_ENSURE_COOPERATIVE_GC();
        IfFailRet(hr);
    }
    if(fOverLayed)
        SetHasOverLayedFields();
    hr = FindPointerSeriesExplicit(instanceSliceSize, pFieldLayout, pGCSeries);
    // Fixup the offset to include parent as current offsets are relative to instance slice
    // Could do this earlier, but it's just easier to assume instance relative for most
    // of the earlier calculations
    // Instance fields start right after the parent
    UINT dwInstanceSliceOffset = InstanceSliceOffsetForExplicit(pGCSeries->numSeries != 0);
    // Set the total size
    m_dwNumInstanceFieldBytes = GetLayoutInfo()->m_cbNativeSize;
    if (m_dwNumInstanceFieldBytes < (dwInstanceSliceOffset + instanceSliceSize))
        IfFailRet(COR_E_TYPELOAD);
    // Rebase every instance field offset from slice-relative to object-relative.
    for (i=0; i < bmtMetaData->cFields; i++) {
        FieldDesc *pFD = bmtMFDescs->ppFieldDescList[i];
        if (!pFD)
            continue;
        if (pFD->IsStatic())
            continue;
        IfFailRet(pFD->SetOffset(pFD->GetOffset() + dwInstanceSliceOffset));
    }
    return hr;
}
// Make sure that no object fields are overlapped incorrectly. Returns S_FALSE if there is
// overlap but nothing illegal, S_OK if there is no overlap.
HRESULT EEClass::CheckValueClassLayout(char *pFieldLayout, UINT fieldOffset, BOOL* pfVerifiable)
{
    HRESULT hr = S_OK;
    // Build a layout of the value class. Don't know the sizes of all the fields easily, but
    // do know a) vc is already consistent so don't need to check it's overlaps and
    // b) size and location of all objectrefs. So build it by setting all non-oref
    // then fill in the orefs later
    UINT fieldSize = GetNumInstanceFieldBytes();
    char *vcLayout = (char*)alloca(fieldSize);
    memset((void*)vcLayout, nonoref, fieldSize);
    // use pointer series to locate the orefs
    _ASSERTE(m_wNumGCPointerSeries > 0);
    CGCDescSeries *pSeries = ((CGCDesc*) GetMethodTable())->GetLowestSeries();
    for (UINT j = 0; j < m_wNumGCPointerSeries; j++)
    {
        _ASSERTE(pSeries <= CGCDesc::GetCGCDescFromMT(GetMethodTable())->GetHighestSeries());
        // Series sizes are stored biased by -BaseSize (see the size adjustment
        // in HandleGCForExplicitLayout), so add BaseSize back; series offsets
        // are object-relative, so subtract the Object header.
        memset((void*)&vcLayout[pSeries->GetSeriesOffset()-sizeof(Object)], oref, pSeries->GetSeriesSize() + GetMethodTable()->GetBaseSize());
        pSeries++;
    }
    // if there are orefs in the current layout, we have to go the slow way and
    // compare each element. If is ok, then can just copy the vc layout onto it
    char *loc;
    if ((loc = (char*)memchr((void*)pFieldLayout, oref, fieldSize)) != NULL) {
        for (UINT i=0; i < fieldSize; i++) {
            if (vcLayout[i] == oref) {
                if (pFieldLayout[i] == nonoref)
                    return COR_E_TYPELOAD;      // oref on top of non-oref data: illegal
                // BUGFIX: the original re-tested '== nonoref' here, which can
                // never be true inside this branch; an oref laid over an
                // existing oref is the legal-but-overlapping case.
                if (pFieldLayout[i] == oref)
                    hr = S_FALSE;
                *pfVerifiable = FALSE;
            } else if (vcLayout[i] == nonoref) {
                if (pFieldLayout[i] == oref)
                    return COR_E_TYPELOAD;      // non-oref data on top of an oref: illegal
                else if(pFieldLayout[i] == nonoref) {
                    // We are overlapping another field
                    hr = S_FALSE;
                }
            }
        }
    }
    else {
        // Are we overlapping another field
        if(memchr((void*)pFieldLayout, nonoref, fieldSize))
            hr = S_FALSE;
    }
    // so either no orefs in the base or all checks out ok
    memcpy((void*)pFieldLayout, (void*)vcLayout, fieldSize);
    // BUGFIX: return hr rather than an unconditional S_OK. The caller
    // (HandleExplicitLayout) relies on S_FALSE to set its overlay flag, and
    // the contract documented above this function promises S_FALSE for a
    // legal overlap.
    return hr;
}
HRESULT EEClass::FindPointerSeriesExplicit(UINT instanceSliceSize, char *pFieldLayout, bmtGCSeries *pGCSeries)
{
    THROWSCOMPLUSEXCEPTION();
    // allocate a structure to track the series. We know that the worst case is a oref-non-oref-non
    // so would the number of series is total instance size div 2 div size of oref.
    // But watch out for the case where we have e.g. an instanceSlizeSize of 4.
    DWORD sz = (instanceSliceSize + (2 * sizeof(OBJECTREF)) - 1);
    pGCSeries->pSeries = new (throws) bmtGCSeries::Series[sz/2/sizeof(OBJECTREF)];
    char *loc = pFieldLayout;
    char *layoutEnd = pFieldLayout + instanceSliceSize;
    while (loc < layoutEnd) {
        // Find the start of the next run of oref-tagged bytes.
        loc = (char*)memchr((void*)loc, oref, layoutEnd-loc);
        if (!loc)
            break;
        // Find the end of the run. BUGFIX: bound the scan by layoutEnd so a
        // run of orefs extending to the very end of the slice does not read
        // past the caller's stack buffer.
        char *cur = loc;
        while(cur < layoutEnd && *cur == oref)
            cur++;
        // so we have a GC series at loc for cur-loc bytes
        pGCSeries->pSeries[pGCSeries->numSeries].offset = (DWORD)(loc - pFieldLayout);
        pGCSeries->pSeries[pGCSeries->numSeries].len = (DWORD)(cur - loc);
        pGCSeries->numSeries++;
        loc = cur;
    }
    // Total series count includes any series inherited from the parent class.
    m_wNumGCPointerSeries = pGCSeries->numSeries + (GetParentClass() ? GetParentClass()->m_wNumGCPointerSeries : 0);
    return S_OK;
}
HRESULT EEClass::HandleGCForExplicitLayout(bmtGCSeries *pGCSeries)
{
    // No GC pointer series in this slice: nothing to record, just free the
    // scratch series list built by FindPointerSeriesExplicit.
    if (! pGCSeries->numSeries)
    {
        delete [] pGCSeries->pSeries;
        pGCSeries->pSeries = NULL;
        return S_OK;
    }
    m_pMethodTable->SetContainsPointers();
    // Copy the pointer series map from the parent
    CGCDesc::Init( (PVOID) m_pMethodTable, m_wNumGCPointerSeries );
    if (GetParentClass() && (GetParentClass()->m_wNumGCPointerSeries > 0))
    {
        // The GC descriptor lives in the bytes below the MethodTable pointer;
        // copy the parent's descriptor (minus its trailing count word) into
        // the corresponding position below our MethodTable.
        UINT ParentGCSize = CGCDesc::ComputeSize(GetParentClass()->m_wNumGCPointerSeries);
        memcpy( (PVOID) (((BYTE*) m_pMethodTable) - ParentGCSize), (PVOID) (((BYTE*) GetParentClass()->m_pMethodTable) - ParentGCSize), ParentGCSize - sizeof(UINT) );
    }
    // Build the pointer series map for this pointers in this instance
    CGCDescSeries *pSeries = ((CGCDesc*)m_pMethodTable)->GetLowestSeries();
    for (UINT i=0; i < pGCSeries->numSeries; i++) {
        // See gcdesc.h for an explanation of why we adjust by subtracting BaseSize
        _ASSERTE(pSeries <= CGCDesc::GetCGCDescFromMT(m_pMethodTable)->GetHighestSeries());
        pSeries->SetSeriesSize( pGCSeries->pSeries[i].len - m_pMethodTable->m_BaseSize );
        // Series offsets are object-relative: slice offset + Object header + parent slice.
        pSeries->SetSeriesOffset(pGCSeries->pSeries[i].offset + sizeof(Object) + InstanceSliceOffsetForExplicit(TRUE));
        pSeries++;
    }
    delete [] pGCSeries->pSeries;
    pGCSeries->pSeries = NULL;
    return S_OK;
}
//
// Used by BuildMethodTable
//
// Setup the method table
//
HRESULT EEClass::SetupMethodTable(bmtVtable* bmtVT,
                                  bmtInterfaceInfo* bmtInterface,
                                  bmtInternalInfo* bmtInternal,
                                  bmtProperties* bmtProp,
                                  bmtMethAndFieldDescs* bmtMFDescs,
                                  bmtEnumMethAndFields* bmtEnumMF,
                                  bmtErrorInfo* bmtError,
                                  bmtMetaDataInfo* bmtMetaData,
                                  bmtParentInfo* bmtParent)
{
    HRESULT hr = S_OK;
    DWORD i;
    BOOL fEnC = bmtInternal->pModule->IsEditAndContinue();
    // COM object types deriving from something other than Object itself get a
    // dynamic interface map (only when there are interfaces to expand).
    BOOL bHasDynamicInterfaceMap = bmtInterface->dwMaxExpandedInterfaces > 0 &&
                                   bmtProp->fIsComObjectType &&
                                   (GetParentClass() != g_pObjectClass->GetClass());
    // Now setup the method table
    // interface map is allocated along with the method table
    m_pMethodTable = MethodTable::AllocateNewMT(
        bmtVT->dwCurrentNonVtableSlot,
        bmtVT->dwStaticFieldBytes,
        m_wNumGCPointerSeries ? CGCDesc::ComputeSize(m_wNumGCPointerSeries) : 0,
        bmtInterface->dwInterfaceMapSize,
        GetClassLoader(),
        IsInterface(),
        bHasDynamicInterfaceMap
        );
    if (m_pMethodTable == NULL)
    {
        IfFailRet(E_OUTOFMEMORY);
    }
    m_pMethodTable->m_pEEClass = this;
    m_pMethodTable->m_pModule = bmtInternal->pModule;
    m_pMethodTable->m_wFlags &= 0xFFFF; // clear flags without touching m_ComponentSize
    m_pMethodTable->m_NormType = ELEMENT_TYPE_CLASS;
    // @todo: SetupMethodTable is thread-safe, yes? It's gotta be, since we're
    // setting m_pMethodTable...
    if (fEnC)
    {
        // Edit-and-continue: reserve extra vtable slots next to the MethodTable
        // so methods added later can be given slots.
        EditAndContinueModule *pEACM = (EditAndContinueModule*)m_pMethodTable->m_pModule;
        SIZE_T newSize = sizeof(SLOT)*ENC_EXTRA_SLOT_COUNT;
        // It's very important that we use here the same heap used in MethodTable::new, so
        // that the memory ends up soon after the VTable of the MethodTable
        WS_PERF_SET_HEAP(HIGH_FREQ_HEAP);
        const BYTE *start = (const BYTE *)GetClassLoader()->GetHighFrequencyHeap()->AllocMem(newSize);
        WS_PERF_UPDATE_DETAIL("MethodTable:new:HighFreq", newSize, (void *)start);
        if (start != NULL)
        {
            const BYTE *end = start + newSize; //this is 1 beyond the valid memory
            // Drop return value on floor - if this fails, we simply won't be able
            // to use the extra slots.
            LOG((LF_CORDB, LL_INFO100000, "EEC::SMT:Added (0x%x,0x%x) to ranges for later SLOT use!\n",
                 start, end));
            _ASSERTE(pEACM->m_pRangeList);
            pEACM->m_pRangeList->AddRange(start,
                                          end,
                                          (void *)start);
        }
    }
    if (IsShared())
        m_pMethodTable->SetShared();
    if (IsValueClass())
    {
        m_pMethodTable->m_NormType = ELEMENT_TYPE_VALUETYPE;
        LPCUTF8 name, nameSpace;
        if (IsEnum())
        {
            // An enum must have exactly one instance field, of a primitive type;
            // its normalized type becomes the underlying primitive type.
            if (GetNumInstanceFields() != 1 ||
                !CorTypeInfo::IsPrimitiveType(m_pFieldDescList->GetFieldType()))
            {
                bmtError->resIDWhy = IDS_CLASSLOAD_BAD_FIELD;
                bmtError->dMethodDefInError = mdMethodDefNil;
                bmtError->szMethodNameForError = "Enum does not have exactly one instance field of a primitive type";
                IfFailRet(COR_E_TYPELOAD);
            }
            _ASSERTE(!m_pFieldDescList->IsStatic());
            m_pMethodTable->m_NormType = m_pFieldDescList->GetFieldType();
        }
        else if (!IsNested())
        {
            // Check if it is a primitive type or other special type
            if (bmtInternal->pModule->IsSystemClasses()) // we are in mscorlib
            {
                bmtInternal->pModule->GetMDImport()->GetNameOfTypeDef(GetCl(), &name, &nameSpace);
                if (strcmp(nameSpace, "System") == 0) {
                    m_pMethodTable->m_NormType = CorTypeInfo::FindPrimitiveType(nameSpace, name);
                    if (m_pMethodTable->m_NormType == ELEMENT_TYPE_END)
                    {
                        // Not a primitive; check for the runtime-handle value types.
                        m_pMethodTable->m_NormType = ELEMENT_TYPE_VALUETYPE;
                        if ((strcmp(name, g_RuntimeTypeHandleName) == 0) ||
                            (strcmp(name, g_RuntimeMethodHandleName) == 0) ||
                            (strcmp(name, g_RuntimeFieldHandleName) == 0) ||
                            (strcmp(name, g_RuntimeArgumentHandleName) == 0))
                        {
                            // TODO: ultimately I want all value classes that look like an I
                            // to be mapped to ELEMENT_TYPE_I, however for now we just do
                            // the ones above. -vancem
                            m_pMethodTable->m_NormType = ELEMENT_TYPE_I;
                        }
                        // Mark the special types that have embedded stack pointers in them
                        if (strcmp(name, "ArgIterator") == 0 || strcmp(name, "RuntimeArgumentHandle") == 0)
                            m_VMFlags |= VMFLAG_CONTAINS_STACK_PTR;
                    }
                    else {
                        m_VMFlags |= VMFLAG_TRUEPRIMITIVE;
                        if (m_pMethodTable->m_NormType == ELEMENT_TYPE_TYPEDBYREF)
                            m_VMFlags |= VMFLAG_CONTAINS_STACK_PTR;
                    }
                }
            }
        }
    }
    if (bmtProp->fSparse)
        m_pMethodTable->SetSparse();
    m_pMethodTable->m_wCCtorSlot = bmtVT->wCCtorSlot;
    m_pMethodTable->m_wDefaultCtorSlot = bmtVT->wDefaultCtorSlot;
    // Push pointer to method table into the head of each of the method desc
    // chunks we allocated earlier, so that method descs can map back to method
    // tables.
    for (DWORD impl=0; impl<METHOD_IMPL_COUNT; impl++)
        for (DWORD type=0; type<METHOD_TYPE_COUNT; type++)
        {
            bmtMethodDescSet *set = &bmtMFDescs->sets[type][impl];
            for (i=0; i<set->dwChunks; i++)
                set->pChunkList[i]->SetMethodTable(m_pMethodTable);
        }
#ifdef _DEBUG
    // Debug builds: wire up the debug-only back-pointers and formatted
    // signatures on every method desc (declared, unboxing stubs, and the
    // parent-buffer copies).
    for (i = 0; i < bmtMetaData->cMethods; i++) {
        if (bmtMFDescs->ppMethodDescList[i] != NULL) {
            bmtMFDescs->ppMethodDescList[i]->m_pDebugMethodTable = m_pMethodTable;
            bmtMFDescs->ppMethodDescList[i]->m_pszDebugMethodSignature = FormatSig(bmtMFDescs->ppMethodDescList[i]);
        }
    }
    if (bmtMFDescs->ppUnboxMethodDescList != NULL) {
        for (i = 0; i < bmtMetaData->cMethods; i++) {
            if (bmtMFDescs->ppUnboxMethodDescList[i] != NULL) {
                bmtMFDescs->ppUnboxMethodDescList[i]->m_pDebugMethodTable = m_pMethodTable;
                bmtMFDescs->ppUnboxMethodDescList[i]->m_pszDebugMethodSignature = FormatSig(bmtMFDescs->ppUnboxMethodDescList[i]);
            }
        }
    }
    for (i = 0; i < bmtEnumMF->dwNumDeclaredMethods; i++) {
        bmtParent->ppParentMethodDescBuf[i*2+1]->m_pDebugMethodTable = m_pMethodTable;
        bmtParent->ppParentMethodDescBuf[i*2+1]->m_pszDebugMethodSignature = FormatSig(bmtParent->ppParentMethodDescBuf[i*2+1]);
    }
#endif
    // Note that for value classes, the following calculation is only appropriate
    // when the instance is in its "boxed" state.
    if (!IsInterface())
    {
        m_pMethodTable->m_BaseSize = MAX(m_dwNumInstanceFieldBytes + ObjSizeOf(Object), MIN_OBJECT_SIZE);
        m_pMethodTable->m_BaseSize = (m_pMethodTable->m_BaseSize + 3) & ~3; // m_BaseSize must be dword aligned
        m_pMethodTable->m_ComponentSize = 0;
        if (bmtProp->fIsComObjectType)
        {
            //propagate the com specific info
            m_pMethodTable->SetComObjectType();
        }
    }
    else
    {
        // If this is an interface then we need to set the ComInterfaceType to
        // -1 to indicate we have not yet determined the interface type.
        m_pMethodTable->SetComInterfaceType((CorIfaceAttr)-1);
        // If this is a special COM event interface, then mark the MT as such.
        if (bmtProp->fComEventItfType)
        {
            m_pMethodTable->SetComEventItfType();
        }
    }
    if (HasLayout())
    {
        m_pMethodTable->SetNativeSize(GetLayoutInfo()->GetNativeSize());
    }
    if (m_VMFlags & VMFLAG_ISBLOBCLASS)
    {
        m_pMethodTable->SetNativeSize(m_dwNumInstanceFieldBytes);
    }
    // copy onto the real vtable (methods only)
    memcpy(GetVtable(), bmtVT->pVtable, bmtVT->dwCurrentNonVtableSlot * sizeof(SLOT));
    // TODO change this soon: rajak
    BOOL fCheckForMissingMethod = (!bmtProp->fIsComObjectType && !IsAbstract() && !IsInterface());
    // Propagate inheritance
    for (i = 0; i < bmtVT->dwCurrentVtableSlot; i++)
    {
        // For now only propagate inheritance for method desc that are not interface MD's.
        // This is not sufficient but InterfaceImpl's will complete the picture.
        MethodDesc* pMD = GetUnknownMethodDescForSlot(i);
        if (pMD == NULL)
        {
            _ASSERTE(!"Could not resolve MethodDesc Slot!");
            IfFailRet(COR_E_TYPELOAD);
        }
        if(!pMD->IsInterface() && pMD->GetSlot() != i)
        {
            // Duplicate slot: copy the implementation from the method's primary slot.
            GetVtable()[i] = GetVtable()[ pMD->GetSlot() ];
            pMD = GetUnknownMethodDescForSlot(i);
        }
        if (fCheckForMissingMethod)
        {
            // A concrete, non-COM class may not leave interface or abstract
            // methods unimplemented in its vtable.
            if (pMD->IsInterface() || pMD->IsAbstract())
            {
                bmtError->resIDWhy = IDS_CLASSLOAD_NOTIMPLEMENTED;
                bmtError->dMethodDefInError = pMD->GetMemberDef();
                bmtError->szMethodNameForError = pMD->GetNameOnNonArrayClass();
                IfFailRet(COR_E_TYPELOAD);
            }
            // we check earlier to make certain only abstract methods have RVA != 0
            _ASSERTE(!(pMD->GetModule()->IsPEFile() && pMD->IsIL() && pMD->GetRVA() == 0));
        }
    }
#ifdef _DEBUG
    for (i = 0; i < bmtVT->dwCurrentNonVtableSlot; i++)
    {
        _ASSERTE(bmtVT->pVtable[i] != NULL);
    }
#endif
    // Set all field slots to point to the newly created MethodTable
    // (replacing the field-size values that were stashed there during layout).
    for (i = 0; i < (bmtEnumMF->dwNumStaticFields + bmtEnumMF->dwNumInstanceFields); i++)
    {
        m_pFieldDescList[i].m_pMTOfEnclosingClass = m_pMethodTable;
    }
    // Zero-init all static fields. J++ does not generate class initialisers if all you are doing
    // is setting fields to zero.
    memset((SLOT *) GetVtable() + bmtVT->dwCurrentNonVtableSlot, 0, bmtVT->dwStaticFieldBytes);
    _ASSERTE(bmtInterface->dwInterfaceMapSize < 0xffff);
    m_wNumInterfaces = (WORD)bmtInterface->dwInterfaceMapSize;
    // Now create our real interface map now that we know how big it should be
    if (bmtInterface->dwInterfaceMapSize == 0)
    {
        bmtInterface->pInterfaces = NULL;
    }
    else
    {
        bmtInterface->pInterfaces = m_pMethodTable->GetInterfaceMap();
        _ASSERTE(bmtInterface->pInterfaces != NULL);
        // Copy from temporary interface map
        memcpy(bmtInterface->pInterfaces, bmtInterface->pInterfaceMap, bmtInterface->dwInterfaceMapSize * sizeof(InterfaceInfo_t));
        if (!IsInterface())
        {
            hr = m_pMethodTable->InitInterfaceVTableMap();
        }
        //#endif
    }
    // for ComObject types, i.e. if the class extends from a COM Imported
    // class
    // make sure any interface implemented by the COM Imported class
    // is overridden fully, (OR) not overridden at all..
    if (bmtProp->fIsComObjectType)
    {
        BOOL fSuccess = TRUE;
        if (bmtInterface->dwInterfaceMapSize != 0)
        {
            // NOTE: this 'i' intentionally shadows the outer DWORD i.
            for (unsigned i = 0; i < bmtInterface->dwInterfaceMapSize; i++)
            {
                MethodTable* pIntfMT = bmtInterface->pInterfaceMap[i].m_pMethodTable;
                EEClass* pIntfClass = pIntfMT->GetClass();
                if (pIntfClass->GetNumVtableSlots() != 0)
                {
                    WORD startSlot = bmtInterface->pInterfaceMap[i].m_wStartSlot;
                    BOOL hasComImportMethod = FALSE;
                    BOOL hasManagedMethod = FALSE;
                    for (int j = startSlot;j <(pIntfClass->GetNumVtableSlots()+startSlot); j++)
                    {
                        // either all the methods should be complus-call or none
                        // should be
                        MethodDesc* pClsMD = GetUnknownMethodDescForSlot(j);
                        if (pClsMD->GetMethodTable()->IsInterface() || pClsMD->GetClass()->IsComImport())
                        {
                            // this is a ComImported method.. i.e. the calls will go
                            // to COM
                            hasComImportMethod = TRUE;
                        }
                        else
                        {
                            hasManagedMethod = TRUE;
                        }
                        // Exactly one of the two kinds may be present per interface.
                        fSuccess = (hasComImportMethod ^ hasManagedMethod);
                        if (fSuccess == FALSE)
                        {
                            bmtError->resIDWhy = IDS_EE_BAD_COMEXTENDS_CLASS;
                            bmtError->dMethodDefInError = pClsMD->GetMemberDef();
                            bmtError->szMethodNameForError = pClsMD->GetNameOnNonArrayClass();
                            IfFailRet(COR_E_TYPELOAD);
                        }
                    }
                }
            }
        }
    }
    // For COM event interfaces, we need to make sure that all the methods are
    // methods to add or remove events. This means that they all need to take
    // a delegate derived class and have a void return type.
    if (bmtProp->fComEventItfType)
    {
        // COM event interfaces had better be interfaces.
        _ASSERTE(IsInterface());
        // Go through all the methods and
        // validate each signature: void return, one arg, delegate-derived class.
        for (int i = 0; i < GetNumVtableSlots(); i++)
        {
            MethodDesc* pMD = GetUnknownMethodDescForSlot(i);
            _ASSERTE(pMD);
            MetaSig Sig(pMD->GetSig(), pMD->GetModule());
            if (Sig.GetReturnType() != ELEMENT_TYPE_VOID ||
                Sig.NumFixedArgs() != 1 ||
                Sig.NextArg() != ELEMENT_TYPE_CLASS ||
                !Sig.GetTypeHandle().CanCastTo(TypeHandle(g_pDelegateClass)))
            {
                bmtError->resIDWhy = IDS_EE_BAD_COMEVENTITF_CLASS;
                bmtError->dMethodDefInError = pMD->GetMemberDef();
                bmtError->szMethodNameForError = pMD->GetNameOnNonArrayClass();
                IfFailRet(COR_E_TYPELOAD);
            }
        }
    }
    return hr;
}
HRESULT EEClass::CheckForRemotingProxyAttrib(bmtInternalInfo *bmtInternal, bmtProperties* bmtProp)
{
    BEGIN_ENSURE_COOPERATIVE_GC();
    _ASSERTE(g_pObjectClass != NULL);
    // A class carries the remoting proxy attribute when either its parent
    // already has it (the attribute is inherited) or the type itself is
    // decorated with a non-default proxy attribute. Note: the metadata api
    // does not check for inherited attributes, hence the parent check first.
    BOOL fHasProxyAttribute = GetParentClass()->HasRemotingProxyAttribute();
    if (!fHasProxyAttribute)
    {
        fHasProxyAttribute = COMCustomAttribute::IsDefined(
            bmtInternal->pModule,
            m_cl,
            TypeHandle(CRemotingServices::GetProxyAttributeClass()));
    }
    if (fHasProxyAttribute)
        m_VMFlags |= VMFLAG_REMOTING_PROXY_ATTRIBUTE;
    END_ENSURE_COOPERATIVE_GC();
    return S_OK;
}
HRESULT EEClass::CheckForValueType(bmtErrorInfo* bmtError)
{
    // A type whose direct parent is System.ValueType is itself a value
    // class — with the single exception of System.Enum. While System.Enum is
    // being loaded, g_pEnumClass has not yet been published, which is how we
    // recognize it here.
    if (g_pValueTypeClass != NULL && GetParentClass() == g_pValueTypeClass->GetClass())
    {
        if (g_pEnumClass == NULL)
        {
            // We must be loading System.Enum itself.
            _ASSERTE(strncmp(m_szDebugClassName, g_EnumClassName, strlen(g_EnumClassName)) == 0);
        }
        else
        {
            SetValueClass();
        }
    }
    return S_OK;
}
HRESULT EEClass::CheckForEnumType(bmtErrorInfo* bmtError)
{
    // A type deriving directly from System.Enum is an enum. Enums are also
    // value classes, so both flags are set.
    if (g_pEnumClass == NULL)
        return S_OK;
    if (GetParentClass() != g_pEnumClass->GetClass())
        return S_OK;
    SetValueClass();
    SetEnum();
    return S_OK;
}
//
// Used by BuildMethodTable
//
// Check whether the type is a managed standard interface or a COM event
// interface, and record the result in bmtProp
//
HRESULT EEClass::CheckForSpecialTypes(bmtInternalInfo *bmtInternal, bmtProperties *bmtProp)
{
    Module *pModule = bmtInternal->pModule;
    IMDInternalImport *pMDImport = pModule->GetMDImport();
    // Check to see if this type is a managed standard interface. All the managed
    // standard interfaces live in mscorlib.dll so checking for that first
    // makes the strcmp that comes afterwards acceptable.
    if (IsInterface() && pModule->IsSystem())
    {
        LPCUTF8 pszClassName;
        LPCUTF8 pszClassNamespace;
        pMDImport->GetNameOfTypeDef(GetCl(), &pszClassName, &pszClassNamespace);
        if (pszClassName && pszClassNamespace)
        {
            LPUTF8 pszFullyQualifiedName = NULL;
            MAKE_FULLY_QUALIFIED_NAME(pszFullyQualifiedName, pszClassNamespace, pszClassName);
            // This is just to give us a scope to break out of.
            do
            {
            // Expand MngStdItfList.h into a chain of name comparisons: a hit
            // sets fIsMngStandardItf and breaks out of the do/while scope.
#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
        if (strcmp(strMngItfName, pszFullyQualifiedName) == 0) \
        { \
            bmtProp->fIsMngStandardItf = TRUE; \
            break; \
        }
#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig)
#define MNGSTDITF_END_INTERFACE(FriendlyName)
#include "MngStdItfList.h"
#undef MNGSTDITF_BEGIN_INTERFACE
#undef MNGSTDITF_DEFINE_METH_IMPL
#undef MNGSTDITF_END_INTERFACE
            } while (FALSE);
        }
    }
    // Check to see if the type is a COM event interface.
    if(IsInterface())
    {
        const BYTE *pVal;
        ULONG cbVal;
        HRESULT hr = pMDImport->GetCustomAttributeByName(GetCl(), INTEROP_COMEVENTINTERFACE_TYPE, (const void**)&pVal, &cbVal);
        if (hr == S_OK)
        {
            bmtProp->fComEventItfType = TRUE;
        }
    }
    return S_OK;
}
//
// Used by BuildMethodTable
//
// Set the contextful or marshaledbyref flag on the attributes of the class
//
HRESULT EEClass::SetContextfulOrByRef(bmtInternalInfo *bmtInternal)
{
    _ASSERTE(bmtInternal);
    // @TODO: This should be done by the compiler ultimately
    // Check whether these classes are the root classes of contextful
    // and marshalbyref classes i.e. System.ContextBoundObject and
    // System.MarshalByRefObject respectively.
    // Extract the class name
    LPCUTF8 pszClassName = NULL;
    LPCUTF8 pszNameSpace = NULL;
    bmtInternal->pModule->GetMDImport()->GetNameOfTypeDef(GetCl(), &pszClassName, &pszNameSpace);
    // Build the fully-qualified name into the macro-declared buffer _szclsname_.
    DefineFullyQualifiedNameForClass();
    if (FAILED(StoreFullyQualifiedName(_szclsname_,MAX_CLASSNAME_LENGTH,pszNameSpace,pszClassName)))
        return COR_E_TYPELOAD;
    // Compare against the two root class names.
    if(0 == strcmp(g_ContextBoundObjectClassName, _szclsname_))
        // Set the contextful and marshalbyref flag
        SetContextful();
    else if(0 == strcmp(g_MarshalByRefObjectClassName, _szclsname_))
        // Set the marshalbyref flag
        SetMarshaledByRef();
    else
    {
        // First check whether the parent class is contextful or
        // marshalbyref — both properties are inherited.
        EEClass* pParent = GetParentClass();
        if(pParent)
        {
            if(pParent->IsContextful())
                // Set the contextful and marshalbyref flag
                SetContextful();
            else if (pParent->IsMarshaledByRef())
                // Set the marshalbyref flag
                SetMarshaledByRef();
        }
    }
    return S_OK;
}
// Look up a type (including its enclosing types, for nested classes) in the
// hard-coded table of mscorlib types with predefined app-domain agility.
void EEClass::GetPredefinedAgility(Module *pModule, mdTypeDef td,
                                   BOOL *pfIsAgile, BOOL *pfCheckAgile)
{
    //
    // There are 4 settings possible:
    // IsAgile CheckAgile
    // F F (default) Use normal type logic to determine agility
    // T F "Proxy" Treated as agile even though may not be.
    // F T "Maybe" Not agile, but specific instances can be made agile.
    // T T "Force" All instances are forced agile, even though not typesafe.
    //
    // Also, note that object arrays of agile or maybe agile types are made maybe agile.
    //
    static struct PredefinedAgility
    {
        const char *name;    // fully qualified name; "Namespace.*" matches the whole namespace
        BOOL isAgile;
        BOOL checkAgile;
    }
    agility[] =
    {
        // The Thread and its LocalDataStore leak across context boundaries.
        // We manage the leaks manually
        // @todo: stop doing this
        { g_ThreadClassName, TRUE, FALSE },
        { g_LocalDataStoreClassName, TRUE, FALSE },
        // The SharedStatics class is a container for process-wide data
        { g_SharedStaticsClassName, FALSE, TRUE },
        // Make all containers maybe agile
        { "System.Collections.*", FALSE, TRUE },
        // Make all globalization objects agile
        // We have CultureInfo objects on thread. Because threads leak across
        // app domains, we have to be prepared for CultureInfo to leak across.
        // CultureInfo exposes all of the other globalization objects, so we
        // just make the entire namespace app domain agile.
        { "System.Globalization.*", FALSE, TRUE },
        // Remoting structures for legally smuggling messages across app domains
        { "System.Runtime.Remoting.Messaging.SmuggledMethodCallMessage", FALSE, TRUE },
        { "System.Runtime.Remoting.Messaging.SmuggledMethodReturnMessage", FALSE, TRUE },
        { "System.Runtime.Remoting.Messaging.SmuggledObjRef", FALSE, TRUE},
        { "System.Runtime.Remoting.ObjRef", FALSE, TRUE },
        { "System.Runtime.Remoting.ChannelInfo", FALSE, TRUE },
        // Remoting cached data structures are all in mscorlib
        { "System.Runtime.Remoting.Metadata.RemotingCachedData", FALSE, TRUE },
        { "System.Runtime.Remoting.Metadata.RemotingMethodCachedData", FALSE, TRUE },
        { "System.Runtime.Remoting.Metadata.RemotingTypeCachedData", FALSE, TRUE },
        { "System.Reflection.MemberInfo", FALSE, TRUE },
        { "System.Type", FALSE, TRUE },
        { "System.RuntimeType", FALSE, TRUE },
        { "System.Reflection.ConstructorInfo", FALSE, TRUE },
        { "System.Reflection.RuntimeConstructorInfo", FALSE, TRUE },
        { "System.Reflection.EventInfo", FALSE, TRUE },
        { "System.Reflection.RuntimeEventInfo", FALSE, TRUE },
        { "System.Reflection.FieldInfo", FALSE, TRUE },
        { "System.Reflection.RuntimeFieldInfo", FALSE, TRUE },
        { "System.Reflection.RuntimeMethodBase", FALSE, TRUE },
        { "System.Reflection.RuntimeMethodInfo", FALSE, TRUE },
        { "System.Reflection.PropertyInfo", FALSE, TRUE },
        { "System.Reflection.RuntimePropertyInfo", FALSE, TRUE },
        { "System.Reflection.ParameterInfo", FALSE, TRUE },
        //{ "System.Runtime.Remoting.Activation.ActivationAttributeStack", FALSE, TRUE },
        { "System.Runtime.Remoting.Metadata.SoapAttribute", FALSE, TRUE },
        { "System.Runtime.Remoting.Metadata.SoapFieldAttribute", FALSE, TRUE },
        { "System.Runtime.Remoting.Metadata.SoapMethodAttribute",FALSE, TRUE },
        { "System.Runtime.Remoting.Metadata.SoapParameterAttribute", FALSE, TRUE },
        { "System.Runtime.Remoting.Metadata.SoapTypeAttribute", FALSE, TRUE },
        { "System.Reflection.Cache.InternalCache", FALSE, TRUE },
        { "System.Reflection.Cache.InternalCacheItem", FALSE, TRUE },
        // LogSwitches are agile even though we can't prove it
        // @todo: do they need really to be?
        { "System.Diagnostics.LogSwitch", FALSE, TRUE },
        // There is a process global PermissionTokenFactory
        { "System.Security.PermissionToken", FALSE, TRUE },
        { "System.Security.PermissionTokenFactory", FALSE, TRUE },
        // Mark all the exceptions we throw agile. This makes
        // most BVTs pass even though exceptions leak
        //
        // Note that making exception checked automatically
        // makes a bunch of subclasses checked as well.
        //
        // Pre-allocated exceptions
        { "System.Exception", FALSE, TRUE },
        { "System.OutOfMemoryException", FALSE, TRUE },
        { "System.StackOverflowException", FALSE, TRUE },
        { "System.ExecutionEngineException", FALSE, TRUE },
        // Reflection objects may be agile - specifically for
        // shared & system domain objects.
        //
        // @todo: currently appdomain based reflection objects
        // are global too.
        // ReflectionMethodName is agile, but we can't prove
        // it at load time.
        { g_ReflectionMethodName, TRUE, TRUE },
        // ReflectionParamInfoName contains an object referece
        // for default value.
        { g_ReflectionParamInfoName, FALSE, TRUE },
        // BinaryFormatter smuggles these across appdomains.
        { "System.Runtime.Serialization.Formatters.Binary.BinaryObjectWithMap", TRUE, FALSE},
        { "System.Runtime.Serialization.Formatters.Binary.BinaryObjectWithMapTyped", TRUE, FALSE},
        NULL    // name == NULL terminates the table
    };
    // Only mscorlib types can be in the table.
    if (pModule == SystemDomain::SystemModule())
    {
        while (TRUE)
        {
            LPCUTF8 pszName;
            LPCUTF8 pszNamespace;
            HRESULT hr;
            mdTypeDef tdEnclosing;
            pModule->GetMDImport()->GetNameOfTypeDef(td, &pszName, &pszNamespace);
            PredefinedAgility *p = agility;
            while (p->name != NULL)
            {
                // Match the namespace prefix, then either the exact type name
                // or the "*" namespace wildcard.
                // NOTE(review): this does not verify that p->name[length] is
                // '.', and an empty namespace (length 0) makes p->name + 1
                // skip the entry's first character — presumably no
                // global-namespace mscorlib types reach here; confirm.
                SIZE_T length = strlen(pszNamespace);
                if (strncmp(pszNamespace, p->name, length) == 0
                    && (strcmp(pszName, p->name + length + 1) == 0
                        || strcmp("*", p->name + length + 1) == 0))
                {
                    *pfIsAgile = p->isAgile;
                    *pfCheckAgile = p->checkAgile;
                    return;
                }
                p++;
            }
            // Perhaps we have a nested type like 'bucket' that is supposed to be
            // agile or checked agile by virtue of being enclosed in a type like
            // hashtable, which is itself inside "System.Collections".
            tdEnclosing = mdTypeDefNil;
            hr = pModule->GetMDImport()->GetNestedClassProps(td, &tdEnclosing);
            if (SUCCEEDED(hr))
            {
                _ASSERTE(tdEnclosing != td && TypeFromToken(tdEnclosing) == mdtTypeDef);
                td = tdEnclosing;
            }
            else
                break;
        }
    }
    // Not found: default — use normal type logic to determine agility.
    *pfIsAgile = FALSE;
    *pfCheckAgile = FALSE;
}
#if CHECK_APP_DOMAIN_LEAKS
HRESULT EEClass::SetAppDomainAgileAttribute(BOOL fForceSet)
{
    // Compute and record whether this class is appdomain agile (instances may
    // legally be referenced from any appdomain) and/or "check agile"
    // (instances need runtime leak checking on field stores).
    //
    // The most general case for provably a agile class is
    // (1) No instance fields of non-sealed or non-agile types
    // (2) Class is in system domain (its type must be not unloadable
    //     & loaded in all app domains)
    // (3) The class can't have a finalizer
    // (4) The class can't be a COMClass
    //
    // NOTE(review): the fForceSet parameter is never read in this body --
    // confirm whether any caller relies on it.
    _ASSERTE(!IsAppDomainAgilityDone());
    HRESULT hr = S_OK;
    BOOL fCheckAgile = FALSE;   // type is on the predefined "check agile" list
    BOOL fAgile = FALSE;        // type is on the predefined agile list
    BOOL fFieldsAgile = TRUE;   // assume provable agility until a field disproves it
    if (!GetModule()->IsSystem())
    {
        //
        // No types outside of the system domain can even think about
        // being agile
        //
        goto exit;
    }
    if (m_pMethodTable->IsComObjectType())
    {
        //
        // No COM type is agile, as there is domain specific stuff in the sync block
        //
        goto exit;
    }
    if (m_pMethodTable->IsInterface())
    {
        //
        // Don't mark interfaces agile
        //
        goto exit;
    }
    //
    // See if this type is on the predefined agility lists in mscorlib.
    //
    GetPredefinedAgility(GetModule(), m_cl,
                         &fAgile, &fCheckAgile);
    if (m_pMethodTable->HasFinalizer())
    {
        if (!fAgile && !fCheckAgile)
        {
            //
            // If we're finalizable, we need domain affinity.  Otherwise, we may appear
            // to a particular app domain not to call the finalizer (since it may run
            // in a different domain.)
            //
            // Note: do not change this assumption.  The eager finalization code for
            // appdomain unloading assumes that no objects other than those in mscorlib
            // can be agile and finalizable (jenh)
            //
            goto exit;
        }
        else
        {
            // Note that a finalizable object will be considered potentially agile if it has one of the two
            // predefined agility bits set.  This will cause an assert in the eager finalization code if you add
            // a finalizer to such a class - we don't want to have them as we can't run them eagerly and running
            // them after we've cleared the roots/handles means it can't do much safely.  Right now Thread is the
            // only one we allow.  If you need to add a finalizer to an object with predefined agility, talk to jenh.
            _ASSERTE(g_pThreadClass == NULL || m_pMethodTable->IsAgileAndFinalizable());
        }
    }
    //
    // Now see if the type is "naturally agile" - that is, its type structure
    // guarantees agility.
    //
    if (GetParentClass() != NULL)
    {
        //
        // Make sure our parent was computed.  This should only happen
        // when we are prejitting - otherwise it is computed for each
        // class as it's loaded.
        //
        _ASSERTE(GetParentClass()->IsAppDomainAgilityDone());
        if (!GetParentClass()->IsAppDomainAgile())
        {
            // A non-agile parent makes the whole layout non-agile.
            fFieldsAgile = FALSE;
            if (fCheckAgile)
                _ASSERTE(GetParentClass()->IsCheckAppDomainAgile());
        }
        //
        // To save having to list a lot of trivial (layout-wise) subclasses,
        // automatically check a subclass if its parent is checked and
        // it introduces no new fields.
        //
        if (!fCheckAgile
            && GetParentClass()->IsCheckAppDomainAgile()
            && GetNumInstanceFields() == GetParentClass()->GetNumInstanceFields())
            fCheckAgile = TRUE;
    }
    // Only the fields this class itself introduces need to be examined.
    WORD nFields = GetNumInstanceFields()
        - (GetParentClass() == NULL ? 0 : GetParentClass()->GetNumInstanceFields());
#if 0
    FieldDesc **ppFDRefSelf = (FieldDesc **) new (nothrow) FieldDesc* [nFields];
    if (ppFDRefSelf == NULL) {
        hr = E_OUTOFMEMORY;
        goto exit;
    }
    for (int i = 0; i < nFields; i ++) {
        ppFDRefSelf[i] = NULL;
    }
    WORD nSavedFDs = 0;
#endif
    if (fFieldsAgile || fCheckAgile)
    {
        // Scan this class's own instance fields for non-agile referents.
        FieldDesc *pFD = m_pFieldDescList;
        FieldDesc *pFDEnd = pFD + nFields;
        while (pFD < pFDEnd)
        {
            switch (pFD->GetFieldType())
            {
            case ELEMENT_TYPE_CLASS:
                {
                    //
                    // There is a bit of a problem in computing the classes which are naturally agile -
                    // we don't want to load types of non-value type fields.  So for now we'll
                    // err on the side of conservatism and not allow any non-value type fields other than
                    // the forced agile types listed above.
                    //
                    PCCOR_SIGNATURE pSig;
                    DWORD cSig;
                    pFD->GetSig(&pSig, &cSig);
                    FieldSig sig(pSig, GetModule());
                    SigPointer sigPtr = sig.GetProps();
                    CorElementType type = sigPtr.GetElemType();
                    //
                    // Don't worry about strings
                    //
                    if (type == ELEMENT_TYPE_STRING)
                        break;
                    // Find our field's token so we can proceed cautiously
                    mdToken token = mdTokenNil;
                    if (type == ELEMENT_TYPE_CLASS)
                        token = sigPtr.GetToken();
                    //
                    // First, a special check to see if the field is of our own type
                    // (a sealed self-reference is safe).
                    //
                    if (token == GetCl() && (GetAttrClass() & tdSealed))
                        break;
                    //
                    // Now, look for the field's TypeHandle.
                    //
                    // @todo: there is some ifdef'd code here to load the type if it's
                    // not already loading.  This code has synchronization problems, as well
                    // as triggering more aggressive loading than normal.  So it's disabled
                    // for now.
                    //
                    TypeHandle th;
#if 0
                    if (TypeFromToken(token) == mdTypeDef
                        && GetClassLoader()->FindUnresolvedClass(GetModule, token) == NULL)
                        th = pFD->LoadType();
                    else
#endif
                        th = pFD->FindType();
                    //
                    // See if the referenced type is agile.  Note that there is a reasonable
                    // chance that the type hasn't been loaded yet.  If this is the case,
                    // we just have to assume that it's not agile, since we can't trigger
                    // extra loads here (for fear of circular recursion.)
                    //
                    // If you have an agile class which runs into this problem, you can solve it by
                    // setting the type manually to be agile.
                    //
                    if (th.IsNull()
                        || !th.IsAppDomainAgile()
                        || (th.IsUnsharedMT()
                            && (th.AsClass()->GetAttrClass() & tdSealed) == 0))
                    {
                        //
                        // Treat the field as non-agile (non-sealed referents could be
                        // subclassed by a non-agile type).
                        //
                        fFieldsAgile = FALSE;
                        if (fCheckAgile)
                            pFD->SetDangerousAppDomainAgileField();
                    }
                }
                break;
            case ELEMENT_TYPE_VALUETYPE:
                {
                    // Value type fields are always loaded eagerly, so we can ask directly.
                    TypeHandle th = pFD->LoadType();
                    _ASSERTE(!th.IsNull());
                    if (!th.IsAppDomainAgile())
                    {
                        fFieldsAgile = FALSE;
                        if (fCheckAgile)
                            pFD->SetDangerousAppDomainAgileField();
                    }
                }
                break;
            default:
                // Primitive types carry no cross-domain references.
                break;
            }
            pFD++;
        }
    }
    // Record the final verdict on the method table.
    if (fFieldsAgile || fAgile)
        SetAppDomainAgile();
    if (fCheckAgile && !fFieldsAgile)
        SetCheckAppDomainAgile();
#if 0
    if (fFieldsAgile || fAgile){
        for (i = 0; i < nSavedFDs; i ++) {
            ppFDRefSelf[i]->SetDangerousAppDomainAgileField();
        }
    }
    delete [] ppFDRefSelf;
#endif
exit:
    // Mark the computation done whether or not the class was found agile.
    SetAppDomainAgilityDone();
    return hr;
}
#endif
void EEClass::SetCCWAppDomainAgileAttribute()
{
mdTypeDef td = m_cl;
static struct CCWAgility
{
const char *name;
}
agility[] =
{
{ "System.RuntimeType" },
{ "System.Reflection.RuntimeConstructorInfo" },
{ "System.Reflection.RuntimeEventInfo" },
{ "System.Reflection.RuntimeFieldInfo" },
{ "System.Reflection.RuntimeMethodBase" },
{ "System.Reflection.RuntimeMethodInfo" },
{ "System.Reflection.RuntimePropertyInfo" },
{ "System.Reflection.ParameterInfo" },
{ g_ReflectionMethodName },
{ g_ReflectionParamInfoName },
NULL
};
if (GetModule() == SystemDomain::SystemModule())
{
while (TRUE)
{
LPCUTF8 pszName;
LPCUTF8 pszNamespace;
HRESULT hr;
mdTypeDef tdEnclosing;
GetModule()->GetMDImport()->GetNameOfTypeDef(td, &pszName, &pszNamespace);
CCWAgility *p = agility;
while (p->name != NULL)
{
SIZE_T length = strlen(pszNamespace);
if (strncmp(pszNamespace, p->name, length) == 0
&& (strcmp(pszName, p->name + length + 1) == 0
|| strcmp("*", p->name + length + 1) == 0))
{
SetCCWAppDomainAgile();
return;
}
p++;
}
// Perhaps we have a nested type like 'bucket' that is supposed to be
// agile or checked agile by virtue of being enclosed in a type like
// hashtable, which is itself inside "System.Collections".
tdEnclosing = mdTypeDefNil;
hr = GetModule()->GetMDImport()->GetNestedClassProps(td, &tdEnclosing);
if (SUCCEEDED(hr))
{
_ASSERTE(tdEnclosing != td && TypeFromToken(tdEnclosing) == mdtTypeDef);
td = tdEnclosing;
}
else
break;
}
}
}
OBJECTREF MethodTable::GetObjCreateDelegate()
{
    // Returns the object-creation delegate for this type, or NULL when no
    // handle has been set up yet.  Not meaningful for interfaces.
    _ASSERT(!IsInterface());
    return m_ohDelegate ? ObjectFromHandle(m_ohDelegate) : NULL;
}
void MethodTable::SetObjCreateDelegate(OBJECTREF orDelegate)
{
    // Stores the object-creation delegate, lazily creating the backing
    // handle in the current appdomain on first use.
    if (!m_ohDelegate)
    {
        m_ohDelegate = GetAppDomain()->CreateHandle(orDelegate);
        return;
    }
    StoreObjectInHandle(m_ohDelegate, orDelegate);
}
HRESULT MethodTable::InitInterfaceVTableMap()
{
    // (Re)builds the per-domain interface vtable map for this type.
    // Returns S_OK (including when there are no interfaces) or E_FAIL when
    // the map manager cannot provide a map.
    _ASSERTE(!IsInterface());

    BaseDomain *pMapDomain = GetModule()->GetDomain();
    // HACKKK COUGH UGGH
    // We currently can only have one "shared" vtable map mgr
    // - so use the system domain for all shared classes
    if (pMapDomain == SharedDomain::GetDomain())
        pMapDomain = SystemDomain::System();

    DWORD cEntries = m_wNumInterface + GetNumDynamicallyAddedInterfaces();
    if (cEntries == 0)
        return S_OK;        // nothing to map

    LPVOID *pVTableMap = pMapDomain->GetInterfaceVTableMapMgr().
        GetInterfaceVTableMap(m_pIMap, this, cEntries);
    if (pVTableMap == NULL)
        return E_FAIL;

    m_pInterfaceVTableMap = pVTableMap;
    return S_OK;
}
InterfaceInfo_t* MethodTable::GetDynamicallyAddedInterfaceMap()
{
    // Returns a pointer to the dynamic portion of the interface map (the
    // entries following the static ones), or NULL when none can exist.
    VALIDATE_INTERFACE_MAP(this);
    // Only extensible RCW's have dynamically added interfaces.
    if (!HasDynamicInterfaceMap())
        return NULL;
#ifdef _DEBUG
    // Debug builds report NULL when the dynamic section is currently empty.
    if (GetNumDynamicallyAddedInterfaces() == 0)
        return NULL;
#endif
    return &m_pIMap[m_wNumInterface];
}
unsigned MethodTable::GetNumDynamicallyAddedInterfaces()
{
VALIDATE_INTERFACE_MAP(this);
// Only extensible RCW's have dynamically added interfaces.
if (!HasDynamicInterfaceMap())
return 0;
return *(((DWORD *)m_pIMap) - 1);
}
InterfaceInfo_t* MethodTable::FindDynamicallyAddedInterface(MethodTable *pInterface)
{
    // Linear search of the dynamic interface-map section for pInterface.
    // Returns the matching entry, or NULL when absent (or when this type
    // cannot have dynamically added interfaces at all).
    _ASSERTE(IsRestored());

    // Only extensible RCW's have dynamically added interfaces.
    if (!HasDynamicInterfaceMap())
        return NULL;

    InterfaceInfo_t *pEntry = GetDynamicallyAddedInterfaceMap();
    InterfaceInfo_t *pEnd = pEntry + GetNumDynamicallyAddedInterfaces();
    for (; pEntry < pEnd; pEntry++)
    {
        if (pEntry->m_pMethodTable == pInterface)
            return pEntry;
    }
    return NULL;
}
void MethodTable::AddDynamicInterface(MethodTable *pItfMT)
{
    // Appends pItfMT to the dynamically-added portion of this extensible
    // RCW's interface map.  A larger map is allocated (the old one is
    // deliberately leaked from the loader heap -- see the LOG at the end),
    // the dynamic-entry count stored in the DWORD immediately preceding the
    // map is bumped, and the interface vtable map is rebuilt.
    //
    // Throws (COMPlusThrowOM / COMPlusThrowHR) on failure, releasing the
    // ComPlusWrapperCache lock first -- NOTE(review): this implies the
    // caller holds that lock on entry; confirm against call sites.
    THROWSCOMPLUSEXCEPTION();
    HRESULT hr;
    _ASSERTE(IsRestored());
    // This should never be called on for a type that is not an extensible RCW.
    _ASSERT(HasDynamicInterfaceMap());
    int NumDynAddedInterfaces = GetNumDynamicallyAddedInterfaces();
    int TotalNumInterfaces = GetNumInterfaces() + NumDynAddedInterfaces;
    InterfaceInfo_t *pNewItfMap = NULL;
    // Layout of the allocation: [DWORD dynamic count][entries 0..TotalNumInterfaces].
    int AllocSize = sizeof(DWORD) + sizeof(InterfaceInfo_t) * (TotalNumInterfaces + 1);
#ifdef _DEBUG
    // Add space for the dummy entry that precedes the interface map.
    AllocSize += sizeof(InterfaceInfo_t);
#endif
    // Allocate the new interface table adding one for the new interface and one
    // more for the dummy slot before the start of the table.
    WS_PERF_SET_HEAP(HIGH_FREQ_HEAP);
    pNewItfMap = (InterfaceInfo_t*)GetClass()->GetClassLoader()->GetHighFrequencyHeap()->AllocMem(AllocSize);
    if (!pNewItfMap)
    {
        // Drop the wrapper-cache lock before throwing out-of-memory.
        ComPlusWrapperCache::GetComPlusWrapperCache()->UNLOCK();
        COMPlusThrowOM();
    }
    WS_PERF_UPDATE_DETAIL("ComPlusWrapper:GrowInterfaceMap:HighFreq", AllocSize, pNewItfMap);
#ifdef _DEBUG
    // Fill in the dummy entry that precedes the interface map with a
    // recognizable 0xCD pattern to catch stray accesses.
    pNewItfMap[0].m_wStartSlot = 0xCDCD;
    pNewItfMap[0].m_wFlags = 0xCDCD;
    pNewItfMap[0].m_pMethodTable = (MethodTable *)((sizeof(int *) == 4)?0xCDCDCDCDL:0xCDCDCDCDCDCDCDCD);
    pNewItfMap = (InterfaceInfo_t*)(((BYTE *)pNewItfMap) + sizeof(InterfaceInfo_t) + sizeof(DWORD));
#else
    pNewItfMap = (InterfaceInfo_t*)(((BYTE *)pNewItfMap) + sizeof(DWORD));
#endif
    // Copy the old map into the new one.
    memcpy(pNewItfMap, m_pIMap, TotalNumInterfaces * sizeof(InterfaceInfo_t));
    // Add the new interface at the end of the map.
    pNewItfMap[TotalNumInterfaces].m_wStartSlot = (WORD) -1;
    pNewItfMap[TotalNumInterfaces].m_wFlags = 0;
    pNewItfMap[TotalNumInterfaces].m_pMethodTable = pItfMT;
    // Update the count of dynamically added interfaces.
    *(((DWORD *)pNewItfMap) - 1) = NumDynAddedInterfaces + 1;
    // Switch the old interface map with the new one.
    m_pIMap = pNewItfMap;
    // Re-layout the interface vtable map.
    hr = InitInterfaceVTableMap();
    if (FAILED(hr))
    {
        ComPlusWrapperCache::GetComPlusWrapperCache()->UNLOCK();
        COMPlusThrowHR(hr);
    }
    // Log the fact that we leaked the interface vtable map.
#ifdef _DEBUG
    LOG((LF_INTEROP, LL_EVERYTHING,
        "Extensible RCW %s being cast to interface %s caused an interface vtable map leak",
        GetClass()->m_szDebugClassName, pItfMT->GetClass()->m_szDebugClassName));
#else
    LOG((LF_INTEROP, LL_EVERYTHING,
        "Extensible RCW being cast to an interface caused an interface vtable map leak"));
#endif
}
#ifdef DEBUGGING_SUPPORTED
//
// Debugger notification
//
void EEClass::NotifyDebuggerLoad()
{
    // Informs an attached debugger that this class has been loaded; a no-op
    // when no debugger is attached.
    if (CORDebuggerAttached())
        NotifyDebuggerAttach(NULL, FALSE);
}
BOOL EEClass::NotifyDebuggerAttach(AppDomain *pDomain, BOOL attaching)
{
    // Forwards a class-load notification for this class to the debugger
    // interface, returning whatever LoadClass reports.
    Module *pModule = GetModule();
    BOOL fIsSystem = GetAssembly()->IsSystem();
    return g_pDebugInterface->LoadClass(this, m_cl, pModule, pDomain,
                                        fIsSystem, attaching);
}
void EEClass::NotifyDebuggerDetach(AppDomain *pDomain)
{
    // Informs the debugger that this class is unloading from pDomain; a
    // no-op when that domain has no debugger attached.
    if (pDomain->IsDebuggerAttached())
        g_pDebugInterface->UnloadClass(m_cl, GetModule(), pDomain, FALSE);
}
#endif // DEBUGGING_SUPPORTED
//
// Used by BuildMethodTable
//
// Perform relevant GC calculations for value classes
//
HRESULT EEClass::HandleGCForValueClasses(bmtFieldPlacement* bmtFP, bmtEnumMethAndFields* bmtEnumMF, EEClass*** pByValueClassCache)
{
    // Builds this type's GC pointer-series descriptor (CGCDesc, stored below
    // the MethodTable): copies the parent's series, adds a series for this
    // class's own object-ref fields, splices in the series of embedded
    // by-value (value class) fields, and finally re-biases the inherited
    // series for this type's larger base size.  Always returns S_OK.
    HRESULT hr = S_OK;
    DWORD i, j;
    // Note that for value classes, the following calculation is only appropriate
    // when the instance is in its "boxed" state.
    if (m_wNumGCPointerSeries > 0)
    {
        CGCDescSeries *pSeries;
        CGCDescSeries *pHighest;
        m_pMethodTable->SetContainsPointers();
        // Reserve space below the MethodTable for all of our series.
        CGCDesc::Init( (PVOID) m_pMethodTable, m_wNumGCPointerSeries );
        // Copy the pointer series map from the parent
        if (GetParentClass() && (GetParentClass()->m_wNumGCPointerSeries > 0))
        {
            DWORD ParentGCSize = CGCDesc::ComputeSize(GetParentClass()->m_wNumGCPointerSeries);
            // Copy everything except the parent's series-count slot.
            memcpy( (PVOID) (((BYTE*) m_pMethodTable) - ParentGCSize), (PVOID) (((BYTE*) GetParentClass()->m_pMethodTable) - ParentGCSize), ParentGCSize - sizeof(DWORD) );
        }
        // Build the pointer series map for the pointers in this instance
        pSeries = ((CGCDesc*)m_pMethodTable)->GetLowestSeries();
        if (bmtFP->NumInstanceGCPointerFields)
        {
            // See gcdesc.h for an explanation of why we adjust by subtracting BaseSize
            pSeries->SetSeriesSize( (bmtFP->NumInstanceGCPointerFields * sizeof(OBJECTREF)) - m_pMethodTable->GetBaseSize());
            pSeries->SetSeriesOffset(bmtFP->GCPointerFieldStart+sizeof(Object));
            pSeries++;
        }
        // Insert GC info for fields which are by-value classes
        for (i = 0; i < bmtEnumMF->dwNumInstanceFields; i++)
        {
            if (m_pFieldDescList[i].IsByValue())
            {
                EEClass *pByValueClass = (*pByValueClassCache)[i];
                MethodTable *pByValueMT = pByValueClass->GetMethodTable();
                CGCDescSeries *pByValueSeries;
                // The by value class may have more than one pointer series
                DWORD dwNumByValueSeries = pByValueClass->m_wNumGCPointerSeries;
                if (dwNumByValueSeries > 0)
                {
                    // Offset of the by value class in the class we are building, does NOT include Object
                    DWORD dwCurrentOffset = m_pFieldDescList[i].GetOffset();
                    pByValueSeries = ((CGCDesc*) pByValueMT)->GetLowestSeries();
                    for (j = 0; j < dwNumByValueSeries; j++)
                    {
                        DWORD dwSeriesSize;
                        DWORD dwSeriesOffset;
                        _ASSERTE(pSeries <= CGCDesc::GetCGCDescFromMT(m_pMethodTable)->GetHighestSeries());
                        dwSeriesSize = pByValueSeries->GetSeriesSize();
                        // Add back the base size of the by value class, since it's being transplanted to this class
                        dwSeriesSize += pByValueMT->GetBaseSize();
                        // Subtract the base size of the class we're building
                        dwSeriesSize -= m_pMethodTable->GetBaseSize();
                        // Set current series we're building
                        pSeries->SetSeriesSize(dwSeriesSize);
                        // Get offset into the value class of the first pointer field (includes a +Object)
                        dwSeriesOffset = pByValueSeries->GetSeriesOffset();
                        // Add it to the offset of the by value class in our class
                        dwSeriesOffset += dwCurrentOffset;
                        pSeries->SetSeriesOffset(dwSeriesOffset); // Offset of field
                        pSeries++;
                        pByValueSeries++;
                    }
                }
            }
        }
        // Adjust the inherited series - since the base size has increased by "# new field instance bytes", we need to
        // subtract that from all the series (since the series always has BaseSize subtracted for it - see gcdesc.h)
        pSeries = pSeries;  // (pSeries now points at the first inherited series)
        pHighest = CGCDesc::GetCGCDescFromMT(m_pMethodTable)->GetHighestSeries();
        while (pSeries <= pHighest)
        {
            // Inherited series can only exist when there is a parent class.
            _ASSERTE( GetParentClass() );
            pSeries->SetSeriesSize( pSeries->GetSeriesSize() - (GetMethodTable()->GetBaseSize() - GetParentClass()->GetMethodTable()->GetBaseSize()) );
            pSeries++;
        }
        _ASSERTE(pSeries-1 <= CGCDesc::GetCGCDescFromMT(m_pMethodTable)->GetHighestSeries());
    }
    return hr;
}
//
// Used by BuildMethodTable
//
// Create handles for the static fields that contain object references
// and allocate the ones that are value classes.
//
HRESULT EEClass::CreateHandlesForStaticFields(bmtEnumMethAndFields* bmtEnumMF, bmtInternalInfo* bmtInternal, EEClass*** pByValueClassCache, bmtVtable *bmtVT, bmtErrorInfo* bmtError)
{
    // Create handles for the static fields that contain object references
    // and allocate (box) the ones that are value classes.
    //
    // For shared classes no handles are created yet -- per-field bookkeeping
    // is stored where the statics would live, so the DLS copies can be
    // initialized later without touching the FieldDescs.
    //
    // Returns S_OK, or COR_E_TYPELOAD (with bmtError->pThrowable updated)
    // when allocating a boxed value-type static fails.
    HRESULT hr = S_OK;
    DWORD i;
    if (bmtEnumMF->dwNumStaticObjRefFields > 0)
    {
        if (!IsShared())
        {
            // Object allocation below requires cooperative GC mode.
            BEGIN_ENSURE_COOPERATIVE_GC();
            int ipObjRefs = 0;
            // Retrieve the object ref pointers from the app domain.
            OBJECTREF **apObjRefs = new OBJECTREF*[bmtEnumMF->dwNumStaticObjRefFields];
            // Reserve some object ref pointers.
            ((AppDomain*)bmtInternal->pModule->GetDomain())->
                AllocateStaticFieldObjRefPtrs(bmtEnumMF->dwNumStaticObjRefFields, apObjRefs);
            for (i = 0; i < bmtEnumMF->dwNumStaticFields; i++)
            {
                DWORD dwIndex = bmtEnumMF->dwNumInstanceFields + i;   // index in the FieldDesc list
                FieldDesc *pField = &m_pFieldDescList[dwIndex];
                // Special (thread/context) statics are handled elsewhere.
                if (pField->IsSpecialStatic())
                    continue;
                // A value-class static is stored as an object reference
                // to a boxed version of the value class.  This allows the standard GC
                // algorithm to take care of internal pointers in the value class.
                if (pField->IsByValue())
                {
                    _ASSERTE(*pByValueClassCache);
                    EEClass *pByValueClass = (*pByValueClassCache)[dwIndex];
                    OBJECTREF obj = NULL;
                    // @todo IA64 - Determine why the compiler doesn't like this try/catch
#ifndef _IA64_
                    COMPLUS_TRY
                    {
#endif // !_IA64_
                        obj = AllocateObject(pByValueClass->GetMethodTable());
#ifndef _IA64_
                    }
                    COMPLUS_CATCH
                    {
                        // Boxing failed (e.g. OOM): surface as a type-load error.
                        hr = COR_E_TYPELOAD;
                        UpdateThrowable(bmtError->pThrowable);
                        break;
                    }
                    COMPLUS_END_CATCH
#endif // !_IA64_  (fixed: tokens after #endif must be inside a comment)
                    SetObjectReference( apObjRefs[ipObjRefs], obj,
                                        (AppDomain*) bmtInternal->pModule->GetDomain() );
                    // initialize static address with object ref to boxed value type
                    void *pStaticAddress = (void*)((BYTE*)pField->GetBase() + pField->GetOffset());
                    *(void**)pStaticAddress = (void*)apObjRefs[ipObjRefs++];
                }
                else if (m_pFieldDescList[dwIndex].GetFieldType() == ELEMENT_TYPE_CLASS)
                {
                    // initialize static address with object ref
                    void *pStaticAddress = (void*)((BYTE*)pField->GetBase() + pField->GetOffset());
                    *(void**)pStaticAddress = (void*)apObjRefs[ipObjRefs++];
                }
            }
            delete []apObjRefs;
            END_ENSURE_COOPERATIVE_GC();
        }
        else
        {
            //
            // For shared classes, we don't allocate any handles
            // in the method table (since statics live in DLS),
            // but we do store information about what handles need to be
            // allocated later on.  This information goes where the
            // statics themselves (in non-shared types) would go.
            // This allows us to later initialize the DLS version of the
            // statics without bringing the FieldDescs into the working set.
            //
            FieldDesc *pField = m_pFieldDescList + bmtEnumMF->dwNumInstanceFields;
            FieldDesc *pFieldEnd = pField + bmtEnumMF->dwNumStaticFields;
            for (; pField < pFieldEnd; pField++)
            {
                _ASSERTE(pField->IsStatic());
                if(!pField->IsSpecialStatic()) {
                    MethodTable *pMT;
                    void *addr;
                    switch (pField->GetFieldType())
                    {
                    case ELEMENT_TYPE_CLASS:
                        // Object-ref static: record NULL; a handle is allocated later.
                        addr = (BYTE *) GetMethodTable()->m_Vtable +
                            bmtVT->dwCurrentNonVtableSlot*sizeof(SLOT*) + pField->GetOffset();
                        *(MethodTable**)addr = (MethodTable *) NULL;
                        break;
                    case ELEMENT_TYPE_VALUETYPE:
                        // Boxed value-type static: record the MethodTable to box with.
                        pMT = (*pByValueClassCache)[pField - m_pFieldDescList]->GetMethodTable();
                        _ASSERTE(pMT->IsValueClass());
                        addr = (BYTE *) GetMethodTable()->m_Vtable +
                            bmtVT->dwCurrentNonVtableSlot*sizeof(SLOT*) + pField->GetOffset();
                        *(MethodTable**)addr = pMT;
                        break;
                    default:
                        // Primitive statics need no handle bookkeeping.
                        break;
                    }
                }
            }
        }
    }
    return hr;
}
//
// Used by BuildMethodTable
//
// If we have a non-interface class, then do inheritance security
// checks on it. The check starts by checking for inheritance
// permission demands on the current class. If these first checks
// succeeded, then the cached declared method list is scanned for
// methods that have inheritance permission demands.
//
HRESULT EEClass::VerifyInheritanceSecurity(bmtInternalInfo* bmtInternal, bmtErrorInfo* bmtError, bmtParentInfo* bmtParent, bmtEnumMethAndFields* bmtEnumMF)
{
    // Performs inheritance security checks for a non-interface class:
    // class-level inheritance demands up the whole parent chain first, then
    // method-level demands along each chain of overridden methods.
    // Skipped for interfaces, system (mscorlib) classes, and when security
    // is off.  Returns S_OK, or COR_E_TYPELOAD with bmtError->resIDWhy set.
    HRESULT hr = S_OK;
    // If we have a non-interface class, then do inheritance security
    // checks on it.  The check starts by checking for inheritance
    // permission demands on the current class.  If these first checks
    // succeeded, then the cached declared method list is scanned for
    // methods that have inheritance permission demands.
    if (!IsInterface() && (bmtInternal->pModule->IsSystemClasses() == FALSE) &&
        Security::IsSecurityOn())
    {
        // We need to disable preemptive GC if there's any chance that it could still be
        // active.  The inheritance checks might allocate objects.
        BEGIN_ENSURE_COOPERATIVE_GC();
        // @ASSUMPTION: The current class has been resolved to the point that
        // we can construct a reflection object on the class or its methods.
        // This is required for the security checks.
        // Check the entire parent chain for inheritance permission demands.
        EEClass *pParentClass = GetParentClass();
        while (pParentClass != NULL)
        {
            if (pParentClass->RequiresInheritanceCheck() &&
                ! Security::ClassInheritanceCheck(this, pParentClass, bmtError->pThrowable) )
            {
                bmtError->resIDWhy = IDS_CLASSLOAD_INHERITANCECHECK;
                // IfFailGoto sets hr and jumps to reenable_gc below.
                IfFailGoto(COR_E_TYPELOAD, reenable_gc);
            }
            pParentClass = pParentClass->GetParentClass();
        }
        if (GetParentClass() != NULL)
        {
            // ppParentMethodDescBuf holds (parent override, declared method)
            // MethodDesc pairs for each declared method, consumed in order.
            bmtParent->ppParentMethodDescBufPtr = bmtParent->ppParentMethodDescBuf;
            for (DWORD i = 0; i < bmtEnumMF->dwNumDeclaredMethods; i++)
            {
                // Check the entire chain of overridden methods for
                // inheritance permission demands.
                MethodDesc *pParent = *(bmtParent->ppParentMethodDescBufPtr++);
                MethodDesc *pMethod = *(bmtParent->ppParentMethodDescBufPtr++);
                _ASSERTE(pMethod != NULL);
                if (pParent != NULL)
                {
                    // Get the name and signature for the method so
                    // we can find the new parent method desc.
                    DWORD dwSlot;
                    dwSlot = pParent->GetSlot();
#ifdef _DEBUG
                    // Debug-only: fetch name/sig to cross-check FindMethod below.
                    LPCUTF8 szName;
                    PCCOR_SIGNATURE pSignature;
                    DWORD cSignature;
                    szName = bmtInternal->pInternalImport->GetNameOfMethodDef(pMethod->GetMemberDef());
                    if (szName == NULL)
                    {
                        _ASSERTE(0);
                        IfFailGoto(COR_E_TYPELOAD, reenable_gc);
                    }
                    pSignature = bmtInternal->pInternalImport->GetSigOfMethodDef(
                        pMethod->GetMemberDef(),
                        &cSignature);
#endif
                    // Walk up the override chain, checking each ancestor's demand.
                    do
                    {
                        if (pParent->RequiresInheritanceCheck() &&
                            ! Security::MethodInheritanceCheck(pMethod, pParent, bmtError->pThrowable) )
                        {
                            bmtError->resIDWhy = IDS_CLASSLOAD_INHERITANCECHECK;
                            IfFailGoto(COR_E_TYPELOAD, reenable_gc);
                        }
                        if (pParent->ParentRequiresInheritanceCheck())
                        {
                            // NOTE: this local deliberately shadows the outer pParentClass.
                            EEClass *pParentClass = pParent->GetClass()->GetParentClass();
                            // Find this method in the parent.
                            // If it does exist in the parent, it would be at the same vtable slot.
                            if (dwSlot >= GetParentClass()->GetNumVtableSlots())
                            {
                                // Parent does not have this many vtable slots, so it doesn't exist there
                                pParent = NULL;
                            }
                            else
                            {
                                // It is in the vtable of the parent
                                pParent = pParentClass->GetUnknownMethodDescForSlot(dwSlot);
                                _ASSERTE(pParent != NULL);
#ifdef _DEBUG
                                _ASSERTE(pParent == pParentClass->FindMethod(
                                    szName,
                                    pSignature,
                                    cSignature,
                                    bmtInternal->pModule,
                                    mdTokenNil
                                    ));
#endif
                            }
                        }
                        else
                        {
                            // No further demands up the chain; stop walking.
                            pParent = NULL;
                        }
                    } while (pParent != NULL);
                }
            }
        }
reenable_gc:
        // Always restore preemptive GC mode, success or failure.
        END_ENSURE_COOPERATIVE_GC();
        if (FAILED(hr)){
            return hr;
        }
    }
    return hr;
}
//
// Used by BuildMethodTable
//
// Now that the class is ready, fill out the RID maps
//
HRESULT EEClass::FillRIDMaps(bmtMethAndFieldDescs* bmtMFDescs, bmtMetaDataInfo* bmtMetaData, bmtInternalInfo* bmtInternal)
{
    // Now that the class is ready, populate the module's RID maps: one entry
    // per method token and one per field token.  For value classes the
    // unboxed MethodDesc is preferred when one exists for a given method.
    DWORD iToken;
    BOOL fHasUnboxedDescs = (bmtMFDescs->ppUnboxMethodDescList != NULL);

    for (iToken = 0; iToken < bmtMetaData->cMethods; iToken++)
    {
        MethodDesc *pEntry = bmtMFDescs->ppMethodDescList[iToken];
        // Value class: substitute the unboxed version where available.
        if (fHasUnboxedDescs && bmtMFDescs->ppUnboxMethodDescList[iToken] != NULL)
            pEntry = bmtMFDescs->ppUnboxMethodDescList[iToken];
        (void) bmtInternal->pModule->StoreMethodDef(bmtMetaData->pMethods[iToken], pEntry);
    }

    for (iToken = 0; iToken < bmtMetaData->cFields; iToken++)
    {
        (void) bmtInternal->pModule->StoreFieldDef(bmtMetaData->pFields[iToken],
                                                   bmtMFDescs->ppFieldDescList[iToken]);
    }
    return S_OK;
}
MethodDesc* EEClass::GetMethodDescForSlot(DWORD slot)
{
    // Thin wrapper over GetUnknownMethodDescForSlot that additionally
    // asserts the class is not a thunking class.
    _ASSERTE(!IsThunking());
    return GetUnknownMethodDescForSlot(slot);
}
/* Given the value class method, find the unboxing Stub for the given method */
/* Given the value class method, find the unboxing stub for it */
MethodDesc* EEClass::GetUnboxingMethodDescForValueClassMethod(MethodDesc *pMD)
{
    // Scans the vtable portion (high slots first) for an unboxing stub whose
    // member token and module match pMD; returns NULL when none is found.
    _ASSERTE(IsValueClass());
    _ASSERTE(!pMD->IsUnboxingStub());
    for (int iSlot = GetNumVtableSlots() - 1; iSlot >= 0; iSlot--) {
        MethodDesc *pCandidate = GetUnknownMethodDescForSlot(iSlot);
        if (pCandidate == NULL || !pCandidate->IsUnboxingStub())
            continue;
        if (pCandidate->GetMemberDef() == pMD->GetMemberDef() &&
            pCandidate->GetModule() == pMD->GetModule())
            return pCandidate;
    }
    return NULL;
}
/* Given the unboxing value class method, find the non-unboxing method */
/* Given the unboxing value class method, find the non-unboxing method */
MethodDesc* EEClass::GetMethodDescForUnboxingValueClassMethod(MethodDesc *pMD)
{
    // Scans the non-vtable method slots (high slots first) for the real
    // (non-stub) MethodDesc whose member token and module match pMD;
    // returns NULL when none is found.
    _ASSERTE(IsValueClass());
    _ASSERTE(pMD->IsUnboxingStub());
    for (int iSlot = m_wNumMethodSlots - 1; iSlot >= GetNumVtableSlots(); iSlot--) {
        MethodDesc *pCandidate = GetUnknownMethodDescForSlot(iSlot);
        if (pCandidate == NULL || pCandidate->IsUnboxingStub())
            continue;
        if (pCandidate->GetMemberDef() == pMD->GetMemberDef() &&
            pCandidate->GetModule() == pMD->GetModule())
            return pCandidate;
    }
    return NULL;
}
SLOT EEClass::GetFixedUpSlot(DWORD slot)
{
    // Reads the vtable entry at 'slot', fixing up jump-target-table entries
    // (prejit artifacts) to the real inherited slot value first.
    _ASSERTE(slot >= 0);   // vacuous for unsigned DWORD; kept for parity
    SLOT addr = m_pMethodTable->GetVtable()[slot];
    //
    // Make sure we're not handing back a jump target
    //
    if (!GetModule()->IsJumpTargetTableEntry(addr))
        return addr;
    return GetModule()->FixupInheritedSlot(GetMethodTable(), slot);
}
MethodDesc* EEClass::GetUnknownMethodDescForSlot(DWORD slot)
{
    // Resolves the vtable slot (fixing up jump targets) and maps the
    // resulting code address back to its MethodDesc.
    _ASSERTE(slot >= 0);
    // DO: IsThunking assert removed because reflection can reflect on this
    SLOT fixedUpAddr = GetFixedUpSlot(slot);
    return GetUnknownMethodDescForSlotAddress(fixedUpAddr);
}
MethodDesc* EEClass::GetUnknownMethodDescForSlotAddress(SLOT addr)
{
    // Maps a code address (the contents of a vtable slot) back to its
    // MethodDesc.  The address may be jitted code, an updateable stub,
    // an FCALL target, or a method prestub; each case is tried in turn.
    IJitManager * pJM = ExecutionManager::FindJitMan(addr);
    if (pJM)
        // Since we are walking in the class these should be methods so the cast should be valid
        return (MethodDesc*)pJM->JitCode2MethodDesc(addr);
    const BYTE *addrOfCode = (const BYTE*)(addr);
    // Updateable stubs forward to the real code; resolve through the stub target.
    if (UpdateableMethodStubManager::CheckIsStub(addrOfCode, &addrOfCode)) {
        pJM = ExecutionManager::FindJitMan((SLOT)addrOfCode);
        _ASSERTE(pJM);
        return (MethodDesc*)pJM->JitCode2MethodDesc((SLOT)addrOfCode);
    }
    // Is it an FCALL?
    MethodDesc* ret = MapTargetBackToMethod((VOID*) addr);
    if (ret != 0) {
        _ASSERTE(ret->GetUnsafeAddrofCode() == addrOfCode);
        return(ret);
    }
    // Otherwise the address must be a prestub: the MethodDesc is laid out at
    // a fixed offset past the prestub call instruction.
    ret = (MethodDesc*) (addrOfCode + METHOD_CALL_PRESTUB_SIZE);
    _ASSERTE(ret->m_pDebugMethodTable == NULL || ret->m_pDebugEEClass == ret->m_pDebugMethodTable->GetClass());
    return(ret);
}
DWORD MethodTable::GetStaticSize()
{
    // Size of the static-field storage region, which lives between the end
    // of the vtable and the start of the interface map.
    BYTE *pStaticsStart = (BYTE *) &m_Vtable[m_cbSlots];
    DWORD cbStatics = (DWORD)((BYTE *) m_pIMap - pStaticsStart);
#ifdef _DEBUG
    // Debug builds place a dummy InterfaceInfo_t (and, for dynamic maps, the
    // count DWORD) between the statics and m_pIMap; exclude them.
    cbStatics -= sizeof(InterfaceInfo_t);
    if (HasDynamicInterfaceMap())
        cbStatics -= sizeof(DWORD);
#endif
    return cbStatics;
}
// Notice whether this class requires finalization
void MethodTable::MaybeSetHasFinalizer()
{
    // Decides whether instances of this type need finalization, i.e. whether
    // the type overrides Object.Finalize with its own implementation.
    _ASSERTE(!HasFinalizer()); // one shot
    // This method is called after we've built the MethodTable.  Since we always
    // load parents before children, this also guarantees that g_pObjectClass is
    // loaded (though the variable may not have been initialized yet if we are
    // just finishing the load of "Object").
    if (g_pObjectClass && !IsInterface() && !IsValueClass())
    {
        WORD slot = s_FinalizerMD->GetSlot();
        // Structs and other objects not derived from Object will get marked as
        // having a finalizer, if they have sufficient virtual methods.  This will
        // only be an issue if they can be allocated in the GC heap (which will
        // cause all sorts of other problems).
        //
        // We are careful to check that we have a method that is distinct from both
        // the JITted and unJITted (prestub) addresses of Object's Finalizer.
        //
        // BUGFIX: use '>' rather than '>='.  'slot' is a zero-based vtable
        // index, so GetVtable()[slot] is only valid when the vtable has MORE
        // than 'slot' entries; with '>=', a type with exactly 'slot' slots
        // caused a one-past-the-end read of its vtable.
        if ((GetClass()->GetNumVtableSlots() > slot) &&
            (GetVtable() [slot] != s_FinalizerMD->GetPreStubAddr()) &&
            (GetVtable() [slot] != s_FinalizerMD->GetAddrofCode()))
        {
            m_wFlags |= enum_flag_HasFinalizer;
        }
    }
}
// From the GC finalizer thread, invoke the Finalize() method on an object.
void MethodTable::CallFinalizer(Object *obj)
{
    // Invoked from the GC finalizer thread to run obj's Finalize() method.
    // All exceptions escaping the finalizer are swallowed, and a pending
    // thread abort raised inside it is reset so the finalizer thread survives.
    COMPLUS_TRY
    {
        // There's no reason to actually set up a frame here.  If we crawl out of the
        // Finalize() method on this thread, we will see FRAME_TOP which indicates
        // that the crawl should terminate.  This is analogous to how KickOffThread()
        // starts new threads in the runtime.
        __try
        {
            // Look up the object's actual Finalize override through its vtable.
            SLOT funcPtr = obj->GetMethodTable()->GetVtable() [s_FinalizerMD->GetSlot()];
#ifdef DEBUGGING_SUPPORTED
            if (CORDebuggerTraceCall())
                g_pDebugInterface->TraceCall((const BYTE *) funcPtr);
#endif // DEBUGGING_SUPPORTED
#ifdef _X86_
            INSTALL_COMPLUS_EXCEPTION_HANDLER();
            // Direct thiscall invocation: 'this' in ecx, no arguments.
            __asm
            {
                mov ecx, [obj]
                call [funcPtr]
                INDEBUG(nop)     // Mark the fact that we can call managed code
            }
            UNINSTALL_COMPLUS_EXCEPTION_HANDLER();
#else
            // Non-x86: go through the generic MethodDesc::Call path.
            INT64 arg = (INT64)obj;
            s_FinalizerMD->Call(&arg);
#endif
        }
        __except(ThreadBaseExceptionFilter(GetExceptionInformation(),
                                           GetThread(),
                                           FinalizerThread))
        {
            _ASSERTE(!"ThreadBaseExceptionFilter returned EXCEPTION_EXECUTE_HANDLER");
        }
    }
    COMPLUS_CATCH
    {
        // quietly swallow all errors
        Thread* pCurThread = GetThread();
        _ASSERTE(GCHeap::GetFinalizerThread() == pCurThread);
        // Reset any abort the finalizer requested for this thread -- the
        // finalizer thread must keep running.
        if (pCurThread->IsAbortRequested())
            pCurThread->UserResetAbort();
    }
    COMPLUS_END_CATCH
}
// Set up the system to support finalization
void MethodTable::InitForFinalization()
{
_ASSERTE(s_FinalizerMD == 0);
s_FinalizerMD = g_Mscorlib.GetMethod(METHOD__OBJECT__FINALIZE);
}
// Release resources associated with supporting finalization
#ifdef SHOULD_WE_CLEANUP
void MethodTable::TerminateForFinalization()
{
s_FinalizerMD = 0;
}
#endif /* SHOULD_WE_CLEANUP */
//
// Finds a method by name and signature, where scope is the scope in which the signature is defined.
//
// pDefMT             - when non-NULL, methodimpl entries are matched against
//                      the declaring method table in addition to name/sig.
// requiredAttributes - attribute bits the match must carry (nil = don't care).
// typeHnd            - for shared (array) method tables, supplies the type
//                      instantiation used during signature comparison.
// Returns the matching MethodDesc, or NULL if none is found anywhere in the
// hierarchy.
//
MethodDesc *EEClass::FindMethod(LPCUTF8 pszName,
                                PCCOR_SIGNATURE pSignature,
                                DWORD cSignature,
                                Module* pModule,
                                DWORD requiredAttributes,
                                MethodTable *pDefMT,
                                BOOL bCaseSensitive,
                                TypeHandle typeHnd)
{
    signed long i;
    _ASSERTE(!IsThunking());
    // Retrive the right comparition function to use.
    UTF8StringCompareFuncPtr StrCompFunc = bCaseSensitive ? strcmp : _stricmp;
    // shared method tables (arrays) need to pass instantiation information too
    TypeHandle typeVarsBuff;
    TypeHandle* typeVars = 0;
    if (IsArrayClass() && !typeHnd.IsNull()) {
        typeVarsBuff = typeHnd.AsTypeDesc()->GetTypeParam();
        typeVars = &typeVarsBuff;
    }
    // Statistically it's most likely for a method to be found in non-vtable portion of this class's members, then in the
    // vtable of this class's declared members, then in the inherited portion of the vtable, so we search backwards.
    // For value classes, if it's a value class method, we want to return the duplicated MethodDesc, not the one in the vtable
    // section. We'll find the one in the duplicate section before the one in the vtable section, so we're ok.
    // Search non-vtable portion of this class first
    if (pDefMT)
    {
        // Caller constrained the declaring method table: methodimpl entries
        // must be expanded and matched against their implemented methods.
        for (i = m_wNumMethodSlots-1; i >= 0; i--)
        {
            MethodDesc *pCurMethod = GetUnknownMethodDescForSlot(i);
            if (!pCurMethod)
                continue;
            if (pCurMethod->IsMethodImpl())
            {
                MethodImpl* data = MethodImpl::GetMethodImplData(pCurMethod);
                _ASSERTE(data && "This method should be a method impl");
                MethodDesc **apImplementedMDs = data->GetImplementedMDs();
                DWORD *aSlots = data->GetSlots();
                // Check each method this impl implements for a decl match.
                for (DWORD iMethImpl = 0; iMethImpl < data->GetSize(); iMethImpl++)
                {
                    MethodDesc *pCurImplMD = apImplementedMDs[iMethImpl];
                    // Prejitted images may leave NULL in this table if
                    // the methoddesc is declared in another module.
                    // In this case we need to manually compute & restore it
                    // from the slot number.
                    if (pCurImplMD == NULL)
                        pCurImplMD = data->RestoreSlot(iMethImpl, GetMethodTable());
                    if (pCurImplMD->GetMethodTable() == pDefMT && StrCompFunc(pszName, pCurImplMD->GetName((USHORT) aSlots[iMethImpl])) == 0)
                    {
                        PCCOR_SIGNATURE pCurMethodSig;
                        DWORD cCurMethodSig;
                        pCurImplMD->GetSig(&pCurMethodSig, &cCurMethodSig);
                        if (MetaSig::CompareMethodSigs(pSignature, cSignature, pModule,
                                                       pCurMethodSig, cCurMethodSig,
                                                       pCurImplMD->GetModule(), typeVars) &&
                            (IsNilToken(requiredAttributes) ||
                             (requiredAttributes & pCurMethod->GetAttrs()) == requiredAttributes))
                            // Note: the *implementing* method is returned, but
                            // the attribute filter is applied to it as well.
                            return pCurMethod;
                    }
                }
            }
            else
            {
                PCCOR_SIGNATURE pCurMethodSig;
                DWORD cCurMethodSig;
                if (StrCompFunc(pszName, pCurMethod->GetName((USHORT) i)) == 0)
                {
                    pCurMethod->GetSig(&pCurMethodSig, &cCurMethodSig);
                    // Not in vtable section, so don't worry about value classes
                    if (MetaSig::CompareMethodSigs(pSignature, cSignature, pModule, pCurMethodSig, cCurMethodSig, pCurMethod->GetModule(), typeVars) &&
                        (IsNilToken(requiredAttributes) ||
                         (requiredAttributes & pCurMethod->GetAttrs()) == requiredAttributes))
                        return pCurMethod;
                }
            }
        }
    }
    else
    {
        // No declaring-MT constraint: simple name + signature scan.
        for (i = m_wNumMethodSlots-1; i >= 0; i--)
        {
            MethodDesc *pCurMethod = GetUnknownMethodDescForSlot(i);
            if ((pCurMethod != NULL) && (StrCompFunc(pszName, pCurMethod->GetName((USHORT) i)) == 0))
            {
                PCCOR_SIGNATURE pCurMethodSig;
                DWORD cCurMethodSig;
                pCurMethod->GetSig(&pCurMethodSig, &cCurMethodSig);
                // Not in vtable section, so don't worry about value classes
                if (MetaSig::CompareMethodSigs(pSignature, cSignature, pModule, pCurMethodSig, cCurMethodSig, pCurMethod->GetModule(), typeVars) &&
                    (IsNilToken(requiredAttributes) ||
                     (requiredAttributes & pCurMethod->GetAttrs()) == requiredAttributes))
                    return pCurMethod;
            }
        }
    }
    if (IsValueClass()) {
        // we don't allow inheritance on value type (yet)
        _ASSERTE(!GetParentClass() || !GetParentClass()->IsValueClass());
        return NULL;
    }
    // Recurse up the hierarchy if the method was not found.
    //@todo: This routine might be factored slightly to improve perf.
    _ASSERTE(IsRestored());
    if (GetParentClass() != NULL)
    {
        // Note: the pDefMT constraint is intentionally dropped for the parent
        // search (NULL is passed).
        MethodDesc *md = GetParentClass()->FindMethod(pszName, pSignature, cSignature, pModule,
                                                      requiredAttributes, NULL, bCaseSensitive, typeHnd);
        // Don't inherit constructors from parent classes.  It is important to forbid this,
        // because the JIT needs to get the class handle from the memberRef, and when the
        // constructor is inherited, the JIT will get the class handle for the parent class
        // (and not allocate enough space, etc.). See bug #50035 for details.
        if (md)
        {
            _ASSERTE(strcmp(pszName, md->GetName()) == 0);
            if (IsMdInstanceInitializer(md->GetAttrs(), pszName))
            {
                md = NULL;
            }
        }
        return md;
    }
    return NULL;
}
//
// Are more optimised case if we are an interface - we know that the vtable won't be pointing to JITd code
// EXCEPT when it's a <clinit>
//
// On success *slotNum receives the matching slot index. Returns NULL when no
// method with the given name and signature exists on this interface.
//
MethodDesc *EEClass::InterfaceFindMethod(LPCUTF8 pszName, PCCOR_SIGNATURE pSignature, DWORD cSignature, Module* pModule, DWORD *slotNum, BOOL bCaseSensitive)
{
    DWORD i;
    SLOT* s = m_pMethodTable->GetVtable();
    _ASSERTE(!IsThunking());
    // Retrive the right comparition function to use.
    UTF8StringCompareFuncPtr StrCompFunc = bCaseSensitive ? strcmp : _stricmp;
    // This cannot be a clinit
    // The vtable slot points at an un-JITted prestub here; the MethodDesc is
    // recovered by skipping METHOD_CALL_PRESTUB_SIZE bytes past the slot's
    // target (interface slots are never JITted code, per the header comment).
    for (i = 0; i < GetNumVtableSlots(); i++)
    {
        MethodDesc *pCurMethod = (MethodDesc*) (((BYTE*)s[i]) + METHOD_CALL_PRESTUB_SIZE);
        _ASSERTE(pCurMethod != NULL);
        if (StrCompFunc(pszName, pCurMethod->GetNameOnNonArrayClass()) == 0)
        {
            PCCOR_SIGNATURE pCurMethodSig;
            DWORD cCurMethodSig;
            pCurMethod->GetSig(&pCurMethodSig, &cCurMethodSig);
            if (MetaSig::CompareMethodSigs(pSignature, cSignature, pModule, pCurMethodSig, cCurMethodSig, pCurMethod->GetModule()))
            {
                *slotNum = i;
                return pCurMethod;
            }
        }
    }
    // One can be a clinit
    // Non-vtable slots (possibly the class initializer) are looked up through
    // the regular slot accessor instead of prestub arithmetic.
    for (i = GetNumVtableSlots(); i < m_wNumMethodSlots; i++)
    {
        MethodDesc *pCurMethod = (MethodDesc*) GetUnknownMethodDescForSlot(i);
        _ASSERTE(pCurMethod != NULL);
        if (StrCompFunc(pszName, pCurMethod->GetNameOnNonArrayClass()) == 0)
        {
            PCCOR_SIGNATURE pCurMethodSig;
            DWORD cCurMethodSig;
            pCurMethod->GetSig(&pCurMethodSig, &cCurMethodSig);
            if (MetaSig::CompareMethodSigs(pSignature, cSignature, pModule, pCurMethodSig, cCurMethodSig, pCurMethod->GetModule()))
            {
                *slotNum = i;
                return pCurMethod;
            }
        }
    }
    return NULL;
}
// Convenience overload: resolve a hard-coded metasig into its binary form and
// forward to the signature-based FindMethod. Returns NULL when the signature
// cannot be converted.
MethodDesc *EEClass::FindMethod(LPCUTF8 pwzName, LPHARDCODEDMETASIG pwzSignature, MethodTable *pDefMT, BOOL bCaseSensitive)
{
    _ASSERTE(!IsThunking());
    PCCOR_SIGNATURE pSig;
    ULONG cbSig;
    HRESULT hr = pwzSignature->GetBinaryForm(&pSig, &cbSig);
    if (FAILED(hr))
        return NULL;
    return FindMethod(pwzName, pSig, cbSig, SystemDomain::SystemModule(),
                      mdTokenNil, pDefMT, bCaseSensitive);
}
// Look up a MethodDesc from a method token (either a methoddef or a
// memberref) via the owning module's lookup maps, making sure the owning
// method table is restored before handing the desc out.
// Returns NULL when the token is not present in the maps.
MethodDesc *EEClass::FindMethod(mdMethodDef mb)
{
    _ASSERTE(!IsThunking());
    // We have the EEClass (this) and so lets just look this up in the ridmap.
    MethodDesc *pDatum = NULL;
    if (TypeFromToken(mb) == mdtMemberRef)
        pDatum = GetModule()->LookupMemberRefAsMethod(mb);
    else
        pDatum = GetModule()->LookupMethodDef(mb);
    if (pDatum != NULL)
        pDatum->GetMethodTable()->CheckRestore();
    // Simplified: the former "if non-NULL return it, else return NULL" is just
    // a plain return of the (possibly NULL) result.
    return pDatum;
}
// Search this class and its ancestors for the get_/set_ accessor of the named
// property. Non-virtual accessors are not copied into derived method tables,
// so the whole parent chain must be walked explicitly.
MethodDesc *EEClass::FindPropertyMethod(LPCUTF8 pszName, EnumPropertyMethods Method, BOOL bCaseSensitive)
{
    _ASSERTE(!IsThunking());
    _ASSERTE(!IsArrayClass());
    // @TODO (DM): Retrieve the property setter from the metadata.
    // Pick the comparison routine matching the requested case sensitivity.
    UTF8StringCompareFuncPtr pfnCompare = bCaseSensitive ? strcmp : _stricmp;
    // Accessor prefix table indexed by EnumPropertyMethods; must stay in sync
    // with the enum declared in class.h.
    static LPCUTF8 aFormatStrings[] =
    {
        "get_%s",
        "set_%s"
    };
    // Build "<prefix><propertyname>" on the stack.
    LPUTF8 szAccessor = (LPUTF8)_alloca(strlen(pszName) + strlen(aFormatStrings[Method]) + 1);
    sprintf(szAccessor, aFormatStrings[Method], pszName);
    // Walk from the most derived class towards the root, scanning each class's
    // method slots from the end backwards.
    for (EEClass *pSearch = this; pSearch; pSearch = pSearch->GetParentClass())
    {
        for (int iSlot = pSearch->m_wNumMethodSlots - 1; iSlot >= 0; iSlot--)
        {
            MethodDesc *pMD = pSearch->GetUnknownMethodDescForSlot(iSlot);
            if (pMD == NULL)
                continue;
            if (pfnCompare(szAccessor, pMD->GetNameOnNonArrayClass()) == 0)
                return pMD;
        }
    }
    return NULL;
}
// Search this class and its ancestors for the add_/remove_/raise_ accessor of
// the named event. Non-virtual accessors are not copied into derived method
// tables, so the whole parent chain must be walked explicitly.
MethodDesc *EEClass::FindEventMethod(LPCUTF8 pszName, EnumEventMethods Method, BOOL bCaseSensitive)
{
    _ASSERTE(!IsThunking());
    _ASSERTE(!IsArrayClass());
    // @TODO (DM): Retrieve the property setter from the metadata.
    // Pick the comparison routine matching the requested case sensitivity.
    UTF8StringCompareFuncPtr pfnCompare = bCaseSensitive ? strcmp : _stricmp;
    // Accessor prefix table indexed by EnumEventMethods; must stay in sync
    // with the enum declared in class.h.
    static LPCUTF8 aFormatStrings[] =
    {
        "add_%s",
        "remove_%s",
        "raise_%s"
    };
    // Build "<prefix><eventname>" on the stack.
    LPUTF8 szAccessor = (LPUTF8)_alloca(strlen(pszName) + strlen(aFormatStrings[Method]) + 1);
    sprintf(szAccessor, aFormatStrings[Method], pszName);
    // Walk from the most derived class towards the root, scanning each class's
    // method slots from the end backwards.
    for (EEClass *pSearch = this; pSearch; pSearch = pSearch->GetParentClass())
    {
        for (int iSlot = pSearch->m_wNumMethodSlots - 1; iSlot >= 0; iSlot--)
        {
            MethodDesc *pMD = pSearch->GetUnknownMethodDescForSlot(iSlot);
            if (pMD == NULL)
                continue;
            if (pfnCompare(szAccessor, pMD->GetNameOnNonArrayClass()) == 0)
                return pMD;
        }
    }
    return NULL;
}
// Return the first method whose name matches, searching derived-to-base and
// last-slot-first; signatures are not considered, so overloads are ambiguous.
MethodDesc *EEClass::FindMethodByName(LPCUTF8 pszName, BOOL bCaseSensitive)
{
    _ASSERTE(!IsThunking());
    _ASSERTE(!IsArrayClass());
    // Pick the comparison routine matching the requested case sensitivity.
    UTF8StringCompareFuncPtr pfnCompare = bCaseSensitive ? strcmp : _stricmp;
    // Walk the inheritance chain starting with the most derived class.
    for (EEClass *pSearch = this; pSearch != NULL; pSearch = pSearch->m_pParentClass)
    {
        for (int iSlot = pSearch->m_wNumMethodSlots - 1; iSlot >= 0; iSlot--)
        {
            MethodDesc *pMD = pSearch->GetUnknownMethodDescForSlot(iSlot);
            if (pMD == NULL)
                continue;
            if (pfnCompare(pszName, pMD->GetName((USHORT) iSlot)) == 0)
                return pMD;
        }
    }
    return NULL;
}
// Look up a field by name and hard-coded metasig. Returns NULL when the
// signature cannot be converted or no matching field exists on this class.
FieldDesc *EEClass::FindField(LPCUTF8 pszName, LPHARDCODEDMETASIG pszSignature, BOOL bCaseSensitive)
{
    PCCOR_SIGNATURE pBinarySig;
    ULONG cbBinarySigLength;
    // The following assert is very important, but we need to special case it enough
    // to allow us access to the legitimate fields of a context proxy object.
    _ASSERTE(!IsThunking() ||
             !strcmp(pszName, "actualObject") ||
             !strcmp(pszName, "contextID") ||
             !strcmp(pszName, "_rp") ||
             !strcmp(pszName, "_stubData") ||
             !strcmp(pszName, "_pMT") ||
             !strcmp(pszName, "_pInterfaceMT") ||
             !strcmp(pszName, "_stub"));
    if (FAILED(pszSignature->GetBinaryForm(&pBinarySig, &cbBinarySigLength)))
    {
        return NULL;
    }
    return FindField(pszName, pBinarySig, cbBinarySigLength, SystemDomain::SystemModule(), bCaseSensitive);
}
// Search only this class's own FieldDescs (instance + static, excluding
// inherited ones) for a field with matching name and signature.
// Returns NULL for array classes (no metadata) or when no match is found.
FieldDesc *EEClass::FindField_Int(LPCUTF8 pszName, PCCOR_SIGNATURE pSignature, DWORD cSignature, Module* pModule, BOOL bCaseSensitive)
{
    DWORD i;
    DWORD dwFieldDescsToScan;
    IMDInternalImport *pInternalImport = GetMDImport(); // All explicitly declared fields in this class will have the same scope
    _ASSERTE(IsRestored());
    // Retrive the right comparition function to use.
    UTF8StringCompareFuncPtr StrCompFunc = bCaseSensitive ? strcmp : _stricmp;
    // The following assert is very important, but we need to special case it enough
    // to allow us access to the legitimate fields of a context proxy object.
    _ASSERTE(!IsThunking() ||
             !strcmp(pszName, "actualObject") ||
             !strcmp(pszName, "contextID") ||
             !strcmp(pszName, "_rp") ||
             !strcmp(pszName, "_stubData") ||
             !strcmp(pszName, "_pMT") ||
             !strcmp(pszName, "_pInterfaceMT") ||
             !strcmp(pszName, "_stub") );
    // Array classes don't have fields, and don't have metadata
    if (IsArrayClass())
        return NULL;
    // Scan the FieldDescs of this class
    // m_pFieldDescList holds only this class's newly-introduced instance
    // fields plus its statics, so subtract the parent's instance-field count.
    if (GetParentClass() != NULL)
        dwFieldDescsToScan = m_wNumInstanceFields - GetParentClass()->m_wNumInstanceFields + m_wNumStaticFields;
    else
        dwFieldDescsToScan = m_wNumInstanceFields + m_wNumStaticFields;
    for (i = 0; i < dwFieldDescsToScan; i++)
    {
        LPCUTF8 szMemberName;
        FieldDesc * pFD = &m_pFieldDescList[i];
        mdFieldDef mdField = pFD->GetMemberDef();
        // Check is valid FieldDesc, and not some random memory
        _ASSERTE(pFD->GetMethodTableOfEnclosingClass()->GetClass()->GetMethodTable() ==
                 pFD->GetMethodTableOfEnclosingClass());
        szMemberName = pInternalImport->GetNameOfFieldDef(mdField);
        if (StrCompFunc(szMemberName, pszName) == 0)
        {
            // Name matched; confirm the field signature as well.
            PCCOR_SIGNATURE pMemberSig;
            DWORD cMemberSig;
            pMemberSig = pInternalImport->GetSigOfFieldDef(
                mdField,
                &cMemberSig
                );
            if (MetaSig::CompareFieldSigs(
                pMemberSig,
                cMemberSig,
                GetModule(),
                pSignature,
                cSignature,
                pModule))
            {
                return pFD;
            }
        }
    }
    return NULL;
}
// Convenience overload: convert the hard-coded metasig to its binary form and
// search the full inheritance chain for the field.
FieldDesc *EEClass::FindFieldInherited(LPCUTF8 pzName, LPHARDCODEDMETASIG pzSignature, BOOL bCaseSensitive)
{
    _ASSERTE(!IsThunking());
    PCCOR_SIGNATURE pSig;
    ULONG cbSig;
    HRESULT hr = pzSignature->GetBinaryForm(&pSig, &cbSig);
    if (FAILED(hr))
        return NULL;
    return FindFieldInherited(pzName, pSig, cbSig,
                              SystemDomain::SystemModule(), bCaseSensitive);
}
// Search for a field by name and binary signature, probing each class from
// the most derived up to the root of the hierarchy.
FieldDesc *EEClass::FindFieldInherited(LPCUTF8 pszName, PCCOR_SIGNATURE pSignature, DWORD cSignature, Module* pModule, BOOL bCaseSensitive)
{
    _ASSERTE(IsRestored());
    // The following assert is very important, but we need to special case it enough
    // to allow us access to the legitimate fields of a context proxy object.
    _ASSERTE(!IsThunking() ||
             !strcmp(pszName, "actualObject") ||
             !strcmp(pszName, "contextID") ||
             !strcmp(pszName, "_rp") ||
             !strcmp(pszName, "_stubData") ||
             !strcmp(pszName, "_pMT") ||
             !strcmp(pszName, "_pInterfaceMT") ||
             !strcmp(pszName, "_stub"));
    // Probe each level of the hierarchy until the field turns up.
    for (EEClass *pSearch = this; pSearch != NULL; pSearch = pSearch->GetParentClass())
    {
        FieldDesc *pFD = pSearch->FindField_Int(pszName, pSignature, cSignature, pModule, bCaseSensitive);
        if (pFD != NULL)
            return pFD;
    }
    return NULL;
}
// Convenience overload: convert the hard-coded metasig to its binary form and
// search for a matching instance constructor.
MethodDesc *EEClass::FindConstructor(LPHARDCODEDMETASIG pwzSignature)
{
    PCCOR_SIGNATURE pSig;
    ULONG cbSig;
    HRESULT hr = pwzSignature->GetBinaryForm(&pSig, &cbSig);
    if (FAILED(hr))
        return NULL;
    return FindConstructor(pSig, cbSig, SystemDomain::SystemModule());
}
// Search the non-vtable method slots for an instance constructor whose
// signature matches. Constructors are RTSpecialName non-static methods;
// class initializers (static) are explicitly skipped.
// Returns NULL for array classes (no metadata) or when no match is found.
MethodDesc *EEClass::FindConstructor(PCCOR_SIGNATURE pSignature,DWORD cSignature, Module* pModule)
{
    DWORD i;
    //_ASSERTE(!IsThunking());
    // Array classes don't have metadata
    if (IsArrayClass())
        return NULL;
    // NOTE(review): removed the unused local 'pVtable' — the GetVtable()
    // result was assigned but never read anywhere in this function.
    DWORD dwCurMethodAttrs;
    // Constructors live in the slots past the vtable portion.
    for (i = GetNumVtableSlots(); i < m_wNumMethodSlots; i++)
    {
        PCCOR_SIGNATURE pCurMethodSig;
        DWORD cCurMethodSig;
        MethodDesc *pCurMethod = GetUnknownMethodDescForSlot(i);
        if (pCurMethod == NULL)
            continue;
        dwCurMethodAttrs = pCurMethod->GetAttrs();
        // Constructors are marked RTSpecialName.
        if(!IsMdRTSpecialName(dwCurMethodAttrs))
            continue;
        // Don't want class initializers.
        if (IsMdStatic(dwCurMethodAttrs))
            continue;
        // Find only the constructor for this object
        _ASSERTE(pCurMethod->GetMethodTable() == this->GetMethodTable());
        pCurMethod->GetSig(&pCurMethodSig, &cCurMethodSig);
        if (MetaSig::CompareMethodSigs(pSignature, cSignature, pModule, pCurMethodSig, cCurMethodSig, pCurMethod->GetModule()))
            return pCurMethod;
    }
    return NULL;
}
// For an interface marked as a COM class interface, read the CoClassAttribute
// custom-attribute blob, resolve the named class, and cache it in
// m_pCoClassForIntf. Throws (via COMPlusThrow/IfFailThrow) on load failure.
void EEClass::SetupCoClassAttribInfo()
{
    THROWSCOMPLUSEXCEPTION();
    // Nothing to do unless this interface was flagged as a COM class interface.
    if(IsComClassInterface() == 0)
        return;
    _ASSERTE(IsInterface());
    TypeHandle CoClassType;
    AppDomain *pCurrDomain = SystemDomain::GetCurrentDomain();
    OBJECTREF pThrowable = NULL;
    const BYTE *pVal = NULL;
    ULONG cbVal = 0;
    ULONG cbcb = 0;
    ULONG cbStr = 0;
    CQuickArray<CHAR> qbClassName;
    HRESULT hr = GetMDImport()->GetCustomAttributeByName(GetCl(), "System.Runtime.InteropServices.CoClassAttribute" , (const void**)&pVal, &cbVal);
    if (hr == S_OK)
    {
        BEGIN_ENSURE_COOPERATIVE_GC();
        // Skip the CA type prefix.
        pVal += 2;
        // Retrieve the COM source interface class name.
        // The name is a compressed-length-prefixed UTF8 string in the blob.
        cbcb = CorSigUncompressData((PCCOR_SIGNATURE)pVal, (ULONG*)&cbStr);
        pVal += cbcb;
        // Copy the name to a temporary buffer and NULL terminate it.
        IfFailThrow(qbClassName.ReSize(cbStr + 1));
        memcpyNoGCRefs(qbClassName.Ptr(), pVal, cbStr);
        qbClassName[cbStr] = 0;
        pVal += cbStr;
        // Try to load the class using its name as a fully qualified name. If that fails,
        // then we try to load it in the assembly of the current class.
        GCPROTECT_BEGIN(pThrowable);
        {
            CoClassType = pCurrDomain->FindAssemblyQualifiedTypeHandle(qbClassName.Ptr(), true, GetAssembly(), NULL, &pThrowable);
            if (CoClassType.IsNull())
                COMPlusThrow(pThrowable);
        }
        GCPROTECT_END();
        END_ENSURE_COOPERATIVE_GC();
        // Set the source interface and event provider classes.
        m_pCoClassForIntf = CoClassType.GetClass();
    }
}
// Probe the metadata for the CoClassAttribute custom attribute and, when it
// is present, flag this interface as a COM class interface.
void EEClass::GetCoClassAttribInfo()
{
    const BYTE *pbAttr = NULL;
    ULONG cbAttr = 0;
    // Retrieve the CoClassAttribute CA.
    HRESULT hr = GetMDImport()->GetCustomAttributeByName(GetCl(), INTEROP_COCLASS_TYPE, (const void**)&pbAttr, &cbAttr);
    if (hr == S_OK)
        SetIsComClassInterface();
}
// Decode the ComEventInterface custom-attribute blob into its two class
// names (source interface, then event provider), load both types, and return
// their EEClasses through the out parameters. Throws on load failure.
void EEClass::GetEventInterfaceInfo(EEClass **ppSrcItfClass, EEClass **ppEvProvClass)
{
    THROWSCOMPLUSEXCEPTION();
    TypeHandle EventProvType;
    TypeHandle SrcItfType;
    AppDomain *pCurrDomain = SystemDomain::GetCurrentDomain();
    OBJECTREF pThrowable = NULL;
    const BYTE *pVal = NULL;
    ULONG cbVal = 0;
    ULONG cbcb = 0;
    ULONG cbStr = 0;
    CQuickArray<CHAR> qbClassName;
    // Retrieve the ComEventProviderAttribute CA.
    // The caller is expected to have verified the attribute exists (asserted).
    HRESULT hr = GetMDImport()->GetCustomAttributeByName(GetCl(), INTEROP_COMEVENTINTERFACE_TYPE, (const void**)&pVal, &cbVal);
    _ASSERTE(hr == S_OK);
    // Skip the CA type prefix.
    pVal += 2;
    // Retrieve the COM source interface class name.
    // Each name in the blob is a compressed-length-prefixed UTF8 string.
    cbcb = CorSigUncompressData((PCCOR_SIGNATURE)pVal, (ULONG*)&cbStr);
    pVal += cbcb;
    // Copy the name to a temporary buffer and NULL terminate it.
    IfFailThrow(qbClassName.ReSize(cbStr + 1));
    memcpyNoGCRefs(qbClassName.Ptr(), pVal, cbStr);
    qbClassName[cbStr] = 0;
    pVal += cbStr;
    // Try to load the class using its name as a fully qualified name. If that fails,
    // then we try to load it in the assembly of the current class.
    GCPROTECT_BEGIN(pThrowable);
    {
        SrcItfType = pCurrDomain->FindAssemblyQualifiedTypeHandle(qbClassName.Ptr(), true, GetAssembly(), NULL, &pThrowable);
        if (SrcItfType.IsNull())
            COMPlusThrow(pThrowable);
    }
    GCPROTECT_END();
    // Retrieve the COM event provider class name.
    cbcb = CorSigUncompressData((PCCOR_SIGNATURE)pVal, (ULONG*)&cbStr);
    pVal += cbcb;
    // Copy the name to a temporary buffer and NULL terminate it.
    IfFailThrow(qbClassName.ReSize(cbStr + 1));
    memcpyNoGCRefs(qbClassName.Ptr(), pVal, cbStr);
    qbClassName[cbStr] = 0;
    pVal += cbStr;
    // Try to load the class using its name as a fully qualified name. If that fails,
    // then we try to load it in the assembly of the current class.
    GCPROTECT_BEGIN(pThrowable);
    {
        EventProvType = pCurrDomain->FindAssemblyQualifiedTypeHandle(qbClassName.Ptr(), true, GetAssembly(), NULL, &pThrowable);
        if (EventProvType.IsNull())
            COMPlusThrow(pThrowable);
    }
    GCPROTECT_END();
    // Set the source interface and event provider classes.
    *ppSrcItfClass = SrcItfType.GetClass();
    *ppEvProvClass = EventProvType.GetClass();
}
// We find a lot of information from the VTable.  But sometimes the VTable is a
// thunking layer rather than the true type's VTable.  For instance, context
// proxies use a single VTable for proxies to all the types we've loaded.
// The following service adjusts a EEClass based on the supplied instance.  As
// we add new thunking layers, we just need to teach this service how to navigate
// through them.
//
// (Note: 'or' below is the historical parameter name for the object reference.)
EEClass *EEClass::AdjustForThunking(OBJECTREF or)
{
    EEClass *pClass = this;
    _ASSERTE((or->GetClass() == this) ||
             or->GetClass()->IsThunking());
    if (IsThunking())
    {
        if(GetMethodTable()->IsTransparentProxyType())
        {
            // Transparent proxy: ask the TP method table for the real class.
            pClass = CTPMethodTable::GetClassBeingProxied(or);
        }
        else
        {
            // Other thunking layers: the object itself knows its real class.
            pClass = or->GetClass();
        }
        _ASSERTE(!pClass->IsThunking());
    }
    return pClass;
}
// Return the default interface of a COM class interface: either the first
// entry in the interface map (normal scheme) or the class interface itself
// when it declares its methods directly.
EEClass *EEClass::GetDefItfForComClassItf()
{
    _ASSERTE(IsComClassInterface());
    if (GetMethodTable()->GetNumInterfaces() == 0)
    {
        // The COM class interface has the methods directly on itself.
        // Because of this we need to consider it to be the default interface.
        return this;
    }
    // The COM class interface uses the normal scheme which is to have no
    // methods and to implement default interface and optionnally the
    // default source interface. In this scheme, the first implemented
    // interface is the default interface which we return.
    _ASSERTE(GetMethodTable()->GetInterfaceMap());
    return GetMethodTable()->GetInterfaceMap()[0].m_pMethodTable->GetClass();
}
// MethodTable counterpart of EEClass::AdjustForThunking: map a thunking
// method table (e.g. a transparent-proxy MT) to the real type's method table
// for the given instance. ('or' is the historical object-reference name.)
MethodTable *MethodTable::AdjustForThunking(OBJECTREF or)
{
    MethodTable *pMT = this;
    _ASSERTE(or->GetMethodTable() == this);
    if (IsThunking())
    {
        if(IsTransparentProxyType())
        {
            // Transparent proxy: resolve through the proxied class.
            pMT = CTPMethodTable::GetClassBeingProxied(or)->GetMethodTable();
        }
        else
        {
            pMT = or->GetMethodTable();
        }
        _ASSERTE(!pMT->IsThunking());
    }
    return pMT;
}
//
// Helper routines for the macros defined at the top of this class.
// You probably should not use these functions directly.
//
// Builds the fully qualified (namespace + name) UTF8 name into 'buf',
// prepending the names of all enclosing classes for nested types.
// Returns 'buf' on success, NULL on failure (no name or buffer too small).
LPUTF8 EEClass::_GetFullyQualifiedNameForClassNestedAware(LPUTF8 buf, DWORD dwBuffer)
{
    LPCUTF8 pszNamespace;
    LPCUTF8 pszName;
    mdTypeDef mdEncl;
    IMDInternalImport *pImport;
    CQuickBytes qb;
    pszName = GetFullyQualifiedNameInfo(&pszNamespace);
    if (pszName == NULL)
        return NULL;
    pImport = this->GetModule()->GetMDImport();
    mdEncl = this->GetCl();
    DWORD dwAttr;
    this->GetMDImport()->GetTypeDefProps(this->GetCl(), &dwAttr, NULL);
    if (IsTdNested(dwAttr))
    {   // Build the nesting chain.
        // Walk outward through the enclosing classes, prefixing each
        // enclosing name onto the accumulated nested-type name.
        while (SUCCEEDED(pImport->GetNestedClassProps(mdEncl, &mdEncl))) {
            CQuickBytes qb2;
            CQuickBytes qb3;
            LPCUTF8 szEnclName;
            LPCUTF8 szEnclNameSpace;
            pImport->GetNameOfTypeDef(mdEncl,
                                      &szEnclName,
                                      &szEnclNameSpace);
            ns::MakePath(qb2, szEnclNameSpace, szEnclName);
            ns::MakeNestedTypeName(qb3, (LPCUTF8) qb2.Ptr(), pszName);
            // @todo: this should be a SIZE_T
            SIZE_T sLen = strlen((LPCUTF8) qb3.Ptr()) + 1;
            // Keep the partial result alive in 'qb' across iterations.
            strncpy((LPUTF8) qb.Alloc(sLen), (LPCUTF8) qb3.Ptr(), sLen);
            pszName = (LPCUTF8) qb.Ptr();
        }
    }
    if (FAILED(StoreFullyQualifiedName(buf, dwBuffer, pszNamespace, pszName)))
        return NULL;
    return buf;
}
// Wide-char wrapper around the nested-aware UTF8 name builder: format into a
// temporary UTF8 buffer, then convert to Unicode in 'buf'.
// Returns 'buf' on success, NULL on any failure (allocation, name
// construction, or character conversion).
LPWSTR EEClass::_GetFullyQualifiedNameForClassNestedAware(LPWSTR buf, DWORD dwBuffer)
{
    CQuickSTR szBuffer;
    if (FAILED(szBuffer.ReSize(dwBuffer)))
        return NULL;
    // BUGFIX: the UTF8 builder can fail and return NULL, leaving the temporary
    // buffer's contents undefined — bail out instead of converting garbage.
    if (_GetFullyQualifiedNameForClassNestedAware(szBuffer.Ptr(), dwBuffer) == NULL)
        return NULL;
    if(!WszMultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, szBuffer.Ptr(), -1, buf, dwBuffer))
        return NULL;
    return buf;
}
// Builds the fully qualified (namespace + name) UTF8 name into 'buf'.
// Array classes get a constructed name (element type + rank); other classes
// are read from metadata. Returns 'buf' on success, NULL on failure or when
// the class has no typedef token.
LPUTF8 EEClass::_GetFullyQualifiedNameForClass(LPUTF8 buf, DWORD dwBuffer)
{
    if (IsArrayClass())
    {
        // Arrays have no metadata name; synthesize one from element and rank.
        ArrayClass *pArrayClass = (ArrayClass*)this;
        TypeDesc::ConstructName(GetMethodTable()->GetNormCorElementType(),
                                pArrayClass->GetElementTypeHandle(),
                                pArrayClass->GetRank(),
                                buf, dwBuffer);
        return buf;
    }
    else if (!IsNilToken(m_cl))
    {
        LPCUTF8 szNamespace;
        LPCUTF8 szName;
        GetMDImport()->GetNameOfTypeDef(m_cl, &szName, &szNamespace);
        if (FAILED(StoreFullyQualifiedName(buf, dwBuffer, szNamespace, szName)))
            return NULL;
    }
    else
        return NULL;
    return buf;
}
// Wide-char wrapper around the UTF8 name builder: format into a temporary
// UTF8 buffer, then convert to Unicode in 'buf'.
// Returns 'buf' on success, NULL on any failure (allocation, name
// construction, or character conversion).
LPWSTR EEClass::_GetFullyQualifiedNameForClass(LPWSTR buf, DWORD dwBuffer)
{
    CQuickSTR szBuffer;
    if (FAILED(szBuffer.ReSize(dwBuffer)))
        return NULL;
    // BUGFIX: the UTF8 builder can fail and return NULL, leaving the temporary
    // buffer's contents undefined — bail out instead of converting garbage.
    if (_GetFullyQualifiedNameForClass(szBuffer.Ptr(), dwBuffer) == NULL)
        return NULL;
    if(!WszMultiByteToWideChar(CP_UTF8, 0, szBuffer.Ptr(), -1, buf, dwBuffer))
        return NULL;
    return buf;
}
//
// Gets the namespace and class name for the class. The namespace can
// legitimately come back NULL; a NULL *return value*, however, indicates an
// error.
//
// NOTE: this used to return array class names, which were sometimes squirreled
// away by the class loader hash table. That was removed because it wasted
// space and was basically broken (sometimes unset, sometimes wrong). For array
// class names, use GetFullyQualifiedNameForClass instead.
//
LPCUTF8 EEClass::GetFullyQualifiedNameInfo(LPCUTF8 *ppszNamespace)
{
    // Arrays carry no typedef token, so there is nothing to report.
    if (IsArrayClass())
    {
        *ppszNamespace = NULL;
        return NULL;
    }
    LPCUTF8 szName;
    GetMDImport()->GetNameOfTypeDef(m_cl, &szName, ppszNamespace);
    return szName;
}
// Store a fully qualified namespace and name in the supplied buffer (of size cBuffer).
// Returns S_OK on success, or the insufficient-buffer HRESULT when the
// combined path does not fit.
HRESULT EEClass::StoreFullyQualifiedName(
    LPUTF8 pszFullyQualifiedName,
    DWORD cBuffer,
    LPCUTF8 pszNamespace,
    LPCUTF8 pszName
    )
{
    BOOL fFits = ns::MakePath(pszFullyQualifiedName, (int) cBuffer, pszNamespace, pszName);
    return fFits ? S_OK : HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
}
// Store a fully qualified namespace and name in the supplied buffer (of size cBuffer).
// Wide-character variant. Returns S_OK on success, or the insufficient-buffer
// HRESULT when the combined path does not fit.
HRESULT EEClass::StoreFullyQualifiedName(
    LPWSTR pszFullyQualifiedName,
    DWORD cBuffer,
    LPCUTF8 pszNamespace,
    LPCUTF8 pszName
    )
{
    BOOL fFits = ns::MakePath(pszFullyQualifiedName, (int) cBuffer, pszNamespace, pszName);
    return fFits ? S_OK : HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
}
//
// Used for static analysis - therefore, "this" can be an interface
//
// Answers whether the static type definition implements pInterfaceMT, either
// by being that interface or by listing it in the interface map.
BOOL EEClass::StaticSupportsInterface(MethodTable *pInterfaceMT)
{
    _ASSERTE(pInterfaceMT->GetClass()->IsInterface());
    _ASSERTE(!IsThunking());
    _ASSERTE(IsRestored());
    // A type trivially supports itself when it *is* the interface in question.
    if (GetMethodTable() == pInterfaceMT)
        return TRUE;
    // Otherwise scan the statically-declared interface map.
    InterfaceInfo_t *pMap = GetInterfaceMap();
    WORD cEntries = GetMethodTable()->m_wNumInterface;
    for (WORD iEntry = 0; iEntry < cEntries; iEntry++)
    {
        if (pMap[iEntry].m_pMethodTable == pInterfaceMT)
            return TRUE;
    }
    return FALSE;
}
// Dynamic interface check for an instance: consult the static interface map,
// then (if the object is a thunking proxy) re-dispatch on the real class, and
// finally (for COM objects) ask the underlying COM object itself.
BOOL EEClass::SupportsInterface(OBJECTREF pObj, MethodTable* pInterfaceMT)
{
    _ASSERTE(pInterfaceMT->GetClass()->IsInterface());
    _ASSERTE((pObj->GetClass() == this) || pObj->GetClass()->IsThunking());
    _ASSERTE(IsRestored());
    // Check to see if the static class definition indicates we implement the interface.
    InterfaceInfo_t* pIntf = FindInterface(pInterfaceMT);
    if (pIntf != NULL)
        return TRUE;
    // It is important to give internal context boundaries priority over COM boundaries.
    // Start by checking if we are thunking and if we are delegate the call to the real class.
    EEClass *cls = AdjustForThunking(pObj);
    if (cls != this)
        return cls->SupportsInterface(pObj, pInterfaceMT);
    // If this is a COM object, the static class definition might not be complete so we need
    // to check if the COM object implements the interface.
    if (GetMethodTable()->IsComObjectType())
        return ComObject::SupportsInterface(pObj, pInterfaceMT);
    return FALSE;
}
// Version of SupportsInterface that only deals with classic COM instances
// that are not being thunked: check the static interface map first, then fall
// back to querying the COM object itself (its static definition may be
// incomplete).
BOOL EEClass::ComObjectSupportsInterface(OBJECTREF pObj, MethodTable* pInterfaceMT)
{
    _ASSERTE(pInterfaceMT->GetClass()->IsInterface());
    _ASSERTE(GetMethodTable()->IsComObjectType());
    _ASSERTE(pObj->GetClass() == this);
    _ASSERTE(IsRestored());
    // A hit in the static interface map answers the question immediately.
    if (FindInterface(pInterfaceMT) != NULL)
        return TRUE;
    // Otherwise defer to the underlying COM object.
    return ComObject::SupportsInterface(pObj, pInterfaceMT);
}
// Debug helper: print the instance fields introduced at each level of the
// hierarchy, parents first (recursively). 'debug' TRUE routes output to
// OutputDebugString; FALSE routes it to the LOG facility.
void EEClass::DebugRecursivelyDumpInstanceFields(LPCUTF8 pszClassName, BOOL debug)
{
    CQuickBytes qb;
    const int nLen = MAX_CLASSNAME_LENGTH + 20;
    LPWSTR buff = (LPWSTR) qb.Alloc( nLen * sizeof(WCHAR));
    if( buff == NULL)
        return;
    DWORD cParentInstanceFields;
    DWORD i;
    _ASSERTE(IsRestored());
    if (GetParentClass() != NULL)
    {
        cParentInstanceFields = GetParentClass()->m_wNumInstanceFields;
        DefineFullyQualifiedNameForClass();
        LPCUTF8 name = GetFullyQualifiedNameForClass(GetParentClass());
        // Dump the parent's fields first so output reads base-to-derived.
        GetParentClass()->DebugRecursivelyDumpInstanceFields(name, debug);
    }
    else
    {
        cParentInstanceFields = 0;
    }
    // Are there any new instance fields declared by this class?
    if (m_wNumInstanceFields > cParentInstanceFields)
    {
        // Display them
        if(debug) {
            _snwprintf(buff, nLen - 1, L"%lS:\n", pszClassName);
            buff[nLen - 1] = L'\0';
            WszOutputDebugString(buff);
        }
        else {
            LOG((LF_ALWAYS, LL_ALWAYS, "%ls:\n", pszClassName));
        }
        // m_pFieldDescList starts with the fields this class introduces.
        for (i = 0; i < (m_wNumInstanceFields-cParentInstanceFields); i++)
        {
            FieldDesc *pFD = &m_pFieldDescList[i];
            // printf("offset %s%3d %s\n", pFD->IsByValue() ? "byvalue " : "", pFD->GetOffset(), pFD->GetName());
            if(debug) {
                _snwprintf(buff, nLen - 1, L"offset %3d %S\n", pFD->GetOffset(), pFD->GetName());
                buff[nLen - 1] = L'\0';
                WszOutputDebugString(buff);
            }
            else {
                LOG((LF_ALWAYS, LL_ALWAYS, "offset %3d %s\n", pFD->GetOffset(), pFD->GetName()));
            }
        }
    }
}
// Debug helper: print the full field layout of the class — statics first,
// then the inherited+own instance fields via
// DebugRecursivelyDumpInstanceFields. 'debug' TRUE routes output to
// OutputDebugString; FALSE routes it to the LOG facility.
void EEClass::DebugDumpFieldLayout(LPCUTF8 pszClassName, BOOL debug)
{
    CQuickBytes qb;
    const int nLen = MAX_CLASSNAME_LENGTH + 40;
    LPWSTR buff = (LPWSTR) qb.Alloc(nLen * sizeof(WCHAR));
    if( buff == NULL)
        return;
    DWORD i;
    DWORD cParentInstanceFields;
    _ASSERTE(IsRestored());
    // Nothing to print for a fieldless class.
    if (m_wNumStaticFields == 0 && m_wNumInstanceFields == 0)
        return;
    if (GetParentClass() != NULL)
        cParentInstanceFields = GetParentClass()->m_wNumInstanceFields;
    else
        cParentInstanceFields = 0;
    if(debug) {
        _snwprintf(buff, nLen - 1, L"Field layout for '%S':\n\n", pszClassName);
        buff[nLen -1] = L'\0';
        WszOutputDebugString(buff);
    }
    else {
        LOG((LF_ALWAYS, LL_ALWAYS, "Field layout for '%s':\n\n", pszClassName));
    }
    if (m_wNumStaticFields > 0)
    {
        if(debug) {
            WszOutputDebugString(L"Static fields (stored at vtable offsets)\n");
            WszOutputDebugString(L"----------------------------------------\n");
        }
        else {
            LOG((LF_ALWAYS, LL_ALWAYS, "Static fields (stored at vtable offsets)\n"));
            LOG((LF_ALWAYS, LL_ALWAYS, "----------------------------------------\n"));
        }
        // Statics follow this class's newly-introduced instance fields in
        // m_pFieldDescList.
        for (i = 0; i < m_wNumStaticFields; i++)
        {
            FieldDesc *pFD = &m_pFieldDescList[(m_wNumInstanceFields-cParentInstanceFields) + i];
            if(debug) {
                _snwprintf(buff, nLen - 1, L"offset %3d %S\n", pFD->GetOffset(), pFD->GetName());
                buff[nLen -1] = L'\0';
                WszOutputDebugString(buff);
            }
            else {
                LOG((LF_ALWAYS, LL_ALWAYS, "offset %3d %s\n", pFD->GetOffset(), pFD->GetName()));
            }
        }
    }
    if (m_wNumInstanceFields > 0)
    {
        // Blank separator line when both sections are printed.
        if (m_wNumStaticFields) {
            if(debug) {
                WszOutputDebugString(L"\n");
            }
            else {
                LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
            }
        }
        if(debug) {
            WszOutputDebugString(L"Instance fields\n");
            WszOutputDebugString(L"---------------\n");
        }
        else {
            LOG((LF_ALWAYS, LL_ALWAYS, "Instance fields\n"));
            LOG((LF_ALWAYS, LL_ALWAYS, "---------------\n"));
        }
        DebugRecursivelyDumpInstanceFields(pszClassName, debug);
    }
    if(debug) {
        WszOutputDebugString(L"\n");
    }
    else {
        LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
    }
}
// Debug helper: dump every method slot (including interface duplicates) and,
// for non-interfaces, the interface map. 'debug' TRUE routes output to
// OutputDebugString; FALSE routes it to the LOG facility.
void EEClass::DebugDumpVtable(LPCUTF8 pszClassName, BOOL debug)
{
    DWORD i;
    CQuickBytes qb;
    const int nLen = MAX_CLASSNAME_LENGTH *2 + 100;
    LPWSTR buff = (LPWSTR) qb.Alloc(nLen * sizeof(WCHAR));
    if( buff == NULL)
        return;
    if(debug) {
        _snwprintf(buff, nLen - 1, L"Vtable (with interface dupes) for '%S':\n", pszClassName);
        buff[nLen - 1] = L'\0';
#ifdef _DEBUG
        _snwprintf(&buff[wcslen(buff)], nLen -wcslen(buff) -1, L"Total duplicate slots = %d\n", g_dupMethods);
        buff[nLen - 1] = L'\0';
#endif
        WszOutputDebugString(buff);
    }
    else {
        LOG((LF_ALWAYS, LL_ALWAYS, "Vtable (with interface dupes) for '%s':\n", pszClassName));
        LOG((LF_ALWAYS, LL_ALWAYS, "Total duplicate slots = %d\n", g_dupMethods));
    }
    // One line per method slot: owning class, name, code address, home slot.
    for (i = 0; i < m_wNumMethodSlots; i++)
    {
        MethodDesc *pMD = GetUnknownMethodDescForSlot(i);
        {
            LPCUTF8 pszName = pMD->GetName((USHORT) i);
            DWORD dwAttrs = pMD->GetAttrs();
            if(debug) {
                DefineFullyQualifiedNameForClass();
                LPCUTF8 name = GetFullyQualifiedNameForClass(pMD->GetClass());
                _snwprintf(buff, nLen -1,
                           L"slot %2d: %S::%S%S  0x%X (slot = %2d)\n",
                           i,
                           name,
                           pszName,
                           IsMdFinal(dwAttrs) ? " (final)" : "",
                           pMD->GetAddrofCode(),
                           pMD->GetSlot()
                           );
                buff[nLen - 1] = L'\0';
                WszOutputDebugString(buff);
            }
            else {
                LOG((LF_ALWAYS, LL_ALWAYS,
                     "slot %2d: %s::%s%s  0x%X (slot = %2d)\n",
                     i,
                     pMD->GetClass()->m_szDebugClassName,
                     pszName,
                     IsMdFinal(dwAttrs) ? " (final)" : "",
                     pMD->GetAddrofCode(),
                     pMD->GetSlot()
                     ));
            }
        }
        // Mark the boundary between the vtable and non-vtable slot sections.
        if (i == (DWORD)(GetNumVtableSlots()-1)) {
            if(debug)
                WszOutputDebugString(L"<-- vtable ends here\n");
            else {
                LOG((LF_ALWAYS, LL_ALWAYS, "<-- vtable ends here\n"));
            }
        }
    }
    if (m_wNumInterfaces > 0)
    {
        if(debug)
            WszOutputDebugString(L"Interface map:\n");
        else {
            LOG((LF_ALWAYS, LL_ALWAYS, "Interface map:\n"));
        }
        // Interfaces themselves don't get an interface-map listing.
        if (!IsInterface())
        {
            for (i = 0; i < m_wNumInterfaces; i++)
            {
                _ASSERTE(GetInterfaceMap()[i].m_wStartSlot != (WORD) -1);
                if(debug) {
                    DefineFullyQualifiedNameForClass();
                    LPCUTF8 name = GetFullyQualifiedNameForClass(GetInterfaceMap()[i].m_pMethodTable->GetClass());
                    _snwprintf(buff, nLen -1,
                               L"slot %2d %S %d\n",
                               GetInterfaceMap()[i].m_wStartSlot,
                               name,
                               GetInterfaceMap()[i].m_pMethodTable->GetInterfaceMethodSlots()
                               );
                    buff[nLen - 1] = L'\0';
                    WszOutputDebugString(buff);
                }
                else {
                    LOG((LF_ALWAYS, LL_ALWAYS,
                         "slot %2d %s %d\n",
                         GetInterfaceMap()[i].m_wStartSlot,
                         GetInterfaceMap()[i].m_pMethodTable->GetClass()->m_szDebugClassName,
                         GetInterfaceMap()[i].m_pMethodTable->GetInterfaceMethodSlots()
                         ));
                }
            }
        }
    }
    if(debug)
        WszOutputDebugString(L"\n");
    else {
        LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
    }
}
// Dumps the GC pointer-series description for this class, either to the
// debugger output stream (debug == TRUE, via WszOutputDebugString) or to
// the runtime log (debug == FALSE, via LOG).
//   pszClassName - class name used only for the dump header
//   debug        - selects OutputDebugString vs. LOG as the sink
void EEClass::DebugDumpGCDesc(LPCUTF8 pszClassName, BOOL debug)
{
// Scratch wide-char buffer; CQuickBytes frees it when this scope exits.
CQuickBytes qb;
const int nLen = MAX_CLASSNAME_LENGTH + 100;
LPWSTR buff = (LPWSTR) qb.Alloc(nLen * sizeof(WCHAR));
if( buff == NULL)
return;
if(debug) {
_snwprintf(buff, nLen - 1, L"GC description for '%S':\n\n", pszClassName);
// _snwprintf does not guarantee termination on truncation; force it.
buff[nLen -1]= L'\0';
WszOutputDebugString(buff);
}
else {
LOG((LF_ALWAYS, LL_ALWAYS, "GC description for '%s':\n\n", pszClassName));
}
// Only types that contain object references carry a CGCDesc.
if (GetMethodTable()->ContainsPointers())
{
CGCDescSeries *pSeries;
CGCDescSeries *pHighest;
if(debug)
WszOutputDebugString(L"GCDesc:\n");
else {
LOG((LF_ALWAYS, LL_ALWAYS, "GCDesc:\n"));
}
// Walk every series in the GC descriptor (inclusive range).
pSeries = CGCDesc::GetCGCDescFromMT(GetMethodTable())->GetLowestSeries();
pHighest = CGCDesc::GetCGCDescFromMT(GetMethodTable())->GetHighestSeries();
while (pSeries <= pHighest)
{
if(debug) {
_snwprintf(buff, nLen - 1, L" offset %5d (%d w/o Object), size %5d (%5d w/o BaseSize subtr)\n",
pSeries->GetSeriesOffset(),
pSeries->GetSeriesOffset() - sizeof(Object),
pSeries->GetSeriesSize(),
pSeries->GetSeriesSize() + GetMethodTable()->GetBaseSize()
);
buff[nLen - 1] = L'\0';
WszOutputDebugString(buff);
}
else {
LOG((LF_ALWAYS, LL_ALWAYS, " offset %5d (%d w/o Object), size %5d (%5d w/o BaseSize subtr)\n",
pSeries->GetSeriesOffset(),
pSeries->GetSeriesOffset() - sizeof(Object),
pSeries->GetSeriesSize(),
pSeries->GetSeriesSize() + GetMethodTable()->GetBaseSize()
));
}
pSeries++;
}
if(debug)
WszOutputDebugString(L"\n");
else {
LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
}
}
}
// Thin forwarder: looks up pInterface in this class's interface map by
// delegating to the method table's FindInterface. Returns NULL if the
// interface is not implemented (see MethodTable::FindInterface below).
InterfaceInfo_t* EEClass::FindInterface(MethodTable *pInterface)
{
// verify the interface map is valid (class and method table must agree)
_ASSERTE(GetInterfaceMap() == m_pMethodTable->GetInterfaceMap());
_ASSERTE(!IsThunking());
return m_pMethodTable->FindInterface(pInterface);
}
// Linear scan of this type's static interface map for an exact MethodTable
// match. Returns a pointer to the map entry, or NULL if pInterface is not
// in the statically-declared interface set.
InterfaceInfo_t* MethodTable::FindInterface(MethodTable *pInterface)
{
// we can't be an interface ourselves
_ASSERTE(GetClass()->IsInterface() == FALSE);
// class we are looking up should be an interface
_ASSERTE(pInterface->GetClass()->IsInterface() != FALSE);
_ASSERTE(!IsThunking());
// We need to be restored so we can compare interface IDs if necessary
_ASSERTE(IsRestored() || GetClass()->IsRestoring());
_ASSERTE(pInterface->IsRestored());
for (DWORD i = 0; i < m_wNumInterface; i++)
{
// Identity comparison on the MethodTable pointer — no name lookup.
if (m_pIMap[i].m_pMethodTable == pInterface)
{
// Extensible RCW's need to be handled specially because they can have interfaces
// in their map that are added at runtime. These interfaces will have a start offset
// of -1 to indicate this. We cannot take for granted that every instance of this
// COM object has this interface so FindInterface on these interfaces is made to fail.
//
// However, we are only considering the statically available slots here
// (m_wNumInterface doesn't contain the dynamic slots), so we can safely
// ignore this detail.
_ASSERTE(m_pIMap[i].m_wStartSlot != (WORD) -1);
return &m_pIMap[i];
}
}
return NULL;
}
// Resolves an interface method to the MethodDesc of this type's
// implementation, by indexing the per-type interface vtable map with the
// interface's id and the method's slot and mapping the resulting code
// address back to its MethodDesc.
MethodDesc *MethodTable::GetMethodDescForInterfaceMethod(MethodDesc *pInterfaceMD)
{
MethodTable *pInterfaceMT = pInterfaceMD->GetMethodTable();
_ASSERTE(pInterfaceMT->IsInterface());
// This type must actually implement the interface being asked about.
_ASSERTE(FindInterface(pInterfaceMT) != NULL);
// Two-level lookup: [interface id] -> per-interface slot table -> [slot].
SLOT pCallAddress = ((SLOT **) m_pInterfaceVTableMap)[pInterfaceMT->GetClass()->GetInterfaceId()][pInterfaceMD->GetSlot()];
MethodDesc *pMD = EEClass::GetUnknownMethodDescForSlotAddress(pCallAddress);
return pMD;
}
//--------------------------------------------------------------------
// Reorders argument from the Interpreter operand stack (operands
// are pushed right->left with each argument occupying 64 bits)
// into WIL format (right->left, variable-sized arguments.)
//
// Parameters:
// fIsStatic - TRUE if the target method is static (no 'this' slot)
// pSig - The method signature
// pmodule - Module used to resolve tokens in the signature
// psrc - Points to first byte *after* the arguments
// pdst - Points to first byte *after* the allocated
// stack space.
//--------------------------------------------------------------------
extern "C" VOID __stdcall ArgFiller_WilDefault(BOOL fIsStatic,PCCOR_SIGNATURE pSig, Module *pmodule, BYTE *psrc, BYTE *pdst)
{
// Raw byte copy of GC-sensitive argument data — no GC may occur here.
BEGINFORBIDGC();
// Total byte size of the virtual fixed-arg stack for this signature.
UINT32 n = MetaSig::SizeOfVirtualFixedArgStack(pmodule,pSig,fIsStatic);
// Both pointers are one-past-the-end; step back to the start of the block.
pdst -= n;
psrc -= n;
memcpy(pdst, psrc, n);
ENDFORBIDGC();
}
#ifdef _X86_
//--------------------------------------------------------------------
// Stack-allocates the memory needed to hold the arguments for a
// EE->Method call where the method's calling convention is
// Wil. This function has to be in assembly because it needs
// complete control over the stack pointer.
//
// Once it has allocated the space, it invokes ArgFiller_WilDefault
// to fill in the arguments, then tail-calls the stub (which pops
// the argument space itself).
//--------------------------------------------------------------------
static
__declspec(naked)
INT64 __cdecl CallWorker_WilDefault(const BYTE *pStubTarget, // [ecx+4]
UINT32 numArgSlots, // [ecx+8]
PCCOR_SIGNATURE pSig, // [ecx+12]
Module *pModule, // [ecx+16]
const BYTE *pArgsEnd, // [ecx+20]
BOOL fIsStatic) // [ecx+24]
{
// Naked function: no prolog/epilog; ecx doubles as the frame pointer so
// the incoming __cdecl args are addressed as [ecx+4] .. [ecx+24].
__asm{
mov ecx, esp ;; will use ecx as "ebp" pointer
mov eax, [ecx+8] ;; get "numArgSlots"
shl eax, 2 ;; slots -> bytes
sub esp, eax ;; allocate space for arguments
push ecx ;; need to save ecx across call
push ecx ;; push "pDstEnd" as argument
push dword ptr [ecx+20] ;; push "pArgsEnd" as argument
push dword ptr [ecx+16] ;; push "pmodule" as argument
push dword ptr [ecx+12] ;; push "pSig" as argument
push dword ptr [ecx+24] ;; push "fIsStatic" as argument
call ArgFiller_WilDefault ;; call function to fill argument array
pop ecx ;; restore ecx
call [ecx+4] ;; call the stub (which also pops the args)
retn ;; return
}
}
#endif //X86
#ifdef _X86_
//------------------------------------------------------------------------------
// This helper routine enregisters the appropriate arguments and makes the actual call.
// It copies numStackSlots 32-bit slots (ending at pSrcEnd) onto the stack in
// reverse order, loads the enregistered arguments described by
// pArgumentRegisters (via the eecallconv.h macro expansion), and calls pTarget.
// In debug builds this is named CallDescrWorkerInternal and wrapped by the
// checked CallDescrWorker below; in retail it IS CallDescrWorker.
//------------------------------------------------------------------------------
__declspec(naked)
INT64
#ifdef _DEBUG
CallDescrWorkerInternal
#else
CallDescrWorker
#endif
(LPVOID pSrcEnd, //[edx+0]
UINT32 numStackSlots, //[edx+4]
const ArgumentRegisters *pArgumentRegisters, //[edx+8]
LPVOID pTarget //[edx+12]
)
{
__asm {
push ebp
mov ebp, esp
mov eax, pSrcEnd // copy the stack
mov ecx, numStackSlots
test ecx, ecx
jz donestack
// First two slots are unrolled; remaining slots loop at stackloop.
sub eax,4
push dword ptr [eax]
dec ecx
jz donestack
sub eax,4
push dword ptr [eax]
dec ecx
jz donestack
stackloop:
sub eax,4
push dword ptr [eax]
dec ecx
jnz stackloop
donestack:
// Now, we must push each field of the ArgumentRegister structure.
mov eax, pArgumentRegisters
#define DEFINE_ARGUMENT_REGISTER_BACKWARD_WITH_OFFSET(regname,ofs) \
mov regname, dword ptr [eax+ofs]
#include "eecallconv.h"
call pTarget
INDEBUG(nop) // This is a tag that we use in an assert. Fcalls expect to
// be called from Jitted code or from certain blessed call sites like
// this one. (See HelperMethodFrame::InsureInit)
leave
ret 16
}
}
#ifdef _DEBUG
// Debug-only wrapper around CallDescrWorkerInternal. It snapshots the
// thread's dangerousObjRefs table across the call (so GC-hole tracking
// state survives the callee), marks the thread ready for suspension, and
// triggers stress-GC / stress-heap checks on return.
//   pSrcEnd / numStackSlots / pArgumentRegisters / pTarget - forwarded
//   unchanged to CallDescrWorkerInternal (see asm routine above).
INT64 CallDescrWorker
(LPVOID pSrcEnd, //[edx+0]
UINT32 numStackSlots, //[edx+4]
const ArgumentRegisters *pArgumentRegisters, //[edx+8]
LPVOID pTarget //[edx+12]
)
{
INT64 retValue;
Thread* curThread;
unsigned ObjRefTable[OBJREF_TABSIZE];
curThread = GetThread();
// Save a copy of dangerousObjRefs in table.
if (curThread)
{
memcpy(ObjRefTable, curThread->dangerousObjRefs,
sizeof(curThread->dangerousObjRefs));
curThread->SetReadyForSuspension ();
}
// BUGFIX: every other use of curThread in this function is null-checked,
// but the original asserted curThread->PreemptiveGCDisabled() without a
// guard, which would AV on a null thread. Only assert when a thread exists.
// Jitted code expects to be in cooperative mode.
_ASSERTE(curThread == NULL || curThread->PreemptiveGCDisabled());
retValue = CallDescrWorkerInternal (pSrcEnd, numStackSlots, pArgumentRegisters, pTarget);
// Restore dangerousObjRefs when we return back to EE after call
if (curThread)
memcpy(curThread->dangerousObjRefs, ObjRefTable,
sizeof(curThread->dangerousObjRefs));
TRIGGERSGC ();
ENABLESTRESSHEAP ();
return retValue;
}
#endif
#else // !_X86_
#ifndef _IA64_
// Placeholder for platforms other than x86/IA64: the register/stack
// shuffling above is architecture-specific and has not been ported, so
// this stub asserts and returns 0.
INT64 __cdecl CallDescrWorker(LPVOID pSrcEnd, //[edx+0]
UINT32 numStackSlots, //[edx+4]
const ArgumentRegisters *pArgumentRegisters, //[edx+8]
LPVOID pTarget //[edx+12]
)
{
_ASSERTE(!"@TODO non-X86, non-IA64 - CallDescrWorker (Class.cpp)");
return 0;
}
#endif // !_IA64_
#endif // !_X86_
// Ensures this class has been restored from its prejitted (zap) image by
// forcing a LoadTypeHandle through the class loader if necessary.
// Returns TRUE only when a restore was performed here AND the class is
// initialized afterwards; returns FALSE if already restored or if the
// restored class is not yet inited. Throws (COMPlusThrow) on load failure.
BOOL EEClass::CheckRestore()
{
if (!IsRestored())
{
THROWSCOMPLUSEXCEPTION();
_ASSERTE(GetClassLoader());
// Loading can allocate/throw managed objects, so switch to cooperative
// GC mode and protect the throwable across the load.
BEGIN_ENSURE_COOPERATIVE_GC();
OBJECTREF pThrowable = NULL;
GCPROTECT_BEGIN(pThrowable);
NameHandle name(GetModule(), m_cl);
TypeHandle th = GetClassLoader()->LoadTypeHandle(&name, &pThrowable);
if (th.IsNull())
COMPlusThrow(pThrowable);
GCPROTECT_END();
END_ENSURE_COOPERATIVE_GC();
if (IsInited())
return TRUE;
}
return FALSE;
}
// Allocates app-domain object-ref handles for this type's handle statics
// and, for each non-NULL slot in the static-pointer area after the vtable,
// allocates an instance of the recorded type into the new handle.
//   pHandles - out array receiving one OBJECTREF* per handle static
//   fTokens  - if TRUE, the slots hold encoded zap RVAs that must be
//              decoded to MethodTables first; if FALSE they are already
//              MethodTable pointers.
void MethodTable::InstantiateStaticHandles(OBJECTREF **pHandles, BOOL fTokens)
{
if (GetClass()->GetNumHandleStatics() == 0)
return;
// The handle-static descriptor slots live immediately after the vtable.
MethodTable **pPointers = (MethodTable**)(m_Vtable + GetClass()->GetNumMethodSlots());
MethodTable **pPointersEnd = pPointers + GetClass()->GetNumHandleStatics();
BEGIN_ENSURE_COOPERATIVE_GC();
// Retrieve the object ref pointers from the app domain.
// NOTE(review): if anything below throws (DecodeClass/AllocateObject can),
// apObjRefs appears to leak — confirm whether callers tolerate this.
OBJECTREF **apObjRefs = new OBJECTREF*[GetClass()->GetNumHandleStatics()];
//
// For shared classes, handles should get allocated in the current app domain.
// For all others, allocate in the same domain as the class.
//
AppDomain *pDomain;
if (IsShared())
pDomain = ::GetAppDomain();
else
pDomain = (AppDomain*) GetModule()->GetDomain();
// Reserve some object ref pointers.
pDomain->AllocateStaticFieldObjRefPtrs(GetClass()->GetNumHandleStatics(), apObjRefs);
OBJECTREF **pHandle = apObjRefs;
while (pPointers < pPointersEnd)
{
if (*pPointers != NULL)
{
OBJECTREF obj = NULL;
MethodTable *pMT;
if (fTokens)
{
// Slot currently holds an RVA-encoded type blob; decode it.
DWORD rva = * (DWORD*) pPointers;
Module *pContainingModule = GetModule()->GetBlobModule(rva);
//
// A value type may have static members of its own type;
// we need to treat this as a special case to avoid
// circular loading dependencies.
//
TypeHandle typeHnd = CEECompileInfo::DecodeClass(pContainingModule,
GetModule()->GetZapBase() + rva,
TRUE);
pMT = typeHnd.GetMethodTable();
if (pMT != this)
pMT->CheckRestore();
}
else
pMT = (MethodTable*)*pPointers;
// Pre-instantiate the static's object into the fresh handle.
obj = AllocateObject(pMT);
SetObjectReference( *pHandle, obj, pDomain );
*pHandles++ = *pHandle++;
}
else
{
// Empty slot: still hand out the reserved (NULL-valued) handle.
*pHandles++ = *pHandle++;
}
pPointers++;
}
delete []apObjRefs;
END_ENSURE_COOPERATIVE_GC();
}
// Converts the RVA-encoded type blobs stored in the handle-static slots
// (after the vtable) of a prejitted shared class into real MethodTable
// pointers, restoring each referenced type except ourselves. Used on the
// shared-class path where handles are instantiated per-domain later.
void MethodTable::FixupStaticMethodTables()
{
size_t *blobs = (size_t*)(m_Vtable + GetClass()->GetNumMethodSlots());
size_t *blobsEnd = blobs + GetClass()->GetNumHandleStatics();
while (blobs < blobsEnd)
{
DWORD rva = (DWORD)(size_t)*blobs; //@todo WIN64 - is it correct to assume that these blobs will initially contain rva's that we later replce with addresses?
if (rva != NULL)
{
Module *pContainingModule = GetModule()->GetBlobModule(rva);
//
// A value type may have static members of its own type;
// we need to treat this as a special case to avoid
// circular loading dependencies.
//
TypeHandle typeHnd = CEECompileInfo::DecodeClass(pContainingModule,
GetModule()->GetZapBase() + rva,
TRUE);
MethodTable *pMT = typeHnd.GetMethodTable();
if (pMT != this)
pMT->CheckRestore();
// Overwrite the encoded RVA in place with the resolved pointer.
*blobs = (size_t) pMT;
}
blobs++;
}
}
// Restores this class from its prejitted (zap) image: fixes up the parent
// and interface-map pointers (which initially hold encoded RVAs), sets up
// the interface vtable map or interface id, allocates thread/context
// static offsets, registers for COM interop, instantiates static handles,
// and restores field-marshaler vtables. The order of these steps is
// significant; do not reorder.
void EEClass::Restore()
{
THROWSCOMPLUSEXCEPTION();
MethodTable *pMT = GetMethodTable();
STRESS_LOG1(LF_ZAP, LL_INFO10000, "EEClass::Restore: Restoring type %xT\n", pMT);
LOG((LF_ZAP, LL_INFO10000,
"ZAP: Restoring class %s at 0x%x/0x%x.\n",
m_szDebugClassName, this, pMT));
//
// Set RESTORING flag so we can detect recursive restores
// (this is currently only used in asserts)
//
m_VMFlags |= VMFLAG_RESTORING;
//
// Restore parent class
//
m_SiblingsChain = m_ChildrenChain = 0;
if (m_pParentClass != NULL)
{
DWORD rva = (DWORD)(size_t)m_pParentClass; //@todo WIN64 - is it safe to assume that parentclass initially contains an rva that we later replace with an address?
Module *pContainingModule = GetModule()->GetBlobModule(rva);
TypeHandle type = CEECompileInfo::DecodeClass(pContainingModule,
GetModule()->GetZapBase() + rva);
m_pParentClass = type.AsClass();
}
//
// Restore interface classes
//
InterfaceInfo_t *pInterfaceMap = GetInterfaceMap();
InterfaceInfo_t *pInterfaceMapEnd = pInterfaceMap + GetNumInterfaces();
while (pInterfaceMap < pInterfaceMapEnd)
{
// Each entry's m_pMethodTable starts out as an encoded RVA; decode it
// back to the real interface MethodTable.
DWORD rva = (DWORD)(size_t) pInterfaceMap->m_pMethodTable; //@todo WIN64 - is it safe to assume the m_pMethodTable initially contains an rva and we will later replace it with an address?
Module *pContainingModule = GetModule()->GetBlobModule(rva);
TypeHandle type = CEECompileInfo::DecodeClass(pContainingModule,
GetModule()->GetZapBase() + rva);
pInterfaceMap->m_pMethodTable = type.AsMethodTable();
pInterfaceMap++;
}
//
// Init m_pInterfaceVTableMap
//
if (!IsInterface())
{
//
// Set up the interface vtable map, or
// if we are a com object with zero interfaces,
// use the global one.
//
pMT->InitInterfaceVTableMap();
if (pMT->IsComObjectType())
pMT->SetComObjectType();
}
else
{
//
// Assign an interface ID
//
UINT32 id = AssignInterfaceId();
//
// Set up our entry in the global interface vtable
//
if (IsSharedInterface())
{
// need to copy this to all the appdomains interface managers
SystemDomain::PropogateSharedInterface(id,
GetMethodTable()->GetVtable());
}
else
{
GetModule()->GetDomain()->
GetInterfaceVTableMapMgr().GetAddrOfGlobalTableForComWrappers()[id]
= (LPVOID)(pMT->GetVtable());
}
}
// If we have any thread local statics, our class needs an index, allocate it now.
if (m_wThreadStaticsSize > 0)
{
if(IsShared())
m_wThreadStaticOffset = (WORD)BaseDomain::IncSharedTLSOffset();
else
m_wThreadStaticOffset = (WORD)GetDomain()->IncUnsharedTLSOffset();
}
// same for context local statics
if (m_wContextStaticsSize > 0)
{
if(IsShared())
m_wContextStaticOffset = (WORD)BaseDomain::IncSharedCLSOffset();
else
m_wContextStaticOffset = (WORD)GetDomain()->IncUnsharedCLSOffset();
}
//
// Map our system interfaces into the current app domain for COM interop
//
MapSystemInterfaces();
//
// If the type is not a value class, then store under IID map.
//
if (!IsValueClass())
GetClassLoader()->InsertClassForCLSID(this);
#if CHECK_APP_DOMAIN_LEAKS
//
// Figure out if we're domain agile.. Need to do this before we start
// allocating objects of the type (which could happen in InstantiateStaticHandles)
// because need to determine agility prior to setting the appdomain.
//
if (g_pConfig->AppDomainLeaks())
_ASSERTE(IsAppDomainAgilityDone());
#endif
//
// Allocate handles for statics
//
if (IsShared())
GetMethodTable()->FixupStaticMethodTables();
else
GetMethodTable()->InstantiateStaticHandles((OBJECTREF**)(pMT->m_Vtable
+ m_wNumMethodSlots), TRUE);
//
// Restore field marshaler vptrs
//
if (HasLayout())
{
EEClassLayoutInfo *pInfo = &((LayoutEEClass*)this)->m_LayoutInfo;
FieldMarshaler *pFM = pInfo->m_pFieldMarshalers;
FieldMarshaler *pFMEnd = (FieldMarshaler*) ((BYTE *)pFM + pInfo->m_numCTMFields*MAXFIELDMARSHALERSIZE);
while (pFM < pFMEnd)
{
// Re-establish each marshaler's vtable pointer in place.
FieldMarshaler::RestoreConstruct(pMT, (void*)pFM, GetModule());
((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
}
}
// Link ourselves into the parent's subtype chain.
if (m_pParentClass != NULL)
m_pParentClass->NoticeSubtype(this);
GetMethodTable()->SetClassRestored();
if (IsInited())
{
//
// Clear the method table's flag.
//
GetMethodTable()->SetClassInited();
}
}
/*
//void EEClass::Restore()
void MethodTable::Restore()
{
THROWSCOMPLUSEXCEPTION();
EEClass* pCl = NULL;
#ifdef _DEBUG
pCl = GetClass();
LOG((LF_ZAP, LL_INFO10000,
"ZAP: Restoring class %s at 0x%x/0x%x.\n",
pCl->m_szDebugClassName, pCl, this));
#endif // _DEBUG
//
// Set RESTORING flag so we can detect recursive restores
// (this is currently only used in asserts)
//
pCl = GetClass();
pCl->SetVMFlags(pCl->GetVMFlags() | VMFLAG_RESTORING);
//
// Restore parent class
//
// pCl->SetSiblingsChain (0);
// pCl->SetChildrenChain (0);
if (GetParentClass() != NULL)
{
DWORD rva = (DWORD)(size_t)GetParentClass(); //@todo WIN64 - is it safe to assume that parentclass initially contains an rva that we later replace with an address?
Module *pContainingModule = GetModule()->GetBlobModule(rva);
TypeHandle type = CEECompileInfo::DecodeClass(pContainingModule,
GetModule()->GetZapBase() + rva);
SetParentMT (type.AsMethodTable());
}
//
// Restore interface classes
//
InterfaceInfo_t *pInterfaceMap = GetInterfaceMap();
InterfaceInfo_t *pInterfaceMapEnd = pInterfaceMap + GetNumInterfaces();
while (pInterfaceMap < pInterfaceMapEnd)
{
DWORD rva = (DWORD)(size_t) pInterfaceMap->m_pMethodTable; //@todo WIN64 - is it safe to assume the m_pMethodTable initially contains an rva and we will later replace it with an address?
Module *pContainingModule = GetModule()->GetBlobModule(rva);
TypeHandle type = CEECompileInfo::DecodeClass(pContainingModule,
GetModule()->GetZapBase() + rva);
pInterfaceMap->m_pMethodTable = type.AsMethodTable();
pInterfaceMap++;
}
//
// Init m_pInterfaceVTableMap
//
if (!IsInterface())
{
//
// Set up the interface vtable map, or
// if we are a com object with zero interfaces,
// use the global one.
//
InitInterfaceVTableMap();
if (IsComObjectType())
SetComObjectType();
}
else
{
//
// Assign an interface ID
//
pCl = GetClass();
UINT32 id = pCl->AssignInterfaceId();
//
// Set up our entry in the global interface vtable
//
if (pCl->IsSharedInterface())
{
// need to copy this to all the appdomains interface managers
SystemDomain::PropogateSharedInterface(id, GetVtable());
}
else
{
GetModule()->GetDomain()->
GetInterfaceVTableMapMgr().GetAddrOfGlobalTableForComWrappers()[id]
= (LPVOID)(GetVtable());
}
}
// If we have any thread local statics, our class needs an index, allocate it now.
pCl = GetClass();
if (pCl->GetThreadStaticsSize() > 0)
{
if(IsShared())
SetThreadStaticOffset ((WORD)BaseDomain::IncSharedTLSOffset());
else
SetThreadStaticOffset ((WORD)GetDomain()->IncUnsharedTLSOffset());
}
// same for context local statics
if (pCl->GetContextStaticsSize() > 0)
{
if(IsShared())
SetContextStaticOffset ((WORD)BaseDomain::IncSharedCLSOffset());
else
SetContextStaticOffset ((WORD)GetDomain()->IncUnsharedCLSOffset());
}
//
// Map our system interfaces into the current app domain for COM interop
//
pCl->MapSystemInterfaces();
//
// Store under IID map
//
pCl->GetClassLoader()->InsertClassForCLSID(pCl);
#if CHECK_APP_DOMAIN_LEAKS
//
// Figure out if we're domain agile.. Need to do this before we start
// allocating objects of the type (which could happen in InstantiateStaticHandles)
// because need to determine agility prior to setting the appdomain.
//
if (g_pConfig->AppDomainLeaks())
_ASSERTE(pCl->IsAppDomainAgilityDone());
#endif
//
// Allocate handles for statics
//
if (IsShared())
FixupStaticMethodTables();
else
InstantiateStaticHandles((OBJECTREF**)(m_Vtable + (size_t)pCl->GetNumMethodSlots()), TRUE);
//
// Restore field marshaler vptrs
//
if (pCl->HasLayout())
{
pCl = GetClass();
EEClassLayoutInfo *pInfo = &((LayoutEEClass*)pCl)->m_LayoutInfo;
FieldMarshaler *pFM = (FieldMarshaler *)pInfo->GetFieldMarshalers();
FieldMarshaler *pFMEnd = (FieldMarshaler*) ((BYTE *)pFM + pInfo->GetNumCTMFields()*MAXFIELDMARSHALERSIZE);
while (pFM < pFMEnd)
{
FieldMarshaler::RestoreConstruct((void*)pFM, GetModule());
((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
}
}
if (GetParentClass() != NULL)
GetParentClass()->NoticeSubtype(pCl);
SetClassRestored();
if (pCl->IsInited())
{
//
// Clear the method table's flag.
//
SetClassInited();
}
}
*/
/*******************************************************************/
// See EEClass::DoRunClassInit() below. Here we haven't brought the EEClass into our
// working set yet. We only impact working set if there is a strong likelihood that
// the class needs <clinit> to be run.
// Runs the class initializer (.cctor) if it has not run yet. Returns TRUE
// on success (or if already inited); on FALSE, *pThrowable holds the
// exception. Cheap fast path: checks the inited flag before touching the
// EEClass (see the comment above about working-set impact).
BOOL MethodTable::CheckRunClassInit(OBJECTREF *pThrowable)
{
_ASSERTE(IsRestored());
// To find GC hole easier...
TRIGGERSGC();
if (IsClassInited())
return TRUE;
return GetClass()->DoRunClassInit(pThrowable);
}
// Overload of CheckRunClassInit that also returns the per-domain
// DomainLocalClass for shared classes.
//   pThrowable   - receives the exception on failure
//   ppLocalClass - optional out: the domain-local class data (NULL for
//                  unshared classes)
//   pDomain      - domain to use for shared classes; NULL means current
BOOL MethodTable::CheckRunClassInit(OBJECTREF *pThrowable,
DomainLocalClass **ppLocalClass,
AppDomain *pDomain)
{
_ASSERTE(IsRestored());
// To find GC hole easier...
TRIGGERSGC();
if (IsShared())
{
// Shared classes track inited-ness per app domain in the domain
// local block, not on the MethodTable.
if (pDomain==NULL)
pDomain = SystemDomain::GetCurrentDomain();
DomainLocalBlock *pLocalBlock = pDomain->GetDomainLocalBlock();
if (pLocalBlock->IsClassInitialized(GetSharedClassIndex()))
{
if (ppLocalClass != NULL)
*ppLocalClass = pLocalBlock->GetClass(GetSharedClassIndex());
return TRUE;
}
}
if (IsClassInited())
{
if (ppLocalClass != NULL)
*ppLocalClass = NULL;
return TRUE;
}
return GetClass()->DoRunClassInit(pThrowable, pDomain, ppLocalClass);
}
// Returns the COM interface type (dual/dispatch/IUnknown) for this
// interface, reading it from metadata on first use and caching it in
// m_ComInterfaceType ((CorIfaceAttr)-1 means "not yet computed").
// NOTE(review): the cache check/update is not synchronized; presumably a
// racing recompute is benign since both writers store the same value —
// confirm.
CorIfaceAttr MethodTable::GetComInterfaceType()
{
// This should only be called on interfaces.
_ASSERTE(IsInterface());
// Check to see if we have already determined the COM interface type
// of this interface.
if (m_ComInterfaceType != (CorIfaceAttr)-1)
return m_ComInterfaceType;
// Retrieve the interface type from the metadata.
CorIfaceAttr ItfType = ifDual;
HRESULT hr = GetClass()->GetMDImport()->GetIfaceTypeOfTypeDef(GetClass()->GetCl(), (ULONG*)&ItfType);
_ASSERTE(SUCCEEDED(hr));
// Cache the interface type
m_ComInterfaceType = ItfType;
return ItfType;
}
// Allocates a new instance of this type, first ensuring the class is
// restored and its .cctor has run. Throws the class-init exception on
// initialization failure.
OBJECTREF MethodTable::Allocate()
{
THROWSCOMPLUSEXCEPTION();
CheckRestore();
if (!IsClassInited())
{
OBJECTREF throwable = NULL;
if (!CheckRunClassInit(&throwable))
COMPlusThrow(throwable);
}
return AllocateObject(this);
}
// Boxes the value-type data at 'data' into a freshly allocated object.
//   data           - interior pointer to the unboxed value (GC-protected
//                    here since Allocate() can trigger a GC)
//   mayContainRefs - if TRUE, copy with write-barrier/app-domain checks;
//                    if FALSE, a raw memcpy of the instance bytes suffices
OBJECTREF MethodTable::Box(void *data, BOOL mayContainRefs)
{
_ASSERTE(IsValueClass());
OBJECTREF ref;
GCPROTECT_BEGININTERIOR (data);
ref = Allocate();
if (mayContainRefs)
CopyValueClass(ref->UnBox(), data, this, ref->GetAppDomain());
else
memcpyNoGCRefs(ref->UnBox(), data, GetClass()->GetAlignedNumInstanceFieldBytes());
GCPROTECT_END ();
return ref;
}
// Determines the ClassInterface type for this class for COM interop:
// first from a ClassInterface custom attribute on the class itself, then
// from one on the containing assembly, finally falling back to
// DEFAULT_CLASS_INTERFACE_TYPE. Throws if the assembly-level metadata
// query fails hard.
CorClassIfaceAttr EEClass::GetComClassInterfaceType()
{
THROWSCOMPLUSEXCEPTION();
// This should only be called on classes.
_ASSERTE(!IsInterface());
const BYTE *pVal;
ULONG cbVal;
// First look for the class interface attribute at the class level.
HRESULT hr = GetMDImport()->GetCustomAttributeByName(GetCl(), INTEROP_CLASSINTERFACE_TYPE, (const void**)&pVal, &cbVal);
if (hr == S_OK)
{
// Attribute blob: 2-byte prolog (01 00) followed by the enum value byte.
_ASSERTE("The ClassInterface custom attribute is invalid" && cbVal);
_ASSERTE("ClassInterface custom attribute does not have the right format" && (*pVal == 0x01) && (*(pVal + 1) == 0x00));
CorClassIfaceAttr ClassItfType = (CorClassIfaceAttr)*(pVal + 2);
if (ClassItfType < clsIfLast)
return ClassItfType;
}
// If we haven't found the class interface attribute at the class level then look at the
// assembly level.
Assembly *pAssembly = GetAssembly();
if (pAssembly->IsAssembly())
{
// BUGFIX: the assembly-level query's HRESULT must be captured —
// previously it was consumed by IfFailThrow() alone and the stale
// class-level hr was tested below, so the attribute lookup result
// was evaluated against the wrong status code.
hr = pAssembly->GetManifestImport()->GetCustomAttributeByName(pAssembly->GetManifestToken(), INTEROP_CLASSINTERFACE_TYPE, (const void**)&pVal, &cbVal);
IfFailThrow(hr);
if (hr == S_OK)
{
_ASSERTE("The ClassInterface custom attribute is invalid" && cbVal);
_ASSERTE("ClassInterface custom attribute does not have the right format" && (*pVal == 0x01) && (*(pVal + 1) == 0x00));
CorClassIfaceAttr ClassItfType = (CorClassIfaceAttr)*(pVal + 2);
if (ClassItfType < clsIfLast)
return ClassItfType;
}
}
return DEFAULT_CLASS_INTERFACE_TYPE;
}
// Returns the assembly this class was loaded into (via its class loader).
Assembly* EEClass::GetAssembly()
{
return GetClassLoader()->m_pAssembly;
}
// Returns the domain owning this class's assembly.
BaseDomain* EEClass::GetDomain()
{
return GetAssembly()->GetDomain();
}
// Invokes this class's .cctor. Returns TRUE on success; on FALSE the
// thrown exception has been stored into *pThrowable. The thread is
// shielded from async aborts for the duration of the .cctor (leaving the
// class half-initialized would be a security hole). pEntry is the
// caller's class-init lock entry; it is not used directly here.
BOOL EEClass::RunClassInit(DeadlockAwareLockedListElement *pEntry, OBJECTREF *pThrowable)
{
Thread *pCurThread = GetThread();
_ASSERTE(IsRestored());
// This behavior is being removed from spec - I don't want to delete the code though
// until I've run tests.
#if 0
// Init the parent first if it hasn't already been inited
if (GetParentClass() != NULL)
{
if (GetParentClass()->GetMethodTable()->CheckRunClassInit(pThrowable) == FALSE)
{
// Failed to init parent - pThrowable would have been set by the parent
return FALSE;
}
}
#endif
if (s_cctorSig == NULL)
{
// Allocate a metasig to use for all class constructors.
// (Lazily built once; lives in the high-frequency loader heap.)
void *tempSpace = SystemDomain::Loader()->GetHighFrequencyHeap()->AllocMem(sizeof(MetaSig));
s_cctorSig = new (tempSpace) MetaSig(gsig_SM_RetVoid.GetBinarySig(),
SystemDomain::SystemModule());
}
// Find init method
MethodDesc *pCLInitMethod = GetMethodDescForSlot(GetMethodTable()->GetClassConstructorSlot());
// If the static initialiser throws an exception that it doesn't catch, it has failed
COMPLUS_TRY
{
// During the <clinit>, this thread must not be asynchronously
// stopped or interrupted. That would leave the class unavailable
// and is therefore a security hole. We don't have to worry about
// multithreading, since we only manipulate the current thread's count.
pCurThread->IncPreventAsync();
STRESS_LOG1(LF_CLASSLOADER, LL_INFO1000, "RunClassInit: Calling class contructor for type %pT\n", GetMethodTable());
// We want to give the debugger a chance to handle any unhandled exceptions
// that occur during class initialization, so we need to have filter
__try
{
(void) pCLInitMethod->Call((BYTE *) NULL, s_cctorSig);
}
__except(ThreadBaseExceptionFilter(GetExceptionInformation(),
GetThread(),
ClassInitUnhandledException))
{
_ASSERTE(!"ThreadBaseExceptionFilter returned EXCEPTION_EXECUTE_HANDLER");
}
STRESS_LOG1(LF_CLASSLOADER, LL_INFO1000, "RunClassInit: Returned Successfully from class contructor for type %pT\n", GetMethodTable());
pCurThread->DecPreventAsync();
// success
return TRUE;
}
COMPLUS_CATCH
{
// Exception set by parent
// @TODO: We should make this an ExceptionInInitializerError if the exception thrown is not
// a subclass of Error
pCurThread->DecPreventAsync();
UpdateThrowable(pThrowable);
return FALSE;
}
COMPLUS_END_CATCH
_ASSERTE(0); // Should not be reached, but COMPLUS_CATCH nastiness ...
return FALSE; // ... is more that VC can handle.
}
//
// Check whether the class initialiser has to be run for this class, and run it if necessary.
// Returns TRUE for success, FALSE for failure.
//
// If this returns FALSE, then pThrowable MUST be set to an exception.
//
// Core class-initialization driver. Ensures the .cctor runs exactly once
// per class (per domain for shared classes), using a deadlock-aware lock
// entry on the class-init list so that a thread re-entering its own init
// (or two threads initializing mutually dependent classes) does not hang.
//   pThrowable   - receives the init exception on FALSE return
//   pDomain      - domain for shared classes (NULL = current)
//   ppLocalClass - optional out: DomainLocalClass for shared classes
// Returns TRUE on success (including "init still in progress elsewhere",
// which yields S_FALSE internally), FALSE with *pThrowable set on failure.
BOOL EEClass::DoRunClassInit(OBJECTREF *pThrowable, AppDomain *pDomain, DomainLocalClass **ppLocalClass)
{
HRESULT hrResult = E_FAIL;
DeadlockAwareLockedListElement* pEntry;
// NOTE(review): bLeaveLock is assigned below but never read; looks
// vestigial (the __finally keys off bEnterLockSucceeded) — confirm.
BOOL bLeaveLock = FALSE;
BEGIN_REQUIRES_16K_STACK;
// by default are always operating off the current domain, but sometimes need to do this before
// we've switched in
if (IsShared() && pDomain == NULL)
pDomain = SystemDomain::GetCurrentDomain();
//
// Check to see if we have already run the .cctor for this class.
//
// Have we run clinit already on this class?
if (IsInited())
return TRUE;
STRESS_LOG2(LF_CLASSLOADER, LL_INFO1000, "DoRunClassInit: Request to init %pT in appdomain %p\n", GetMethodTable(), pDomain);
//
// If we're shared, see if our DLS is set up already.
// (We will never set the Inited flag on a shared class.)
//
SIZE_T sharedIndex = 0;
DomainLocalBlock *pLocalBlock = NULL;
if (IsShared())
{
sharedIndex = GetMethodTable()->GetSharedClassIndex();
pLocalBlock = pDomain->GetDomainLocalBlock();
if (pLocalBlock->IsClassInitialized(sharedIndex))
{
if (ppLocalClass != NULL)
*ppLocalClass = pLocalBlock->GetInitializedClass(sharedIndex);
return TRUE;
}
}
//
// Take the global lock
//
ListLock *pLock;
if (IsShared())
pLock = pDomain->GetClassInitLock();
else
pLock = GetAssembly()->GetClassInitLock();
_ASSERTE(GetClassLoader());
pLock->Enter();
// Check again (double-checked under the lock: another thread may have
// finished the init while we were acquiring it).
if (IsInited())
{
pLock->Leave();
return TRUE;
}
//
// Check the shared case again
//
if (IsShared())
{
if (pLocalBlock->IsClassInitialized(sharedIndex))
{
pLock->Leave();
if (ppLocalClass != NULL)
*ppLocalClass = pLocalBlock->GetInitializedClass(sharedIndex);
return TRUE;
}
}
//
// Handle cases where the .cctor has already tried to run but failed.
//
if (IsInitError() || (IsShared() && pLocalBlock->IsClassInitError(sharedIndex)))
{
// Some error occurred trying to init this class
pEntry = (DeadlockAwareLockedListElement *) pLock->Find(this);
_ASSERTE(pEntry!=NULL);
// Extract the saved exception.
*pThrowable = ObjectFromHandle(pEntry->m_hInitException);
pLock->Leave();
return FALSE;
}
//
// Check to see if the .cctor for this class is already being run.
//
pEntry = (DeadlockAwareLockedListElement *) pLock->Find(this);
BOOL bEnterLockSucceeded = FALSE;
__try
{
if (pEntry == NULL)
{
//
// We are the first one to try and run this classe's .cctor so create an entry for it
//
// No one else is running class init, so we need to allocate a new entry
pEntry = new DeadlockAwareLockedListElement;
if (pEntry == NULL)
{
// Out of memory
SetClassInitError();
pLock->Leave();
CreateExceptionObject(kOutOfMemoryException, pThrowable);
return FALSE;
}
// Fill in the entry information and add it to the correct list
pEntry->AddEntryToList(pLock, this);
// Take the entry's lock. This cannot cause a deadlock since nobody has started
// running the .cctor for this class.
bEnterLockSucceeded = pEntry->DeadlockAwareEnter();
_ASSERTE(bEnterLockSucceeded);
// Leave global lock
pLock->Leave();
//
// If we are shared, allocate our handles
//
// @ToDo: I don't think that this TRY/CATCH is necessary
// as RunClassInit() contains a dominating TRY/CATCH
COMPLUS_TRY
{
if (IsShared())
pLocalBlock->PopulateClass(GetMethodTable());
//
// We are now ready to run the .cctor itself (i.e RunClassInit() )
//
if (!GetMethodTable()->HasClassConstructor() || RunClassInit(pEntry, pThrowable))
{
hrResult = S_OK;
// Record success: in the domain-local block for shared
// classes, on the EEClass itself otherwise.
if (IsShared())
pLocalBlock->SetClassInitialized(sharedIndex);
else
SetInited();
}
}
COMPLUS_CATCH
{
hrResult = E_FAIL;
UpdateThrowable(pThrowable);
}
COMPLUS_END_CATCH
if (FAILED(hrResult))
{
// The .cctor failed and we want to store the exception that resulted
// in the entry. Increment the ref count to keep the entry alive for
// subsequent attempts to run the .cctor.
pEntry->m_dwRefCount++;
DefineFullyQualifiedNameForClassWOnStack();
LPWSTR wszName = GetFullyQualifiedNameForClassW(this);
OBJECTREF pInitException = NULL;
GCPROTECT_BEGIN(pInitException);
CreateTypeInitializationExceptionObject(wszName,pThrowable,&pInitException);
// Save the exception object, and return to caller as well.
pEntry->m_hInitException = (pDomain ? pDomain : GetDomain())->CreateHandle(pInitException);
pEntry->m_hrResultCode = E_FAIL;
*pThrowable = pInitException;
GCPROTECT_END();
if (IsShared())
pLocalBlock->SetClassInitError(sharedIndex);
else
SetClassInitError();
}
// Set the flag indicating we need to leave the lock now that we have either
// finished running the .cctor or failed.
bLeaveLock = TRUE;
}
else
{
//
// Someone else is initing this class
//
// Refcount ourselves as waiting for this class to init
pEntry->m_dwRefCount++;
pLock->Leave();
// Wait for class - note, we could be waiting on our own thread from running a class init further up the stack
bEnterLockSucceeded = pEntry->DeadlockAwareEnter();
if(bEnterLockSucceeded)
{
//
// We managed to take the lock this means that the other thread has finished running it or
// that the current thread is the one that is already running it.
//
hrResult = pEntry->m_hrResultCode;
if(FAILED(hrResult))
{
// An exception may have occurred in the cctor. DoRunClassInit() should return FALSE in that
// case.
_ASSERTE(pEntry->m_hInitException);
_ASSERTE(IsInitError() || (IsShared() && pLocalBlock->IsClassInitError(sharedIndex)));
// Extract the saved exception.
*pThrowable = ObjectFromHandle(pEntry->m_hInitException);
}
bLeaveLock = TRUE;
}
else
{
//
// Taking the lock would cause a deadlock.
//
COMPLUS_TRY
{
//
// If we are shared, go ahead and allocate our DLS entry & handles.
// (It's OK to exposed uninitialized statics, but we do need to allocate them.)
// It's OK to race with the other thread to do this, since there is an
// app domain level lock which protects the DLS block.
//
if (IsShared())
pLocalBlock->PopulateClass(GetMethodTable());
// The class init has not run yet so lets return S_FALSE to indicate this.
hrResult = S_FALSE;
}
COMPLUS_CATCH
{
hrResult = E_FAIL;
UpdateThrowable(pThrowable);
}
COMPLUS_END_CATCH
}
}
//
// Notify any entries waiting on the current entry and wait for the required entries.
//
// We need to take the global lock before we play with the list of entries.
pLock->Enter();
}
// Leave the lock if the flag is set.
__finally
{
if (bEnterLockSucceeded)
pEntry->DeadlockAwareLeave();
}
//
// If we are the last waiter, delete the entry
//
if (--pEntry->m_dwRefCount == 0)
{
// Unlink item from list - in reality, anyone can do this, it doesn't have to be the last waiter.
pLock->Unlink(pEntry);
// Clean up the information contained in the entry and delete it.
pEntry->Destroy();
delete pEntry;
}
pLock->Leave();
if (ppLocalClass != NULL)
if (IsShared())
*ppLocalClass = pLocalBlock->GetClass(sharedIndex);
else
*ppLocalClass = NULL;
END_CHECK_STACK;
STRESS_LOG2(LF_CLASSLOADER, LL_INFO1000, "DoRunClassInit: returning SUCCESS for init %pT in appdomain %p\n", GetMethodTable(), pDomain);
// No need to set pThrowable in case of error it will already have been set.
return SUCCEEDED(hrResult) ? TRUE : FALSE;
}
//
// Lock-free shortcut to the DomainLocalClass for a shared class in the
// given app domain. Intended for debugger use only, since it performs
// no synchronization whatsoever.
//
DomainLocalClass *EEClass::GetDomainLocalClassNoLock(AppDomain *pAppDomain)
{
    _ASSERTE(IsShared());
    // Index directly into the domain's local block by our shared class index.
    return pAppDomain->GetDomainLocalBlock()->GetClass(
        GetMethodTable()->GetSharedClassIndex());
}
//==========================================================================
// If the EEClass doesn't yet know the Exposed class that represents it via
// Reflection, acquire that class now. Regardless, return it to the caller.
//
// Lazily creates (and caches in m_ExposedClassObject) the managed
// System.Type-style object for this class. Safe to race: all threads build
// a candidate, but only one wins the interlocked publish; losers discard
// their candidate. May trigger a GC and may throw.
//==========================================================================
OBJECTREF EEClass::GetExposedClassObject()
{
THROWSCOMPLUSEXCEPTION();
TRIGGERSGC();
// We shouldnt be here if the class is __TransparentProxy
_ASSERTE(!CRemotingServices::IsRemotingInitialized()||this != CTPMethodTable::GetMethodTable()->GetClass());
if (m_ExposedClassObject == NULL) {
// Make sure that reflection has been initialized
COMClass::EnsureReflectionInitialized();
// Make sure that we have been restored
CheckRestore();
REFLECTCLASSBASEREF refClass = NULL;
// Protect the new reference across the allocation calls below, which can GC.
GCPROTECT_BEGIN(refClass);
COMClass::CreateClassObjFromEEClass(this, &refClass);
// Let all threads fight over who wins using InterlockedCompareExchange.
// Only the winner can set m_ExposedClassObject from NULL.
OBJECTREF *exposedClassObject;
// Allocate a pinned-table slot to hold the reference, then store the
// reflection object into it (no app domain association for shared classes).
GetDomain()->AllocateObjRefPtrsInLargeTable(1, &exposedClassObject);
SetObjectReference(exposedClassObject, refClass, IsShared() ? NULL : (AppDomain*)GetDomain());
// CAS publishes the slot pointer; a non-NULL return means another thread
// won the race, so clear our (now unused) slot.
if (VipInterlockedCompareExchange ((void**)&m_ExposedClassObject, *(void**)&exposedClassObject, NULL))
SetObjectReference(exposedClassObject, NULL, NULL);
GCPROTECT_END();
}
return *m_ExposedClassObject;
}
// Remove from this class's children chain every child class that belongs to
// pDomain (which is being unloaded). Runs of doomed children are spliced out
// in one step. Removal at the head of the chain can race with concurrent
// inserts, which is handled with an interlocked exchange plus a full restart
// on contention.
void EEClass::UnlinkChildrenInDomain(AppDomain *pDomain)
{
EEClass **ppRewrite;
EEClass *pCur, *pFirstRemove;
restart:
ppRewrite = &m_ChildrenChain;
// We only remember parents of classes that are being unloaded. Such parents
// clearly have children. But we never notice the subtypes of e.g. __ComObject
// and it's not really worth it from a backpatching perspective.
// _ASSERTE(m_ChildrenChain);
do
{
// Skip all leading classes for domains that are NOT being unloaded.
while (*ppRewrite && (*ppRewrite)->GetDomain() != pDomain)
ppRewrite = &(*ppRewrite)->m_SiblingsChain;
if (*ppRewrite)
{
// Now march pCur along until we find the end of a sublist of classes that
// are being unloaded.
//
// By grabbing pFirstRemove before checking pCur->GetDomain(), we handle the
// race between someone inserting a type that doesn't need unloading at the
// head, in the case where ppRewrite points to the head. This will simply
// perform a NOP and then go back and pick up the next segment to remove.
pFirstRemove = pCur = *ppRewrite;
while (pCur && pCur->GetDomain() == pDomain)
pCur = pCur->m_SiblingsChain;
// Now extract that portion of the chain. We can have contention with inserts
// only if we are removing from the head. And if we have contention, it is
// guaranteed that we have moved from the head to further down. So we don't
// have to worry about contention in a loop. Nevertheless, we need to find
// the point at which to start removing because it has moved. The best way to
// ensure we are running well-tested code is to simply restart. This is
// inefficient, but it's the best way to guarantee robustness for an exceedingly
// rare situation.
if (ppRewrite == &m_ChildrenChain)
{
if (FastInterlockCompareExchange((void **) ppRewrite,
pCur,
pFirstRemove) != pFirstRemove)
{
// contention. Try again
goto restart;
}
}
else
{
// We aren't operating at the head, so we don't need to worry about races.
*ppRewrite = pCur;
}
// The segment just spliced out must have left no doomed class at this link.
_ASSERTE(!*ppRewrite ||
(*ppRewrite)->GetDomain() != pDomain);
}
} while (*ppRewrite);
}
// Global switch: while TRUE, PatchAggressively refuses to walk the class
// hierarchy. Set during appdomain unload, when the sibling/children chains
// are potentially corrupt (see the comment in PatchAggressively).
// NOTE(review): read/written from multiple threads as a plain BOOL with no
// interlocked access -- presumably a benign race by design; confirm.
BOOL s_DisableBackpatching = FALSE;
// Temporarily turn off aggressive vtable backpatching.
void EEClass::DisableBackpatching()
{
s_DisableBackpatching = TRUE;
}
// Re-enable aggressive vtable backpatching.
void EEClass::EnableBackpatching()
{
s_DisableBackpatching = FALSE;
}
// Backpatch up and down the class hierarchy, as aggressively as possible.
//
// Starting at pMD's introducing method table, replace every vtable slot that
// still points at pMD's prestub with the freshly jitted code address
// (codeaddr). The walk is depth-first over the children/siblings chains,
// implemented with explicit gotos rather than recursion. Returns FALSE if
// backpatching is globally disabled, TRUE otherwise.
BOOL EEClass::PatchAggressively(MethodDesc *pMD, SLOT codeaddr)
{
// If we are in the middle of appdomain unloading, the sibling and children chains
// are potentially corrupt. They will be fixed by the time the appdomain is
// fully unloaded but -- until the patch list is applied and deleted -- we bypass
// any aggressive backpatching opportunities.
if (s_DisableBackpatching)
return FALSE;
MethodTable *pMT = pMD->GetMethodTable();
MethodTable *baseMT = pMT;
DWORD slot = pMD->GetSlot();
SLOT prestub = pMD->GetPreStubAddr();
BOOL IsDup = pMD->IsDuplicate();
DWORD numSlots;
EEClass *pClass;
SLOT curaddr;
_ASSERTE(pMD->IsVirtual());
// We are starting at the point in the hierarchy where the MD was introduced. So
// we only need to patch downwards.
while (TRUE)
{
_ASSERTE(pMT->IsInterface() ||
pMT->GetClass()->GetNumVtableSlots() >= slot);
// Interfaces have no backpatchable slot here; use 0 so neither the
// prestub nor codeaddr comparison below matches.
curaddr = (pMT->IsInterface()
? 0
: pMT->GetVtable() [slot]);
// If it points to *our* prestub, patch it. If somehow it already got
// patched, we keep hunting downwards (this is probably a race). For anything
// else, we are perhaps seeing an override in a child. Further searches down
// are likely to be fruitless.
if (curaddr == prestub)
pMT->GetVtable() [slot] = codeaddr;
else
if (curaddr != codeaddr)
goto go_sideways;
// If this is a duplicate, let's scan the rest of the VTable hunting for other
// hits.
if (IsDup)
{
numSlots = pMT->GetClass()->GetNumVtableSlots();
for (DWORD i=0; i<numSlots; i++)
if (pMT->GetVtable() [i] == prestub)
pMT->GetVtable() [i] = codeaddr;
}
// Whenever we finish a class, we go downwards.
// go_down:
pClass = pMT->GetClass()->m_ChildrenChain;
if (pClass)
{
pMT = pClass->GetMethodTable();
continue;
}
// If we can go down no further, we go sideways.
go_sideways:
// We never go sideways from our root. When we attempt that, we are done.
if (pMT == baseMT)
break;
pClass = pMT->GetClass()->m_SiblingsChain;
if (pClass)
{
pMT = pClass->GetMethodTable();
continue;
}
// If we can go down no further, we go up and then try to go sideways
// from there. (We've already done our parent).
// go_up:
pMT = pMT->GetParentMethodTable();
goto go_sideways;
}
return TRUE;
}
// Resolve a member token (mdtMemberRef, mdtMethodDef or mdtFieldDef) scoped
// to pModule into the runtime MethodDesc* or FieldDesc* that implements it,
// loading the owning class on demand.
//
//   pModule          - module the token is scoped to
//   MemberRef        - token to resolve
//   mdTokenNotToLoad - load-restriction token handed to the class loader
//   ppDesc           - [out] receives a MethodDesc* or FieldDesc*
//   pfIsMethod       - [out] TRUE => *ppDesc is a MethodDesc, FALSE => FieldDesc
//   pThrowable       - [out] exception object on failure (must be GC-protected)
//
// if this returns E_FAIL and pThrowable is specified, it must be set
HRESULT EEClass::GetDescFromMemberRef(Module *pModule,
mdMemberRef MemberRef,
mdToken mdTokenNotToLoad,
void **ppDesc,
BOOL *pfIsMethod,
OBJECTREF *pThrowable)
{
_ASSERTE(IsProtectedByGCFrame(pThrowable));
HRESULT hr = S_OK;
LPCUTF8 szMember;
EEClass * pEEClass = 0;
PCCOR_SIGNATURE pSig = NULL;
DWORD cSig;
DWORD rid = RidFromToken(MemberRef);
mdToken tk = TypeFromToken(MemberRef);
ClassLoader* pLoader = NULL;
*ppDesc = NULL;
*pfIsMethod = TRUE;
if (tk == mdtMemberRef)
{
// Remember the original module: pModule may be redirected below for
// module-ref parents, but the cache entry belongs to the referencing module.
Module *pReference = pModule;
// In lookup table?
void *pDatum = pModule->LookupMemberRef(MemberRef, pfIsMethod);
if (pDatum != NULL)
{
if (*pfIsMethod)
((MethodDesc*)pDatum)->GetMethodTable()->CheckRestore();
*ppDesc = pDatum;
return S_OK;
}
// No, so do it the long way
mdTypeRef typeref;
IMDInternalImport *pInternalImport;
pInternalImport = pModule->GetMDImport();
szMember = pInternalImport->GetNameAndSigOfMemberRef(
MemberRef,
&pSig,
&cSig
);
// A field calling convention in the signature distinguishes field refs
// from method refs.
*pfIsMethod = !isCallConv(MetaSig::GetCallingConventionInfo(pModule, pSig),
IMAGE_CEE_CS_CALLCONV_FIELD);
typeref = pInternalImport->GetParentOfMemberRef(MemberRef);
// If parent is a method def, then this is a varargs method and the
// desc lives in the same module.
if (TypeFromToken(typeref) == mdtMethodDef)
{
MethodDesc *pDatum = pModule->LookupMethodDef(typeref);
if (pDatum)
{
pDatum->GetMethodTable()->CheckRestore();
*ppDesc = pDatum;
return S_OK;
}
else // There is no value for this def so we haven't yet loaded the class.
{
// Get the parent of the MethodDef
mdTypeDef typeDef;
hr = pInternalImport->GetParentToken(typeref, &typeDef);
// Make sure it is a typedef
if (TypeFromToken(typeDef) != mdtTypeDef)
{
_ASSERTE(!"MethodDef without TypeDef as Parent");
hr = E_FAIL;
goto exit;
}
// load the class
pLoader = pModule->GetClassLoader();
_ASSERTE(pLoader);
NameHandle name(pModule, typeDef);
name.SetTokenNotToLoad(mdTokenNotToLoad);
pEEClass = pLoader->LoadTypeHandle(&name, pThrowable).GetClass();
if (pEEClass == NULL)
{
hr = COR_E_TYPELOAD;
goto exitThrowable;
}
// the class has been loaded and the method should be in the rid map!
pDatum = pModule->LookupMethodDef(typeref);
if (pDatum)
{
*ppDesc = pDatum;
return S_OK;
}
else
{
hr = E_FAIL;
goto exit;
}
}
}
else if (TypeFromToken(typeref) == mdtModuleRef)
{
// Global function/variable
if (FAILED(hr = pModule->GetAssembly()->FindModuleByModuleRef(pInternalImport,
typeref,
mdTokenNotToLoad,
&pModule,
pThrowable)))
goto exit;
// Resolve against the target module's global (<Module>) type.
typeref = COR_GLOBAL_PARENT_TOKEN;
}
else if (TypeFromToken(typeref) != mdtTypeRef &&
TypeFromToken(typeref) != mdtTypeDef &&
TypeFromToken(typeref) != mdtTypeSpec)
{
// @TODO: Need to handle this.
hr = E_FAIL;
goto exit;
}
// Load the parent type, then search it for the member by name + signature.
NameHandle name(pModule, typeref);
pLoader = pModule->GetClassLoader();
_ASSERTE(pLoader);
name.SetTokenNotToLoad(mdTokenNotToLoad);
TypeHandle typeHnd = pLoader->LoadTypeHandle(&name, pThrowable);
pEEClass = typeHnd.GetClass();
if (pEEClass == NULL)
{
hr = COR_E_TYPELOAD;
goto exitThrowable;
}
if (!*pfIsMethod)
{
FieldDesc *pFD = pEEClass->FindField(szMember, pSig, cSig, pModule);
if (pFD == NULL)
{
hr = E_FAIL;
goto exit;
}
*ppDesc = (void *) pFD;
// Cache the resolution in the referencing module for next time.
pReference->StoreMemberRef(MemberRef, pFD);
}
else
{
MethodDesc *pMD;
pMD = pEEClass->FindMethod(szMember, pSig, cSig, pModule, mdTokenNil, 0, TRUE, typeHnd);
if (pMD == NULL)
{
hr = E_FAIL;
goto exit;
}
*ppDesc = (void *) pMD;
pReference->StoreMemberRef(MemberRef, pMD);
}
hr = S_OK;
}
else if (tk == mdtMethodDef)
{
*pfIsMethod = TRUE;
// In lookup table?
MethodDesc *pDatum = pModule->LookupMethodDef(MemberRef);
if (!pDatum)
{
// No, so do it the long way
mdTypeDef typeDef;
hr = pModule->GetMDImport()->GetParentToken(MemberRef, &typeDef);
// NOTE(review): returns FALSE (an HRESULT-typed function) on failure here,
// unlike the mdtFieldDef path below which returns hr -- looks inconsistent.
if (FAILED(hr))
return FALSE;
// Loading the class populates the RID map as a side effect.
NameHandle name(pModule, typeDef);
name.SetTokenNotToLoad(mdTokenNotToLoad);
if (!pModule->GetClassLoader()->LoadTypeHandle(&name, pThrowable).GetClass())
{
hr = COR_E_TYPELOAD;
goto exitThrowable;
}
// The RID map should have been filled out if we loaded the class
pDatum = pModule->LookupMethodDef(MemberRef);
if (!pDatum)
{
// Fetch name/sig purely so the missing-method exception below is useful.
pSig = pModule->GetMDImport()->GetSigOfMethodDef(MemberRef, &cSig);
szMember = pModule->GetMDImport()->GetNameOfMethodDef(MemberRef);
hr = E_FAIL;
goto exit;
}
}
pDatum->GetMethodTable()->CheckRestore();
*ppDesc = pDatum;
return S_OK;
}
else if (tk == mdtFieldDef)
{
*pfIsMethod = FALSE;
// In lookup table?
FieldDesc *pDatum = pModule->LookupFieldDef(MemberRef);
if (!pDatum)
{
// No, so do it the long way
mdTypeDef typeDef;
hr = pModule->GetMDImport()->GetParentToken(MemberRef, &typeDef);
if (FAILED(hr))
return hr;
// Load the class - that should set the desc in the rid map
NameHandle name(pModule, typeDef);
name.SetTokenNotToLoad(mdTokenNotToLoad);
if (!pModule->GetClassLoader()->LoadTypeHandle(&name, pThrowable).GetClass())
{
hr = COR_E_TYPELOAD;
goto exitThrowable;
}
pDatum = pModule->LookupFieldDef(MemberRef);
if (!pDatum)
{
// Fetch name/sig purely for the missing-field exception message.
pSig = pModule->GetMDImport()->GetSigOfFieldDef(MemberRef, &cSig);
szMember = pModule->GetMDImport()->GetNameOfFieldDef(MemberRef);
hr = E_FAIL;
goto exit;
}
}
pDatum->GetMethodTableOfEnclosingClass()->CheckRestore();
#ifdef EnC_SUPPORTED
if (pModule->IsEditAndContinue()) {
EnCFieldDesc *pFD = (EnCFieldDesc*)pDatum;
// we may not have the full FieldDesc info at applyEnC time becuase we don't
// have a thread so can't do things like load classes (due to possible exceptions)
if (pFD->IsEnCNew() && pFD->NeedsFixup())
{
if (FAILED(hr=pFD->Fixup(MemberRef)))
return hr;
}
}
#endif // EnC_SUPPORTED
*ppDesc = pDatum;
return S_OK;
}
else
{
// Unrecognized token type.
szMember = NULL;
hr = E_FAIL;
}
// Common failure path: build a MissingFieldException/MissingMethodException
// (into *pThrowable) describing the member we could not resolve.
exit:
if (FAILED(hr) && pThrowable) {
DefineFullyQualifiedNameForClass();
LPUTF8 szClassName;
if (pEEClass)
{
szClassName = GetFullyQualifiedNameForClass(pEEClass);
}
else
{
szClassName = "?";
}
if (!*pfIsMethod)
{
LPUTF8 szFullName;
MAKE_FULLY_QUALIFIED_MEMBER_NAME(szFullName, NULL, szClassName, szMember, NULL);
#define MAKE_TRANSLATIONFAILED szwFullName=L""
MAKE_WIDEPTR_FROMUTF8_FORPRINT(szwFullName, szFullName);
#undef MAKE_TRANSLATIONFAILED
CreateExceptionObject(kMissingFieldException, IDS_EE_MISSING_FIELD, szwFullName, NULL, NULL, pThrowable);
}
else
{
if (pSig && pModule)
{
// Format the full method signature into the exception message.
MetaSig tmp(pSig, pModule);
SigFormat sf(tmp, szMember ? szMember : "?", szClassName, NULL);
#define MAKE_TRANSLATIONFAILED szwFullName=L""
MAKE_WIDEPTR_FROMUTF8_FORPRINT(szwFullName, sf.GetCString());
#undef MAKE_TRANSLATIONFAILED
CreateExceptionObject(kMissingMethodException, IDS_EE_MISSING_METHOD, szwFullName, NULL, NULL, pThrowable);
}
else
CreateExceptionObject(kMissingMethodException, IDS_EE_MISSING_METHOD, L"?", NULL, NULL, pThrowable);
}
}
// Taken directly when the loader already set *pThrowable (COR_E_TYPELOAD).
exitThrowable:
return hr;
}
// Resolve a member token, accepting only methods. Delegates to
// GetDescFromMemberRef and fails with E_FAIL (clearing the out-param)
// when the token resolved to a field instead of a method.
HRESULT EEClass::GetMethodDescFromMemberRef(Module *pModule, mdMemberRef MemberRef, MethodDesc **ppMethodDesc, OBJECTREF *pThrowable)
{
    _ASSERTE(IsProtectedByGCFrame(pThrowable));
    BOOL fIsMethod;
    // We did not find this in the various permutations available to methods now so use the fallback!
    HRESULT hr = GetDescFromMemberRef(pModule, MemberRef, (void **) ppMethodDesc, &fIsMethod, pThrowable);
    if (FAILED(hr))
        return hr;
    if (!fIsMethod)
    {
        // Token named a field; report failure and hand back nothing.
        *ppMethodDesc = NULL;
        hr = E_FAIL;
    }
    return hr;
}
// Resolve a member token, accepting only fields. Delegates to
// GetDescFromMemberRef and fails with E_FAIL (clearing the out-param)
// when the token resolved to a method instead of a field.
HRESULT EEClass::GetFieldDescFromMemberRef(Module *pModule, mdMemberRef MemberRef, FieldDesc **ppFieldDesc, OBJECTREF *pThrowable)
{
    _ASSERTE(IsProtectedByGCFrame(pThrowable));
    BOOL fIsMethod;
    HRESULT hr = GetDescFromMemberRef(pModule, MemberRef, (void **) ppFieldDesc, &fIsMethod, pThrowable);
    if (FAILED(hr))
        return hr;
    if (fIsMethod)
    {
        // Token named a method; report failure and hand back nothing.
        *ppFieldDesc = NULL;
        hr = E_FAIL;
    }
    return hr;
}
// Implementations of SparseVTableMap methods.
// Start with an empty map: no entry list allocated and both slot
// cursors (method-table slot / vtable slot) at zero.
SparseVTableMap::SparseVTableMap()
  : m_MapList(NULL),
    m_MapEntries(0),
    m_Allocated(0),
    m_LastUsed(0),
    m_VTSlot(0),
    m_MTSlot(0)
{
}
// Release the entry list. delete[] on a NULL pointer is a no-op,
// so no guard is needed.
SparseVTableMap::~SparseVTableMap()
{
    delete [] m_MapList;
    m_MapList = NULL;
}
// Allocate or expand the mapping list for a new entry.
// Ensure there is room for one more map entry, growing the list by
// MapGrow entries when it is full. Returns false on allocation failure
// (this codebase uses non-throwing allocation checks).
BOOL SparseVTableMap::AllocOrExpand()
{
    if (m_MapEntries != m_Allocated)
        return true;            // spare capacity already available

    Entry *newList = new Entry[m_Allocated + MapGrow];
    if (newList == NULL)
        return false;

    // Carry over the existing entries, then swap in the larger buffer.
    if (m_MapList != NULL)
        memcpy(newList, m_MapList, m_MapEntries * sizeof(Entry));
    m_Allocated += MapGrow;
    delete [] m_MapList;
    m_MapList = newList;
    return true;
}
// While building mapping list, record a gap in VTable slot numbers.
//
// StartMTSlot  - the method-table slot at which the gap begins
// NumSkipSlots - number of vtable slots the gap occupies (must be > 0)
//
// Returns false only if the entry list could not be grown.
BOOL SparseVTableMap::RecordGap(WORD StartMTSlot, WORD NumSkipSlots)
{
_ASSERTE((StartMTSlot == 0) || (StartMTSlot > m_MTSlot));
_ASSERTE(NumSkipSlots > 0);
// We use the information about the current gap to complete a map entry for
// the last non-gap. There is a special case where the vtable begins with a
// gap, so we don't have a non-gap to record.
if (StartMTSlot == 0) {
_ASSERTE((m_MTSlot == 0) && (m_VTSlot == 0));
// Just advance the vtable cursor past the leading gap; no entry needed.
m_VTSlot = NumSkipSlots;
return true;
}
// We need an entry, allocate or expand the list as necessary.
if (!AllocOrExpand())
return false;
// Update the list with an entry describing the last non-gap in vtable
// entries.
m_MapList[m_MapEntries].m_Start = m_MTSlot;
m_MapList[m_MapEntries].m_Span = StartMTSlot - m_MTSlot;
m_MapList[m_MapEntries].m_MapTo = m_VTSlot;
// Advance both cursors: the vtable cursor also skips over the gap itself.
m_VTSlot += (StartMTSlot - m_MTSlot) + NumSkipSlots;
m_MTSlot = StartMTSlot;
m_MapEntries++;
return true;
}
// Finish creation of mapping list.
//
// Records the trailing non-gap run (if any) so the map covers all
// TotalMTSlots method-table slots, and leaves m_VTSlot holding the total
// vtable slot count (consumed by GetNumVTableSlots). Returns false only
// if the entry list could not be grown.
BOOL SparseVTableMap::FinalizeMapping(WORD TotalMTSlots)
{
_ASSERTE(TotalMTSlots >= m_MTSlot);
// If mapping ended with a gap, we have nothing else to record.
if (TotalMTSlots == m_MTSlot)
return true;
// Allocate or expand the list as necessary.
if (!AllocOrExpand())
return false;
// Update the list with an entry describing the last non-gap in vtable
// entries.
m_MapList[m_MapEntries].m_Start = m_MTSlot;
m_MapList[m_MapEntries].m_Span = TotalMTSlots - m_MTSlot;
m_MapList[m_MapEntries].m_MapTo = m_VTSlot;
// Update VT slot cursor, because we use it to determine total number of
// vtable slots for GetNumVtableSlots.
m_VTSlot += TotalMTSlots - m_MTSlot;
m_MapEntries++;
return true;
}
// Lookup a VTable slot number from a method table slot number.
// A one-entry cache (m_LastUsed) short-circuits repeated lookups within
// the same span; otherwise the spans are scanned linearly. Asserts (and
// returns ~0) if the slot lies in no recorded span.
WORD SparseVTableMap::LookupVTSlot(WORD MTSlot)
{
    // Fast path: re-check the span that satisfied the previous lookup.
    const Entry &cached = m_MapList[m_LastUsed];
    if (MTSlot >= cached.m_Start && MTSlot < cached.m_Start + cached.m_Span)
        return (MTSlot - cached.m_Start) + cached.m_MapTo;

    // Slow path: scan every span for the one containing MTSlot.
    for (WORD i = 0; i < m_MapEntries; i++)
    {
        const Entry &e = m_MapList[i];
        if (MTSlot >= e.m_Start && MTSlot < e.m_Start + e.m_Span)
        {
            m_LastUsed = i;     // remember for next time
            return (MTSlot - e.m_Start) + e.m_MapTo;
        }
    }

    _ASSERTE(!"Invalid MethodTable slot");
    return ~0;
}
// Retrieve the number of slots in the vtable (both empty and full).
// Valid once FinalizeMapping has run, which leaves the vtable-slot
// cursor at the total count.
WORD SparseVTableMap::GetNumVTableSlots()
{
return m_VTSlot;
}
// Persist this map into a prejit data image: the map object itself plus
// its entry list. 'attribution' tags the stored data with the owning
// class token for diagnostics/accounting.
HRESULT SparseVTableMap::Save(DataImage *image, mdToken attribution)
{
HRESULT hr;
IfFailRet(image->StoreStructure(this, sizeof(SparseVTableMap),
DataImage::SECTION_CLASS,
DataImage::DESCRIPTION_CLASS,
attribution));
// Store the full allocated capacity, not just m_MapEntries.
IfFailRet(image->StoreStructure(m_MapList, m_Allocated * sizeof(Entry),
DataImage::SECTION_CLASS,
DataImage::DESCRIPTION_CLASS,
attribution));
return S_OK;
}
// Convert the entry-list pointer to its data-image relative form.
HRESULT SparseVTableMap::Fixup(DataImage *image)
{
HRESULT hr;
IfFailRet(image->FixupPointerField(&m_MapList));
return S_OK;
}
// Compute the byte range [*pStart, *pEnd) that a MethodTable occupies.
// When the type contains object references the image starts at the GC
// series descriptor (which lives below the MethodTable); otherwise at
// the MethodTable itself. It always ends just past the interface map.
void MethodTable::GetExtent(BYTE **pStart, BYTE **pEnd)
{
    if (ContainsPointers())
        *pStart = (BYTE*) CGCDesc::GetCGCDescFromMT(this)->GetLowestSeries();
    else
        *pStart = (BYTE*) this;
    *pEnd = (BYTE*) (m_pIMap + m_wNumInterface);
}
// Persist this MethodTable (its full extent, per GetExtent) into a prejit
// data image, plus the interface GUID info when this is an interface, then
// delegate to the EEClass for the rest of the class data.
HRESULT MethodTable::Save(DataImage *image)
{
HRESULT hr;
_ASSERTE(IsRestored());
BYTE *start, *end;
GetExtent(&start, &end);
IfFailRet(image->StoreStructure(start, (ULONG)(end - start),
DataImage::SECTION_METHOD_TABLE,
DataImage::DESCRIPTION_METHOD_TABLE,
GetClass()->GetCl()));
if (IsInterface())
{
// Make sure our guid is computed
GUID dummy;
GetClass()->GetGuid(&dummy, TRUE);
_ASSERTE(m_pGuidInfo != NULL);
IfFailRet(image->StoreStructure(m_pGuidInfo, sizeof(GuidInfo),
DataImage::SECTION_METHOD_TABLE,
DataImage::DESCRIPTION_METHOD_TABLE,
GetClass()->GetCl()));
}
return m_pEEClass->Save(image);
}
// Fix up this MethodTable inside a prejit data image: convert pointer
// fields to image-relative form, reset flags so the class is re-inited and
// restored at load time, prepare static-field storage, rewrite every vtable
// slot (direct code address, prestub, or jump-table thunk), convert the
// interface map to tokens, and finally delegate to EEClass::Fixup.
HRESULT MethodTable::Fixup(DataImage *image, DWORD *pRidToCodeRVAMap)
{
HRESULT hr;
_ASSERTE(IsRestored());
IfFailRet(image->FixupPointerField(&m_pEEClass));
IfFailRet(image->FixupPointerField(&m_pModule));
IfFailRet(image->FixupPointerField(&m_pIMap, NULL,
DataImage::REFERENCE_STORE,
DataImage::FIXUP_VA,
TRUE));
//
// This field must be initialized at
// load time
//
if (IsInterface())
IfFailRet(image->FixupPointerField(&m_pGuidInfo));
else
IfFailRet(image->ZeroPointerField(&m_pInterfaceVTableMap));
//
// Fix flags: clear "inited" when a cctor (or shared statics) will have to
// run again, and mark the image copy unrestored.
//
MethodTable *pNewMT = (MethodTable *) image->GetImagePointer(this);
if (pNewMT == NULL)
return E_POINTER;
if (HasClassConstructor()
|| (IsShared() && GetClass()->GetNumStaticFields() > 0))
pNewMT->m_wFlags &= ~enum_flag_ClassInited;
pNewMT->m_wFlags |= enum_flag_Unrestored;
//
// Fixup static fields:
// Zero all non-reference fields
// For reference fields (which need handles allocated),
// store (DWORD)(-1) in the field - we'll allocate a handle
// for it at load time
// For value class fields (which need boxed objects allocated),
// store the method table pointer for the value class - we'll
// allocate the object at load time.
//
BYTE *start = (BYTE *) (m_Vtable + m_pEEClass->GetNumMethodSlots());
BYTE *end = start + GetStaticSize();
IfFailRet(image->ZeroField(start, end - start));
// Statics follow the instance fields this class introduces (fieldCount is
// the count of fields introduced here, excluding inherited ones).
SIZE_T fieldCount = m_pEEClass->GetNumInstanceFields();
if (m_pEEClass->GetParentClass() != NULL)
fieldCount -= m_pEEClass->GetParentClass()->GetNumInstanceFields();
FieldDesc *pField = m_pEEClass->GetFieldDescList() + fieldCount;
FieldDesc *pFieldEnd = pField + m_pEEClass->GetNumStaticFields();
while (pField < pFieldEnd)
{
_ASSERTE(pField->IsStatic());
//
// We have to treat class statics & value type statics
// specially - they will require additional manual fixup
// at load time. To help with this, we assign -1 to
// all referenced fields (which will be replaced with an
// allocated handle), and the method table ptr for each
// value type field (which will be replaced with an allocated
// boxed instance.)
//
if (!pField->IsSpecialStatic())
{
switch (pField->GetFieldType())
{
case ELEMENT_TYPE_CLASS:
{
// Object-reference static: null the image copy; a handle is
// allocated for it at load time.
BYTE *addr = (BYTE *) m_Vtable;
if (IsShared())
addr += GetClass()->GetNumMethodSlots()*sizeof(SLOT);
addr += pField->GetOffset();
void **pFieldPtr = (void **) image->GetImagePointer(addr);
if (pFieldPtr == NULL)
return E_POINTER;
*pFieldPtr = NULL;
}
break;
case ELEMENT_TYPE_VALUETYPE:
{
// Value-type static: store the value class's typedef token so a
// boxed instance can be allocated at load time.
BYTE *addr = (BYTE *) m_Vtable;
if (IsShared())
addr += GetClass()->GetNumMethodSlots()*sizeof(SLOT);
addr += pField->GetOffset();
MethodTable *pMT = pField->GetTypeOfField()->GetMethodTable();
IfFailRet(image->FixupPointerFieldToToken(addr, pMT,
pMT->GetModule(),
mdtTypeDef));
}
break;
default:
break;
}
}
pField++;
}
//
// Fixup vtable
//
unsigned slotNumber = 0;
while (slotNumber < m_cbSlots)
{
SLOT *pSlot = &m_Vtable[slotNumber];
//
// Find the method desc from the slot.
//
MethodDesc *pMD = EEClass::GetUnknownMethodDescForSlotAddress(*pSlot);
_ASSERTE(pMD != NULL);
BOOL fSecurity = (pMD->GetSecurityFlags() != 0);
BOOL fRemotingIntercepted = pMD->IsRemotingIntercepted();
//
// If the method needs a security check, we need
// to always go through the stub.
//
void *code;
if (fSecurity || fRemotingIntercepted)
code = NULL;
else
IfFailRet(image->GetFunctionAddress(pMD, &code));
if (code == NULL)
{
if (pMD->GetModule() == GetModule())
{
// Same-module method with no prejitted code: point the slot at
// the method's prestub.
IfFailRet(image->FixupPointerField(pSlot,
pMD->GetPreStubAddr()));
}
else
{
_ASSERTE(!pMD->IsStatic());
// We prefer to use the slot number directly; that way the
// correct slot gets fixed up by the stub. But in some rare cases
// involving interfaces we cannot recover the MethodDesc from the actual slot number; in
// such cases we use the destination MethodDesc's slot number and rely on
// the fixup code scanning the vtable to backpatch the right slot.
DWORD targetSlotNumber = slotNumber;
if (slotNumber >= m_pEEClass->GetParentClass()->GetNumVtableSlots())
{
MethodTable *pParentMT = m_pEEClass->GetParentClass()->GetMethodTable();
InterfaceInfo_t *pInterface = GetInterfaceForSlot(slotNumber);
if (pInterface == NULL)
{
// We are in a slot which isn't covered by an interface.
targetSlotNumber = pMD->GetSlot();
_ASSERTE(targetSlotNumber < m_pEEClass->GetParentClass()->GetNumVtableSlots());
_ASSERTE(m_pEEClass->GetParentClass()->
GetUnknownMethodDescForSlot(targetSlotNumber)
== pMD);
}
else
{
MethodTable *pInterfaceMT = pInterface->m_pMethodTable;
InterfaceInfo_t *pParentInterface = pParentMT->FindInterface(pInterfaceMT);
if (pParentInterface == NULL)
{
if (pMD->GetMethodTable() == pInterfaceMT)
{
// We are inheriting the interface's method desc; the fixup code can figure
// this out.
_ASSERTE(pMD->IsComPlusCall());
_ASSERTE(IsComObjectType() || GetClass()->IsAbstract());
_ASSERTE(pInterfaceMT->GetClass()->
GetUnknownMethodDescForSlot(targetSlotNumber
- pInterface->m_wStartSlot)
== pMD);
}
else
{
// We've implemented a new interface with a parent's
// MethodDesc. In this case we must use the real MD's slot number.
targetSlotNumber = pMD->GetSlot();
_ASSERTE(targetSlotNumber < m_pEEClass->GetParentClass()->GetNumVtableSlots());
_ASSERTE(m_pEEClass->GetParentClass()->
GetUnknownMethodDescForSlot(targetSlotNumber)
== pMD);
}
}
else
{
// Our parent implemented this interface. Any methods
// which implement the interface which are inherited from
// our parent will also be on our parent's interface implementation
// The fixup logic can figure out where this is so use
// the real slot number.
_ASSERTE(m_pEEClass->GetParentClass()->
GetUnknownMethodDescForSlot(targetSlotNumber
- pInterface->m_wStartSlot
+ pParentInterface->m_wStartSlot)
== pMD);
}
}
}
else
{
_ASSERTE(m_pEEClass->GetParentClass()->
GetUnknownMethodDescForSlot(targetSlotNumber)
== pMD);
}
// Cross-module method: route through the module's jump target table
// entry for the chosen slot.
IfFailRet(image->FixupPointerField(pSlot,
GetModule()->GetJumpTargetTable() +
X86JumpTargetTable::ComputeTargetOffset(targetSlotNumber)));
}
}
else
// Prejitted code exists: point the slot directly at it.
IfFailRet(image->FixupPointerField(pSlot, code,
DataImage::REFERENCE_FUNCTION));
slotNumber++;
}
//
// Fixup Interface map: each interface MethodTable pointer becomes a
// typedef token resolved at load time.
//
InterfaceInfo_t *pIMap = m_pIMap;
InterfaceInfo_t *pIMapEnd = pIMap + m_wNumInterface;
while (pIMap < pIMapEnd)
{
IfFailRet(image->FixupPointerFieldToToken(&pIMap->m_pMethodTable,
NULL, pIMap->m_pMethodTable->GetModule(),
mdtTypeDef));
pIMap++;
}
return m_pEEClass->Fixup(image, this, pRidToCodeRVAMap);
}
// Compute the byte range [*pStart, *pEnd) this EEClass occupies, which
// depends on which derived EEClass flavor it actually is.
void EEClass::GetExtent(BYTE **pStart, BYTE **pEnd)
{
    SIZE_T cbSize;
    if (HasLayout())
        cbSize = sizeof(LayoutEEClass);
    else if (IsDelegateClass() || IsMultiDelegateClass())
        cbSize = sizeof(DelegateEEClass);
    else if (IsEnum())
        cbSize = sizeof(EnumEEClass);
    else
        cbSize = sizeof(EEClass);
    *pStart = (BYTE *) this;
    *pEnd = *pStart + cbSize;
}
// Persist this EEClass into a prejit data image: the class structure itself,
// the debug name, the sparse vtable map (if any), the FieldDescs and
// MethodDesc chunks it introduces, plus layout marshaler info and enum
// value/name tables for the corresponding class flavors.
HRESULT EEClass::Save(DataImage *image)
{
HRESULT hr;
_ASSERTE(IsRestored());
BYTE *start, *end;
GetExtent(&start, &end);
IfFailRet(image->StoreStructure(start, (ULONG)(end - start),
DataImage::SECTION_CLASS,
DataImage::DESCRIPTION_CLASS,
GetCl()));
#ifdef _DEBUG
// Debug class name may be shared; store only once.
if (!image->IsStored(m_szDebugClassName))
IfFailRet(image->StoreStructure(m_szDebugClassName, (ULONG)(strlen(m_szDebugClassName)+1),
DataImage::SECTION_DEBUG,
DataImage::DESCRIPTION_DEBUG,
mdTokenNil, 1));
#endif // _DEBUG
if (m_pSparseVTableMap != NULL)
IfFailRet(m_pSparseVTableMap->Save(image, GetCl()));
//
// Save FieldDescs (only the ones this class introduces: its statics plus
// instance fields beyond those inherited from the parent).
//
SIZE_T fieldCount = m_wNumInstanceFields + m_wNumStaticFields;
if (GetParentClass() != NULL)
fieldCount -= GetParentClass()->m_wNumInstanceFields;
IfFailRet(image->StoreStructure(m_pFieldDescList, (ULONG)(fieldCount * sizeof(FieldDesc)),
DataImage::SECTION_FIELD_DESC,
DataImage::DESCRIPTION_FIELD_DESC));
FieldDesc *pFD = m_pFieldDescList;
FieldDesc *pFDEnd = pFD + fieldCount;
while (pFD < pFDEnd)
{
IfFailRet(pFD->SaveContents(image));
pFD++;
}
//
// Save MethodDescs
//
MethodDescChunk *chunk = m_pChunks;
while (chunk != NULL)
{
IfFailRet(chunk->Save(image));
chunk = chunk->GetNextChunk();
}
if (HasLayout())
{
// Classes with explicit layout also carry their field marshaler array.
EEClassLayoutInfo *pInfo = &((LayoutEEClass*)this)->m_LayoutInfo;
if (pInfo->m_numCTMFields > 0)
IfFailRet(image->StoreStructure(pInfo->m_pFieldMarshalers,
pInfo->m_numCTMFields * MAXFIELDMARSHALERSIZE,
DataImage::SECTION_FIELD_INFO,
DataImage::DESCRIPTION_FIELD_DESC,
GetCl()));
}
if (IsEnum())
{
// Build and store the enum value/name tables so they load prebuilt.
EnumEEClass *pEnumClass = (EnumEEClass*) this;
pEnumClass->BuildEnumTables();
if (pEnumClass->GetEnumCount() > 0)
{
DWORD enumCount = pEnumClass->GetEnumCount();
IfFailRet(image->StoreStructure(pEnumClass->m_values,
enumCount * (1<<pEnumClass->GetEnumLogSize()),
DataImage::SECTION_FIELD_INFO,
DataImage::DESCRIPTION_FIELD_DESC,
GetCl()));
IfFailRet(image->StoreStructure(pEnumClass->m_names,
enumCount * sizeof(LPCUTF8),
DataImage::SECTION_FIELD_INFO,
DataImage::DESCRIPTION_FIELD_DESC,
GetCl()));
// Name strings may be shared; store each at most once.
LPCUTF8 *pNames = pEnumClass->m_names;
LPCUTF8 *pNamesEnd = pNames + enumCount;
while (pNames < pNamesEnd)
{
if (!image->IsStored((void*) *pNames))
image->StoreStructure((void *) *pNames, (ULONG)strlen(*pNames)+1,
DataImage::SECTION_FIELD_INFO,
DataImage::DESCRIPTION_FIELD_DESC,
GetCl());
pNames++;
}
}
}
#if CHECK_APP_DOMAIN_LEAKS
//
// Make sure we've computed our agility flags, in case a later load of this
// prejit file occurs with leak detection enabled.
//
if (!IsAppDomainAgilityDone())
SetAppDomainAgileAttribute(TRUE);
#endif
return S_OK;
}
// Number of FieldDescs this class introduces: all of its static fields
// plus the instance fields not inherited from its parent.
DWORD EEClass::FieldDescListSize()
{
    DWORD cFields = m_wNumInstanceFields + m_wNumStaticFields;
    EEClass *pParent = GetParentClass();
    if (pParent != NULL)
        cFields -= pParent->m_wNumInstanceFields;
    return cFields;
}
// Fix up this EEClass inside a prejit data image: convert pointers to
// image-relative form, reset restored/inited flags in the image copy, zero
// lazily-initialized caches, and fix up the FieldDescs, MethodDesc chunks,
// and flavor-specific data (layout marshalers, delegate methods, enum tables).
// pMethodTable is passed explicitly because some classes (e.g. the remoting
// proxy) have fake method tables installed and we want the regular one.
HRESULT EEClass::Fixup(DataImage *image, MethodTable *pMethodTable, DWORD *pRidToCodeRVAMap)
{
HRESULT hr = S_OK;
_ASSERTE(IsRestored());
#ifdef _DEBUG
IfFailRet(image->FixupPointerField(&m_szDebugClassName));
#endif // _DEBUG
if (m_pSparseVTableMap != NULL)
{
IfFailRet(image->FixupPointerField(&m_pSparseVTableMap));
IfFailRet(m_pSparseVTableMap->Fixup(image));
}
// Parent pointer becomes a typedef token resolved at load time.
if (GetParentClass() != NULL)
IfFailRet(image->FixupPointerFieldToToken(GetParentClassPtr(),
GetParentClass()->GetMethodTable(),
GetParentClass()->GetModule(),
mdtTypeDef));
//
// We pass in the method table, because some classes (e.g. remoting proxy)
// have fake method tables set up in them & we want to restore the regular
// one.
//
IfFailRet(image->FixupPointerField(&m_pMethodTable, pMethodTable));
//
// Clear the restored flag & class init flag (if appropriate)
//
EEClass *pNewClass = (EEClass *) image->GetImagePointer(this);
if (pNewClass == NULL)
return E_POINTER;
pNewClass->m_VMFlags |= VMFLAG_UNRESTORED;
if (!IsThunking()) // assume thunking has no inits -
// this case triggers asserts in FindMethod
{
// A cctor (or shared statics) means class init must run again at load.
if (GetMethodTable()->HasClassConstructor()
|| (IsShared() && GetNumStaticFields() > 0))
pNewClass->m_VMFlags &= ~VMFLAG_INITED;
}
//
// Fixup FieldDescs
//
SIZE_T fieldCount = FieldDescListSize();
IfFailRet(image->FixupPointerField(&m_pFieldDescList));
FieldDesc *pField = m_pFieldDescList;
FieldDesc *pFieldEnd = pField + fieldCount;
while (pField < pFieldEnd)
{
IfFailRet(pField->Fixup(image));
pField++;
}
//
// Fixup MethodDescs
//
IfFailRet(image->FixupPointerField(&m_pChunks));
MethodDescChunk *chunk = m_pChunks;
while (chunk != NULL)
{
IfFailRet(chunk->Fixup(image, pRidToCodeRVAMap));
chunk = chunk->GetNextChunk();
}
// These fields will be lazy inited if we zero them
IfFailRet(image->ZeroPointerField(&m_pComclassfac));
IfFailRet(image->ZeroPointerField(&m_pccwTemplate));
IfFailRet(image->ZeroPointerField(&m_ExposedClassObject));
IfFailRet(image->ZeroPointerField(&m_pLoader));
//
// Clear interface ID to -1 so it
// is lazy inited on startup.
//
UINT32 *newID = (UINT32 *) image->GetImagePointer(&m_dwInterfaceId);
if (newID == NULL)
return E_POINTER;
*newID = -1;
if (HasLayout())
{
// Walk the variable-size field marshaler array in fixed-size strides.
EEClassLayoutInfo *pInfo = &((LayoutEEClass*)this)->m_LayoutInfo;
IfFailRet(image->FixupPointerField(&pInfo->m_pFieldMarshalers));
FieldMarshaler *pFM = pInfo->m_pFieldMarshalers;
FieldMarshaler *pFMEnd = (FieldMarshaler*) ((BYTE *)pFM + pInfo->m_numCTMFields*MAXFIELDMARSHALERSIZE);
while (pFM < pFMEnd)
{
IfFailRet(pFM->Fixup(image));
((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
}
}
else if (IsDelegateClass() || IsMultiDelegateClass())
{
DelegateEEClass *pDelegateClass = (DelegateEEClass *) this;
IfFailRet(image->FixupPointerField(&pDelegateClass->m_pInvokeMethod));
if (pDelegateClass->m_pBeginInvokeMethod != NULL)
IfFailRet(image->FixupPointerField(&pDelegateClass->m_pBeginInvokeMethod));
if (pDelegateClass->m_pEndInvokeMethod != NULL)
IfFailRet(image->FixupPointerField(&pDelegateClass->m_pEndInvokeMethod));
// Thunk/marshal caches are rebuilt lazily at runtime.
IfFailRet(image->ZeroPointerField(&((DelegateEEClass*)this)->m_pUMThunkMarshInfo));
IfFailRet(image->ZeroPointerField(&((DelegateEEClass*)this)->m_pStaticShuffleThunk));
}
else if (IsEnum())
{
EnumEEClass *pEnumClass = (EnumEEClass*) this;
if (pEnumClass->GetEnumCount() > 0)
{
IfFailRet(image->FixupPointerField(&pEnumClass->m_values));
IfFailRet(image->FixupPointerField(&pEnumClass->m_names));
DWORD enumCount = pEnumClass->GetEnumCount();
// Each name-table entry is itself a pointer that needs fixing up.
LPCUTF8 *pNames = pEnumClass->m_names;
LPCUTF8 *pNamesEnd = pNames + enumCount;
while (pNames < pNamesEnd)
{
IfFailRet(image->FixupPointerField(pNames));
pNames++;
}
}
}
return S_OK;
}
// Release per-class COM interop data (CCW template and class factory)
// when the class is being unloaded.
void EEClass::Unload()
{
    LOG((LF_APPDOMAIN, LL_INFO100, "EEClass::Unload %8.8x, MethodTable %8.8x, %s\n", this, m_pMethodTable, m_szDebugClassName));

    if (m_pccwTemplate != NULL)
    {
        UnloadCCWTemplate(m_pccwTemplate);
    }
    m_pccwTemplate = NULL;

    if (m_pComclassfac != NULL)
    {
        UnloadComclassfac(m_pComclassfac);
    }
    m_pComclassfac = NULL;
}
/**************************************************************************/
// Structural compatibility test between delegate types: 'this' can be
// cast to 'toDelegate' when both Invoke signatures agree in arity and
// calling convention, the return type is covariant (may be a subclass)
// and every argument type is contra-variant (may be a superclass).
// For example, with
//      delegate Object delegate1(String)
//      delegate String delegate2(Object)
// delegate2->CanCastTo(delegate1) holds.
BOOL DelegateEEClass::CanCastTo(DelegateEEClass* toDelegate) {
    MetaSig fromSig(m_pInvokeMethod->GetSig(), m_pInvokeMethod->GetModule());
    MetaSig toSig(toDelegate->m_pInvokeMethod->GetSig(), toDelegate->m_pInvokeMethod->GetModule());
    // Arity and calling convention must match exactly.
    unsigned argCount = fromSig.NumFixedArgs();
    if (argCount != toSig.NumFixedArgs())
        return(false);
    if (fromSig.GetCallingConventionInfo() != toSig.GetCallingConventionInfo())
        return(false);
    // Return type is checked covariantly.
    TypeHandle srcType = fromSig.GetRetTypeHandle();
    TypeHandle dstType = toSig.GetRetTypeHandle();
    if (srcType.IsNull() || dstType.IsNull() || !srcType.CanCastTo(dstType))
        return(false);
    // Arguments are checked contra-variantly: note the reversed
    // CanCastTo direction compared to the return type.
    for (unsigned i = 0; i < argCount; i++) {
        fromSig.NextArg();
        toSig.NextArg();
        srcType = fromSig.GetTypeHandle();
        dstType = toSig.GetTypeHandle();
        if (srcType.IsNull() || dstType.IsNull() || !dstType.CanCastTo(srcType))
            return(false);
    }
    return(true);
}
// Scratch record pairing an enum member's name with its numeric value,
// widened to 64 bits; used only while building the sorted enum tables.
struct TempEnumValue
{
    LPCUTF8 name;
    UINT64 value;
};
// Quick-sort adapter ordering TempEnumValue records by ascending
// (unsigned) numeric value.
class TempEnumValueSorter : public CQuickSort<TempEnumValue>
{
public:
    TempEnumValueSorter(TempEnumValue *pArray, SSIZE_T iCount)
        : CQuickSort<TempEnumValue>(pArray, iCount) {}
    // Three-way comparison on the 64-bit value field.
    int Compare(TempEnumValue *pFirst, TempEnumValue *pSecond)
    {
        if (pFirst->value < pSecond->value)
            return -1;
        if (pFirst->value > pSecond->value)
            return 1;
        return 0;
    }
};
// Returns log2 of the byte size of the enum's underlying integral type:
// 0 => 1 byte, 1 => 2 bytes, 2 => 4 bytes, 3 => 8 bytes.
int EnumEEClass::GetEnumLogSize()
{
    int logSize;
    switch (GetMethodTable()->GetNormCorElementType())
    {
    case ELEMENT_TYPE_I1:
    case ELEMENT_TYPE_U1:
    case ELEMENT_TYPE_BOOLEAN:
        logSize = 0;
        break;
    case ELEMENT_TYPE_I2:
    case ELEMENT_TYPE_U2:
    case ELEMENT_TYPE_CHAR:
        logSize = 1;
        break;
    case ELEMENT_TYPE_I4:
    case ELEMENT_TYPE_U4:
    case ELEMENT_TYPE_I:
    case ELEMENT_TYPE_U:
        logSize = 2;
        break;
    case ELEMENT_TYPE_I8:
    case ELEMENT_TYPE_U8:
        logSize = 3;
        break;
    default:
        _ASSERTE(!"Illegal enum type");
        logSize = 0;
        break;
    }
    return logSize;
}
// Lazily builds the sorted name/value lookup tables for this enum.
// Idempotent and (mostly) race-tolerant: concurrent builders may do
// redundant work but converge to a consistent result.
//
// BUG FIX: the previous version leaked the open HENUMInternal on every
// early return (ReSize failure, GetDefaultValue failure, the late
// EnumTablesBuilt() race exit) and when the enum had no static fields at
// all; it also dereferenced AllocMem results without a NULL check. All
// exits now funnel through Exit, which closes the enumeration.
HRESULT EnumEEClass::BuildEnumTables()
{
    HRESULT hr = S_OK;
    _ASSERTE(IsEnum());
    // Note about synchronization:
    // This routine is synchronized OK without any locking since it's idempotent. (although it
    // may leak in races.)
    // Right now we'll be satisfied with this - external code can lock if appropriate.
    if (EnumTablesBuilt())
        return S_OK;
    IMDInternalImport *pImport = GetMDImport();
    HENUMInternal fields;
    IfFailRet(pImport->EnumInit(mdtFieldDef, GetCl(), &fields));
    //
    // Note that we're fine treating signed types as unsigned, because all we really
    // want to do is sort them based on a convenient strong ordering.
    //
    int logSize = GetEnumLogSize();
    int size = 1<<logSize;
    ULONG fieldCount = pImport->EnumGetCount(&fields)-1; // Omit one for __value field
    if (fieldCount > 0)
    {
        CQuickArray<TempEnumValue> temps;
        if (FAILED(temps.ReSize(fieldCount)))
        {
            hr = E_OUTOFMEMORY;
            goto Exit;
        }
        TempEnumValue *pTemps = temps.Ptr();
        // The following is not portable code - it assumes that the address of all union members
        // is the same.
        _ASSERTE((offsetof(MDDefaultValue, m_byteValue)
                  == offsetof(MDDefaultValue, m_usValue))
                 && (offsetof(MDDefaultValue, m_ulValue)
                     == offsetof(MDDefaultValue, m_ullValue)));
        mdFieldDef field;
        int nTotalInstanceFields = 0;
        // Collect every static field's (name, constant-value) pair; the
        // constant is widened to 64 bits based on the underlying size.
        while (pImport->EnumNext(&fields, &field))
        {
            if (IsFdStatic(pImport->GetFieldDefProps(field)))
            {
                pTemps->name = pImport->GetNameOfFieldDef(field);
                MDDefaultValue defaultValue;
                if (FAILED(hr = pImport->GetDefaultValue(field, &defaultValue)))
                    goto Exit;
                switch (logSize)
                {
                case 0:
                    pTemps->value = defaultValue.m_byteValue;
                    break;
                case 1:
                    pTemps->value = defaultValue.m_usValue;
                    break;
                case 2:
                    pTemps->value = defaultValue.m_ulValue;
                    break;
                case 3:
                    pTemps->value = defaultValue.m_ullValue;
                    break;
                }
                pTemps++;
            }
            else
            {
                nTotalInstanceFields++;
            }
        }
        _ASSERTE((nTotalInstanceFields == 1) && "Zero or Multiple instance fields in an enum!");
        //
        // Check to see if we are already sorted. This may seem extraneous, but is
        // actually probably the normal case.
        //
        BOOL sorted = TRUE;
        pTemps = temps.Ptr();
        TempEnumValue *pTempsEnd = pTemps + fieldCount - 1;
        while (pTemps < pTempsEnd)
        {
            if (pTemps[0].value > pTemps[1].value)
            {
                sorted = FALSE;
                break;
            }
            pTemps++;
        }
        if (!sorted)
        {
            TempEnumValueSorter sorter(temps.Ptr(), fieldCount);
            sorter.Sort();
        }
        // Last chance to exit race without leaking!
        if (EnumTablesBuilt())
        {
            hr = S_OK;
            goto Exit;
        }
        LPCUTF8 *pNames = (LPCUTF8 *) GetAssembly()->GetHighFrequencyHeap()->AllocMem(fieldCount * sizeof(LPCUTF8));
        BYTE *pValues = (BYTE *) GetAssembly()->GetHighFrequencyHeap()->AllocMem(fieldCount * size);
        // AllocMem may fail; don't dereference NULL.
        if (pNames == NULL || pValues == NULL)
        {
            hr = E_OUTOFMEMORY;
            goto Exit;
        }
        // Copy the sorted pairs into the permanent, tightly packed tables.
        pTemps = temps.Ptr();
        pTempsEnd = pTemps + fieldCount;
        LPCUTF8 *pn = pNames;
        BYTE *pv = pValues;
        while (pTemps < pTempsEnd)
        {
            *pn++ = pTemps->name;
            switch (logSize)
            {
            case 0:
                *pv++ = (BYTE) pTemps->value;
                break;
            case 1:
                *(USHORT*)pv = (USHORT) pTemps->value;
                pv += sizeof(USHORT);
                break;
            case 2:
                *(UINT*)pv = (UINT) pTemps->value;
                pv += sizeof(UINT);
                break;
            case 3:
                *(UINT64*)pv = (UINT64) pTemps->value;
                pv += sizeof(UINT64);
                break;
            }
            pTemps++;
        }
        m_names = pNames;
        m_values = pValues;
    }
    m_countPlusOne = fieldCount+1;
Exit:
    pImport->EnumClose(&fields);
    return hr;
}
// Binary-searches the sorted byte-sized value table; returns the index
// of 'value' or NOT_FOUND.
DWORD EnumEEClass::FindEnumValueIndex(BYTE value)
{
    _ASSERTE(GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_I1
             || GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_U1
             || GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_BOOLEAN);
    CBinarySearch<BYTE> searcher(GetEnumByteValues(), GetEnumCount());
    const BYTE *match = searcher.Find(&value);
    return (match == NULL) ? NOT_FOUND : (DWORD)(match - m_byteValues);
}
// Binary-searches the sorted 16-bit value table; returns the index of
// 'value' or NOT_FOUND.
DWORD EnumEEClass::FindEnumValueIndex(USHORT value)
{
    _ASSERTE(GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_I2
             || GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_U2
             || GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_CHAR);
    CBinarySearch<USHORT> searcher(GetEnumShortValues(), GetEnumCount());
    const USHORT *match = searcher.Find(&value);
    return (match == NULL) ? NOT_FOUND : (DWORD)(match - m_shortValues);
}
// Binary-searches the sorted 32-bit value table; returns the index of
// 'value' or NOT_FOUND.
DWORD EnumEEClass::FindEnumValueIndex(UINT value)
{
    _ASSERTE(GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_I4
             || GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_U4
             || GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_I
             || GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_U);
    CBinarySearch<UINT> searcher(GetEnumIntValues(), GetEnumCount());
    const UINT *match = searcher.Find(&value);
    return (match == NULL) ? NOT_FOUND : (DWORD)(match - m_intValues);
}
// Binary-searches the sorted 64-bit value table; returns the index of
// 'value' or NOT_FOUND.
DWORD EnumEEClass::FindEnumValueIndex(UINT64 value)
{
    _ASSERTE(GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_I8
             || GetMethodTable()->GetNormCorElementType() == ELEMENT_TYPE_U8);
    CBinarySearch<UINT64> searcher(GetEnumLongValues(), GetEnumCount());
    const UINT64 *match = searcher.Find(&value);
    return (match == NULL) ? NOT_FOUND : (DWORD)(match - m_longValues);
}
// Returns the index of the enum member whose name equals 'name', or
// NOT_FOUND. A cheap pointer-identity pass runs first (common when the
// caller holds the metadata string itself), then a full strcmp pass.
//
// BUG FIX: the original never rewound 'names' after the identity pass,
// so the strcmp pass iterated over an empty range and was dead code;
// lookups by equal-but-distinct strings always returned NOT_FOUND.
DWORD EnumEEClass::FindEnumNameIndex(LPCUTF8 name)
{
    LPCUTF8 *names = GetEnumNames();
    LPCUTF8 *namesEnd = names + GetEnumCount();
    // Same identity is the most common case
    // & doesn't touch string data
    while (names < namesEnd)
    {
        if (name == *names)
            return names - GetEnumNames();
        names++;
    }
    // otherwise compare strings (restart from the beginning of the table)
    names = GetEnumNames();
    while (names < namesEnd)
    {
        if (strcmp(name, *names) == 0)
            return names - GetEnumNames();
        names++;
    }
    return NOT_FOUND;
}
// A handle denotes an enum only when it wraps an unshared MethodTable
// whose EEClass reports IsEnum().
BOOL TypeHandle::IsEnum()
{
    return IsUnsharedMT() ? AsMethodTable()->GetClass()->IsEnum() : FALSE;
}
// Maps the handle to its EEClass via GetMethodTable(); returns 0 when
// there is no MethodTable.
EEClass* TypeHandle::GetClass()
{
    MethodTable* table = GetMethodTable();
    if (table == NULL)
        return 0;
    return table->GetClass();
}
// Like GetClass() but goes through AsMethodTable(); returns 0 when there
// is no MethodTable.
EEClass* TypeHandle::AsClass()
{
    MethodTable* table = AsMethodTable();
    if (table == NULL)
        return 0;
    return table->GetClass();
}
// Handles not backed by an unshared MethodTable are always considered
// restored; otherwise defer to the MethodTable.
BOOL TypeHandle::IsRestored()
{
    if (IsUnsharedMT())
        return GetMethodTable()->IsRestored();
    return TRUE;
}
void TypeHandle::CheckRestore()
{
if (IsUnsharedMT())
{
MethodTable *pMT = GetMethodTable();
if (!pMT->IsRestored())
pMT->CheckRestore();
}
}
// Creates (or fetches) the managed class object exposed for this handle,
// dispatching on the normalized element type.
//
// BUG FIX: in the default case the original asserted on a bad type,
// assigned o = NULL, and then FELL THROUGH to AsClass()->... on the
// invalid handle, making the NULL assignment dead code. It now returns
// NULL for a bad type as the dead assignment clearly intended.
OBJECTREF TypeHandle::CreateClassObj()
{
    OBJECTREF o;
    switch(GetNormCorElementType()) {
    case ELEMENT_TYPE_ARRAY:
    case ELEMENT_TYPE_SZARRAY:
    case ELEMENT_TYPE_BYREF:
    case ELEMENT_TYPE_PTR:
        o = ((ParamTypeDesc*)AsTypeDesc())->CreateClassObj();
        break;
    case ELEMENT_TYPE_TYPEDBYREF:
        {
            EEClass* cls = COMMember::g_pInvokeUtil->GetAnyRef();
            o = cls->GetExposedClassObject();
        }
        break;
    // for this release a function pointer is mapped into an IntPtr. This result in a loss of information. Fix next release
    case ELEMENT_TYPE_FNPTR:
        o = TheIntPtrClass()->GetClass()->GetExposedClassObject();
        break;
    default:
        {
            if (!IsUnsharedMT()) {
                _ASSERTE(!"Bad Type");
                o = NULL;
                break;
            }
            EEClass* cls = AsClass();
            // We never create the Type object for the transparent proxy...
            if (cls->GetMethodTable()->IsTransparentProxyType())
                return 0;
            o = cls->GetExposedClassObject();
        }
        break;
    }
    return o;
}
#if CHECK_APP_DOMAIN_LEAKS
// An MT-backed handle is agile iff its EEClass is; an array is agile iff
// its element type is agile when used as array storage. Other handle
// kinds are conservatively non-agile.
BOOL TypeHandle::IsAppDomainAgile()
{
    if (IsUnsharedMT())
        return AsMethodTable()->GetClass()->IsAppDomainAgile();
    if (IsArray())
    {
        TypeHandle elemType = AsArray()->GetElementTypeHandle();
        return elemType.IsArrayOfElementsAppDomainAgile();
    }
    // @todo: consider other types of type handles agile?
    return FALSE;
}
// Mirror of IsAppDomainAgile() for the "needs dynamic agility check"
// property: defer to the EEClass for MT-backed handles, to the element
// type for arrays, FALSE otherwise.
BOOL TypeHandle::IsCheckAppDomainAgile()
{
    if (IsUnsharedMT())
        return AsMethodTable()->GetClass()->IsCheckAppDomainAgile();
    if (IsArray())
    {
        TypeHandle elemType = AsArray()->GetElementTypeHandle();
        return elemType.IsArrayOfElementsCheckAppDomainAgile();
    }
    // @todo: consider other types of type handles agile?
    return FALSE;
}
// Array storage of this element type is agile only when the element
// class is both sealed and agile.
BOOL TypeHandle::IsArrayOfElementsAppDomainAgile()
{
    if (!IsUnsharedMT())
    {
        // I'm not sure how to prove a typedesc is sealed, so
        // just bail and return FALSE here rather than recursing.
        return FALSE;
    }
    MethodTable *pMT = AsMethodTable();
    return (pMT->GetClass()->GetAttrClass() & tdSealed) && pMT->GetClass()->IsAppDomainAgile();
}
// Array storage of this element type needs a dynamic agility check when
// the element class is agile but unsealed, or when it is explicitly
// marked check-agile.
BOOL TypeHandle::IsArrayOfElementsCheckAppDomainAgile()
{
    if (!IsUnsharedMT())
    {
        // I'm not sure how to prove a typedesc is sealed, so
        // just bail and return FALSE here rather than recursing.
        return FALSE;
    }
    EEClass *cls = AsMethodTable()->GetClass();
    if (cls->IsAppDomainAgile() && (cls->GetAttrClass() & tdSealed) == 0)
        return TRUE;
    return cls->IsCheckAppDomainAgile();
}
#endif
// Sets up iteration over a class's introduced fields. iteratorType is a
// bitmask of INSTANCE_FIELDS / STATIC_FIELDS selecting which field kinds
// Next() will yield. The cursor m_currField is pre-incremented by Next(),
// hence the -1 start.
FieldDescIterator::FieldDescIterator(EEClass *pClass, int iteratorType)
{
    m_iteratorType = iteratorType;
    m_pClass = pClass;
    m_currField = -1;
#ifdef EnC_SUPPORTED
    // Edit-and-Continue modules keep added fields in side lists; Next()
    // must take the NextEnC() path for them.
    m_isEnC = pClass->GetModule()->IsEditAndContinue();
    m_pCurrListElem = NULL;
#endif // EnC_SUPPORTED
    m_totalFields = m_pClass->GetNumIntroducedInstanceFields();
    if (!(iteratorType & (int)INSTANCE_FIELDS))
        // if not handling instances then skip them by setting curr to last one
        m_currField = m_pClass->GetNumIntroducedInstanceFields() - 1;
    if (iteratorType & (int)STATIC_FIELDS)
        m_totalFields += m_pClass->GetNumStaticFields();
}
// Returns the next FieldDesc selected by the iterator type, or NULL once
// the iteration is exhausted.
FieldDesc* FieldDescIterator::Next()
{
    ++m_currField;
    if (m_currField >= m_totalFields)
        return NULL;
#ifdef EnC_SUPPORTED
    if (m_isEnC)
    {
        FieldDesc *pFD = NextEnC();
        // Either it's not EnC, or it is and it's been fixed up so we can use it, or
        // we're the Debugger RC thread, we can't fix it up, but it's ok since our
        // logic will check & make sure we don't try and use it.
        _ASSERTE(!pFD->IsEnCNew() ||
                 !((EnCFieldDesc*)pFD)->NeedsFixup() ||
                 g_pDebugInterface->GetRCThreadId() == GetCurrentThreadId() );
        return pFD;
    }
#endif // EnC_SUPPORTED
    // Non-EnC case: all fields live in one contiguous FieldDesc array.
    return (m_pClass->GetFieldDescListRaw()) + m_currField;
}
#ifdef EnC_SUPPORTED
// EnC-aware variant of Next(): fields added by Edit-and-Continue live in
// linked side lists hanging off EnCEEClassData instead of the class's
// contiguous FieldDesc array, so this walks the original array first and
// then the added-field lists (m_pCurrListElem tracks the list position).
FieldDesc* FieldDescIterator::NextEnC()
{
    EnCEEClassData *pEnCClass = ((EditAndContinueModule*)(m_pClass->GetModule()))->GetEnCEEClassData(m_pClass, TRUE);
    int numIntroducedFields = m_pClass->GetNumIntroducedInstanceFields();
    // Instance-field phase.
    if (m_iteratorType & (int)INSTANCE_FIELDS &&
        m_currField < numIntroducedFields)
    {
        if (! pEnCClass || m_currField < numIntroducedFields - pEnCClass->m_dwNumAddedInstanceFields) {
            // Still inside the original (pre-EnC) instance fields.
            return (m_pClass->GetFieldDescListRaw()) + m_currField;
        } else if (m_pCurrListElem) {
            // Continue walking the added-instance-field list.
            _ASSERTE(m_pCurrListElem->m_next);
            m_pCurrListElem = m_pCurrListElem->m_next;
            return &m_pCurrListElem->m_fieldDesc;
        } else {
            // First added instance field: start the list walk.
            _ASSERTE(pEnCClass->m_pAddedInstanceFields);
            m_pCurrListElem = pEnCClass->m_pAddedInstanceFields;
            return &m_pCurrListElem->m_fieldDesc;
        }
    }
    // Static-field phase: cursor offset past the instance fields.
    int staticFieldOffset = m_currField - numIntroducedFields;
    if (! pEnCClass) {
        _ASSERTE(staticFieldOffset < m_pClass->GetNumStaticFields());
        return (m_pClass->GetFieldDescListRaw()) + staticFieldOffset + numIntroducedFields;
    } else {
        if (staticFieldOffset < m_pClass->GetNumStaticFields() - pEnCClass->m_dwNumAddedStaticFields) {
            // Original static field: index into the raw array, skipping
            // the EnC-added instance fields which are not stored there.
            return (m_pClass->GetFieldDescListRaw()) + staticFieldOffset + numIntroducedFields - pEnCClass->m_dwNumAddedInstanceFields;
        } else if (m_pCurrListElem && m_pCurrListElem->m_fieldDesc.IsStatic()) {
            // Continue walking the added-static-field list.
            _ASSERTE(m_pCurrListElem->m_next);
            m_pCurrListElem = m_pCurrListElem->m_next;
            return &m_pCurrListElem->m_fieldDesc;
        } else {
            // First added static field.
            _ASSERTE(pEnCClass->m_pAddedStaticFields);
            m_pCurrListElem = pEnCClass->m_pAddedStaticFields;
            return &m_pCurrListElem->m_fieldDesc;
        }
    }
}
#endif // EnC_SUPPORTED
| 37.878824 | 219 | 0.549328 | npocmaka |
38d3c1e7f8c1a57cf3db762f9dda90899b1e041b | 11,451 | cpp | C++ | Spider3D.cpp | Artars/WalkingGLSpider | d939a1f27730aaabdc36b01fd574351f967c08dd | [
"MIT"
] | null | null | null | Spider3D.cpp | Artars/WalkingGLSpider | d939a1f27730aaabdc36b01fd574351f967c08dd | [
"MIT"
] | null | null | null | Spider3D.cpp | Artars/WalkingGLSpider | d939a1f27730aaabdc36b01fd574351f967c08dd | [
"MIT"
] | null | null | null | #include "Spider3D.h"
// Default-construct a spider: keep the inherited transform defaults and
// assemble the model hierarchy.
Spider3D::Spider3D()
{
    finishConstruction();
}
// Construct a spider at an explicit position/scale/rotation, then
// assemble the model hierarchy.
Spider3D::Spider3D(Vector3 position, Vector3 scale, Vector3 rotation)
{
    this->rotation = rotation;
    this->scale = scale;
    this->position = position;
    finishConstruction();
}
void Spider3D::finishConstruction() {
char path[] = "SBody.obj";
char path2[] = "SEyes.obj";
char path3[] = "SLeg2.obj";
char path4[] = "SLeg1.obj";
loadModel(path);
children = new vector<Transform*>();
Transform *newPart, *newPart2;
newPart = new Transform();
newPart->loadModel(path2);
newPart->setColor(1,0,0);
children->push_back(newPart);
//Perna dianteira esquerda
newPart = new Transform(Vector3(-0.2, -0.2, 0), Vector3(1,1,1), Vector3(-15,0,0));
newPart->loadModel(path4);
newPart->setColor(color[0],color[1],color[2]);
children->push_back(newPart);
newPart2 = new Transform(Vector3(0,-0.6,0),Vector3(1,1,1),Vector3(48,0,0));
newPart2->loadModel(path4);
newPart2->setColor(color[0],color[1],color[2]);
newPart->children = new vector<Transform*>();
newPart->children->push_back(newPart2);
//Perna secundaria esquerda
newPart = new Transform(Vector3(-0.05, -0.2, 0), Vector3(1,1,1), Vector3(-15,0,0));
newPart->loadModel(path3);
newPart->setColor(color[0],color[1],color[2]);
children->push_back(newPart);
newPart2 = new Transform(Vector3(0,-0.45,0),Vector3(1,1,1),Vector3(60,0,0));
newPart2->loadModel(path3);
newPart2->setColor(color[0],color[1],color[2]);
newPart->children = new vector<Transform*>();
newPart->children->push_back(newPart2);
//Perna secundaria esquerda
newPart = new Transform(Vector3(0.1, -0.2, 0), Vector3(1,1,1), Vector3(-15,0,0));
newPart->loadModel(path3);
newPart->setColor(color[0],color[1],color[2]);
children->push_back(newPart);
newPart2 = new Transform(Vector3(0,-0.45,0),Vector3(1,1,1),Vector3(60,0,0));
newPart2->loadModel(path3);
newPart2->setColor(color[0],color[1],color[2]);
newPart->children = new vector<Transform*>();
newPart->children->push_back(newPart2);
//Perna Traseira esquerda
newPart = new Transform(Vector3(0.25, -0.2, 0), Vector3(1,1,1), Vector3(-15,0,0));
newPart->loadModel(path4);
newPart->setColor(color[0],color[1],color[2]);
children->push_back(newPart);
newPart2 = new Transform(Vector3(0,-0.6,0),Vector3(1,1,1),Vector3(48,0,0));
newPart2->loadModel(path4);
newPart2->setColor(color[0],color[1],color[2]);
newPart->children = new vector<Transform*>();
newPart->children->push_back(newPart2);
//Perna dianteira direita
newPart = new Transform(Vector3(-0.2, 0.2, 0), Vector3(1,-1,1), Vector3(15,0,0));
newPart->loadModel(path4);
newPart->setColor(color[0],color[1],color[2]);
children->push_back(newPart);
newPart2 = new Transform(Vector3(0,-0.6,0),Vector3(1,1,1),Vector3(48,0,0));
newPart2->loadModel(path4);
newPart2->setColor(color[0],color[1],color[2]);
newPart->children = new vector<Transform*>();
newPart->children->push_back(newPart2);
//Perna secundaria direita
newPart = new Transform(Vector3(-0.05, 0.2, 0), Vector3(1,-1,1), Vector3(15,0,0));
newPart->loadModel(path3);
newPart->setColor(color[0],color[1],color[2]);
children->push_back(newPart);
newPart2 = new Transform(Vector3(0,-0.45,0),Vector3(1,1,1),Vector3(60,0,0));
newPart2->loadModel(path3);
newPart2->setColor(color[0],color[1],color[2]);
newPart->children = new vector<Transform*>();
newPart->children->push_back(newPart2);
//Perna terciaria direita
newPart = new Transform(Vector3(0.1, 0.2, 0), Vector3(1,-1,1), Vector3(15,0,0));
newPart->loadModel(path3);
newPart->setColor(color[0],color[1],color[2]);
children->push_back(newPart);
newPart2 = new Transform(Vector3(0,-0.45,0),Vector3(1,1,1),Vector3(60,0,0));
newPart2->loadModel(path3);
newPart2->setColor(color[0],color[1],color[2]);
newPart->children = new vector<Transform*>();
newPart->children->push_back(newPart2);
//Perna traseira esquerda
newPart = new Transform(Vector3(0.25, 0.2, 0), Vector3(1,-1,1), Vector3(15,0,0));
newPart->loadModel(path4);
newPart->setColor(color[0],color[1],color[2]);
children->push_back(newPart);
newPart2 = new Transform(Vector3(0,-0.6,0),Vector3(1,1,1),Vector3(48,0,0));
newPart2->loadModel(path4);
newPart2->setColor(color[0],color[1],color[2]);
newPart->children = new vector<Transform*>();
newPart->children->push_back(newPart2);
}
// Stores the body color and recolors every leg (children 1..n) plus each
// leg's lower segment; child 0 is the eye mesh and keeps its own color.
void Spider3D::setColor(GLfloat r, GLfloat g, GLfloat b) {
    color[0] = r;
    color[1] = g;
    color[2] = b;
    if (children == NULL)
        return;
    for (int i = 1; i < children->size(); i++) {
        Transform* limb = (*children)[i];
        limb->setColor(r, g, b);
        (*limb->children)[0]->setColor(r, g, b);
    }
}
void Spider3D::turn(float axis) {
axisRot = axis;
}
void Spider3D::advance(float axis) {
axisFow = axis;
}
// Per-frame step: integrates heading and position from the input axes,
// then drives the gait state machine (P1 = at rest, P2/P3 = the two walk
// phases) and animates the legs. The /1000 divisions suggest 'delta' is
// in milliseconds - TODO confirm against the caller.
void Spider3D::update(double delta){
    // Heading change about z, scaled by the turn input and elapsed time.
    float angleVar = angularSpeed * axisRot * delta/1000;
    rotation = rotation + Vector3(0,0,angleVar);
    // Displacement along the current heading (rotation.z appears to be
    // degrees; rad2Deg converts for cos/sin).
    float deltaPos = fowardSpeed * axisFow * delta/1000;
    Vector3 deltaPosition = Vector3(cos(rotation.z/rad2Deg)*deltaPos,sin(rotation.z/rad2Deg)*deltaPos,0);
    position = position + deltaPosition;
    // Leave the rest state in the direction implied by the inputs.
    if(axisRot == -1 && currentState == P1){
        currentState = P2;
    }
    else if (axisRot == 1 && currentState == P1) {
        currentState = P3;
    }
    if(axisFow != 0 && currentState == P1) {
        currentState = P2;
    }
    // No input at all: settle back toward the rest state.
    if(axisRot == 0 && axisFow == 0 && currentState != P1){
        currentState = P1;
    }
    updateLegs(delta);
}
// Animates the eight legs for one frame. animationCounter sweeps within
// [0, animationTime]; the cos() term swings each leg fore/aft and the
// sin(risingTime) term lifts whichever legs are in their recovery swing.
// P2 decrements the counter, P3 increments it, and P1 eases back to the
// midpoint; hitting either end of the sweep flips the phase.
void Spider3D::updateLegs(double delta) {
    float angularSpeed = M_PI/animationTime;
    float risingTime = (3.14)*(animationCounter/animationTime);
    // Flags selecting which legs lift during this phase
    int re2,fo2,re3,fo3;
    if(axisFow == -1){ // spider is moving forward
        re2 = 0; fo2 = 1; re3 = 0; fo3 = 1;
    }
    else if(axisFow == 1){ // spider is moving backward
        re2 = 1; fo2 = 0; re3 = 1; fo3 = 0;
    }
    else if(axisRot == -1){ // spider is turning
        re2 = 0; fo2 = 1; re3 = 1; fo3 = 0;
    }
    else if(axisRot == 1){
        re2 = 1; fo2 = 0; re3 = 0; fo3 = 1;
    }
    else{
        re2 = 0; fo2 = 0; re3 = 0; fo3 = 0;
    }
    // Children 1-4 are the left legs (base tilt -15), 5-8 the right legs
    // (base tilt +15); the +-8/+-4 amplitudes alternate so adjacent legs
    // swing out of phase.
    if(currentState == P2){
        (*children)[1]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime)*re2,0, +8*cos(angularSpeed * animationCounter));
        (*children)[2]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime)*fo2,0, -4*cos(angularSpeed * animationCounter));
        (*children)[3]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime)*re2,0, +4*cos(angularSpeed * animationCounter));
        (*children)[4]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime)*fo2,0, -8*cos(angularSpeed * animationCounter));
        (*children)[5]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime)*fo3,0, +8*cos(angularSpeed * animationCounter));
        (*children)[6]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime)*re3,0, -4*cos(angularSpeed * animationCounter));
        (*children)[7]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime)*fo3,0, +4*cos(angularSpeed * animationCounter));
        (*children)[8]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime)*re3,0, -8*cos(angularSpeed * animationCounter));
        animationCounter -= delta/1000;
    }
    else if(currentState == P3){
        // Mirror phase: the lift flags swap relative to P2.
        (*children)[1]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime)*fo2,0, +8*cos(angularSpeed * animationCounter));
        (*children)[2]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime)*re2,0, -4*cos(angularSpeed * animationCounter));
        (*children)[3]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime)*fo2,0, +4*cos(angularSpeed * animationCounter));
        (*children)[4]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime)*re2,0, -8*cos(angularSpeed * animationCounter));
        (*children)[5]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime)*re3,0, +8*cos(angularSpeed * animationCounter));
        (*children)[6]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime)*fo3,0, -4*cos(angularSpeed * animationCounter));
        (*children)[7]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime)*re3,0, +4*cos(angularSpeed * animationCounter));
        (*children)[8]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime)*fo3,0, -8*cos(angularSpeed * animationCounter));
        animationCounter += delta/1000;
    }
    else if(currentState == P1){ // ease toward the animation's rest point
        if((animationCounter - (animationTime)/2) > 0){
            animationCounter -= delta/1000;
            if(((animationCounter - (animationTime)/2) < 0)){
                animationCounter = (animationTime)/2;
            }
            // +1.57 (~pi/2) phase shift lowers the legs while settling.
            (*children)[1]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime+1.57)*fo2,0, +8*cos(angularSpeed * animationCounter));
            (*children)[2]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime+1.57)*re2,0, -4*cos(angularSpeed * animationCounter));
            (*children)[3]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime+1.57)*fo2,0, +4*cos(angularSpeed * animationCounter));
            (*children)[4]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime+1.57)*re2,0, -8*cos(angularSpeed * animationCounter));
            (*children)[5]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime+1.57)*re3,0, +8*cos(angularSpeed * animationCounter));
            (*children)[6]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime+1.57)*fo3,0, -4*cos(angularSpeed * animationCounter));
            (*children)[7]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime+1.57)*re3,0, +4*cos(angularSpeed * animationCounter));
            (*children)[8]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime+1.57)*fo3,0, -8*cos(angularSpeed * animationCounter));
        }
        else if((animationCounter - (animationTime)/2) < 0) {
            animationCounter += delta/1000;
            if(((animationCounter - (animationTime)/2) > 0)){
                animationCounter = (animationTime)/2;
            }
            (*children)[1]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime-1.57)*re2,0, +8*cos(angularSpeed * animationCounter));
            (*children)[2]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime-1.57)*fo2,0, -4*cos(angularSpeed * animationCounter));
            (*children)[3]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime-1.57)*re2,0, +4*cos(angularSpeed * animationCounter));
            (*children)[4]->rotation = Vector3(-15,0,0)+Vector3(-15*sin(risingTime-1.57)*fo2,0, -8*cos(angularSpeed * animationCounter));
            (*children)[5]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime-1.57)*fo3,0, +8*cos(angularSpeed * animationCounter));
            (*children)[6]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime-1.57)*re3,0, -4*cos(angularSpeed * animationCounter));
            (*children)[7]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime-1.57)*fo3,0, +4*cos(angularSpeed * animationCounter));
            (*children)[8]->rotation = Vector3(15,0,0)+Vector3(15*sin(risingTime-1.57)*re3,0, -8*cos(angularSpeed * animationCounter));
        }
    }
    // Flip the walking phase when the sweep hits either end.
    if(animationCounter < 0){
        currentState = P3;
        animationCounter = 0;
    }
    if(animationCounter > animationTime){
        currentState = P2;
        animationCounter = animationTime;
    }
}
| 40.896429 | 128 | 0.643699 | Artars |
38dc60458e367ea135611f1b6160469ccd7f898a | 78,194 | cpp | C++ | SGXDNN/sgxdnn_main.cpp | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 17 | 2020-04-28T09:18:28.000Z | 2021-12-28T08:38:00.000Z | SGXDNN/sgxdnn_main.cpp | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 2 | 2021-09-26T04:10:51.000Z | 2022-03-31T05:28:25.000Z | SGXDNN/sgxdnn_main.cpp | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 2 | 2021-09-26T05:06:17.000Z | 2021-12-14T16:25:06.000Z | #define USE_EIGEN_TENSOR
#ifndef USE_SGX
#define EIGEN_USE_THREADS
#include <malloc.h>
#else
#include "Enclave.h"
#include "sgx_tseal.h"
#include "sgx_trts.h"
#include "sgx_thread.h"
#endif
#include "sgxdnn_main.hpp"
#include "randpool.hpp"
#include "utils.hpp"
#include "common_with_enclaves.h"
#include <unsupported/Eigen/CXX11/Tensor>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <iostream>
#include <memory>
#include <chrono>
#include <string>
#include <cstring>
#include <cmath>
#include <deque>
#include <unordered_map>
#include <cstdlib>
#include <mutex>
#include <stack>
#include "Crypto.h"
#include "../App/common_utils.cpp"
using namespace std;
using std::shared_ptr;
using std::make_shared;
using std::unordered_map;
using std::string;
using defer = shared_ptr<void>;
//using namespace SGXDNN;
int p_int = PrimeLimit;
float p = (float) p_int;
float mid = (float) (p_int / 2);
// some vectorized constants
__m256 p8f = _mm256_set1_ps(p);
__m256 p28f = _mm256_set1_ps(p * 2);
__m256 mid8f = _mm256_set1_ps(mid);
__m256 pmid8f = _mm256_set1_ps(p + mid);
__m256 negmid8f = _mm256_set1_ps(-mid - 1);
__m256 zero8f = _mm256_set1_ps((float)(0));
__m256 inv_shift8f = _mm256_set1_ps((float)(1.0/256));
__m256 six8f = _mm256_set1_ps((float) 6 * 256 * 256);
inline void MoveDown(float* input, float* out, int num_elements) {
for(size_t i = 0; i < num_elements; i += 8) {
const __m256 inp8f = _mm256_load_ps( &input[i] ); // blinded input
const __m256 if_geq = _mm256_cmp_ps(inp8f, mid8f, 0x0d); // unblinded >= mid
// const __m256 if_lt = _mm256_cmp_ps(inp8f, negmid8f, 0x01); // unblinded < -mid
const __m256 then8f = _mm256_sub_ps(inp8f, p8f); // unblinded - p
// const __m256 elif8f = _mm256_add_ps(inp8f, p8f); // unblinded + p
const __m256 res8f = _mm256_blendv_ps(
inp8f,
then8f,
if_geq);
_mm256_stream_ps(&out[i], res8f);
}
}
void ModP(MapMatRowMajor& m) {
DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
DtypeForCpuOp invPLimit = static_cast<DtypeForCpuOp>(1) / PrimeLimit;
m.array() = m.array() - (m * invPLimit).array() * PLimit;
}
void ModP(EigenTensor& m) {
DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
m -= (m * invPLimit).floor() * PLimit;
// m = (m > m.constant((float) HalfPrime)).select(m - (float) HalfPrime, m);
}
void ModP(MapEigenTensor& m) {
DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
m -= (m * invPLimit).floor() * PLimit;
// m = (m > m.constant((float) HalfPrime)).select(m - (float) HalfPrime, m);
}
class ChunkPool {
public:
ChunkPool(int size_pool_, int num_byte_chunk_) :
size_pool(size_pool_),
num_byte_chunk(num_byte_chunk_)
{
for (int i = 0; i < size_pool; i++) {
void* enc_chunk = (void*)memalign(64, num_byte_chunk);
chunks.push_back(enc_chunk);
chunk_ids.push(i);
}
}
int get_chunk_id() {
std::unique_lock<std::mutex> lock(stack_mutex);
if (chunk_ids.empty()) {
printf("Running out of chunks\n");
throw std::invalid_argument("Running out of chunks");
}
int res;
res = chunk_ids.top();
chunk_ids.pop();
return res;
}
void return_chunk_id(int id) {
std::unique_lock<std::mutex> lock(stack_mutex);
chunk_ids.push(id);
}
std::vector<void*> chunks;
private:
int size_pool;
int num_byte_chunk;
std::mutex stack_mutex;
std::stack<int> chunk_ids;
};
class StoreChunkPool {
public:
static shared_ptr<ChunkPool> GetChunkPool() {
static StoreChunkPool instance;
return instance.chunk_pool;
}
StoreChunkPool(StoreChunkPool const&) = delete;
void operator=(StoreChunkPool const&) = delete;
private:
StoreChunkPool() {
chunk_pool = make_shared<ChunkPool>(THREAD_POOL_SIZE * 2, STORE_CHUNK_ELEM * sizeof(DtypeForCpuOp));
}
shared_ptr<ChunkPool> chunk_pool;
};
template<typename T>
class ChunkGuard {
public:
ChunkGuard<T>(shared_ptr<ChunkPool> chunk_pool_, T*& pointer) :
chunk_pool(chunk_pool_)
{
id = chunk_pool->get_chunk_id();
pointer = (T*) chunk_pool->chunks[id];
}
~ChunkGuard<T>() {
chunk_pool->return_chunk_id(id);
}
private:
int id;
shared_ptr<ChunkPool> chunk_pool;
};
class TrustedChunkManager {
public:
static TrustedChunkManager& getInstance() {
static TrustedChunkManager instance;
return instance;
}
TrustedChunkManager(TrustedChunkManager const&) = delete;
void operator=(TrustedChunkManager const&) = delete;
IdT GetNewId() {
return id_counter++;
}
const int start_idx = 1000;
void StoreChunk(IdT id, void* src_chunk, int num_byte) {
int num_byte_enc_chunk = CalcEncDataSize(0, num_byte);
SgxEncT* enc_chunk = (SgxEncT*) get_untrusted_mem(id, num_byte_enc_chunk);
DtypeForCpuOp* src_float = (DtypeForCpuOp*) src_chunk;
encrypt((uint8_t *) src_chunk,
num_byte,
(uint8_t *) (&(enc_chunk->payload)),
(sgx_aes_gcm_128bit_iv_t *)(&(enc_chunk->reserved)),
(sgx_aes_gcm_128bit_tag_t *)(&(enc_chunk->payload_tag)));
DtypeForCpuOp* dst_chunk = (DtypeForCpuOp*)malloc(num_byte);
GetChunk(id, dst_chunk, num_byte);
uint8_t* blind_chunk;
ChunkGuard<uint8_t> guard(blind_chunks, blind_chunk);
decrypt((uint8_t *) (&(enc_chunk->payload)),
num_byte,
(uint8_t *) dst_chunk,
(sgx_aes_gcm_128bit_iv_t *)(&(enc_chunk->reserved)),
(sgx_aes_gcm_128bit_tag_t *)(&(enc_chunk->payload_tag)),
(uint8_t *) blind_chunk);
src_float = (DtypeForCpuOp*) dst_chunk;
free(dst_chunk);
}
void GetChunk(IdT id, void* dst_chunk, int num_byte) {
int num_byte_enc_chunk = CalcEncDataSize(0, num_byte);
uint8_t* blind_chunk;
ChunkGuard<uint8_t> guard(blind_chunks, blind_chunk);
SgxEncT* enc_chunk = (SgxEncT*) get_untrusted_mem(id, num_byte_enc_chunk);
decrypt((uint8_t *) (&(enc_chunk->payload)),
num_byte,
(uint8_t *) dst_chunk,
(sgx_aes_gcm_128bit_iv_t *)(&(enc_chunk->reserved)),
(sgx_aes_gcm_128bit_tag_t *)(&(enc_chunk->payload_tag)),
(uint8_t *) blind_chunk);
DtypeForCpuOp* src_float = (DtypeForCpuOp*) dst_chunk;
}
private:
TrustedChunkManager() {
max_num_byte_plain_chunk = STORE_CHUNK_ELEM * sizeof(DtypeForCpuOp);
max_num_byte_enc_chunk = CalcEncDataSize(0, max_num_byte_plain_chunk);
blind_chunks = make_shared<ChunkPool>(THREAD_POOL_SIZE, max_num_byte_plain_chunk);
}
void* get_untrusted_mem(IdT id, int num_byte) {
void* dst_buf;
bool is_diff_size = false;
auto it = untrusted_mem_holder.begin();
auto end = untrusted_mem_holder.end();
int prev_num_byte;
{
std::unique_lock <std::mutex> lock(address_mutex);
it = untrusted_mem_holder.find(id);
end = untrusted_mem_holder.end();
}
if (it == end) {
allocate_in_untrusted(&dst_buf, num_byte);
{
std::unique_lock<std::mutex> lock(address_mutex);
untrusted_mem_holder[id] = std::make_pair(dst_buf, num_byte);
}
} else {
std::unique_lock<std::mutex> lock(address_mutex);
std::tie(dst_buf, prev_num_byte) = untrusted_mem_holder[id];
if (prev_num_byte != num_byte) {
is_diff_size = true;
}
}
if (is_diff_size) {
printf("id=%u\n",id);
printf("A id has assigned with multiple size: original: %d, now: %d\n", prev_num_byte, num_byte);
throw std::invalid_argument("A id has assigned with multiple size.");
}
return dst_buf;
}
const int size_chunk_pool = THREAD_POOL_SIZE;
int max_num_byte_plain_chunk;
int max_num_byte_enc_chunk;
std::atomic<int> id_counter;
std::mutex address_mutex;
std::shared_ptr<ChunkPool> blind_chunks;
std::unordered_map<int, std::pair<void*, int>> untrusted_mem_holder;
};
template <typename Func>
void run_all_chunks(Func chunk_op, int num_elem_in_chunk, int num_elem) {
int start_chunk;
for (start_chunk = 0; start_chunk + num_elem_in_chunk <= num_elem; start_chunk += num_elem_in_chunk) {
chunk_op(start_chunk, num_elem_in_chunk);
}
if (start_chunk < num_elem) chunk_op(start_chunk, num_elem - start_chunk);
}
// Maxpool variant of run_all_chunks: besides (start, len) the callback also
// receives the number of OUTPUT elements produced by that slice. Full slices
// produce num_elem_out; the trailing partial slice produces the prorated
// (len / inputhw) * outputhw outputs.
template <typename Func>
void run_all_chunks_for_maxpool(Func chunk_op, size_t num_elem_in_chunk, size_t num_elem, size_t num_elem_out, size_t inputhw, size_t outputhw) {
    size_t offset = 0;
    while (offset + num_elem_in_chunk <= num_elem) {
        chunk_op(offset, num_elem_in_chunk, num_elem_out);
        offset += num_elem_in_chunk;
    }
    if (offset < num_elem) {
        const size_t tail = num_elem - offset;
        chunk_op(offset, tail, (tail / inputhw) * outputhw);
    }
}
class SecretTen {
public:
SecretTen() {}
SecretTen(IdT TenId_, DimsT* Dims_) : TenId(TenId_), Dims(*Dims_) { Init(); }
~SecretTen() {
for (auto& it: PrgStateHolder) free(it.second);
}
int GetNumElem() { return Dims.dim0 * Dims.dim1 * Dims.dim2 * Dims.dim3; }
int GetSizeInByte() { return GetNumElem() * sizeof(DtypeForCpuOp); }
void Init() {
DtypeForCpuOp* store_chunk;
ChunkGuard<DtypeForCpuOp> guard(StoreChunkPool::GetChunkPool(), store_chunk);
auto& chunk_manager = TrustedChunkManager::getInstance();
auto chunk_op = [&](int start, int num_elem_in_op) {
int chunk_id = chunk_manager.GetNewId();
ChunkIds.push_back(chunk_id);
chunk_manager.StoreChunk(chunk_id, store_chunk, num_elem_in_op * sizeof(DtypeForCpuOp));
};
run_all_chunks(chunk_op, STORE_CHUNK_ELEM, GetNumElem());
}
int GetChunkId(int start) {
if (start >= GetNumElem()) {
printf("The start exceed the size of the tensor.\n");
throw std::invalid_argument("The start exceed the size of the tensor.");
}
return ChunkIds[start / STORE_CHUNK_ELEM];
}
void GetStoreChunk(int start, DtypeForCpuOp* store_chunk, int num_byte) {
auto& chunk_manager = TrustedChunkManager::getInstance();
int chunk_id = GetChunkId(start);
chunk_manager.StoreChunk(chunk_id, store_chunk, num_byte * sizeof(DtypeForCpuOp));
}
void SetTen(DtypeForCpuOp* Arr) {
auto& chunk_manager = TrustedChunkManager::getInstance();
auto chunk_op = [&](int start, int num_elem_in_op) {
int chunk_id = GetChunkId(start);
DtypeForCpuOp* src_arr = Arr + start;
chunk_manager.StoreChunk(chunk_id, src_arr, num_elem_in_op * sizeof(DtypeForCpuOp));
};
run_all_chunks(chunk_op, STORE_CHUNK_ELEM, GetNumElem());
}
void GetTen(DtypeForCpuOp* Arr) {
auto& chunk_manager = TrustedChunkManager::getInstance();
auto chunk_op = [&](int start, int num_elem_in_op) {
int chunk_id = GetChunkId(start);
DtypeForCpuOp* dst_arr = Arr + start;
chunk_manager.GetChunk(chunk_id, dst_arr, num_elem_in_op * sizeof(DtypeForCpuOp));
};
run_all_chunks(chunk_op, STORE_CHUNK_ELEM, GetNumElem());
}
void SetSeed(uint64_t RawSeed) {
SeedT seed;
memset(seed, 0, sizeof(SeedT));
auto TmpRawSeed = RawSeed;
for (int i = 0; TmpRawSeed > 0; i++) {
seed[i] = (uint8_t) (TmpRawSeed & ((1 << 9) - 1));
TmpRawSeed >>= 8;
}
PrgStateHolder[RawSeed] = (aes_stream_state*)memalign(16, sizeof(aes_stream_state));
InitPrgWithSeed(PrgStateHolder[RawSeed], seed);
}
void GetRandom(DtypeForCpuOp* DstArr, uint64_t RawSeed) {
auto PrgState = PrgStateHolder[RawSeed];
DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
auto chunk_op = [&](int start, int num_elem_in_op) {
float* input = DstArr + start;
get_r(PrgState, (uint8_t*) input, num_elem_in_op * sizeof(DtypeForCpuOp), 9);
for(size_t j = 0; j < num_elem_in_op; j++) {
input[j] -= floor(input[j] * invPLimit) * PLimit;
input[j] = (input[j] >= mid) ? (input[j] - p) : input[j];
}
};
run_all_chunks(chunk_op, WORK_CHUNK_ELEM, GetNumElem());
}
void GetShare(DtypeForCpuOp* DstArr, uint64_t RawSeed) {
auto PrgState = PrgStateHolder[RawSeed];
const DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
const DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
auto& chunk_manager = TrustedChunkManager::getInstance();
// DtypeForCpuOp* store_chunk;
// ChunkGuard<DtypeForCpuOp> guard(StoreChunkPool::GetChunkPool(), store_chunk);
DtypeForCpuOp* store_chunk = (DtypeForCpuOp*)memalign(64, STORE_CHUNK_ELEM * sizeof(DtypeForCpuOp));
auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
chunk_manager.GetChunk(GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
auto chunk_op = [&](int start, int num_elem_in_op) {
float *input = DstArr + start_store_chunk + start;
float *original = store_chunk + start;
get_r(PrgState, (uint8_t *) input, num_elem_in_op * sizeof(DtypeForCpuOp), 9);
for (size_t j = 0; j < num_elem_in_op; j++) {
input[j] = original[j] - input[j];
input[j] -= floor(input[j] * invPLimit) * PLimit;
input[j] = (input[j] >= mid) ? (input[j] - p) : input[j];
}
};
run_all_chunks(chunk_op, WORK_CHUNK_ELEM, num_elem_in_store_chunk);
};
run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, GetNumElem());
free(store_chunk);
}
IdT TenId;
DimsT Dims;
vector<int> ChunkIds;
unordered_map<uint64_t, aes_stream_state*> PrgStateHolder;
};
// Global registry of secret tensors, keyed by tensor id.
unordered_map<IdT, shared_ptr<SecretTen>> SecretTenHolder;
// Global registry of plaintext Eigen tensors (usage not visible here).
unordered_map<IdT, shared_ptr<EigenTensor>> TensorHolder;
// Look up a tensor by id.
// NOTE(review): operator[] default-inserts an empty shared_ptr for an unknown
// id instead of failing — callers then receive nullptr; confirm intended.
shared_ptr<SecretTen> GetTenById(IdT TenId) {
    return SecretTenHolder[TenId];
}
// Per-tag quantization exponents recorded by quantize_stochastic and
// consumed by dequantize_stochastic.
unordered_map<uint64_t, DtypeForCpuOp> quantize_exp;
// Map a random 32-bit integer to a float uniformly distributed in [0, 1):
// pair exponent 0x7F with the top 23 random bits as mantissa to get a value
// in [1, 2), then subtract 1.
// Fix: use memcpy for the bit reinterpretation — reading the inactive member
// of a union is undefined behavior in C++ (the old code relied on a
// compiler extension). Identical bit pattern and result.
static inline float uint32_to_float(uint32_t x) {
    const uint32_t bits = (UINT32_C(0x7F) << 23) | (x >> 9);
    float d;
    memcpy(&d, &bits, sizeof(d));
    return d - 1.0f;
}
// Map a 32-bit value to a float near [0, 1): OR in exponent 0x7F, then clear
// the top two bits via the shift pair, reinterpret, subtract 1.
// NOTE(review): unlike uint32_to_float, bits 23..29 of x perturb the exponent
// here — presumably intentional; confirm against the callers before changing.
// Fix: memcpy instead of union type-punning (UB in C++); bit logic unchanged.
static inline float float_to_uniform(uint32_t x) {
    const uint32_t bits = (((UINT32_C(0x7F) << 23) | x) << 2) >> 2;
    float d;
    memcpy(&d, &bits, sizeof(d));
    return d - 1.0f;
}
// xoshiro256++ PRNG (http://prng.di.unimi.it/), used as the fast in-enclave
// noise source for stochastic quantization.
// NOTE(review): only s[0] is seeded (the reference generator fills all four
// state words via splitmix64); the remaining words start at zero, giving a
// weak initial state — confirm this is acceptable for this use.
class Xoshiro256 {
public:
    Xoshiro256() {}
    Xoshiro256(uint64_t raw_seed) {
        set_seed(raw_seed);
    }

    void set_seed(uint64_t raw_seed) {
        s[0] = raw_seed;
    }

    static inline uint64_t rotl(const uint64_t x, int k) {
        return (x << k) | (x >> (64 - k));
    }

    // One step of xoshiro256++: output rotl(s0 + s3, 23) + s0, then advance
    // the state with the standard xor/shift/rotate schedule.
    uint64_t next(void) {
        const uint64_t result = rotl(s[0] + s[3], 23) + s[0];
        const uint64_t t = s[1] << 17;
        s[2] ^= s[0];
        s[3] ^= s[1];
        s[1] ^= s[2];
        s[0] ^= s[3];
        s[2] ^= t;
        s[3] = rotl(s[3], 45);
        return result;
    }

    // Fill arr with n_elem uniform floats in [0, 1), two per 64-bit draw.
    // n_elem must be even; throws otherwise.
    void rand_like(float* arr, uint64_t n_elem) {
        if (n_elem % 2 != 0) {
            printf("n_elem has to be even.\n");
            throw string("n_elem has to be even.");
        }
        // Fix: the loop index was a signed int compared against a uint64_t
        // bound — a signed/unsigned mix that overflows for n_elem > INT_MAX.
        for (uint64_t i = 0; i < n_elem; i += 2) {
            const uint64_t rnd = next();
            const uint32_t b = rnd & ((((uint64_t) 1) << 32) - 1);
            const uint32_t a = rnd >> 32;
            arr[i] = uint32_to_float(a);
            arr[i+1] = uint32_to_float(b);
        }
    }

    uint64_t s[4] = {};
};
// 128-bit-state companion generator (xoroshiro-style update with constants
// 17/49/21/28); like Xoshiro256, only s[0] is seeded.
class Xoshiro128 {
public:
    Xoshiro128() {}
    Xoshiro128(uint64_t raw_seed) {
        set_seed(raw_seed);
    }

    void set_seed(uint64_t raw_seed) {
        s[0] = raw_seed;
    }

    static inline uint64_t rotl(const uint64_t x, int k) {
        return (x << k) | (x >> (64 - k));
    }

    // Emit the next 64-bit value and advance the two state words.
    uint64_t next(void) {
        const uint64_t lo = s[0];
        const uint64_t mixed = s[1] ^ lo;
        const uint64_t out = rotl(lo + s[1], 17) + lo;
        s[0] = rotl(lo, 49) ^ mixed ^ (mixed << 21); // a, b
        s[1] = rotl(mixed, 28);                      // c
        return out;
    }

    uint64_t s[2] = {};
};
// Cache of per-tag fast PRNGs so repeated quantize calls with the same tag
// continue one deterministic stream.
unordered_map<uint64_t, shared_ptr<Xoshiro256>> fast_rngs;
//unordered_map<uint64_t, shared_ptr<Xoshiro128>> fast_rngs;
// Fetch (or lazily create, seeded with the tag itself) the RNG for `tag`.
// NOTE(review): not thread-safe — concurrent first calls for the same tag
// race on the map; confirm callers are serialized.
shared_ptr<Xoshiro256> get_fast_rng(uint64_t tag) {
    if (fast_rngs.find(tag) == fast_rngs.end()) {
        fast_rngs[tag] = make_shared<Xoshiro256>(tag);
    }
    return fast_rngs[tag];
}
// Stochastically quantize src_ten into dst_ten as `bits`-bit integers stored
// as floats. A shared exponent is derived from max|x| over the whole tensor
// (AVX reduction), clamped to an ebit-bit range, and recorded in
// quantize_exp[quantize_tag] for the matching dequantize. Rounding is
// stochastic: uniform [0,1) noise is added before flooring.
void quantize_stochastic(shared_ptr<SecretTen> src_ten, shared_ptr<SecretTen> dst_ten, uint64_t quantize_tag) {
    const int bits = 8;
    const int ebit = 8;
    const DtypeForCpuOp lower_limit = -pow(2, (bits - 1));
    const DtypeForCpuOp upper_limit = pow(2, (bits - 1)) - 1;
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp *store_chunk, *dst_store_chunk;
    ChunkGuard<DtypeForCpuOp> guard(StoreChunkPool::GetChunkPool(), store_chunk);
    ChunkGuard<DtypeForCpuOp> dst_guard(StoreChunkPool::GetChunkPool(), dst_store_chunk);
    const __m256 neg8f = _mm256_set1_ps(-0.0f);   // sign-bit mask for andnot -> |x|
    __m256 tmp8f = _mm256_set1_ps(0.0f);          // running per-lane max of |x|
    // Pass 1: per-lane maximum of the ABSOLUTE values over all chunks.
    auto get_max_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        int chunk_id = src_ten->GetChunkId(start_store_chunk);
        chunk_manager.GetChunk(chunk_id, store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        for(uint64_t i=0;i<num_elem_in_store_chunk;i+=8){
            const __m256 inp8f = _mm256_load_ps(&store_chunk[i]);
            const __m256 abs8f = _mm256_andnot_ps(neg8f, inp8f);
            // Fix: compare/accumulate abs8f. The previous code computed abs8f
            // but compared the raw signed inputs (abs8f was unused), so a
            // negative-dominated tensor produced a wrong, too-small max; the
            // Eigen reference below uses cwiseAbs().maxCoeff().
            const __m256 if_gt = _mm256_cmp_ps(abs8f, tmp8f, 0x0e);
            tmp8f = _mm256_blendv_ps(tmp8f, abs8f, if_gt);
        }
        //MapEigenVector src_vecmap(store_chunk, num_elem_in_store_chunk);
        //max_entry = std::max(max_entry, src_vecmap.cwiseAbs().maxCoeff());
    };
    run_all_chunks(get_max_chunk_op, STORE_CHUNK_ELEM, src_ten->GetNumElem());
    // Horizontal reduction: fold the 8 lanes of tmp8f down to
    // dst_store_chunk[0] by repeated halving.
    _mm256_stream_ps(dst_store_chunk, tmp8f);
    for(int i=4;i>0;i=i>>1){
        copy(dst_store_chunk+i,dst_store_chunk+2*i,dst_store_chunk+8);
        const __m256 inp8f = _mm256_load_ps(dst_store_chunk);
        const __m256 inp8f2 = _mm256_load_ps(&dst_store_chunk[8]);
        const __m256 if_gt = _mm256_cmp_ps(inp8f, inp8f2, 0x0e);
        const __m256 res8f = _mm256_blendv_ps(inp8f2, inp8f, if_gt);
        _mm256_stream_ps(dst_store_chunk, res8f);
    }
    // exponent = floor(log2(max)), clamped above by 2^(ebit-1)-1; 0 for an
    // all-zero tensor.
    if(1){
        dst_store_chunk[0] = (dst_store_chunk[0] == 0) ? 0: floor(log2(dst_store_chunk[0]));
        const __m256 inp8f = _mm256_load_ps(dst_store_chunk);
        tmp8f = _mm256_set1_ps(pow(2, (ebit - 1)) - 1);
        __m256 if_gt = _mm256_cmp_ps(inp8f, tmp8f, 0x0e);
        tmp8f = _mm256_blendv_ps(inp8f, tmp8f, if_gt);
        _mm256_stream_ps(dst_store_chunk, tmp8f);
    }
    DtypeForCpuOp exp = dst_store_chunk[0];
    quantize_exp[quantize_tag] = exp;
    DtypeForCpuOp enlarge_factor = pow(2, -exp + (bits - 2));
    auto& xor_rnd = *get_fast_rng(quantize_tag);
    // Pass 2: scale, add uniform noise, floor, clamp to [lower, upper], and
    // seal each quantized chunk back out.
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(src_ten->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        chunk_manager.GetChunk(dst_ten->GetChunkId(start_store_chunk), dst_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        auto chunk_op = [&](int start, int num_elem_in_op) {
            float *input = store_chunk + start;
            float *output = dst_store_chunk + start;
            xor_rnd.rand_like(output, num_elem_in_op);
            for(uint64_t i=0;i<num_elem_in_op;i+=8){
                tmp8f = _mm256_set1_ps(enlarge_factor);
                const __m256 inp8f = _mm256_load_ps(&input[i]);
                const __m256 out8f = _mm256_load_ps(&output[i]);
                const __m256 mul8f = _mm256_mul_ps(inp8f, tmp8f);
                const __m256 add8f = _mm256_add_ps(mul8f, out8f);
                const __m256 flo8f = _mm256_floor_ps(add8f);
                tmp8f = _mm256_set1_ps(lower_limit);
                __m256 if_gt = _mm256_cmp_ps(flo8f, tmp8f, 0x0e);
                __m256 res8f = _mm256_blendv_ps(tmp8f, flo8f, if_gt);
                tmp8f = _mm256_set1_ps(upper_limit);
                if_gt = _mm256_cmp_ps(res8f, tmp8f, 0x0e);
                res8f = _mm256_blendv_ps(res8f, tmp8f, if_gt);
                _mm256_stream_ps(&output[i], res8f);
            }
        };
        run_all_chunks(chunk_op, WORK_CHUNK_ELEM, num_elem_in_store_chunk);
        // Persist the quantized chunk back to untrusted memory.
        chunk_manager.StoreChunk(dst_ten->GetChunkId(start_store_chunk), dst_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, src_ten->GetNumElem());
}
// Undo the scaling applied by quantize_stochastic for a product of two
// quantized factors: multiply each element of src by
// 2^(x_exp + y_exp - 2*(bits-2)) and seal the result into dst.
// x_tag / y_tag select the exponents recorded for the two factors.
void dequantize_stochastic(shared_ptr<SecretTen> src_ten, shared_ptr<SecretTen> dst_ten,
        uint64_t x_tag, uint64_t y_tag) {
    const int bits = 8;
    DtypeForCpuOp x_exp = quantize_exp[x_tag];
    DtypeForCpuOp y_exp = quantize_exp[y_tag];
    // Hoisted: shrink_factor is loop-invariant (was recomputed per chunk).
    const DtypeForCpuOp shrink_factor = pow(2, x_exp - (bits - 2) + y_exp - (bits - 2));
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp *store_chunk, *dst_store_chunk;
    ChunkGuard<DtypeForCpuOp> guard(StoreChunkPool::GetChunkPool(), store_chunk);
    ChunkGuard<DtypeForCpuOp> dst_guard(StoreChunkPool::GetChunkPool(), dst_store_chunk);
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(src_ten->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        chunk_manager.GetChunk(dst_ten->GetChunkId(start_store_chunk), dst_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        MapEigenTensor src_map = MapEigenTensor(store_chunk, 1, 1, 1, num_elem_in_store_chunk);
        MapEigenTensor dst_map = MapEigenTensor(dst_store_chunk, 1, 1, 1, num_elem_in_store_chunk);
        dst_map = src_map * shrink_factor;
        // Fix: persist the result. Previously the scaled data was written
        // only into the enclave scratch buffer and never StoreChunk-ed back,
        // so dst_ten kept its old sealed contents (compare the explicit
        // StoreChunk added at the end of quantize_stochastic's chunk loop).
        chunk_manager.StoreChunk(dst_ten->GetChunkId(start_store_chunk), dst_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, src_ten->GetNumElem());
}
// Unseal an entire (small, single-chunk) tensor into a fresh 64-byte-aligned
// heap buffer and register the pair in `small_chunks` so store_small_chunks
// can later write it back and free it. Returns the plaintext buffer.
DtypeForCpuOp* get_small_chunk(
        shared_ptr<SecretTen> tensor,
        vector<std::pair<shared_ptr<SecretTen>, DtypeForCpuOp*>>& small_chunks) {
    const int n_bytes = tensor->GetSizeInByte();
    auto* buf = (DtypeForCpuOp*) memalign(64, n_bytes);
    TrustedChunkManager::getInstance().GetChunk(tensor->GetChunkId(0), buf, n_bytes);
    small_chunks.emplace_back(tensor, buf);
    return buf;
}
// Seal every buffer registered by get_small_chunk back into its tensor and
// free the temporary allocation. (The entries are left in the vector; callers
// build a fresh vector per operation.)
void store_small_chunks(vector<std::pair<shared_ptr<SecretTen>, DtypeForCpuOp*>>& small_chunks) {
    // Hoisted: the singleton lookup is loop-invariant (was fetched per
    // iteration); iterate by reference to avoid a shared_ptr copy per entry.
    auto& chunk_manager = TrustedChunkManager::getInstance();
    for (auto& entry : small_chunks) {
        auto& tensor = entry.first;
        DtypeForCpuOp* arr = entry.second;
        chunk_manager.StoreChunk(tensor->GetChunkId(0), arr, tensor->GetSizeInByte());
        free(arr);
    }
}
// Batch-normalization state and kernels for one BN layer over sealed
// SecretTen data. forward/backward stream the input through enclave scratch
// buffers one store-chunk at a time; per-channel statistics live in small
// single-chunk tensors handled via get_small_chunk/store_small_chunks.
class BatchnormBuffer {
public:
    BatchnormBuffer(){}
    BatchnormBuffer(IdT FunId_) : FunId(FunId_) {
        NumBatchesTrackedArr = 0;
        BackwardState = false;
    }
    ~BatchnormBuffer() = default;

    // Bind all tensor ids and hyper-parameters; must be called before
    // forward/backward. Chunking requires STORE_CHUNK_ELEM to be a multiple
    // of C*H*W so a chunk always holds whole batch samples.
    void init(
            IdT input, IdT output, IdT gamma, IdT beta,
            IdT der_input, IdT der_output, IdT der_gamma, IdT der_beta,
            IdT run_mean, IdT run_var, IdT cur_mean, IdT cur_var,
            IdT mu,
            uint32_t batch_, uint32_t channel_, uint32_t height_, uint32_t width_,
            int affine_, int is_cumulative_, float momentum_, float epsilon_) {
        input_tensor = GetTenById(input);
        output_tensor = GetTenById(output);
        der_input_tensor = GetTenById(der_input);
        der_output_tensor = GetTenById(der_output);
        mu_tensor = GetTenById(mu);
        // size = num_channel * sizeof(byte)
        gamma_tensor = GetTenById(gamma);
        beta_tensor = GetTenById(beta);
        der_gamma_tensor = GetTenById(der_gamma);
        der_beta_tensor = GetTenById(der_beta);
        run_mean_tensor = GetTenById(run_mean);
        run_var_tensor = GetTenById(run_var);
        cur_mean_tensor = GetTenById(cur_mean);
        cur_var_tensor = GetTenById(cur_var);
        batch = batch_;
        channel = channel_;
        height = height_;
        width = width_;
        Affine = affine_;
        momentum = momentum_;
        epsilon = epsilon_;
        is_cumulative = is_cumulative_;
        num_rows = channel * height * width;
        num_rows_in_channel = height * width;
        total_n = height * width * batch;
        default_num_batches_per_chunk = std::min(STORE_CHUNK_ELEM, input_tensor->GetNumElem()) / num_rows;
        if (STORE_CHUNK_ELEM % num_rows != 0) {
            // Fix: '%' must be escaped as '%%' in a printf format string; the
            // old literal contained a stray conversion specification, which
            // is undefined behavior without a matching argument.
            printf("STORE_CHUNK_ELEM %% num_rows != 0\n");
            // NOTE(review): silently returning leaves the buffer partially
            // initialized — confirm callers detect this condition.
            return;
        }
    }

    // Fraction of the whole batch contained in one chunk; used to weight
    // per-chunk means so they sum to the global mean.
    DtypeForCpuOp get_fraction_bag(int num_elem_in_chunk) {
        int batch_in_chunk = num_elem_in_chunk / num_rows;
        return ((DtypeForCpuOp) batch_in_chunk / batch);
    }

    // Number of whole batch samples in a chunk of the given size.
    int get_num_batches_per_chunk(int num_elem_in_chunk) {
        return num_elem_in_chunk / num_rows;
    }

    // Forward BN. training != 0: compute per-channel batch statistics in
    // streamed passes, normalize with them, persist mu = x - mean for
    // backward, and fold the stats into the running statistics. training == 0:
    // normalize with the stored running statistics.
    void forward(int training) {
        Training = training;
        vector<std::pair<shared_ptr<SecretTen>, DtypeForCpuOp*>> small_chunks;
        auto& chunk_manager = TrustedChunkManager::getInstance();
        DtypeForCpuOp *data_chunk, *mu_chunk;
        ChunkGuard<DtypeForCpuOp> data_guard(StoreChunkPool::GetChunkPool(), data_chunk);
        ChunkGuard<DtypeForCpuOp> mu_guard(StoreChunkPool::GetChunkPool(), mu_chunk);
        EigenMatrixMap data_mat(data_chunk, num_rows, default_num_batches_per_chunk);
        EigenMatrixMap mu_mat(mu_chunk, num_rows, default_num_batches_per_chunk);
        DtypeForCpuOp *gamma_chunk = get_small_chunk(gamma_tensor, small_chunks);
        DtypeForCpuOp *beta_chunk = get_small_chunk(beta_tensor, small_chunks);
        DtypeForCpuOp *run_mean_chunk = get_small_chunk(run_mean_tensor, small_chunks);
        DtypeForCpuOp *run_var_chunk = get_small_chunk(run_var_tensor, small_chunks);
        DtypeForCpuOp *cur_mean_chunk = get_small_chunk(cur_mean_tensor, small_chunks);
        DtypeForCpuOp *cur_var_chunk = get_small_chunk(cur_var_tensor, small_chunks);
        if (training) {
            NumBatchesTrackedArr += 1;
            // Cumulative mode averages all batches equally (momentum = 1/N).
            const DtypeForCpuOp chosen_momentum = (is_cumulative) ? (1 / (DtypeForCpuOp) NumBatchesTrackedArr) : momentum;
            fill(cur_mean_chunk, cur_mean_chunk + channel, 0);
            // epsilon is pre-seeded into the variance accumulator.
            fill(cur_var_chunk, cur_var_chunk + channel, epsilon);
            // Pass 1: per-channel mean, accumulated chunk by chunk.
            run_all_chunks([&](int start_store_chunk, int num_elem_in_store_chunk) {
                int num_batches_per_chunk = get_num_batches_per_chunk(num_elem_in_store_chunk);
                int chunk_size_in_byte = num_elem_in_store_chunk * sizeof(DtypeForCpuOp);
                chunk_manager.GetChunk(input_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
                for(uint32_t i = 0; i < channel; i++) {
                    auto data_block = data_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                    cur_mean_chunk[i] += data_block.mean() * get_fraction_bag(num_elem_in_store_chunk);
                }
            }, STORE_CHUNK_ELEM, input_tensor->GetNumElem());
            // Pass 2: mu = x - mean (persisted for backward) and per-channel
            // biased variance.
            run_all_chunks([&](int start_store_chunk, int num_elem_in_store_chunk) {
                int num_batches_per_chunk = get_num_batches_per_chunk(num_elem_in_store_chunk);
                int chunk_size_in_byte = num_elem_in_store_chunk * sizeof(DtypeForCpuOp);
                chunk_manager.GetChunk(input_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
                for(uint32_t i = 0; i < channel; i++) {
                    auto data_block = data_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                    auto mu_block = mu_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                    mu_block = data_block.array() - cur_mean_chunk[i];
                    cur_var_chunk[i] += (mu_block).cwiseProduct(mu_block).mean() * get_fraction_bag(num_elem_in_store_chunk);
                }
                chunk_manager.StoreChunk(mu_tensor->GetChunkId(start_store_chunk), mu_chunk, chunk_size_in_byte);
            }, STORE_CHUNK_ELEM, input_tensor->GetNumElem());
            // Pass 3: normalize mu by sqrt(var) (plus affine scale/shift) and
            // seal the output.
            run_all_chunks([&](int start_store_chunk, int num_elem_in_store_chunk) {
                int num_batches_per_chunk = get_num_batches_per_chunk(num_elem_in_store_chunk);
                int chunk_size_in_byte = num_elem_in_store_chunk * sizeof(DtypeForCpuOp);
                chunk_manager.GetChunk(mu_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
                for(uint32_t i = 0; i < channel; i++) {
                    auto data_block = data_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                    if (Affine) {
                        data_block = (data_block.array() / sqrt(cur_var_chunk[i])) * gamma_chunk[i] + beta_chunk[i];
                    } else {
                        data_block = data_block / sqrt(cur_var_chunk[i]);
                    }
                }
                chunk_manager.StoreChunk(output_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
            }, STORE_CHUNK_ELEM, input_tensor->GetNumElem());
            // Exponential-moving-average update of the running statistics.
            for (int i = 0; i < channel; i++) {
                run_mean_chunk[i] = (cur_mean_chunk[i] - run_mean_chunk[i]) * chosen_momentum + run_mean_chunk[i];
                run_var_chunk[i] = (cur_var_chunk[i] - run_var_chunk[i]) * chosen_momentum + run_var_chunk[i];
            }
        } else {
            // Inference: single pass using the running statistics.
            run_all_chunks([&](int start_store_chunk, int num_elem_in_store_chunk) {
                int num_batches_per_chunk = get_num_batches_per_chunk(num_elem_in_store_chunk);
                int chunk_size_in_byte = num_elem_in_store_chunk * sizeof(DtypeForCpuOp);
                chunk_manager.GetChunk(input_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
                for(uint32_t i = 0; i < channel; i++) {
                    auto data_block = data_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                    data_block = data_block.array() - run_mean_chunk[i];
                    if (Affine) {
                        data_block = (data_block.array() / sqrt(run_var_chunk[i])) * gamma_chunk[i] + beta_chunk[i];
                    } else {
                        data_block = data_block / sqrt(run_var_chunk[i]);
                    }
                }
                chunk_manager.StoreChunk(output_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
            }, STORE_CHUNK_ELEM, input_tensor->GetNumElem());
        }
        store_small_chunks(small_chunks);
        BackwardState = true;
    }

    // Backward BN: accumulate der_gamma / der_beta in one pass, then compute
    // der_input chunk by chunk using the persisted mu and the (training or
    // running) variance. Requires a preceding forward().
    void backward() {
        if (!BackwardState) {
            printf("Forward Batch Normalization has not been done.\n");
            return;
        }
        vector<std::pair<shared_ptr<SecretTen>, DtypeForCpuOp*>> small_chunks;
        auto& chunk_manager = TrustedChunkManager::getInstance();
        DtypeForCpuOp *data_chunk, *mu_chunk;
        ChunkGuard<DtypeForCpuOp> data_guard(StoreChunkPool::GetChunkPool(), data_chunk);
        ChunkGuard<DtypeForCpuOp> mu_guard(StoreChunkPool::GetChunkPool(), mu_chunk);
        EigenMatrixMap data_mat(data_chunk, num_rows, default_num_batches_per_chunk);
        EigenMatrixMap mu_mat(mu_chunk, num_rows, default_num_batches_per_chunk);
        DtypeForCpuOp *gamma_chunk = get_small_chunk(gamma_tensor, small_chunks);
        DtypeForCpuOp *beta_chunk = get_small_chunk(beta_tensor, small_chunks);
        DtypeForCpuOp *der_gamma_chunk = get_small_chunk(der_gamma_tensor, small_chunks);
        DtypeForCpuOp *der_beta_chunk = get_small_chunk(der_beta_tensor, small_chunks);
        DtypeForCpuOp *run_mean_chunk = get_small_chunk(run_mean_tensor, small_chunks);
        DtypeForCpuOp *run_var_chunk = get_small_chunk(run_var_tensor, small_chunks);
        DtypeForCpuOp *cur_mean_chunk = get_small_chunk(cur_mean_tensor, small_chunks);
        DtypeForCpuOp *cur_var_chunk = get_small_chunk(cur_var_tensor, small_chunks);
        fill(der_beta_chunk, der_beta_chunk + channel, 0);
        fill(der_gamma_chunk, der_gamma_chunk + channel, 0);
        // Pass 1: der_gamma[i] = sum(mu .* dout) / sqrt(var), der_beta[i] = sum(dout).
        run_all_chunks([&](int start_store_chunk, int num_elem_in_store_chunk) {
            int num_batches_per_chunk = get_num_batches_per_chunk(num_elem_in_store_chunk);
            int chunk_size_in_byte = num_elem_in_store_chunk * sizeof(DtypeForCpuOp);
            chunk_manager.GetChunk(der_output_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
            chunk_manager.GetChunk(mu_tensor->GetChunkId(start_store_chunk), mu_chunk, chunk_size_in_byte);
            for(uint32_t i = 0; i < channel; i++) {
                auto data_block = data_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                auto mu_block = mu_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                DtypeForCpuOp variance = (Training) ? cur_var_chunk[i] : run_var_chunk[i];
                der_gamma_chunk[i] += mu_block.cwiseProduct(data_block).sum() / sqrt(variance);
                der_beta_chunk[i] += data_block.sum();
            }
        }, STORE_CHUNK_ELEM, input_tensor->GetNumElem());
        // Pass 2: der_input = gamma/(n*sqrt(var)) * (n*dout - der_beta - mu*der_gamma/sqrt(var)).
        run_all_chunks([&](int start_store_chunk, int num_elem_in_store_chunk) {
            int num_batches_per_chunk = get_num_batches_per_chunk(num_elem_in_store_chunk);
            int chunk_size_in_byte = num_elem_in_store_chunk * sizeof(DtypeForCpuOp);
            chunk_manager.GetChunk(der_output_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
            chunk_manager.GetChunk(mu_tensor->GetChunkId(start_store_chunk), mu_chunk, chunk_size_in_byte);
            for(uint32_t i = 0; i < channel; i++) {
                auto data_block = data_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                auto mu_block = mu_mat.block(i * num_rows_in_channel, 0, num_rows_in_channel, num_batches_per_chunk);
                DtypeForCpuOp variance = (Training) ? cur_var_chunk[i] : run_var_chunk[i];
                DtypeForCpuOp gamma = (Affine) ? gamma_chunk[i] : 1;
                mu_block *= der_gamma_chunk[i] / sqrt(variance);
                variance = sqrt(variance);
                // der_gamma_chunk[i] /= variance;
                variance = gamma / ((DtypeForCpuOp) total_n * variance);
                data_block = total_n * data_block.array() - der_beta_chunk[i] - mu_block.array();
                data_block *= variance;
            }
            chunk_manager.StoreChunk(der_input_tensor->GetChunkId(start_store_chunk), data_chunk, chunk_size_in_byte);
        }, STORE_CHUNK_ELEM, input_tensor->GetNumElem());
        store_small_chunks(small_chunks);
        BackwardState = false;
    }

    IdT FunId;                      // id of this BN layer instance
    int batch;                      // N
    int channel;                    // C
    int height;                     // H
    int width;                      // W
    DtypeForCpuOp momentum;         // running-stats momentum (non-cumulative mode)
    DtypeForCpuOp epsilon;          // stability term seeded into the variance
    bool is_cumulative;             // true: momentum = 1/num_batches_tracked
    bool BackwardState;             // set by forward; required by backward
    bool Affine;                    // apply gamma/beta
    bool Training;                  // mode of the last forward call
    int num_rows;                   // C*H*W, rows per batch sample
    int num_rows_in_channel;        // H*W
    int total_n;                    // H*W*N, denominator in backward
    int default_num_batches_per_chunk;  // batch samples per full store chunk
    int NumBatchesTrackedArr = 0;   // count of training forward calls
    shared_ptr<SecretTen> input_tensor;
    shared_ptr<SecretTen> output_tensor;
    shared_ptr<SecretTen> der_input_tensor;
    shared_ptr<SecretTen> der_output_tensor;
    shared_ptr<SecretTen> mu_tensor;       // x - mean, persisted by forward
    shared_ptr<SecretTen> gamma_tensor;
    shared_ptr<SecretTen> beta_tensor;
    shared_ptr<SecretTen> der_gamma_tensor;
    shared_ptr<SecretTen> der_beta_tensor;
    shared_ptr<SecretTen> run_mean_tensor;
    shared_ptr<SecretTen> run_var_tensor;
    shared_ptr<SecretTen> cur_mean_tensor;
    shared_ptr<SecretTen> cur_var_tensor;
};
class MaxpoolBuffer {
public:
    MaxpoolBuffer() {}
    // Binds the buffer to a function id plus the ids of the two transposed
    // scratch tensors produced by forward and consumed by backward.
    MaxpoolBuffer(IdT FunId_, IdT TenIdin_trans_, IdT TenIdout_trans_) : FunId(FunId_), TenIdin_trans(TenIdin_trans_), TenIdout_trans(TenIdout_trans_) { }
    ~MaxpoolBuffer() = default;
    // Id of the transposed-input scratch tensor.
    IdT get_TenIdin_trans(){
        return TenIdin_trans;
    }
    // Id of the transposed-output scratch tensor.
    IdT get_TenIdout_trans(){
        return TenIdout_trans;
    }
//if NCHW->WHCN N=CN M=HW
void transpose(const DtypeForCpuOp *src, DtypeForCpuOp *dst, const size_t N, const size_t M) {
#pragma omp parallel for
for(size_t n = 0; n<N*M; n++) {
size_t i = n/N;
size_t j = n%N;
dst[n] = src[M*j + i];
}
}
inline void transpose4x4_SSE(const float *A, float *B, const uint32_t lda, const uint32_t ldb) {
__m128 row1 = _mm_load_ps(&A[0*lda]);
__m128 row2 = _mm_load_ps(&A[1*lda]);
__m128 row3 = _mm_load_ps(&A[2*lda]);
__m128 row4 = _mm_load_ps(&A[3*lda]);
_MM_TRANSPOSE4_PS(row1, row2, row3, row4);
_mm_store_ps(&B[0*ldb], row1);
_mm_store_ps(&B[1*ldb], row2);
_mm_store_ps(&B[2*ldb], row3);
_mm_store_ps(&B[3*ldb], row4);
}
inline void transpose_block_SSE4x4(const float *A, float *B, const uint32_t lda, const uint32_t ldb ,const int block_size) {
#pragma omp parallel for
for(uint32_t i=0; i<ldb; i+=block_size) {
for(uint32_t j=0; j<lda; j+=block_size) {
uint32_t max_i2 = i+block_size < ldb ? i + block_size : ldb;
uint32_t max_j2 = j+block_size < lda ? j + block_size : lda;
for(uint32_t i2=i; i2<max_i2; i2+=4) {
for(uint32_t j2=j; j2<max_j2; j2+=4) {
transpose4x4_SSE(&A[i2*lda +j2], &B[j2*ldb + i2], lda, ldb);
}
}
}
}
}
inline void MaxpoolAVX(const uint32_t num_img, float* input, float* output){
#pragma omp parallel for
for(size_t i=0; i<num_img; i+=8){
const __m256 inp8f = _mm256_load_ps(&input[i]);
const __m256 out8f = _mm256_load_ps(&output[i]);
const __m256 if_lq = _mm256_cmp_ps(out8f, inp8f, 0x01);
const __m256 res8f = _mm256_blendv_ps(out8f, inp8f, if_lq);
_mm256_stream_ps(&output[i], res8f);
}
}
inline void MaxpoolbackAVX(const uint32_t num_img, float* input, float* output, float* dinput, float* doutput){
#pragma omp parallel for
for(size_t i=0; i<num_img; i+=8){
const __m256 inp8f = _mm256_load_ps(&input[i]);
const __m256 out8f = _mm256_load_ps(&output[i]);
const __m256 din8f = _mm256_load_ps(&dinput[i]);
const __m256 dout8f = _mm256_load_ps(&doutput[i]);
const __m256 if_eq = _mm256_cmp_ps(out8f, inp8f, 0x00);
const __m256 sum8f = _mm256_add_ps(din8f, dout8f);
const __m256 res8f = _mm256_blendv_ps(din8f, sum8f, if_eq); // define dinput
const __m256 res28f = _mm256_blendv_ps(dout8f, zero8f, if_eq); // redefine doutput
_mm256_store_ps(&dinput[i], res8f);
_mm256_stream_ps(&doutput[i], res28f);
}
}
void forward(
shared_ptr<SecretTen> ten_in, shared_ptr<SecretTen> ten_out,
shared_ptr<SecretTen> ten_in_trans, shared_ptr<SecretTen> ten_out_trans,
uint32_t batch, uint32_t channel,uint32_t input_height, uint32_t input_width,
uint32_t output_height, uint32_t output_width, uint32_t filter_height,
uint32_t filter_width, uint32_t row_stride, uint32_t col_stride) {
const uint32_t inputhw = input_height*input_width;
const uint32_t num_img_in_storechunk = STORE_CHUNK_ELEM/inputhw;
if(STORE_CHUNK_ELEM % inputhw != 0){
printf("STORE_CHUNK_ELEM %% inputhw != 0\n");
return;
}
//if (num_img_in_storechunk % 8 != 0){
// printf("STORE_CHUNK_ELEM/inputhw is not divisible by 8!\n");
// return;
//}
const uint32_t outputhw = output_height * output_width;
uint32_t outputsize_in_storechunk = num_img_in_storechunk * outputhw;
const uint32_t total_size = batch * channel * inputhw;
size_t idx_out=0;
size_t idx_tmp=0;
size_t size_of_store_chunk = STORE_CHUNK_ELEM * sizeof(float);
bool if_use_SSE_out =(outputhw%4==0);
float* chunk_in, *chunk_out, *chunk_in_trans, *chunk_out_trans, *chunk_tmp;
auto& chunk_manager = TrustedChunkManager::getInstance();
ChunkGuard<DtypeForCpuOp> guard_in(StoreChunkPool::GetChunkPool(), chunk_in);
ChunkGuard<DtypeForCpuOp> guard_out(StoreChunkPool::GetChunkPool(), chunk_out);
ChunkGuard<DtypeForCpuOp> guard_int(StoreChunkPool::GetChunkPool(), chunk_in_trans);
ChunkGuard<DtypeForCpuOp> guard_outt(StoreChunkPool::GetChunkPool(), chunk_out_trans);
ChunkGuard<DtypeForCpuOp> guard_tmp(StoreChunkPool::GetChunkPool(), chunk_tmp); // chunk_tmp is used to store output temporarily
auto chunk_op = [&](size_t start_chunk, size_t num_elem_in, size_t num_elem_out) {
// printf("maxpooling forward in enclave. start_chunk: %d\n", start_chunk);
chunk_manager.GetChunk(ten_in->GetChunkId(start_chunk), chunk_in, num_elem_in * sizeof(DtypeForCpuOp));
transpose_block_SSE4x4(chunk_in, chunk_in_trans, inputhw, num_img_in_storechunk, 8);
chunk_manager.StoreChunk(ten_in_trans->GetChunkId(start_chunk), chunk_in_trans, size_of_store_chunk);
fill(chunk_out_trans, chunk_out_trans + outputsize_in_storechunk, std::numeric_limits<DtypeForCpuOp>::lowest());
for(uint32_t h = 0; h < input_height; ++h) {
for(uint32_t w = 0; w < input_width; ++w) {
// (h_start, h_end) * (w_start, w_end) is the range that the input
// vector projects to.
const uint32_t h_start = (h < filter_height)
? 0
: (h - filter_height) / row_stride + 1;
const uint32_t h_end = std::min(h / row_stride + 1, output_height);
const uint32_t w_start = (w < filter_width)
? 0
: (w - filter_width) / col_stride + 1;
const uint32_t w_end = std::min(w / col_stride + 1, output_width);
// compute elementwise max
const uint32_t in_offset = (h * input_width + w)*num_img_in_storechunk;
for (uint32_t ph = h_start; ph < h_end; ++ph) {
const uint32_t out_offset_base = ph * output_width;
for (uint32_t pw = w_start; pw < w_end; ++pw) {
const uint32_t out_offset = (out_offset_base + pw) * num_img_in_storechunk;
MaxpoolAVX(num_img_in_storechunk, chunk_in_trans+in_offset, chunk_out_trans + out_offset);
}
}
}
}
chunk_manager.StoreChunk(ten_out_trans->GetChunkId(start_chunk), chunk_out_trans, size_of_store_chunk);
//transpose
if(if_use_SSE_out){
transpose_block_SSE4x4(chunk_out_trans, chunk_tmp, num_img_in_storechunk, outputhw, 8);
}
else{
transpose(chunk_out_trans, chunk_tmp, outputhw, num_img_in_storechunk);
}
if(idx_tmp+num_elem_out<STORE_CHUNK_ELEM){
copy(chunk_tmp, chunk_tmp+num_elem_out, chunk_out + idx_tmp);
idx_tmp+=num_elem_out;
}
else{
size_t idx_add = STORE_CHUNK_ELEM-idx_tmp;
copy(chunk_tmp,chunk_tmp+idx_add,chunk_out+idx_tmp);
chunk_manager.StoreChunk(ten_out->GetChunkId(idx_out), chunk_out, size_of_store_chunk);
idx_out += STORE_CHUNK_ELEM;
copy(chunk_tmp + idx_add,chunk_tmp + num_elem_out,chunk_out + idx_tmp+idx_add);
idx_tmp += num_elem_out;
idx_tmp -= STORE_CHUNK_ELEM;
}
};//end of chunk_op
run_all_chunks_for_maxpool(chunk_op, STORE_CHUNK_ELEM, batch * channel * inputhw, outputsize_in_storechunk, inputhw, outputhw);
if (idx_tmp!=0) {
chunk_manager.StoreChunk(ten_out->GetChunkId(idx_out), chunk_out, idx_tmp * sizeof(DtypeForCpuOp));
}
}//end maxpooling
    // Max-pooling backward pass (gradient scatter).
    // Recomputes, per input position, which pooled output it fed, and routes the
    // output gradient (ten_dout) back to the argmax positions using the
    // transposed copies of the forward activations (ten_in_trans/ten_out_trans)
    // saved by forward(). The result is written to ten_din.
    //
    // ten_dout is streamed through a double buffer: chunk_tmp holds the raw
    // store-chunks of ten_dout while chunk_dout holds the slice (re-aligned by
    // idx_tmp/idx_dout) that corresponds to the images of the current
    // input-transposed chunk. NOTE(review): assumes STORE_CHUNK_ELEM is an exact
    // multiple of inputhw per the chunking scheme — confirm against forward().
    void backward(
            shared_ptr<SecretTen> ten_din, shared_ptr<SecretTen> ten_dout,
            shared_ptr<SecretTen> ten_in_trans, shared_ptr<SecretTen> ten_out_trans,
            uint32_t batch, uint32_t channel,uint32_t input_height, uint32_t input_width,
            uint32_t output_height, uint32_t output_width,
            uint32_t filter_height, uint32_t filter_width, uint32_t row_stride, uint32_t col_stride) {
        const uint32_t num_img = batch*channel;
        const uint32_t inputhw = input_height * input_width;
        // How many whole images fit in one store chunk (transposed layout).
        const uint32_t num_img_in_storechunk = STORE_CHUNK_ELEM / inputhw;
        const uint32_t outputhw = output_height*output_width;
        // Number of ten_dout elements that correspond to one input store chunk.
        uint32_t outputsize_in_storechunk = num_img_in_storechunk * outputhw;
        const uint32_t total_size = num_img * inputhw;
        const uint32_t total_size_out = num_img * outputhw;
        size_t idx_dout=0;   // write cursor into chunk_dout when a dout slice straddles two store chunks
        size_t idx_tmp=0;    // read cursor into chunk_tmp (the raw ten_dout buffer)
        bool if_use_SSE_out = (outputhw%4==0);  // SSE transpose requires a multiple-of-4 minor dim
        float* chunk_din, *chunk_dout, *chunk_in_trans, *chunk_out_trans, *chunk_din_trans, *chunk_dout_trans, *chunk_tmp;
        auto& chunk_manager = TrustedChunkManager::getInstance();
        // Guards lease scratch buffers from the pool and release them on scope exit (RAII).
        ChunkGuard<DtypeForCpuOp> guard_din(StoreChunkPool::GetChunkPool(), chunk_din);
        ChunkGuard<DtypeForCpuOp> guard_dout(StoreChunkPool::GetChunkPool(), chunk_dout);
        ChunkGuard<DtypeForCpuOp> guard_int(StoreChunkPool::GetChunkPool(), chunk_in_trans);
        ChunkGuard<DtypeForCpuOp> guard_outt(StoreChunkPool::GetChunkPool(), chunk_out_trans);
        ChunkGuard<DtypeForCpuOp> guard_dint(StoreChunkPool::GetChunkPool(), chunk_din_trans);
        ChunkGuard<DtypeForCpuOp> guard_doutt(StoreChunkPool::GetChunkPool(), chunk_dout_trans);
        ChunkGuard<DtypeForCpuOp> guard_tmp(StoreChunkPool::GetChunkPool(), chunk_tmp);
        size_t start_chunk_out=0;  // how many ten_dout elements have been fetched so far
        // Prefetch the first ten_dout store chunk into chunk_tmp.
        if(total_size>=STORE_CHUNK_ELEM){
            size_t getsize_out;
            if(STORE_CHUNK_ELEM>total_size_out){
                getsize_out = total_size_out;
            }
            else{
                getsize_out = STORE_CHUNK_ELEM;
            }
            chunk_manager.GetChunk(ten_dout->GetChunkId(0), chunk_tmp, getsize_out * sizeof(DtypeForCpuOp));
            start_chunk_out += getsize_out;
        }
        else{
            chunk_manager.GetChunk(ten_dout->GetChunkId(0), chunk_tmp, total_size_out * sizeof(float));
        }
        auto chunk_op = [&](size_t start_chunk, size_t num_elem_in, size_t num_elem_out) {
            // Saved forward activations (already transposed to image-minor layout).
            chunk_manager.GetChunk(ten_in_trans->GetChunkId(start_chunk), chunk_in_trans, STORE_CHUNK_ELEM * sizeof(DtypeForCpuOp));
            chunk_manager.GetChunk(ten_out_trans->GetChunkId(start_chunk), chunk_out_trans, STORE_CHUNK_ELEM * sizeof(DtypeForCpuOp));
            // Assemble into chunk_dout the slice of ten_dout matching this input chunk,
            // pulling the next raw store chunk into chunk_tmp whenever the slice straddles a boundary.
            if(num_elem_in == STORE_CHUNK_ELEM){
                if(idx_tmp + outputsize_in_storechunk > STORE_CHUNK_ELEM){
                    copy(chunk_tmp+idx_tmp,chunk_tmp+STORE_CHUNK_ELEM,chunk_dout);
                    idx_dout = STORE_CHUNK_ELEM-idx_tmp;
                    chunk_manager.GetChunk(ten_dout->GetChunkId(start_chunk_out), chunk_tmp, STORE_CHUNK_ELEM * sizeof(DtypeForCpuOp));
                    start_chunk_out += STORE_CHUNK_ELEM;
                    idx_tmp = outputsize_in_storechunk-idx_dout;
                    copy(chunk_tmp, chunk_tmp+idx_tmp, chunk_dout+idx_dout);
                }
                else{
                    copy(chunk_tmp+idx_tmp,chunk_tmp+idx_tmp+outputsize_in_storechunk,chunk_dout);
                    idx_tmp += outputsize_in_storechunk;
                }
            }
            else{
                // Last (partial) input chunk: drain whatever remains of ten_dout.
                if(idx_tmp==STORE_CHUNK_ELEM||idx_tmp==0){
                    chunk_manager.GetChunk(ten_dout->GetChunkId(start_chunk_out), chunk_dout, (total_size_out-start_chunk_out) * sizeof(DtypeForCpuOp));
                }
                else{
                    copy(chunk_tmp+idx_tmp,chunk_tmp+STORE_CHUNK_ELEM,chunk_dout);
                    idx_dout = STORE_CHUNK_ELEM-idx_tmp;
                    if(total_size_out!=start_chunk_out)
                        chunk_manager.GetChunk(ten_dout->GetChunkId(start_chunk_out), chunk_tmp, (total_size_out-start_chunk_out) * sizeof(DtypeForCpuOp));
                    //assume total_size_out-start_chunk_out+idx_dout<=STORE_CHUNK_ELEM
                    idx_tmp = total_size_out - start_chunk_out;
                    copy(chunk_tmp, chunk_tmp+idx_tmp, chunk_dout+idx_dout);
                    //idx_dout
                }
            }
            // Transpose dout to image-minor layout to match chunk_in_trans/chunk_out_trans.
            if(if_use_SSE_out){
                transpose_block_SSE4x4(chunk_dout, chunk_dout_trans, outputhw, num_img_in_storechunk, 4);
            }
            else{
                transpose(chunk_dout, chunk_dout_trans, num_img_in_storechunk, outputhw);
            }
            // Gradients accumulate via MaxpoolbackAVX, so the buffer must start zeroed.
            fill(chunk_din_trans, chunk_din_trans + STORE_CHUNK_ELEM,0);
            for(uint32_t h = 0; h < input_height; ++h) {
                for(uint32_t w = 0; w < input_width; ++w) {
                    // (h_start, h_end) * (w_start, w_end) is the range that the input
                    // vector projects to.
                    const uint32_t h_start = (h < filter_height)
                                             ? 0
                                             : (h - filter_height) / row_stride + 1;
                    const uint32_t h_end = std::min(h / row_stride + 1, output_height);
                    const uint32_t w_start = (w < filter_width)
                                             ? 0
                                             : (w - filter_width) / col_stride + 1;
                    const uint32_t w_end = std::min(w / col_stride + 1, output_width);
                    // compute elementwise max
                    const uint32_t in_offset = (h * input_width + w)*num_img_in_storechunk;
                    for (uint32_t ph = h_start; ph < h_end; ++ph) {
                        const uint32_t out_offset_base = ph * output_width;
                        for (uint32_t pw = w_start; pw < w_end; ++pw) {
                            const uint32_t out_offset = (out_offset_base + pw) * num_img_in_storechunk;
                            // Routes dout to din where the input equals the pooled output (argmax match).
                            MaxpoolbackAVX(num_img_in_storechunk, chunk_in_trans + in_offset, chunk_out_trans + out_offset, chunk_din_trans + in_offset, chunk_dout_trans + out_offset);
                        }
                    }
                }
            }
            //transpose back to the original (row-major image) layout before storing
            transpose_block_SSE4x4(chunk_din_trans, chunk_din, num_img_in_storechunk ,inputhw, 8);
            chunk_manager.StoreChunk(ten_din->GetChunkId(start_chunk), chunk_din, num_elem_in * sizeof(float));
        };//end of chunk_op
        run_all_chunks_for_maxpool(chunk_op, STORE_CHUNK_ELEM, total_size, outputsize_in_storechunk, inputhw, outputhw);
    }//end maxpoolbackward
IdT FunId;
IdT TenIdin_trans;
IdT TenIdout_trans;
};
// Derive two uniform-ish floats in [0, 1) from the entropy of (x ^ y).
// Bit trick: OR-ing exponent 0x7F (== 127, biased exponent of 1.0f) with mantissa
// bits yields a float in [1, 2); subtracting 1.0f maps it to [0, 1).
// NOTE(review): `(x ^ y) >> 2` can still be up to 30 bits wide, so it may spill
// past the 23-bit mantissa into the exponent field — presumably intended to be
// masked like the sibling float_to_uniform(); confirm.
// Bug fix: the function is declared to return float but previously fell off the
// end without a return statement (undefined behavior in C++). It now returns `a`;
// existing callers that ignore the result are unaffected.
static inline float float2_to_uniform(uint32_t x, uint32_t y, float& a, float& b) {
    const union { uint32_t i; float d; } u = { .i = UINT32_C(0x7F) << 23 | ((x ^ y) >> 2) };
    const union { uint32_t i; float d; } v = { .i = UINT32_C(0x7F) << 23 | (((x ^ y) >> 5) ^ UINT32_C(0x7FFFFF))};
    a = u.d - 1.0f;
    b = v.d - 1.0f;
    return a;
}
// Input: Af
// Output: E
// E = AQ - U = Q(Af) - U
// test: E + U ~= Q(Af)
//void FusedQuantizeShare(shared_ptr<SecretTen> af_ten, shared_ptr<SecretTen> e_ten, uint64_t q_tag, uint64_t u_seed) {
// Quantize the float tensor Af to 8-bit range and subtract a PRG-derived share U:
//   E = Q(Af) - U  (mod PrimeLimit), mapped to a centered residue.
// The quantization exponent is chosen from the tensor's max |entry| and recorded
// in quantize_exp[q_tag] so the matching dequantize (FusedRecon) can undo it.
// e_arr receives E; af_ten itself is not modified.
void FusedQuantizeShare(shared_ptr<SecretTen> af_ten, DtypeForCpuOp* e_arr, uint64_t q_tag, uint64_t u_seed) {
    const int bits = 8;   // quantized word width
    const int ebit = 8;   // exponent width used to clamp `exp`
    const DtypeForCpuOp lower_limit = -pow(2, (bits - 1));      // -128
    const DtypeForCpuOp upper_limit = pow(2, (bits - 1)) - 1;   //  127
    const int num_elem_in_chunk = WORK_CHUNK_ELEM;
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp* store_chunk;
    ChunkGuard<DtypeForCpuOp> guard(StoreChunkPool::GetChunkPool(), store_chunk);
    // Pass 1: global max-abs over all chunks to pick the shared exponent.
    DtypeForCpuOp max_entry = 0;
    auto get_max_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        int chunk_id = af_ten->GetChunkId(start_store_chunk);
        chunk_manager.GetChunk(chunk_id, store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        MapEigenVector src_vecmap(store_chunk, num_elem_in_store_chunk);
        max_entry = std::max(max_entry, src_vecmap.cwiseAbs().maxCoeff());
    };
    run_all_chunks(get_max_chunk_op, STORE_CHUNK_ELEM, af_ten->GetNumElem());
    DtypeForCpuOp exp = (max_entry == 0) ? 0 : floor(log2(max_entry));
    // Clamp exp to the representable range of an ebit-bit exponent ([-128, 127] here).
    exp = std::min(std::max(exp, (DtypeForCpuOp) pow(-2, (ebit - 1))), (DtypeForCpuOp) pow(2, (ebit - 1) - 1));
    quantize_exp[q_tag] = exp;  // remembered for dequantization in FusedRecon
    const DtypeForCpuOp enlarge_factor = pow(2, -exp + (bits - 2));
    const DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
    const DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
    auto& xor_rnd = *get_fast_rng(q_tag);
    auto PrgState = af_ten->PrgStateHolder[u_seed];
    DtypeForCpuOp* tmp_chunk = (DtypeForCpuOp*)malloc(num_elem_in_chunk * sizeof(DtypeForCpuOp));
    // Pass 2: quantize each work chunk and subtract the PRG share in place.
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(af_ten->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        auto chunk_op = [&](int start, int num_elem_in_op) {
            float* af_chunk = store_chunk + start;
            float* e_chunk = e_arr + start_store_chunk + start;
            MapEigenTensor af_map = MapEigenTensor(af_chunk, 1, 1, 1, num_elem_in_op);
            MapEigenTensor tmp_map = MapEigenTensor(tmp_chunk, 1, 1, 1, num_elem_in_op);
            // Fill e_chunk with the PRG share U (raw bytes).
            get_r(PrgState, (uint8_t*) e_chunk, num_elem_in_op * sizeof(DtypeForCpuOp), 9);
#if QUANTIZE_MODE == STOCHASTIC
            // Stochastic rounding: reuse U's raw bits as uniform [0,1) dither, then floor.
            // xor_rnd.rand_like(tmp_chunk, num_elem_in_op);
            uint32_t* uint32_chunk = (uint32_t*) e_chunk;
            // uint32_t* uint32_chunk = reinterpret_cast<uint32_t*>(e_chunk);
            // for(size_t j = 0; j < num_elem_in_op; j++) tmp_chunk[j] = uint32_to_float(uint32_chunk[j]);
            for(size_t j = 0; j < num_elem_in_op; j++) tmp_chunk[j] = float_to_uniform(uint32_chunk[j]);
            // for(size_t j = 0; j < 10; j++) {
            //     printf("%f ", tmp_chunk[j]);
            // }
            // printf("\n");
            // for(size_t j = 0; j < num_elem_in_op; j++) tmp_chunk[j] = e_chunk[j];
            tmp_map = (af_map * enlarge_factor + tmp_map).floor().cwiseMax(lower_limit).cwiseMin(upper_limit);
#else
            tmp_map = (af_map * enlarge_factor).round().cwiseMax(lower_limit).cwiseMin(upper_limit);
#endif
            for(size_t j = 0; j < num_elem_in_op; j++) {
                // E = Q(Af) - U, reduced mod PrimeLimit and centered.
                // `mid`/`p` are module-level constants (not visible here) —
                // presumably mid == PrimeLimit/2 and p == PrimeLimit; confirm.
                e_chunk[j] = tmp_chunk[j] - e_chunk[j];
                e_chunk[j] -= floor(e_chunk[j] * invPLimit) * PLimit;
                e_chunk[j] = (e_chunk[j] >= mid) ? (e_chunk[j] - p) : e_chunk[j];
            }
        };
        run_all_chunks(chunk_op, WORK_CHUNK_ELEM, num_elem_in_store_chunk);
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, af_ten->GetNumElem());
    free(tmp_chunk);
}
// Input: Af
// Output: A1, E
// AQ = Q(Af)
// A0, U <- Random
// A1 = AQ - A0
// E = AQ - U
// test: E + U = A0 + A1 ~= AQ ~= Q(Af)
// Quantize Af and split the result two ways in one pass:
//   A1 = Q(Af) - A0  and  E = Q(Af) - U   (both mod PrimeLimit, centered),
// where A0 and U are PRG shares derived from a0_seed / u_seed.
// Same exponent selection/recording as FusedQuantizeShare; af_ten is unchanged.
void FusedQuantizeShare2(shared_ptr<SecretTen> af_ten, DtypeForCpuOp* a1_arr, DtypeForCpuOp* e_arr,
        uint64_t q_tag, uint64_t a0_seed, uint64_t u_seed) {
    const int bits = 8;   // quantized word width
    const int ebit = 8;   // exponent width used to clamp `exp`
    const DtypeForCpuOp lower_limit = -pow(2, (bits - 1));      // -128
    const DtypeForCpuOp upper_limit = pow(2, (bits - 1)) - 1;   //  127
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp* store_chunk;
    ChunkGuard<DtypeForCpuOp> guard(StoreChunkPool::GetChunkPool(), store_chunk);
    // Pass 1: global max-abs to pick the shared exponent.
    DtypeForCpuOp max_entry = 0;
    auto get_max_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(af_ten->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        MapEigenVector src_vecmap(store_chunk, num_elem_in_store_chunk);
        max_entry = std::max(max_entry, src_vecmap.cwiseAbs().maxCoeff());
    };
    run_all_chunks(get_max_chunk_op, STORE_CHUNK_ELEM, af_ten->GetNumElem());
    DtypeForCpuOp exp = (max_entry == 0) ? 0 : floor(log2(max_entry));
    exp = std::min(std::max(exp, (DtypeForCpuOp) pow(-2, (ebit - 1))), (DtypeForCpuOp) pow(2, (ebit - 1) - 1));
    quantize_exp[q_tag] = exp;  // remembered for dequantization in FusedRecon
    DtypeForCpuOp enlarge_factor = pow(2, -exp + (bits - 2));
    DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
    DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
    auto& xor_rnd = *get_fast_rng(q_tag);
    const int n_elem_in_chunk = WORK_CHUNK_ELEM;
    auto u_prg_state = af_ten->PrgStateHolder[u_seed];
    auto a0_prg_state = af_ten->PrgStateHolder[a0_seed];
    DtypeForCpuOp* tmp_chunk = (DtypeForCpuOp*)malloc(n_elem_in_chunk * sizeof(DtypeForCpuOp));
    // Pass 2: quantize each work chunk and subtract both PRG shares in place.
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(af_ten->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        auto chunk_op = [&](int start, int num_elem_in_op) {
            float* af_chunk = store_chunk + start;
            float* a1_chunk = a1_arr + start_store_chunk + start;
            float* e_chunk = e_arr + start_store_chunk + start;
            MapEigenTensor af_map = MapEigenTensor(af_chunk, 1, 1, 1, num_elem_in_op);
            MapEigenTensor tmp_map = MapEigenTensor(tmp_chunk, 1, 1, 1, num_elem_in_op);
            // a1_chunk <- A0 share, e_chunk <- U share (raw PRG bytes).
            get_r(a0_prg_state, (uint8_t*) a1_chunk, num_elem_in_op * sizeof(DtypeForCpuOp), 9);
            get_r(u_prg_state, (unsigned char*) e_chunk, num_elem_in_op * sizeof(DtypeForCpuOp), 0);
#if QUANTIZE_MODE == STOCHASTIC
            // Stochastic rounding: reuse U's raw bits as uniform [0,1) dither, then floor.
            // xor_rnd.rand_like(tmp_chunk, num_elem_in_op);
            uint32_t* uint32_chunk = (uint32_t*) e_chunk;
            // for(size_t j = 0; j < num_elem_in_op; j++) tmp_chunk[j] = uint32_to_float(uint32_chunk[j]);
            for(size_t j = 0; j < num_elem_in_op; j++) tmp_chunk[j] = float_to_uniform(uint32_chunk[j]);
            // for(size_t j = 0; j < num_elem_in_op; j++) tmp_chunk[j] = e_chunk[j];
            tmp_map = (af_map * enlarge_factor + tmp_map).floor().cwiseMax(lower_limit).cwiseMin(upper_limit);
#else
            tmp_map = (af_map * enlarge_factor).round().cwiseMax(lower_limit).cwiseMin(upper_limit);
#endif
            for(size_t j = 0; j < num_elem_in_op; j++) {
                // Centered mod-PrimeLimit reduction; `mid`/`p` are module-level
                // constants (not visible here) — presumably mid == PrimeLimit/2
                // and p == PrimeLimit; confirm.
                e_chunk[j] = tmp_chunk[j] - e_chunk[j];
                e_chunk[j] -= floor(e_chunk[j] * invPLimit) * PLimit;
                e_chunk[j] = (e_chunk[j] >= mid) ? (e_chunk[j] - p) : e_chunk[j];
                a1_chunk[j] = tmp_chunk[j] - a1_chunk[j];
                a1_chunk[j] -= floor(a1_chunk[j] * invPLimit) * PLimit;
                a1_chunk[j] = (a1_chunk[j] >= mid) ? (a1_chunk[j] - p) : a1_chunk[j];
            }
        };
        run_all_chunks(chunk_op, WORK_CHUNK_ELEM, num_elem_in_store_chunk);
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, af_ten->GetNumElem());
    free(tmp_chunk);
}
// Input: C', Ci
// Output: Cf
// Cf = dQ(C' + Ci)
// test: Cf ~= deQ(C' + Ci)
// Reconstruct and dequantize:  Cf = dQ(C' + Ci)  where cq_ten holds one share
// and c_left_arr the other. The shrink factor undoes the two quantization
// enlargements recorded for x_tag and y_tag (product of two quantized tensors).
// Note: cq_store_chunk is updated in place but intentionally NOT written back
// (the StoreChunk for cq_ten is commented out below); only cf_ten is stored.
void FusedRecon(shared_ptr<SecretTen> cf_ten, shared_ptr<SecretTen> cq_ten, DtypeForCpuOp* c_left_arr,
        uint64_t x_tag, uint64_t y_tag) {
    const int bits = 8;
    // Exponents recorded earlier by FusedQuantizeShare/FusedQuantizeShare2.
    const DtypeForCpuOp x_exp = quantize_exp[x_tag];
    const DtypeForCpuOp y_exp = quantize_exp[y_tag];
    const DtypeForCpuOp shrink_factor = pow(2, x_exp - (bits - 2) + y_exp - (bits - 2));
    const DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
    const DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
    const int total_num_elem = cf_ten->GetNumElem();
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp *cf_store_chunk, *cq_store_chunk;
    ChunkGuard<DtypeForCpuOp> cf_guard(StoreChunkPool::GetChunkPool(), cf_store_chunk);
    ChunkGuard<DtypeForCpuOp> cq_guard(StoreChunkPool::GetChunkPool(), cq_store_chunk);
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(cq_ten->GetChunkId(start_store_chunk), cq_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        auto chunk_op = [&](int start, int num_elem_in_op) {
            DtypeForCpuOp* cf_chunk = cf_store_chunk + start;
            DtypeForCpuOp* cq_chunk = cq_store_chunk + start;
            DtypeForCpuOp* c_left_chunk = c_left_arr + start_store_chunk + start;
            for(size_t j = 0; j < num_elem_in_op; j++) {
                // Sum the shares, reduce mod PrimeLimit into the centered range,
                // then scale back to float. `mid`/`p` are module-level constants
                // (presumably PrimeLimit/2 and PrimeLimit — confirm).
                cq_chunk[j] += c_left_chunk[j];
                cf_chunk[j] = cq_chunk[j];
                cf_chunk[j] -= floor(cf_chunk[j] * invPLimit) * PLimit;
                cf_chunk[j] = (cf_chunk[j] >= mid) ? (cf_chunk[j] - p) : cf_chunk[j];
                cf_chunk[j] *= shrink_factor;
            }
        };
        run_all_chunks(chunk_op, WORK_CHUNK_ELEM, num_elem_in_store_chunk);
        // chunk_manager.StoreChunk(cq_ten->GetChunkId(start_store_chunk), cq_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        chunk_manager.StoreChunk(cf_ten->GetChunkId(start_store_chunk), cf_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, total_num_elem);
}
extern "C" {
// Allocate a SecretTen with the given dimension block and register it under TenId.
void SecretInitTensor(IdT TenId, void *voidDims) {
    auto *dims = static_cast<DimsT *>(voidDims);
    SecretTenHolder[TenId] = make_shared<SecretTen>(TenId, dims);
}
// Copy host data into the tensor identified by TenId.
void SecretSetTen(IdT TenId, void *voidArr) {
    auto ten = GetTenById(TenId);
    ten->SetTen(static_cast<DtypeForCpuOp *>(voidArr));
}
// Copy the tensor identified by TenId out to host memory.
void SecretGetTen(IdT TenId, void *voidArr) {
    auto ten = GetTenById(TenId);
    ten->GetTen(static_cast<DtypeForCpuOp *>(voidArr));
}
// Register a PRG seed with the tensor.
void SecretSetSeed(IdT TenId, uint64_t RawSeed) {
    GetTenById(TenId)->SetSeed(RawSeed);
}
// Fill voidArr with pseudo-random values derived from RawSeed.
void SecretGetRandom(IdT TenId, void *voidArr, uint64_t RawSeed) {
    auto ten = GetTenById(TenId);
    ten->GetRandom(static_cast<DtypeForCpuOp *>(voidArr), RawSeed);
}
// Fill voidArr with the secret share derived from RawSeed.
void SecretGetShare(IdT TenId, void *voidArr, uint64_t RawSeed) {
    auto ten = GetTenById(TenId);
    ten->GetShare(static_cast<DtypeForCpuOp *>(voidArr), RawSeed);
}
// Element-wise modular addition of a host array into a stored tensor:
//   dst[j] = centered_mod(dst[j] + input[j], PrimeLimit).
// `mid`/`p` are module-level constants (not visible here) — presumably
// mid == PrimeLimit/2 and p == PrimeLimit, mapping into (-p/2, p/2); confirm.
void SecretAddFromCpu(void* inputArr, IdT dstId) {
    shared_ptr<SecretTen > StoreTensor = GetTenById(dstId);
    DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
    DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
    const int total_num_elem = StoreTensor->GetNumElem();
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp* store_chunk;
    ChunkGuard<DtypeForCpuOp> guard(StoreChunkPool::GetChunkPool(), store_chunk);
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        // Read-modify-write one store chunk of the destination tensor.
        chunk_manager.GetChunk(StoreTensor->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        auto chunk_op = [&](int start_chunk, int num_elem_in_op) {
            DtypeForCpuOp* output_arr = store_chunk + start_chunk;
            DtypeForCpuOp* input_arr = ((DtypeForCpuOp*) inputArr) + start_store_chunk + start_chunk;
            for(size_t j = 0; j < num_elem_in_op; j++) {
                output_arr[j] += input_arr[j];
                output_arr[j] -= floor(output_arr[j] * invPLimit) * PLimit;
                output_arr[j] = (output_arr[j] >= mid) ? (output_arr[j] - p) : output_arr[j];
            }
        };
        run_all_chunks(chunk_op, WORK_CHUNK_ELEM, num_elem_in_store_chunk);
        chunk_manager.StoreChunk(StoreTensor->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, total_num_elem);
}
// AVX ReLU: ten_out = max(ten_in, 0), streamed chunk by chunk.
// The comparison predicate 0x0e is _CMP_GT_OS (ordered greater-than), so the
// blend keeps the input where input > 0 and writes 0 elsewhere.
// NOTE(review): _mm256_load_ps / the stride-8 loop assume chunk buffers are
// 32-byte aligned and chunk sizes are multiples of 8 — presumably guaranteed by
// the chunk pool and STORE_CHUNK_ELEM; confirm.
// Fix: removed the unused, uninitialized local `chunk_in`.
void newrelu(IdT TenIdin, IdT TenIdout, uint64_t size){
    shared_ptr<SecretTen > ten_in = GetTenById(TenIdin);
    shared_ptr<SecretTen > ten_out = GetTenById(TenIdout);
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp* chunk_tmp;
    ChunkGuard<DtypeForCpuOp> guard_tmp(StoreChunkPool::GetChunkPool(), chunk_tmp);
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(ten_in->GetChunkId(start_store_chunk), chunk_tmp, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        for(uint64_t i=0;i<num_elem_in_store_chunk;i+=8){
            const __m256 inp8f = _mm256_load_ps(&chunk_tmp[i]);
            const __m256 if_gt = _mm256_cmp_ps(inp8f, zero8f, 0x0e);   // _CMP_GT_OS
            const __m256 res8f = _mm256_blendv_ps(zero8f, inp8f, if_gt);
            _mm256_stream_ps(&chunk_tmp[i], res8f);                    // non-temporal store
        }
        chunk_manager.StoreChunk(ten_out->GetChunkId(start_store_chunk), chunk_tmp, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, size);
}
// AVX ReLU backward: dL/dx = dL/dy where the forward output was non-zero,
// 0 where the forward output was exactly 0 (predicate 0x00 == _CMP_EQ_OQ).
void newreluback(IdT TenIdout, IdT TenIddout,IdT TenIddin, uint64_t size){
    shared_ptr<SecretTen> grad_in_ten = GetTenById(TenIddin);
    shared_ptr<SecretTen> grad_out_ten = GetTenById(TenIddout);
    shared_ptr<SecretTen> act_ten = GetTenById(TenIdout);
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp *grad_buf, *act_buf;
    ChunkGuard<DtypeForCpuOp> grad_guard(StoreChunkPool::GetChunkPool(), grad_buf);
    ChunkGuard<DtypeForCpuOp> act_guard(StoreChunkPool::GetChunkPool(), act_buf);
    auto per_chunk = [&](int chunk_start, int n_elem) {
        // Load the upstream gradient and the saved forward activation.
        chunk_manager.GetChunk(grad_out_ten->GetChunkId(chunk_start), grad_buf, n_elem * sizeof(DtypeForCpuOp));
        chunk_manager.GetChunk(act_ten->GetChunkId(chunk_start), act_buf, n_elem * sizeof(DtypeForCpuOp));
        for (uint64_t i = 0; i < n_elem; i += 8) {
            const __m256 act8f = _mm256_load_ps(&act_buf[i]);
            const __m256 grad8f = _mm256_load_ps(&grad_buf[i]);
            const __m256 is_zero = _mm256_cmp_ps(act8f, zero8f, 0x00);
            const __m256 res8f = _mm256_blendv_ps(grad8f, zero8f, is_zero);
            _mm256_stream_ps(&grad_buf[i], res8f);
        }
        chunk_manager.StoreChunk(grad_in_ten->GetChunkId(chunk_start), grad_buf, n_elem * sizeof(DtypeForCpuOp));
    };
    run_all_chunks(per_chunk, STORE_CHUNK_ELEM, size);
}
// Registry of max-pool buffers, keyed by function id.
unordered_map<IdT, shared_ptr<MaxpoolBuffer>> MaxpoolHolder;
// Look up a buffer by id. Note: operator[] default-inserts a null shared_ptr
// for unknown ids, so callers must have called initmaxpool() first.
shared_ptr<MaxpoolBuffer> GetBufferByIdM(IdT FunId) {
    return MaxpoolHolder[FunId];
}
// Create and register the buffer (and its transposed scratch tensor ids) for FunId.
void initmaxpool(IdT FunId, IdT TenIdin_trans, IdT TenIdout_trans){
    MaxpoolHolder[FunId] = make_shared<MaxpoolBuffer>(FunId, TenIdin_trans, TenIdout_trans);
}
// Entry point for the max-pool forward pass: resolves the tensors and the
// per-layer buffer, then delegates to MaxpoolBuffer::forward().
// row_pad/col_pad are accepted for ABI compatibility but unused here.
// Fix: the buffer is now looked up once instead of three separate
// GetBufferByIdM map lookups.
void newmaxpool(IdT FunId, IdT TenIdin, IdT TenIdout, uint32_t batch, uint32_t channel,uint32_t input_height, uint32_t input_width,uint32_t output_height, uint32_t output_width, uint32_t filter_height, uint32_t filter_width, uint32_t row_stride, uint32_t col_stride, uint32_t row_pad, uint32_t col_pad){
    shared_ptr<SecretTen > ten_in = GetTenById(TenIdin);
    shared_ptr<SecretTen > ten_out = GetTenById(TenIdout);
    auto buffer = GetBufferByIdM(FunId);
    shared_ptr<SecretTen> ten_in_trans = GetTenById(buffer->get_TenIdin_trans());
    shared_ptr<SecretTen> ten_out_trans = GetTenById(buffer->get_TenIdout_trans());
    buffer->forward(ten_in, ten_out,ten_in_trans, ten_out_trans, batch, channel,input_height,input_width,output_height,output_width,filter_height,filter_width,row_stride,col_stride);
}
// Entry point for the max-pool backward pass: resolves the gradient tensors and
// the per-layer buffer (with its transposed activations saved by forward()),
// then delegates to MaxpoolBuffer::backward().
// Fix: the buffer is now looked up once instead of three separate
// GetBufferByIdM map lookups; stale commented-out scaffolding removed.
void newmaxpoolback(IdT FunId, IdT TenIddout,IdT TenIddin, uint32_t batch, uint32_t channel,uint32_t input_height, uint32_t input_width,uint32_t output_height, uint32_t output_width, uint32_t filter_height, uint32_t filter_width, uint32_t row_stride, uint32_t col_stride){
    shared_ptr<SecretTen > ten_din = GetTenById(TenIddin);
    shared_ptr<SecretTen > ten_dout = GetTenById(TenIddout);
    auto buffer = GetBufferByIdM(FunId);
    shared_ptr<SecretTen> ten_in_trans = GetTenById(buffer->get_TenIdin_trans());
    shared_ptr<SecretTen> ten_out_trans = GetTenById(buffer->get_TenIdout_trans());
    buffer->backward(ten_din, ten_dout, ten_in_trans, ten_out_trans, batch, channel,input_height,input_width,output_height,output_width,filter_height,filter_width,row_stride,col_stride);
}
// Registry of batch-norm buffers, keyed by function id.
unordered_map<IdT, shared_ptr<BatchnormBuffer>> BatchnormHolder;
// Look up a buffer by id. Note: operator[] default-inserts a null shared_ptr
// for unknown ids, so callers must have called SecretInitBatchnorm() first.
shared_ptr<BatchnormBuffer> GetBufferByIdB(IdT FunId) {
    return BatchnormHolder[FunId];
}
// Create the batch-norm buffer for FunId, register it, and wire up all tensor
// ids plus the layer hyper-parameters via its init() routine.
void SecretInitBatchnorm(
        IdT FunId,
        IdT input, IdT output, IdT gamma, IdT beta,
        IdT der_input, IdT der_output, IdT der_gamma, IdT der_beta,
        IdT run_mean, IdT run_var, IdT cur_mean, IdT cur_var,
        IdT mu,
        uint32_t batch_, uint32_t channel_, uint32_t height_, uint32_t width_,
        int affine_, int is_cumulative_, float momentum_, float epsilon_) {
    auto& slot = BatchnormHolder[FunId];
    slot = make_shared<BatchnormBuffer>(FunId);
    slot->init(input, output, gamma, beta,
               der_input, der_output, der_gamma, der_beta,
               run_mean, run_var, cur_mean, cur_var,
               mu,
               batch_, channel_, height_, width_,
               affine_, is_cumulative_, momentum_, epsilon_);
}
// Run the batch-norm forward pass (Training toggles train vs. inference mode).
void SecretBatchnormForward(IdT FunId, int Training) {
    auto bn = GetBufferByIdB(FunId);
    bn->forward(Training);
}
// Run the batch-norm backward pass.
void SecretBatchnormBackward(IdT FunId) {
    auto bn = GetBufferByIdB(FunId);
    bn->backward();
}
// Store <- C0 + C1 + C2 (MainSeed + Seed1 + Seed2)
// DstArr <- MainSeed (either C0 or C1)
// Masking helper: Store <- Store + C0 + C1 + C2 where C0/C1/C2 are PRG streams
// from mainRawSeed/rawSeed0/rawSeed1; DstArr receives the (centered mod-p)
// main stream. `mid`/`p` are module-level constants — presumably PrimeLimit/2
// and PrimeLimit; confirm.
void SecretMaskingC01(IdT storeId, uint64_t mainRawSeed, uint64_t rawSeed0, uint64_t rawSeed1, DtypeForCpuOp *DstArr) {
    shared_ptr<SecretTen > StoreTensor = GetTenById(storeId);
    auto MainPrgState = StoreTensor->PrgStateHolder[mainRawSeed];
    auto PrgState0 = StoreTensor->PrgStateHolder[rawSeed0];
    auto PrgState1 = StoreTensor->PrgStateHolder[rawSeed1];
    const int total_num_elem = StoreTensor->GetNumElem();
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp* store_chunk;
    ChunkGuard<DtypeForCpuOp> guard(StoreChunkPool::GetChunkPool(), store_chunk);
    // 32-byte-aligned scratch, sized to one work chunk (matches chunk_op granularity).
    // memalign'd memory is released with free() below (valid on this platform's allocator).
    DtypeForCpuOp* aux_chunk_arr = (DtypeForCpuOp*)memalign(32, WORK_CHUNK_ELEM * sizeof(DtypeForCpuOp));
    DtypeForCpuOp PLimit = static_cast<DtypeForCpuOp>(PrimeLimit);
    DtypeForCpuOp invPLimit = static_cast<double>(1) / PrimeLimit;
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(StoreTensor->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        auto chunk_op = [&](int start, int num_elem_in_op) {
            DtypeForCpuOp* store_arr = store_chunk + start;
            DtypeForCpuOp* output_arr = DstArr + start_store_chunk + start;
            // Draw the three PRG streams: main -> caller's array, seed0 -> store, seed1 -> scratch.
            get_r(MainPrgState, (uint8_t*) output_arr, num_elem_in_op * sizeof(DtypeForCpuOp), 9);
            get_r(PrgState0, (uint8_t*) store_arr, num_elem_in_op * sizeof(DtypeForCpuOp), 9);
            get_r(PrgState1, (uint8_t*) aux_chunk_arr, num_elem_in_op * sizeof(DtypeForCpuOp), 9);
            for(size_t j = 0; j < num_elem_in_op; j++) {
                // Store accumulates all three streams; both results are reduced
                // mod PrimeLimit and re-centered.
                store_arr[j] += output_arr[j] + aux_chunk_arr[j];
                store_arr[j] -= floor(store_arr[j] * invPLimit) * PLimit;
                store_arr[j] = (store_arr[j] >= mid) ? (store_arr[j] - p) : store_arr[j];
                output_arr[j] -= floor(output_arr[j] * invPLimit) * PLimit;
                output_arr[j] = (output_arr[j] >= mid) ? (output_arr[j] - p) : output_arr[j];
            }
        };
        run_all_chunks(chunk_op, WORK_CHUNK_ELEM, num_elem_in_store_chunk);
        chunk_manager.StoreChunk(StoreTensor->GetChunkId(start_store_chunk), store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, total_num_elem);
    free(aux_chunk_arr);
}
// SGD with momentum + weight decay:
//   grad   += weight_decay * param
//   buf     = grad                              (first step)
//   buf     = momentum * buf + (1-dampening)*grad (subsequent steps)
//   param  -= lr * buf
// Assume momentum > 0.
// WARNING(review): if momentumId == 0, MomentumTensor is nullptr but is still
// dereferenced unconditionally below — callers must always pass a valid
// momentum tensor (matching the "assume momentum > 0" contract); confirm.
// The updated gradient chunk is NOT stored back; only param and momentum are.
void SecretSgdUpdate(IdT paramId, IdT gradId, IdT momentumId,
        DtypeForCpuOp lr, DtypeForCpuOp momentum, DtypeForCpuOp weight_decay,
        DtypeForCpuOp dampening, bool nesterov, bool first_momentum) {
    shared_ptr<SecretTen> ParamTensor = GetTenById(paramId);
    shared_ptr<SecretTen> GradTensor = GetTenById(gradId);
    shared_ptr<SecretTen> MomentumTensor = (momentumId != 0) ? GetTenById(momentumId) : nullptr;
    const int total_num_elem = ParamTensor->GetNumElem();
    auto& chunk_manager = TrustedChunkManager::getInstance();
    DtypeForCpuOp *param_store_chunk, *grad_store_chunk, *momentum_store_chunk;
    ChunkGuard<DtypeForCpuOp> param_guard(StoreChunkPool::GetChunkPool(), param_store_chunk);
    ChunkGuard<DtypeForCpuOp> grad_guard(StoreChunkPool::GetChunkPool(), grad_store_chunk);
    ChunkGuard<DtypeForCpuOp> momentum_guard(StoreChunkPool::GetChunkPool(), momentum_store_chunk);
    auto store_chunk_op = [&](int start_store_chunk, int num_elem_in_store_chunk) {
        chunk_manager.GetChunk(ParamTensor->GetChunkId(start_store_chunk), param_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        chunk_manager.GetChunk(GradTensor->GetChunkId(start_store_chunk), grad_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        chunk_manager.GetChunk(MomentumTensor->GetChunkId(start_store_chunk), momentum_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        auto chunk_op = [&](int start, int num_elem_in_op) {
            DtypeForCpuOp* param_arr = param_store_chunk + start;
            DtypeForCpuOp* grad_arr = grad_store_chunk + start;
            DtypeForCpuOp* momentum_arr = momentum_store_chunk + start;
            if (first_momentum) {
                // First step: momentum buffer is initialized to the (decayed) gradient.
                for(size_t j = 0; j < num_elem_in_op; j++) {
                    grad_arr[j] += weight_decay * param_arr[j];
                    momentum_arr[j] = grad_arr[j];
                    param_arr[j] -= lr * momentum_arr[j];
                }
            } else {
                for(size_t j = 0; j < num_elem_in_op; j++) {
                    grad_arr[j] += weight_decay * param_arr[j];
                    momentum_arr[j] = momentum_arr[j] * momentum + (1 - dampening) * grad_arr[j];
                    param_arr[j] -= lr * momentum_arr[j];
                }
            }
        };
        run_all_chunks(chunk_op, WORK_CHUNK_ELEM, num_elem_in_store_chunk);
        chunk_manager.StoreChunk(ParamTensor->GetChunkId(start_store_chunk), param_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
        chunk_manager.StoreChunk(MomentumTensor->GetChunkId(start_store_chunk), momentum_store_chunk, num_elem_in_store_chunk * sizeof(DtypeForCpuOp));
    };
    run_all_chunks(store_chunk_op, STORE_CHUNK_ELEM, total_num_elem);
}
// Stochastically quantize src into dst, recording the exponent under q_tag.
void SecretStochasticQuantize(IdT src_id, IdT dst_id, uint64_t q_tag) {
    auto src = GetTenById(src_id);
    auto dst = GetTenById(dst_id);
    quantize_stochastic(src, dst, q_tag);
}
// C wrapper around FusedQuantizeShare (quantize then blind with share U).
void SecretFusedQuantizeShare(IdT af_id, void* e_arr, uint64_t q_tag, uint64_t u_seed) {
    FusedQuantizeShare(GetTenById(af_id), static_cast<DtypeForCpuOp*>(e_arr), q_tag, u_seed);
}
// C wrapper around FusedQuantizeShare2 (quantize then emit the A1 and E shares).
void SecretFusedQuantizeShare2(IdT af_id, void* a1_arr, void* e_arr,
        uint64_t q_tag, uint64_t a0_seed, uint64_t u_seed) {
    FusedQuantizeShare2(GetTenById(af_id),
                        static_cast<DtypeForCpuOp*>(a1_arr),
                        static_cast<DtypeForCpuOp*>(e_arr),
                        q_tag, a0_seed, u_seed);
}
// C wrapper around FusedRecon (reconstruct shares and dequantize into cf).
void SecretFusedRecon(IdT cf_id, IdT cq_id, DtypeForCpuOp* c_left_arr, uint64_t x_tag, uint64_t y_tag) {
    FusedRecon(GetTenById(cf_id), GetTenById(cq_id), c_left_arr, x_tag, y_tag);
}
} // End of extern C
| 45.647402 | 303 | 0.656201 | goten-team |
38e1f5eafcc25c2f57ec7bc27db575cd4b52dbaf | 6,688 | cpp | C++ | Project/Tiger/src/IR.cpp | TheNetAdmin/tiger-compiler | ef09e538d23c0f59910d651cf6e1ea63a96d3112 | [
"MIT"
] | 4 | 2018-09-18T03:46:51.000Z | 2020-08-19T09:40:16.000Z | Project/Tiger/src/IR.cpp | TheNetAdmin/tiger-compiler | ef09e538d23c0f59910d651cf6e1ea63a96d3112 | [
"MIT"
] | null | null | null | Project/Tiger/src/IR.cpp | TheNetAdmin/tiger-compiler | ef09e538d23c0f59910d651cf6e1ea63a96d3112 | [
"MIT"
] | 4 | 2018-02-15T21:02:10.000Z | 2022-03-03T14:16:01.000Z | #include "IR.h"
//
// Created by Chege on 2017/6/1.
//
namespace IR
{
Stm::Stm(StmType type)
: stmType(type)
{
}
StmType Stm::getStmType() const
{
return stmType;
}
Exp::Exp(ExpType expType)
: Stm(EXP), expType(expType)
{
}
ExpType Exp::getExpType() const
{
return expType;
}
Seq::Seq(const std::shared_ptr<Stm> &left, const std::shared_ptr<Stm> &right)
: Stm(SEQ), left(left), right(right)
{
}
const std::shared_ptr<Stm> Seq::getLeft() const
{
return left;
}
const std::shared_ptr<Stm> Seq::getRight() const
{
return right;
}
Label::Label(const std::shared_ptr<Temporary::Label> &label)
: Stm(LABEL), label(label)
{
}
const std::shared_ptr<Temporary::Label> Label::getLabel() const
{
return label;
}
Jump::Jump(const std::shared_ptr<Exp> &exp, const std::shared_ptr<LabelList> &labels)
: Stm(JUMP), exp(exp), labels(labels)
{
}
const std::shared_ptr<Exp> Jump::getExp() const
{
return exp;
}
const std::shared_ptr<LabelList> Jump::getLabels() const
{
return labels;
}
CJump::CJump(ComparisonOp op, const std::shared_ptr<Exp> &left,
const std::shared_ptr<Exp> &right, const std::shared_ptr<Temporary::Label> &labelTrue,
const std::shared_ptr<Temporary::Label> &labelFalse)
: Stm(CJUMP), op(op), left(left), right(right),
labelTrue(labelTrue), labelFalse(labelFalse)
{
}
ComparisonOp CJump::getOp() const
{
return op;
}
const std::shared_ptr<Exp> CJump::getLeft() const
{
return left;
}
const std::shared_ptr<Exp> CJump::getRight() const
{
return right;
}
const std::shared_ptr<Temporary::Label> CJump::getLabelTrue() const
{
return labelTrue;
}
const std::shared_ptr<Temporary::Label> CJump::getLabelFalse() const
{
return labelFalse;
}
void CJump::setLabelTrue(const std::shared_ptr<Temporary::Label> &labelTrue)
{
CJump::labelTrue = labelTrue;
}
void CJump::setLabelFalse(const std::shared_ptr<Temporary::Label> &labelFalse)
{
CJump::labelFalse = labelFalse;
}
Move::Move(const std::shared_ptr<Exp> &dst,
const std::shared_ptr<Exp> &src) : Stm(MOVE), dst(dst), src(src)
{
}
const std::shared_ptr<Exp> Move::getDst() const
{
return dst;
}
const std::shared_ptr<Exp> Move::getSrc() const
{
return src;
}
Binop::Binop(ArithmeticOp op, const std::shared_ptr<Exp> &left, const std::shared_ptr<Exp> &right)
: Exp(BINOP), op(op), left(left), right(right)
{
}
ArithmeticOp Binop::getOp() const
{
return op;
}
const std::shared_ptr<Exp> Binop::getLeft() const
{
return left;
}
const std::shared_ptr<Exp> Binop::getRight() const
{
return right;
}
Mem::Mem(const std::shared_ptr<Exp> &exp) : Exp(MEM), exp(exp)
{
}
const std::shared_ptr<Exp> Mem::getExp() const
{
return exp;
}
Temp::Temp(const std::shared_ptr<Temporary::Temp> &temp)
: Exp(TEMP), temp(temp)
{
}
const std::shared_ptr<Temporary::Temp> Temp::getTemp() const
{
return temp;
}
// Eseq: executes statement `stm` for its side effects, then yields `exp`.
Eseq::Eseq(const std::shared_ptr<Stm> &stm, const std::shared_ptr<Exp> &exp)
	: Exp(ESEQ), stm(stm), exp(exp)
{
}
// Side-effecting statement evaluated first.
const std::shared_ptr<Stm> Eseq::getStm() const
{
	return stm;
}
// Result expression evaluated after the statement.
const std::shared_ptr<Exp> Eseq::getExp() const
{
	return exp;
}
// Name: a symbolic assembly label used as an expression (e.g. code address).
Name::Name(const std::shared_ptr<Temporary::Label> &label)
	: Exp(NAME), label(label)
{
}
// Label wrapped by this Name node.
const std::shared_ptr<Temporary::Label> Name::getLabel() const
{
	return label;
}
// Const: an integer literal expression.
Const::Const(int constt) : Exp(CONST), constt(constt)
{
}
// Literal integer value.
int Const::getConstt() const
{
	return constt;
}
// Call: invokes the function denoted by `fun` with argument list `args`.
Call::Call(const std::shared_ptr<Exp> &fun, const std::shared_ptr<ExpList> &args)
	: Exp(CALL), fun(fun), args(args)
{
}
// Callee expression (typically a Name or computed address).
const std::shared_ptr<Exp> Call::getFun() const
{
	return fun;
}
// Argument expression list.
const std::shared_ptr<ExpList> Call::getArgs() const
{
	return args;
}
// Prepends `head` to `tail`; allocates a fresh list when `tail` is null,
// so callers can build argument lists right-to-left starting from nullptr.
std::shared_ptr<ExpList>
makeExpList(std::shared_ptr<Exp> head, std::shared_ptr<ExpList> tail)
{
	auto list = tail ? tail : std::make_shared<ExpList>();
	list->push_front(head);
	return list;
}
// Factory helpers: thin wrappers around std::make_shared for each IR node,
// returning the node through its base (Stm or Exp) pointer.
std::shared_ptr<Stm> makeSeq(std::shared_ptr<Stm> left, std::shared_ptr<Stm> right)
{
	return std::make_shared<Seq>(left, right);
}
std::shared_ptr<Stm> makeLabel(std::shared_ptr<Temporary::Label> label)
{
	return std::make_shared<Label>(label);
}
std::shared_ptr<Stm>
makeJump(std::shared_ptr<Exp> exp, std::shared_ptr<LabelList> labels)
{
	return std::make_shared<Jump>(exp, labels);
}
std::shared_ptr<Stm>
makeCJump(ComparisonOp op, std::shared_ptr<Exp> left, std::shared_ptr<Exp> right,
          std::shared_ptr<Temporary::Label> labelTrue, std::shared_ptr<Temporary::Label> labelFalse)
{
	return std::make_shared<CJump>(op, left, right, labelTrue, labelFalse);
}
std::shared_ptr<Stm> makeMove(std::shared_ptr<Exp> dst, std::shared_ptr<Exp> src)
{
	return std::make_shared<Move>(dst, src);
}
// NOTE(review): returns the Exp directly as a Stm rather than wrapping it in
// an expression-statement node; this relies on Exp being convertible to Stm.
// Confirm against the class declarations in the header.
std::shared_ptr<Stm> makeExp(std::shared_ptr<Exp> exp)
{
	return exp;
}
std::shared_ptr<Exp>
makeBinop(ArithmeticOp op, std::shared_ptr<Exp> left, std::shared_ptr<Exp> right)
{
	return std::make_shared<Binop>(op, left, right);
}
std::shared_ptr<Exp> makeMem(std::shared_ptr<Exp> exp)
{
	return std::make_shared<Mem>(exp);
}
std::shared_ptr<Exp> makeTemp(std::shared_ptr<Temporary::Temp> temp)
{
	return std::make_shared<Temp>(temp);
}
std::shared_ptr<Exp> makeEseq(std::shared_ptr<Stm> stm, std::shared_ptr<Exp> exp)
{
	return std::make_shared<Eseq>(stm, exp);
}
std::shared_ptr<Exp> makeName(std::shared_ptr<Temporary::Label> label)
{
	return std::make_shared<Name>(label);
}
std::shared_ptr<Exp> makeConst(int constt)
{
	return std::make_shared<Const>(constt);
}
std::shared_ptr<Exp> makeCall(std::shared_ptr<Exp> fun, std::shared_ptr<ExpList> args)
{
	return std::make_shared<Call>(fun, args);
}
}
| 22.671186 | 104 | 0.580144 | TheNetAdmin |
38e4d38d50750c10109432ede6945a804cd9906f | 7,047 | cc | C++ | test/distributions.cc | TeoGiane/bayesmix | 43182d61c3f332aefb832426cc9e8e2b2394bd68 | [
"BSD-3-Clause"
] | null | null | null | test/distributions.cc | TeoGiane/bayesmix | 43182d61c3f332aefb832426cc9e8e2b2394bd68 | [
"BSD-3-Clause"
] | null | null | null | test/distributions.cc | TeoGiane/bayesmix | 43182d61c3f332aefb832426cc9e8e2b2394bd68 | [
"BSD-3-Clause"
] | null | null | null | #include "src/utils/distributions.h"
#include <gtest/gtest.h>
#include <Eigen/Dense>
#include <stan/math/prim.hpp>
#include <vector>
#include "src/utils/rng.h"
TEST(mix_dist, 1) {
auto& rng = bayesmix::Rng::Instance().get();
int nclus = 5;
Eigen::VectorXd weights1 =
stan::math::dirichlet_rng(Eigen::VectorXd::Ones(nclus), rng);
Eigen::VectorXd means1(nclus);
Eigen::VectorXd sds1(nclus);
for (int i = 0; i < nclus; i++) {
means1(i) = stan::math::normal_rng(0, 2, rng);
sds1(i) = stan::math::uniform_rng(0.1, 2.0, rng);
}
int nclus2 = 10;
Eigen::VectorXd weights2 =
stan::math::dirichlet_rng(Eigen::VectorXd::Ones(nclus2), rng);
Eigen::VectorXd means2(nclus2);
Eigen::VectorXd sds2(nclus2);
for (int i = 0; i < nclus2; i++) {
means2(i) = stan::math::normal_rng(0, 2, rng);
sds2(i) = stan::math::uniform_rng(0.1, 2.0, rng);
}
double dist = bayesmix::gaussian_mixture_dist(means1, sds1, weights1, means2,
sds2, weights2);
ASSERT_GE(dist, 0.0);
}
TEST(mix_dist, 2) {
int nclus = 5;
auto& rng = bayesmix::Rng::Instance().get();
Eigen::VectorXd weights1 =
stan::math::dirichlet_rng(Eigen::VectorXd::Ones(nclus), rng);
Eigen::VectorXd means1(nclus);
Eigen::VectorXd sds1(nclus);
for (int i = 0; i < nclus; i++) {
means1(i) = stan::math::normal_rng(0, 2, rng);
sds1(i) = stan::math::uniform_rng(0.1, 2.0, rng);
}
double dist_to_self = bayesmix::gaussian_mixture_dist(
means1, sds1, weights1, means1, sds1, weights1);
ASSERT_DOUBLE_EQ(dist_to_self, 0.0);
}
// Checks two equivalent evaluations of the quadratic form
// (x - mean)^T Sigma^{-1} (x - mean): the direct product versus the squared
// norm of U (x - mean), where U is the upper Cholesky factor of Sigma^{-1}.
// (Removed the unused local `double df = 15;` from the original.)
TEST(student_t, squareform) {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  // A*A^T + I is symmetric positive definite by construction.
  Eigen::MatrixXd sigma =
      (A * A.transpose()) + 1.0 * Eigen::MatrixXd::Identity(5, 5);
  Eigen::VectorXd mean = Eigen::VectorXd::Zero(5);
  Eigen::MatrixXd sigma_inv = stan::math::inverse_spd(sigma);
  Eigen::MatrixXd sigma_inv_chol =
      Eigen::LLT<Eigen::MatrixXd>(sigma_inv).matrixU();
  Eigen::VectorXd x = Eigen::VectorXd::Ones(5);
  double sq1 = (x - mean).transpose() * sigma_inv * (x - mean);
  double sq2 = (sigma_inv_chol * (x - mean)).squaredNorm();
  ASSERT_DOUBLE_EQ(sq1, sq2);
}
// The optimized multi_student_t_invscale_lpdf (taking the Cholesky factor of
// the inverse scale plus its log-determinant) must agree with Stan's
// reference multi_student_t_lpdf.
TEST(student_t, optimized) {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  Eigen::MatrixXd sigma =
      (A * A.transpose()) + 1.0 * Eigen::MatrixXd::Identity(5, 5);
  Eigen::VectorXd mean = Eigen::VectorXd::Zero(5);
  double df = 15;
  Eigen::VectorXd x = Eigen::VectorXd::Ones(5);
  double lpdf_stan = stan::math::multi_student_t_lpdf(x, df, mean, sigma);
  // std::cout << "lpdf_stan: " << lpdf_stan << std::endl;
  Eigen::MatrixXd sigma_inv = stan::math::inverse_spd(sigma);
  Eigen::MatrixXd sigma_inv_chol =
      Eigen::LLT<Eigen::MatrixXd>(sigma_inv).matrixU();
  Eigen::VectorXd diag = sigma_inv_chol.diagonal();
  // log det(Sigma^{-1}) = 2 * sum(log(diag(U))) for U the Cholesky factor.
  double logdet = 2 * log(diag.array()).sum();
  double our_lpdf = bayesmix::multi_student_t_invscale_lpdf(
      x, df, mean, sigma_inv_chol, logdet);
  // std::cout << "our_lpdf: " << our_lpdf << std::endl;
  ASSERT_LE(std::abs(our_lpdf - lpdf_stan), 0.001);
}
// Marginal posterior-predictive form of the NIW model: the rescaled Cholesky
// factor must reproduce both the quadratic form and the lpdf computed with
// the full dense scale matrix.
TEST(student_t, marginal) {
  double var_scaling = 0.1;
  double deg_free = 10;
  int dim = 3;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(dim, dim);
  Eigen::MatrixXd scale_inv =
      (A * A.transpose()) + 1.0 * Eigen::MatrixXd::Identity(dim, dim);
  // Student-t marginal parameters implied by the NIW prior.
  Eigen::MatrixXd sigma_n =
      scale_inv * (var_scaling + 1) / (var_scaling * (deg_free - dim + 1));
  double nu_n = deg_free - dim + 1;
  Eigen::VectorXd datum = Eigen::VectorXd::Ones(dim);
  Eigen::VectorXd mean = Eigen::VectorXd::Zero(dim);
  Eigen::MatrixXd scale = stan::math::inverse_spd(scale_inv);
  Eigen::MatrixXd scale_chol = Eigen::LLT<Eigen::MatrixXd>(scale).matrixU();
  // Rescale the Cholesky factor instead of forming sigma_n^{-1} explicitly.
  double coeff = (var_scaling + 1) / (var_scaling * (deg_free - dim + 1));
  Eigen::MatrixXd scale_chol_n = scale_chol / std::sqrt(coeff);
  Eigen::VectorXd diag = scale_chol_n.diagonal();
  double logdet = 2 * log(diag.array()).sum();
  double old_qf = (datum - mean).transpose() *
                  stan::math::inverse_spd(sigma_n) * (datum - mean);
  double new_qf = (scale_chol_n * (datum - mean)).squaredNorm();
  ASSERT_DOUBLE_EQ(old_qf, new_qf);
  double old_lpdf =
      stan::math::multi_student_t_lpdf(datum, nu_n, mean, sigma_n);
  double new_lpdf = bayesmix::multi_student_t_invscale_lpdf(
      datum, nu_n, mean, scale_chol_n, logdet);
  ASSERT_LE(std::abs(old_lpdf - new_lpdf), 0.001);
}
// The vectorized grid lpdf must match the scalar lpdf row by row
// (multivariate normal, precision parameterization).
TEST(mult_normal, lpdf_grid) {
  int dim = 3;
  Eigen::MatrixXd data = Eigen::MatrixXd::Random(20, dim);
  Eigen::VectorXd mean = Eigen::ArrayXd::LinSpaced(dim, 0.0, 10.0);
  Eigen::MatrixXd tmp = Eigen::MatrixXd::Random(dim + 1, dim);
  // tmp^T tmp + I is symmetric positive definite.
  Eigen::MatrixXd prec =
      tmp.transpose() * tmp + Eigen::MatrixXd::Identity(dim, dim);
  Eigen::MatrixXd prec_chol = Eigen::LLT<Eigen::MatrixXd>(prec).matrixU();
  Eigen::VectorXd diag = prec_chol.diagonal();
  double prec_logdet = 2 * log(diag.array()).sum();
  Eigen::VectorXd lpdfs = bayesmix::multi_normal_prec_lpdf_grid(
      data, mean, prec_chol, prec_logdet);
  for (int i = 0; i < 20; i++) {
    double curr = bayesmix::multi_normal_prec_lpdf(data.row(i), mean,
                                                   prec_chol, prec_logdet);
    ASSERT_DOUBLE_EQ(curr, lpdfs(i));
  }
}
// Same row-by-row agreement check for the multivariate Student-t
// (inverse-scale parameterization).
TEST(mult_t, lpdf_grid) {
  int dim = 3;
  Eigen::MatrixXd data = Eigen::MatrixXd::Random(20, dim);
  Eigen::VectorXd mean = Eigen::ArrayXd::LinSpaced(dim, 0.0, 10.0);
  Eigen::MatrixXd tmp = Eigen::MatrixXd::Random(dim + 1, dim);
  Eigen::MatrixXd invscale =
      tmp.transpose() * tmp + Eigen::MatrixXd::Identity(dim, dim);
  Eigen::MatrixXd invscale_chol =
      Eigen::LLT<Eigen::MatrixXd>(invscale).matrixU();
  Eigen::VectorXd diag = invscale_chol.diagonal();
  double invscale_logdet = 2 * log(diag.array()).sum();
  double df = 10;
  Eigen::VectorXd lpdfs = bayesmix::multi_student_t_invscale_lpdf_grid(
      data, df, mean, invscale_chol, invscale_logdet);
  for (int i = 0; i < 20; i++) {
    double curr = bayesmix::multi_student_t_invscale_lpdf(
        data.row(i), df, mean, invscale_chol, invscale_logdet);
    ASSERT_DOUBLE_EQ(curr, lpdfs(i));
  }
}
// Validates the Woodbury-identity lpdf for a low-rank-plus-diagonal
// covariance Sigma = Lambda Lambda^T + diag(sigma_diag) against Stan's
// dense multi_normal_lpdf.
TEST(lpdf_woodbury, 1) {
  int dim = 1000;
  int q = 10;
  auto& rng = bayesmix::Rng::Instance().get();
  Eigen::VectorXd mean(dim);
  Eigen::VectorXd datum(dim);
  Eigen::VectorXd sigma_diag(dim);
  Eigen::MatrixXd lambda(dim, q);
  // Use int indices to match the int bounds `dim` and `q`; the original
  // compared size_t against int (signed/unsigned mismatch warnings).
  for (int j = 0; j < dim; j++) {
    mean[j] = stan::math::normal_rng(0, 1, rng);
    sigma_diag[j] = stan::math::inv_gamma_rng(2.5, 1, rng);
    for (int i = 0; i < q; i++) {
      lambda(j, i) = stan::math::normal_rng(0, 1, rng);
    }
  }
  Eigen::MatrixXd cov =
      lambda * lambda.transpose() + Eigen::MatrixXd(sigma_diag.asDiagonal());
  datum = stan::math::multi_normal_rng(mean, cov, rng);
  double stan_lpdf = stan::math::multi_normal_lpdf(datum, mean, cov);
  double our_lpdf =
      bayesmix::multi_normal_lpdf_woodbury(datum, mean, sigma_diag, lambda);
  ASSERT_LE(std::abs(stan_lpdf - our_lpdf), 1e-10);
}
| 31.600897 | 79 | 0.648503 | TeoGiane |
38eca32354dc5f2fd7f50db6ee608ecde759de72 | 1,409 | cpp | C++ | src/generator/SwitchGenerator.cpp | jaydee-io/bnf2c | 453b9dec8d94f32eebf9df1ab9578da0b5c70d84 | [
"BSD-4-Clause"
] | null | null | null | src/generator/SwitchGenerator.cpp | jaydee-io/bnf2c | 453b9dec8d94f32eebf9df1ab9578da0b5c70d84 | [
"BSD-4-Clause"
] | null | null | null | src/generator/SwitchGenerator.cpp | jaydee-io/bnf2c | 453b9dec8d94f32eebf9df1ab9578da0b5c70d84 | [
"BSD-4-Clause"
] | null | null | null | ////////////////////////////////////////////////////////////////////////////////
// BNF2C
//
// This file is distributed under the 4-clause Berkeley Software Distribution
// License. See LICENSE for details.
////////////////////////////////////////////////////////////////////////////////
#include "generator/SwitchGenerator.h"
////////////////////////////////////////////////////////////////////////////////
// Emits a C/C++ `switch` over `switchOnExpr`; an optional `default:` branch
// running `defaultCode` is printed when `defaultCode` is non-empty.
SwitchGenerator::SwitchGenerator(Indenter & indenter, const std::string & switchOnExpr, const std::string & defaultCode)
    : m_indenter(indenter), m_switchOnExpr(switchOnExpr), m_defaultCode(defaultCode)
{
}
////////////////////////////////////////////////////////////////////////////////
// Prints "switch(expr) {" and deepens the shared indenter one level.
// Note: the method is const but mutates the Indenter through the reference
// member m_indenter, which is legal.
void SwitchGenerator::printBeginTo(std::ostream & os) const
{
    os << m_indenter << "switch(" << m_switchOnExpr << ")" << std::endl;
    os << m_indenter << "{" << std::endl;
    // NOTE(review): post-increment discards a copy of the Indenter; prefer
    // ++m_indenter if Indenter provides a prefix operator -- confirm.
    m_indenter++;
}
////////////////////////////////////////////////////////////////////////////////
// Prints the default branch (if any), restores the indent level, and closes
// the switch block.
void SwitchGenerator::printEndTo(std::ostream & os) const
{
    printDefaultTo(os);
    m_indenter--;
    os << m_indenter << "}" << std::endl;
}
////////////////////////////////////////////////////////////////////////////////
// Prints "default : <code>" only when default code was supplied.
void SwitchGenerator::printDefaultTo(std::ostream & os) const
{
    if(!m_defaultCode.empty())
        os << m_indenter << "default : " << m_defaultCode << std::endl;
}
| 38.081081 | 120 | 0.432221 | jaydee-io |
38ee1e8d1b6727b4cc07d51b0c016c9e57777c01 | 3,960 | cc | C++ | cc/animation/filter_animation_curve.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | cc/animation/filter_animation_curve.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | cc/animation/filter_animation_curve.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "cc/animation/filter_animation_curve.h"
#include "base/memory/ptr_util.h"
#include "ui/gfx/animation/keyframe/keyframed_animation_curve-inl.h"
namespace cc {
// Forwards the interpolated filter value at time `t` to the animation
// target, if one is attached.
void FilterAnimationCurve::Tick(base::TimeDelta t,
                                int property_id,
                                gfx::KeyframeModel* keyframe_model) const {
  if (target_) {
    target_->OnFilterAnimated(GetValue(t), property_id, keyframe_model);
  }
}
// Identifies this curve kind to the generic animation machinery.
int FilterAnimationCurve::Type() const {
  return gfx::AnimationCurve::FILTER;
}
const char* FilterAnimationCurve::TypeName() const {
  return "Filter";
}
// Checked downcasts from the generic AnimationCurve; the DCHECK guards the
// static_cast.
const FilterAnimationCurve* FilterAnimationCurve::ToFilterAnimationCurve(
    const gfx::AnimationCurve* c) {
  DCHECK_EQ(gfx::AnimationCurve::FILTER, c->Type());
  return static_cast<const FilterAnimationCurve*>(c);
}
FilterAnimationCurve* FilterAnimationCurve::ToFilterAnimationCurve(
    gfx::AnimationCurve* c) {
  DCHECK_EQ(AnimationCurve::FILTER, c->Type());
  return static_cast<FilterAnimationCurve*>(c);
}
// A single keyframe holding a FilterOperations value at a given time offset,
// with an optional easing (timing) function.
std::unique_ptr<FilterKeyframe> FilterKeyframe::Create(
    base::TimeDelta time,
    const FilterOperations& value,
    std::unique_ptr<gfx::TimingFunction> timing_function) {
  return base::WrapUnique(
      new FilterKeyframe(time, value, std::move(timing_function)));
}
FilterKeyframe::FilterKeyframe(
    base::TimeDelta time,
    const FilterOperations& value,
    std::unique_ptr<gfx::TimingFunction> timing_function)
    : Keyframe(time, std::move(timing_function)), value_(value) {}
FilterKeyframe::~FilterKeyframe() = default;
const FilterOperations& FilterKeyframe::Value() const {
  return value_;
}
// Deep copy; the timing function is cloned only when present.
std::unique_ptr<FilterKeyframe> FilterKeyframe::Clone() const {
  std::unique_ptr<gfx::TimingFunction> func;
  if (timing_function())
    func = timing_function()->Clone();
  return FilterKeyframe::Create(Time(), Value(), std::move(func));
}
// Smallest interesting tick interval for this curve (delegated to the shared
// keyframed-curve helper).
base::TimeDelta KeyframedFilterAnimationCurve::TickInterval() const {
  return ComputeTickInterval(timing_function_, scaled_duration(), keyframes_);
}
// Inserts a keyframe keeping the keyframe list sorted by time.
void KeyframedFilterAnimationCurve::AddKeyframe(
    std::unique_ptr<FilterKeyframe> keyframe) {
  InsertKeyframe(std::move(keyframe), &keyframes_);
}
// Span between first and last keyframe, scaled by the duration multiplier.
base::TimeDelta KeyframedFilterAnimationCurve::Duration() const {
  return (keyframes_.back()->Time() - keyframes_.front()->Time()) *
         scaled_duration();
}
// Deep copy of the curve: keyframes, timing function and scaled duration.
std::unique_ptr<gfx::AnimationCurve> KeyframedFilterAnimationCurve::Clone()
    const {
  std::unique_ptr<KeyframedFilterAnimationCurve> to_return =
      KeyframedFilterAnimationCurve::Create();
  for (const auto& keyframe : keyframes_)
    to_return->AddKeyframe(keyframe->Clone());
  if (timing_function_)
    to_return->SetTimingFunction(timing_function_->Clone());
  to_return->set_scaled_duration(scaled_duration());
  // NOTE(review): std::move is only needed here for pre-C++14 compilers; a
  // plain `return to_return;` would move implicitly through the converting
  // unique_ptr constructor on modern toolchains.
  return std::move(to_return);
}
// Evaluates the curve at time t: clamps outside the keyframe range, applies
// the curve-level timing function, finds the active segment and blends the
// two surrounding FilterOperations.
FilterOperations KeyframedFilterAnimationCurve::GetValue(
    base::TimeDelta t) const {
  if (t <= (keyframes_.front()->Time() * scaled_duration()))
    return keyframes_.front()->Value();
  if (t >= (keyframes_.back()->Time() * scaled_duration()))
    return keyframes_.back()->Value();
  t = TransformedAnimationTime(keyframes_, timing_function_, scaled_duration(),
                               t);
  size_t i = GetActiveKeyframe(keyframes_, scaled_duration(), t);
  double progress =
      TransformedKeyframeProgress(keyframes_, scaled_duration(), t, i);
  return keyframes_[i + 1]->Value().Blend(keyframes_[i]->Value(), progress);
}
std::unique_ptr<KeyframedFilterAnimationCurve>
KeyframedFilterAnimationCurve::Create() {
  return base::WrapUnique(new KeyframedFilterAnimationCurve);
}
KeyframedFilterAnimationCurve::KeyframedFilterAnimationCurve()
    : scaled_duration_(1.0) {}
KeyframedFilterAnimationCurve::~KeyframedFilterAnimationCurve() = default;
} // namespace cc
| 31.935484 | 79 | 0.732576 | zealoussnow |
38eec9b7f9f3239a097967416fa7f2d6fe070a22 | 10,011 | cpp | C++ | fluxes.cpp | rocketcrush/suchsolver | 18ffeaf13aa0c549f7081acc943bc1257e50c656 | [
"MIT"
] | 1 | 2017-11-07T17:45:52.000Z | 2017-11-07T17:45:52.000Z | fluxes.cpp | rocketcrush/suchsolver | 18ffeaf13aa0c549f7081acc943bc1257e50c656 | [
"MIT"
] | null | null | null | fluxes.cpp | rocketcrush/suchsolver | 18ffeaf13aa0c549f7081acc943bc1257e50c656 | [
"MIT"
] | null | null | null | #include <iostream>
#include <vector>
#include <cmath>
#include <data.h>
#include <constants.h>
#include <mesh.h>
#include <initialconditions.h>
using namespace std;
extern vector<node> n;
extern vector<element> e;
extern vector<type1> tp1;
extern vector<type2> tp2;
extern vector<type3> tp3;
void W(int i)
{
tp1[i].W[0] = e[i].ro;
tp1[i].W[1] = e[i].ro * e[i].u;
tp1[i].W[2] = e[i].ro * e[i].v;
tp1[i].W[3] = e[i].ro * e[i].et;
}
// Convective flux across the four faces of cell i using the current state.
// The form matches Van Leer flux-vector splitting: the flux is split into a
// forward part F+ (from the left/owner state) and a backward part F- (from
// the right/neighbour state), selected by the face-normal Mach numbers.
// Results are written to tp1[i].Fc[face][eq].
void Fc1(int i)
{
	double fenergyp, fenergym, fmassp, fmassm;
	double ML,MR;
	double VL, VR; //contravariant velocity of right state and left state
	double cL, cR; //speed of sound
	double roL, uL, vL, pL, etL;
	double roR, uR, vR, pR, etR;
	double vnx, vny;
	double Fp[4], Fm[4];
	// Left state: cell i itself.
	roL = e[i].ro;
	uL = e[i].u;
	vL = e[i].v;
	pL = e[i].p;
	etL = e[i].et;
	for(int d = 0; d < 4; d++)
	{
		// Right state: neighbour across face d.
		roR = e[e[i].neigh[d]].ro;
		uR = e[e[i].neigh[d]].u;
		vR = e[e[i].neigh[d]].v;
		pR = e[e[i].neigh[d]].p;
		etR = e[e[i].neigh[d]].et;
		// Unit face normal and face-normal (contravariant) velocities.
		vnx = e[i].vn[d].x;
		vny = e[i].vn[d].y;
		VL = vnx * uL + vny * vL;
		VR = vnx * uR + vny * vR;
		cL = sqrt(gama * pL / roL);
		cR = sqrt(gama * pR / roR);
		ML = VL / cL;
		MR = VR / cR;
		// Forward flux from the left state.
		if(ML >= 1.)
		{
			// Supersonic toward the face: full upwind flux.
			Fp[0] = roL * VL;
			Fp[1] = roL * uL * VL + vnx * pL;
			Fp[2] = roL * vL * VL + vny * pL;
			Fp[3] = roL * (etL + pL / roL) * VL;
		}
		else if(abs(ML) < 1)
		{
			// Subsonic: split positive-mass flux ~ rho*c*(M+1)^2/4.
			// Note: `abs` resolves to std::abs(double) via `using namespace std`.
			fmassp = roL * cL * pow((ML + 1), 2) / 4.;
			fenergyp = fmassp * ((pow(((gama - 1.) * VL + 2. * cL),2) / (2. * (pow(gama, 2) - 1.))) + (pow(uL, 2) + pow(vL, 2) - pow(VL, 2)) / 2.);
			Fp[0] = fmassp;
			Fp[1] = fmassp * (vnx * (-VL + 2. * cL) / gama + uL);
			Fp[2] = fmassp * (vny * (-VL + 2. * cL) / gama + vL);
			Fp[3] = fenergyp;
		}
		else
		{
			// Supersonic away from the face: no forward contribution.
			Fp[0] = 0;
			Fp[1] = 0;
			Fp[2] = 0;
			Fp[3] = 0;
		}
		// Backward flux from the right state (mirror of the above).
		if(MR >= 1.)
		{
			Fm[0] = 0;
			Fm[1] = 0;
			Fm[2] = 0;
			Fm[3] = 0;
		}
		else if(abs(MR) < 1)
		{
			fmassm = -roR * cR * pow((MR - 1), 2) / 4.;
			fenergym = fmassm * ((pow(((gama - 1.) * VR - 2. * cR),2) / (2. * (pow(gama, 2) - 1.))) + (pow(uR, 2) + pow(vR, 2) - pow(VR, 2)) / 2.);
			Fm[0] = fmassm;
			Fm[1] = fmassm * (vnx * (-VR - 2. * cR) / gama + uR);
			Fm[2] = fmassm * (vny * (-VR - 2. * cR) / gama + vR);
			Fm[3] = fenergym;
		}
		else
		{
			Fm[0] = roR * VR;
			Fm[1] = roR * uR * VR + vnx * pR;
			Fm[2] = roR * vR * VR + vny * pR;
			Fm[3] = roR * (etR + pR / roR) * VR;
		}
		// Total face flux = forward + backward contributions.
		for(int a = 0; a < 4; a++)
			tp1[i].Fc[d][a] = Fp[a] + Fm[a];
	}
}
// Stores the conservative state of cell i into the three-slot history used
// by the multi-stage scheme: t == 0 -> previous state, t == 1 -> current
// state, t == 2 -> new (updated) state.
void W3(int i, int t)
{
	if(t == 0)
	{
		tp3[i].W[0][t] = e[i].roprev;
		tp3[i].W[1][t] = e[i].roprev * e[i].uprev;
		tp3[i].W[2][t] = e[i].roprev * e[i].vprev;
		tp3[i].W[3][t] = e[i].roprev * e[i].etprev;
	}
	else if(t == 1)
	{
		tp3[i].W[0][t] = e[i].ro;
		tp3[i].W[1][t] = e[i].ro * e[i].u;
		tp3[i].W[2][t] = e[i].ro * e[i].v;
		tp3[i].W[3][t] = e[i].ro * e[i].et;
	}
	else if(t == 2)
	{
		tp3[i].W[0][t] = e[i].ronew;
		tp3[i].W[1][t] = e[i].ronew * e[i].unew;
		tp3[i].W[2][t] = e[i].ronew * e[i].vnew;
		tp3[i].W[3][t] = e[i].ronew * e[i].etnew;
	}
}
// Same Van Leer-style split flux as Fc1, but the LEFT state is taken from
// cell i's *new* (updated) fields while the right state still uses the
// neighbours' current fields. Writes into tp1[i].Fc like Fc1.
// NOTE(review): this is a near-duplicate of Fc1 differing only in which
// fields are read; a shared helper would remove the triplication.
void Fc1new(int i)
{
	double fenergyp, fenergym, fmassp, fmassm;
	double ML,MR;
	double VL, VR; //contravariant velocity of right state and left state
	double cL, cR; //speed of sound
	double roL, uL, vL, pL, etL;
	double roR, uR, vR, pR, etR;
	double vnx, vny;
	double Fp[4], Fm[4];
	// Left state: updated values of cell i.
	roL = e[i].ronew;
	uL = e[i].unew;
	vL = e[i].vnew;
	pL = e[i].pnew;
	etL = e[i].etnew;
	for(int d = 0; d < 4; d++)
	{
		// Right state: neighbour's current values.
		roR = e[e[i].neigh[d]].ro;
		uR = e[e[i].neigh[d]].u;
		vR = e[e[i].neigh[d]].v;
		pR = e[e[i].neigh[d]].p;
		etR = e[e[i].neigh[d]].et;
		vnx = e[i].vn[d].x;
		vny = e[i].vn[d].y;
		VL = vnx * uL + vny * vL;
		VR = vnx * uR + vny * vR;
		cL = sqrt(gama * pL / roL);
		cR = sqrt(gama * pR / roR);
		ML = VL / cL;
		MR = VR / cR;
		if(ML >= 1.)
		{
			Fp[0] = roL * VL;
			Fp[1] = roL * uL * VL + vnx * pL;
			Fp[2] = roL * vL * VL + vny * pL;
			Fp[3] = roL * (etL + pL / roL) * VL;
		}
		else if(abs(ML) < 1)
		{
			fmassp = roL * cL * pow((ML + 1), 2) / 4.;
			fenergyp = fmassp * ((pow(((gama - 1.) * VL + 2. * cL),2) / (2. * (pow(gama, 2) - 1.))) + (pow(uL, 2) + pow(vL, 2) - pow(VL, 2)) / 2.);
			Fp[0] = fmassp;
			Fp[1] = fmassp * (vnx * (-VL + 2. * cL) / gama + uL);
			Fp[2] = fmassp * (vny * (-VL + 2. * cL) / gama + vL);
			Fp[3] = fenergyp;
		}
		else
		{
			Fp[0] = 0;
			Fp[1] = 0;
			Fp[2] = 0;
			Fp[3] = 0;
		}
		if(MR >= 1.)
		{
			Fm[0] = 0;
			Fm[1] = 0;
			Fm[2] = 0;
			Fm[3] = 0;
		}
		else if(abs(MR) < 1)
		{
			fmassm = -roR * cR * pow((MR - 1), 2) / 4.;
			fenergym = fmassm * ((pow(((gama - 1.) * VR - 2. * cR),2) / (2. * (pow(gama, 2) - 1.))) + (pow(uR, 2) + pow(vR, 2) - pow(VR, 2)) / 2.);
			Fm[0] = fmassm;
			Fm[1] = fmassm * (vnx * (-VR - 2. * cR) / gama + uR);
			Fm[2] = fmassm * (vny * (-VR - 2. * cR) / gama + vR);
			Fm[3] = fenergym;
		}
		else
		{
			Fm[0] = roR * VR;
			Fm[1] = roR * uR * VR + vnx * pR;
			Fm[2] = roR * vR * VR + vny * pR;
			Fm[3] = roR * (etR + pR / roR) * VR;
		}
		for(int a = 0; a < 4; a++)
			tp1[i].Fc[d][a] = Fp[a] + Fm[a];
	}
}
// Variant of Fc1new where BOTH sides of each face use the updated ("new")
// state: cell i and its neighbours. Writes into tp1[i].Fc.
void Fc1new1(int i)
{
	double fenergyp, fenergym, fmassp, fmassm;
	double ML,MR;
	double VL, VR; //contravariant velocity of right state and left state
	double cL, cR; //speed of sound
	double roL, uL, vL, pL, etL;
	double roR, uR, vR, pR, etR;
	double vnx, vny;
	double Fp[4], Fm[4];
	roL = e[i].ronew;
	uL = e[i].unew;
	vL = e[i].vnew;
	pL = e[i].pnew;
	etL = e[i].etnew;
	for(int d = 0; d < 4; d++)
	{
		// Right state: neighbour's updated values.
		roR = e[e[i].neigh[d]].ronew;
		uR = e[e[i].neigh[d]].unew;
		vR = e[e[i].neigh[d]].vnew;
		pR = e[e[i].neigh[d]].pnew;
		etR = e[e[i].neigh[d]].etnew;
		vnx = e[i].vn[d].x;
		vny = e[i].vn[d].y;
		VL = vnx * uL + vny * vL;
		VR = vnx * uR + vny * vR;
		cL = sqrt(gama * pL / roL);
		cR = sqrt(gama * pR / roR);
		ML = VL / cL;
		MR = VR / cR;
		if(ML >= 1.)
		{
			Fp[0] = roL * VL;
			Fp[1] = roL * uL * VL + vnx * pL;
			Fp[2] = roL * vL * VL + vny * pL;
			Fp[3] = roL * (etL + pL / roL) * VL;
		}
		else if(abs(ML) < 1)
		{
			fmassp = roL * cL * pow((ML + 1), 2) / 4.;
			fenergyp = fmassp * ((pow(((gama - 1.) * VL + 2. * cL),2) / (2. * (pow(gama, 2) - 1.))) + (pow(uL, 2) + pow(vL, 2) - pow(VL, 2)) / 2.);
			Fp[0] = fmassp;
			Fp[1] = fmassp * (vnx * (-VL + 2. * cL) / gama + uL);
			Fp[2] = fmassp * (vny * (-VL + 2. * cL) / gama + vL);
			Fp[3] = fenergyp;
		}
		else
		{
			Fp[0] = 0;
			Fp[1] = 0;
			Fp[2] = 0;
			Fp[3] = 0;
		}
		if(MR >= 1.)
		{
			Fm[0] = 0;
			Fm[1] = 0;
			Fm[2] = 0;
			Fm[3] = 0;
		}
		else if(abs(MR) < 1)
		{
			fmassm = -roR * cR * pow((MR - 1), 2) / 4.;
			fenergym = fmassm * ((pow(((gama - 1.) * VR - 2. * cR),2) / (2. * (pow(gama, 2) - 1.))) + (pow(uR, 2) + pow(vR, 2) - pow(VR, 2)) / 2.);
			Fm[0] = fmassm;
			Fm[1] = fmassm * (vnx * (-VR - 2. * cR) / gama + uR);
			Fm[2] = fmassm * (vny * (-VR - 2. * cR) / gama + vR);
			Fm[3] = fenergym;
		}
		else
		{
			Fm[0] = roR * VR;
			Fm[1] = roR * uR * VR + vnx * pR;
			Fm[2] = roR * vR * VR + vny * pR;
			Fm[3] = roR * (etR + pR / roR) * VR;
		}
		for(int a = 0; a < 4; a++)
			tp1[i].Fc[d][a] = Fp[a] + Fm[a];
	}
}
// Viscous flux across the four faces of cell i, written to tp2[i].Fv.
// Face values are simple arithmetic means of the two adjacent cells;
// gradients are one-sided finite differences projected on the cell-to-cell
// vector r[d]. Non-dimensional stresses/heat flux use Reinf, Pr, Machinf.
//
// Fix: the original also computed face-averaged ro, p and et that were never
// used, and its pressure average even mixed fields
// ((e[i].ro + neighbour.p) / 2). Those dead, buggy locals are removed;
// the emitted fluxes are unchanged.
void Fv1(int i)
{
	double u, v, mu;
	double vnx, vny;
	double gradTx, gradTy, gradux, graduy, gradvx, gradvy, qx, qy;
	double taoxx, taoxy, taoyy;
	for(int d = 0; d < 4; d++)
	{
		// Unit face normal.
		vnx = e[i].vn[d].x;
		vny = e[i].vn[d].y;
		// Face-averaged velocity and viscosity (the only averages the
		// viscous terms actually need).
		u = (e[i].u + e[e[i].neigh[d]].u) / 2.;
		v = (e[i].v + e[e[i].neigh[d]].v) / 2.;
		mu = (e[i].mu + e[e[i].neigh[d]].mu) / 2.;
		// Directional derivatives along the cell-to-neighbour vector r[d].
		gradux = (e[e[i].neigh[d]].u - e[i].u) / e[i].rm[d] * e[i].r[d].x / e[i].rm[d];
		graduy = (e[e[i].neigh[d]].u - e[i].u) / e[i].rm[d] * e[i].r[d].y / e[i].rm[d];
		gradvx = (e[e[i].neigh[d]].v - e[i].v) / e[i].rm[d] * e[i].r[d].x / e[i].rm[d];
		gradvy = (e[e[i].neigh[d]].v - e[i].v) / e[i].rm[d] * e[i].r[d].y / e[i].rm[d];
		gradTx = (e[e[i].neigh[d]].T - e[i].T) / e[i].rm[d] * e[i].r[d].x / e[i].rm[d];
		gradTy = (e[e[i].neigh[d]].T - e[i].T) / e[i].rm[d] * e[i].r[d].y / e[i].rm[d];
		// Newtonian stress tensor (Stokes hypothesis) and Fourier heat flux.
		taoxx = mu / Reinf * (4. / 3. * gradux - 2. / 3. * gradvy);
		taoxy = mu / Reinf * (graduy + gradvx);
		taoyy = mu / Reinf * (4. / 3. * gradvy - 2. / 3. * gradux);
		qx = mu / (Reinf * Pr * (gama - 1.) * pow(Machinf,2)) * gradTx;
		qy = mu / (Reinf * Pr * (gama - 1.) * pow(Machinf,2)) * gradTy;
		// Viscous flux components projected on the face normal.
		tp2[i].Fv[d][0] = 0;
		tp2[i].Fv[d][1] = vnx * taoxx + vny * taoxy;
		tp2[i].Fv[d][2] = vnx * taoxy + vny * taoyy;
		tp2[i].Fv[d][3] = vnx * (u * taoxx + v * taoxy + qx) + vny * (u * taoxy + v * taoyy + qy);
	}
}
| 18.888679 | 145 | 0.386775 | rocketcrush |
38ef2670a4f65f2b81725468725d81d53a46c3c9 | 392 | hpp | C++ | 08_pass-by-reference/include/vehicle.hpp | JuliusDiestra/cpp-sandbox | 6fa3bcb2a284e58136168e1952a8a54621232621 | [
"MIT"
] | null | null | null | 08_pass-by-reference/include/vehicle.hpp | JuliusDiestra/cpp-sandbox | 6fa3bcb2a284e58136168e1952a8a54621232621 | [
"MIT"
] | null | null | null | 08_pass-by-reference/include/vehicle.hpp | JuliusDiestra/cpp-sandbox | 6fa3bcb2a284e58136168e1952a8a54621232621 | [
"MIT"
] | null | null | null | #ifndef TOKEN_VEHICLE_H_
#define TOKEN_VEHICLE_H_
#include <iostream>
#include <memory>
// Minimal kinematic state holder: a velocity and an acceleration with
// getter/setter access. Units are not fixed here; the caller defines them.
class Vehicle
{
public:
    Vehicle();
    // Read accessors.
    float GetVelocity();
    float GetAcceleration();
    // Write accessors.
    // NOTE(review): the parameter names velocity_/acceleration_ mirror the
    // private member names; consider renaming to avoid confusion.
    void SetVelocity(float velocity_);
    void SetAcceleration(float acceleration_);
private:
    float velocity_;       // current velocity
    float acceleration_;   // current acceleration
};
#endif // TOKEN_VEHICLE_H_
| 17.818182 | 50 | 0.660714 | JuliusDiestra |
38f3ed6d74edc2ff689deff592ba87296c98c9f6 | 3,771 | cpp | C++ | libs/assign/v2/speed/tools.cpp | rogard/assign_v2 | 8735f57177dbee57514b4e80c498dd4b89f845e5 | [
"BSL-1.0"
] | null | null | null | libs/assign/v2/speed/tools.cpp | rogard/assign_v2 | 8735f57177dbee57514b4e80c498dd4b89f845e5 | [
"BSL-1.0"
] | null | null | null | libs/assign/v2/speed/tools.cpp | rogard/assign_v2 | 8735f57177dbee57514b4e80c498dd4b89f845e5 | [
"BSL-1.0"
] | null | null | null | ///////////////////////////////////////////////////////////////////////////////
// Copyright 2010 Manuel Peinado Gallego //
// Distributed under the Boost Software License, Version 1.0. (See //
// accompanying file LICENSE_1_0.txt or copy at //
// http://www.boost.org/LICENSE_1_0.txt) //
///////////////////////////////////////////////////////////////////////////////
#include <ctime>
#include <algorithm>
#include <string>
#include <vector>
#include <iterator>
#include <cstdlib>
#include <boost/bind.hpp>
#include <libs/assign/v2/speed/tools.h>
// http://code.google.com/p/truffle/source/browse/trunk/include/mpg/TimeIt.h
// http://code.google.com/p/truffle/source/browse/trunk/include/mpg/Random.h
// http://www.eternallyconfuzzled.com/arts/jsw_art_rand.aspx
// Maps a raw std::rand() sample onto the half-open interval [0, 1).
inline double uniform_deviate ( int seed )
{
    const double scale = 1.0 / ( RAND_MAX + 1.0 );
    return scale * seed;
}
// Uniform integer in the half-open range [M, N).
inline int rand(int M, int N) // Range [M..N)
{
    const double fraction = uniform_deviate(std::rand());
    return static_cast<int>(M + fraction * (N - M));
}
// Uniform lowercase letter 'a'..'z'.
char rand_letter()
{
    return static_cast<char>(rand('a', 'z' + 1));
}
// Random lowercase string of exactly `len` characters.
std::string rand_str(int len)
{
    std::string result;
    result.reserve(len);
    for (int i = 0; i < len; ++i)
        result.push_back(rand_letter());
    return result;
}
// Builds a vector of random length in [1, max_n), each element a uniform
// integer in [0, 20).
// NOTE(review): mpg::rand is referenced here but namespace mpg is defined
// further down in this file -- presumably it is declared in the included
// header; confirm, otherwise this would not compile as a standalone TU.
std::vector<int>
rand_vec(int max_n)
{
    std::vector<int> result(
        (std::size_t)mpg::rand(1, max_n)
    );
    // boost::bind fixes both arguments, yielding a nullary generator
    // equivalent to calling mpg::rand(0, 20) per element.
    std::generate(
        result.begin(),
        result.end(),
        boost::bind(
            &mpg::rand,
            0,
            20
        )
    );
    return result;
}
// Simple wall-clock micro-benchmark helpers built on std::clock.
namespace mpg
{
    namespace detail
    {
        // Converts a std::clock_t difference to seconds.
        double clock_diff_to_sec(long clock_diff)
        {
            return double(clock_diff) / CLOCKS_PER_SEC;
        }
        // Runs `proc` N times; if the total elapsed time is under 0.2 s the
        // measurement is retried with 5x the iterations until it is long
        // enough to be meaningful. Returns mean time per call in microseconds.
        template<class Proc>
        double time_it_impl(Proc proc, int N) // returns time in microseconds
        {
            std::clock_t const start = std::clock();
            for(int i = 0; i < N; ++i)
                proc();
            std::clock_t const end = std::clock();
            if(clock_diff_to_sec(end - start) < .2)
                return time_it_impl(proc, N * 5);
            return clock_diff_to_sec(end - start) * (1e6 / N);
        }
        // Same as above, but stores proc's return value into `result` so the
        // compiler cannot optimise the call away.
        template<class Proc, class Result>
        double time_it_impl(Proc proc, Result & result, int N) // returns time in microseconds
        {
            std::clock_t const start = std::clock();
            for(int i = 0; i < N; ++i)
                result = proc();
            std::clock_t const end = std::clock();
            if(clock_diff_to_sec(end - start) < .2)
                return time_it_impl(proc, result, N * 5);
            return clock_diff_to_sec(end - start) * (1e6 / N);
        }
    }
    // Public entry points: start with a single iteration and let the
    // implementation scale the iteration count up as needed.
    template<class Proc>
    double time_it(Proc proc) // returns time in microseconds
    {
        return detail::time_it_impl(proc, 1);
    }
    template<class Proc, class Result>
    double time_it(Proc proc, Result & result) // returns time in microseconds
    {
        return detail::time_it_impl(proc, result, 1);
    }
}
// Random helpers duplicated inside the mpg namespace (mirrors the free
// functions defined above in this file).
namespace mpg
{
    // Uniform double in [0, 1].
    inline double rand_dbl()
    {
        return double(::rand()) / RAND_MAX;
    }
    // Uniform double in [M, N].
    inline double rand_dbl(double M, double N)
    {
        const double t = rand_dbl();
        return M + t * (N - M);
    }
    // http://www.eternallyconfuzzled.com/arts/jsw_art_rand.aspx
    // Uniform integer in the half-open range [M, N).
    inline int rand(int M, int N) // Range (M..N)
    {
        const double fraction = std::rand() * ( 1.0 / ( RAND_MAX + 1.0 ));
        return static_cast<int>(M + fraction * (N - M));
    }
    // Uniform lowercase letter 'a'..'z'.
    inline char rand_letter()
    {
        return static_cast<char>(rand('a', 'z' + 1));
    }
    // Random lowercase string of exactly `len` characters.
    inline std::string rand_str(int len)
    {
        std::string result;
        result.reserve(len);
        std::generate_n(std::back_inserter(result), len, &rand_letter);
        return result;
    }
}
| 27.129496 | 94 | 0.529568 | rogard |
38f659577afa98b56ac96286e15c1d72bdd86810 | 475 | cpp | C++ | src/LibCraft/renderEngine/ibo.cpp | Kenny38GH/Test | 24c0277de8f98a3b0b3b8a90a300a321a485684c | [
"MIT"
] | 1 | 2021-11-24T16:49:48.000Z | 2021-11-24T16:49:48.000Z | src/LibCraft/renderEngine/ibo.cpp | leodlplq/IMACraft | 5fec1729238e7e428bd39543dfd1fad521e16047 | [
"MIT"
] | null | null | null | src/LibCraft/renderEngine/ibo.cpp | leodlplq/IMACraft | 5fec1729238e7e428bd39543dfd1fad521e16047 | [
"MIT"
] | null | null | null | //
// Created by leodlplq on 18/11/2021.
//
#include "LibCraft/renderEngine/include/ibo.hpp"
ibo::ibo(GLuint *vertices, GLsizeiptr size) {
glGenBuffers(1, &_id);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _id);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, size, vertices, GL_STATIC_DRAW);
}
void ibo::bind() {
glBindBuffer(GL_ARRAY_BUFFER, _id);
}
void ibo::unbind() {
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void ibo::deleteIbo() {
glDeleteBuffers(1, &_id);
}
| 19 | 74 | 0.701053 | Kenny38GH |
38fa76819bdbf20fe7f3616974344595930a6254 | 3,293 | hpp | C++ | src/libsnw_event/future.hpp | Sojourn/snw | e2c5a2bfbf5ad721c01a681c4e094aa35f8c010f | [
"MIT"
] | null | null | null | src/libsnw_event/future.hpp | Sojourn/snw | e2c5a2bfbf5ad721c01a681c4e094aa35f8c010f | [
"MIT"
] | null | null | null | src/libsnw_event/future.hpp | Sojourn/snw | e2c5a2bfbf5ad721c01a681c4e094aa35f8c010f | [
"MIT"
] | null | null | null | #include <cassert>
template<typename T>
snw::future<T>::future()
: promise_(nullptr)
, state_(state::broken)
{
}
template<typename T>
snw::future<T>::future(promise<T>* promise)
: promise_(promise)
, state_(state::waiting)
{
}
template<typename T>
snw::future<T>::future(T value)
: promise_(nullptr)
, state_(state::ready)
{
value_.create(std::move(value));
}
template<typename T>
snw::future<T>::future(future&& other)
: promise_(other.promise_)
, state_(other.state_)
{
if(promise_) {
promise_->future_ = this;
}
if(state_ == state::ready) {
value_.create(std::move(*other.value_));
other.value_.destroy();
}
other.promise_ = nullptr;
other.state_ = state::broken;
}
template<typename T>
snw::future<T>::~future() {
if(promise_) {
promise_->future_ = nullptr;
}
if(state_ == state::ready) {
value_.destroy();
}
}
template<typename T>
snw::future<T>& snw::future<T>::operator=(future&& rhs) {
if(this != &rhs) {
// unlink
if(promise_) {
promise_->future_ = nullptr;
}
// clear value
if(state_ == state::ready) {
value_.destroy();
}
// relink
if((promise_ = rhs.promise_)) {
promise_->future_ = this;
}
rhs.promise_ = nullptr;
// take value
if((state_ = rhs.state_) == state::ready) {
value_.create(std::move(*rhs.value_));
rhs.value_.destroy();
}
rhs.state_ = state::broken;
}
return *this;
}
// True if no value can ever arrive (no promise, no stored value).
template<typename T>
bool snw::future<T>::is_broken() const {
    return state_ == state::broken;
}
// True while the linked promise has not yet supplied a value.
template<typename T>
bool snw::future<T>::is_waiting() const {
    return state_ == state::waiting;
}
// True once a value has been stored.
template<typename T>
bool snw::future<T>::has_value() const {
    return state_ == state::ready;
}
// Access to the stored value; asserts that a value is present.
template<typename T>
T& snw::future<T>::value() {
    assert(has_value());
    return *value_;
}
template<typename T>
const T& snw::future<T>::value() const {
    assert(has_value());
    return *value_;
}
// Detached promise: nothing to fulfil.
template<typename T>
snw::promise<T>::promise()
    : future_(nullptr)
{
}
// Promise linked to the future it will fulfil.
template<typename T>
snw::promise<T>::promise(future<T>* future)
    : future_(future)
{
}
// Move constructor: takes over the link and re-points the future at this
// promise; the source promise is left detached.
template<typename T>
snw::promise<T>::promise(promise&& other)
    : future_(other.future_)
{
    if(future_) {
        other.future_ = nullptr;
        future_->promise_ = this;
    }
}
// Detaches from the future so it will not dereference a dead promise.
template<typename T>
snw::promise<T>::~promise() {
    if(future_) {
        future_->promise_ = nullptr;
    }
}
// Move assignment: drop our current link, then steal rhs's link and
// re-point the future at this promise.
template<typename T>
snw::promise<T>& snw::promise<T>::operator=(promise<T>&& rhs) {
    if(this != &rhs) {
        // unlink
        if(future_) {
            future_->promise_ = nullptr;
        }
        // relink
        if((future_ = rhs.future_)) {
            rhs.future_ = nullptr;
            future_->promise_ = this;
        }
    }
    return *this;
}
// Stores `value` into the linked future (if still alive) and marks it
// ready; asserts against double fulfilment.
template<typename T>
void snw::promise<T>::set_value(T value) {
    if(future_) {
        assert(!future_->has_value());
        future_->value_.create(std::move(value));
        future_->state_ = future<T>::state::ready;
    }
}
template<typename T>
snw::future<T> snw::make_ready_future(T value) {
return snw::future<T>(value);
}
| 19.485207 | 63 | 0.57607 | Sojourn |
38fd5226cfb72ff89b1201543e35bcbe39bbbdd8 | 1,051 | cpp | C++ | test/ParamResponseTest.cpp | DronMDF/zond | c882400aff5f41569e6deee609ddbe7b2b1bc378 | [
"MIT"
] | 5 | 2018-11-14T19:46:49.000Z | 2022-01-08T09:00:45.000Z | test/ParamResponseTest.cpp | DronMDF/zond | c882400aff5f41569e6deee609ddbe7b2b1bc378 | [
"MIT"
] | 223 | 2018-11-14T19:20:27.000Z | 2019-02-13T11:53:23.000Z | test/ParamResponseTest.cpp | DronMDF/zond | c882400aff5f41569e6deee609ddbe7b2b1bc378 | [
"MIT"
] | null | null | null | // Copyright (c) 2018-2019 Andrey Valyaev <dron.valyaev@gmail.com>
//
// This software may be modified and distributed under the terms
// of the MIT license. See the LICENSE file for details.
#include "ParamResponseTest.h"
#include "../http/ParamResponse.h"
#include "../http/RawResponse.h"
using namespace std;
using namespace oout;
// Test-only adapter: exposes an HTTP Response as an oout::Representation
// so its rendered text can be fed into text-matching test predicates.
class ResponseRepr final : public Representation {
public:
	explicit ResponseRepr(const shared_ptr<const Response> &response)
		: response(response)
	{
	}

	// Renders the wrapped response as its raw textual form.
	string asString() const override
	{
		return response->asString();
	}
private:
	const shared_ptr<const Response> response;
};
// Single-case suite: a ParamResponse decorating a minimal raw "200 Ok"
// response must render both added parameters, each as its own
// CRLF-terminated "name: value" line followed by the blank separator line.
ParamResponseTest::ParamResponseTest()
: TestSuite(
	make_shared<TestNamed>(
		"ParamResponse can specify multiply params",
		make_shared<TestContainText>(
			make_shared<ResponseRepr>(
				make_shared<ParamResponse>(
					make_shared<RawResponse>(
						"HTTP/1.1 200 Ok\r\n"
						"\r\n"
					),
					"first", "1",
					"second", "2"
				)
			),
			"first: 1\r\n"
			"second: 2\r\n"
			"\r\n"
		)
	)
)
{
}
| 20.211538 | 66 | 0.671741 | DronMDF |
ac0181486adf913dc29ed5cb8a5939bca47e8b2d | 838 | hpp | C++ | meta/include/mgs/meta/concepts/input_range.hpp | theodelrieu/mgs | 965a95e3d539447cc482e915f9c44b3439168a4e | [
"BSL-1.0"
] | 24 | 2020-07-01T13:45:50.000Z | 2021-11-04T19:54:47.000Z | meta/include/mgs/meta/concepts/input_range.hpp | theodelrieu/mgs | 965a95e3d539447cc482e915f9c44b3439168a4e | [
"BSL-1.0"
] | null | null | null | meta/include/mgs/meta/concepts/input_range.hpp | theodelrieu/mgs | 965a95e3d539447cc482e915f9c44b3439168a4e | [
"BSL-1.0"
] | null | null | null | #pragma once
#include <type_traits>
#include <mgs/meta/concepts/input_iterator.hpp>
#include <mgs/meta/concepts/range.hpp>
#include <mgs/meta/detected.hpp>
#include <mgs/meta/iterator_t.hpp>
namespace mgs
{
namespace meta
{
// Trait: true when T is a range whose iterator type models the library's
// InputIterator concept.
template <typename T>
struct is_input_range
{
private:
  // detected_t degrades to a sentinel type when iterator_t<T> is
  // ill-formed, so the trait evaluates to false instead of hard-erroring.
  using Iterator = meta::detected_t<iterator_t, T>;

public:
  // The sub-requirements, exposed for diagnostics/introspection.
  using requirements = std::tuple<is_range<T>, is_input_iterator<Iterator>>;

  static constexpr bool value =
      is_range<T>::value && is_input_iterator<Iterator>::value;

  // Forces a readable compile-time error when T does not satisfy the trait.
  static constexpr int trigger_static_asserts()
  {
    static_assert(value, "T does not model meta::input_range");
    return 1;
  }
};
// Variable-template shorthand for is_input_range<T>::value.
template <typename T>
constexpr auto is_input_range_v = is_input_range<T>::value;

// SFINAE-constrained alias for use in signatures: ill-formed unless T
// models the input-range trait above.
template <typename T, typename = std::enable_if_t<is_input_range_v<T>>>
using input_range = T;
}
}
| 20.95 | 76 | 0.74105 | theodelrieu |
ac0483ad1b7c2d1c61cf0a68a621281d07889c6e | 194 | cpp | C++ | OCT18B/CHSERVE.cpp | Chhekur/codechef-solutions | 14ca902ea693139de13ffe5b9f602447bf34b79f | [
"MIT"
] | 1 | 2019-03-25T14:14:47.000Z | 2019-03-25T14:14:47.000Z | OCT18B/CHSERVE.cpp | Chhekur/codechef-solutions | 14ca902ea693139de13ffe5b9f602447bf34b79f | [
"MIT"
] | null | null | null | OCT18B/CHSERVE.cpp | Chhekur/codechef-solutions | 14ca902ea693139de13ffe5b9f602447bf34b79f | [
"MIT"
] | null | null | null | #include<iostream>
using namespace std;
// CodeChef CHSERVE: the server alternates every c points. For each test
// case read Chef's points a, Cook's points b and the serve interval c;
// (a+b)/c completed serve blocks determine who serves next — an even
// count means CHEF, odd means COOK.
int main(void){
	int t;cin>>t; // number of test cases
	while(t--){
		long a,b,c;cin>>a>>b>>c;
		long e = (a + b) / c; // completed serve blocks so far
		if(e % 2 == 0)cout<<"CHEF\n";
		else cout<<"COOK\n";
	}
} | 17.636364 | 31 | 0.541237 | Chhekur |
f19b1b83a103bd226c6c7a5a11a8ce8ec21649fe | 920 | cc | C++ | third_party/blink/renderer/core/frame/navigator_ua.cc | mghgroup/Glide-Browser | 6a4c1eaa6632ec55014fee87781c6bbbb92a2af5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/blink/renderer/core/frame/navigator_ua.cc | mghgroup/Glide-Browser | 6a4c1eaa6632ec55014fee87781c6bbbb92a2af5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/blink/renderer/core/frame/navigator_ua.cc | mghgroup/Glide-Browser | 6a4c1eaa6632ec55014fee87781c6bbbb92a2af5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2021-01-05T23:43:46.000Z | 2021-01-07T23:36:34.000Z | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/frame/navigator_ua.h"
namespace blink {
// Builds a fresh, garbage-collected NavigatorUAData object and populates
// it from this navigator's UserAgentMetadata (User-Agent Client Hints).
NavigatorUAData* NavigatorUA::userAgentData() {
  NavigatorUAData* ua_data =
      MakeGarbageCollected<NavigatorUAData>(GetUAExecutionContext());
  UserAgentMetadata metadata = GetUserAgentMetadata();
  // Copy each metadata field across, converting UTF-8 std::strings to
  // Blink Strings where needed.
  ua_data->SetBrandVersionList(metadata.brand_version_list);
  ua_data->SetMobile(metadata.mobile);
  ua_data->SetPlatform(String::FromUTF8(metadata.platform),
                       String::FromUTF8(metadata.platform_version));
  ua_data->SetArchitecture(String::FromUTF8(metadata.architecture));
  ua_data->SetModel(String::FromUTF8(metadata.model));
  ua_data->SetUAFullVersion(String::FromUTF8(metadata.full_version));
  return ua_data;
}
} // namespace blink
| 35.384615 | 73 | 0.765217 | mghgroup |
f19f54632a9a42642580b073ddb19973492bd572 | 5,582 | cpp | C++ | Source/Gui3D-master/Gui3DPanel.cpp | shanefarris/CoreGameEngine | 5bef275d1cd4e84aa059f2f4f9e97bfa2414d000 | [
"MIT"
] | 3 | 2019-04-12T15:22:53.000Z | 2022-01-05T02:59:56.000Z | Source/Gui3D-master/Gui3DPanel.cpp | shanefarris/CoreGameEngine | 5bef275d1cd4e84aa059f2f4f9e97bfa2414d000 | [
"MIT"
] | null | null | null | Source/Gui3D-master/Gui3DPanel.cpp | shanefarris/CoreGameEngine | 5bef275d1cd4e84aa059f2f4f9e97bfa2414d000 | [
"MIT"
] | 2 | 2019-04-10T22:46:21.000Z | 2020-05-27T16:21:37.000Z | /*
Gui3D
-------
Copyright (c) 2012 Valentin Frechaud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include "Gui3DPanel.h"
#include "Gui3DPanelColors.h"
namespace Gui3D
{
using namespace std;
// Builds a 3D GUI panel: a Gorilla screen renderable attached to a new
// scene node, a camera helper node looking at it, a background rectangle
// styled from the panel-colors config, and an (initially empty) mouse
// pointer rectangle. Note the /100 scale: panel-space coordinates are
// 100 units per world unit (mirrored by the *100 in injectMouseMoved).
Panel::Panel(Gui3D* gui,
             Ogre::SceneManager* sceneMgr,
             const Ogre::Vector2& size,
             Ogre::Real distanceFromPanelToInteractWith,
             const Ogre::String& atlasName,
             const Ogre::String& name)
    : PanelContainer(gui, size),
      mDistanceFromPanelToInteractWith(distanceFromPanelToInteractWith),
      mNode(NULLPTR), mPanelCameraNode(NULLPTR), mScreenRenderable(NULLPTR)
{
	mScreenRenderable =
		gui->createScreenRenderable(Ogre::Vector2(mSize.x/100, mSize.y/100), atlasName, name);

	mNode = sceneMgr->getRootSceneNode()->createChildSceneNode();
	mNode->attachObject(mScreenRenderable);

	mPanelCameraNode = mNode->createChildSceneNode();
	mPanelCameraNode->setPosition(-1, 0, 7); // viewing offset in front of the panel
	mPanelCameraNode->lookAt(mNode->getPosition(), Ogre::Node::TS_PARENT);

	mGUILayer = gui->createLayer(mScreenRenderable, name);
	mBackground = mGUILayer->createRectangle(0, 0, mSize.x, mSize.y);

	// No sprite configured ("" or "none") -> gradient background + border;
	// otherwise use the configured background image.
	if (getPanelColors()->panelBackgroundSpriteName.length() == 0 ||
	    getPanelColors()->panelBackgroundSpriteName == "none")
	{
		mBackground->background_gradient(mGui3D->getPanelColors()->panelGradientType,
		                                 mGui3D->getPanelColors()->panelGradientColorStart,
		                                 mGui3D->getPanelColors()->panelGradientColorEnd);
		mBackground->border(mGui3D->getPanelColors()->panelBorderSize,
		                    mGui3D->getPanelColors()->panelBorder);
	}
	else
		mBackground->background_image(getPanelColors()->panelBackgroundSpriteName);

	// Create an empty mouse pointer which follow the mouse cursor
	mMousePointer = mGUILayer->createRectangle(0, 0, 0, 0);
	showInternalMousePointer();
}
// Frees every element created on this panel, then tears down the screen
// renderable (which owns everything drawn on it).
Panel::~Panel()
{
	for (size_t i=0; i < mPanelElements.size(); i++)
		delete mPanelElements[i];

	// Destroy all elements that had been created on the screen renderable
	mGui3D->destroyScreenRenderable(mScreenRenderable);
}

// Sets the maximum ray-hit distance at which the panel reacts to the
// mouse (see injectMouseMoved).
void Panel::setDistanceFromPanelToInteractWith(Ogre::Real distanceFromPanelToInteractWith)
{
	mDistanceFromPanelToInteractWith = distanceFromPanelToInteractWith;
}
// Routes a mouse ray to the panel. Returns true when the ray hits the
// panel quad within the interaction distance; otherwise clears every
// element's hover state and returns false.
//
// Steps: coarse world-space AABB rejection, exact test against the two
// triangles of the panel quad, distance gate, then conversion of the hit
// point into panel-local pixel coordinates (x100 scale, clamped so the
// cursor stays inside the panel) before refreshing hover states.
bool Panel::injectMouseMoved(const Ogre::Ray& ray)
{
	Ogre::Matrix4 transform;
	transform.makeTransform(mNode->getPosition(), mNode->getScale(), mNode->getOrientation());

	// Cheap first pass: ray vs the renderable's transformed bounding box.
	Ogre::AxisAlignedBox aabb = mScreenRenderable->getBoundingBox();
	aabb.transform(transform);
	pair<bool, Ogre::Real> result = Ogre::Math::intersects(ray, aabb);

	if (result.first == false)
	{
		unOverAllElements();
		return false;
	}

	// Exact pass: the quad's four corners in world space ...
	Ogre::Vector3 a,b,c,d;
	Ogre::Vector2 halfSize = (mSize/100) * 0.5f;
	a = transform * Ogre::Vector3(-halfSize.x,-halfSize.y,0);
	b = transform * Ogre::Vector3( halfSize.x,-halfSize.y,0);
	c = transform * Ogre::Vector3(-halfSize.x, halfSize.y,0);
	d = transform * Ogre::Vector3( halfSize.x, halfSize.y,0);

	// ... tested as two triangles (c,b,a) and (c,d,b).
	result = Ogre::Math::intersects(ray, c, b, a);
	if (result.first == false)
		result = Ogre::Math::intersects(ray, c, d, b);
	if (result.first == false)
	{
		unOverAllElements();
		return false;
	}

	// Too far away to interact with.
	if (result.second > mDistanceFromPanelToInteractWith)
	{
		unOverAllElements();
		return false;
	}

	// Convert the world-space hit into panel-local coordinates
	// (origin at top-left, 100 panel units per world unit).
	Ogre::Vector3 hitPos = (ray.getOrigin() + (ray.getDirection() * result.second));
	Ogre::Vector3 localPos = transform.inverse() * hitPos;
	localPos.x += halfSize.x;
	localPos.y -= halfSize.y;
	localPos.x *= 100;
	localPos.y *= 100;

	// Cursor clip
	localPos.x = Ogre::Math::Clamp<Ogre::Real>(localPos.x, 0, mSize.x - 10);
	localPos.y = Ogre::Math::Clamp<Ogre::Real>(-localPos.y, 0, mSize.y - 18);

	mInternalMousePos = Ogre::Vector2(localPos.x, localPos.y);
	mMousePointer->position(mInternalMousePos);

	// Let's actualize the "over" for each elements
	for (size_t i=0; i < mPanelElements.size(); i++)
		mPanelElements[i]->isOver(mInternalMousePos);

	return true;
}
} | 36.966887 | 94 | 0.64296 | shanefarris |
f1a4c44e616b221904cda01046e4865fa97fe64e | 742 | cpp | C++ | src/LuminoEngine/sandbox/Example_Shader.cpp | infinnie/Lumino | 921caabdbcb91528a2aac290e31d650628bc3bed | [
"MIT"
] | null | null | null | src/LuminoEngine/sandbox/Example_Shader.cpp | infinnie/Lumino | 921caabdbcb91528a2aac290e31d650628bc3bed | [
"MIT"
] | null | null | null | src/LuminoEngine/sandbox/Example_Shader.cpp | infinnie/Lumino | 921caabdbcb91528a2aac290e31d650628bc3bed | [
"MIT"
] | null | null | null |
#include <LuminoEngine.hpp>
using namespace ln;
// Minimal Lumino sample: shows a 5x-scaled plane with a custom shader
// material on top of the guide grid, orbit-controlled camera.
class App_Example_Shader : public Application
{
    virtual void onInit() override
    {
        Engine::renderView()->setGuideGridEnabled(true);
        Engine::mainCamera()->addComponent(CameraOrbitControlComponent::create());

        auto plane = PlaneMesh::create();
        plane->setScale(5);

        // NOTE(review): hard-coded absolute developer-machine path; this
        // sample breaks on any other checkout location.
        auto shader = Shader::create(u"C:/Proj/LN/Lumino/src/LuminoEngine/sandbox/Assets/Shader/Ring.fx");
        auto mat = Material::create();
        mat->setShader(shader);
        plane->planeMeshComponent()->setMaterial(mat);
    }

    // Per-frame hook; intentionally empty for this sample.
    virtual void onUpdate() override
    {
    }
};
// Sandbox entry point: constructs the sample app and hands it to the
// engine's run loop (blocks until the app exits).
void Example_Shader()
{
    App_Example_Shader app;
    detail::ApplicationHelper::run(&app);
}
| 21.2 | 106 | 0.657682 | infinnie |
f1a57afa200e7da6937ae658156a22b31fb8054f | 456 | cpp | C++ | Leetcode/35.SearchInsertPosition/SearchInsertPosition.cpp | juancgar/CompetitiveProgramming | 9ff93ba14d9d5f45314a18cc78bd2d44de0b9fec | [
"MIT"
] | null | null | null | Leetcode/35.SearchInsertPosition/SearchInsertPosition.cpp | juancgar/CompetitiveProgramming | 9ff93ba14d9d5f45314a18cc78bd2d44de0b9fec | [
"MIT"
] | null | null | null | Leetcode/35.SearchInsertPosition/SearchInsertPosition.cpp | juancgar/CompetitiveProgramming | 9ff93ba14d9d5f45314a18cc78bd2d44de0b9fec | [
"MIT"
/**
 * Returns the index of `target` in the ascending-sorted `nums`, or the
 * index at which it would have to be inserted to keep `nums` sorted.
 *
 * Rewritten as a half-open binary search over [lo, hi). The previous
 * closed-range version read nums[0] when the vector was empty (begin==0,
 * end==0, loop body executed once) — undefined behavior; with the
 * half-open bounds the loop simply never runs. The `?:`-as-statement
 * end adjustment is gone as well.
 */
int searchInsert(vector<int>& nums, int target) {
    int lo = 0;
    int hi = static_cast<int>(nums.size());
    while (lo < hi)
    {
        int mid = lo + (hi - lo) / 2; // overflow-safe midpoint
        if (nums[mid] == target)
            return mid;               // exact match
        if (nums[mid] < target)
            lo = mid + 1;             // answer lies strictly right of mid
        else
            hi = mid;                 // answer is mid or left of it
    }
    return lo;                        // insertion point (0 for empty input)
}
f1a68d13344afd1477dfc29135e40432a55a63b1 | 1,052 | cpp | C++ | extra/news/src/xk/iat/ImageAnnotation/iat-stage/listtool.cpp | scignscape/PGVM | e24f46cdf657a8bdb990c7883c6bd3d0a0c9cff0 | [
"BSL-1.0"
] | null | null | null | extra/news/src/xk/iat/ImageAnnotation/iat-stage/listtool.cpp | scignscape/PGVM | e24f46cdf657a8bdb990c7883c6bd3d0a0c9cff0 | [
"BSL-1.0"
] | null | null | null | extra/news/src/xk/iat/ImageAnnotation/iat-stage/listtool.cpp | scignscape/PGVM | e24f46cdf657a8bdb990c7883c6bd3d0a0c9cff0 | [
"BSL-1.0"
] | null | null | null | #include "listtool.h"
#include "ui_listtool.h"
// Builds the dialog from the Designer-generated Ui::ListTool class.
ListTool::ListTool(QWidget *parent):QDialog(parent),ui(new Ui::ListTool){
	ui->setupUi(this);
}

ListTool::~ListTool(){
	delete ui;
}

// Qt auto-connection slot (on_<object>_<signal> naming); no behavior yet.
void ListTool::on_textObject_textChanged()
{
}

// Qt auto-connection slot; no behavior yet.
void ListTool::on_textInstance_textChanged()
{
}

// Intended to append a new editable row to the grid layout; the sketch
// below (kept from the original author) is not active yet.
void ListTool::on_AddLine_clicked(){
	//QLineEdit *lineEdit = new QLineEdit;
	//QLabel *label = new QLabel;
	//ui->gridLayout->addWidget(lineEdit,count,0);

	//Now you want to access text of all lineEdit you can do it like
	/*int iCount = ui->gridLayout->count(); //Total no of LineEdit added on gridLayout dynamically
	QString str = tr("");
	for(int i = 0; i < iCount; i++)
	{
		QLayoutItem* pLine = ui->gridLayout->itemAt(i);
		QLineEdit* pLineEdit = (QLineEdit*)pLine->widget();
		str = pLineEdit->text();
		//Now do whatever you want to do with the text
	} */
}

// Qt auto-connection slot; no behavior yet.
void ListTool::on_Save_clicked()
{
}

// Qt auto-connection slot; no behavior yet.
void ListTool::on_Cancel_clicked()
{
}
| 21.04 | 99 | 0.615019 | scignscape |
f1a7d76d699ec0852a16f4325b7452f4240f7166 | 1,244 | cpp | C++ | source/Input/Queue.cpp | kurocha/input | 619cbe901ebb2cfd9dd97235d30e596edc96aa14 | [
"MIT",
"Unlicense"
] | null | null | null | source/Input/Queue.cpp | kurocha/input | 619cbe901ebb2cfd9dd97235d30e596edc96aa14 | [
"MIT",
"Unlicense"
] | null | null | null | source/Input/Queue.cpp | kurocha/input | 619cbe901ebb2cfd9dd97235d30e596edc96aa14 | [
"MIT",
"Unlicense"
] | null | null | null | //
// Queue.cpp
// This file is part of the "Input" project and released under the MIT License.
//
// Created by Samuel Williams on 23/2/2019.
// Copyright, 2019, by Samuel Williams. All rights reserved.
//
#include "Queue.hpp"
#include "ResizeEvent.hpp"
#include "ButtonEvent.hpp"
#include "MotionEvent.hpp"
#include "RenderEvent.hpp"
#include "FocusEvent.hpp"
namespace Input
{
	// A default-constructed queue starts empty.
	Queue::Queue()
	{
	}

	// Drains every queued event (in enqueue order) into the handler.
	void Queue::dequeue(Handler & handler)
	{
		const auto & items = _queue.dequeue();

		for (auto & item : items) {
			// NOTE(review): this reinterprets the queued *item object itself*
			// as an Event; whether that is valid depends on the storage type
			// declared in Queue.hpp — confirm it should not be `*item` (a
			// dereference) instead of a reinterpret_cast.
			handler.process(reinterpret_cast<const Event &>(item));
		}
	}

	// Each process() overload copies the incoming event onto the queue for
	// later dispatch; returning true signals the event was accepted.
	bool Queue::process(const ResizeEvent & event)
	{
		_queue.emplace(std::make_unique<ResizeEvent>(event));

		return true;
	}

	bool Queue::process(const ButtonEvent & event)
	{
		_queue.emplace(std::make_unique<ButtonEvent>(event));

		return true;
	}

	bool Queue::process(const MotionEvent & event)
	{
		_queue.emplace(std::make_unique<MotionEvent>(event));

		return true;
	}

	bool Queue::process(const RenderEvent & event)
	{
		_queue.emplace(std::make_unique<RenderEvent>(event));

		return true;
	}

	bool Queue::process(const FocusEvent & event)
	{
		_queue.emplace(std::make_unique<FocusEvent>(event));

		return true;
	}
}
| 18.567164 | 80 | 0.686495 | kurocha |
f1a8e7362e9df7d5424c5caab15669ef09c32c1b | 1,280 | cpp | C++ | 3/main.cpp | ls171433/leetcode | 863d562153382f3d1480deb7ab453c15a72da6c4 | [
"MIT"
] | null | null | null | 3/main.cpp | ls171433/leetcode | 863d562153382f3d1480deb7ab453c15a72da6c4 | [
"MIT"
] | null | null | null | 3/main.cpp | ls171433/leetcode | 863d562153382f3d1480deb7ab453c15a72da6c4 | [
"MIT"
] | null | null | null | #include <algorithm>
#include <iostream>
#include <string>
using namespace std;
// LeetCode 3: length of the longest substring without repeating characters.
class Solution
{
public:
    // Sliding-window scan: `left` marks the start of the current
    // duplicate-free window and last[c] holds the most recent index at
    // which byte value c was seen ((size_t)(-1) = never seen).
    int lengthOfLongestSubstring(string s)
    {
        size_t last[256];
        for (size_t &slot : last)
        {
            slot = (size_t)(-1);
        }

        size_t best = 0;
        size_t left = 0;
        for (size_t right = 0; right < s.size(); ++right)
        {
            unsigned char c = s[right];
            // If c already occurs inside the window, shrink the window so
            // it starts just past that previous occurrence.
            if (last[c] != (size_t)(-1) && last[c] >= left)
            {
                left = last[c] + 1;
            }
            last[c] = right;

            size_t length = right - left + 1;
            if (length > best)
            {
                best = length;
            }
        }

        return (int)best; // empty input never enters the loop -> 0
    }
};
// Ad-hoc driver: prints the answer for "abba" (expected: 2).
int main()
{
    auto result = Solution().lengthOfLongestSubstring("abba");
    cout << result << endl;
}
| 22.45614 | 74 | 0.435156 | ls171433 |
f1b056fb7ca09893bca8210a477e191668acb722 | 709 | cpp | C++ | src/game/Hazard.cpp | HaedHutner/Hazard | 88a50582dac83a5e80bce94c9faadd5cd4618279 | [
"MIT"
] | null | null | null | src/game/Hazard.cpp | HaedHutner/Hazard | 88a50582dac83a5e80bce94c9faadd5cd4618279 | [
"MIT"
] | null | null | null | src/game/Hazard.cpp | HaedHutner/Hazard | 88a50582dac83a5e80bce94c9faadd5cd4618279 | [
"MIT"
] | null | null | null | #include "Hazard.h"
// Boots the game: loads res/config/config.json, logs the configured
// window settings, registers a GameStartEvent listener, fires the event,
// then applies the configured background color and enters the idle state.
Hazard::Hazard()
    : Game(), config("res/config/config.json")
{
    name = "Hazard";

    log_info("Width: %d, Height: %d, Test: %f",
        config.getInt({"window", "width"}),
        config.getInt({"window", "height"}),
        config.getDouble({"window", "test"})
    );

    glm::vec3 color = config.getVec3({"window", "bgcolor"});

    log_info("x: %f, y: %f, z: %f",
        color.x,
        color.y,
        color.z
    );

    // Listener is registered before the event is posted so the handler
    // below observes this initial GameStartEvent as well.
    eventDispatcher.listen<GameStartEvent>([=](Event &event) {
        log_info("Game Started.");
    });

    eventDispatcher.post<GameStartEvent>(this);

    background = color;

    state = std::make_unique<IdleGameState>();
}

Hazard::~Hazard()
{
}
f1b0a041aa928c91071daf898bd919066ceeba51 | 1,727 | cpp | C++ | SDL Game/SDL Game/Behaviours/Mover.cpp | BrunoAOR/SDL-Game | 090a09e2c19d18b000769f353c5e7727d60fe5f6 | [
"MIT"
] | null | null | null | SDL Game/SDL Game/Behaviours/Mover.cpp | BrunoAOR/SDL-Game | 090a09e2c19d18b000769f353c5e7727d60fe5f6 | [
"MIT"
] | null | null | null | SDL Game/SDL Game/Behaviours/Mover.cpp | BrunoAOR/SDL-Game | 090a09e2c19d18b000769f353c5e7727d60fe5f6 | [
"MIT"
] | null | null | null | #include "Mover.h"
#include "Engine/API/API.h"
#include "Engine/GameObjects/GameObject.h"
#include "Engine/Components/Transforms/Transform.h"
#include "Engine/Vector2.h"
Mover::Mover()
{
useWASD = false;
speed = 100;
speedStep = 50;
}
void Mover::update()
{
Vector2 motion(0, 0);
// Check speed
if (InputAPI::getKeyDown(SDL_SCANCODE_KP_PLUS))
{
speed += speedStep;
printf("Speed: %i\n", speed);
}
if (InputAPI::getKeyDown(SDL_SCANCODE_KP_MINUS))
{
speed -= speedStep;
printf("Speed: %i\n", speed);
}
// Check motion
if (useWASD)
{
moveWithWASD(motion.x, motion.y);
}
else
{
moveWithArrows(motion.x, motion.y);
}
if (motion.x != 0 || motion.y != 0)
{
if (motion.x != 0 && motion.y != 0) {
double sqrt2 = sqrt(2);
motion.x /= sqrt2;
motion.y /= sqrt2;
}
double elapsedSeconds = TimeAPI::deltaTime() / 1000.0;
motion.x *= speed * elapsedSeconds;
motion.y *= speed * elapsedSeconds;
auto transform = gameObject()->transform.lock();
Vector2 currentPos = transform->getLocalPosition();
Vector2 targetPos = currentPos + motion;
transform->setLocalPosition(targetPos);
}
}
void Mover::moveWithArrows(double& x, double& y)
{
x = 0;
y = 0;
if (InputAPI::getKey(SDL_SCANCODE_UP))
{
y = +1;
}
if (InputAPI::getKey(SDL_SCANCODE_DOWN))
{
y = -1;
}
if (InputAPI::getKey(SDL_SCANCODE_LEFT))
{
x = -1;
}
if (InputAPI::getKey(SDL_SCANCODE_RIGHT))
{
x = +1;
}
}
void Mover::moveWithWASD(double& x, double& y)
{
x = 0;
y = 0;
if (InputAPI::getKey(SDL_SCANCODE_W))
{
y = +1;
}
if (InputAPI::getKey(SDL_SCANCODE_S))
{
y = -1;
}
if (InputAPI::getKey(SDL_SCANCODE_A))
{
x = -1;
}
if (InputAPI::getKey(SDL_SCANCODE_D))
{
x = +1;
}
}
| 16.292453 | 56 | 0.634627 | BrunoAOR |
f1b0fb4bdda39f7ee4cfec67a370d89ddd7ac883 | 13,926 | cpp | C++ | API/src/Scene.cpp | cnsuhao/OtterUI-1 | 07149c970adaaa8c7696efa9ad7a92137f0a4557 | [
"MIT"
] | 10 | 2016-06-03T02:18:37.000Z | 2021-02-03T15:14:50.000Z | API/src/Scene.cpp | ppiecuch/OtterUI | 07149c970adaaa8c7696efa9ad7a92137f0a4557 | [
"MIT"
] | null | null | null | API/src/Scene.cpp | ppiecuch/OtterUI | 07149c970adaaa8c7696efa9ad7a92137f0a4557 | [
"MIT"
] | 5 | 2017-07-31T07:25:50.000Z | 2021-03-16T11:45:31.000Z | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "Common/Platforms.h"
#include "Data/OtterData.h"
#include "Graphics/Graphics.h"
#include "Scene.h"
#include "View.h"
#include "Font.h"
#include "Memory/Memory.h"
namespace Otter
{
	/* Constructor. Caches the system/graphics/sound services and
	 * immediately builds the scene contents from pSceneData
	 * (see LoadFromData).
	 */
	Scene::Scene(System* pSystem, Graphics* pGraphics, ISoundSystem* pSoundSystem, const SceneData* pSceneData)
	{
		mFonts = NULL;
		mViews = NULL;

		mSceneData = pSceneData;
		mSystem = pSystem;
		mGraphics = pGraphics;
		mSoundSystem = pSoundSystem;

		LoadFromData(mSceneData);
	}

	/* Destructor. Releases all views, fonts and their textures.
	 */
	Scene::~Scene(void)
	{
		Unload();
	}

	/* Retrieves the total number of views within this scene
	 * (0 when no scene data is loaded).
	 */
	uint32 Scene::GetNumViews()
	{
		if(!mSceneData)
			return 0;

		return mSceneData->mNumViews;
	}

	/* Retrieves the scene id (0 when no scene data is loaded).
	 */
	uint32 Scene::GetID()
	{
		if(!mSceneData)
			return 0;

		return mSceneData->mID;
	}
	/* Activates a view by name. Prompts the renderer
	 * to load as many textures as it needs to.
	 *
	 * The life-cycle of a view goes something like this:
	 *
	 * PlayAnim("OnActivate")---->OnActivate()---> (stuff) --> PlayAnim("OnDeactivate")--->OnDeactivate()
	 *
	 * We have to set up the listeners here, instead of "OnActivate", in order to get
	 * events from the view
	 */
	Result Scene::ActivateView(const char* szName)
	{
		View* pView = GetView(szName);
		if(!pView)
			return kResult_ViewNotFound;

		return ActivateView(pView);
	}

	/* Activates a view by index
	 */
	Result Scene::ActivateView(uint32 index)
	{
		// NOTE(review): unlike the by-name overload this dereferences
		// mSceneData without a null check — confirm callers guarantee a
		// loaded scene.
		if(index >= mSceneData->mNumViews)
			return kResult_ViewNotFound;

		return ActivateView(mViews[index]);
	}

	/* Activates a view by pointer. A view currently playing its
	 * OnDeactivate animation is "rescued" (the animation is stopped and
	 * the view re-activated); any other already-active view yields
	 * kResult_ViewAlreadyActive.
	 */
	Result Scene::ActivateView(View* pView)
	{
		for(uint32 i = 0; i < mActiveViews.size(); i++)
		{
			if(mActiveViews[i] == pView)
			{
				if (pView->GetActiveAnimationName(Otter::ANIM_ONDEACTIVATE) != NULL)
				{
					pView->StopAnimation(Otter::ANIM_ONDEACTIVATE);
					break;
				}
				else
				{
					return kResult_ViewAlreadyActive;
				}
			}
		}

		mActiveViews.push_back(pView);

		// Listen for the view's lifecycle events (removed again in
		// OnViewDeactivate).
		pView->mOnActivate.AddHandler(this, &Scene::OnViewActivate);
		pView->mOnDeactivate.AddHandler(this, &Scene::OnViewDeactivate);
		pView->mOnAnimationStarted.AddHandler(this, &Scene::OnViewAnimationStarted);
		pView->mOnAnimationEnded.AddHandler(this, &Scene::OnViewAnimationEnded);

		LoadViewResources((ViewData*)pView->GetData());

		pView->OnActivate();
		pView->PlayAnimation("OnActivate");

		return kResult_OK;
	}
	/* Deactivates a view by name (only views that are currently active
	 * can be found by this overload).
	 */
	Result Scene::DeactivateView(const char* szName)
	{
		View* pView = GetActiveView(szName);
		if(!pView)
			return kResult_ViewNotFound;

		return DeactivateView(pView);
	}

	/* Deactivates a view by index into the active-view list.
	 */
	Result Scene::DeactivateView(uint32 index)
	{
		if(index >= mActiveViews.size())
			return kResult_ViewNotFound;

		return DeactivateView(mActiveViews[index]);
	}

	/* Deactivates a view by pointer. The view is not removed here: it
	 * plays its OnDeactivate animation and is removed from the active
	 * list later, in OnViewAnimationEnded.
	 */
	Result Scene::DeactivateView(View* pView)
	{
		pView->PlayAnimation("OnDeactivate");
		pView->Update(0.0f); // Force a zero update to initialize positions.

		return kResult_OK;
	}
	/* Called when a view has activated (no action needed here).
	 */
	void Scene::OnViewActivate(void* pSender, void* pContext)
	{
	}

	/* Called when a view has deactivated.
	 * Remove the listeners here to ensure it's the very last thing the view does.
	 */
	void Scene::OnViewDeactivate(void* pSender, void* pContext)
	{
		View* pView = (View*)pSender;
		pView->mOnActivate.RemoveHandler(this, &Scene::OnViewActivate);
		pView->mOnDeactivate.RemoveHandler(this, &Scene::OnViewDeactivate);
		pView->mOnAnimationStarted.RemoveHandler(this, &Scene::OnViewAnimationStarted);
		pView->mOnAnimationEnded.RemoveHandler(this, &Scene::OnViewAnimationEnded);

		// Drop the ref-counts on the view's textures/sounds.
		UnLoadViewResources((ViewData*)pView->GetData());
	}

	/* Called when a view's animation has started (no action needed here).
	 */
	void Scene::OnViewAnimationStarted(void* pSender, uint32 animID)
	{
	}

	/* Called when a view's animation has ended. Completes a deferred
	 * deactivation: once the OnDeactivate animation finishes, the view is
	 * notified and removed from the active list.
	 */
	void Scene::OnViewAnimationEnded(void* pSender, uint32 animID)
	{
		View* pView = (View*)pSender;

		if(animID == ANIM_ONDEACTIVATE)
		{
			for(uint32 i = 0; i < mActiveViews.size(); i++)
			{
				if(mActiveViews[i] == pView)
				{
					pView->OnDeactivate();
					mActiveViews.erase(i);
					return;
				}
			}
		}
	}
	/* Retrieves the number of currently active views.
	 */
	uint32 Scene::GetNumActiveViews()
	{
		return mActiveViews.size();
	}

	/* Retrieves an active view by name (NULL if not found or no scene
	 * data is loaded).
	 */
	View* Scene::GetActiveView(const char* szName)
	{
		if(mSceneData == NULL || mSceneData->mNumViews == 0)
			return NULL;

		for(uint32 i = 0; i < mActiveViews.size(); i++)
		{
			if(strcmp(mActiveViews[i]->GetData()->mName, szName) == 0)
				return mActiveViews[i];
		}

		return NULL;
	}

	/* Retrieves an active view by index (NULL if out of range).
	 */
	View* Scene::GetActiveView(uint32 index)
	{
		if(index >= mActiveViews.size())
			return NULL;

		return mActiveViews[index];
	}
	/* Loads all the resource (textures, sounds, etc) for a particular view
	 */
	void Scene::LoadViewResources(const char* szName)
	{
		View* pView = GetView(szName);
		if(!pView)
			return;

		LoadViewResources((ViewData*)pView->GetData());
	}

	/* Loads the specified view data's resources: every texture id and
	 * sound id the view references gets a LoadResource() (ref-counted).
	 */
	void Scene::LoadViewResources(const ViewData* pViewData)
	{
		if(!pViewData)
			return;

		// Load the textures
		const sint32* textureIDs = pViewData->GetTextureIDs();
		for(uint32 i = 0; i < pViewData->mNumTextures; i++)
		{
			LoadResource(textureIDs[i]);
		}

		// Load the sounds
		const sint32* soundIDs = pViewData->GetSoundIDs();
		for(uint32 i = 0; i < pViewData->mNumSounds; i++)
		{
			LoadResource(soundIDs[i]);
		}
	}

	/* Unloads all the resources for a particular view (by name).
	 */
	void Scene::UnLoadViewResources(const char* szName)
	{
		View* pView = GetView(szName);
		if(!pView)
			return;

		UnLoadViewResources((ViewData*)pView->GetData());
	}

	/* Unloads the specified view data's resources; mirror image of
	 * LoadViewResources(const ViewData*).
	 */
	void Scene::UnLoadViewResources(const ViewData* pViewData)
	{
		if(!pViewData)
			return;

		// Unload used textures
		const sint32* textureIDs = pViewData->GetTextureIDs();
		for(uint32 i = 0; i < pViewData->mNumTextures; i++)
		{
			UnloadResource(textureIDs[i]);
		}

		// Unload used sounds
		const sint32* soundIDs = pViewData->GetSoundIDs();
		for(uint32 i = 0; i < pViewData->mNumSounds; i++)
		{
			UnloadResource(soundIDs[i]);
		}
	}
	/**
	 * Loads a resource by id. Texture ids resolve (possibly through a
	 * texture-rect indirection) to an actual texture whose ref-count is
	 * incremented; the GPU texture is loaded only on the 0 -> 1
	 * transition. Sound ids behave the same via the sound system.
	 * Returns false when the id matches no known texture or sound.
	 */
	bool Scene::LoadResource(uint32 resource)
	{
		const TextureData* pTextureData = mSceneData->GetTexture(resource);
		if(pTextureData)
		{
			const TextureData* pActualTexture = mSceneData->GetTexture(pTextureData->mTextureRect.mTextureID);
			if(pActualTexture != NULL && pActualTexture->mRefCount == 0)
			{
				mGraphics->LoadTexture(pActualTexture->mTextureID, pActualTexture->mTextureName);
			}

			// The scene data is nominally const; the ref-count is the one
			// mutable bookkeeping field.
			const_cast<TextureData*>(pActualTexture)->mRefCount++;
			return true;
		}

		const SoundData* pSoundData = mSceneData->GetSound(resource);
		if(pSoundData)
		{
			if(pSoundData->mRefCount == 0 && mSoundSystem)
				mSoundSystem->OnLoadSound(pSoundData->mSoundID, pSoundData->mSoundName);

			const_cast<SoundData*>(pSoundData)->mRefCount++;
			return true;
		}

		return false;
	}

	/**
	 * Unloads the specified resource: decrements the ref-count and
	 * releases the underlying texture/sound when it reaches zero.
	 * Returns false when the id matches nothing (or the sound's count
	 * was already zero).
	 */
	bool Scene::UnloadResource(uint32 resource)
	{
		const TextureData* pTextureData = mSceneData->GetTexture(resource);
		if(pTextureData)
		{
			const TextureData* pActualTexture = mSceneData->GetTexture(pTextureData->mTextureRect.mTextureID);
			if(pActualTexture && pActualTexture->mRefCount > 0)
			{
				const_cast<TextureData*>(pActualTexture)->mRefCount--;

				if(pActualTexture->mRefCount <= 0)
					mGraphics->UnloadTexture(pActualTexture->mTextureID);
			}

			return true;
		}

		const SoundData* pSoundData = mSceneData->GetSound(resource);
		if(pSoundData && pSoundData->mRefCount > 0)
		{
			const_cast<SoundData*>(pSoundData)->mRefCount--;

			if(pSoundData->mRefCount <= 0 && mSoundSystem)
				mSoundSystem->OnUnloadSound(pSoundData->mSoundID);

			return true;
		}

		return false;
	}
	/* Retrieves a font by ID (NULL if fonts are not loaded or no match).
	 */
	Font* Scene::GetFont(uint32 fontID)
	{
		if(mSceneData == NULL || mSceneData->mNumFonts == 0 || mFonts == NULL)
			return NULL;

		for(uint32 i = 0; i < mSceneData->mNumFonts; i++)
		{
			if(mFonts[i]->GetData().mID == fontID)
				return mFonts[i];
		}

		return NULL;
	}

	/* Retrieves a font by name (UTF-8 comparison; NULL if no match).
	 */
	Font* Scene::GetFont(const char* szName)
	{
		if(mSceneData == NULL || mSceneData->mNumFonts == 0 || mFonts == NULL)
			return NULL;

		UTF8String fontName = szName;
		for(uint32 i = 0; i < mSceneData->mNumFonts; i++)
		{
			UTF8String tmpName = mFonts[i]->GetData().mName;
			if(fontName == tmpName)
				return mFonts[i];
		}

		return NULL;
	}

	/* Retrieves a texture id by name; 0xFFFFFFFF when no texture with
	 * that name exists in the scene data.
	 */
	uint32 Scene::GetTextureID(const char* szTextureName)
	{
		UTF8String nameA = szTextureName;
		UTF8String nameB = "";
		for(uint32 i = 0; i < mSceneData->mNumTextures; i++)
		{
			const TextureData* pTextureData = mSceneData->GetTextureByIndex(i);

			nameB = pTextureData->mTextureName;
			if(nameA == nameB)
				return pTextureData->mTextureID;
		}

		return 0xFFFFFFFF;
	}

	/* Retrieves the texture data by texture id. Never returns NULL: a
	 * shared function-local default-constructed record is returned when
	 * the id is unknown (callers must not mutate it).
	 */
	const TextureData* Scene::GetTextureData(uint32 textureID)
	{
		for(uint32 i = 0; i < mSceneData->mNumTextures; i++)
		{
			const TextureData* pTextureData = mSceneData->GetTextureByIndex(i);
			if(pTextureData->mTextureID == textureID)
				return pTextureData;
		}

		static TextureData emptyData;
		return &emptyData;
	}
/* Loads the scene from the provided scene data.
* Unloads all existing data first.
*/
bool Scene::LoadFromData(const SceneData* pSceneData)
{
Unload();
if(pSceneData->mNumViews <= 0)
return false;
mSceneData = pSceneData;
uint32 maxTexID = 0;
for(uint32 i = 0; i < mSceneData->mNumTextures; i++)
{
const TextureData* pTextureData = mSceneData->GetTextureByIndex(i);
if(pTextureData->mTextureID > maxTexID)
maxTexID = pTextureData->mTextureID;
}
// Create our array of fonts if any
if(mSceneData->mNumFonts > 0)
{
mFonts = (Font**)OTTER_ALLOC(sizeof(Font*) * mSceneData->mNumFonts);
memset(mFonts, 0x00, sizeof(mFonts));
for(uint32 i = 0; i < mSceneData->mNumFonts; i++)
{
const FontData* pFontData = mSceneData->GetFontData(i);
mFonts[i] = OTTER_NEW(Font, (pFontData));
uint32 textures[256];
for(uint32 j = 0; j < pFontData->mNumTextures; j++)
{
// Now load up the texture for the font
// We look for the font texture under the /Fonts directory
char fontTexture[128];
sprintf_s(fontTexture, 128, "Fonts\\%s_%d.png", pFontData->mName, j);
maxTexID++;
mGraphics->LoadTexture(maxTexID, fontTexture);
textures[j] = maxTexID;
}
mFonts[i]->SetTextures(textures, pFontData->mNumTextures);
}
}
// Create the array of views
mViews = (View**)OTTER_ALLOC(sizeof(View*) * mSceneData->mNumViews);
memset(mViews, 0x00, sizeof(mViews));
// And now load them individually
for(uint32 i = 0; i < mSceneData->mNumViews; i++)
{
const ViewData* pViewData = mSceneData->GetViewData(i);
if(pViewData->mFourCC == FOURCC_GGVW)
{
mViews[i] = OTTER_NEW(View, (this, pViewData));
}
else
{
assert(false);
}
}
return true;
}
	/* Unloads the scene's internal data: deletes all views, then all
	 * fonts (unloading each font's textures first), and frees both
	 * arrays. NOTE(review): the counts are read from mSceneData, so the
	 * scene data must still be valid while mViews/mFonts are non-null —
	 * confirm callers uphold this.
	 */
	void Scene::Unload()
	{
		if(mViews != NULL)
		{
			for(uint32 i = 0; i < mSceneData->mNumViews; i++)
			{
				OTTER_DELETE(mViews[i]);
			}

			OTTER_FREE(mViews);
		}
		mViews = NULL;

		if(mFonts != NULL)
		{
			for(uint32 i = 0; i < mSceneData->mNumFonts; i++)
			{
				const Array<uint32>& textures = mFonts[i]->GetTextures();
				for(uint32 j = 0; j < textures.size(); j++)
					mGraphics->UnloadTexture(textures[j]);

				OTTER_DELETE(mFonts[i]);
			}

			OTTER_FREE(mFonts);
		}
		mFonts = NULL;
	}
/* Retrieves a view by name
*/
View* Scene::GetView(const char* szName)
{
if(mSceneData == NULL || mSceneData->mNumViews == 0)
return NULL;
for(uint32 i = 0; i < mSceneData->mNumViews; i++)
{
if(strcmp(mViews[i]->GetData()->mName, szName) == 0)
return mViews[i];
}
return NULL;
}
/* Retrieves a view by index
*/
View* Scene::GetView(uint32 index)
{
if(mSceneData == NULL || (index >= mSceneData->mNumViews))
return NULL;
return mViews[index];
}
/* Sets the scene resolution
*/
void Scene::SetResolution(uint32 width, uint32 height)
{
	// Propagate the new dimensions to every view in the scene.
	const VectorMath::Vector2 newSize((float)width, (float)height);
	const int viewCount = GetNumViews();
	for(int v = 0; v < viewCount; v++)
		GetView(v)->SetSize(newSize);
}
/* Points (touches/mouse/etc) were pressed down
*/
void Scene::OnPointsDown(Point* points, sint32 numPoints)
{
	// Dispatch to a snapshot of the active views (presumably so handlers may
	// alter the live list); stop at the first view that consumes the event.
	Array<View*> snapshot(mActiveViews);
	const uint32 count = snapshot.size();
	for(uint32 v = 0; v < count; v++)
	{
		if(snapshot[v]->OnPointsDown(points, numPoints))
			return;
	}
}
/* Points (touches/mouse/etc) were released
*/
void Scene::OnPointsUp(Point* points, sint32 numPoints)
{
	// Dispatch to a snapshot of the active views (presumably so handlers may
	// alter the live list); stop at the first view that consumes the event.
	Array<View*> snapshot(mActiveViews);
	const uint32 count = snapshot.size();
	for(uint32 v = 0; v < count; v++)
	{
		if(snapshot[v]->OnPointsUp(points, numPoints))
			return;
	}
}
/* Points (touches/mouse/etc) were moved.
*/
void Scene::OnPointsMove(Point* points, sint32 numPoints)
{
	// Dispatch to a snapshot of the active views (presumably so handlers may
	// alter the live list); stop at the first view that consumes the event.
	Array<View*> snapshot(mActiveViews);
	const uint32 count = snapshot.size();
	for(uint32 v = 0; v < count; v++)
	{
		if(snapshot[v]->OnPointsMove(points, numPoints))
			return;
	}
}
/* Draws the scene. Only the active views are
* drawn.
*/
void Scene::Draw()
{
for(uint32 i = 0; i < mActiveViews.size(); i++)
{
mActiveViews[i]->Draw(mGraphics);
}
}
/* Updates the scene
* "frameDelta" is the number of frames that have passed since last update
*/
void Scene::Update(float frameDelta)
{
Array<View*> temp(mActiveViews);
for(uint32 i = 0; i < temp.size(); i++)
{
temp[i]->Update(frameDelta);
}
}
};
| 22.069731 | 108 | 0.665087 | cnsuhao |
f1b16795a51cf46ab86bd6b0a21cb1319c93f2f9 | 2,085 | cpp | C++ | GLGUIWindowManager.cpp | BernardIgiri/archive-OpenGLTechDemonstrator | d18d8d16833f2d8a95bb7f5f29f2952708938573 | [
"MIT"
] | null | null | null | GLGUIWindowManager.cpp | BernardIgiri/archive-OpenGLTechDemonstrator | d18d8d16833f2d8a95bb7f5f29f2952708938573 | [
"MIT"
] | null | null | null | GLGUIWindowManager.cpp | BernardIgiri/archive-OpenGLTechDemonstrator | d18d8d16833f2d8a95bb7f5f29f2952708938573 | [
"MIT"
] | null | null | null | #include "GLGUIWindowManager.h"
#include "SystemTimer.h"
// Bundle of collaborators captured once by GLGUIInitWinRenderer and kept in
// GLGUIRenderWin's static state for the lifetime of the renderer.
typedef struct
{
	CInputManager *cInputMan;		// source of cursor/button/text/time input
	CGLGUIWINDOWGENERATOR *cWinGen;	// factory used to construct GUI windows
} GLGGUISWINITDATA_t;
// Creates/updates a GUI window.
//  - Before use, it must be primed via GLGUIInitWinRenderer (window==NULL,
//    windowType==-1, data pointing at a GLGGUISWINITDATA_t).
//  - With window==NULL (after init) a new window is created from windowType.
//  - With a live window, input is sampled and forwarded to GLGUIUpdateWin;
//    its result is returned.
// screenWidth/screenHeight, when positive, update the cached screen size used
// to scale normalized cursor coordinates into pixels.
bool GLGUIRenderWin(CGLGUIWindow *&window,int windowType, void *data, int x, int y, int screenWidth, int screenHeight)
{
	// Fix: the original "static width=640,height=480;" relied on implicit int,
	// which is not valid C++ — spell the type out.
	static int width=640,height=480;
	if (screenWidth>0)
		width = screenWidth;
	if (screenHeight>0)
		height = screenHeight;
	static bool initialized=false;
	static GLGGUISWINITDATA_t winData;
	static const inputManButtonStatus_t* buttonStatus = NULL;
	static int cursorX = 0,cursorY = 0,mouseB = 0;
	static const unsigned char *textInputChar = NULL;
	static CSystemTimer timer;
	if (!initialized)
	{
		// Initialization handshake: cache the input manager's lookup tables.
		if (window==NULL&&windowType==-1&&data!=NULL&&x==0&&y==0)
		{
			memcpy(&winData,data,sizeof(GLGGUISWINITDATA_t));
			buttonStatus = winData.cInputMan->GetStatusPointer();
			cursorX = winData.cInputMan->GetButtonIndex("CursorX");
			cursorY = winData.cInputMan->GetButtonIndex("CursorY");
			mouseB = winData.cInputMan->GetButtonIndex("MouseButton");
			textInputChar = winData.cInputMan->GetTextInputPointer();
			initialized=timer.Initialize(winData.cInputMan->GetSystemTimePointer());
		}
		return false;
	}
	if (window!=NULL)
	{
		// Translate normalized input into pixel coordinates (Y is flipped so
		// 0 is at the bottom of the screen) and run the window's update.
		unsigned char current = INPUTMANConvertTextInputToCharacters(textInputChar,winData.cInputMan->GetCapsLock(),winData.cInputMan->GetShiftStatus());
		int cX = (int)(buttonStatus[cursorX].buttonValue*(width-1)),
			cY = (int)(-buttonStatus[cursorY].buttonValue*(height-2))+(height-1);
		// (removed the dead "if (result) int donothing=0;" debug stub)
		return GLGUIUpdateWin(window,cX,cY,current,(buttonStatus[mouseB].buttonValue==1.0f),timer.GetElapsedSeconds(),windowType,data);
	}
	else
	{
		window=GLGUIMakeWindow(windowType,winData.cWinGen,data,x,y);
	}
	return false;
}
// Primes GLGUIRenderWin's static state; both collaborators are required.
bool GLGUIInitWinRenderer(CInputManager *cInputMan,CGLGUIWINDOWGENERATOR *cWinGen)
{
	if (cInputMan==NULL)
		return false;
	if (cWinGen==NULL)
		return false;
	GLGGUISWINITDATA_t initData;
	initData.cInputMan=cInputMan;
	initData.cWinGen=cWinGen;
	// A NULL window with windowType -1 is the initialization handshake.
	CGLGUIWindow *noWindow=NULL;
	return GLGUIRenderWin(noWindow,-1,&initData,0,0);
}
| 31.590909 | 147 | 0.753477 | BernardIgiri |
f1b6df40f02c068d419034453b13e697531733b3 | 1,169 | cpp | C++ | test/ExtensionWordTest.cpp | PaulTrampert/GenieSys | 637e7f764bc7faac8d0b5afcf22646e200562f6a | [
"MIT"
] | null | null | null | test/ExtensionWordTest.cpp | PaulTrampert/GenieSys | 637e7f764bc7faac8d0b5afcf22646e200562f6a | [
"MIT"
] | 82 | 2020-12-17T04:03:10.000Z | 2022-03-24T17:54:28.000Z | test/ExtensionWordTest.cpp | PaulTrampert/GenieSys | 637e7f764bc7faac8d0b5afcf22646e200562f6a | [
"MIT"
] | null | null | null | //
// Created by paul.trampert on 11/26/2020.
//
#include <gtest/gtest.h>
#include <GenieSys/ExtensionWord.h>
TEST(ExtensionWord, DecodeBriefExtensionWord) {
    // Brief-format word: address register 2, long-word index, scale 1, displacement 3.
    const uint16_t raw = 0b1010101000000011;
    auto ext = GenieSys::ExtensionWord(raw);
    EXPECT_EQ(GenieSys::M68K_REG_TYPE_ADDR, ext.getIdxRegType());
    EXPECT_EQ((uint8_t)2, ext.getIdxRegAddr());
    EXPECT_EQ(GenieSys::EXT_WORD_IDX_SIZE_LONG_WORD, ext.getIdxSize());
    EXPECT_EQ(1, ext.getScale());
    EXPECT_TRUE(ext.isBrief());
    EXPECT_EQ(3, ext.getDisplacement());
}
TEST(ExtensionWord, DecodeExtensionWord) {
    // Full-format word: base register suppressed, index not suppressed,
    // base displacement size 2, index/indirect selection 3.
    const uint16_t raw = 0b1010101110100011;
    auto ext = GenieSys::ExtensionWord(raw);
    EXPECT_EQ(GenieSys::M68K_REG_TYPE_ADDR, ext.getIdxRegType());
    EXPECT_EQ((uint8_t)2, ext.getIdxRegAddr());
    EXPECT_EQ(GenieSys::EXT_WORD_IDX_SIZE_LONG_WORD, ext.getIdxSize());
    EXPECT_EQ(1, ext.getScale());
    EXPECT_FALSE(ext.isBrief());
    EXPECT_TRUE(ext.getBaseRegSuppress());
    EXPECT_FALSE(ext.getIndexSuppress());
    EXPECT_EQ(2, ext.getBaseDisplacementSize());
    EXPECT_EQ(3, ext.getIndexIndirectSelection());
}
f1b77b495c0fe88863915c977078ffa07b1236a2 | 604 | cpp | C++ | Chapter_6_Loops/Program1.cpp | othneildrew/CPP-Programming-Practices | 27a20c00b395446a7d2e0dd4b199f4cd9e35591b | [
"MIT"
] | 1 | 2020-12-03T15:26:20.000Z | 2020-12-03T15:26:20.000Z | Chapter_6_Loops/Program1.cpp | othneildrew/CPP-Programming-Practices | 27a20c00b395446a7d2e0dd4b199f4cd9e35591b | [
"MIT"
] | null | null | null | Chapter_6_Loops/Program1.cpp | othneildrew/CPP-Programming-Practices | 27a20c00b395446a7d2e0dd4b199f4cd9e35591b | [
"MIT"
] | null | null | null | // Chapter 6: Program 1
/***
Write a C++ program to read ten students' names and display them.
**/
# include <iostream>
# include <string>
using namespace std;
int main(void)
{
	// Prompt for ten student names (one word each) and echo them back numbered.
	string Name;
	const int Size = 10;
	for(int LCV = 0; LCV < Size; LCV++)
	{
		// The very first prompt is worded differently from the rest.
		cout << (LCV == 0
			? "\n\t Please enter student's name:\n"
			: "\n\t Please enter another student's name: \n");
		cin >> Name;
		cout <<"Student #" << LCV + 1 << " is " << Name << endl;
	}
	system("pause");	// Windows-only pause, kept from the original
	return 0;
}
// Code written by: Othneil Drew
| 18.875 | 66 | 0.516556 | othneildrew |
f1b7f3bc74e80fa36b8323602b2fbc7fa005694e | 947 | cpp | C++ | source/Ch18/main.cpp | Koma52/UDProg-Introduction | ca8cf7f7a2c11559d4f5b3c8ad5dd55040e99762 | [
"CC0-1.0"
] | null | null | null | source/Ch18/main.cpp | Koma52/UDProg-Introduction | ca8cf7f7a2c11559d4f5b3c8ad5dd55040e99762 | [
"CC0-1.0"
] | null | null | null | source/Ch18/main.cpp | Koma52/UDProg-Introduction | ca8cf7f7a2c11559d4f5b3c8ad5dd55040e99762 | [
"CC0-1.0"
] | 1 | 2020-09-12T11:41:44.000Z | 2020-09-12T11:41:44.000Z | #include "../std_lib_facilities.h"
int* ga = new int[10] { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512 };
// Copies and prints the global array 'ga' (always 10 ints), then copies the
// first n elements of 'array' into a fresh free-store buffer and prints them.
// Fix: the original leaked 'la' (allocated with new[] but never freed).
void f(int* array, int n){
    int* la = new int[10];
    for (int i = 0; i < 10; ++i){
        la[i] = ga[i];
    }
    for (int i = 0; i < 10; ++i){
        cout << la[i] << " ";
    }
    cout << endl;
    delete[] la;    // was leaked in the original
    int* p = new int[n];
    for (int i = 0; i < n; ++i){
        p[i] = array[i];
    }
    cout << "Elements of free-store array: " << endl;
    for (int i = 0; i < n; ++i){
        cout << p[i] << " ";
    }
    cout << endl;
    delete[] p;
}
// Prints the global array, then builds and prints an array of the factorials
// 1! through 10!.  Fix: the original leaked 'aa' (new[] without delete[]).
int main()
try {
    f(ga, 10);
    int* aa = new int[10];
    int x = 1;
    // aa[k] receives k+1 factorial: aa[0]=1!, aa[1]=2!, ..., aa[9]=10!
    for (int i = 2; i < 12; ++i){
        aa[i-2] = x;
        x*=i;
    }
    cout << "---------------------------" << endl;
    f(aa, 10);
    delete[] aa;    // was leaked in the original
    return 0;
}
catch (exception& e) {
    cerr << "error: " << e.what() << '\n';
    return 1;
}
catch (...) {
    cerr << "Oops: unknown exception!\n";
    return 2;
}
| 14.796875 | 64 | 0.393875 | Koma52 |
f1b89d4d3c8d3d92409904dd78483cd83ba357f6 | 1,554 | cpp | C++ | client/main.cpp | master-gekus/thrift_test | cbf6b763706f1212e4fcac377b2a5225ea7cd033 | [
"Unlicense"
] | null | null | null | client/main.cpp | master-gekus/thrift_test | cbf6b763706f1212e4fcac377b2a5225ea7cd033 | [
"Unlicense"
] | null | null | null | client/main.cpp | master-gekus/thrift_test | cbf6b763706f1212e4fcac377b2a5225ea7cd033 | [
"Unlicense"
] | null | null | null | #include "SharedService.h"
#include <cstdio>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
using namespace std;
using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace ::thrifts;
int main(int argc, char* argv[])
{
int port = 9090;
stdcxx::shared_ptr<TTransport> socket(new TSocket("localhost", port));
stdcxx::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
stdcxx::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
SharedServiceClient client(protocol);
try {
transport->open();
bool res = client.putPair(1, "Test!");
printf("1. res = %s\n", res ? "true" : "false");
res = client.putPair(2, "Test 2");
printf("2. res = %s\n", res ? "true" : "false");
SharedStruct s;
client.getStruct(s, 1);
printf("3. s.value = \"%s\"\n", s.value.c_str());
res = client.putPair(2, "Test New");
printf("4. res = %s\n", res ? "true" : "false");
client.getStruct(s, 2);
printf("5. s.value = \"%s\"\n", s.value.c_str());
client.replacePair(2, "Test New");
printf("6. replacePair\n");
client.getStruct(s, 2);
printf("7. s.value = \"%s\"\n", s.value.c_str());
printf("All testst finished!\n");
} catch (TException& tx) {
printf("ERROR: %s\n", tx.what());
return 1;
}
return 0;
}
| 27.75 | 77 | 0.602317 | master-gekus |
f1b9ddc57c32d1c2385dbe1d61224e24c989287a | 1,330 | cpp | C++ | engine/time/source/SwimmingMovementAccumulationChecker.cpp | sidav/shadow-of-the-wyrm | 747afdeebed885b1a4f7ab42f04f9f756afd3e52 | [
"MIT"
] | 60 | 2019-08-21T04:08:41.000Z | 2022-03-10T13:48:04.000Z | engine/time/source/SwimmingMovementAccumulationChecker.cpp | cleancoindev/shadow-of-the-wyrm | 51b23e98285ecb8336324bfd41ebf00f67b30389 | [
"MIT"
] | 3 | 2021-03-18T15:11:14.000Z | 2021-10-20T12:13:07.000Z | engine/time/source/SwimmingMovementAccumulationChecker.cpp | cleancoindev/shadow-of-the-wyrm | 51b23e98285ecb8336324bfd41ebf00f67b30389 | [
"MIT"
] | 8 | 2019-11-16T06:29:05.000Z | 2022-01-23T17:33:43.000Z | #include "CombatManager.hpp"
#include "Game.hpp"
#include "MapUtils.hpp"
#include "SwimmingCalculator.hpp"
#include "SwimmingMovementAccumulationChecker.hpp"
#include "RNG.hpp"
// Check for damage due to swimming past the point of exhaustion.
// Checks whether the creature has been in the water longer than it can swim.
// Past the limit (and without water breathing) the 'swim' member processes the
// consequence; otherwise there is a 10% chance per check to train Swimming.
void SwimmingMovementAccumulationChecker::check(CreaturePtr creature)
{
  if (creature)
  {
    SwimmingCalculator sc;
    MapPtr current_map = Game::instance().get_current_map();
    // Being on a submerged tile affects the allowed swimming time.
    bool submerged = false;
    if (current_map != nullptr)
    {
      TilePtr tile = MapUtils::get_tile_for_creature(current_map, creature);
      submerged = tile && tile->get_submerged();
    }
    ulonglong max_swimming_time = static_cast<ulonglong>(sc.calculate_maximum_swimming_time(submerged, creature, creature->get_breathes()));
    MovementAccumulation& movement_accumulation = creature->get_movement_accumulation_ref();
    ulonglong time_in_water = movement_accumulation.get_minutes_on_super_type_given_movement();
    // If a creature has water breathing, it can basically swim forever.
    if ((time_in_water > max_swimming_time) && !creature->can_breathe(BreatheType::BREATHE_TYPE_WATER))
    {
      // NOTE(review): 'swim' is a member declared elsewhere; presumably applies
      // exhaustion/drowning effects - confirm against the class declaration.
      swim.process(creature, nullptr);
    }
    else
    {
      if (RNG::percent_chance(10))
      {
        // 'sm' (skill marker) occasionally trains the Swimming skill.
        sm.check_skill(creature, SkillType::SKILL_GENERAL_SWIMMING);
      }
    }
  }
}
| 32.439024 | 140 | 0.728571 | sidav |
f1bd0a6c75cf98cba651784528e7a7d0b476d9cf | 4,344 | cpp | C++ | gl-spline/Texture.cpp | dlarudgus20/gl-spline | bc1c5ab71e8522c6cf0bbf35c7673e9f98c5b105 | [
"BSD-2-Clause"
] | null | null | null | gl-spline/Texture.cpp | dlarudgus20/gl-spline | bc1c5ab71e8522c6cf0bbf35c7673e9f98c5b105 | [
"BSD-2-Clause"
] | null | null | null | gl-spline/Texture.cpp | dlarudgus20/gl-spline | bc1c5ab71e8522c6cf0bbf35c7673e9f98c5b105 | [
"BSD-2-Clause"
] | null | null | null | // Copyright (c) 2014, 임경현
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* @file Texture.cpp
* @date 2015. 9. 27.
* @author dlarudgus20
* @copyright The BSD (2-Clause) License
*/
#include "pch.h"
#include "ext.h"
#include "Texture.h"
// Selects texture unit GL_TEXTURE0 + idx, then binds pTexture (or unbinds
// with NULL) on that unit.
void Texture::bind(int idx, const Texture *pTexture)
{
	glActiveTexture(GL_TEXTURE0 + idx);
	bind(pTexture);	// delegate to the single-argument overload
}
// Binds pTexture on the current texture unit; a null pointer unbinds (name 0).
void Texture::bind(const Texture *pTexture)
{
	glBindTexture(GL_TEXTURE_2D, (pTexture != nullptr) ? pTexture->m_texture : 0);
}
// Default-constructs an empty texture (GL name 0 == "no texture").
Texture::Texture()
	: m_texture(0)
{
}
// Loads an sRGB texture from an image file with the given sampler parameters.
Texture::Texture(const char *file, const Parameter &params)
{
	init(file, GL_SRGB, GL_RGB, params);
}
// Loads a texture from an image file with caller-chosen internal/pixel formats.
Texture::Texture(const char *file, GLint internalformat, GLenum format, const Parameter &params)
{
	init(file, internalformat, format, params);
}
void Texture::init(const char *file, GLint internalformat, GLenum format, const Parameter ¶ms)
{
glGenTextures(1, &m_texture);
glBindTexture(GL_TEXTURE_2D, m_texture);
{
params.apply();
int texWidth, texHeight;
unsigned char *image = SOIL_load_image(file, &texWidth, &texHeight, nullptr, SOIL_LOAD_RGB);
if (image == nullptr)
{
throw LoadError("[" + std::string(file) + "] : failed to load");
}
glTexImage2D(GL_TEXTURE_2D, 0, internalformat, texWidth, texHeight, 0, format, GL_UNSIGNED_BYTE, image);
glGenerateMipmap(GL_TEXTURE_2D);
SOIL_free_image_data(image);
}
glBindTexture(GL_TEXTURE_2D, 0);
}
// Creates a texture directly from a block of pixel data (or NULL to allocate
// uninitialized storage), optionally generating mipmaps.
Texture::Texture(GLint level, GLint internalformat,
	GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels,
	bool bGenMipmap, const Parameter &params)
{
	glGenTextures(1, &m_texture);
	glBindTexture(GL_TEXTURE_2D, m_texture);
	params.apply();
	glTexImage2D(GL_TEXTURE_2D, level, internalformat, width, height, border, format, type, pixels);
	if (bGenMipmap)
		glGenerateMipmap(GL_TEXTURE_2D);
	glBindTexture(GL_TEXTURE_2D, 0);
}
// Releases the GL texture, if one is owned (name 0 means "empty/moved-from").
Texture::~Texture()
{
	if (m_texture != 0)
	{
		glDeleteTextures(1, &m_texture);
	}
}
// Move constructor: steals the GL name and leaves 'other' empty.
Texture::Texture(Texture &&other)
	: m_texture(other.m_texture)
{
	other.m_texture = 0;
}
// Move assignment: frees the currently owned texture, then steals 'other's.
// Fix: guard against self-move - the original destroyed the texture when an
// object was move-assigned to itself; now self-move is a no-op.
Texture &Texture::operator =(Texture &&other)
{
	if (this != &other)
	{
		// swapping into a temporary lets its destructor free our old texture
		Texture().swap(*this);
		m_texture = other.m_texture;
		other.m_texture = 0;
	}
	return *this;
}
// Exchanges ownership of the GL texture names.
void Texture::swap(Texture &other)
{
	std::swap(m_texture, other.m_texture);
}
// Returns the raw GL texture name (0 when empty); ownership is retained.
GLuint Texture::get() const
{
	return m_texture;
}
// Default sampler state: repeat wrapping, trilinear minification,
// linear magnification.
Texture::Parameter Texture::Parameter::getDefault()
{
	return Parameter()
		[GL_TEXTURE_WRAP_S](GL_REPEAT)
		[GL_TEXTURE_WRAP_T](GL_REPEAT)
		[GL_TEXTURE_MIN_FILTER](GL_LINEAR_MIPMAP_LINEAR)
		[GL_TEXTURE_MAG_FILTER](GL_LINEAR);
}
// Applies every stored integer and float-vector parameter to the texture
// currently bound to GL_TEXTURE_2D.
void Texture::Parameter::apply() const
{
	for (const auto &entry : m_pari)
		glTexParameteri(GL_TEXTURE_2D, entry.first, entry.second);
	for (const auto &entry : m_parfv)
		glTexParameterfv(GL_TEXTURE_2D, entry.first, entry.second);
}
| 28.96 | 112 | 0.700046 | dlarudgus20 |
f1bdd927a07345f35ee7e40bd6e687692fc0f579 | 3,795 | cpp | C++ | src/Engine/TextLine.cpp | Terryhata6/Mengine | dfe36fdc84d7398fbbbd199feffc46c6f157f1d4 | [
"MIT"
] | null | null | null | src/Engine/TextLine.cpp | Terryhata6/Mengine | dfe36fdc84d7398fbbbd199feffc46c6f157f1d4 | [
"MIT"
] | null | null | null | src/Engine/TextLine.cpp | Terryhata6/Mengine | dfe36fdc84d7398fbbbd199feffc46c6f157f1d4 | [
"MIT"
] | null | null | null | #include "TextLine.h"
#include "Kernel/Logger.h"
#include "utf8.h"
namespace Mengine
{
//////////////////////////////////////////////////////////////////////////
TextLine::TextLine( uint32_t _layout, float _charOffset )
: m_layout( _layout )
, m_length( 0.f )
, m_charOffset( _charOffset )
{
}
//////////////////////////////////////////////////////////////////////////
TextLine::~TextLine()
{
}
//////////////////////////////////////////////////////////////////////////
bool TextLine::initialize( uint32_t _fontId, const TextFontInterfacePtr & _font, const U32String & _text )
{
U32String::size_type text_size = _text.size();
m_charsData.reserve( text_size );
bool successful = true;
for( U32String::const_iterator
it = _text.begin(),
it_end = _text.end();
it != it_end;
++it )
{
GlyphCode glyphChar = (GlyphCode)*it;
U32String::const_iterator it_kerning = it;
std::advance( it_kerning, 1 );
GlyphCode glyphCharNext = (it_kerning != _text.end()) ? *it_kerning : 0;
Glyph glyph;
if( _font->getGlyph( m_layout, glyphChar, glyphCharNext, &glyph ) == false )
{
LOGGER_ERROR( "fontName '%s' invalid glyph %u next %u"
, _font->getName().c_str()
, glyphChar
, glyphCharNext
);
successful = false;
continue;
}
CharData charData;
charData.code = glyphChar;
charData.advance = glyph.advance;
charData.offset = glyph.offset;
charData.size = glyph.size;
charData.uv = glyph.uv;
charData.fontId = _fontId;
charData.texture = glyph.texture;
m_charsData.emplace_back( charData );
m_length += charData.advance + m_charOffset;
}
m_length -= m_charOffset;
return successful;
}
//////////////////////////////////////////////////////////////////////////
uint32_t TextLine::getCharsDataSize() const
{
VectorCharData::size_type charsDataSize = m_charsData.size();
return (uint32_t)charsDataSize;
}
//////////////////////////////////////////////////////////////////////////
float TextLine::getLength() const
{
return m_length;
}
//////////////////////////////////////////////////////////////////////////
const VectorCharData & TextLine::getCharsData() const
{
return m_charsData;
}
//////////////////////////////////////////////////////////////////////////
void TextLine::calcCharPosition( const CharData & _cd, const mt::vec2f & _offset, float _charScale, uint32_t _index, mt::vec3f * const _pos ) const
{
mt::vec2f size = _cd.size * _charScale;
mt::vec2f offset = _offset + _cd.offset * _charScale;
const float size_xi[] = {0.f, size.x, size.x, 0.f};
const float size_yi[] = {0.f, 0.f, size.y, size.y};
float size_x = size_xi[_index];
float size_y = size_yi[_index];
_pos->x = offset.x + size_x;
_pos->y = offset.y + size_y;
_pos->z = 0.f;
}
//////////////////////////////////////////////////////////////////////////
void TextLine::advanceCharOffset( const CharData & _cd, float _charScale, mt::vec2f * const _offset ) const
{
_offset->x += (_cd.advance + m_charOffset) * _charScale;
}
//////////////////////////////////////////////////////////////////////////
}
| 33.289474 | 152 | 0.436627 | Terryhata6 |
f1beb97847c09949f33987b4e64fb137edf47025 | 74 | cpp | C++ | src/rosic_NumberManipulations.cpp | NeoBirth/rs-303 | bf3cda07e354809b2bdee389d8ee230210f741c4 | [
"MIT"
] | 13 | 2019-11-04T17:54:43.000Z | 2022-03-30T12:31:58.000Z | src/rosic_NumberManipulations.cpp | NeoBirth/rs-303 | bf3cda07e354809b2bdee389d8ee230210f741c4 | [
"MIT"
] | null | null | null | src/rosic_NumberManipulations.cpp | NeoBirth/rs-303 | bf3cda07e354809b2bdee389d8ee230210f741c4 | [
"MIT"
] | 5 | 2020-04-10T06:23:43.000Z | 2022-03-12T18:15:59.000Z | #include "rosic_NumberManipulations.h"
using namespace rosic;
| 9.25 | 39 | 0.702703 | NeoBirth |
f1becde937ef49393472de6ff5c3a4cdc6354ea8 | 51,253 | cpp | C++ | source/game/anim/Anim.cpp | JasonHutton/QWTA | 7f42dc70eb230cf69a8048fc98d647a486e752f1 | [
"MIT"
] | 2 | 2021-05-02T18:37:48.000Z | 2021-07-18T16:18:14.000Z | source/game/anim/Anim.cpp | JasonHutton/QWTA | 7f42dc70eb230cf69a8048fc98d647a486e752f1 | [
"MIT"
] | null | null | null | source/game/anim/Anim.cpp | JasonHutton/QWTA | 7f42dc70eb230cf69a8048fc98d647a486e752f1 | [
"MIT"
] | null | null | null |
// Copyright (C) 2007 Id Software, Inc.
//
#include "../precompiled.h"
#pragma hdrstop
#if defined( _DEBUG ) && !defined( ID_REDIRECT_NEWDELETE )
#define new DEBUG_NEW
#undef THIS_FILE
static char THIS_FILE[] = __FILE__;
#endif
#include "Anim.h"
#include "../../framework/Licensee.h"
bool idAnimManager::forceExport = false;
// anim_reduced: when set, loaded animations may be resampled (see LoadAnim)
idCVar anim_reduced( "anim_reduced", "1", CVAR_BOOL|CVAR_ARCHIVE, "" );
idCVar r_writeAnimB( "r_writeAnimB", "0", CVAR_BOOL, "Write out binary versions of animations." );
idCVar r_loadAnimB( "r_loadAnimB", "1", CVAR_BOOL, "Attempt loading of binary version of animations." );
/*
===============================================================================
idAnimBlendNetworkInfo_Minimal
===============================================================================
*/
/*
==================
idAnimBlendNetworkInfo_Minimal::MakeDefault
==================
*/
// Resets the snapshot to its empty state (no animation, zeroed blend window).
void idAnimBlend::idAnimBlendNetworkInfo_Minimal::MakeDefault( void ) {
	animNum			= -1;
	startTime		= 0;
	endTime			= 0;
	blendStartTime	= 0;
	blendDuration	= 0;
	blendStartValue	= 0.f;
	blendEndValue	= 0.f;
}
/*
==================
idAnimBlendNetworkInfo_Minimal::operator=
==================
*/
// Captures the networked fields of a live idAnimBlend into this snapshot.
void idAnimBlend::idAnimBlendNetworkInfo_Minimal::operator=( const idAnimBlend& anim ) {
	animNum			= anim.animNum;
	startTime		= anim.starttime;
	endTime			= anim.endtime;
	blendStartTime	= anim.blendStartTime;
	blendDuration	= anim.blendDuration;
	blendStartValue	= anim.blendStartValue;
	blendEndValue	= anim.blendEndValue;
}
/*
==================
idAnimBlendNetworkInfo_Minimal::Write
==================
*/
// Applies this snapshot's fields back onto a live idAnimBlend.
void idAnimBlend::idAnimBlendNetworkInfo_Minimal::Write( idAnimBlend& anim ) const {
	anim.animNum			= animNum;
	anim.starttime			= startTime;
	anim.endtime			= endTime;
	anim.blendStartTime		= blendStartTime;
	anim.blendDuration		= blendDuration;
	anim.blendStartValue	= blendStartValue;
	anim.blendEndValue		= blendEndValue;
}
/*
==================
idAnimBlendNetworkInfo_Minimal::operator==
==================
*/
// Field-wise equality against another snapshot.
bool idAnimBlend::idAnimBlendNetworkInfo_Minimal::operator==( const idAnimBlendNetworkInfo_Minimal& rhs ) const {
	if ( animNum != rhs.animNum )					return false;
	if ( startTime != rhs.startTime )				return false;
	if ( endTime != rhs.endTime )					return false;
	if ( blendStartTime != rhs.blendStartTime )		return false;
	if ( blendDuration != rhs.blendDuration )		return false;
	if ( blendStartValue != rhs.blendStartValue )	return false;
	if ( blendEndValue != rhs.blendEndValue )		return false;
	return true;
}
/*
==================
idAnimBlendNetworkInfo_Minimal::operator==
==================
*/
// Field-wise equality against a live idAnimBlend (does the snapshot match it?).
bool idAnimBlend::idAnimBlendNetworkInfo_Minimal::operator==( const idAnimBlend& rhs ) const {
	if ( animNum != rhs.animNum )					return false;
	if ( startTime != rhs.starttime )				return false;
	if ( endTime != rhs.endtime )					return false;
	if ( blendStartTime != rhs.blendStartTime )		return false;
	if ( blendDuration != rhs.blendDuration )		return false;
	if ( blendStartValue != rhs.blendStartValue )	return false;
	if ( blendEndValue != rhs.blendEndValue )		return false;
	return true;
}
/*
==================
idAnimBlend::idAnimBlendNetworkInfo_Minimal::Read
==================
*/
// Delta-decodes the snapshot from a bit message against 'base'.
// Field order and widths must mirror Write( base, msg ) exactly.
void idAnimBlend::idAnimBlendNetworkInfo_Minimal::Read( const idAnimBlendNetworkInfo_Minimal& base, const idBitMsg& msg ) {
	startTime		= msg.ReadDeltaLong( base.startTime );
	endTime			= msg.ReadDeltaLong( base.endTime );
	blendStartTime	= msg.ReadDeltaLong( base.blendStartTime );
	blendDuration	= msg.ReadDeltaLong( base.blendDuration );
	blendStartValue	= msg.ReadDeltaFloat( base.blendStartValue );
	blendEndValue	= msg.ReadDeltaFloat( base.blendEndValue );
	animNum			= msg.ReadDeltaShort( base.animNum );
}
/*
==================
idAnimBlend::idAnimBlendNetworkInfo_Minimal::Write
==================
*/
// Delta-encodes the snapshot into a bit message against 'base'.
// Field order and widths must mirror Read( base, msg ) exactly.
void idAnimBlend::idAnimBlendNetworkInfo_Minimal::Write( const idAnimBlendNetworkInfo_Minimal& base, idBitMsg& msg ) const {
	msg.WriteDeltaLong( base.startTime,		startTime );
	msg.WriteDeltaLong( base.endTime,		endTime );
	msg.WriteDeltaLong( base.blendStartTime, blendStartTime );
	msg.WriteDeltaLong( base.blendDuration, blendDuration );
	msg.WriteDeltaFloat( base.blendStartValue,blendStartValue );
	msg.WriteDeltaFloat( base.blendEndValue, blendEndValue );
	msg.WriteDeltaShort( base.animNum,		animNum );
}
/*
==================
idAnimBlend::idAnimBlendNetworkInfo_Minimal::Read
==================
*/
void idAnimBlend::idAnimBlendNetworkInfo_Minimal::Read( idFile* file ) {
	// intentionally empty - no file deserialization implemented for this snapshot
}
/*
==================
idAnimBlend::idAnimBlendNetworkInfo_Minimal::Write
==================
*/
void idAnimBlend::idAnimBlendNetworkInfo_Minimal::Write( idFile* file ) const {
	// intentionally empty - no file serialization implemented for this snapshot
}
/***********************************************************************
idMD5Anim
***********************************************************************/
/*
====================
idMD5Anim::idMD5Anim
====================
*/
// Constructs an empty animation; real data is filled in by LoadAnim().
idMD5Anim::idMD5Anim() {
	ref_count	= 0;
	numFrames	= 0;
	numJoints	= 0;
	frameRate	= 24;	// default rate used until the file specifies one
	animLength	= 0;
	reduced		= false;
	totaldelta.Zero();
}
/*
====================
idMD5Anim::idMD5Anim
====================
*/
// Releases all animation data on destruction.
idMD5Anim::~idMD5Anim() {
	Free();
}
/*
====================
idMD5Anim::Free
====================
*/
// Releases the animation's storage and resets header fields to their defaults.
// NOTE(review): baseFrame is not cleared here (matching the original) - it is
// overwritten by the next LoadAnim and freed by the destructor; confirm this
// is intentional.
void idMD5Anim::Free( void ) {
	jointInfo.Clear();
	bounds.Clear();
	componentFrames.Clear();
	name = "";
	totaldelta.Zero();
	numFrames	= 0;
	numJoints	= 0;
	frameRate	= 24;
	animLength	= 0;
	reduced		= false;
}
/*
====================
idMD5Anim::NumFrames
====================
*/
// Returns the number of keyframes stored in the animation.
int idMD5Anim::NumFrames( void ) const {
	return numFrames;
}
/*
====================
idMD5Anim::NumJoints
====================
*/
// Returns the number of joints animated by this file.
int idMD5Anim::NumJoints( void ) const {
	return numJoints;
}
/*
====================
idMD5Anim::Length
====================
*/
// Returns the animation's play length in milliseconds (computed in LoadAnim).
int idMD5Anim::Length( void ) const {
	return animLength;
}
/*
=====================
idMD5Anim::TotalMovementDelta
=====================
*/
// Returns the root joint's total translation from first to last frame.
const idVec3 &idMD5Anim::TotalMovementDelta( void ) const {
	return totaldelta;
}
/*
=====================
idMD5Anim::Name
=====================
*/
// Returns the filename the animation was loaded from.
const char *idMD5Anim::Name( void ) const {
	return name;
}
/*
====================
idMD5Anim::Reload
====================
*/
// Reloads the animation from its original file.
bool idMD5Anim::Reload( void ) {
	// copy the name first: Free() clears the 'name' member
	idStr filename = name;
	Free();
	return LoadAnim( filename );
}
/*
====================
idMD5Anim::Allocated
====================
*/
// Returns the total heap memory held by this animation's containers and name.
size_t idMD5Anim::Allocated( void ) const {
	size_t	size = bounds.Allocated() + jointInfo.Allocated() + baseFrame.Allocated() + componentFrames.Allocated() + name.Allocated();
	return size;
}
/*
====================
idMD5Anim::LoadAnim
====================
*/
// Narrows an int to short, asserting (in debug builds) that the value fits
// in the signed 16-bit range.
ID_INLINE short AssertShortRange( int value ) {
	const int limit = 1 << ( sizeof( short ) * 8 - 1 );
	assert( value >= -limit && value < limit );
	return (short)value;
}
/*
Parses a text .md5anim file into this object: header counts, joint hierarchy,
per-frame bounds, the compressed base frame, and the per-frame animated
components.  Also derives the root joint's total movement delta (the root's
translation channels are rebased relative to the base frame) and the play
length in milliseconds.  Returns false only when the file cannot be opened;
malformed content is reported through parser.Error().
Fix: removed the unused local 'skipFrames'.
*/
bool idMD5Anim::LoadAnim( const char *filename ) {
	int version;
	idLexer	parser( LEXFL_ALLOWPATHNAMES | LEXFL_NOSTRINGESCAPECHARS | LEXFL_NOSTRINGCONCAT );
	idToken	token;
	int i, j;
	int num;
	bool offsetwarning = false;
	if ( !parser.LoadFile( filename ) ) {
		return false;
	}
	Free();
	name = filename;
	parser.ExpectTokenString( MD5_VERSION_STRING );
	version = parser.ParseInt();
	if ( version != MD5_VERSION ) {
		// ARNOUT: FIXME: BACKWARDS COMPATIBILITY
		if ( version != 10 ) {
			parser.Error( "Invalid version %d.  Should be version %d", version, MD5_VERSION );
		}
	}
	// skip the commandline
	parser.ExpectTokenString( "commandline" );
	parser.ReadToken( &token );
	// parse num frames
	parser.ExpectTokenString( "numFrames" );
	numFrames = parser.ParseInt();
	if ( numFrames <= 0 ) {
		parser.Error( "Invalid number of frames: %d", numFrames );
	}
	// parse num joints
	parser.ExpectTokenString( "numJoints" );
	numJoints = parser.ParseInt();
	if ( numJoints <= 0 ) {
		parser.Error( "Invalid number of joints: %d", numJoints );
	}
	// parse frame rate
	parser.ExpectTokenString( "frameRate" );
	frameRate = parser.ParseInt();
	if ( frameRate < 0 ) {
		parser.Error( "Invalid frame rate: %d", frameRate );
	}
	// parse number of animated components (at most 6 channels per joint:
	// tx/ty/tz + three quaternion components)
	parser.ExpectTokenString( "numAnimatedComponents" );
	numAnimatedComponents = parser.ParseInt();
	if ( ( numAnimatedComponents < 0 ) || ( numAnimatedComponents > numJoints * 6 ) ) {
		parser.Error( "Invalid number of animated components: %d", numAnimatedComponents );
	}
	// parse the hierarchy
	jointInfo.SetGranularity( 1 );
	jointInfo.SetNum( numJoints );
	parser.ExpectTokenString( "hierarchy" );
	parser.ExpectTokenString( "{" );
	for( i = 0; i < numJoints; i++ ) {
		parser.ReadToken( &token );
		jointInfo[ i ].nameIndex = AssertShortRange( animationLib.JointIndex( token ) );
		// parse parent num (parents must precede children; only joint 0 may be a root)
		jointInfo[ i ].parentNum = AssertShortRange( parser.ParseInt() );
		if ( jointInfo[ i ].parentNum >= i ) {
			parser.Error( "Invalid parent num: %d", jointInfo[ i ].parentNum );
		}
		if ( ( i != 0 ) && ( jointInfo[ i ].parentNum < 0 ) ) {
			parser.Error( "Animations may have only one root joint" );
		}
		// parse anim bits (which of the 6 channels are animated for this joint)
		jointInfo[ i ].animBits = AssertShortRange( parser.ParseInt() );
		if ( jointInfo[ i ].animBits & ~63 ) {
			parser.Error( "Invalid anim bits: %d", jointInfo[ i ].animBits );
		}
		// parse first component (this joint's offset into each frame's component block)
		jointInfo[ i ].firstComponent = AssertShortRange( parser.ParseInt() );
		if ( ( numAnimatedComponents > 0 ) && ( ( jointInfo[ i ].firstComponent < 0 ) || ( jointInfo[ i ].firstComponent >= numAnimatedComponents ) ) ) {
			parser.Error( "Invalid first component: %d", jointInfo[ i ].firstComponent );
		}
	}
	parser.ExpectTokenString( "}" );
	// parse per-frame bounds
	parser.ExpectTokenString( "bounds" );
	parser.ExpectTokenString( "{" );
	bounds.SetGranularity( 1 );
	bounds.SetNum( numFrames );
	for( i = 0; i < numFrames; i++ ) {
		idBounds b;
		parser.Parse1DMatrix( 3, b[ 0 ].ToFloatPtr() );
		parser.Parse1DMatrix( 3, b[ 1 ].ToFloatPtr() );
		bounds[i].SetBounds( b );
	}
	parser.ExpectTokenString( "}" );
	// parse base frame (compressed to shorts; warn once about offsets that
	// exceed the representable bone length)
	baseFrame.SetGranularity( 1 );
	baseFrame.SetNum( numJoints );
	parser.ExpectTokenString( "baseframe" );
	parser.ExpectTokenString( "{" );
	for( i = 0; i < numJoints; i++ ) {
		idVec3 t;
		idCQuat q;
		parser.Parse1DMatrix( 3, t.ToFloatPtr() );
		parser.Parse1DMatrix( 3, q.ToFloatPtr() );
		t.FixDenormals();
		q.FixDenormals();
		if ( !offsetwarning ) {
			if ( fabsf( t.x ) >= idCompressedJointQuat::MAX_BONE_LENGTH ||
				fabsf( t.y ) >= idCompressedJointQuat::MAX_BONE_LENGTH ||
				fabsf( t.z ) >= idCompressedJointQuat::MAX_BONE_LENGTH ) {
				int jointNum = jointInfo[ i ].nameIndex;
				gameLocal.Warning( "WARNING: bone offset of '%s' joint '%s' greater than %i",
					filename,
					animationLib.JointName( jointNum ),
					idCompressedJointQuat::MAX_BONE_LENGTH );
				offsetwarning = true;
			}
		}
		baseFrame[ i ].t[0] = idCompressedJointQuat::OffsetToShort( t.x );
		baseFrame[ i ].t[1] = idCompressedJointQuat::OffsetToShort( t.y );
		baseFrame[ i ].t[2] = idCompressedJointQuat::OffsetToShort( t.z );
		baseFrame[ i ].q[0] = idCompressedJointQuat::QuatToShort( q.x );
		baseFrame[ i ].q[1] = idCompressedJointQuat::QuatToShort( q.y );
		baseFrame[ i ].q[2] = idCompressedJointQuat::QuatToShort( q.z );
	}
	parser.ExpectTokenString( "}" );
	// parse frames: each frame supplies only the channels flagged in animBits,
	// packed in tx,ty,tz,qx,qy,qz order per joint
	componentFrames.SetGranularity( 1 );
	componentFrames.SetNum( numAnimatedComponents * numFrames );
	short *componentPtr = componentFrames.Begin();
	for( i = 0; i < numFrames; i++ ) {
		parser.ExpectTokenString( "frame" );
		num = parser.ParseInt();
		if ( num != i ) {
			parser.Error( "Expected frame number %d", i );
		}
		parser.ExpectTokenString( "{" );
		for ( j = 0; j < numJoints; j++ ) {
			int animBits = jointInfo[j].animBits;
			if ( animBits & ANIM_TX ) {
				float x = parser.ParseFloat();
				*componentPtr++ = idCompressedJointQuat::OffsetToShort( x );
			}
			if ( animBits & ANIM_TY ) {
				float y = parser.ParseFloat();
				*componentPtr++ = idCompressedJointQuat::OffsetToShort( y );
			}
			if ( animBits & ANIM_TZ ) {
				float z = parser.ParseFloat();
				*componentPtr++ = idCompressedJointQuat::OffsetToShort( z );
			}
			if ( animBits & ANIM_QX ) {
				float x = parser.ParseFloat();
				*componentPtr++ = idCompressedJointQuat::QuatToShort( x );
			}
			if ( animBits & ANIM_QY ) {
				float y = parser.ParseFloat();
				*componentPtr++ = idCompressedJointQuat::QuatToShort( y );
			}
			if ( animBits & ANIM_QZ ) {
				float z = parser.ParseFloat();
				*componentPtr++ = idCompressedJointQuat::QuatToShort( z );
			}
		}
		parser.ExpectTokenString( "}" );
	}
	// get total move delta: rebase the root joint's animated translation
	// channels relative to the base frame, so the last frame's value is the
	// total movement over the animation
	if ( !numAnimatedComponents ) {
		totaldelta.Zero();
	} else {
		componentPtr = &componentFrames[ jointInfo[ 0 ].firstComponent ];
		if ( jointInfo[ 0 ].animBits & ANIM_TX ) {
			for( i = 0; i < numFrames; i++ ) {
				componentPtr[ numAnimatedComponents * i ] -= baseFrame[ 0 ].t[0];
			}
			totaldelta.x = idCompressedJointQuat::ShortToOffset( componentPtr[ numAnimatedComponents * ( numFrames - 1 ) ] );
			componentPtr++;
		} else {
			totaldelta.x = 0.0f;
		}
		if ( jointInfo[ 0 ].animBits & ANIM_TY ) {
			for( i = 0; i < numFrames; i++ ) {
				componentPtr[ numAnimatedComponents * i ] -= baseFrame[ 0 ].t[1];
			}
			totaldelta.y = idCompressedJointQuat::ShortToOffset( componentPtr[ numAnimatedComponents * ( numFrames - 1 ) ] );
			componentPtr++;
		} else {
			totaldelta.y = 0.0f;
		}
		if ( jointInfo[ 0 ].animBits & ANIM_TZ ) {
			for( i = 0; i < numFrames; i++ ) {
				componentPtr[ numAnimatedComponents * i ] -= baseFrame[ 0 ].t[2];
			}
			totaldelta.z = idCompressedJointQuat::ShortToOffset( componentPtr[ numAnimatedComponents * ( numFrames - 1 ) ] );
		} else {
			totaldelta.z = 0.0f;
		}
	}
	baseFrame[ 0 ].ClearOffset();
	// we don't count last frame because it would cause a 1 frame pause at the end
	animLength = ( ( numFrames - 1 ) * 1000 + frameRate - 1 ) / frameRate;
	if ( numFrames > 4 && numAnimatedComponents && anim_reduced.GetBool() && !r_writeAnimB.GetBool() ) {
		Resample();
	}
	// done
	return true;
}
/*
====================
idMD5Anim::Resample
====================
*/
void idMD5Anim::Resample( void ) {
	// Reduces the animation to roughly half its frame count by resampling the
	// component frames, then adjusts frameRate/numFrames/animLength to match.
	// Only runs once per anim; 'reduced' guards against double reduction.
	if ( reduced ) {
		return;
	}

	int idealFrames = numFrames / 2;

	idList<short> resampledFrames;
	resampledFrames.SetGranularity( 1 );
	resampledFrames.SetNum( numAnimatedComponents * idealFrames );

	// Scratch buffers are allocated ONCE, before the frame loop. _alloca16
	// storage is only released when the function returns, so allocating inside
	// the loop (as the old code did for blendJoints) grew the stack by a full
	// joint array for every resampled frame.
	idCompressedJointQuat *compressedJoints = (idCompressedJointQuat *)_alloca16( numJoints * sizeof( compressedJoints[0] ) );
	idCompressedJointQuat *compressedBlendJoints = (idCompressedJointQuat *)_alloca16( numJoints * sizeof( compressedBlendJoints[0] ) );
	idJointQuat *joints = (idJointQuat *)_alloca16( numJoints * sizeof( joints[0] ) );
	idJointQuat *blendJoints = (idJointQuat *)_alloca16( numJoints * sizeof( blendJoints[0] ) );
	int *baseIndex = (int*)_alloca16( numJoints * sizeof( baseIndex[0] ) );
	for ( int i = 0; i < numJoints; i++ ) {
		baseIndex[i] = i;
	}

	for ( int i = 0; i < idealFrames; i++ ) {
		// FIX: force float division. The original expression divided int by int,
		// which truncated the fractional part so 'blend' was always 0 and the
		// interpolation below never actually blended between source frames.
		float srcf = (float)( i * ( numFrames - 1 ) ) / (float)( idealFrames - 1 );
		int srci = (int)idMath::Floor( srcf );
		float blend = srcf - srci;	// fraction toward frame srci+1
		if ( i != srci ) {
			bounds[i] = bounds[srci];
		}
		{
			short *destPtr = &resampledFrames[ i * numAnimatedComponents ];
			short *srcPtr = &componentFrames[ srci * numAnimatedComponents ];
			short *nextSrcPtr;
			if ( ( srci + 1 ) < numFrames ) {
				nextSrcPtr = &componentFrames[ ( srci + 1 ) * numAnimatedComponents ];
			} else {
				// last frame: blend against itself
				nextSrcPtr = srcPtr;
			}

			// Gather the two source frames into compressed joint arrays,
			// starting from the base frame and overriding animated components.
			int numBaseIndex = 0;
			for ( int j = 0; j < numJoints; j++ ) {
				const jointAnimInfo_t * infoPtr = &jointInfo[j];
				int animBits = infoPtr->animBits;
				if ( animBits == 0 ) {
					continue;	// joint is not animated, base frame pose stays
				}
				baseIndex[numBaseIndex] = numBaseIndex;
				idCompressedJointQuat *jointPtr = &compressedJoints[numBaseIndex];
				idCompressedJointQuat *blendPtr = &compressedBlendJoints[numBaseIndex];
				const short *jointframe1 = srcPtr + infoPtr->firstComponent;
				const short *jointframe2 = nextSrcPtr + infoPtr->firstComponent;
				*jointPtr = baseFrame[j];
				// translation components present in this joint's frame data
				switch( animBits & (ANIM_TX|ANIM_TY|ANIM_TZ) ) {
					case 0:
						blendPtr->t[0] = jointPtr->t[0];
						blendPtr->t[1] = jointPtr->t[1];
						blendPtr->t[2] = jointPtr->t[2];
						break;
					case ANIM_TX:
						jointPtr->t[0] = jointframe1[0];
						blendPtr->t[0] = jointframe2[0];
						blendPtr->t[1] = jointPtr->t[1];
						blendPtr->t[2] = jointPtr->t[2];
						jointframe1++;
						jointframe2++;
						break;
					case ANIM_TY:
						jointPtr->t[1] = jointframe1[0];
						blendPtr->t[1] = jointframe2[0];
						blendPtr->t[0] = jointPtr->t[0];
						blendPtr->t[2] = jointPtr->t[2];
						jointframe1++;
						jointframe2++;
						break;
					case ANIM_TZ:
						jointPtr->t[2] = jointframe1[0];
						blendPtr->t[2] = jointframe2[0];
						blendPtr->t[0] = jointPtr->t[0];
						blendPtr->t[1] = jointPtr->t[1];
						jointframe1++;
						jointframe2++;
						break;
					case ANIM_TX|ANIM_TY:
						jointPtr->t[0] = jointframe1[0];
						jointPtr->t[1] = jointframe1[1];
						blendPtr->t[0] = jointframe2[0];
						blendPtr->t[1] = jointframe2[1];
						blendPtr->t[2] = jointPtr->t[2];
						jointframe1 += 2;
						jointframe2 += 2;
						break;
					case ANIM_TX|ANIM_TZ:
						jointPtr->t[0] = jointframe1[0];
						jointPtr->t[2] = jointframe1[1];
						blendPtr->t[0] = jointframe2[0];
						blendPtr->t[2] = jointframe2[1];
						blendPtr->t[1] = jointPtr->t[1];
						jointframe1 += 2;
						jointframe2 += 2;
						break;
					case ANIM_TY|ANIM_TZ:
						jointPtr->t[1] = jointframe1[0];
						jointPtr->t[2] = jointframe1[1];
						blendPtr->t[1] = jointframe2[0];
						blendPtr->t[2] = jointframe2[1];
						blendPtr->t[0] = jointPtr->t[0];
						jointframe1 += 2;
						jointframe2 += 2;
						break;
					case ANIM_TX|ANIM_TY|ANIM_TZ:
						jointPtr->t[0] = jointframe1[0];
						jointPtr->t[1] = jointframe1[1];
						jointPtr->t[2] = jointframe1[2];
						blendPtr->t[0] = jointframe2[0];
						blendPtr->t[1] = jointframe2[1];
						blendPtr->t[2] = jointframe2[2];
						jointframe1 += 3;
						jointframe2 += 3;
						break;
				}
				// quaternion components present in this joint's frame data
				switch( animBits & (ANIM_QX|ANIM_QY|ANIM_QZ) ) {
					case 0:
						blendPtr->q[0] = jointPtr->q[0];
						blendPtr->q[1] = jointPtr->q[1];
						blendPtr->q[2] = jointPtr->q[2];
						break;
					case ANIM_QX:
						jointPtr->q[0] = jointframe1[0];
						blendPtr->q[0] = jointframe2[0];
						blendPtr->q[1] = jointPtr->q[1];
						blendPtr->q[2] = jointPtr->q[2];
						break;
					case ANIM_QY:
						jointPtr->q[1] = jointframe1[0];
						blendPtr->q[1] = jointframe2[0];
						blendPtr->q[0] = jointPtr->q[0];
						blendPtr->q[2] = jointPtr->q[2];
						break;
					case ANIM_QZ:
						jointPtr->q[2] = jointframe1[0];
						blendPtr->q[2] = jointframe2[0];
						blendPtr->q[0] = jointPtr->q[0];
						blendPtr->q[1] = jointPtr->q[1];
						break;
					case ANIM_QX|ANIM_QY:
						jointPtr->q[0] = jointframe1[0];
						jointPtr->q[1] = jointframe1[1];
						blendPtr->q[0] = jointframe2[0];
						blendPtr->q[1] = jointframe2[1];
						blendPtr->q[2] = jointPtr->q[2];
						break;
					case ANIM_QX|ANIM_QZ:
						jointPtr->q[0] = jointframe1[0];
						jointPtr->q[2] = jointframe1[1];
						blendPtr->q[0] = jointframe2[0];
						blendPtr->q[2] = jointframe2[1];
						blendPtr->q[1] = jointPtr->q[1];
						break;
					case ANIM_QY|ANIM_QZ:
						jointPtr->q[1] = jointframe1[0];
						jointPtr->q[2] = jointframe1[1];
						blendPtr->q[1] = jointframe2[0];
						blendPtr->q[2] = jointframe2[1];
						blendPtr->q[0] = jointPtr->q[0];
						break;
					case ANIM_QX|ANIM_QY|ANIM_QZ:
						jointPtr->q[0] = jointframe1[0];
						jointPtr->q[1] = jointframe1[1];
						jointPtr->q[2] = jointframe1[2];
						blendPtr->q[0] = jointframe2[0];
						blendPtr->q[1] = jointframe2[1];
						blendPtr->q[2] = jointframe2[2];
						break;
				}
				numBaseIndex++;
			}

			// Decompress both source frames and interpolate.
			// FIX: pass 'blend' (weight of the second frame), matching the
			// convention used by GetInterpolatedFrame where backlerp weights
			// the blendJoints array. The old code passed 1.f-blend, which
			// (combined with the integer-division bug above) always selected
			// the srci+1 frame instead of interpolating.
			SIMDProcessor->DecompressJoints( joints, compressedJoints, baseIndex, numBaseIndex );
			SIMDProcessor->DecompressJoints( blendJoints, compressedBlendJoints, baseIndex, numBaseIndex );
			SIMDProcessor->BlendJoints( joints, blendJoints, blend, baseIndex, numBaseIndex );

			// Recompress the blended pose and scatter the animated components
			// into the destination frame, in the same order they were read.
			numBaseIndex = 0;
			for ( int j = 0; j < numJoints; j++ ) {
				const jointAnimInfo_t * infoPtr = &jointInfo[j];
				int animBits = infoPtr->animBits;
				if ( animBits == 0 ) {
					continue;
				}
				idJointQuat const &curjoint = joints[numBaseIndex];
				idCQuat cq = curjoint.q.ToCQuat();
				idCompressedJointQuat cj;
				cj.t[0] = idCompressedJointQuat::OffsetToShort( curjoint.t.x );
				cj.t[1] = idCompressedJointQuat::OffsetToShort( curjoint.t.y );
				cj.t[2] = idCompressedJointQuat::OffsetToShort( curjoint.t.z );
				cj.q[0] = idCompressedJointQuat::QuatToShort( cq.x );
				cj.q[1] = idCompressedJointQuat::QuatToShort( cq.y );
				cj.q[2] = idCompressedJointQuat::QuatToShort( cq.z );
				short *output = &destPtr[ infoPtr->firstComponent ];
				if ( animBits & (ANIM_TX) ) {
					*output++ = cj.t[0];
				}
				if ( animBits & (ANIM_TY) ) {
					*output++ = cj.t[1];
				}
				if ( animBits & (ANIM_TZ) ) {
					*output++ = cj.t[2];
				}
				if ( animBits & (ANIM_QX) ) {
					*output++ = cj.q[0];
				}
				if ( animBits & (ANIM_QY) ) {
					*output++ = cj.q[1];
				}
				if ( animBits & (ANIM_QZ) ) {
					*output++ = cj.q[2];
				}
				numBaseIndex++;
			}
		}
	}

	// keep playback duration roughly constant by scaling the frame rate
	frameRate = ( frameRate * idealFrames ) / numFrames;
	numFrames = idealFrames;
	animLength = ( ( numFrames - 1 ) * 1000 + frameRate - 1 ) / frameRate;
	bounds.SetGranularity( 1 );
	bounds.SetNum( numFrames );	// bounds[0..idealFrames-1] were filled above
	componentFrames = resampledFrames;
	reduced = true;
}
/*
====================
idMD5Anim::IncreaseRefs
====================
*/
void idMD5Anim::IncreaseRefs( void ) const {
	// Bump the reference count. Note: modified from a const method, so
	// ref_count is presumably declared mutable in the class declaration.
	++ref_count;
}
/*
====================
idMD5Anim::DecreaseRefs
====================
*/
void idMD5Anim::DecreaseRefs( void ) const {
	// Drop one reference; callers use NumRefs() to decide when to flush.
	--ref_count;
}
/*
====================
idMD5Anim::NumRefs
====================
*/
int idMD5Anim::NumRefs( void ) const {
	// Current number of outstanding references to this anim.
	const int refs = ref_count;
	return refs;
}
/*
====================
idMD5Anim::GetFrameBlend
====================
*/
void idMD5Anim::GetFrameBlend( int framenum, frameBlend_t &frame ) const {
	// Fills 'frame' so that it selects exactly one frame with no interpolation
	// (frontlerp 1, backlerp 0, both frame indices equal).
	frame.cycleCount = 0;
	frame.backlerp = 0.0f;
	frame.frontlerp = 1.0f;

	// convert the 1-based frame number to a 0-based index, clamped to range
	int index = framenum - 1;
	if ( index < 0 ) {
		index = 0;
	} else if ( index > numFrames - 1 ) {
		index = numFrames - 1;
	}
	frame.frame1 = index;
	frame.frame2 = index;
}
/*
====================
idMD5Anim::ConvertTimeToFrame
====================
*/
void idMD5Anim::ConvertTimeToFrame( int time, int cyclecount, frameBlend_t &frame ) const {
int frameTime;
int frameNum;
if ( numFrames <= 1 ) {
frame.frame1 = 0;
frame.frame2 = 0;
frame.backlerp = 0.0f;
frame.frontlerp = 1.0f;
frame.cycleCount = 0;
return;
}
if ( time <= 0 ) {
frame.frame1 = 0;
frame.frame2 = 1;
frame.backlerp = 0.0f;
frame.frontlerp = 1.0f;
frame.cycleCount = 0;
return;
}
frameTime = time * frameRate;
frameNum = frameTime / 1000;
frame.cycleCount = frameNum / ( numFrames - 1 );
if ( ( cyclecount > 0 ) && ( frame.cycleCount >= cyclecount ) ) {
frame.cycleCount = cyclecount - 1;
frame.frame1 = numFrames - 1;
frame.frame2 = frame.frame1;
frame.backlerp = 0.0f;
frame.frontlerp = 1.0f;
return;
}
frame.frame1 = frameNum % ( numFrames - 1 );
frame.frame2 = frame.frame1 + 1;
if ( frame.frame2 >= numFrames ) {
frame.frame2 = 0;
}
frame.backlerp = ( frameTime % 1000 ) * 0.001f;
frame.frontlerp = 1.0f - frame.backlerp;
}
/*
====================
idMD5Anim::GetOrigin
====================
*/
void idMD5Anim::GetOrigin( idVec3 &offset, int time, int cyclecount ) const {
frameBlend_t frame;
// start from the base-frame position of joint 0 (the origin joint)
offset[0] = idCompressedJointQuat::ShortToOffset( baseFrame[ 0 ].t[0] );
offset[1] = idCompressedJointQuat::ShortToOffset( baseFrame[ 0 ].t[1] );
offset[2] = idCompressedJointQuat::ShortToOffset( baseFrame[ 0 ].t[2] );
if ( !( jointInfo[ 0 ].animBits & ( ANIM_TX | ANIM_TY | ANIM_TZ ) ) ) {
// just use the baseframe
return;
}
ConvertTimeToFrame( time, cyclecount, frame );
// animated translation components for joint 0 are stored first, in x/y/z
// order, at firstComponent within each frame's component block
const short *componentPtr1 = &componentFrames[ numAnimatedComponents * frame.frame1 + jointInfo[ 0 ].firstComponent ];
const short *componentPtr2 = &componentFrames[ numAnimatedComponents * frame.frame2 + jointInfo[ 0 ].firstComponent ];
if ( jointInfo[ 0 ].animBits & ANIM_TX ) {
offset.x = idCompressedJointQuat::ShortToOffset( *componentPtr1 ) * frame.frontlerp + idCompressedJointQuat::ShortToOffset( *componentPtr2 ) * frame.backlerp;
componentPtr1++;
componentPtr2++;
}
if ( jointInfo[ 0 ].animBits & ANIM_TY ) {
offset.y = idCompressedJointQuat::ShortToOffset( *componentPtr1 ) * frame.frontlerp + idCompressedJointQuat::ShortToOffset( *componentPtr2 ) * frame.backlerp;
componentPtr1++;
componentPtr2++;
}
if ( jointInfo[ 0 ].animBits & ANIM_TZ ) {
offset.z = idCompressedJointQuat::ShortToOffset( *componentPtr1 ) * frame.frontlerp + idCompressedJointQuat::ShortToOffset( *componentPtr2 ) * frame.backlerp;
}
// accumulate the total move delta once per completed cycle so looping
// anims keep translating instead of snapping back
if ( frame.cycleCount ) {
offset += totaldelta * ( float )frame.cycleCount;
}
}
/*
====================
idMD5Anim::GetOriginRotation
====================
*/
// Returns the interpolated rotation of the origin (first) joint at the given
// time. Only the quaternion x/y/z components are stored; w is reconstructed
// with CalcW(). Non-animated components fall back to the base frame.
void idMD5Anim::GetOriginRotation( idQuat &rotation, int time, int cyclecount ) const {
frameBlend_t frame;
int animBits;
animBits = jointInfo[ 0 ].animBits;
if ( !( animBits & ( ANIM_QX | ANIM_QY | ANIM_QZ ) ) ) {
// just use the baseframe
rotation[0] = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[0] );
rotation[1] = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[1] );
rotation[2] = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[2] );
rotation.w = rotation.CalcW();
return;
}
ConvertTimeToFrame( time, cyclecount, frame );
const short *jointframe1 = &componentFrames[ numAnimatedComponents * frame.frame1 + jointInfo[ 0 ].firstComponent ];
const short *jointframe2 = &componentFrames[ numAnimatedComponents * frame.frame2 + jointInfo[ 0 ].firstComponent ];
// translation components (if present) precede the quaternion components
// in the per-frame data, so skip over them
if ( animBits & ANIM_TX ) {
jointframe1++;
jointframe2++;
}
if ( animBits & ANIM_TY ) {
jointframe1++;
jointframe2++;
}
if ( animBits & ANIM_TZ ) {
jointframe1++;
jointframe2++;
}
idQuat q1;
idQuat q2;
// build the two frame quaternions: animated components come from the frame
// data (packed in x,y,z order, only present bits stored), the rest from the
// base frame; each case reads exactly the number of shorts it consumes
switch( animBits & (ANIM_QX|ANIM_QY|ANIM_QZ) ) {
case ANIM_QX:
q1.x = idCompressedJointQuat::ShortToQuat( jointframe1[0] );
q2.x = idCompressedJointQuat::ShortToQuat( jointframe2[0] );
q1.y = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[1] );
q2.y = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[1] );
q1.z = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[2] );
q2.z = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[2] );
q1.w = q1.CalcW();
q2.w = q2.CalcW();
break;
case ANIM_QY:
q1.y = idCompressedJointQuat::ShortToQuat( jointframe1[0] );
q2.y = idCompressedJointQuat::ShortToQuat( jointframe2[0] );
q1.x = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[0] );
q2.x = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[0] );
q1.z = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[2] );
q2.z = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[2] );
q1.w = q1.CalcW();
q2.w = q2.CalcW();
break;
case ANIM_QZ:
q1.z = idCompressedJointQuat::ShortToQuat( jointframe1[0] );
q2.z = idCompressedJointQuat::ShortToQuat( jointframe2[0] );
q1.x = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[0] );
q2.x = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[0] );
q1.y = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[1] );
q2.y = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[1] );
q1.w = q1.CalcW();
q2.w = q2.CalcW();
break;
case ANIM_QX|ANIM_QY:
q1.x = idCompressedJointQuat::ShortToQuat( jointframe1[0] );
q1.y = idCompressedJointQuat::ShortToQuat( jointframe1[1] );
q2.x = idCompressedJointQuat::ShortToQuat( jointframe2[0] );
q2.y = idCompressedJointQuat::ShortToQuat( jointframe2[1] );
q1.z = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[2] );
q2.z = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[2] );
q1.w = q1.CalcW();
q2.w = q2.CalcW();
break;
case ANIM_QX|ANIM_QZ:
q1.x = idCompressedJointQuat::ShortToQuat( jointframe1[0] );
q1.z = idCompressedJointQuat::ShortToQuat( jointframe1[1] );
q2.x = idCompressedJointQuat::ShortToQuat( jointframe2[0] );
q2.z = idCompressedJointQuat::ShortToQuat( jointframe2[1] );
q1.y = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[1] );
q2.y = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[1] );
q1.w = q1.CalcW();
q2.w = q2.CalcW();
break;
case ANIM_QY|ANIM_QZ:
q1.y = idCompressedJointQuat::ShortToQuat( jointframe1[0] );
q1.z = idCompressedJointQuat::ShortToQuat( jointframe1[1] );
q2.y = idCompressedJointQuat::ShortToQuat( jointframe2[0] );
q2.z = idCompressedJointQuat::ShortToQuat( jointframe2[1] );
q1.x = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[0] );
q2.x = idCompressedJointQuat::ShortToQuat( baseFrame[ 0 ].q[0] );
q1.w = q1.CalcW();
q2.w = q2.CalcW();
break;
case ANIM_QX|ANIM_QY|ANIM_QZ:
q1.x = idCompressedJointQuat::ShortToQuat( jointframe1[0] );
q1.y = idCompressedJointQuat::ShortToQuat( jointframe1[1] );
q1.z = idCompressedJointQuat::ShortToQuat( jointframe1[2] );
q2.x = idCompressedJointQuat::ShortToQuat( jointframe2[0] );
q2.y = idCompressedJointQuat::ShortToQuat( jointframe2[1] );
q2.z = idCompressedJointQuat::ShortToQuat( jointframe2[2] );
q1.w = q1.CalcW();
q2.w = q2.CalcW();
break;
}
// spherical interpolation between the two frame rotations
rotation.Slerp( q1, q2, frame.backlerp );
}
/*
====================
idMD5Anim::GetBounds
====================
*/
// Returns the bounds at the given time: the union of the two surrounding
// frames' bounds, translated so they are relative to the origin joint.
void idMD5Anim::GetBounds( idBounds &bnds, int time, int cyclecount ) const {
frameBlend_t frame;
idVec3 offset;
ConvertTimeToFrame( time, cyclecount, frame );
// union of both frames' bounds so the result covers the blended pose
bnds = bounds[ frame.frame1 ].ToBounds();
bnds.AddBounds( bounds[ frame.frame2 ].ToBounds() );
// origin position
offset[0] = idCompressedJointQuat::ShortToOffset( baseFrame[ 0 ].t[0] );
offset[1] = idCompressedJointQuat::ShortToOffset( baseFrame[ 0 ].t[1] );
offset[2] = idCompressedJointQuat::ShortToOffset( baseFrame[ 0 ].t[2] );
if ( jointInfo[ 0 ].animBits & ( ANIM_TX | ANIM_TY | ANIM_TZ ) ) {
// same interpolation as GetOrigin: lerp each animated translation component
const short *componentPtr1 = &componentFrames[ numAnimatedComponents * frame.frame1 + jointInfo[ 0 ].firstComponent ];
const short *componentPtr2 = &componentFrames[ numAnimatedComponents * frame.frame2 + jointInfo[ 0 ].firstComponent ];
if ( jointInfo[ 0 ].animBits & ANIM_TX ) {
offset.x = idCompressedJointQuat::ShortToOffset( *componentPtr1 ) * frame.frontlerp + idCompressedJointQuat::ShortToOffset( *componentPtr2 ) * frame.backlerp;
componentPtr1++;
componentPtr2++;
}
if ( jointInfo[ 0 ].animBits & ANIM_TY ) {
offset.y = idCompressedJointQuat::ShortToOffset( *componentPtr1 ) * frame.frontlerp + idCompressedJointQuat::ShortToOffset( *componentPtr2 ) * frame.backlerp;
componentPtr1++;
componentPtr2++;
}
if ( jointInfo[ 0 ].animBits & ANIM_TZ ) {
offset.z = idCompressedJointQuat::ShortToOffset( *componentPtr1 ) * frame.frontlerp + idCompressedJointQuat::ShortToOffset( *componentPtr2 ) * frame.backlerp;
}
}
// make the bounds relative to the (possibly moving) origin joint
bnds[ 0 ] -= offset;
bnds[ 1 ] -= offset;
}
/*
====================
idMD5Anim::GetInterpolatedFrame
====================
*/
// Builds the interpolated joint pose for 'frame'. Decompresses frame1 for
// every joint and frame2 only for the joints listed in 'index' that are
// actually animated, then blends the latter by frame.backlerp. Joints not in
// 'index' (or with no animated components) keep their base-frame pose.
void idMD5Anim::GetInterpolatedFrame( frameBlend_t &frame, idJointQuat *joints, const int *index, int numIndexes ) const {
int i, numLerpJoints;
const short * frame1;
const short * frame2;
const short * jointframe1;
const short * jointframe2;
const jointAnimInfo_t * infoPtr;
int animBits;
idJointQuat * blendJoints;
idCompressedJointQuat * compressedJoints;
idCompressedJointQuat * compressedBlendJoints;
idCompressedJointQuat * jointPtr;
idCompressedJointQuat * blendPtr;
int * lerpIndex;
int * baseIndex;
// FIXME: have global static ?
// index with all joints
baseIndex = (int *)_alloca16( baseFrame.Num() * sizeof( baseIndex[ 0 ] ) );
for ( i = 0; i < baseFrame.Num(); i++ ) {
baseIndex[i] = i;
}
if ( !numAnimatedComponents ) {
// just use the base frame
SIMDProcessor->DecompressJoints( joints, baseFrame.Begin(), baseIndex, baseFrame.Num() );
return;
}
// seed both scratch arrays from the base frame; animated components are
// overwritten below, everything else stays at the base pose
compressedJoints = (idCompressedJointQuat *)_alloca16( baseFrame.Num() * sizeof( compressedJoints[0] ) );
compressedBlendJoints = (idCompressedJointQuat *)_alloca16( baseFrame.Num() * sizeof( compressedBlendJoints[0] ) );
SIMDProcessor->Memcpy( compressedJoints, baseFrame.Begin(), baseFrame.Num() * sizeof( compressedJoints[0] ) );
lerpIndex = (int *)_alloca16( baseFrame.Num() * sizeof( lerpIndex[ 0 ] ) );
numLerpJoints = 0;
frame1 = &componentFrames[ frame.frame1 * numAnimatedComponents ];
frame2 = &componentFrames[ frame.frame2 * numAnimatedComponents ];
// delta decompression relative to base frame
for ( i = 0; i < numIndexes; i++ ) {
int j = index[i];
infoPtr = &jointInfo[j];
animBits = infoPtr->animBits;
if ( animBits == 0 ) {
continue;
}
jointPtr = &compressedJoints[j];
blendPtr = &compressedBlendJoints[j];
lerpIndex[numLerpJoints++] = j;
jointframe1 = frame1 + infoPtr->firstComponent;
jointframe2 = frame2 + infoPtr->firstComponent;
// copy the animated translation components from both frames; components
// not animated for this joint come from the base frame (already in jointPtr)
switch( animBits & (ANIM_TX|ANIM_TY|ANIM_TZ) ) {
case 0:
blendPtr->t[0] = jointPtr->t[0];
blendPtr->t[1] = jointPtr->t[1];
blendPtr->t[2] = jointPtr->t[2];
break;
case ANIM_TX:
jointPtr->t[0] = jointframe1[0];
blendPtr->t[0] = jointframe2[0];
blendPtr->t[1] = jointPtr->t[1];
blendPtr->t[2] = jointPtr->t[2];
jointframe1++;
jointframe2++;
break;
case ANIM_TY:
jointPtr->t[1] = jointframe1[0];
blendPtr->t[1] = jointframe2[0];
blendPtr->t[0] = jointPtr->t[0];
blendPtr->t[2] = jointPtr->t[2];
jointframe1++;
jointframe2++;
break;
case ANIM_TZ:
jointPtr->t[2] = jointframe1[0];
blendPtr->t[2] = jointframe2[0];
blendPtr->t[0] = jointPtr->t[0];
blendPtr->t[1] = jointPtr->t[1];
jointframe1++;
jointframe2++;
break;
case ANIM_TX|ANIM_TY:
jointPtr->t[0] = jointframe1[0];
jointPtr->t[1] = jointframe1[1];
blendPtr->t[0] = jointframe2[0];
blendPtr->t[1] = jointframe2[1];
blendPtr->t[2] = jointPtr->t[2];
jointframe1 += 2;
jointframe2 += 2;
break;
case ANIM_TX|ANIM_TZ:
jointPtr->t[0] = jointframe1[0];
jointPtr->t[2] = jointframe1[1];
blendPtr->t[0] = jointframe2[0];
blendPtr->t[2] = jointframe2[1];
blendPtr->t[1] = jointPtr->t[1];
jointframe1 += 2;
jointframe2 += 2;
break;
case ANIM_TY|ANIM_TZ:
jointPtr->t[1] = jointframe1[0];
jointPtr->t[2] = jointframe1[1];
blendPtr->t[1] = jointframe2[0];
blendPtr->t[2] = jointframe2[1];
blendPtr->t[0] = jointPtr->t[0];
jointframe1 += 2;
jointframe2 += 2;
break;
case ANIM_TX|ANIM_TY|ANIM_TZ:
jointPtr->t[0] = jointframe1[0];
jointPtr->t[1] = jointframe1[1];
jointPtr->t[2] = jointframe1[2];
blendPtr->t[0] = jointframe2[0];
blendPtr->t[1] = jointframe2[1];
blendPtr->t[2] = jointframe2[2];
jointframe1 += 3;
jointframe2 += 3;
break;
}
// same for the quaternion components (stored after the translations)
switch( animBits & (ANIM_QX|ANIM_QY|ANIM_QZ) ) {
case 0:
blendPtr->q[0] = jointPtr->q[0];
blendPtr->q[1] = jointPtr->q[1];
blendPtr->q[2] = jointPtr->q[2];
break;
case ANIM_QX:
jointPtr->q[0] = jointframe1[0];
blendPtr->q[0] = jointframe2[0];
blendPtr->q[1] = jointPtr->q[1];
blendPtr->q[2] = jointPtr->q[2];
break;
case ANIM_QY:
jointPtr->q[1] = jointframe1[0];
blendPtr->q[1] = jointframe2[0];
blendPtr->q[0] = jointPtr->q[0];
blendPtr->q[2] = jointPtr->q[2];
break;
case ANIM_QZ:
jointPtr->q[2] = jointframe1[0];
blendPtr->q[2] = jointframe2[0];
blendPtr->q[0] = jointPtr->q[0];
blendPtr->q[1] = jointPtr->q[1];
break;
case ANIM_QX|ANIM_QY:
jointPtr->q[0] = jointframe1[0];
jointPtr->q[1] = jointframe1[1];
blendPtr->q[0] = jointframe2[0];
blendPtr->q[1] = jointframe2[1];
blendPtr->q[2] = jointPtr->q[2];
break;
case ANIM_QX|ANIM_QZ:
jointPtr->q[0] = jointframe1[0];
jointPtr->q[2] = jointframe1[1];
blendPtr->q[0] = jointframe2[0];
blendPtr->q[2] = jointframe2[1];
blendPtr->q[1] = jointPtr->q[1];
break;
case ANIM_QY|ANIM_QZ:
jointPtr->q[1] = jointframe1[0];
jointPtr->q[2] = jointframe1[1];
blendPtr->q[1] = jointframe2[0];
blendPtr->q[2] = jointframe2[1];
blendPtr->q[0] = jointPtr->q[0];
break;
case ANIM_QX|ANIM_QY|ANIM_QZ:
jointPtr->q[0] = jointframe1[0];
jointPtr->q[1] = jointframe1[1];
jointPtr->q[2] = jointframe1[2];
blendPtr->q[0] = jointframe2[0];
blendPtr->q[1] = jointframe2[1];
blendPtr->q[2] = jointframe2[2];
break;
}
}
// decompress all joints from frame1, only the lerped ones from frame2,
// then blend frame2 in by backlerp
blendJoints = (idJointQuat *)_alloca16( baseFrame.Num() * sizeof( blendJoints[ 0 ] ) );
SIMDProcessor->DecompressJoints( joints, compressedJoints, baseIndex, baseFrame.Num() );
SIMDProcessor->DecompressJoints( blendJoints, compressedBlendJoints, lerpIndex, numLerpJoints );
SIMDProcessor->BlendJoints( joints, blendJoints, frame.backlerp, lerpIndex, numLerpJoints );
if ( frame.cycleCount ) {
// advance the origin joint by the accumulated per-cycle move delta
joints[ 0 ].t += totaldelta * ( float )frame.cycleCount;
}
}
/*
====================
idMD5Anim::GetSingleFrame
====================
*/
// Builds the joint pose for a single frame (no interpolation). Frame 0 (or a
// fully static anim) is just the decompressed base frame; otherwise the
// animated components of the joints listed in 'index' are overridden from the
// frame data before decompression.
void idMD5Anim::GetSingleFrame( int framenum, idJointQuat *joints, const int *index, int numIndexes ) const {
int i;
const short * frame;
const short * jointframe;
int animBits;
idCompressedJointQuat * compressedJoints;
idCompressedJointQuat * jointPtr;
const jointAnimInfo_t * infoPtr;
int * baseIndex;
// FIXME: have global static ?
// index with all joints
baseIndex = (int *)_alloca16( baseFrame.Num() * sizeof( baseIndex[ 0 ] ) );
for ( i = 0; i < baseFrame.Num(); i++ ) {
baseIndex[i] = i;
}
if ( ( framenum == 0 ) || !numAnimatedComponents ) {
// just use the base frame
SIMDProcessor->DecompressJoints( joints, baseFrame.Begin(), baseIndex, baseFrame.Num() );
return;
}
// start from a copy of the base frame; animated components are patched in
compressedJoints = (idCompressedJointQuat *)_alloca16( baseFrame.Num() * sizeof( compressedJoints[0] ) );
SIMDProcessor->Memcpy( compressedJoints, baseFrame.Begin(), baseFrame.Num() * sizeof( baseFrame[0] ) );
frame = &componentFrames[ framenum * numAnimatedComponents ];
// delta decompression relative to base frame
for ( i = 0; i < numIndexes; i++ ) {
int j = index[i];
infoPtr = &jointInfo[j];
animBits = infoPtr->animBits;
if ( animBits == 0 ) {
continue;
}
jointPtr = &compressedJoints[j];
jointframe = frame + infoPtr->firstComponent;
// translation components stored first, in x/y/z order for the set bits
switch( animBits & (ANIM_TX|ANIM_TY|ANIM_TZ) ) {
case 0:
break;
case ANIM_TX:
jointPtr->t[0] = jointframe[0];
jointframe++;
break;
case ANIM_TY:
jointPtr->t[1] = jointframe[0];
jointframe++;
break;
case ANIM_TZ:
jointPtr->t[2] = jointframe[0];
jointframe++;
break;
case ANIM_TX|ANIM_TY:
jointPtr->t[0] = jointframe[0];
jointPtr->t[1] = jointframe[1];
jointframe += 2;
break;
case ANIM_TX|ANIM_TZ:
jointPtr->t[0] = jointframe[0];
jointPtr->t[2] = jointframe[1];
jointframe += 2;
break;
case ANIM_TY|ANIM_TZ:
jointPtr->t[1] = jointframe[0];
jointPtr->t[2] = jointframe[1];
jointframe += 2;
break;
case ANIM_TX|ANIM_TY|ANIM_TZ:
jointPtr->t[0] = jointframe[0];
jointPtr->t[1] = jointframe[1];
jointPtr->t[2] = jointframe[2];
jointframe += 3;
break;
}
// quaternion components follow the translations
switch( animBits & (ANIM_QX|ANIM_QY|ANIM_QZ) ) {
case 0:
break;
case ANIM_QX:
jointPtr->q[0] = jointframe[0];
break;
case ANIM_QY:
jointPtr->q[1] = jointframe[0];
break;
case ANIM_QZ:
jointPtr->q[2] = jointframe[0];
break;
case ANIM_QX|ANIM_QY:
jointPtr->q[0] = jointframe[0];
jointPtr->q[1] = jointframe[1];
break;
case ANIM_QX|ANIM_QZ:
jointPtr->q[0] = jointframe[0];
jointPtr->q[2] = jointframe[1];
break;
case ANIM_QY|ANIM_QZ:
jointPtr->q[1] = jointframe[0];
jointPtr->q[2] = jointframe[1];
break;
case ANIM_QX|ANIM_QY|ANIM_QZ:
jointPtr->q[0] = jointframe[0];
jointPtr->q[1] = jointframe[1];
jointPtr->q[2] = jointframe[2];
break;
}
}
SIMDProcessor->DecompressJoints( joints, compressedJoints, baseIndex, baseFrame.Num() );
}
/*
====================
idMD5Anim::CheckModelHierarchy
====================
*/
// Verifies that the render model's skeleton matches this anim: same joint
// count, same joint names in the same order, same parent hierarchy.
// Any mismatch is a fatal error.
void idMD5Anim::CheckModelHierarchy( const idRenderModel *model ) const {
int i;
int jointNum;
int parent;
if ( jointInfo.Num() != model->NumJoints() ) {
gameLocal.Error( "Model '%s' has different # of joints than anim '%s'", model->Name(), name.c_str() );
}
const idMD5Joint *modelJoints = model->GetJoints();
for( i = 0; i < jointInfo.Num(); i++ ) {
jointNum = jointInfo[ i ].nameIndex;
if ( modelJoints[ i ].name != animationLib.JointName( jointNum ) ) {
gameLocal.Error( "Model '%s''s joint names don't match anim '%s''s", model->Name(), name.c_str() );
}
// model joints store a pointer to their parent; convert to an index via
// pointer difference against the start of the joint array (-1 = root)
if ( modelJoints[ i ].parent ) {
parent = modelJoints[ i ].parent - modelJoints;
} else {
parent = -1;
}
if ( parent != jointInfo[ i ].parentNum ) {
gameLocal.Error( "Model '%s' has different joint hierarchy than anim '%s'", model->Name(), name.c_str() );
}
}
}
/***********************************************************************
idAnimManager
***********************************************************************/
/*
====================
idAnimManager::idAnimManager
====================
*/
idAnimManager::idAnimManager() {
// nothing to initialize; member containers default-construct themselves
}
/*
====================
idAnimManager::~idAnimManager
====================
*/
idAnimManager::~idAnimManager() {
// release all cached anims and joint name storage
Shutdown();
}
/*
====================
idAnimManager::Shutdown
====================
*/
void idAnimManager::Shutdown( void ) {
// delete every cached idMD5Anim (DeleteContents frees the pointed-to anims)
animations.DeleteContents();
// drop the shared joint-name table and its hash index
jointnames.Clear();
jointnamesHash.Free();
}
/*
==============
idMD5Anim::LoadAnimBinary
==============
*/
/*
Loads a pregenerated binary anim (.animb). Returns false if the file is
missing or has the wrong ident/version, in which case the caller falls back
to parsing the text .md5anim.
*/
bool idMD5Anim::LoadAnimBinary( const char *filename ) {
	int ident, version, num;

	idFile* file = fileSystem->OpenFileRead( filename );
	if ( file == NULL ) {
		// common->Warning( "Couldn't load binary anim, %s", filename );
		return false;
	}

#if defined( SD_BUFFERED_FILE_LOADS )
	file = fileSystem->OpenBufferedFile( file );
#endif

	Free();

	file->ReadInt( ident );
	if ( ident != ANIMB_IDENT ) {
		common->Warning( "idMD5Anim::LoadAnimBinary : unknown fileid on '%s'", filename );
		// FIX: close the file before bailing out; the original leaked the handle here
		fileSystem->CloseFile( file );
		return false;
	}

	file->ReadInt( version );
	if ( version != ANIMB_VERSION ) {
		common->Warning( "idMD5Anim::LoadAnimBinary : wrong version on '%s' (%i should be %i)", filename, version, ANIMB_VERSION );
		// FIX: close the file before bailing out; the original leaked the handle here
		fileSystem->CloseFile( file );
		return false;
	}

	// header
	file->ReadInt( numFrames );
	file->ReadInt( frameRate );
	file->ReadInt( animLength );
	file->ReadInt( numJoints );
	file->ReadInt( numAnimatedComponents );

	// per-frame bounds (6 shorts each)
	file->ReadInt( num );
	bounds.SetGranularity( 1 );
	bounds.SetNum( num );
	for ( int i = 0; i < num; i++ ) {
		short list[6];
		file->ReadShort( list[0] );
		file->ReadShort( list[1] );
		file->ReadShort( list[2] );
		file->ReadShort( list[3] );
		file->ReadShort( list[4] );
		file->ReadShort( list[5] );
		bounds[i].SetBounds( list );
	}

	// joint info; names are resolved to indices in the shared name table
	file->ReadInt( num );
	jointInfo.SetGranularity( 1 );
	jointInfo.SetNum( num );
	idStr temp;
	for ( int i = 0; i < num; i++ ) {
		file->ReadString( temp );
		jointInfo[i].nameIndex = animationLib.JointIndex( temp );
		file->ReadShort( jointInfo[i].parentNum );
		file->ReadShort( jointInfo[i].animBits );
		file->ReadShort( jointInfo[i].firstComponent );
	}

	// base frame (compressed joint quats, q before t — matches WriteAnimBinary)
	file->ReadInt( num );
	baseFrame.SetGranularity( 1 );
	baseFrame.SetNum( num );
	for ( int i = 0; i < num; i++ ) {
		file->ReadShort( baseFrame[i].q[0] );
		file->ReadShort( baseFrame[i].q[1] );
		file->ReadShort( baseFrame[i].q[2] );
		file->ReadShort( baseFrame[i].t[0] );
		file->ReadShort( baseFrame[i].t[1] );
		file->ReadShort( baseFrame[i].t[2] );
	}

	// raw animated component stream
	file->ReadInt( num );
	componentFrames.SetGranularity( 1 );
	componentFrames.SetNum( num );
	for ( int i = 0; i < num; i++ ) {
		file->ReadShort( componentFrames[i] );
	}

	file->ReadString( name );
	file->ReadVec3( totaldelta );
	fileSystem->CloseFile( file );

	if ( numFrames > 4 && numAnimatedComponents && anim_reduced.GetBool() ) {
		Resample();
	}
	return true;
}
/*
==============
idMD5Anim::WriteAnimBinary
==============
*/
bool idMD5Anim::WriteAnimBinary( const char *filename ) {
int num;
idStr str = filename;
str.StripFileExtension();
str = str + ".animb";
idFile* file = fileSystem->OpenFileWrite( str.c_str(), "fs_savepath" );
if ( file == NULL ) {
return false;
}
file->WriteInt( ANIMB_IDENT );
file->WriteInt( ANIMB_VERSION );
file->WriteInt( numFrames );
file->WriteInt( frameRate );
file->WriteInt( animLength );
file->WriteInt( numJoints );
file->WriteInt( numAnimatedComponents );
num = bounds.Num();
file->WriteInt( num );
for ( int i = 0; i < num; i++ ) {
const short *list = bounds[i].GetBounds();
file->WriteShort( list[0] );
file->WriteShort( list[1] );
file->WriteShort( list[2] );
file->WriteShort( list[3] );
file->WriteShort( list[4] );
file->WriteShort( list[5] );
}
num = jointInfo.Num();
file->WriteInt( num );
for ( int i=0; i<num; i++ ) {
jointAnimInfo_t animInfo = jointInfo[i];
file->WriteString( animationLib.JointName( animInfo.nameIndex ) );
file->WriteShort( animInfo.parentNum );
file->WriteShort( animInfo.animBits );
file->WriteShort( animInfo.firstComponent );
}
num = baseFrame.Num();
file->WriteInt( num );
for ( int i=0; i<num; i++ ) {
idCompressedJointQuat jointQuat = baseFrame[i];
file->WriteShort( jointQuat.q[0] );
file->WriteShort( jointQuat.q[1] );
file->WriteShort( jointQuat.q[2] );
file->WriteShort( jointQuat.t[0] );
file->WriteShort( jointQuat.t[1] );
file->WriteShort( jointQuat.t[2] );
}
num = componentFrames.Num();
file->WriteInt( num );
for ( int i=0; i<num; i++ ) {
file->WriteShort( componentFrames[i] );
}
file->WriteString( name );
file->WriteVec3( totaldelta );
fileSystem->CloseFile( file );
return true;
}
/*
====================
idAnimManager::GetAnim
====================
*/
// Returns the cached anim for 'name', loading it on first request. Tries the
// pregenerated binary .animb first (when r_loadAnimB is set), falling back to
// the text .md5anim. A failed load is cached as NULL so it isn't retried.
idMD5Anim *idAnimManager::GetAnim( const char *name ) {
idMD5Anim **animptrptr;
idMD5Anim *anim;
bool loaded = false;
// see if it has been asked for before
animptrptr = NULL;
if ( animations.Get( name, &animptrptr ) ) {
anim = *animptrptr;
} else {
idStr extension;
idStr filename = name;
filename.ExtractFileExtension( extension );
if ( extension != MD5_ANIM_EXT ) {
// only .md5anim files are handled here
return NULL;
}
anim = new idMD5Anim();
if ( r_loadAnimB.GetBool() ) {
// try the pregenerated binary version first
idStr animbName = va( PREGENERATED_BASEDIR "/animb/%s", name );
animbName.StripFileExtension();
animbName = animbName + ".animb";
loaded = anim->LoadAnimBinary( animbName );
}
if ( !loaded ) {
// fall back to parsing the text .md5anim
if ( !anim->LoadAnim( filename ) ) {
gameLocal.Warning( "Couldn't load anim: '%s'", filename.c_str() );
delete anim;
anim = NULL;
}
}
if ( r_writeAnimB.GetBool() && anim ) {
// Write binary file
idStr fullPath, relativePath;
relativePath = va( PREGENERATED_BASEDIR "/animb/%s", name );
anim->WriteAnimBinary( relativePath );
}
// cache the result (including NULL on failure, so we don't retry)
animations.Set( filename, anim );
}
return anim;
}
/*
================
idAnimManager::ReloadAnims
================
*/
void idAnimManager::ReloadAnims( void ) {
int i;
idMD5Anim **animptr;
for ( i = 0; i < animations.Num(); i++ ) {
animptr = animations.GetIndex( i );
if ( animptr && *animptr ) {
( *animptr )->Reload();
}
}
}
/*
================
idAnimManager::JointIndex
================
*/
int idAnimManager::JointIndex( const char *name ) {
int i, hash;
hash = jointnamesHash.GenerateKey( name );
for ( i = jointnamesHash.GetFirst( hash ); i != -1; i = jointnamesHash.GetNext( i ) ) {
if ( jointnames[i].Cmp( name ) == 0 ) {
return i;
}
}
i = jointnames.Append( name );
jointnamesHash.Add( hash, i );
return i;
}
/*
================
idAnimManager::JointName
================
*/
const char *idAnimManager::JointName( int index ) const {
return jointnames[ index ];
}
/*
================
idAnimManager::ListAnims
================
*/
void idAnimManager::ListAnims( void ) const {
int i;
idMD5Anim* const* animptr;
idMD5Anim* anim;
size_t size;
size_t s;
size_t namesize;
int num;
num = 0;
size = 0;
for ( i = 0; i < animations.Num(); i++ ) {
animptr = animations.GetIndex( i );
if ( animptr && *animptr ) {
anim = *animptr;
s = anim->Size();
gameLocal.Printf( "%8d bytes : %2d refs : %s\n", s, anim->NumRefs(), anim->Name() );
size += s;
num++;
}
}
namesize = jointnames.Size() + jointnamesHash.Size();
for( i = 0; i < jointnames.Num(); i++ ) {
namesize += jointnames[ i ].Size();
}
gameLocal.Printf( "\n%d memory used in %d anims\n", size, num );
gameLocal.Printf( "%d memory used in %d joint names\n", namesize, jointnames.Num() );
}
/*
================
idAnimManager::FlushUnusedAnims
================
*/
void idAnimManager::FlushUnusedAnims( void ) {
int i;
idMD5Anim **animptr;
idList<idMD5Anim *> removeAnims;
for ( i = 0; i < animations.Num(); i++ ) {
animptr = animations.GetIndex( i );
if ( animptr && *animptr ) {
if ( ( *animptr )->NumRefs() <= 0 ) {
removeAnims.Append( *animptr );
}
}
}
for( i = 0; i < removeAnims.Num(); i++ ) {
animations.Remove( removeAnims[ i ]->Name() );
delete removeAnims[ i ];
}
}
| 28.697088 | 162 | 0.604609 | JasonHutton |
f1bee58ab434b40987cd26f207f244d178b46bff | 1,334 | hpp | C++ | clstatphys/clstatphys/tools/auto_correlation_function.hpp | FIshikawa/ClassicalStatPhys | e4010480d3c7977829c1b3fdeaf51401a2409373 | [
"MIT"
] | null | null | null | clstatphys/clstatphys/tools/auto_correlation_function.hpp | FIshikawa/ClassicalStatPhys | e4010480d3c7977829c1b3fdeaf51401a2409373 | [
"MIT"
] | 2 | 2020-01-21T08:54:05.000Z | 2020-01-21T09:29:10.000Z | clstatphys/clstatphys/tools/auto_correlation_function.hpp | FIshikawa/ClassicalStatPhys | e4010480d3c7977829c1b3fdeaf51401a2409373 | [
"MIT"
] | 2 | 2020-07-18T03:36:32.000Z | 2021-07-21T22:58:27.000Z | #ifndef AUTO_CORRELATION_FUNCTION_HPP
#define AUTO_CORRELATION_FUNCTION_HPP
#include <string>
#include <vector>
#include <cmath>
namespace correlation{
class AutoCorrelationFunction{
public:
AutoCorrelationFunction(int dim=1, int Nl=1) : dim_(dim), Nl_(Nl), counter_(0), correlation_(dim,0.0), mean_(dim,0.0){}
void initialize(int dim, int Nl){
counter_ = 0;
dim_ = dim;
Nl_ = Nl;
correlation_.resize(dim);
mean_.resize(dim);
for(int i = 0 ; i < dim; ++i){
correlation_[i] = 0.0;
mean_[i] = 0.0;
}
}
void operator<< (const double value) {
if(counter_ == 0 || counter_ > dim_ - 1 ){
value_init_ = value;
counter_ = 0;
}
mean_[counter_] += value / Nl_;
correlation_[counter_] += value * value_init_ / Nl_;
counter_++ ;
}
std::vector<double> result(){
std::vector<double> acf_t(dim_, 0.0);
for(int i = 0; i < dim_ ; ++i) acf_t[i] = correlation_[i] - mean_[counter_] * mean_[0];
return acf_t;
}
void calc( std::vector<double>const& z, std::vector<double>& ACF) const {
double N = (double)dim_ ;
for (int i = 0; i < dim_ ; ++i) ACF[i] += z[i]*z[0]/Nl_ ;
}
private:
int dim_, Nl_, counter_;
double value_init_;
std::vector<double> correlation_, mean_;
};
} //end namespace
#endif // AUTO_CORRELATION_FUNCTION_HPP
| 23.821429 | 121 | 0.623688 | FIshikawa |
f1c3743bb7c377984f1883af63580bbba63f752d | 1,839 | cc | C++ | device/serial/buffer.cc | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | device/serial/buffer.cc | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | device/serial/buffer.cc | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/numerics/safe_conversions.h"
#include "device/serial/buffer.h"
#include "net/base/io_buffer.h"
namespace device {
ReadOnlyBuffer::~ReadOnlyBuffer() = default;
WritableBuffer::~WritableBuffer() = default;
SendBuffer::SendBuffer(const std::vector<uint8_t>& data,
SendCompleteCallback callback)
: data_(data), callback_(std::move(callback)) {}
SendBuffer::~SendBuffer() = default;
const uint8_t* SendBuffer::GetData() {
return data_.data();
}
uint32_t SendBuffer::GetSize() {
return base::checked_cast<uint32_t>(data_.size());
}
void SendBuffer::Done(uint32_t bytes_read) {
std::move(callback_).Run(bytes_read, device::mojom::SerialSendError::NONE);
}
void SendBuffer::DoneWithError(uint32_t bytes_read, int32_t error) {
std::move(callback_).Run(bytes_read,
static_cast<device::mojom::SerialSendError>(error));
}
ReceiveBuffer::ReceiveBuffer(scoped_refptr<net::IOBuffer> buffer,
uint32_t size,
ReceiveCompleteCallback callback)
: buffer_(buffer), size_(size), callback_(std::move(callback)) {}
ReceiveBuffer::~ReceiveBuffer() = default;
char* ReceiveBuffer::GetData() {
return buffer_->data();
}
uint32_t ReceiveBuffer::GetSize() {
return size_;
}
void ReceiveBuffer::Done(uint32_t bytes_written) {
std::move(callback_).Run(bytes_written,
device::mojom::SerialReceiveError::NONE);
}
void ReceiveBuffer::DoneWithError(uint32_t bytes_written, int32_t error) {
std::move(callback_).Run(
bytes_written, static_cast<device::mojom::SerialReceiveError>(error));
}
} // namespace device
| 28.734375 | 79 | 0.69603 | zipated |
f1c408765e190153494a9c37b38d278a39d71c1e | 351 | cpp | C++ | module-01/ex01/ZombieHorde.cpp | kotabrog/CPP-module | db858e57ac194d4ca9b38667ff3820418b42e9c8 | [
"MIT"
] | 1 | 2021-09-05T14:59:20.000Z | 2021-09-05T14:59:20.000Z | module-01/ex01/ZombieHorde.cpp | kotabrog/CPP-module | db858e57ac194d4ca9b38667ff3820418b42e9c8 | [
"MIT"
] | null | null | null | module-01/ex01/ZombieHorde.cpp | kotabrog/CPP-module | db858e57ac194d4ca9b38667ff3820418b42e9c8 | [
"MIT"
] | null | null | null | #include "Zombie.hpp"
Zombie* zombieHorde( int N, std::string name )
{
Zombie *z;
if (N < 0)
return (NULL);
try
{
z = new Zombie[N];
}
catch(const std::exception& e)
{
std::cerr << e.what() << '\n';
exit(1);
}
while (N--)
{
z[N].set_name(name);
}
return z;
}
| 13.5 | 46 | 0.435897 | kotabrog |
f1c8958542fa299f3a4e485f1ab73146a8dd6dec | 5,636 | cpp | C++ | utils/nlxml_transformer.cpp | Twinklebear/nlxml | 2044a89c8b0b1d69c7762dc48406770f4b7c9834 | [
"MIT"
] | null | null | null | utils/nlxml_transformer.cpp | Twinklebear/nlxml | 2044a89c8b0b1d69c7762dc48406770f4b7c9834 | [
"MIT"
] | 1 | 2019-03-18T00:37:08.000Z | 2019-03-18T21:45:35.000Z | utils/nlxml_transformer.cpp | Twinklebear/nlxml | 2044a89c8b0b1d69c7762dc48406770f4b7c9834 | [
"MIT"
] | null | null | null | #include <iostream>
#include <algorithm>
#include <functional>
#include <glm/glm.hpp>
#include <glm/ext.hpp>
#include "nlxml.h"
using namespace nlxml;
template<typename F>
void transform_neuron_data(nlxml::NeuronData &inout, const F &transform) {
auto transform_points = [&](auto &points) {
std::transform(std::begin(points), std::end(points), std::begin(points), transform);
};
auto transform_markers = [&](auto &markers) {
for (auto &m : markers) {
transform_points(m.points);
}
};
std::function<void(nlxml::Branch &)> transform_branch = [&](nlxml::Branch &b) {
transform_points(b.points);
transform_markers(b.markers);
for (auto &child_b : b.branches) {
transform_branch(child_b);
}
};
for (auto &t : inout.trees) {
transform_points(t.points);
for (auto &b : t.branches) {
transform_branch(b);
}
transform_markers(t.markers);
}
transform_markers(inout.markers);
}
/* This program will take an NLXML file and apply its image transform
* to all its points, taking its transform to identity.
*
* the -to-space flag can be used to bring you into another file's space,
* by taking a the input file (with an assumed identity transform) and
* applying the inverse of the to space file's transform to its points
* and setting the to space file's transform as the input file's transform
*
* the -make-nl-start will generate a starting point neurolucida file we
* can send to Fred, with a single marker at the starting point instead of
* other points.
*
* the -flip-z will flip the z coordinates of all points in the file
*/
int main(int argc, char **argv) {
std::string input, output, to_space, apply;
bool make_nl_start = false;
bool flip_z = false;
for (int i = 1; i < argc; ++i) {
if (std::strcmp(argv[i], "-o") == 0) {
output = argv[++i];
} else if (std::strcmp(argv[i], "-to-space") == 0) {
to_space = argv[++i];
} else if (std::strcmp(argv[i], "-apply") == 0) {
apply = argv[++i];
} else if (std::strcmp(argv[i], "-h") == 0) {
std::cout << "Usage: ./nlxml_transformer <input> -o <output> [-to-space <file>] [-make-nl-start]"
<< " [-apply <file>]\n"
<< "\t-to-space will transform the input into the space of the specified file\n"
<< "\t-apply will take the transform from the file and apply it to this one\n"
<< "\t-make-nl-start will turn the first point on the tree in the file into a marker\n"
<< "\t-flip-z will flip the z coordinates of all points\n";
return 0;
} else if (std::strcmp(argv[i], "-make-nl-start") == 0) {
make_nl_start = true;
} else if (std::strcmp(argv[i], "-flip-z") == 0) {
flip_z = true;
} else {
input = argv[i];
}
}
if (input.empty() || output.empty()) {
std::cout << "Error: an input and output file are needed.\n"
<< "Usage: ./nlxml_transformer <input> -o <output> [-to-space <file>]\n";
return 1;
}
if (!to_space.empty() && !apply.empty()) {
std::cout << "Error: apply and to space are mutually exclusive!\n";
return 1;
}
NeuronData data = import_file(input);
if (apply.empty() && to_space.empty() && data.images.empty()) {
std::cout << "Warning: did not find transform data in '" << input << "'\n";
export_file(data, output);
return 0;
}
if (make_nl_start && (data.trees.empty() || data.trees[0].points.empty())) {
std::cout << "Error: no trees in file to make start point from\n";
return 1;
}
const float z_scale = flip_z ? -1.f : 1.f;
NeuronData to_data;
glm::mat4 mat(1);
if (!to_space.empty()) {
to_data = import_file(to_space);
// TODO: are these transforms correct in general? Some files have a transform
// already, so ignoring it is incorrect.
glm::mat4 from_mat(1);
#if 0
if (!data.images.empty()) {
from_mat = glm::translate(glm::vec3(data.images[0].coord[0],
data.images[0].coord[1], data.images[0].coord[2]))
* glm::scale(glm::vec3(data.images[0].scale[0], data.images[0].scale[1],
data.images[0].z_spacing * z_scale));
}
#endif
glm::mat4 to_mat(1);
if (!to_data.images.empty()) {
to_mat = glm::translate(glm::vec3(to_data.images[0].coord[0],
to_data.images[0].coord[1], to_data.images[0].coord[2]))
* glm::scale(glm::vec3(to_data.images[0].scale[0], to_data.images[0].scale[1],
to_data.images[0].z_spacing * z_scale));
}
// Shouldn't we apply to_mat, not its inverse? or apply from mat
// not its inverse?
mat = glm::inverse(to_mat) * glm::inverse(from_mat);
} else if (!apply.empty()) {
NeuronData ap = import_file(apply);
mat = glm::translate(glm::vec3(ap.images[0].coord[0], ap.images[0].coord[1], ap.images[0].coord[2]))
* glm::scale(glm::vec3(ap.images[0].scale[0], ap.images[0].scale[1],
ap.images[0].z_spacing * z_scale));
} else {
mat = glm::translate(glm::vec3(data.images[0].coord[0], data.images[0].coord[1], data.images[0].coord[2]))
* glm::scale(glm::vec3(data.images[0].scale[0], data.images[0].scale[1], data.images[0].z_spacing * z_scale));
}
transform_neuron_data(data,
[mat](const Point &p) {
auto a = mat * glm::vec4(p.x, p.y, p.z, 1.f);
return nlxml::Point(a.x, a.y, a.z, p.d);
});
if (!to_space.empty()) {
data.images = to_data.images;
} else if (!data.images.empty()) {
// Set the transform to identity now that we've applied it
data.images[0].coord.fill(0.f);
data.images[0].scale.fill(1.f);
}
if (make_nl_start) {
Marker start_pt = Marker{"FilledCircle", "Start", Color(1, 1, 1), false, {data.trees[0].points[0]}};
data.trees.clear();
data.markers.clear();
data.contours.clear();
data.images.clear();
data.markers.push_back(start_pt);
}
export_file(data, output);
return 0;
}
| 33.349112 | 113 | 0.6478 | Twinklebear |
f1c8a4b71dd5ac17a96688419b510a09f599d7cf | 7,917 | cpp | C++ | lib/Resources/src/resources.cpp | jfcameron/WebAssembly-C-Cpp-Reference | b0d276565097920e700776fbf621f24ef82972cd | [
"MIT"
] | 1 | 2018-06-05T07:54:52.000Z | 2018-06-05T07:54:52.000Z | lib/Resources/src/resources.cpp | jfcameron/WebAssembly-C-Cpp-Reference | b0d276565097920e700776fbf621f24ef82972cd | [
"MIT"
] | 8 | 2018-08-11T05:13:57.000Z | 2018-11-15T00:14:42.000Z | lib/Resources/src/resources.cpp | jfcameron/WebAssembly-C-Cpp-Reference | b0d276565097920e700776fbf621f24ef82972cd | [
"MIT"
] | null | null | null | // © 2018 Joseph Cameron - All Rights Reserved
#include <gdk/locking_queue.h>
#include <gdk/resources.h>
#include <gdk/resources_protected.h>
#include <gdk/resources_private.h>
#include <gdkresources/buildinfo.h>
#if defined JFC_TARGET_PLATFORM_Emscripten
#include <emscripten.h>
#include <emscripten/fetch.h>
#endif
#if defined JFC_TARGET_PLATFORM_Darwin || defined JFC_TARGET_PLATFORM_Windows || defined JFC_TARGET_PLATFORM_Linux
#include <curl/curl.h>
#include <curl/easy.h>
#endif
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <iterator>
#include <map>
#include <memory>
#include <string>
#include <type_traits>
#include <vector>
static constexpr char TAG[] = "Resources";
namespace gdk::resources::local
{
void fetchFile(const std::string aPath, response_handler_type aResponse)
{
PRIVATE::queued_fetches.push([=]()
{
PROTECTED::logging_interface::log(TAG, std::string("worker fetching ") + aPath);
std::ifstream input(aPath, std::ios::binary);
std::vector<char> buffer(std::istreambuf_iterator<char>(input), (std::istreambuf_iterator<char>()));
PRIVATE::queued_responses.push([=]()
{
PROTECTED::logging_interface::log(TAG, std::string("main is responding to ") + aPath);
auto output = (std::vector<unsigned char>){buffer.begin(), buffer.end()};
aResponse(true, output);
});
});
}
}
#if defined JFC_TARGET_PLATFORM_Darwin || defined JFC_TARGET_PLATFORM_Windows || defined JFC_TARGET_PLATFORM_Linux
// Buffer in system memory, used to store binary data fetched from remote server
struct MemoryStruct
{
char *memory;
size_t size;
};
static_assert(std::is_trivial<MemoryStruct>::value, "MemoryStruct must be C compatible");
static_assert(std::is_standard_layout<MemoryStruct>::value, "MemoryStruct must be C compatible");
static size_t WriteMemoryCallback(void *const contentPointer, const size_t contentItemSize, const size_t contentItemCount, void *const userPointer)
{
const size_t contentByteCount = contentItemSize * contentItemCount;
auto pResponseBuffer = static_cast<struct MemoryStruct *const>(userPointer);
return [&contentPointer, &contentByteCount, &pResponseBuffer]()
{
if ((pResponseBuffer->memory = static_cast<char *>(realloc(pResponseBuffer->memory, pResponseBuffer->size + contentByteCount + 1))) == nullptr)
throw std::runtime_error(std::string(TAG).append("gdk::resources::remote fetch failed: could not allocate system memory to store fetched content!"));
memcpy(&(pResponseBuffer->memory[pResponseBuffer->size]), contentPointer, contentByteCount);
pResponseBuffer->size += contentByteCount;
pResponseBuffer->memory[pResponseBuffer->size] = {0};
return contentByteCount;
}();
}
#endif
namespace gdk::resources::remote
{
void fetchFile(const std::string aURL, response_handler_type aResponseHandler)
{
#if defined JFC_TARGET_PLATFORM_Emscripten
emscripten_fetch_attr_t attr;
emscripten_fetch_attr_init(&attr);
strcpy(attr.requestMethod, "GET");
attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;
using callback_type = std::function<void(const bool, std::vector<unsigned char> *const)>;
PROTECTED::logging_interface::log(TAG, "fetch has begun");
attr.onsuccess = [](emscripten_fetch_t *const fetch)
{
PROTECTED::logging_interface::log(TAG, "fetch has succeeded");
std::vector<unsigned char> binaryData = std::vector<unsigned char>(&(fetch->data[0]), &(fetch->data[fetch->numBytes]));
auto pCallback = static_cast<callback_type *>(fetch->userData);
(*pCallback)(true, &binaryData);
delete pCallback;
emscripten_fetch_close(fetch);
};
attr.onerror = [](emscripten_fetch_t *const fetch)
{
PROTECTED::logging_interface::log(TAG, std::string("fetch has failed. url: ") + std::string(fetch->url) + std::string(", status: ") + std::string(fetch->status));
auto pCallback = static_cast<callback_type *>(fetch->userData);
(*pCallback)(false, nullptr);
delete pCallback;
emscripten_fetch_close(fetch);
};
attr.userData = static_cast<void *>(new callback_type([aResponseHandler](const bool aSucceeded, std::vector<unsigned char> *const aBytes)
{
aResponseHandler(aSucceeded, *aBytes);
}));
emscripten_fetch(&attr, aURL.c_str());
#elif defined JFC_TARGET_PLATFORM_Darwin || defined JFC_TARGET_PLATFORM_Windows || defined JFC_TARGET_PLATFORM_Linux
PRIVATE::queued_fetches.push([=]()
{
PROTECTED::logging_interface::log(TAG, std::string("worker fetching ") + aURL);
curl_global_init(CURL_GLOBAL_ALL); // MOVE TO A CURL WRAPPER
if (CURL * curl_handle = curl_easy_init())
{
struct MemoryStruct chunk = (MemoryStruct)
{
.memory = [](){
if (auto pHeap = static_cast<char *>(malloc(1))) return pHeap;
else throw std::runtime_error(std::string(TAG).append("could not allocate space on the heap"));
}(),
.size = 0
};
curl_easy_setopt(curl_handle, CURLOPT_URL, aURL.c_str()); // specify URL to get
curl_easy_setopt(curl_handle, CURLOPT_USERAGENT, "libcurl-agent/1.0"); // some servers don't like requests that are made without a user-agent field, so we provide one
curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); // send all data to this function
curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk); // we pass our 'chunk' struct to the callback function
const CURLcode curlResult = curl_easy_perform(curl_handle);
if (curlResult == CURLE_OK) // on macos/xcode saw an issue where this passsed although it was not CURLE_OK
{
//printf("%lu bytes retrieved\n", static_cast<unsigned long>(chunk.size));
// This writes to a file then loads it from file
//FILE *fp = fopen(std::string("resource/mia.png").c_str(), "wb");
//fwrite(chunk.memory, 1, chunk.size, fp); //This works! the file looks good.
//fclose(fp);
//auto output = gdk::resources::local::fetchFile("resource/mia.png");
std::vector<unsigned char> output(chunk.memory, chunk.memory + chunk.size);
PRIVATE::queued_responses.push([=]()
{
PROTECTED::logging_interface::log(TAG, std::string("main is responding to ") + aURL);
aResponseHandler(true, output);
});
}
else throw std::runtime_error(std::string(TAG).append("curl_easy_perform failed: ").append(curl_easy_strerror(curlResult)));
curl_easy_cleanup(curl_handle);
free(chunk.memory);
}
else throw std::runtime_error(std::string(TAG).append("Failed to initialize a curl session."));
curl_global_cleanup(); // MOVE TO A CURL WRAPPER
});
#else
#error fetchFile is unimplemented on the current platform
#endif
}
}
| 38.808824 | 186 | 0.610206 | jfcameron |
f1c93a2a9c228fe9a34700902c910d3bba0a5741 | 355 | hpp | C++ | src/Wave/IVoice.hpp | clauderichard/Wgx | eec7bbab51e2245191424895073ab688ff6f8ac4 | [
"MIT"
] | null | null | null | src/Wave/IVoice.hpp | clauderichard/Wgx | eec7bbab51e2245191424895073ab688ff6f8ac4 | [
"MIT"
] | null | null | null | src/Wave/IVoice.hpp | clauderichard/Wgx | eec7bbab51e2245191424895073ab688ff6f8ac4 | [
"MIT"
] | null | null | null | #ifndef __IVOICE_HPP__
#define __IVOICE_HPP__
#include "IWaveSampler.hpp"
#include <vector>
using namespace std;
class IVoice : public IWaveSampler
{
public:
virtual ~IVoice(){}
virtual IVoice *copy() = 0;
virtual void resetParams(size_t numSamples, initializer_list<double> params) = 0;
virtual void printt(){}
};
#endif | 20.882353 | 86 | 0.695775 | clauderichard |
f1c967503ead248e5269a51c98a133a1912ecf5d | 534 | cpp | C++ | src/duktype/AsyncObjectScope.cpp | CasperTech/duktype | dbfd2838d23c3c2d8c3574f61e23f197c00b0fa2 | [
"MIT"
] | 1 | 2021-11-01T20:18:08.000Z | 2021-11-01T20:18:08.000Z | src/duktype/AsyncObjectScope.cpp | CasperTech/duktype | dbfd2838d23c3c2d8c3574f61e23f197c00b0fa2 | [
"MIT"
] | 1 | 2021-01-28T17:46:47.000Z | 2021-01-28T17:46:47.000Z | src/duktype/AsyncObjectScope.cpp | CasperTech/duktype | dbfd2838d23c3c2d8c3574f61e23f197c00b0fa2 | [
"MIT"
] | null | null | null | #include "AsyncObjectScope.h"
#include "AsyncContext.h"
namespace Duktype
{
void AsyncObjectScope::createObjectAsync(const Nan::FunctionCallbackInfo<v8::Value> &info)
{
if (info.Length() > 0 && info[0]->IsString())
{
std::string objNameStr = *Nan::Utf8String(info[0]);
std::static_pointer_cast<AsyncContext>(_ctx)->createObjectAsync(_handle, info);
}
else
{
Nan::ThrowError("Invalid or wrong number of arguments");
}
}
} | 29.666667 | 95 | 0.586142 | CasperTech |
f1ca0b122a3a56602d7b95ec703416109cf92159 | 15,400 | cpp | C++ | code/src/qvgeNodeEditorUIController.cpp | turbanoff/qvge | 53508adadb4ca566c011b2b41d432030a5fa5fac | [
"MIT"
] | null | null | null | code/src/qvgeNodeEditorUIController.cpp | turbanoff/qvge | 53508adadb4ca566c011b2b41d432030a5fa5fac | [
"MIT"
] | null | null | null | code/src/qvgeNodeEditorUIController.cpp | turbanoff/qvge | 53508adadb4ca566c011b2b41d432030a5fa5fac | [
"MIT"
] | null | null | null | /*
This file is a part of
QVGE - Qt Visual Graph Editor
(c) 2016-2018 Ars L. Masiuk (ars.masiuk@gmail.com)
It can be used freely, maintaining the information above.
*/
#include <qvgeNodeEditorUIController.h>
#include <qvgeMainWindow.h>
#include <CCommutationTable.h>
#include <CSceneOptionsDialog.h>
#include <CNodeEdgePropertiesUI.h>
#include <CClassAttributesEditorUI.h>
#include <COGDFLayoutUIController.h>
#include <COGDFNewGraphDialog.h>
#include <COGDFLayout.h>
#include <qvge/CNode.h>
#include <qvge/CConnection.h>
#include <qvge/CImageExport.h>
#include <qvge/CPDFExport.h>
#include <qvge/CNodeEditorScene.h>
#include <qvge/CEditorView.h>
#include <qvge/CFileSerializerGEXF.h>
#include <qvge/CFileSerializerGraphML.h>
#include <qvge/CFileSerializerXGR.h>
#include <qvge/CFileSerializerDOT.h>
#include <QMenuBar>
#include <QStatusBar>
#include <QDockWidget>
#include <QMenu>
#include <QToolButton>
#include <QWidgetAction>
#include <QResizeEvent>
#include <QDebug>
#include <QPixmapCache>
#include <QFileDialog>
// Builds the whole node-editor UI inside the given main window:
// scene + view (central widget), navigator, menus, dock panels and
// the status bar counter. Ownership of all created QObjects is passed
// to Qt's parent-child mechanism via `parent`.
qvgeNodeEditorUIController::qvgeNodeEditorUIController(qvgeMainWindow *parent) :
	QObject(parent),
	m_parent(parent)
{
	// create document: the scene holds the graph, the view displays it
	m_editorScene = new CNodeEditorScene(parent);
	m_editorView = new CEditorView(m_editorScene, parent);
	parent->setCentralWidget(m_editorView);
	// connect scene: document-changed notification + local UI refresh slots
	connect(m_editorScene, &CEditorScene::sceneChanged, parent, &CMainWindow::onDocumentChanged);
	connect(m_editorScene, &CEditorScene::sceneChanged, this, &qvgeNodeEditorUIController::onSceneChanged);
	connect(m_editorScene, &CEditorScene::selectionChanged, this, &qvgeNodeEditorUIController::onSelectionChanged);
	// connect view: keep the zoom indicator in sync with the view scale
	connect(m_editorView, SIGNAL(scaleChanged(double)), this, SLOT(onZoomChanged(double)));
	// slider2d: corner-button scene navigator
	createNavigator();
	// menus & actions (must exist before the initial action-state refresh below)
	createMenus();
	// dock panels (properties, topology, defaults)
	createPanels();
	// status bar: permanent label showing node/edge counts
	m_statusLabel = new QLabel();
	parent->statusBar()->addPermanentWidget(m_statusLabel);
	// update actions: bring counters, action states and zoom text to initial values
	onSceneChanged();
	onSelectionChanged();
	onZoomChanged(1);
	// OGDF: automatic graph layout controller
	m_ogdfController = new COGDFLayoutUIController(parent, m_editorScene);
}
// Creates the Edit and View menus, extends the File menu with export
// actions, and builds the matching toolbars. Note: the order of the
// insertAction()/insertMenu() calls matters — later actions are inserted
// *before* earlier ones to obtain the final on-screen order.
void qvgeNodeEditorUIController::createMenus()
{
	// file actions: reuse the main window's export action for image export
	QAction *exportAction = m_parent->getFileExportAction();
	exportAction->setVisible(true);
	exportAction->setText(tr("Export to &Image..."));
	connect(exportAction, &QAction::triggered, this, &qvgeNodeEditorUIController::exportFile);
	// PDF export goes just above the image export entry
	QAction *exportActionPDF = new QAction(tr("Export to &PDF..."));
	m_parent->getFileMenu()->insertAction(exportAction, exportActionPDF);
	connect(exportActionPDF, &QAction::triggered, this, &qvgeNodeEditorUIController::exportPDF);
	// DOT/GraphViz export goes above the PDF entry
	QAction *exportActionDOT = new QAction(tr("Export to &DOT/GraphViz..."));
	m_parent->getFileMenu()->insertAction(exportActionPDF, exportActionDOT);
	connect(exportActionDOT, &QAction::triggered, this, &qvgeNodeEditorUIController::exportDOT);
	m_parent->getFileMenu()->insertSeparator(exportActionDOT);
	// add edit menu (inserted before the Window menu)
	QMenu *editMenu = new QMenu(tr("&Edit"));
	m_parent->menuBar()->insertMenu(m_parent->getWindowMenuAction(), editMenu);
	// undo/redo: enabled state tracks the scene's undo/redo availability
	QAction *undoAction = editMenu->addAction(QIcon(":/Icons/Undo"), tr("&Undo"));
	undoAction->setStatusTip(tr("Undo latest action"));
	undoAction->setShortcut(QKeySequence::Undo);
	connect(undoAction, &QAction::triggered, m_editorScene, &CEditorScene::undo);
	connect(m_editorScene, &CEditorScene::undoAvailable, undoAction, &QAction::setEnabled);
	undoAction->setEnabled(m_editorScene->availableUndoCount());
	QAction *redoAction = editMenu->addAction(QIcon(":/Icons/Redo"), tr("&Redo"));
	redoAction->setStatusTip(tr("Redo latest action"));
	redoAction->setShortcut(QKeySequence::Redo);
	connect(redoAction, &QAction::triggered, m_editorScene, &CEditorScene::redo);
	connect(m_editorScene, &CEditorScene::redoAvailable, redoAction, &QAction::setEnabled);
	redoAction->setEnabled(m_editorScene->availableRedoCount());
	editMenu->addSeparator();
	// clipboard actions are stored in members so onSelectionChanged() can
	// enable/disable them depending on the current selection
	cutAction = editMenu->addAction(QIcon(":/Icons/Cut"), tr("Cu&t"));
	cutAction->setStatusTip(tr("Cut selection to clipboard"));
	cutAction->setShortcut(QKeySequence::Cut);
	connect(cutAction, &QAction::triggered, m_editorScene, &CEditorScene::cut);
	copyAction = editMenu->addAction(QIcon(":/Icons/Copy"), tr("&Copy"));
	copyAction->setStatusTip(tr("Copy selection to clipboard"));
	copyAction->setShortcut(QKeySequence::Copy);
	connect(copyAction, &QAction::triggered, m_editorScene, &CEditorScene::copy);
	pasteAction = editMenu->addAction(QIcon(":/Icons/Paste"), tr("&Paste"));
	pasteAction->setStatusTip(tr("Paste selection from clipboard"));
	pasteAction->setShortcut(QKeySequence::Paste);
	connect(pasteAction, &QAction::triggered, m_editorScene, &CEditorScene::paste);
	delAction = editMenu->addAction(QIcon(":/Icons/Delete"), tr("&Delete"));
	delAction->setStatusTip(tr("Delete selection"));
	delAction->setShortcut(QKeySequence::Delete);
	connect(delAction, &QAction::triggered, m_editorScene, &CEditorScene::del);
	editMenu->addSeparator();
	// unlink: node-specific action, enabled only when nodes are selected
	unlinkAction = editMenu->addAction(QIcon(":/Icons/Unlink"), tr("&Unlink"));
	unlinkAction->setStatusTip(tr("Unlink selected nodes"));
	connect(unlinkAction, &QAction::triggered, m_editorScene, &CNodeEditorScene::onActionUnlink);
	// scene options dialog
	editMenu->addSeparator();
	QAction *sceneAction = editMenu->addAction(QIcon(":/Icons/Settings"), tr("&Options..."));
	sceneAction->setStatusTip(tr("Set up the scene"));
	connect(sceneAction, &QAction::triggered, this, &qvgeNodeEditorUIController::sceneOptions);
	// add edit toolbar mirroring the main edit actions
	QToolBar *editToolbar = m_parent->addToolBar(tr("Edit"));
	editToolbar->setObjectName("editToolbar");
	editToolbar->setToolButtonStyle(Qt::ToolButtonTextUnderIcon);
	editToolbar->addAction(undoAction);
	editToolbar->addAction(redoAction);
	editToolbar->addSeparator();
	editToolbar->addAction(cutAction);
	editToolbar->addAction(copyAction);
	editToolbar->addAction(pasteAction);
	editToolbar->addAction(delAction);
	// add view menu (inserted before the Window menu)
	QMenu *viewMenu = new QMenu(tr("&View"));
	m_parent->menuBar()->insertMenu(m_parent->getWindowMenuAction(), viewMenu);
	// checkable toggles initialized from the scene's current state
	gridAction = viewMenu->addAction(QIcon(":/Icons/Grid-Show"), tr("Show &Grid"));
	gridAction->setCheckable(true);
	gridAction->setStatusTip(tr("Show/hide background grid"));
	gridAction->setChecked(m_editorScene->gridEnabled());
	connect(gridAction, SIGNAL(toggled(bool)), m_editorScene, SLOT(enableGrid(bool)));
	gridSnapAction = viewMenu->addAction(QIcon(":/Icons/Grid-Snap"), tr("&Snap to Grid"));
	gridSnapAction->setCheckable(true);
	gridSnapAction->setStatusTip(tr("Snap to grid when dragging"));
	gridSnapAction->setChecked(m_editorScene->gridSnapEnabled());
	connect(gridSnapAction, SIGNAL(toggled(bool)), m_editorScene, SLOT(enableGridSnap(bool)));
	actionShowLabels = viewMenu->addAction(QIcon(":/Icons/Label"), tr("Show &Labels"));
	actionShowLabels->setCheckable(true);
	actionShowLabels->setStatusTip(tr("Show/hide item labels"));
	actionShowLabels->setChecked(m_editorScene->itemLabelsEnabled());
	connect(actionShowLabels, SIGNAL(toggled(bool)), m_editorScene, SLOT(enableItemLabels(bool)));
	viewMenu->addSeparator();
	// zoom actions
	zoomAction = viewMenu->addAction(QIcon(":/Icons/ZoomIn"), tr("&Zoom"));
	zoomAction->setStatusTip(tr("Zoom view in"));
	zoomAction->setShortcut(QKeySequence::ZoomIn);
	connect(zoomAction, &QAction::triggered, this, &qvgeNodeEditorUIController::zoom);
	unzoomAction = viewMenu->addAction(QIcon(":/Icons/ZoomOut"), tr("&Unzoom"));
	unzoomAction->setStatusTip(tr("Zoom view out"));
	unzoomAction->setShortcut(QKeySequence::ZoomOut);
	connect(unzoomAction, &QAction::triggered, this, &qvgeNodeEditorUIController::unzoom);
	resetZoomAction = viewMenu->addAction(QIcon(":/Icons/ZoomReset"), tr("&Reset Zoom"));
	resetZoomAction->setStatusTip(tr("Zoom view to 100%"));
	connect(resetZoomAction, &QAction::triggered, this, &qvgeNodeEditorUIController::resetZoom);
	fitZoomAction = viewMenu->addAction(QIcon(":/Icons/ZoomFit"), tr("&Fit to View"));
	fitZoomAction->setStatusTip(tr("Zoom to fit all the items to view"));
	connect(fitZoomAction, &QAction::triggered, m_editorView, &CEditorView::fitToView);
	// add view toolbar
	QToolBar *zoomToolbar = m_parent->addToolBar(tr("View"));
	zoomToolbar->setObjectName("viewToolbar");
	zoomToolbar->setToolButtonStyle(Qt::ToolButtonTextUnderIcon);
	zoomToolbar->addAction(zoomAction);
	// resetZoomAction2 doubles as the live zoom-percentage indicator;
	// its text is updated by onZoomChanged()
	resetZoomAction2 = zoomToolbar->addAction(QIcon(":/Icons/Zoom"), "");
	resetZoomAction2->setStatusTip(resetZoomAction->statusTip());
	resetZoomAction2->setToolTip(resetZoomAction->statusTip());
	connect(resetZoomAction2, &QAction::triggered, this, &qvgeNodeEditorUIController::resetZoom);
	zoomToolbar->addAction(unzoomAction);
	zoomToolbar->addAction(fitZoomAction);
}
void qvgeNodeEditorUIController::createPanels()
{
// propertis
QDockWidget *propertyDock = new QDockWidget(tr("Item Properties"));
propertyDock->setObjectName("propertyDock");
m_parent->addDockWidget(Qt::RightDockWidgetArea, propertyDock);
CNodeEdgePropertiesUI *propertiesPanel = new CNodeEdgePropertiesUI(propertyDock);
propertiesPanel->setScene(m_editorScene);
propertyDock->setWidget(propertiesPanel);
// connections
QDockWidget *connectionsDock = new QDockWidget(tr("Topology"));
connectionsDock->setObjectName("connectionsDock");
m_parent->addDockWidget(Qt::RightDockWidgetArea, connectionsDock);
CCommutationTable *connectionsPanel = new CCommutationTable(connectionsDock);
connectionsDock->setWidget(connectionsPanel);
connectionsPanel->setScene(m_editorScene);
// default properties
QDockWidget *defaultsDock = new QDockWidget(tr("Default Properties"));
defaultsDock ->setObjectName("defaultsDock");
m_parent->addDockWidget(Qt::LeftDockWidgetArea, defaultsDock);
CClassAttributesEditorUI *defaultsPanel = new CClassAttributesEditorUI(defaultsDock);
defaultsPanel->setScene(m_editorScene);
defaultsDock->setWidget(defaultsPanel);
}
// Builds the 2D scene navigator (mini-map) shown from the editor view's
// corner button; its contents are refreshed lazily in onNavigatorShown().
void qvgeNodeEditorUIController::createNavigator()
{
	m_sliderView = new QSint::Slider2d(m_parent);
	m_sliderView->connectSource(m_editorView);
	QToolButton *sliderButton = m_sliderView->makeAsButton();
	m_editorView->setCornerWidget(sliderButton);
	sliderButton->setIcon(QIcon(":/Icons/Navigator"));
	sliderButton->setToolTip(tr("Show scene navigator"));
	connect(m_sliderView, SIGNAL(aboutToShow()), this, SLOT(onNavigatorShown()));
	m_sliderView->setFixedSize(200,200);
	m_sliderView->setSliderOpacity(0.3);
	m_sliderView->setSliderBrush(Qt::green);
}
// Slot fired right before the navigator popup appears: resizes it to the
// scene's aspect ratio (longest side 200 px) and repaints its thumbnail.
void qvgeNodeEditorUIController::onNavigatorShown()
{
	double w = m_editorScene->sceneRect().width();
	double h = m_editorScene->sceneRect().height();
	// Fit inside 200x200 while preserving the scene's aspect ratio.
	double cw = w > h ? 200.0 : 200.0 * (w/h);
	double ch = h > w ? 200.0 : 200.0 * (h/w) ;
	m_sliderView->setFixedSize(cw, ch);
	// Qt bug: update menu size
	QResizeEvent re(m_sliderView->size(), m_sliderView->parentWidget()->size());
	qApp->sendEvent(m_sliderView->parentWidget(), &re);
	// Render the scene into a pixmap used as the navigator background.
	// NOTE(review): the pixmap is not fill()-ed first; this assumes the scene
	// render covers every pixel — confirm, otherwise stale texels may show.
	QPixmap pm(m_sliderView->size());
	QPainter p(&pm);
	m_editorScene->render(&p);
	m_sliderView->setBackgroundBrush(pm);
}
// Empty out-of-line destructor — presumably kept in the .cpp so member types
// that are only forward-declared in the header are complete here; confirm.
qvgeNodeEditorUIController::~qvgeNodeEditorUIController()
{
}
// Keeps the edit actions in sync with the scene selection: cut/copy/delete
// need any selected item, unlink needs at least one selected node.
void qvgeNodeEditorUIController::onSelectionChanged()
{
	const bool hasSelection = m_editorScene->selectedItems().size() > 0;
	cutAction->setEnabled(hasSelection);
	copyAction->setEnabled(hasSelection);
	delAction->setEnabled(hasSelection);
	// Unlinking only makes sense for nodes.
	const bool hasNodes = m_editorScene->getSelectedItems<CNode>().size() > 0;
	unlinkAction->setEnabled(hasNodes);
}
// Refreshes the status bar label with the current node and edge counts.
void qvgeNodeEditorUIController::onSceneChanged()
{
	const auto nodeCount = m_editorScene->getItems<CNode>().size();
	const auto edgeCount = m_editorScene->getItems<CConnection>().size();
	m_statusLabel->setText(tr("Nodes: %1 | Edges: %2").arg(nodeCount).arg(edgeCount));
}
// Reflects the current zoom factor on the toolbar button as a percentage.
void qvgeNodeEditorUIController::onZoomChanged(double currentZoom)
{
	resetZoomAction2->setText(QString("%1%").arg((int)(currentZoom * 100)));
}
// Zooms the editor view in by a fixed factor of 1.3.
void qvgeNodeEditorUIController::zoom()
{
	m_editorView->zoomBy(1.3);
}
// Zooms the editor view out by the inverse of the zoom() factor.
void qvgeNodeEditorUIController::unzoom()
{
	m_editorView->zoomBy(1.0 / 1.3);
}
// Restores the editor view to 100% zoom.
void qvgeNodeEditorUIController::resetZoom()
{
	m_editorView->zoomTo(1.0);
}
// Opens the scene options dialog; when accepted, syncs the checkable view
// actions with the new scene state and persists the settings.
void qvgeNodeEditorUIController::sceneOptions()
{
	CSceneOptionsDialog dialog;
	if (dialog.exec(*m_editorScene, *m_editorView))
	{
		gridAction->setChecked(m_editorScene->gridEnabled());
		gridSnapAction->setChecked(m_editorScene->gridSnapEnabled());
		actionShowLabels->setChecked(m_editorScene->itemLabelsEnabled());
		m_parent->writeSettings();
	}
}
// Exports the scene as an image next to the current document and reports
// the outcome in the main window's status bar.
void qvgeNodeEditorUIController::exportFile()
{
	const bool ok = CImageExport::write(*m_editorScene, m_parent->getCurrentFileName());
	m_parent->statusBar()->showMessage(ok ? tr("Export successful") : tr("Export failed"));
}
void qvgeNodeEditorUIController::exportDOT()
{
CFileSerializerDOT dot;
QString fileName = CUtils::cutLastSuffix(m_parent->getCurrentFileName());
QString path = QFileDialog::getSaveFileName(NULL,
QObject::tr("Export as GraphViz graph"),
fileName,
dot.description() + " (" + dot.filters() + ")");
if (path.isEmpty())
return;
if (dot.save(path, *m_editorScene))
{
m_parent->statusBar()->showMessage(tr("Export successful"));
}
else
{
m_parent->statusBar()->showMessage(tr("Export failed"));
}
}
// Exports the scene as a PDF next to the current document and reports the
// outcome in the main window's status bar.
void qvgeNodeEditorUIController::exportPDF()
{
	const bool ok = CPDFExport::write(*m_editorScene, m_parent->getCurrentFileName());
	m_parent->statusBar()->showMessage(ok ? tr("Export successful") : tr("Export failed"));
}
// Restores view-related settings: antialiasing (current render hint is the
// default) and the global QPixmapCache size limit.
void qvgeNodeEditorUIController::doReadSettings(QSettings& settings)
{
	bool isAA = m_editorView->renderHints().testFlag(QPainter::Antialiasing);
	isAA = settings.value("antialiasing", isAA).toBool();
	m_editorView->setRenderHint(QPainter::Antialiasing, isAA);
	int cacheRam = QPixmapCache::cacheLimit();
	cacheRam = settings.value("cacheRam", cacheRam).toInt();
	QPixmapCache::setCacheLimit(cacheRam);
}
// Persists view-related settings: the antialiasing render hint and the
// global QPixmapCache size limit.
void qvgeNodeEditorUIController::doWriteSettings(QSettings& settings)
{
	settings.setValue("antialiasing", m_editorView->renderHints().testFlag(QPainter::Antialiasing));
	settings.setValue("cacheRam", QPixmapCache::cacheLimit());
}
// Loads a document into the editor scene, dispatching on format: native XGR,
// GraphML or GEXF via dedicated serializers, anything else through the OGDF
// graph loader. Returns true on success.
bool qvgeNodeEditorUIController::loadFromFile(const QString &fileName, const QString &format)
{
	if (format == "xgr")
	{
		return (CFileSerializerXGR().load(fileName, *m_editorScene));
	}
	if (format == "graphml")
	{
		return (CFileSerializerGraphML().load(fileName, *m_editorScene));
	}
	if (format == "gexf")
	{
		return (CFileSerializerGEXF().load(fileName, *m_editorScene));
	}
	// else via ogdf
	return (COGDFLayout::loadGraph(fileName.toStdString(), *m_editorScene));
}
// Writes the scene to disk in the requested format. Only the native XGR
// format and GraphViz DOT are supported for saving; anything else fails.
bool qvgeNodeEditorUIController::saveToFile(const QString &fileName, const QString &format)
{
	if (format == "xgr")
	{
		return CFileSerializerXGR().save(fileName, *m_editorScene);
	}
	else if (format == "dot")
	{
		return CFileSerializerDOT().save(fileName, *m_editorScene);
	}
	return false;
}
// Shows the OGDF "new graph" dialog for a freshly created document; the
// post-accept refresh is currently disabled (commented out below).
void qvgeNodeEditorUIController::onNewDocumentCreated()
{
	COGDFNewGraphDialog dialog;
	if (dialog.exec(*m_editorScene))
	{
		// update scene info
		//onSceneChanged();
	}
}
| 32.016632 | 112 | 0.745584 | turbanoff |
f1cda3d7607fd6ff5c36aa04ffde53f78ec6eef5 | 2,159 | hpp | C++ | BoostGraph/include/ATG.hpp | Arka2009/ita3e | 1b33e9a0ca167449c68596b7065ea84af2ed3942 | [
"BSD-2-Clause"
] | null | null | null | BoostGraph/include/ATG.hpp | Arka2009/ita3e | 1b33e9a0ca167449c68596b7065ea84af2ed3942 | [
"BSD-2-Clause"
] | 1 | 2017-04-07T19:19:09.000Z | 2017-04-07T19:19:09.000Z | BoostGraph/include/ATG.hpp | Arka2009/ita3e | 1b33e9a0ca167449c68596b7065ea84af2ed3942 | [
"BSD-2-Clause"
] | null | null | null | #ifndef DEF_ATG_H
#define DEF_ATG_H
#include <iostream>
#include <vector>
#include <algorithm>
#include <set>
#include <queue>
#include <boost/graph/adjacency_list.hpp>
void testATG(int N, double p);
void randDAGGen();
std::ostream& operator<<(std::ostream &os, const NodeType &et);
/* Application task graph: a DAG of jobs plus scheduling state and the
 * Klaus-Huang based schedulers operating on it.
 * NOTE(review): member semantics are taken from the existing inline comments
 * and signatures; confirm against the implementation file. */
class AppTaskGraph {
private:
	DAGType gSS;
	std::vector<DAGVertexType> vdMap; /* Node identifier set, vdMap[N-1] is a sink */
	std::map<DAGVertexType,std::vector<DAGVertexType>> predVdMap; /* Map of predecessors */
	std::map<DAGVertexType,std::vector<DAGVertexType>> succVdMap; /* Map of successors */
	bool initialized;
	bool scheduled;
	double t; /* Execution time spent */
	int util; /* Number of processors used */
	int finishCounter;
	double wC; /* Worst case makespan */
	double deadline;
	/* Internal helpers (implemented out of line): */
	void computeNbdMap();
	int execute(int M_X, double delta);
	int KlausHuangLP(int M_X, double rho, int mu);
	double KlausHuangListM(int M_X, double delta);
	void computeHeight(bool schedule);
	void reset(); /* Reset the schedules and status */
public:
	AppTaskGraph(int N, double p); /* Random DAG with N nodes, edge prob. p — presumably; confirm */
	AppTaskGraph(std::string dotFile); /* Read a dot file */
	AppTaskGraph();
	void writeDot(int iter);
	/* Accessors: jid-based overloads index through vdMap; vptr-based overloads
	 * take a graph vertex descriptor directly. */
	std::vector<DAGVertexType> getPreds(int jid) { return this->predVdMap[this->vdMap[jid]]; };
	std::vector<DAGVertexType> getPreds(DAGVertexType vptr) { return this->predVdMap[vptr]; };
	std::vector<DAGVertexType> getSuccs(int jid) { return this->succVdMap[this->vdMap[jid]]; };
	std::vector<DAGVertexType> getSuccs(DAGVertexType vptr) { return this->succVdMap[vptr]; };
	NodeType getNode(int jid) { return this->gSS[this->vdMap[jid]]; };
	NodeType getNode(DAGVertexType vptr) { return this->gSS[vptr]; };
	double tau_j(int jid, int l);
	int getSize() { return this->vdMap.size(); };
	double KlausHuangMethodSched(int M_X);
	void KHMBinSearch(int K); /* Minimum peak power scheduling */
	void genStats();
};
#endif | 42.333333 | 99 | 0.633627 | Arka2009 |
f1ce6b6911bff1a229eb1bd792ce95f284ff51ac | 1,860 | cpp | C++ | src/0.3.7-R1/KeyStuff.cpp | DarkP1xel/SAMP-API | 0d43a3603239f2f4bc65b8305ffc72177386cc29 | [
"MIT"
] | 7 | 2019-09-23T10:19:40.000Z | 2021-07-25T06:17:27.000Z | src/0.3.7-R1/KeyStuff.cpp | DarkP1xel/SAMP-API | 0d43a3603239f2f4bc65b8305ffc72177386cc29 | [
"MIT"
] | null | null | null | src/0.3.7-R1/KeyStuff.cpp | DarkP1xel/SAMP-API | 0d43a3603239f2f4bc65b8305ffc72177386cc29 | [
"MIT"
] | 1 | 2021-04-11T17:13:00.000Z | 2021-04-11T17:13:00.000Z | /*
This is a SAMP (0.3.7-R1) API project file.
Developer: LUCHARE <luchare.dev@gmail.com>
See more here https://github.com/LUCHARE/SAMP-API
Copyright (c) 2018 BlastHack Team <BlastHack.Net>. All rights reserved.
*/
#include "KeyStuff.h"
CPad *&SAMP::KeyStuff::pInternalKeys = *(CPad **)SAMP_ADDROF(0x1016E8);
CPad *SAMP::KeyStuff::pLocalPlayerKeys = (CPad *)SAMP_ADDROF(0x13D2C0);
CPad *SAMP::KeyStuff::aPlayerKeys = (CPad *)SAMP_ADDROF(0x13D3F8);
bool *&SAMP::KeyStuff::pDriveByLeft = *(bool **)SAMP_ADDROF(0x1016EC);
bool *&SAMP::KeyStuff::pDriveByRight = *(bool **)SAMP_ADDROF(0x1016F0);
bool &SAMP::KeyStuff::bSavedDriveByLeft = *(bool *)SAMP_ADDROF(0x14D0A0);
bool &SAMP::KeyStuff::bSavedDriveByRight = *(bool *)SAMP_ADDROF(0x14D0A1);
// Thunk into the SA-MP client routine at offset 0xA2240 — presumably the
// key-state subsystem initializer; confirm against the client binary.
void SAMP::KeyStuff::Initialize() {
	((void(__cdecl *)())SAMP_ADDROF(0xA2240))();
}
// Thunk into the client routine at 0xA2260 (no-argument ApplyKeys variant).
void SAMP::KeyStuff::ApplyKeys() {
	((void(__cdecl *)())SAMP_ADDROF(0xA2260))();
}
// Thunk into the client routine at 0xA22A0.
void SAMP::KeyStuff::UpdateKeys() {
	((void(__cdecl *)())SAMP_ADDROF(0xA22A0))();
}
// Forwards a player's pad state to the client routine at 0xA22E0.
void SAMP::KeyStuff::SetKeys(int nPlayerNumber, const CPad *pPad) {
	((void(__cdecl *)(int, const CPad *))SAMP_ADDROF(0xA22E0))(nPlayerNumber, pPad);
}
// Per-player ApplyKeys overload; thunk into the client routine at 0xA2300.
void SAMP::KeyStuff::ApplyKeys(int nPlayerNumber) {
	((void(__cdecl *)(int))SAMP_ADDROF(0xA2300))(nPlayerNumber);
}
// Returns the client's internal pad state via the routine at 0xA2350.
CPad *SAMP::KeyStuff::GetInternalKeys() {
	return ((CPad *(__cdecl *)())SAMP_ADDROF(0xA2350))();
}
// Returns the given player's pad state via the routine at 0xA2370.
CPad *SAMP::KeyStuff::GetKeys(int nPlayerNumber) {
	return ((CPad *(__cdecl *)(int))SAMP_ADDROF(0xA2370))(nPlayerNumber);
}
// Resets the given player's pad state via the routine at 0xA2380.
void SAMP::KeyStuff::ResetKeys(int nPlayerNumber) {
	((void(__cdecl *)(int))SAMP_ADDROF(0xA2380))(nPlayerNumber);
}
// Resets the client's internal pad state via the routine at 0xA23A0.
void SAMP::KeyStuff::ResetInternalKeys() {
	((void(__cdecl *)())SAMP_ADDROF(0xA23A0))();
}
// No-argument GetKeys overload; thunk into the client routine at 0xA2360.
// Note the global-namespace ::CPad in the cast, unlike the other thunks.
CPad *SAMP::KeyStuff::GetKeys() {
	return ((::CPad *(__cdecl *)())SAMP_ADDROF(0xA2360))();
}
| 31.525424 | 82 | 0.682796 | DarkP1xel |
f1d08d0158b9c32fb53f4b7af8c2d00d2c1edbd8 | 710 | hpp | C++ | src/mettle/test_command.hpp | jimporter/mettle | c65aa75b04a08b550b3572f4c080c68e26ad86fa | [
"BSD-3-Clause"
] | 82 | 2015-01-05T10:06:44.000Z | 2022-03-07T01:41:28.000Z | src/mettle/test_command.hpp | JohnGalbraith/mettle | 38b70fe1dc0f30e98b768a37108196328182b5f4 | [
"BSD-3-Clause"
] | 44 | 2015-01-08T08:40:54.000Z | 2021-10-29T23:28:56.000Z | src/mettle/test_command.hpp | jimporter/mettle | c65aa75b04a08b550b3572f4c080c68e26ad86fa | [
"BSD-3-Clause"
] | 13 | 2015-06-23T07:41:54.000Z | 2020-02-14T15:35:07.000Z | #ifndef INC_METTLE_SRC_METTLE_TEST_COMMAND_HPP
#define INC_METTLE_SRC_METTLE_TEST_COMMAND_HPP
#include <memory>
#include <string>
#include <vector>
#include <boost/any.hpp>
namespace mettle {
// Wraps the command string used to launch a test, together with the argument
// list derived from it (presumably split by the out-of-line constructor —
// confirm in the .cpp).
class test_command {
public:
	// Parses `command`; implementation is out of line.
	test_command(std::string command);
	// The original, unsplit command string.
	const std::string & command() const {
		return command_;
	}
	// Implicit conversion so a test_command can stand in for its string.
	operator const std::string &() const {
		return command_;
	}
	// Arguments parsed from the command string.
	const std::vector<std::string> & args() const {
		return args_;
	}
private:
	std::string command_;
	std::vector<std::string> args_;
};
void validate(boost::any &v, const std::vector<std::string> &values,
test_command*, int);
} // namespace mettle
#endif
| 18.684211 | 70 | 0.657746 | jimporter |
f1d0e5c99102573c592e0691fd77267909efc6f1 | 600 | cpp | C++ | samples/cppassert.cpp | aminya/cppassert | a686985e16698dfbcfa60e512c33a2eedf240892 | [
"BSD-3-Clause"
] | 3 | 2016-05-12T13:21:26.000Z | 2019-02-06T14:00:24.000Z | samples/cppassert.cpp | aminya/cppassert | a686985e16698dfbcfa60e512c33a2eedf240892 | [
"BSD-3-Clause"
] | 1 | 2022-02-17T23:07:54.000Z | 2022-02-17T23:07:54.000Z | samples/cppassert.cpp | DariuszOstolski/cppassert | 3b0c3f800297e368a7c452ec039c3eba268fbfd3 | [
"BSD-3-Clause"
] | null | null | null | #include <iostream>
#include <cppassert/Assertion.hpp>
using namespace std;
// Sample helper: always asserts via CPP_ASSERT_ALWAYS that size != 0, so a
// zero argument is guaranteed to trip the assertion handler.
int check_args(int size)
{
    const int ARGS_MIN = 2;
    (void)ARGS_MIN;
    //bool condition = (size!=0);
    CPP_ASSERT_ALWAYS(size!=0, "Size shouldnt be 0");
    return 0;
}
// Demonstrates CPP_ASSERT_LT with and without a custom streamed message,
// then deliberately triggers an assertion failure via check_args(0).
int main(int argc, char **)
{
    const int ARGS_MAX = 5;
    std::cerr<<"Hello "<<argc<<" max: "<<ARGS_MAX<<std::endl;
    CPP_ASSERT_LT(argc, ARGS_MAX);
    // Variant with an extra message streamed into the assertion output.
    CPP_ASSERT_LT(argc, ARGS_MAX, " argc is not lower than "
                <<ARGS_MAX<<std::endl);
    std::cerr<<"World "<<std::endl;
    check_args(0);
    return 0;
} | 24 | 61 | 0.6 | aminya |
f1d1cace8a50a0dc5605adfcf33c39b328c9248a | 644 | cpp | C++ | P/2032.cpp | langonginc/cfile | 46458897b8a4a8d58a2bc63ecb6ef84f76bdb61f | [
"MIT"
] | 1 | 2020-09-13T02:51:25.000Z | 2020-09-13T02:51:25.000Z | P/2032.cpp | langonginc/cfile | 46458897b8a4a8d58a2bc63ecb6ef84f76bdb61f | [
"MIT"
] | null | null | null | P/2032.cpp | langonginc/cfile | 46458897b8a4a8d58a2bc63ecb6ef84f76bdb61f | [
"MIT"
] | 1 | 2021-06-05T03:37:57.000Z | 2021-06-05T03:37:57.000Z | #include<iostream>
#include<stdio.h>
#include<deque>
#define max(_1,_2) ((_1)>(_2)?(_1):(_2))
#define min(_1,_2) ((_1)>(_2)?(_1):(_2))
using namespace std;
// One input element: its value and its 1-based position in the sequence.
// `a` is the global storage for up to 1,000,004 elements.
struct cr{
	int num,id;
	inline void put(int _num,int _id){
		num=_num,id=_id;
	}
}a[1000005];
int num,n,m;
void work(int _if_min){
deque<cr>q;
for(int i=1;i<=n;i++){
while(!q.empty()&&q.back().num*_if_min>=a[i].num*_if_min){
q.pop_back();
}
q.push_back(a[i]);
if(q.front().id==i-m){
q.pop_front();
}
if(i>=m)printf("%d\n",q.front().num);
}
}
// Reads n, m and then n values (recording each value with its position),
// and emits the sliding-window extrema via work(-1).
int main(){
	scanf("%d%d",&n,&m);
	for(int i=1;i<=n;i++){
		scanf("%d",&num);
		a[i].put(num,i);
	}
	work(-1);
	return 0;
}
| 17.888889 | 60 | 0.56677 | langonginc |
f1d369cce4acafd0ffd0df540a2895f7eb84e1bc | 231 | hpp | C++ | cegui/src/ScriptModules/Python/bindings/output/CEGUI/AnimationManager.pypp.hpp | OpenTechEngine-Libraries/CEGUI | 6f00952d31f318f9482766d1ad2206cb540a78b9 | [
"MIT"
] | 257 | 2020-01-03T10:13:29.000Z | 2022-03-26T14:55:12.000Z | cegui/src/ScriptModules/Python/bindings/output/CEGUI/AnimationManager.pypp.hpp | OpenTechEngine-Libraries/CEGUI | 6f00952d31f318f9482766d1ad2206cb540a78b9 | [
"MIT"
] | 116 | 2020-01-09T18:13:13.000Z | 2022-03-15T18:32:02.000Z | cegui/src/ScriptModules/Python/bindings/output/CEGUI/AnimationManager.pypp.hpp | OpenTechEngine-Libraries/CEGUI | 6f00952d31f318f9482766d1ad2206cb540a78b9 | [
"MIT"
] | 58 | 2020-01-09T03:07:02.000Z | 2022-03-22T17:21:36.000Z | // This file has been generated by Py++.
#ifndef AnimationManager_hpp__pyplusplus_wrapper
#define AnimationManager_hpp__pyplusplus_wrapper
void register_AnimationManager_class();
#endif//AnimationManager_hpp__pyplusplus_wrapper
| 25.666667 | 48 | 0.865801 | OpenTechEngine-Libraries |
f1d547e1a53f5d72571bcdaa396fdb7408d6c43b | 767 | cpp | C++ | PyCommon/externalLibs/BaseLib/motion/Retarget_JH/MATHCLASS/cross_product.cpp | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | 4 | 2017-04-15T09:16:10.000Z | 2018-04-19T09:28:54.000Z | PyCommon/externalLibs/BaseLib/motion/Retarget_JH/MATHCLASS/cross_product.cpp | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | null | null | null | PyCommon/externalLibs/BaseLib/motion/Retarget_JH/MATHCLASS/cross_product.cpp | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | 1 | 2021-07-26T15:13:55.000Z | 2021-07-26T15:13:55.000Z |
#include "mathclass.h"
namespace jhm {
// Cross product c = a x b of a position and a unit vector.
position operator*( position const& a, unit_vector const& b )
{
	position c;
	c.p[0] = a.p[1]*b.p[2] - a.p[2]*b.p[1];
	c.p[1] = a.p[2]*b.p[0] - a.p[0]*b.p[2];
	c.p[2] = a.p[0]*b.p[1] - a.p[1]*b.p[0];
	return c;
}
// NOTE(review): despite the reversed parameter order, this overload computes
// a x b (position x unit_vector) again rather than b x a; cross products
// anticommute, so the sign differs from the mathematical convention —
// confirm callers depend on this before changing it.
position operator*( unit_vector const& b, position const& a )
{
	position c;
	c.p[0] = a.p[1]*b.p[2] - a.p[2]*b.p[1];
	c.p[1] = a.p[2]*b.p[0] - a.p[0]*b.p[2];
	c.p[2] = a.p[0]*b.p[1] - a.p[1]*b.p[0];
	return c;
}
// Cross product c = a x b of two vectors.
vector operator*( vector const& a, vector const& b )
{
	vector c;
	c.p[0] = a.p[1]*b.p[2] - a.p[2]*b.p[1];
	c.p[1] = a.p[2]*b.p[0] - a.p[0]*b.p[2];
	c.p[2] = a.p[0]*b.p[1] - a.p[1]*b.p[0];
	return c;
}
}
| 18.707317 | 62 | 0.445893 | hpgit |
f1d558f3d6294a7cebd557d1c8c20082674e5bf0 | 7,024 | cpp | C++ | examples/Example_reconstruct_image.cpp | MSusik/LibAPR | 5338da714905577642342c80120524bdebab5bb6 | [
"Apache-2.0"
] | null | null | null | examples/Example_reconstruct_image.cpp | MSusik/LibAPR | 5338da714905577642342c80120524bdebab5bb6 | [
"Apache-2.0"
] | null | null | null | examples/Example_reconstruct_image.cpp | MSusik/LibAPR | 5338da714905577642342c80120524bdebab5bb6 | [
"Apache-2.0"
] | null | null | null | //
// Created by cheesema on 14/03/17.
//
////////////////////////////////////////
///
/// Bevan Cheeseman 2018
///
// Command-line help text printed when arguments are missing or invalid.
const char* usage = R"(
APR pixel image reconstruction example:

Outputs various reconstructed images from the APR.

Usage:

(using *_apr.h5 output of Example_get_apr)

Example_reconstruct_image -i inputfile [-d directory] -o output_name

e.g. Example_reconstruct_image -i nuc_apr.h5 -d /Test/Input_examples/ -o nuclei

Default: Piece-wise constant reconstruction

Options:

-pc_recon (outputs piece-wise reconstruction (Default))
-smooth_recon (Outputs a smooth reconstruction)
-apr_properties (Outputs all Particle Cell information (x,y,z,l) and type to pc images

)";
#include <algorithm>
#include <iostream>
#include "data_structures/APR/APR.hpp"
#include "io/TiffUtils.hpp"
// Parsed command-line options; the three output_* flags select which
// reconstructions main() performs (piece-wise constant is the default).
struct cmdLineOptions{
    std::string output = "output";
    std::string directory = "";
    std::string input = "";
    bool output_spatial_properties = false;
    bool output_pc_recon = false;
    bool output_smooth_recon = false;
};
// Returns true when `option` appears among the argv entries in [begin, end).
static bool command_option_exists(char **begin, char **end, const std::string &option) {
    return std::any_of(begin, end, [&option](const char *arg) { return option == arg; });
}
// Returns the argv entry following `option`, or nullptr when the option is
// absent or is the last argument (i.e. has no value after it).
static const char* get_command_option(char **begin, char **end, const std::string &option) {
    char **pos = std::find(begin, end, option);
    if (pos == end) {
        return nullptr;
    }
    ++pos;
    return (pos == end) ? nullptr : *pos;
}
// Parses argv into cmdLineOptions. Exits with code 1 when no arguments are
// given (printing the usage text) and code 2 when the required -i option is
// missing. Falls back to piece-wise constant reconstruction when no output
// mode was requested.
static cmdLineOptions read_command_line_options(int argc, char **argv) {
    cmdLineOptions result;
    if (argc == 1) {
        std::cerr << usage << std::endl;
        exit(1);
    }
    if (command_option_exists(argv, argv + argc, "-i")) {
        result.input = std::string(get_command_option(argv, argv + argc, "-i"));
    }
    else {
        std::cerr << "Input file required" << std::endl;
        exit(2);
    }
    if (command_option_exists(argv, argv + argc, "-d")) {
        result.directory = std::string(get_command_option(argv, argv + argc, "-d"));
    }
    if (command_option_exists(argv, argv + argc, "-o")) {
        result.output = std::string(get_command_option(argv, argv + argc, "-o"));
    }
    if (command_option_exists(argv, argv + argc, "-pc_recon")) {
        result.output_pc_recon = true;
    }
    if (command_option_exists(argv, argv + argc, "-smooth_recon")) {
        result.output_smooth_recon = true;
    }
    if (command_option_exists(argv, argv + argc, "-apr_properties")) {
        result.output_spatial_properties = true;
    }
    if(!(result.output_pc_recon || result.output_smooth_recon || result.output_spatial_properties)){
        //default is pc recon
        result.output_pc_recon = true;
    }
    return result;
}
// Entry point: loads an APR file and, depending on the selected options,
// writes a piece-wise constant reconstruction, per-particle property images
// (type/level/x/y/z), and/or a smooth reconstruction as TIFFs.
int main(int argc, char **argv) {
    // INPUT PARSING
    cmdLineOptions options = read_command_line_options(argc, argv);
    // Read the apr file into the part cell structure
    APRTimer timer;
    timer.verbose_flag = true;
    // APR datastructure
    APR<uint16_t> apr;
    //read file
    std::string file_name = options.directory + options.input;
    apr.read_apr(file_name);
    apr.name = options.output;
    // Intentionaly block-scoped since local recon_pc will be destructed when block ends and release memory.
    {
        if(options.output_pc_recon) {
            //create mesh data structure for reconstruction
            MeshData<uint16_t> recon_pc;
            timer.start_timer("pc interp");
            //perform piece-wise constant interpolation
            apr.interp_img(recon_pc, apr.particles_intensities);
            timer.stop_timer();
            float elapsed_seconds = timer.t2 - timer.t1;
            // Throughput estimate: 2 bytes per voxel over the elapsed time.
            std::cout << "PC recon "
                      << (recon_pc.x_num * recon_pc.y_num * recon_pc.z_num * 2) / (elapsed_seconds * 1000000.0f)
                      << " MB per second" << std::endl;
            //write output as tiff
            TiffUtils::saveMeshAsTiff(options.directory + apr.name + "_pc.tif", recon_pc);
        }
    }
    //////////////////////////
    /// Create a particle dataset with the particle type and pc construct it
    ////////////////////////////
    if(options.output_spatial_properties) {
        //initialization of the iteration structures
        APRIterator<uint16_t> apr_iterator(apr); //this is required for parallel access
        //create particle dataset
        ExtraParticleData<uint16_t> type(apr);
        ExtraParticleData<uint16_t> level(apr);
        ExtraParticleData<uint16_t> x(apr);
        ExtraParticleData<uint16_t> y(apr);
        ExtraParticleData<uint16_t> z(apr);
        timer.start_timer("APR parallel iterator loop");
#ifdef HAVE_OPENMP
#pragma omp parallel for schedule(static) firstprivate(apr_iterator)
#endif
        for (uint64_t particle_number = 0; particle_number < apr_iterator.total_number_particles(); ++particle_number) {
            //needed step for any parallel loop (update to the next part)
            apr_iterator.set_iterator_to_particle_by_number(particle_number);
            type[apr_iterator] = apr_iterator.type();
            level[apr_iterator] = apr_iterator.level();
            x[apr_iterator] = apr_iterator.x();
            y[apr_iterator] = apr_iterator.y();
            z[apr_iterator] = apr_iterator.z();
        }
        timer.stop_timer();
        // Intentionaly block-scoped since local type_recon will be destructed when block ends and release memory.
        {
            // The same buffer is reused for all five property images.
            MeshData<uint16_t> type_recon;
            apr.interp_img(type_recon, type);
            TiffUtils::saveMeshAsTiff(options.directory + apr.name + "_type.tif", type_recon);
            //pc interp
            apr.interp_img(type_recon, level);
            TiffUtils::saveMeshAsTiff(options.directory + apr.name + "_level.tif", type_recon);
            //pc interp
            apr.interp_img(type_recon, x);
            TiffUtils::saveMeshAsTiff(options.directory + apr.name + "_x.tif", type_recon);
            //pc interp
            apr.interp_img(type_recon, y);
            TiffUtils::saveMeshAsTiff(options.directory + apr.name + "_y.tif", type_recon);
            //pc interp
            apr.interp_img(type_recon, z);
            TiffUtils::saveMeshAsTiff(options.directory + apr.name + "_z.tif", type_recon);
        }
    }
    if(options.output_smooth_recon) {
        //smooth reconstruction - requires float
        MeshData<float> recon_smooth;
        std::vector<float> scale_d = {2, 2, 2};
        timer.start_timer("smooth reconstrution");
        apr.interp_parts_smooth(recon_smooth, apr.particles_intensities, scale_d);
        timer.stop_timer();
        float elapsed_seconds = timer.t2 - timer.t1;
        std::cout << "Smooth recon "
                  << (recon_smooth.x_num * recon_smooth.y_num * recon_smooth.z_num * 2) / (elapsed_seconds * 1000000.0f)
                  << " MB per second" << std::endl;
        //write to tiff casting to unsigned 16 bit integer
        TiffUtils::saveMeshAsTiffUint16(options.directory + apr.name + "_smooth.tif", recon_smooth);
    }
}
| 31.63964 | 120 | 0.632118 | MSusik |
f1d637ee274d74c7f3dc883b100d9db2cd160fd2 | 329 | cpp | C++ | tests/FirstBadVersionTest.cpp | yanzhe-chen/LeetCode | d82f0b9721ea613ab216c78e7286671d0e9e4187 | [
"MIT"
] | 43 | 2015-10-10T12:59:52.000Z | 2018-07-11T18:07:00.000Z | tests/FirstBadVersionTest.cpp | yanzhe-chen/LeetCode | d82f0b9721ea613ab216c78e7286671d0e9e4187 | [
"MIT"
] | null | null | null | tests/FirstBadVersionTest.cpp | yanzhe-chen/LeetCode | d82f0b9721ea613ab216c78e7286671d0e9e4187 | [
"MIT"
] | 11 | 2015-10-10T14:41:11.000Z | 2018-07-28T06:03:16.000Z | #include "catch.hpp"
#include "FirstBadVersion.hpp"
// Catch test: among N versions, every version from M onward is "bad";
// the solution must find M as the first bad version.
TEST_CASE("First Bad Version") {
    const int N = 2126753390;
    const int M = 1702766719;
    // M is a const int initialized from a constant expression, so the lambda
    // may use it without capturing it.
    auto isBad = [](int version) {
        return version >= M;
    };
    FirstBadVersion s(isBad);
    SECTION("Sample test") {
        REQUIRE(s.firstBadVersion(N) == M);
    }
}
| 20.5625 | 43 | 0.601824 | yanzhe-chen |
f1d751e953d0eed8d2a4d0d74293687e75404e1c | 2,719 | cpp | C++ | Shadows/Game.cpp | EmilianC/Jewel3D-Samples | 077c5f2531814ffe9041021c5ba5fe93e0461348 | [
"MIT"
] | 6 | 2017-02-04T21:47:01.000Z | 2019-06-01T00:33:56.000Z | Shadows/Game.cpp | EmilianC/Gemcutter-Samples | 077c5f2531814ffe9041021c5ba5fe93e0461348 | [
"MIT"
] | null | null | null | Shadows/Game.cpp | EmilianC/Gemcutter-Samples | 077c5f2531814ffe9041021c5ba5fe93e0461348 | [
"MIT"
] | 3 | 2018-04-12T04:09:15.000Z | 2019-12-19T20:06:49.000Z | #include "Game.h"
#include <gemcutter/Entity/Hierarchy.h>
#include <gemcutter/Input/Input.h>
#include <gemcutter/Math/Matrix.h>
#include <gemcutter/Rendering/Camera.h>
#include <gemcutter/Rendering/Light.h>
#include <gemcutter/Rendering/Mesh.h>
#include <gemcutter/Resource/Material.h>
#include <gemcutter/Resource/Model.h>
// Stores the config reference and registers a resize handler that keeps the
// main camera's aspect ratio in sync with the window.
Game::Game(ConfigTable &config)
	: config(config)
{
	onResized = [this](auto& e) {
		mainCamera->Get<Camera>().SetAspectRatio(e.GetAspectRatio());
	};
}
// One-time scene setup: directional shadow light, ground and shack meshes,
// shader/light bindings, cameras, and the two render passes (shadow-map
// depth pass feeding the main pass). Returns false if the shadow-map render
// target fails validation.
bool Game::Init()
{
	// Setup Light.
	shadowCamera->Add<Camera>(-20.0f, 20.0f, 20.0f, -20.0f, -100.0f, 100.0f);
	shadowCamera->Add<Light>(vec3(1.0f), Light::Type::Directional);
	shadowCamera->LookAt(vec3(2.0f, 1.0f, 0.5f), vec3(0.0f));
	// Prepare ground object.
	ground->Add<Mesh>(Load<Model>("Models/Ground"), Load<Material>("Materials/GroundShadowed"));
	ground->scale = vec3(15.0f);
	// Prepare shack object.
	shack->Add<Mesh>(Load<Model>("Models/Shack"), Load<Material>("Materials/ShackShadowed"));
	shack->scale = vec3(1.33f);
	// Link light data.
	auto shader = shack->Get<Renderable>().GetMaterial().shader;
	shader->buffers.Add(shadowCamera->Get<Light>().GetBuffer(), 0);
	worldToShadow = shader->buffers[2].MakeHandle<mat4>("WorldToShadow");
	// Setup Scene.
	rootNode->Get<Hierarchy>().AddChild(ground);
	rootNode->Get<Hierarchy>().AddChild(shack);
	// Setup Camera.
	mainCamera->Add<Camera>(60.0f, Application.GetAspectRatio(), 1.0f, 10000.0f);
	mainCamera->position = vec3(0.0f, 10.0f, 25.0f);
	mainCamera->RotateX(-20.0f);
	// Setup up renderer. PCF gives soft shadow-map edge filtering.
	shadowMap = RenderTarget::MakeNew(1024, 1024, 0, true);
	shadowMap->GetDepthTexture()->SetPCF(true);
	if (!shadowMap->Validate())
		return false;
	shadowRenderPass.SetCamera(shadowCamera);
	shadowRenderPass.SetShader(Shader::MakeNewPassThrough());
	shadowRenderPass.SetTarget(shadowMap);
	mainRenderPass.textures.Add(shadowMap->GetDepthTexture(), 1);
	mainRenderPass.SetCamera(mainCamera);
	// Set background color to black.
	SetClearColor(0.0f, 0.0f, 0.0f, 0.0f);
	return true;
}
// Per-frame update: quits on Escape, spins the scene, refreshes the
// world-to-shadow-map matrix, then ticks the engine systems.
void Game::Update()
{
	if (Input.IsDown(Key::Escape))
	{
		Application.Exit();
		return;
	}
	rootNode->RotateY(Application.GetDeltaTime() * -12.0f);
	// Keep shadow direction and world-space to shadowMap matrix up to date.
	// The fixed matrix maps clip-space [-1,1] into texture-space [0,1].
	worldToShadow =
		mat4(0.5f, 0.0f, 0.0f, 0.5f,
			0.0f, 0.5f, 0.0f, 0.5f,
			0.0f, 0.0f, 0.5f, 0.5f,
			0.0f, 0.0f, 0.0f, 1.0f) * shadowCamera->Get<Camera>().GetViewProjMatrix();
	// Engine systems and components are updated here.
	Application.UpdateEngine();
}
// Per-frame render: clears both targets, renders the shadow depth pass
// first, then the main pass that samples it.
void Game::Draw()
{
	ClearBackBuffer();
	shadowMap->Clear();
	shadowRenderPass.Bind();
	shadowRenderPass.RenderRoot(*rootNode);
	mainRenderPass.Bind();
	mainRenderPass.RenderRoot(*rootNode);
}
| 26.920792 | 93 | 0.709084 | EmilianC |
f1d81120239f2881c17133155996f17f280e2788 | 1,966 | ipp | C++ | implement/oglplus/images/sphere_bmap.ipp | matus-chochlik/oglplus | 76dd964e590967ff13ddff8945e9dcf355e0c952 | [
"BSL-1.0"
] | 364 | 2015-01-01T09:38:23.000Z | 2022-03-22T05:32:00.000Z | implement/oglplus/images/sphere_bmap.ipp | matus-chochlik/oglplus | 76dd964e590967ff13ddff8945e9dcf355e0c952 | [
"BSL-1.0"
] | 55 | 2015-01-06T16:42:55.000Z | 2020-07-09T04:21:41.000Z | implement/oglplus/images/sphere_bmap.ipp | matus-chochlik/oglplus | 76dd964e590967ff13ddff8945e9dcf355e0c952 | [
"BSL-1.0"
] | 57 | 2015-01-07T18:35:49.000Z | 2022-03-22T05:32:04.000Z | /**
* @file oglplus/images/sphere_bmap.ipp
* @brief Implementation of images::SphereBumpMap
*
* @author Matus Chochlik
*
* Copyright 2010-2019 Matus Chochlik. Distributed under the Boost
* Software License, Version 1.0. (See accompanying file
* LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#include <oglplus/lib/incl_begin.ipp>
#include <oglplus/lib/incl_end.ipp>
#include <oglplus/math/vector.hpp>
#include <cassert>
#include <cmath>
namespace oglplus {
namespace images {
// Builds a tileable RGBA16F bump map of xrep x yrep hemisphere bulges:
// RGB stores a normalized direction derived from the hemisphere normal
// (flat "up" outside the unit disc), A stores the hemisphere height.
OGLPLUS_LIB_FUNC
SphereBumpMap::SphereBumpMap(
  SizeType width, SizeType height, SizeType xrep, SizeType yrep)
  : Image(
      width,
      height,
      1,
      4,
      &TypeTag<GLfloat>(),
      PixelDataFormat::RGBA,
      PixelDataInternalFormat::RGBA16F) {
    assert(width != 0 && height != 0);
    assert(xrep != 0 && yrep != 0);
    using number = double;
    number one = number(1);
    // Scale factors mapping pixel coordinates into [-1, 1] within each tile.
    number invw = number(2 * xrep) / number(width);
    number invh = number(2 * yrep) / number(height);
    GLsizei hi = width / xrep;
    GLsizei hj = height / yrep;
    auto p = this->_begin<GLfloat>();
    for(GLsizei j = 0; j < height; ++j) {
        number y = number((j % hj) - hj / 2) * invh;
        for(GLsizei i = 0; i < width; ++i) {
            number x = number((i % hi) - hi / 2) * invw;
            // l: distance from the tile center; inside the unit disc the
            // hemisphere height is d = sqrt(1 - l^2).
            number l = std::sqrt(x * x + y * y);
            number d = sqrt(one - l * l); // NaN when l > 1, discarded below
            Vector<number, 3> z(0.0, 0.0, one);
            Vector<number, 3> n(-x, -y, d);
            // Outside the disc: straight-up direction and zero height.
            Vector<number, 3> v = (l >= one) ? z : Normalized(z + n);
            if(l >= one)
                d = 0;
            assert(p != this->_end<GLfloat>());
            *p = GLfloat(v.x());
            ++p;
            *p = GLfloat(v.y());
            ++p;
            *p = GLfloat(v.z());
            ++p;
            *p = GLfloat(d);
            ++p;
        }
    }
    assert(p == this->_end<GLfloat>());
}
} // namespace images
} // namespace oglplus
| 27.690141 | 69 | 0.539166 | matus-chochlik |
f1ed79f793e5fa7fe143dcd2981a49c9d124d13a | 2,795 | cpp | C++ | Source/BehaviorTreeExtension/Private/BTStateMachineSelector.cpp | bretvincent/UnrealBehaviorTreeExtension | dc82c9297a43bb5096c25c262d661609534f9897 | [
"MIT"
] | 2 | 2021-09-11T11:45:31.000Z | 2021-12-23T07:35:41.000Z | Source/BehaviorTreeExtension/Private/BTStateMachineSelector.cpp | bretvincent/UnrealBehaviorTreeExtension | dc82c9297a43bb5096c25c262d661609534f9897 | [
"MIT"
] | null | null | null | Source/BehaviorTreeExtension/Private/BTStateMachineSelector.cpp | bretvincent/UnrealBehaviorTreeExtension | dc82c9297a43bb5096c25c262d661609534f9897 | [
"MIT"
] | 1 | 2021-09-11T11:45:34.000Z | 2021-09-11T11:45:34.000Z | // Copyright 2019 Vincent Breton Kochanowski. All Rights Reserved.
#include "BTStateMachineSelector.h"
#include "BTStateTransitionDecorator.h"
void UBTStateMachineSelector::OnGameplayTaskActivated(UGameplayTask& Task)
{
}
void UBTStateMachineSelector::OnGameplayTaskDeactivated(UGameplayTask& Task)
{
}
UBTStateMachineSelector::UBTStateMachineSelector(const FObjectInitializer& ObjectInitializer) : Super(ObjectInitializer),
InitialStateId(0), ExitStateMachineId(0)
{
NodeName = "StateMachineSelector";
}
int32 UBTStateMachineSelector::GetNextChildHandler(FBehaviorTreeSearchData& SearchData, int32 PrevChild, EBTNodeResult::Type LastResult) const
{
if (PrevChild == BTSpecialChild::NotInitialized)
{
// go to initial state of state machine
return FindChildIndexFromStateId(InitialStateId);
}
// if we failed we exit state machine
if (LastResult == EBTNodeResult::Failed)
{
return BTSpecialChild::ReturnToParent;
}
return ShouldTransition(SearchData, PrevChild);
}
// Maps a state id to the child index whose transition decorator declares
// that id; the exit id maps to ReturnToParent. On a missing id it logs an
// error and returns 0 — NOTE(review): falling back to child 0 is arbitrary;
// confirm this is the intended recovery behavior.
int32 UBTStateMachineSelector::FindChildIndexFromStateId(int32 StateId) const
{
	if (StateId == ExitStateMachineId)
	{
		return BTSpecialChild::ReturnToParent;
	}
	for (int i = 0; i < Children.Num(); ++i)
	{
		auto const& ChildInfo = Children[i];
		for (auto Decorator : ChildInfo.Decorators)
		{
			const auto StateTransitionDecorator = Cast<UBTStateTransitionDecorator>(Decorator);
			if (StateTransitionDecorator)
			{
				if (StateTransitionDecorator->StateId == StateId)
				{
					return i;
				}
			}
		}
	}
	UE_LOG(LogBehaviorTree, Error, TEXT("No State with Id: %d found!"), StateId);
	return 0;
}
// Checks the given child's transition decorators: the first decorator whose
// condition fires determines the next state (resolved via its stored target
// state id); with no active transition the same child keeps running.
int32 UBTStateMachineSelector::ShouldTransition(FBehaviorTreeSearchData& SearchData, int32 ChildIndex) const
{
	// Fixed off-by-one: a valid index must be strictly less than Num();
	// the original `>` admitted ChildIndex == Num() and then read past the array.
	if (ChildIndex < 0 || ChildIndex >= Children.Num())
	{
		// no children means that something went wrong
		return BTSpecialChild::ReturnToParent;
	}
	auto const& ChildInfo = Children[ChildIndex];
	for (auto Decorator : ChildInfo.Decorators)
	{
		const auto StateTransitionDecorator = Cast<UBTStateTransitionDecorator>(Decorator);
		if (StateTransitionDecorator)
		{
			// search if any transitions have activated; will return first one found
			if (StateTransitionDecorator->ShouldTransition(SearchData))
			{
				const auto StateDecoratorMemory = StateTransitionDecorator->GetNodeMemory<FBTStateDecoratorMemory>(SearchData);
				int32 childId = StateDecoratorMemory->ChildToTransitionToId;
				return FindChildIndexFromStateId(childId);
			}
		}
	}
	// if no transitions have activated; just continue executing node
	return ChildIndex;
}
#if WITH_EDITOR
// Editor-only: reuses the stock Selector icon for this composite node.
FName UBTStateMachineSelector::GetNodeIconName() const
{
	return FName("BTEditor.Graph.BTNode.Composite.Selector.Icon");
}
#endif
| 28.814433 | 143 | 0.743828 | bretvincent |
f1f069dbe0c7d96784f129f043ead6c4d09d247d | 4,291 | cc | C++ | engine/Node.cc | zhouzhiyong18/SubgraphMatch | e34b088ed5efc67556c91ad9591dc8718a048d59 | [
"Apache-2.0"
] | 4 | 2018-11-19T08:05:41.000Z | 2018-11-19T11:55:55.000Z | engine/Node.cc | ChengTsang/SSSP | 6d6680b87e0edaac5668cbf9cb4d39ee7a77d150 | [
"Apache-2.0"
] | null | null | null | engine/Node.cc | ChengTsang/SSSP | 6d6680b87e0edaac5668cbf9cb4d39ee7a77d150 | [
"Apache-2.0"
] | null | null | null | /**
* @file Node.cc
* @author Songjie Niu, Shimin Chen
* @version 0.1
*
* @section LICENSE
*
* Copyright 2016 Shimin Chen (chensm@ict.ac.cn) and
* Songjie Niu (niusongjie@ict.ac.cn)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* @see Node.h
*
*/
#include <new>
#include <string.h>
#include "Node.h"
#include "Worker.h"
extern Worker worker;
// Definitions of the per-run record sizing statics (set elsewhere at startup).
// *_value_size = bytes of the user payload; *_size = full record stride in
// bytes, used for pointer arithmetic into the flat node/edge/message buffers.
int Edge::e_value_size;
int Edge::e_size;
int Msg::m_value_size;
int Msg::m_size;
int Node::n_value_size;
int Node::n_size;
Node& Node::getNode(int index) {
    // Nodes live in one contiguous byte buffer owned by the worker;
    // step into it by the runtime-determined node stride.
    char* base = (char *)(worker.m_pnode);
    return *( (Node *)(base + index * Node::n_size) );
}
Edge& Node::getEdge(int index) {
    // Same flat-buffer layout as getNode(), but over the edge array.
    char* base = (char *)(worker.m_pedge);
    return *( (Edge *)(base + index * Edge::e_size) );
}
// Construct the in-message vector in place: Node objects live in a raw
// byte buffer, so members must be built with placement new.
void Node::initInMsg() {
    new(&m_cur_in_msg) std::vector<Msg*>();
}

// Queue a freshly received message and wake the vertex if it was halted.
void Node::recvNewMsg(Msg* pmsg) {
    m_cur_in_msg.push_back(pmsg);
    if (! m_active) {
        m_active = true;
        ++worker.m_wm_curssfinish.act_vertex;
    }
}

// Return every pending in-message to the worker's free list, then empty the queue.
void Node::clearCurInMsg() {
    if ( m_cur_in_msg.size() ) {
        for (int i = m_cur_in_msg.size() - 1; i >= 0; --i) {
            worker.m_free_list.free(m_cur_in_msg[i]);
        }
        m_cur_in_msg.clear();
    }
}

// Counterpart of initInMsg(): explicitly destroy the placement-new'd vector.
void Node::freeInMsgVector() {
    (&m_cur_in_msg)->~vector<Msg*>();
}

// Current superstep number, as tracked by the owning worker.
int Node::getSuperstep() const { return worker.m_wm_curssfinish.superstep; }

// Globally unique id of this vertex.
int64_t Node::getVertexId() const { return m_v_id; }

// Deactivate this vertex; it stays halted until a new message arrives
// (see recvNewMsg, which reactivates and re-increments the counter).
void Node::voteToHalt() {
    m_active = false;
    --worker.m_wm_curssfinish.act_vertex;
}

// Iterator over the messages received for the current superstep.
// Caller owns (and must delete) the returned iterator.
GenericLinkIterator* Node::getGenericLinkIterator() {
    return new GenericLinkIterator(&m_cur_in_msg);
}

/*
GenericArrayIterator* Node::getGenericArrayIterator() {
    return new GenericArrayIterator(
               (char *)(worker.m_pedge) + m_edge_index * Edge::e_size,
               (char *)(worker.m_pedge) + (m_edge_index + m_out_degree) * Edge::e_size,
               Edge::e_size );
}
*/
// Send a message to an arbitrary vertex.
// The destination worker is chosen by hashing the vertex id. Messages to
// vertices owned by this worker are delivered locally; everything else is
// buffered in the per-worker send list and flushed once the list fills up.
void Node::sendMessageTo(int64_t dest_vertex, const char* pmessage) {
    int wk = dest_vertex % (worker.m_machine_cnt - 1) + 1; // hash partition

    if (wk == worker.m_addr_self.id) { // self message, can be commented for debug 1
        // Local delivery: take a message slot from the free list and hand it
        // straight back to the worker's receive path.
        Msg* pmsg = (Msg *)( worker.m_free_list.allocate() );
        pmsg->s_id = m_v_id;
        pmsg->d_id = dest_vertex;
        memcpy(pmsg->message, pmessage, Msg::m_value_size);
        worker.recvNewNodeMsg(pmsg); // self message certainly not for next next
                                     // superstep but for next
    } else { // message to another worker
        // check if we should flush: the send list for this worker is full
        if (worker.m_psendlist_curpos[wk] == SENDLIST_LEN) {
            while ( worker.sendNodeMessage(wk, worker.m_psendlist_curpos[wk]) );
            // How to change into wait ?
            worker.m_psendlist_curpos[wk] = 0;
            ++worker.m_wm_curssfinish.worker_msg[wk];
        }

        // copy the message into the send message list at the next slot
        Msg* pmsg = (Msg *)(worker.m_pww_sendlist[wk].msgs.data + worker.m_psendlist_curpos[wk] * Msg::m_size);
        pmsg->s_id = m_v_id;
        pmsg->d_id = dest_vertex;
        memcpy(pmsg->message, pmessage, Msg::m_value_size);
        ++worker.m_psendlist_curpos[wk];
        // printf("wrote one piece of node2node message to sendlist[%d]\n", wk); //
    }

    ++worker.m_wm_curssfinish.sent_msg;
}
void Node::sendMessageToAllNeighbors(const char* pmessage) {
char* pedge = (char *)(worker.m_pedge) + m_edge_index * Edge::e_size;
for (int i = 0; i < m_out_degree; ++i) {
sendMessageTo( ( (Edge *)pedge )->to, pmessage );
pedge += Edge::e_size;
}
}
// Read-only view of aggregator 'aggr's global value held by the worker.
const void* Node::getAggrGlobal(int aggr) {
    return worker.m_pmy_aggregator[aggr]->getGlobal();
}

// Fold a local value into aggregator 'aggr'.
void Node::accumulateAggr(int aggr, const void* p) {
    worker.m_pmy_aggregator[aggr]->accumulate(p);
}
| 28.230263 | 111 | 0.646003 | zhouzhiyong18 |
f1feac745c1dfd8af36ee7527ab1c2ca296ffaee | 1,324 | cpp | C++ | samples/flags/main.cpp | Nitta-K-git/vcglib_sample | dbe70bd21bbc618286bbdf7533782a398a4f276d | [
"MIT"
] | 12 | 2020-12-07T21:04:31.000Z | 2022-01-04T13:48:22.000Z | samples/flags/main.cpp | Nitta-K-git/vcglib_samples | dbe70bd21bbc618286bbdf7533782a398a4f276d | [
"MIT"
] | null | null | null | samples/flags/main.cpp | Nitta-K-git/vcglib_samples | dbe70bd21bbc618286bbdf7533782a398a4f276d | [
"MIT"
] | null | null | null | #include <vcg/complex/algorithms/create/platonic.h>
using namespace vcg;
using namespace std;
// Forward declarations so the trait bundle below can reference both types.
class MyFace;
class MyVertex;

// UsedTypes wires the vertex/face roles into vcglib's trait machinery.
struct MyUsedTypes : public UsedTypes< Use<MyVertex>::AsVertexType, Use<MyFace>::AsFaceType> {};

// Vertex with position, normal, per-element flag bits and vertex-face adjacency.
class MyVertex : public Vertex< MyUsedTypes,
	vertex::Coord3f,
	vertex::Normal3f,
	vertex::BitFlags,
	vertex::VFAdj
> {};
// Face with vertex references, normal, vertex-face adjacency and flag bits.
class MyFace : public Face< MyUsedTypes,
	face::VertexRef,
	face::Normal3f,
	face::VFAdj,
	face::BitFlags
> {};
// Triangle mesh container over the vertex/face types above.
class MyMesh : public tri::TriMesh<
	vector<MyVertex>,
	vector<MyFace>
> {};
// Demo: build a tetrahedron, toggle selection flag bits on vertices and
// faces, print every element's selection state, then clear all flags.
// Fix: removed the unused 'MyMesh::VertexIterator vi;' and
// 'MyMesh::FaceIterator fi;' locals that were shadowed by the range-for
// loop variables of the same names. Output is unchanged.
int main(){
	MyMesh mesh;
	vcg::tri::Tetrahedron(mesh); // create preset mesh data

	cout << "vert" << endl;
	// set selected flag manually on two vertices
	MyMesh::VertexPointer vp;
	vp = &(mesh.vert[0]);
	vp->SetS();
	vp = &(mesh.vert[2]);
	vp->SetS();
	// print all vertex selected-flag states
	for(auto &&v : mesh.vert){
		cout << v.IsS() << endl;
	}
	// clear all vertex select flags
	tri::UpdateFlags<MyMesh>::VertexClearS(mesh);

	cout << "face" << endl;
	// set selected flag manually on one face
	MyMesh::FacePointer fp;
	fp = &(mesh.face[0]);
	fp->SetS();
	// print all face selected-flag states
	for(auto &&f : mesh.face){
		cout << f.IsS() << endl;
	}
	// clear all face select flags
	tri::UpdateFlags<MyMesh>::FaceClearS(mesh);
	return 1;
}
| 20.060606 | 96 | 0.659366 | Nitta-K-git |
7b008b951d5d37d8e11c711155bc85bb79ee924b | 1,483 | cpp | C++ | src/2015/day03.cpp | AnthonyN1/Advent-of-Code | 28782b219a50d92b2621dc88ecf00ae2e3b45b0a | [
"MIT"
] | null | null | null | src/2015/day03.cpp | AnthonyN1/Advent-of-Code | 28782b219a50d92b2621dc88ecf00ae2e3b45b0a | [
"MIT"
] | null | null | null | src/2015/day03.cpp | AnthonyN1/Advent-of-Code | 28782b219a50d92b2621dc88ecf00ae2e3b45b0a | [
"MIT"
] | null | null | null | /*
Advent of Code - 2015: Day 03
Author: Anthony Nool (AnthonyN1)
*/
#include <iostream>
#include <set>
#include <string>
#include <vector>
#include "../../includes/point.h"
/*
Updates pt based on the direction ch is pointing.
ch is either '^', 'v', '<', or '>'.
*/
/*
	Moves pt one unit in the direction ch points:
	'^' = north (+y), 'v' = south (-y), '<' = west (-x), '>' = east (+x).
	Any other character leaves pt unchanged.
*/
void updatePoint(Point &pt, char ch){
	if(ch == '^') pt.shift(0, 1);
	else if(ch == 'v') pt.shift(0, -1);
	else if(ch == '<') pt.shift(-1, 0);
	else if(ch == '>') pt.shift(1, 0);
}
/*
	Part one: follow every direction character, tracking each distinct
	point visited (the origin counts), and print the number of distinct
	houses visited.
*/
void partOne(const std::vector<std::string> &input){
	Point curr;
	// Santa delivers to the starting house before reading any moves.
	std::set<Point> visited{curr};

	for(const std::string &moves : input){
		for(char dir : moves){
			updatePoint(curr, dir);
			visited.insert(curr);
		}
	}

	std::cout << visited.size() << std::endl;
}
void partTwo(const std::vector<std::string> &input){
std::set<Point> visited;
// The origin is visited.
Point santa, robot;
visited.insert(santa);
bool toggle = false;
for(const std::string &str : input){
for(char ch : str){
// Keeps track of every visited point, and the number of times visited.
// toggle switches after every char.
// This represents Santa and Robo-Santa taking turns.
if(!toggle){
updatePoint(santa, ch);
visited.insert(santa);
} else{
updatePoint(robot, ch);
visited.insert(robot);
}
toggle = !toggle;
}
}
std::cout << visited.size() << std::endl;
}
| 20.040541 | 74 | 0.633176 | AnthonyN1 |
7b00e5214a6dc4fe1cf09dc9c1a76352eeafbc3e | 14,395 | cpp | C++ | blast/src/connect/services/netschedule_api_getjob.cpp | mycolab/ncbi-blast | e59746cec78044d2bf6d65de644717c42f80b098 | [
"Apache-2.0"
] | null | null | null | blast/src/connect/services/netschedule_api_getjob.cpp | mycolab/ncbi-blast | e59746cec78044d2bf6d65de644717c42f80b098 | [
"Apache-2.0"
] | null | null | null | blast/src/connect/services/netschedule_api_getjob.cpp | mycolab/ncbi-blast | e59746cec78044d2bf6d65de644717c42f80b098 | [
"Apache-2.0"
] | null | null | null | /* $Id: netschedule_api_getjob.cpp 607687 2020-05-06 16:16:59Z sadyrovr $
* ===========================================================================
*
* PUBLIC DOMAIN NOTICE
* National Center for Biotechnology Information
*
* This software/database is a "United States Government Work" under the
* terms of the United States Copyright Act. It was written as part of
* the author's official duties as a United States Government employee and
* thus cannot be copyrighted. This software/database is freely available
* to the public for use. The National Library of Medicine and the U.S.
* Government have not placed any restriction on its use or reproduction.
*
* Although all reasonable efforts have been taken to ensure the accuracy
* and reliability of the software and data, the NLM and the U.S.
* Government do not and cannot warrant the performance or results that
* may be obtained by using this software or data. The NLM and the U.S.
* Government disclaim all warranties, express or implied, including
* warranties of performance, merchantability or fitness for any particular
* purpose.
*
* Please cite the author in any work or product based on this material.
*
* ===========================================================================
*
* Authors: Rafael Sadyrov
*
* File Description:
* NetSchedule API get/read job implementation.
*
*/
#include <ncbi_pch.hpp>
#include "grid_worker_impl.hpp"
#include "netschedule_api_impl.hpp"
#include "netservice_api_impl.hpp"
#include "netschedule_api_getjob.hpp"
BEGIN_NCBI_SCOPE
template <class TImpl> class CAnyAffinityJob;
template <class TImpl> class CMostAffinityJob;
// Entry point: fetch (or read) a job before the deadline expires.
// With any_affinity the first available job is accepted; otherwise servers
// are polled repeatedly, preferring jobs higher on the affinity ladder.
template <class TImpl>
CNetScheduleGetJob::EResult CNetScheduleGetJobImpl<TImpl>::GetJob(
        const CDeadline& deadline,
        CNetScheduleJob& job,
        CNetScheduleAPI::EJobStatus* job_status,
        bool any_affinity)
{
    if (any_affinity) {
        CAnyAffinityJob<TImpl> holder(job, job_status, m_ImmediateActions);
        return GetJobImpl(deadline, holder);
    } else {
        ReturnNotFullyCheckedServers();
        CMostAffinityJob<TImpl> holder(job, job_status, m_ImmediateActions, m_Impl);
        return GetJobImpl(deadline, holder);
    }
}

typedef list<SSocketAddress> TServers;
typedef list<CNetScheduleGetJob::SEntry> TTimeline;
typedef TTimeline::iterator TIterator;

// Job-holder policy used when any affinity is acceptable: the first job
// returned by any server ends the search (Done() is always true).
template <class TImpl>
class CAnyAffinityJob
{
public:
    CNetScheduleJob& job;
    CNetScheduleAPI::EJobStatus* job_status;

    CAnyAffinityJob(CNetScheduleJob& j, CNetScheduleAPI::EJobStatus* js,
            TTimeline& timeline) :
        job(j), job_status(js), m_Timeline(timeline)
    {}

    // Nothing to undo on interruption: this holder never keeps a job.
    void Interrupt() {}
    // Always scan from the head of the immediate-actions timeline.
    TIterator Begin() { return m_Timeline.begin(); }
    TIterator Next(bool) { return m_Timeline.begin(); }
    // Empty affinity list == no affinity filtering.
    const string& Affinity() const { return kEmptyStr; }
    // Any job is good enough; stop immediately.
    bool Done() { return true; }
    bool HasJob() const { return false; }

private:
    TTimeline& m_Timeline;
};
// Job-holder policy that keeps searching for the best (highest-priority)
// affinity on the API's affinity ladder, returning a lower-priority job
// back to the queue whenever a better one is found.
template <class TImpl>
class CMostAffinityJob
{
public:
    CNetScheduleJob& job;
    CNetScheduleAPI::EJobStatus* job_status;

    // m_JobPriority == max() is the "no job held yet" sentinel (see HasJob()).
    CMostAffinityJob(CNetScheduleJob& j, CNetScheduleAPI::EJobStatus* js,
            TTimeline& timeline, TImpl& get_job_impl) :
        job(j), job_status(js), m_JobPriority(numeric_limits<size_t>::max()),
        m_Timeline(timeline), m_Iterator(timeline.end()),
        m_GetJobImpl(get_job_impl)
    {
        _ASSERT(m_GetJobImpl.m_API->m_AffinityLadder.size());
    }

    // Search aborted: give any held job back to the server.
    void Interrupt()
    {
        if (HasJob()) {
            m_GetJobImpl.ReturnJob(job);
            job.Reset();
        }
    }

    TIterator Begin()
    {
        m_Iterator = m_Timeline.end();
        return m_Timeline.begin();
    }

    // Advance the scan; 'increment' is true when the previous entry just
    // yielded a job (false means splice/erase already moved us forward).
    TIterator Next(bool increment)
    {
        if (increment) {
            if (m_Iterator == m_Timeline.end()) {
                m_Iterator = m_Timeline.begin();
            } else {
                ++m_Iterator;
            }

            // We've already got a job from an entry at m_Iterator + 1
            // (that is why increment is true), so must not happen
            _ASSERT(m_Iterator != m_Timeline.end());

        } else if (m_Iterator == m_Timeline.end()) {
            return m_Timeline.begin();
        }

        TIterator ret = m_Iterator;
        return ++ret;
    }

    // Affinity list to request: once a job is held, only affinities strictly
    // better than the held job's are worth asking for.
    const string& Affinity() const
    {
        // Must not happen, since otherwise Done() has returned true already
        _ASSERT(m_JobPriority);

        CNetScheduleGetJob::TAffinityLadder&
            affinity_ladder(m_GetJobImpl.m_API->m_AffinityLadder);

        if (HasJob()) {
            // Only affinities that are higher than current job's one
            return affinity_ladder[m_JobPriority - 1].second;
        } else {
            // All affinities
            return affinity_ladder.back().second;
        }
    }

    // Accept the newly fetched job, returning any previously held one.
    // Returns true only when the job has the top (zero) ladder priority.
    bool Done()
    {
        // Must not happen, since otherwise Done() has returned true already
        _ASSERT(m_JobPriority);

        // Return a less-priority job back
        if (HasJob()) {
            m_GetJobImpl.ReturnJob(m_PreviousJob);
        }

        m_PreviousJob = job;

        CNetScheduleGetJob::TAffinityLadder&
            affinity_ladder(m_GetJobImpl.m_API->m_AffinityLadder);

        size_t priority = min(affinity_ladder.size(), m_JobPriority) - 1;

        // Look up the new job's affinity in the ladder, best-fit downwards.
        do {
            if (job.affinity == affinity_ladder[priority].first) {
                m_JobPriority = priority;

                // Return true, if job has the highest priority (zero)
                return !m_JobPriority;
            }
        } while (priority-- > 0);

        // Whether affinities not from the ladder are allowed
        if (m_GetJobImpl.m_API->m_AffinityPreference ==
                CNetScheduleExecutor::eAnyJob) {
            // Make it the least-priority
            m_JobPriority = affinity_ladder.size();
        } else {
            // Should not happen
            ERR_POST("Got a job " << job.job_id <<
                    " with unexpected affinity " << job.affinity);
            m_JobPriority = numeric_limits<size_t>::max();
        }

        return false;
    }

    bool HasJob() const
    {
        return m_JobPriority < numeric_limits<size_t>::max();
    }

private:
    size_t m_JobPriority;
    TTimeline& m_Timeline;
    TIterator m_Iterator;
    TImpl& m_GetJobImpl;
    CNetScheduleJob m_PreviousJob;
};
// One pass over the immediate-action servers: ask each for a job, moving
// servers between the immediate and scheduled timelines as they answer.
// Returns eJob / eAgain / eInterrupt; never blocks on the deadline itself.
template <class TImpl>
template <class TJobHolder>
CNetScheduleGetJob::EResult CNetScheduleGetJobImpl<TImpl>::GetJobImmediately(TJobHolder& holder)
{
    TIterator i = holder.Begin();

    for (;;) {
        EState state = m_Impl.CheckState();

        if (state == eStopped) {
            holder.Interrupt();
            return eInterrupt;
        }
        
        if (state == eRestarted) {
            Restart();
            i = holder.Begin();
            continue;
        }

        // We must check i here to let state be checked before leaving loop
        if (i == m_ImmediateActions.end()) {
            return holder.HasJob() ? eJob : eAgain;
        }

        if (*i == m_DiscoveryAction) {
            NextDiscoveryIteration();
            i = holder.Begin();
            continue;
        }

        // Whether to move to the next entry
        // (false means we are already at the next entry due to splice/erase)
        bool increment = false;

        try {
            // Get prioritized affinity list and
            // a flag whether any affinity job is appropriate
            const string& prio_aff_list = holder.Affinity();
            const bool any_affinity = !holder.HasJob();

            if (m_Impl.CheckEntry(*i, prio_aff_list, any_affinity,
                        holder.job, holder.job_status)) {
                if (i == m_ImmediateActions.begin()) {
                    increment = true;
                } else {
                    // We have got a more prioritized job from this server.
                    // Move this server to the top of immediate actions,
                    // so we will have servers ordered (most-to-least)
                    // by affinities of the jobs they have returned last
                    m_ImmediateActions.splice(m_ImmediateActions.begin(),
                            m_ImmediateActions, i);
                }

                // A job has been returned; keep the server in
                // immediate actions because there can be more
                // jobs in the queue.
                if (holder.Done()) {
                    return eJob;
                }
            } else {
                // No job has been returned by this server;
                // query the server later.
                i->deadline = CDeadline(m_Impl.m_Timeout, 0);
                i->all_affinities_checked = any_affinity;
                m_ScheduledActions.splice(m_ScheduledActions.end(),
                        m_ImmediateActions, i);
            }
        }
        catch (CNetSrvConnException& e) {
            // Because a connection error has occurred, do not
            // put this server back to the timeline.
            m_ImmediateActions.erase(i);
            ERR_POST(Warning << e.GetMsg());
        }
        catch (...) {
            m_ImmediateActions.erase(i);

            // Preserve a job obtained earlier rather than propagating.
            if (holder.HasJob()) {
                return eJob;
            }

            throw;
        }

        // Check all servers that have timeout expired
        while (!m_ScheduledActions.empty() &&
                m_ScheduledActions.front().deadline.GetRemainingTime().IsZero()) {
            m_ImmediateActions.splice(m_ImmediateActions.end(),
                    m_ScheduledActions, m_ScheduledActions.begin());
        }

        // Check if there's a notification in the UDP socket.
        while (CNetServer server = m_Impl.ReadNotifications()) {
            MoveToImmediateActions(server);
        }

        i = holder.Next(increment);
    }
}
// Outer loop: repeat immediate polling until a job arrives, the deadline
// expires, no server reports more jobs, or the search is interrupted.
// Between passes, waits on UDP notifications up to the next scheduled time.
template <class TImpl>
template <class TJobHolder>
CNetScheduleGetJob::EResult CNetScheduleGetJobImpl<TImpl>::GetJobImpl(
        const CDeadline& deadline, TJobHolder& holder)
{
    for (;;) {
        EResult ret = GetJobImmediately(holder);

        if (ret != eAgain) {
            return ret;
        }

        auto entry_has_more_jobs = [&](const SEntry& entry) {
            return m_Impl.MoreJobs(entry);
        };

        // If MoreJobs() returned false for all entries of m_ScheduledActions
        if (find_if(m_ScheduledActions.begin(), m_ScheduledActions.end(),
                    entry_has_more_jobs) == m_ScheduledActions.end()) {
            return eNoJobs;
        }

        if (deadline.IsExpired())
            return eAgain;

        // At least, the discovery action must be there
        _ASSERT(!m_ScheduledActions.empty());

        // There's still time. Wait for notifications and query the servers.
        CDeadline next_event_time = m_ScheduledActions.front().deadline;
        bool last_wait = deadline < next_event_time;
        if (last_wait) next_event_time = deadline;

        if (CNetServer server = m_Impl.WaitForNotifications(next_event_time)) {
            // Drain every queued notification before re-polling.
            do {
                MoveToImmediateActions(server);
            } while ((server = m_Impl.ReadNotifications()));
        } else if (last_wait) {
            return eAgain;
        } else {
            // Timeout reached: promote the first scheduled server.
            m_ImmediateActions.splice(m_ImmediateActions.end(),
                    m_ScheduledActions, m_ScheduledActions.begin());
        }
    }
}
// Remove timeline entries whose server is no longer in 'servers'.
// Matched servers are also removed from 'servers', leaving it with only
// newly discovered addresses when the caller is done filtering.
inline void Filter(TTimeline& timeline, TServers& servers)
{
    TTimeline::iterator i = timeline.begin();

    while (i != timeline.end()) {
        const SSocketAddress& address(i->server_address);
        TServers::iterator j = find(servers.begin(), servers.end(), address);

        // If this server is still valid
        if (j != servers.end()) {
            servers.erase(j);
            ++i;
        } else {
            timeline.erase(i++);
        }
    }
}

// Drop all pending actions and force a fresh service discovery pass.
template <class TImpl>
void CNetScheduleGetJobImpl<TImpl>::Restart()
{
    // Rediscover all servers
    m_ImmediateActions.clear();
    m_ScheduledActions.clear();
    NextDiscoveryIteration();
}
// Promote a server (usually one that sent a notification) into the
// immediate-actions timeline, adding it if it was unknown.
template <class TImpl>
void CNetScheduleGetJobImpl<TImpl>::MoveToImmediateActions(SNetServerImpl* server_impl)
{
    SEntry entry(server_impl->m_ServerInPool->m_Address);

    TTimeline::iterator i = find(m_ScheduledActions.begin(),
            m_ScheduledActions.end(), entry);

    // Server was postponed, move to immediate
    if (i != m_ScheduledActions.end()) {
        m_ImmediateActions.splice(m_ImmediateActions.end(),
                m_ScheduledActions, i);
        return;
    }

    TTimeline::iterator j = find(m_ImmediateActions.begin(),
            m_ImmediateActions.end(), entry);

    // It's new server, add to immediate
    if (j == m_ImmediateActions.end()) {
        m_ImmediateActions.push_back(entry);
    }
}

// Refresh the server list from service discovery: drop vanished servers,
// add new ones, and re-arm the discovery action after the timeout.
template <class TImpl>
void CNetScheduleGetJobImpl<TImpl>::NextDiscoveryIteration()
{
    TServers servers;

    for (CNetServiceIterator it =
            m_Impl.m_API.GetService().Iterate(
                CNetService::eIncludePenalized); it; ++it) {
        servers.push_back((*it)->m_ServerInPool->m_Address);
    }

    // Keep up to date servers
    Filter(m_ImmediateActions, servers);
    Filter(m_ScheduledActions, servers);

    // Add newly discovered servers
    for (TServers::const_iterator i = servers.begin();
            i != servers.end(); ++i) {
        m_ImmediateActions.push_back(*i);
    }

    // Reschedule discovery after timeout
    m_DiscoveryAction.deadline = CDeadline(m_Impl.m_Timeout, 0);
    m_ScheduledActions.push_back(m_DiscoveryAction);
}

// Move back every postponed server that has not yet been asked about the
// full affinity list, so an affinity-aware search can re-query it now.
template <class TImpl>
void CNetScheduleGetJobImpl<TImpl>::ReturnNotFullyCheckedServers()
{
    // Return back to immediate actions
    // all servers that have not been checked for all possible affinities
    TIterator i = m_ScheduledActions.begin();

    while (i != m_ScheduledActions.end()) {
        if (i->all_affinities_checked) {
            ++i;
        } else {
            m_ImmediateActions.splice(m_ImmediateActions.end(),
                    m_ScheduledActions, i++);
        }
    }
}

// Explicit instantiations for the two users of this template.
template class CNetScheduleGetJobImpl<CMainLoopThread::CImpl>;
template class CNetScheduleGetJobImpl<SNetScheduleJobReaderImpl::CImpl>;
| 31.293478 | 96 | 0.601737 | mycolab |
7b0277669cf1498f84f4fd55b963de55dc6848df | 13,608 | cpp | C++ | AppForm.cpp | DarknessFX/UEPlugins_DisableDefault | dbaf4c83a8dbe1bb745bb00bd0a5f5a87c0b3271 | [
"MIT"
] | 1 | 2021-12-01T13:57:54.000Z | 2021-12-01T13:57:54.000Z | AppForm.cpp | DarknessFX/UEPlugins_DisableDefault | dbaf4c83a8dbe1bb745bb00bd0a5f5a87c0b3271 | [
"MIT"
] | null | null | null | AppForm.cpp | DarknessFX/UEPlugins_DisableDefault | dbaf4c83a8dbe1bb745bb00bd0a5f5a87c0b3271 | [
"MIT"
] | null | null | null | ///
/// Created by DarknessFX - https://dfx.lv - @DrkFX
/// Source Code at https://github.com/DarknessFX/UEPlugins_DisableDefault
///
#include <windows.h>
#include "AppForm.h"
using namespace System;
using namespace System::Windows::Forms;
using namespace System::IO;
using namespace Microsoft::Win32;
using namespace UEPluginsDisableDefault;
// Form initialization: discover installed Unreal Engine builds from the
// registry, populate the folder combo box, and pre-select the default build.
void AppForm::AppForm_Load ( System::Object^ sender, System::EventArgs^ e )
{
	StateUpdate(AppState::Wait);
	cmbUEFolder->Items->Clear();
	dtbPlugins->Clear();

	StatusUpdate("Finding default Unreal Engine folder in Registry.");
	// The .uproject shell-open command line contains the default engine path.
	String^ UEDefaultPath = Registry::ClassesRoot->OpenSubKey("Unreal.ProjectFile\\shell\\open\\command")->GetValue("")->ToString();

	StatusUpdate("Reading Unreal Engine folder path in Registry \\\\HKEY_LOCAL_MACHINE\\SOFTWARE\\EpicGames\\Unreal Engine");
	RegistryKey^ UEKey = Registry::LocalMachine->OpenSubKey("SOFTWARE\\EpicGames\\Unreal Engine");
	for each (String^ SubKey in UEKey->GetSubKeyNames())
	{
		RegistryKey^ BuildKey = UEKey->OpenSubKey(SubKey);
		String^ UEPath = BuildKey->GetValue("InstalledDirectory")->ToString();
		cmbUEFolder->Items->Add(UEPath);
		// Select the build that the shell uses to open .uproject files.
		if ( UEDefaultPath->Contains(UEPath) )
		{
			cmbUEFolder->SelectedIndex = cmbUEFolder->Items->Count - 1;
		}
	}
	StatusUpdate("");
	UpdateFlow();
	StateUpdate(AppState::Default);
}
// Keep the layout in sync when the window is resized.
void AppForm::AppForm_SizeChanged ( System::Object^ sender, System::EventArgs^ e )
{
	UpdateFlow();
}

// Stretch the folder combo box to fill the width left by its neighbours
// (fixed 20px margin).
void AppForm::UpdateFlow ( )
{
	cmbUEFolder->Width = AppForm::ClientSize.Width - btnBrowse->Width - lblUEFolder->Width - 20;
}

// Show a message in the status label.
void AppForm::StatusUpdate(String^ Message)
{
	AppForm::lblStatus->Text = Message;
}
// Menu "Show All": make every plugin grid row visible.
void AppForm::mnuShowAll_Click ( System::Object^ sender, System::EventArgs^ e )
{
	// The two menu items act as mutually exclusive radio options.
	if (mnuShowDefault->Checked)
	{
		mnuShowDefault->Checked = false;
	}
	btnMenu->Text = mnuShowAll->Text;
	for ( int iRow = 0; iRow < AppForm::grdPlugins->Rows->Count; iRow++ )
	{
		AppForm::grdPlugins->Rows[iRow]->Visible = true;
	}
}

// Menu "Show Default": hide every row that is not enabled-by-default.
void AppForm::mnuShowDefault_Click ( System::Object^ sender, System::EventArgs^ e )
{
	if (mnuShowAll->Checked)
	{
		mnuShowAll->Checked = false;
	}
	btnMenu->Text = mnuShowDefault->Text;
	for ( int iRow = 0; iRow < AppForm::grdPlugins->Rows->Count; iRow++ )
	{
		// Column 0 holds the boxed EnabledByDefault boolean.
		if ((bool)AppForm::grdPlugins->Rows[iRow]->Cells[0]->Value != true)
		{
			AppForm::grdPlugins->Rows[iRow]->Visible = false;
		}
	}
}

// Re-scan plugins whenever another engine folder is picked.
void AppForm::cmbUEFolder_SelectedIndexChanged ( System::Object^ sender, System::EventArgs^ e )
{
	Searching(cmbUEFolder->SelectedItem->ToString());
}
// Browse button: let the user pick an engine folder manually and add it to
// the combo box (selecting it) unless it is already listed.
void AppForm::btnBrowse_Click ( System::Object^ sender, System::EventArgs^ e )
{
	::DialogResult result = dlgBrowse->ShowDialog();
	if( result == ::DialogResult::OK )
	{
		if ( !cmbUEFolder->Items->Contains(dlgBrowse->SelectedPath) )
		{
			StatusUpdate(Append("Adding UE Folder Path: ", dlgBrowse->SelectedPath));
			cmbUEFolder->Items->Add(dlgBrowse->SelectedPath);
			// Selecting the new item triggers the scan via the combo handler.
			cmbUEFolder->SelectedIndex = cmbUEFolder->Items->Count - 1;
		}
		else
		{
			StatusUpdate(Append("Path already added : ", dlgBrowse->SelectedPath));
		}
	}
}
// "Minimal" button: disable every plugin by default, then re-enable only the
// small whitelist needed for a minimal but working editor.
// Fix: the Oodle texture plugin is named "TextureFormatOodle" in the engine;
// the previous spelling "TextureFormateOodle" could never match a plugin name.
void AppForm::btnMinimal_Click ( System::Object^ sender, System::EventArgs^ e )
{
	array<String^>^ aMinimal = {"AISupport", "ContentBrowserAssetDataSource", "ContentBrowserClassDataSource", "CurveEditorTools", "TextureFormatOodle", "OodleData", "PluginBrowser", "PluginUtils", "PropertyAccessEditor"};
	for ( int iRow = 0; iRow < AppForm::dtbPlugins->Rows->Count; iRow++ )
	{
		DataRow^ mDR = AppForm::dtbPlugins->Rows[iRow];
		// Disable first, then re-enable on a case-insensitive whitelist match.
		mDR["celEnabledByDefault"] = false;
		for each (String^ sPlugin in aMinimal)
		{
			if (mDR["celName"]->ToString()->ToLower() == sPlugin->ToLower() )
			{
				mDR["celEnabledByDefault"] = true;
				break;
			}
		}
	}
}
// Save button: for every plugin whose EnabledByDefault differs from the
// originally loaded value, rewrite its .uplugin file in place (via a
// temporary "_UEPlugins_DisableDefault" rename) flipping the flag text.
// Fix: the original closed origStream twice and never closed origReader;
// now the reader and writer are closed (which flushes) before the streams.
void AppForm::btnSave_Click ( System::Object^ sender, System::EventArgs^ e )
{
	StateUpdate(AppState::Wait);
	AppForm::StatusUpdate("Saving .uplugin changes...");
	ControlsStateChange(ControlsState::Wait);

	String^ dirPlugin = Append(cmbUEFolder->SelectedItem->ToString(), "\\Engine\\Plugins\\");
	String^ filePlugin = "";
	int iMod = 0;
	for each (DataRow^ mDRPlug in dtbPlugins->Rows)
	{
		for each (DataRow^ mDROrig in dtbPluginsOrig->Rows)
		{
			if (mDRPlug["celName"] == mDROrig["celName"])
			{
				if ((bool)mDRPlug["celEnabledByDefault"] != (bool)mDROrig["celEnabledByDefault"])
				{
					// Rename the original aside, then stream it back line by
					// line into a fresh file with the flag text replaced.
					filePlugin = Append(dirPlugin, mDRPlug["celPath"]->ToString());
					File::Move(filePlugin, Append(filePlugin, "_UEPlugins_DisableDefault"));
					Application::DoEvents();

					FileStream^ origStream = gcnew FileStream(Append(filePlugin, "_UEPlugins_DisableDefault"), FileMode::Open);
					StreamReader^ origReader = gcnew StreamReader(origStream);
					FileStream^ newStream = gcnew FileStream(filePlugin, FileMode::CreateNew);
					StreamWriter^ newWriter = gcnew StreamWriter(newStream);
					while ( !origReader->EndOfStream )
					{
						String^ line = origReader->ReadLine();
						if ( line->Contains("EnabledByDefault") )
						{
							// Swap "true" <-> "false" inside the flag line only.
							line = line->Replace(mDROrig["celEnabledByDefault"]->ToString()->ToLower(), mDRPlug["celEnabledByDefault"]->ToString()->ToLower());
						}
						newWriter->WriteLine(line);
					}
					origReader->Close();   // also releases origStream's handle
					origStream->Close();   // no-op if already closed; kept for symmetry
					newWriter->Close();    // flushes buffered output
					newStream->Close();
					File::Delete(Append(filePlugin, "_UEPlugins_DisableDefault"));

					iMod++;
					// Drop the stale original row; it is rebuilt below.
					mDROrig->Delete();
					dtbPluginsOrig->AcceptChanges();
				}
				break;
			}
		}
	}
	// The saved state becomes the new baseline for future diffs.
	dtbPluginsOrig = dtbPlugins->Copy();
	ControlsStateChange(ControlsState::Default);
	StatusUpdate(Append(iMod.ToString(), " plugins changed."));
	StateUpdate(AppState::Default);
}
// Commit checkbox edits immediately instead of waiting for focus to leave
// the cell, so the bound DataTable reflects clicks right away.
void AppForm::grdPlugins_CurrentCellDirtyStateChanged ( System::Object^ sender, System::EventArgs^ e )
{
	if ( grdPlugins->IsCurrentCellDirty )
	{
		grdPlugins->CommitEdit(DataGridViewDataErrorContexts::Commit);
		grdPlugins->EndEdit();
		dtbPlugins->AcceptChanges();
	}
}
// Toggle the application-wide wait cursor to match the app state.
void StateUpdate(AppState State)
{
	if ( State == AppState::Wait )
	{
		Application::UseWaitCursor = true;
	}
	else if ( State == AppState::Default )
	{
		Application::UseWaitCursor = false;
	}
}
// Concatenate two managed strings by inserting str1 at the end of str0.
// System::String is immutable, so a new string is returned.
String^ Append(String^ str0, String^ str1)
{
	const int tail = str0->Length;
	return str0->Insert(tail, str1);
}
// Normalize a Windows path: collapse doubled backslashes, then convert
// every remaining backslash to a forward slash.
String^ ReplaceSlashes(String^ Path)
{
	String^ normalized = Path->Replace("\\\\", "\\");
	normalized = normalized->Replace("\\", "/");
	return normalized;
}
// Rebuild the plugin tables by scanning the engine's Plugins tree under
// Path, then rebind the grid sorted by enabled-state / name / category.
void Searching(String^ Path)
{
	StateUpdate(AppState::Wait);
	AppForm::StatusUpdate("Searching UPlugins ...");
	ControlsStateChange(ControlsState::Wait);
	AppForm::dtbPlugins->Clear();
	AppForm::dtbPluginsOrig->Clear();
	// Detach the grid during the rebuild to avoid per-row refreshes.
	AppForm::grdPlugins->DataSource = nullptr;

	// Accept either the engine root or the Plugins folder itself.
	if ( !Path->Contains("Plugins") )
	{
		Path = Append(Path, "\\Engine\\Plugins");
	}
	//for each (String^ dirCategory in Directory::EnumerateDirectories(Path) )
	//{
		FindUPlugin(Path);
	//}

	AppForm::dtbPlugins->DefaultView->Sort = "celEnabledByDefault DESC, celFriendlyName ASC, celCategory ASC";
	AppForm::dtbPlugins->AcceptChanges();
	// Keep a pristine copy so Save can diff user edits against it.
	AppForm::dtbPluginsOrig = AppForm::dtbPlugins->Copy();
	AppForm::dtbPluginsOrig->DefaultView->Sort = "celEnabledByDefault DESC, celFriendlyName ASC, celCategory ASC";

	AppForm::grdPlugins->DataSource = AppForm::dtbPlugins;
	AppForm::grdPlugins->CurrentCell = AppForm::grdPlugins[0,0];
	ControlsStateChange(ControlsState::Default);
	AppForm::StatusUpdate(Append(Append(Append(CountEnabledByDefault().ToString(), " plugins Enabled By Default. " ), AppForm::dtbPlugins->Rows->Count.ToString()), " plugins total."));
	StateUpdate(AppState::Default);
}
// Recursively walk the directory tree under Path, registering the first
// .uplugin file found in each folder and skipping known content folders.
void FindUPlugin(String^ Path)
{
	for each (String^ dirPlugin in Directory::EnumerateDirectories(Path) )
	{
		// Skip Binaries/Content/Source/etc. — they never hold .uplugin files.
		if ( IsIgnoredFolder(dirPlugin) )
		{
			continue;
		}
		AppForm::StatusUpdate(Append("Searching UPlugins :", dirPlugin));
		// Keep the UI responsive during long scans.
		Application::DoEvents();

		for each (String^ mFile in Directory::GetFiles( dirPlugin, "*.uplugin" ) )
		{
			AddUPlugin(mFile);
			// A plugin folder holds exactly one descriptor; stop after the first.
			break;
		}
		if ( Directory::GetDirectories(dirPlugin)->Length != 0 )
		{
			FindUPlugin(dirPlugin);
		}
	}
}
// Parse one .uplugin descriptor into a new row of the plugins table.
void AddUPlugin(String^ FileUPlugin)
{
	DataRow^ mDR = AppForm::dtbPlugins->NewRow();
	ReadUPlugin(FileUPlugin, mDR);
	AppForm::dtbPlugins->Rows->Add(mDR);
}
// Fill mDataRow from a .uplugin descriptor: name and relative path from the
// filename, icon from the plugin's Resources folder (falling back to the
// engine's default plugin icon), and the remaining columns from a line-by-
// line scan of the JSON text (see GetJSONValue).
void ReadUPlugin(String^ FileUPlugin, DataRow^& mDataRow)
{
	FileStream^ filestream = gcnew FileStream(FileUPlugin, FileMode::Open);
	StreamReader^ reader = gcnew StreamReader(filestream);

	mDataRow["celName"] = Path::GetFileNameWithoutExtension(FileUPlugin);
	// Path relative to the "Plugins" folder (8 == strlen("plugins") + 1).
	mDataRow["celPath"] = FileUPlugin->Substring(FileUPlugin->IndexOf("plugins", StringComparison::CurrentCultureIgnoreCase) + 8);
	if ( File::Exists(Append(Path::GetDirectoryName(FileUPlugin), "\\Resources\\icon128.png")) )
	{
		mDataRow["celIcon"] = Drawing::Image::FromFile( Append(Path::GetDirectoryName(FileUPlugin), "\\Resources\\icon128.png") );
	}
	else
	{
		mDataRow["celIcon"] = Drawing::Image::FromFile( Append(AppForm::cmbUEFolder->SelectedItem->ToString(), "\\Engine\\Plugins\\Editor\\PluginBrowser\\Resources\\DefaultIcon128.png") );
	}

	// Naive JSON scan: first line containing each key wins.
	while ( !reader->EndOfStream )
	{
		String^ line = reader->ReadLine();
		if ( line->Contains("VersionName") )
		{
			GetJSONValue(line);
			mDataRow["celVersionName"] = line;
			continue;
		}
		if ( line->Contains("EnabledByDefault") )
		{
			GetJSONValue(line);
			mDataRow["celEnabledByDefault"] = line;
			continue;
		}
		if ( line->Contains("FriendlyName") )
		{
			GetJSONValue(line);
			mDataRow["celFriendlyName"] = line;
			continue;
		}
		if ( line->Contains("Description") )
		{
			GetJSONValue(line);
			mDataRow["celDescription"] = line;
			continue;
		}
		if ( line->Contains("Category") )
		{
			GetJSONValue(line);
			mDataRow["celCategory"] = line;
			continue;
		}
	}
	reader->Close();
	filestream->Close();
};
// Strip a JSON-ish property line down to its bare value, in place.
// Quoted values: take the text after ': "' and drop the closing '",'.
// Unquoted values (bools/numbers): take the text after ': ' and drop ','.
// NOTE(review): assumes every value line ends with a comma — verify for the
// last property of a .uplugin object.
void GetJSONValue(String^& str0)
{
	if ( str0->Contains(": \"") )
	{
		str0 = str0->Substring(str0->IndexOf(": \"") + 3);
		str0 = str0->Substring(0, str0->Length - 2);
	}
	else
	{
		str0 = str0->Substring(str0->IndexOf(": ") + 2);
		str0 = str0->Substring(0, str0->Length - 1);
	}
}
int CountEnabledByDefault()
{
int iCounted = 0;
for each ( DataRow^ mDR in AppForm::dtbPlugins->Rows )
{
if ((bool)mDR["celEnabledByDefault"] == true)
{
iCounted++;
}
}
return iCounted;
}
// True when the folder's leaf name is one of the standard plugin content
// folders (Binaries, Source, Resources, ...) that never hold a .uplugin.
bool IsIgnoredFolder(String^ Path)
{
	bool bIsIgnoreFolder = false;
	array<String^>^ aIgnoreFolders = {"Binaries", "Config", "Content", "Docs", "Intermediate", "Library", "Private", "Public", "Resources", "SDK", "SDKs", "Shaders", "Source", "SourceArt", "ThirdParty"};
	for each (String^ sIgnoreFolder in aIgnoreFolders)
	{
		// Compare only the last path segment, case-insensitively.
		if (sIgnoreFolder->ToLower() == Path->Substring(System::IO::Path::GetDirectoryName(Path)->Length + 1)->ToLower() )
		{
			bIsIgnoreFolder = true;
			break;
		}
	}
	return bIsIgnoreFolder;
}
// Enable or disable every interactive control as one group; the Save
// button is highlighted green only while it can be clicked.
void ControlsStateChange(ControlsState State)
{
	const bool bEnabled = (bool)State;
	AppForm::cmbUEFolder->Enabled = bEnabled;
	AppForm::btnBrowse->Enabled = bEnabled;
	AppForm::btnMenu->Enabled = bEnabled;
	AppForm::btnMinimal->Enabled = bEnabled;
	AppForm::btnSave->Enabled = bEnabled;
	AppForm::grdPlugins->Enabled = bEnabled;
	if ( bEnabled )
	{
		AppForm::btnSave->BackColor = Color::LightGreen;
	}
	else
	{
		AppForm::btnSave->BackColor = Color::Empty;
	}
}
7b039b471b189e5416aa8eebf51d95d078696b02 | 8,365 | cpp | C++ | LifeBrush/Source/LifeBrush/VRInterface/CollectionSpace.cpp | timdecode/LifeBrush | dbc65bcc0ec77f9168e08cf7b39539af94420725 | [
"MIT"
] | 33 | 2019-04-23T23:00:09.000Z | 2021-11-09T11:44:09.000Z | LifeBrush/Source/LifeBrush/VRInterface/CollectionSpace.cpp | MyelinsheathXD/LifeBrush | dbc65bcc0ec77f9168e08cf7b39539af94420725 | [
"MIT"
] | 1 | 2019-10-09T15:57:56.000Z | 2020-03-05T20:01:01.000Z | LifeBrush/Source/LifeBrush/VRInterface/CollectionSpace.cpp | MyelinsheathXD/LifeBrush | dbc65bcc0ec77f9168e08cf7b39539af94420725 | [
"MIT"
] | 6 | 2019-04-25T00:10:55.000Z | 2021-04-12T05:16:28.000Z | // Copyright 2018, Timothy Davison. All rights reserved.
#include "LifeBrush.h"
#include <cmath>
#include "CollectionSpace.h"
#include <../Plugins/Experimental/AlembicImporter/Source/ThirdParty/Alembic/Deploy/include/OpenEXR/halfLimits.h>
UInteractionSpace::UInteractionSpace()
{
// Set this component to be initialized when the game starts, and to be ticked every frame. You can turn these features
// off to improve performance if you don't need them.
PrimaryComponentTick.bCanEverTick = true;
// ...
}
UCollectionSpace::UCollectionSpace()
{
PrimaryComponentTick.bCanEverTick = true;
bAutoActivate = true;
bTickInEditor = false;
}
void UCollectionSpace::BeginPlay()
{
Super::BeginPlay();
_boundsMesh = NewObject<UStaticMeshComponent>(this);
_boundsMesh->AttachToComponent(this, FAttachmentTransformRules::KeepRelativeTransform);
_boundsMesh->SetStaticMesh(backgroundMesh);
_boundsMesh->SetMaterial(0, backgroundMaterial);
_boundsMesh->SetCollisionEnabled(ECollisionEnabled::NoCollision);
_boundsMesh->RegisterComponent();
}
// Called every frame
void UCollectionSpace::TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction)
{
Super::TickComponent(DeltaTime, TickType, ThisTickFunction);
_velocity -= _velocity * damping * DeltaTime;
// apply velocity
this->AddRelativeLocation(_velocity * DeltaTime, false, nullptr, ETeleportType::TeleportPhysics);
FVector location = GetRelativeTransform().GetLocation();
FBox boundsInRoot = _bounds.TransformBy(GetRelativeTransform());
const float halfWidth = width * 0.5f;
if( boundsInRoot.Min[Forward] > -halfWidth )
{
_velocity = FVector::ZeroVector;
location[Forward] += (-halfWidth - boundsInRoot.Min[Forward]);
this->SetRelativeLocation(location, false, nullptr, ETeleportType::TeleportPhysics);
}
else if( boundsInRoot.Max[Forward] < halfWidth )
{
_velocity = FVector::ZeroVector;
location[Forward] += (halfWidth - boundsInRoot.Max[Forward]);
this->SetRelativeLocation(location, false, nullptr, ETeleportType::TeleportPhysics);
}
}
void UCollectionSpace::insertItemsAt(TArray<uint32> indices)
{
}
void UCollectionSpace::reloadData()
{
_cells.Empty();
if(!dataSource->Implements<UCollectionSpaceDataSource>())
return;
int32 n = ICollectionSpaceDataSource::Execute_count(dataSource);
for (int32 i = 0; i < n; ++i)
{
UPrimitiveComponent * cell = ICollectionSpaceDataSource::Execute_primitiveCellAt(dataSource, i);
_cells.Add(cell);
}
_layout();
}
FVector UCollectionSpace::nearest(UPrimitiveComponent * interactionPoint)
{
FVector localPoint = this->GetComponentTransform().InverseTransformPosition(interactionPoint->GetComponentLocation());
FVector nearestPoint = FVector::ZeroVector;
float nearestDistanceSqrd = std::numeric_limits<float>::max();
auto updateNearest = [&](FBox& bounds) {
if (bounds.IsInside(localPoint))
{
FVector point = bounds.GetClosestPointTo(localPoint);
float distSqrd = FVector::DistSquared(localPoint, point);
if (distSqrd < nearestDistanceSqrd)
{
nearestDistanceSqrd = distSqrd;
nearestPoint = point;
}
}
};
// check if we overlap one of the items
for (int i = 0; i < _localBounds.Num(); ++i)
{
FBox& bounds = _localBounds[i];
updateNearest(bounds);
}
{
FTransform toLocal = this->GetComponentTransform().Inverse();
FBox bounds = _boundsMesh->Bounds.GetBox().TransformBy(toLocal);
updateNearest(bounds);
}
return GetComponentTransform().TransformPosition(nearestPoint);
}
void UCollectionSpace::begin_oneHand(UPrimitiveComponent * interactionPoint)
{
FVector localPoint = this->GetComponentTransform().InverseTransformPosition(interactionPoint->GetComponentLocation());
if (_bounds.IsInside(localPoint))
_interactionMode = InteractionMode::Pan;
else
_interactionMode = InteractionMode::None;
// check if we overlap one of the items
for (int i = 0; i < _localBounds.Num(); ++i)
{
FBox& bounds = _localBounds[i];
if (bounds.IsInside(localPoint) && delegate)
{
ICollectionSpaceDelegate::Execute_didGrab(delegate, i, interactionPoint->GetComponentTransform(), _cells[i], _cells[i]->GetComponentTransform(), bounds);
_interactionMode = InteractionMode::Grab;
return;
}
}
}
void UCollectionSpace::update_oneHand(float dt, UPrimitiveComponent * interactionPoint, FTransform lastTransform)
{
if (_interactionMode == InteractionMode::Pan)
{
_update_pan(dt, interactionPoint, lastTransform);
}
}
void UCollectionSpace::_update_pan(float dt, UPrimitiveComponent * interactionPoint, FTransform lastTransform)
{
FVector localPoint = this->GetComponentTransform().InverseTransformPosition(interactionPoint->GetComponentLocation());
if (_bounds.IsInside(localPoint))
{
FTransform parentTransform = this->GetAttachParent()->GetComponentTransform();
FVector pointInParent = parentTransform.InverseTransformPosition(interactionPoint->GetComponentLocation());
FVector lastInParent = parentTransform.InverseTransformPosition(lastTransform.GetLocation());
FVector delta = pointInParent - lastInParent;
// only move in forward direction
delta.X = 0.0f;
delta.Z = 0.0f;
_velocity = delta / dt;
}
}
void UCollectionSpace::end_oneHand(UPrimitiveComponent * interactionPoint)
{
}
void UCollectionSpace::grab(UPrimitiveComponent * interactionPoint)
{
}
void UCollectionSpace::query(UPrimitiveComponent * interactionPoint)
{
}
TSet<int32> UCollectionSpace::selection()
{
return _selection;
}
void UCollectionSpace::_layout()
{
_layoutCells();
_layoutBackground();
}
void UCollectionSpace::_layoutCells()
{
const float totalWidth = _totalWidth();
const float stepSize = _stepSize();
FVector _startP = FVector::ZeroVector;
_startP[Forward] = -totalWidth / 2.0f;
_startP.Z = -(cellExtents * rows + cellSpacing * (rows - 1)) * .5f;
const FVector startP = _startP;
_bounds = FBox(EForceInit::ForceInitToZero);
int n = _cells.Num();
_localBounds.SetNumZeroed(_cells.Num(), true);
FTransform toComponent = this->GetComponentTransform().Inverse();
const int32 nColumns = std::ceil(float(n) / float(rows));
for (int32 i = 0; i < n; ++i)
{
UPrimitiveComponent * cell = _cells[i];
// cell->SetCollisionEnabled(ECollisionEnabled::QueryOnly);
checkf(cell->GetAttachParent() == this, TEXT("The AttachParent of the cell must be the UCollectionSpace"));
const float scale = _scaleForCell(*cell);
const FVector offset = _offsetForCell(toComponent, *cell);
FVector p = startP;;
{
const int32 rowIndex = std::floor(i / nColumns);
const int32 colIndex = i % nColumns;
p[Forward] += stepSize * colIndex;
p.Z += stepSize * rowIndex;
}
FTransform transform(FQuat::Identity, p + offset, FVector(scale));
cell->SetRelativeTransform(transform, false, nullptr, ETeleportType::TeleportPhysics);
//p[Forward] += stepSize;
FBoxSphereBounds localBounds = cell->Bounds.TransformBy(toComponent);
_localBounds[i] = localBounds.GetBox();
_bounds += localBounds.GetBox();
}
_bounds.Min.Z -= cellSpacing;
_bounds.Max.Z += cellSpacing;
_bounds.Min.Z -= cellSpacing;
_bounds.Max.Z += cellSpacing;
}
void UCollectionSpace::_layoutBackground()
{
FBoxSphereBounds bounds = _boundsMesh->GetStaticMesh()->GetBounds();
const float yScale = _bounds.GetSize()[Forward] / bounds.GetBox().GetSize()[Forward];
const float zScale = _bounds.GetSize().Z / bounds.GetBox().GetSize().Z;
FVector location = _bounds.GetCenter();
location.X = _bounds.Min.X;
_boundsMesh->SetRelativeLocation(location);
FVector scaleVector(0.02f, yScale, zScale);
_boundsMesh->SetRelativeScale3D(scaleVector);
}
float UCollectionSpace::_totalWidth()
{
return (float(_cells.Num()) * cellExtents) - (float(_cells.Num() - 1) * cellSpacing);
}
float UCollectionSpace::_stepSize()
{
return cellExtents + cellSpacing;
}
float UCollectionSpace::_scaleForCell(UPrimitiveComponent& component)
{
return (cellExtents * 0.5f) / component.Bounds.SphereRadius;
}
FVector UCollectionSpace::_offsetForCell(const FTransform& toComponent, UPrimitiveComponent& component)
{
FBoxSphereBounds bounds = component.Bounds;
// get it into the coordinate frame of this
bounds = bounds.TransformBy(toComponent);
FVector offset = bounds.Origin;
offset[Forward] = -bounds.GetBox().Max[Forward];
return -offset * _scaleForCell(component);
}
| 24.895833 | 157 | 0.749671 | timdecode |
7b0885fff9dc7b16718e7cdc3d7d0b364df2b9b0 | 861 | cpp | C++ | unit_tests/test_flush.cpp | brridder/nanoarq | 5aa9e5d9dda002f569aa70dcec0eded67404c44d | [
"Unlicense"
] | 1 | 2021-12-07T18:33:20.000Z | 2021-12-07T18:33:20.000Z | unit_tests/test_flush.cpp | skyformat99/nanoarq | 167425b0a073c4e41f4d32eafc21cb1a71b186e3 | [
"Unlicense"
] | null | null | null | unit_tests/test_flush.cpp | skyformat99/nanoarq | 167425b0a073c4e41f4d32eafc21cb1a71b186e3 | [
"Unlicense"
] | null | null | null | #include "arq_in_unit_tests.h"
#include "arq_runtime_mock_plugin.h"
#include <CppUTest/TestHarness.h>
#include <CppUTestExt/MockSupport.h>
#include <cstring>
TEST_GROUP(flush) {};
namespace {
TEST(flush, invalid_params)
{
CHECK_EQUAL(ARQ_ERR_INVALID_PARAM, arq_flush(nullptr));
}
void MockSendWndFlush(arq__send_wnd_t *sw)
{
mock().actualCall("arq__send_wnd_flush").withParameter("sw", sw);
}
TEST(flush, forwards_send_window_to_send_wnd_flush)
{
arq_t arq;
ARQ_MOCK_HOOK(arq__send_wnd_flush, MockSendWndFlush);
mock().expectOneCall("arq__send_wnd_flush").withParameter("sw", &arq.send_wnd);
arq_flush(&arq);
}
TEST(flush, returns_success)
{
arq_t arq;
ARQ_MOCK_HOOK(arq__send_wnd_flush, MockSendWndFlush);
mock().ignoreOtherCalls();
arq_err_t const e = arq_flush(&arq);
CHECK_EQUAL(ARQ_OK_COMPLETED, e);
}
}
| 21.525 | 83 | 0.744483 | brridder |
7b0bb8a60fa230961fe485904d1fc910c3f6be90 | 333 | cpp | C++ | C++/bulb-switcher-iv.cpp | Akhil-Kashyap/LeetCode-Solutions | c671a588f96f4e4bbde4512727322ff9b1c8ae6a | [
"MIT"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | C++/bulb-switcher-iv.cpp | Akhil-Kashyap/LeetCode-Solutions | c671a588f96f4e4bbde4512727322ff9b1c8ae6a | [
"MIT"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | C++/bulb-switcher-iv.cpp | Akhil-Kashyap/LeetCode-Solutions | c671a588f96f4e4bbde4512727322ff9b1c8ae6a | [
"MIT"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | // Time: O(n)
// Space: O(1)
class Solution {
public:
int minFlips(string target) {
int result = 0;
char curr = '0';
for (const auto& c : target) {
if (c == curr) {
continue;
}
curr = c;
++result;
}
return result;
}
};
| 17.526316 | 38 | 0.393393 | Akhil-Kashyap |
7b0c268235e24726c5e4271e75d91c71fe37a633 | 1,691 | cpp | C++ | libs/modelmd3/impl/src/model/md3/impl/read_string.cpp | cpreh/spacegameengine | 313a1c34160b42a5135f8223ffaa3a31bc075a01 | [
"BSL-1.0"
] | 2 | 2016-01-27T13:18:14.000Z | 2018-05-11T01:11:32.000Z | libs/modelmd3/impl/src/model/md3/impl/read_string.cpp | cpreh/spacegameengine | 313a1c34160b42a5135f8223ffaa3a31bc075a01 | [
"BSL-1.0"
] | null | null | null | libs/modelmd3/impl/src/model/md3/impl/read_string.cpp | cpreh/spacegameengine | 313a1c34160b42a5135f8223ffaa3a31bc075a01 | [
"BSL-1.0"
] | 3 | 2018-05-11T01:11:34.000Z | 2021-04-24T19:47:45.000Z | // Copyright Carl Philipp Reh 2006 - 2019.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <sge/model/md3/exception.hpp>
#include <sge/model/md3/string.hpp>
#include <sge/model/md3/impl/max_qpath.hpp>
#include <sge/model/md3/impl/read_string.hpp>
#include <fcppt/no_init.hpp>
#include <fcppt/text.hpp>
#include <fcppt/array/object_impl.hpp>
#include <fcppt/cast/size.hpp>
#include <fcppt/cast/to_char_ptr.hpp>
#include <fcppt/cast/to_signed.hpp>
#include <fcppt/config/external_begin.hpp>
#include <algorithm>
#include <ios>
#include <istream>
#include <fcppt/config/external_end.hpp>
template <sge::model::md3::string::size_type Max>
sge::model::md3::string sge::model::md3::impl::read_string(std::istream &_stream)
{
fcppt::array::object<sge::model::md3::string::value_type, Max> tmp_name{fcppt::no_init{}};
if (!_stream.read(
fcppt::cast::to_char_ptr<char *>(tmp_name.data()),
fcppt::cast::size<std::streamsize>(fcppt::cast::to_signed(tmp_name.size()))))
{
throw sge::model::md3::exception(FCPPT_TEXT("Reading a string failed"));
}
if (!std::count(tmp_name.begin(), tmp_name.end(), 0))
{
throw sge::model::md3::exception(FCPPT_TEXT("String in md3 file not ended with a 0!"));
}
return sge::model::md3::string{tmp_name.data()};
}
#define SGE_MODEL_MD3_INSTANTIATE_READ_STRING(maxc) \
template sge::model::md3::string sge::model::md3::impl::read_string<maxc>(std::istream &)
SGE_MODEL_MD3_INSTANTIATE_READ_STRING(sge::model::md3::impl::max_qpath::value);
SGE_MODEL_MD3_INSTANTIATE_READ_STRING(16);
| 35.229167 | 92 | 0.716144 | cpreh |
7b0df066af04530745ec08a92576b72a336a1dee | 5,864 | cpp | C++ | oja_unsupervised/main.cpp | Shimmen/NeuralNetworkStuff | f9058a7ce8aa9d1929083e974568795245304159 | [
"MIT"
] | null | null | null | oja_unsupervised/main.cpp | Shimmen/NeuralNetworkStuff | f9058a7ce8aa9d1929083e974568795245304159 | [
"MIT"
] | null | null | null | oja_unsupervised/main.cpp | Shimmen/NeuralNetworkStuff | f9058a7ce8aa9d1929083e974568795245304159 | [
"MIT"
] | null | null | null | #include <iostream>
#include <fstream>
#include <random>
#define WITHOUT_NUMPY
#include <matplotlib-cpp/matplotlibcpp.h>
namespace plt = matplotlibcpp;
///////////////////////////////////////////////
// Data reading, parsing, preprocessing
struct Pattern
{
double x;
double y;
Pattern(double x, double y) : x(x), y(y) {}
};
std::vector<Pattern>
read_and_parse_data(const std::string& file_name)
{
std::vector<Pattern> patterns;
std::ifstream ifs(file_name);
double x;
double y;
while (ifs >> x >> y) {
patterns.emplace_back(x, y);
}
return patterns;
}
void
normalize_training_data_mean(std::vector<Pattern>& training_data)
{
double length = training_data.size();
double x_sum = 0.0;
double y_sum = 0.0;
for (auto &pattern : training_data) {
x_sum += pattern.x;
y_sum += pattern.y;
}
double x_mean = x_sum / length;
double y_mean = y_sum / length;
for (size_t i = 0; i < training_data.size(); ++i) {
training_data[i].x -= x_mean;
training_data[i].y -= y_mean;
}
}
///////////////////////////////////////////////
// Plotting
void
plot_weight_and_training_data(const double weights[2], const std::vector<Pattern>& training_data)
{
// Plot input data
size_t num_inputs = training_data.size();
std::vector<double> input_x(num_inputs);
std::vector<double> input_y(num_inputs);
for (size_t i = 0; i < num_inputs; ++i) {
auto& point = training_data[i];
input_x[i] = point.x;
input_y[i] = point.y;
}
plt::named_plot("Input patterns", input_x, input_y, "rx");
// Plot network weight (hopefully the principle direction)
plt::named_plot("Weights", { 0.0, weights[0] }, { 0.0, weights[1] }, "b-");
plt::plot({ weights[0] }, { weights[1] }, "bo");
plt::grid(true);
plt::legend();
plt::xlabel("x1");
plt::ylabel("x2");
}
///////////////////////////////////////////////
// Test procedure
void train_network(double weights[2], const Pattern& pattern, const double learning_rate)
{
// Oja's rule
double target = pattern.x * weights[0] + pattern.y * weights[1];
double delta_x = learning_rate * target * (pattern.x - target * weights[0]);
double delta_y = learning_rate * target * (pattern.y - target * weights[1]);
weights[0] += delta_x;
weights[1] += delta_y;
}
int
main()
{
clock_t start_time = std::clock();
std::default_random_engine rng;
rng.seed(static_cast<unsigned int>(start_time));
const double LEARNING_RATE = 0.001;
const size_t NUM_UPDATE_STEPS = 20000;
// Load training data
std::vector<Pattern> training_data = read_and_parse_data("oja_unsupervised/data.txt");
std::uniform_int_distribution<size_t> random_training_data_sample_index(0, training_data.size() - 1);
std::uniform_real_distribution<double> random_double(-1.0, 1.0);
// Set up for plotting
std::vector<double> weight_magnitude_key(NUM_UPDATE_STEPS);
std::vector<double> weight_magnitude_val(NUM_UPDATE_STEPS);
plt::plot();
//
// Train with un-normalized data
//
{
// Set up network
double weights[2] = { random_double(rng), random_double(rng) };
for (size_t step = 0; step < NUM_UPDATE_STEPS; ++step) {
// Perform one training step
size_t i = random_training_data_sample_index(rng);
train_network(weights, training_data[i], LEARNING_RATE);
// Record weight magnitude
weight_magnitude_key[step] = step;
weight_magnitude_val[step] = std::sqrt(weights[0] * weights[0] + weights[1] * weights[1]);
}
// Plot weight magnitude/modulus in panel 1
plt::subplot(2, 2, 1);
plt::title("Weight magnitude over time");
plt::named_plot("Weight magnitude", weight_magnitude_key, weight_magnitude_val, "r-");
plt::ylim(0.0, 1.25);
plt::grid(true);
plt::legend();
// Plot input data and weights
plt::subplot(2, 2, 3);
plt::title("Input patterns & network weight");
plot_weight_and_training_data(weights, training_data);
plt::xlim(-2.0, 13.0);
plt::ylim(-1.0, 4.0);
plt::grid(true);
plt::legend();
}
//
// Train with normalized data
//
{
// Set up network
double weights[2] = { random_double(rng), random_double(rng) };
// This time, normalize the training data!
normalize_training_data_mean(training_data);
for (size_t step = 0; step < NUM_UPDATE_STEPS; ++step) {
// Perform one training step
size_t i = random_training_data_sample_index(rng);
train_network(weights, training_data[i], LEARNING_RATE);
// Record weight magnitude
weight_magnitude_key[step] = step;
weight_magnitude_val[step] = std::sqrt(weights[0] * weights[0] + weights[1] * weights[1]);
}
// Plot weight magnitude/modulus in panel 2
plt::subplot(2, 2, 2);
plt::title("Weight magnitude over time (data with zero mean)");
plt::named_plot("Weight magnitude", weight_magnitude_key, weight_magnitude_val, "r-");
plt::ylim(0.0, 1.25);
plt::grid(true);
plt::legend();
// Plot input data and weights
plt::subplot(2, 2, 4);
plt::title("Input patterns & network weight (data with zero mean)");
plot_weight_and_training_data(weights, training_data);
plt::grid(true);
plt::legend();
}
clock_t end_time = std::clock();
double time_elapsed_s = static_cast<double>(end_time - start_time) / CLOCKS_PER_SEC;
std::cout << "Time elapsed: " << time_elapsed_s << " s" << std::endl;
// Blocks, so perform timing before this call
plt::show();
return 0;
}
| 28.8867 | 105 | 0.604707 | Shimmen |
7b0f314c43ab2b27cff9d41d0ea5fd2ebdd5bb76 | 1,048 | hpp | C++ | src/bridge/jni/NativeReference.hpp | codegitpro/qt-app | d8cdc29156c324d174362ac971d11b7989483395 | [
"Libpng",
"Zlib"
] | null | null | null | src/bridge/jni/NativeReference.hpp | codegitpro/qt-app | d8cdc29156c324d174362ac971d11b7989483395 | [
"Libpng",
"Zlib"
] | null | null | null | src/bridge/jni/NativeReference.hpp | codegitpro/qt-app | d8cdc29156c324d174362ac971d11b7989483395 | [
"Libpng",
"Zlib"
] | null | null | null | // AUTOGENERATED FILE - DO NOT MODIFY!
// This file generated by Djinni from form2.djinni
#pragma once
#include "djinni_support.hpp"
#include "reference.hpp"
namespace djinni_generated {
class NativeReference final {
public:
using CppType = ::ai::Reference;
using JniType = jobject;
using Boxed = NativeReference;
~NativeReference();
static CppType toCpp(JNIEnv* jniEnv, JniType j);
static ::djinni::LocalRef<JniType> fromCpp(JNIEnv* jniEnv, const CppType& c);
private:
NativeReference();
friend ::djinni::JniClass<NativeReference>;
const ::djinni::GlobalRef<jclass> clazz { ::djinni::jniFindClass("net/ai/Reference") };
const jmethodID jconstructor { ::djinni::jniGetMethodID(clazz.get(), "<init>", "(Ljava/lang/String;Ljava/util/ArrayList;)V") };
const jfieldID field_mName { ::djinni::jniGetFieldID(clazz.get(), "mName", "Ljava/lang/String;") };
const jfieldID field_mItems { ::djinni::jniGetFieldID(clazz.get(), "mItems", "Ljava/util/ArrayList;") };
};
} // namespace djinni_generated
| 30.823529 | 131 | 0.706107 | codegitpro |
7b16248afcbad5b4b9806804ed397b811651c375 | 9,742 | cpp | C++ | source/folders/camera/camera.cpp | jacoblammert/programmiersprachen-raytracer-cpp | d40ead374762094ee67d04c487764196f94748d6 | [
"MIT"
] | null | null | null | source/folders/camera/camera.cpp | jacoblammert/programmiersprachen-raytracer-cpp | d40ead374762094ee67d04c487764196f94748d6 | [
"MIT"
] | null | null | null | source/folders/camera/camera.cpp | jacoblammert/programmiersprachen-raytracer-cpp | d40ead374762094ee67d04c487764196f94748d6 | [
"MIT"
] | null | null | null | #include "camera.hpp"
/**
Camera constructor with angle/ fov
@param name of the camera
@param fov field of view in degrees
*/
Camera::Camera (std::string const& name, float fov_x) :
name_ {name},
position_ {0.0f, 0.0f, 0.0f},
direction_ {0.0000001f, 0.00000001f, -1.0f},
width_ {1},
height_ {1}, //TODO Berechnen durch fov in y Richtung?
distance_ {fov_x},
fov_x_ {fov_x}
{
distance_ *= MATH_PI / 360.0f;
distance_ = (float) (0.5f / std::tan(distance_)); // from presentation
}
/**
Camera constructor with angle/ fov + position + direction + up-vector of camera
@param name of the camera
@param fov field of view in degrees
@param eye position of the camera as vec3
@param direction of the camera as vec3
@param up up-vector of the camera as vec3
*/
Camera::Camera (std::string const& name, float fov_x, glm::vec3 const& eye, glm::vec3 const& direction, glm::vec3 const& up) :
name_ {name},
position_ {eye},
direction_ {direction},
width_{1},
height_{1}, //TODO Berechnen durch fov in y Richtung?
distance_{fov_x},
up_vector_ {up},
fov_x_ {fov_x}
{
distance_ *= MATH_PI / 360.0f;
distance_ = (float) (0.5f / std::tan(distance_)); // from presentation
}
/**
* Camera constructor without angle / fov
* @param position of the camera as vec3
* @param direction of the camera as vec3
* @param width of the screen in pixel/ int
* @param height of the screen in pixel/ int
* @param distance of the cameraplane from the cameraorigin to the cameraplane
*/
Camera::Camera (glm::vec3 const &position, glm::vec3 const &direction, int width, int height, float distance) :
position_ {position},
direction_ {direction},
width_ {width},
height_ {height},
distance_ {distance}
{
this->direction_ = glm::normalize(this->direction_);
}
/**
*
* Camera constructor with fov - looks at 0, 0, -1
* @param position of the camera as vec3
* @param direction of the camera as vec3
* @param width of the screen in pixel/ int
* @param height of the screen in pixel/ int
* @param fov field of view in degrees
*/
Camera::Camera (glm::vec3 const &position, int width, int height, float fov_x) :
position_ {position},
direction_ {0.0f, 0.0f, -1.0f},
width_ {width},
height_ {height},
distance_ {fov_x}
{
distance_ *= MATH_PI / 360.0f;
distance_ = (float) (0.5f / std::tan(distance_)); // from presentation
}
/**
* Generates a ray from the cameras position onto the camera plane. If the fov should be changed, the distance of the camera plane needs to change.
* The x and y positions are in pixelspace, like the width and the height of the camera
*
* @param x pos of the Pixel on the screen 0 to width
* @param y pos of the Pixel on the screen 0 to height
* @return a new Ray with the pos of the Camera and the direction onto the camera plane in worldspace
*/
Ray Camera::generate_ray (int x, int y) const {
// from the left of the camera plane to the right (normalized)
glm::vec3 right = glm::normalize (glm::cross(direction_, up_vector_));
// from bottom of the camera plane to the top (normalized)
glm::vec3 top = glm::normalize (glm::cross(direction_, right));
// TODO could be simplified to save two variables
float x_percentage = (float) x / (float) width_;
float y_percentage = (float) y / (float) height_;
// now a range from -1 to 1 depending on the x to width ratio
float scale_x = -(x_percentage - 0.5f) * 2 * ((float) width_ / (float) height_);
// now a range from -1 to 1 depending on the y to height ratio
float scale_y = -(y_percentage - 0.5f) * 2;
// makes image blurry - like looking through the rong glasses / uneven glass
float a = random_float();
float b = random_float();
float c = random_float();
// vector in random direction
glm::vec3 blur = glm::normalize (glm::vec3 {a, b, c});
// depth of field
// "strength of depth effect" - standard 0
blur *= dof_strength_;
glm::vec3 direction = glm::normalize((direction_ * distance_) + (top * scale_y) + (right * scale_x));
/// The focal point will be set to a given distance, therefore there must be an offset in position and direction which cancel at the direction
direction *= focal_length_;
// new custom camera ray for the given pixel
return {- blur + position_, direction + blur};
}
/**
* Sets the position of the camera to a new vector
* @param pos new position of the camera
*/
void Camera::set_position (glm::vec3 const& pos) {
position_ = pos;
}
/**
Sets the width and height of the screen
@param width of the screen in pixel/ int
@param height of the screen in pixel/ int
*/
void Camera::set_width_height (int width, int height) {
width_ = width;
height_ = height;
}
/**
Sets the direction in which the camera faces - camera can be moved by changing mouse position
@param window of scene
*/
void Camera::set_direction (Window const &window) {
glm::vec2 mouse = window.mouse_position();
// looks if mouse is in window
if (0 < mouse[0] && mouse[0] < window.window_size()[0] && 0 < mouse[1] && mouse[1] < window.window_size()[1]) {
float mouse_x = -mouse[0] / window.window_size()[0];
float mouse_y = mouse[1] / window.window_size()[1];
// x, y and z are calculated by the position of the mouse (moving mose to the left side = direction goes to a position on the "left" side)
float x = sin (3.14f * 2 * mouse_x); // sin -> rotation around origin, 3.14f * 2 * mouse_x for a range from 0 to 2 pi
float y = cos (3.14f * 2 * mouse_x); // cos -> rotation around origin, 3.14f * 2 * mouse_y for a range from 0 to 2 pi
float z = cos (3.14f * mouse_y);
// Need to be scaled because we dont have a cylinder, but a sphere (z = 1/ z = 0) => x or y must be close to zero and not 1 as largest possible value
x *= (1 - abs(z));
// Need to be scaled because we dont have a cylinder, but a sphere (z = 1/ z = 0) => x or y must be close to zero and not 1 as largest possible value
y *= (1 - abs(z));
direction_ = glm::normalize(glm::vec3 {x, z, -y});
}
}
/**
Sets the depth of field by getting the strength of the depth effect and the length of the focal area
@param dof_strength strength of depth effect - standard 0
@param focal_length legth of focal area
*/
void Camera::set_depth_of_field (float dof_strength, float focal_length) {
// The focal point will be set to a given distance, therefore there must be an offset in position and direction which cancel at the direction
dof_strength_ = focal_length < INFINITY ? (focal_length / 250) : 0.1f;
focal_length_ = focal_length < INFINITY ? focal_length : 4.0f;
}
/**
Gives the position of the camera
*/
glm::vec3 Camera::get_position() const {
return position_;
}
/**
Gives the name of the camera
*/
std::string Camera::get_name() const {
return name_;
}
/**
Gives informations of the camera (name, position, ..) as a string - for creating the .sdf
*/
std::string Camera::get_information() const {
std::string information = " " + name_ + " " + std::to_string(fov_x_) + " " + std::to_string(position_[0]) + " " + std::to_string(position_[1]) + " " + std::to_string(position_[2]) + " " + std::to_string(direction_[0]) + " " + std::to_string(direction_[1]) + " " + std::to_string(direction_[2]) + " " + std::to_string(up_vector_[0]) + " " + std::to_string(up_vector_[1]) + " " + std::to_string(up_vector_[2]) + "\n";
return information;
}
/**
Prints the position and direction of the camera
*/
void Camera::print() const {
std::cout << "Camera position: x: " << this->position_[0] << " y: " << this->position_[1] << " z: " << this->position_[2] << std::endl;
std::cout << "Camera direction: x: " << this->direction_[0] << " y: " << this->direction_[1] << " z: " << this->direction_[2] << std::endl;
}
/**
Translates the camera by using given position
@param pos vector the camera is translated by
*/
void Camera::translate (glm::vec3 const &pos) {
// translating the camera by pos
position_ += pos;
}
/**
* Giving the camera a position to point at - we can change the direction
* @param pos position to look at
*/
void Camera::look_at (glm::vec3 const &pos) {
direction_ = pos - position_;
direction_ = glm::normalize (direction_);
}
/**
Camera can be moved by using wasd, space and shift
@param window of the scene
*/
void Camera::move (Window const &window) {
int w = window.get_key(87);
int s = window.get_key(83);
int a = window.get_key(65);
int d = window.get_key(68);
int space = window.get_key(32);
int shift = window.get_key(340);
// w: forwards movement
// a: left movement
// s: backwards movement
// d: right movement
// space: up movement
// shift: down movement
float x = w - s;
float z = a - d;
float y = space - shift;
// Calculating the direction the camera moves in with when one of the w a s d Keys are pressed (ws in one direction (dir) and dir_orthogonal for a and d)
glm::vec3 dir = glm::normalize (glm::vec3 {direction_[0], 0, direction_[2]});
glm::vec3 dir_orthogonal = glm::normalize (glm::cross (dir, up_vector_));
dir *= x;
dir_orthogonal *= z;
// New position calculated
position_ += glm::vec3 {0, y, 0} + dir + dir_orthogonal;
}
/**
* for roughness - get a value between -1 and 1
* @return random float value
*/
float Camera::random_float() const {
// range from -range to range
return ((((float) rand()) / (float) (RAND_MAX / 2.0f)) - 1.0f);
}
| 32.912162 | 419 | 0.649559 | jacoblammert |
7b17e7e7fb6a568984fbf6c4e5913cb6c01ed11a | 10,347 | cpp | C++ | Source/Scene/imstkcpdScene.cpp | quantingxie/vibe | 965a79089ac3ec821ad65c45ac50e69bf32dc92f | [
"Apache-2.0"
] | 2 | 2020-08-14T07:21:30.000Z | 2021-08-30T09:39:09.000Z | Source/Scene/imstkcpdScene.cpp | quantingxie/vibe | 965a79089ac3ec821ad65c45ac50e69bf32dc92f | [
"Apache-2.0"
] | null | null | null | Source/Scene/imstkcpdScene.cpp | quantingxie/vibe | 965a79089ac3ec821ad65c45ac50e69bf32dc92f | [
"Apache-2.0"
] | 1 | 2020-08-14T07:00:31.000Z | 2020-08-14T07:00:31.000Z | #include "imstkcpdScene.h"
#include <chrono>
using Clock = std::chrono::steady_clock;
const int nit = 1000;
namespace cpd
{
Scene::Scene()
{
m_constraintSolver = std::make_shared<ConstraintSolver>();
m_externalForceSolver = std::make_shared<ExternalForceSolver>();
m_time.setZero(nit);
m_pos.setZero(nit);
//Eigen::initParallel();
}
unsigned Scene::getParticleCount()
{
unsigned nbrParticle = 0;
for (auto& object : m_objects)
{
nbrParticle += object->getParticleCount();
}
return nbrParticle;
}
void Scene::addCollisionPair(ParticleObjectPtr p_object1, ParticleObjectPtr p_object2)
{
auto& pair = std::make_shared<CollisionPair>(p_object1, p_object2, COLLISION_SOLVER_ITERATION);
m_constraintSolver->addCollisionPair(pair);
}
void Scene::initializeConstraints()
{
unsigned priority = 0;
for (auto& object : m_objects)
{
ConstraintSetPtr constraintSet = std::make_shared<ConstraintSet>(object, priority);
object->addConstraintSet(constraintSet);
auto& types = object->getConstraintTypes();
for (auto& type : types)
{
auto subtype = type.getSubType();
switch (type.getType())
{
case ConstraintBase::Type::Distance:
constraintSet->initializeDistanceConstraints(subtype);
break;
case ConstraintBase::Type::Dihedral:
constraintSet->initializeDihedralConstraints();
break;
case ConstraintBase::Type::Area:
constraintSet->initializeAreaConstraints();
break;
case ConstraintBase::Type::CST2D:
constraintSet->initializeCSTEnergyConstraints(subtype);
break;
case ConstraintBase::Type::CST3D:
constraintSet->initializeCSVEnergyConstraints(subtype);
break;
case ConstraintBase::Type::Linear1D:
constraintSet->initializeOneDEnergyConstraints();
break;
default:
break;
}
}
constraintSet->setTimeStepSize(m_dt);
std::cout << "dt = " << m_dt << std::endl;
m_constraintSolver->addConstraintSet(constraintSet);
std::cout << "Number of constraints = " << constraintSet->getConstraints().size() << std::endl;
}
}
// One-time setup before stepping: prepare each object, distribute the
// registered external/distributed forces to their target objects, refresh
// accelerations, and build the constraint sets.
void Scene::preSimulation()
{
    std::cout << "Initialization ..." << std::endl;
    for (auto& object : m_objects)
    {
        object->preSimulation(m_dt);
    }
    m_externalForceSolver->setTimeStepSize(m_dt);
    // External forces: a force marked "for all" is attached to every object
    // (and records them as affected); otherwise only to its listed objects.
    for (auto& force : m_externalForceSolver->getExternalForces())
    {
        if (force->isForAll())
        {
            for (auto& obj : m_objects)
            {
                force->addAffectedObject(obj);
                obj->addExternalForce(force->getForce());
            }
        }
        else
        {
            for (auto& obj : force->getAffectedObjects())
            {
                obj->addExternalForce(force->getForce());
            }
        }
    }
    // Distributed forces follow the same pattern as external forces above.
    for (auto& force : m_externalForceSolver->getDistributedForces())
    {
        if (force->isForAll())
        {
            for (auto& obj : m_objects)
            {
                force->addAffectedObject(obj);
                obj->addDistributedForce(force->getForce());
            }
        }
        else
        {
            for (auto& obj : force->getAffectedObjects())
            {
                obj->addDistributedForce(force->getForce());
            }
        }
    }
    m_externalForceSolver->updateAffectedObject();
    for (auto& object : m_objects)
    {
        object->updateParticleAcceleration();
    }
    initializeConstraints();
    std::cout << "Initialization Finished." << std::endl;
}
// Advance the scene by one time step: handle pending resets, apply external
// forces, run the constraint/collision solver, and update positions.
// Returns the continue/stop flag produced by tempPrint().
bool Scene::simulate()
{
    // Reset objects (and the solver's Lagrange multipliers) if requested.
    bool resetConstraint = false;
    for (auto& obj : m_objects)
    {
        if (obj->checkForReset())
        {
            obj->reset();
            resetConstraint = true;
        }
    }
    if (resetConstraint) {
        m_constraintSolver->resetLambda();
    }

    auto tStart = Clock::now();

    // Time-stepping for external forces.
    m_externalForceSolver->exertForces();

    // Solve constraints and collisions; iter is the solver's iteration count.
    unsigned iter = m_constraintSolver->solve();

    // Update position and velocity of every object.
    for (auto& obj : m_objects)
    {
        //if (!obj->isPBD())
        //  obj->updateTempPositions();
        obj->updatePositions();
    }
    auto tEnd = Clock::now();
    // Wall-clock step time in nanoseconds, recorded by tempPrint().
    double time = std::chrono::duration_cast<std::chrono::nanoseconds>(tEnd - tStart).count();

    // FIX: removed unused locals 'pos', 'res' and the unused timestamp
    // 'tSolveEnd' from the original — none of them were read.
    bool finalstate = tempPrint(time, iter);
    return finalstate;
}
// Teardown/reporting after the run: dump profiling vectors and, per object,
// its positions plus every constraint's results into MATLAB script files.
void Scene::postSimulation()
{
    std::cout << "Cleaning up ..." << std::endl;
    bool write = true;  // debug switch; output is skipped when set to false
    if (write)
    {
        // Per-step timing and residual/iteration histories (see tempPrint).
        writeVectorMatlabPlot(m_time, "time.m");
        writeVectorMatlabPlot(m_pos, "res.m");
        for (auto& obj : m_objects)
        {
            obj->writePositions();
            auto& sets = obj->getConstraintSets();
            // Write the opening of the MATLAB array, close the stream so the
            // constraints can append to the same file, then re-open to close
            // the bracket. NOTE(review): the file is recreated per object, so
            // only the last object's data survives in "test.m".
            std::ofstream mfile("test.m");
            mfile << "strain = [";
            mfile.close();
            for (auto& s : sets)
            {
                auto& cs = s->getConstraints();
                for (auto& c : cs)
                {
                    c->writeResults("test.m");
                }
            }
            mfile.open("test.m", std::ofstream::out | std::ofstream::app);
            mfile << "];";
            mfile.close();
        }
    }
    std::cout << "Clean-up Finished." << std::endl;
}
// Debug/profiling hook called once per simulate() step.
// 'time' is the wall-clock step time in nanoseconds; 'res' receives the
// solver iteration count (simulate() passes 'iter' here).
// Returns 'onetime' — false once convergence has persisted or the sample
// budget 'nit' is exceeded; simulate() forwards this as its stop flag.
// NOTE(review): the return statement sits INSIDE the object loop, so only
// the first object is ever processed; and if 'printout' were false or
// m_objects empty, control falls off the end without returning (UB).
bool Scene::tempPrint(double time, double res)
{
    //std::cout << '.';
    // Optional "chain" recorder: snapshots 'size' particle positions for
    // 'step' frames, sampled every 'dt' calls (disabled by default).
    const int step = 150;
    const int size = 21;
    static double xx[step][size][3];
    static int cstep = 0;  // frames captured so far
    static int st = 0;     // calls seen so far
    static int dt = ceil((1.3 / m_dt) / step);
    static bool chain = false;
    if (chain)
    {
        if (st%dt == 0)
        {
            for (auto& obj : m_objects)
            {
                auto& ps = obj->getPositions();//obj->getPrevPositions();
                for (int k = 0; k < size; k++)
                {
                    xx[cstep][k][0] = ps[k][0];
                    xx[cstep][k][1] = ps[k][1];
                    xx[cstep][k][2] = ps[k][2];
                }
            }
            cstep++;
            std::cout << cstep << '/' << st << std::endl;
        }
        st++;
        if (cstep == step)
        {
            // All frames collected: dump them as MATLAB matrices u/v/w
            // (x/y/z components respectively) and stop recording.
            chain = false;
            std::ofstream mfile("chaincpd4.m");
            if (!mfile.is_open())
            {
                std::cout << "Unable to create or open file.";
            }
            std::string tempstr = std::string("chaincpd.m");
            mfile << "u = [\n";
            for (int c = 0; c < step; ++c)
            {
                for (auto i = 0; i < size; ++i)
                {
                    mfile << std::setprecision(std::numeric_limits<long double>::digits10 + 1) << xx[c][i][0] << ' ';
                }
                mfile << "\n";
            }
            mfile << "];\n";
            mfile << "v = [\n";
            for (int c = 0; c < step; ++c)
            {
                for (auto i = 0; i < size; ++i)
                {
                    mfile << std::setprecision(std::numeric_limits<long double>::digits10 + 1) << xx[c][i][1] << ' ';
                }
                mfile << "\n";
            }
            mfile << "];\n";
            mfile << "w = [\n";
            for (int c = 0; c < step; ++c)
            {
                for (auto i = 0; i < size; ++i)
                {
                    mfile << std::setprecision(std::numeric_limits<long double>::digits10 + 1) << xx[c][i][2] << ' ';
                }
                mfile << "\n";
            }
            mfile << "];\n";
            mfile.close();
            std::cout << "Write finished.";
        }
    }
    bool printout = true;
    if (printout)
    for (auto& obj : m_objects)
    {
        static int it = 0;  // step counter shared across calls
        static double max = 900.0;
        static bool tryit = true;
        // iteration at which max x displacement is reached
        if (tryit)
        {
            if ((obj->getPosition(obj->getParticleCount() - 1)).y() <= max)
                max = (obj->getPosition(obj->getParticleCount() - 1)).y();
            else
            {
                tryit = false;
                std::cout << "it = " << it << ", max = " << max << std::endl;
            }
        }
        // Accumulate the first 'nit' samples, then report trimmed averages
        // once (outliers above 1.5x the mean are replaced by the mean).
        static double totaltime = 0;
        static int totaliter = 0;
        if (it < nit)
        {
            m_time[it] = time;
            m_pos[it] = res;
            std::cout << it << std::endl;
            totaltime += time;
            totaliter += res;
        }
        else if (it == nit)
        {
            double average = totaltime / nit;
            for (unsigned ave = 0; ave < nit; ave++)
            {
                if (m_time[ave] > 1.5*average)
                    totaltime += average - m_time[ave];
            }
            std::cout << std::endl << "Average time = " << totaltime / nit << std::endl << std::endl;
            std::cout << std::endl << "Average iter = " << totaliter / nit << std::endl << std::endl;
        }
        it++;
        // static state
        // Convergence check on the last particle's displacement: 'dist' is
        // the relative change between consecutive steps.
        static double dist = 1.0;
        static Vec3d pos = Vec3d(0, 0, 0);
        static Vec3d posp = Vec3d(0, 0, 0);
        static bool onetime = true;
        pos = obj->getDisplacement(obj->getParticleCount() - 1);
        static bool first = true;
        if (first)
        {
            // Seed with a unit displacement so the first dist is finite.
            first = false;
            pos = Vec3d(1.0, 0.0, 0.0);
        }
        dist = (pos - posp).norm() / pos.norm();
        int n = int(1 / TIMESTEP);
        //n = 1;
        if ((it%n) == 0)
        {
            // Periodic progress report (once per simulated second).
            std::cout << "it = " << it << ':' << dist * n << std::endl;
            printVector(obj->getPosition(obj->getParticleCount() - 1));
            std::cout << (pos - posp).norm() << ',' << pos.norm() << ',' << dist << ", cr = " << dist / TIMESTEP << std::endl;
        }
        posp = pos;
        static bool converge = false;
        static int cv = 0;
        if (onetime) {
            // Declare convergence after the rate stays below 1e-6 for more
            // than 10 consecutive steps; clears 'onetime' to stop the run.
            if (dist / TIMESTEP < 1e-6)
            {
                printVector(obj->getPosition(obj->getParticleCount() - 2));
                printVector(obj->getPosition(obj->getParticleCount() - 1));
                std::cout << std::endl;
                if (cv > 10)
                {
                    onetime = false;
                }
                converge = true;
                cv++;
                std::cout << "it = " << it << ':' << dist / TIMESTEP << std::endl;
                printVector(obj->getPosition(obj->getParticleCount() - 1));
            }
            else
            {
                converge = false;
                cv = 0;
            }
        }
        // Hard stop once the sample budget is exhausted.
        if (it > nit)
            onetime = false;
        return onetime;
    }
}
}
| 25.802993 | 124 | 0.50662 | quantingxie |
7b1b0dfd044f5948256c953748dddbf54b8266c7 | 10,573 | cpp | C++ | applications/physbam/physbam-lib/Public_Library/PhysBAM_Dynamics/Incompressible_Flows/IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM.cpp | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | 20 | 2017-07-03T19:09:09.000Z | 2021-09-10T02:53:56.000Z | applications/physbam/physbam-lib/Public_Library/PhysBAM_Dynamics/Incompressible_Flows/IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM.cpp | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | null | null | null | applications/physbam/physbam-lib/Public_Library/PhysBAM_Dynamics/Incompressible_Flows/IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM.cpp | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | 9 | 2017-09-17T02:05:06.000Z | 2020-01-31T00:12:01.000Z | //#####################################################################
// Copyright 2005-2006, Geoffrey Irving, Frank Losasso, Tamar Shinar.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
#include <PhysBAM_Tools/Grids_Uniform/UNIFORM_GRID_ITERATOR_CELL.h>
#include <PhysBAM_Tools/Grids_Uniform/UNIFORM_GRID_ITERATOR_FACE.h>
#include <PhysBAM_Tools/Grids_Uniform_Interpolation/AVERAGING_UNIFORM.h>
#include <PhysBAM_Tools/Read_Write/Grids_Uniform/READ_WRITE_GRID.h>
#include <PhysBAM_Tools/Read_Write/Grids_Uniform_Arrays/READ_WRITE_ARRAYS.h>
#include <PhysBAM_Tools/Read_Write/Grids_Uniform_Arrays/READ_WRITE_FACE_ARRAYS.h>
#include <PhysBAM_Tools/Read_Write/Utilities/FILE_UTILITIES.h>
#include <PhysBAM_Geometry/Grids_Uniform_PDE_Linear/POISSON_COLLIDABLE_UNIFORM.h>
#include <PhysBAM_Geometry/Read_Write/Grids_Uniform_Level_Sets/READ_WRITE_FAST_LEVELSET.h>
#include <PhysBAM_Geometry/Read_Write/Grids_Uniform_Level_Sets/READ_WRITE_LEVELSET_1D.h>
#include <PhysBAM_Geometry/Read_Write/Grids_Uniform_Level_Sets/READ_WRITE_LEVELSET_2D.h>
#include <PhysBAM_Geometry/Read_Write/Grids_Uniform_Level_Sets/READ_WRITE_LEVELSET_3D.h>
#include <PhysBAM_Dynamics/Heat_Flows/HEAT_LAPLACE.h>
#include <PhysBAM_Dynamics/Incompressible_Flows/IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM.h>
#include <PhysBAM_Dynamics/Incompressible_Flows/INCOMPRESSIBLE_MULTIPHASE_UNIFORM.h>
using namespace PhysBAM;
//#####################################################################
// Constructor
//#####################################################################
// Multiphase implicit-viscosity solver for one velocity component ('axis').
// Forwards to the single-phase base with zero density/viscosity (the
// per-region values in 'densities'/'viscosities' are used instead).
template<class T_GRID> IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<T_GRID>::
IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM(PROJECTION_DYNAMICS_UNIFORM<T_GRID>& projection_input,const T_ARRAYS_SCALAR& variable_viscosity_input,const ARRAY<T>& densities_input,const ARRAY<T>& viscosities_input,T_MPI_GRID* mpi_grid_input,const int axis_input,bool use_variable_viscosity_input)
    :IMPLICIT_VISCOSITY_UNIFORM<T_GRID>(*projection_input.elliptic_solver,variable_viscosity_input,(T)0,(T)0,mpi_grid_input,axis_input,use_variable_viscosity_input,false),projection(projection_input),densities(densities_input),viscosities(viscosities_input)
{}
//#####################################################################
// Destructor
//#####################################################################
template<class T_GRID> IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<T_GRID>::
~IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM()
{}
//#####################################################################
// Function Allocate_Heat_Solver
//#####################################################################
// Creates the Poisson-based heat solver used for the implicit viscous
// update. NOTE(review): raw new assigned to 'heat_solver' — presumably the
// base class owns and frees it; confirm in IMPLICIT_VISCOSITY_UNIFORM.
template<class T_GRID> void IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<T_GRID>::
Allocate_Heat_Solver()
{
    heat_solver=new HEAT_LAPLACE<POISSON_COLLIDABLE_UNIFORM<T_GRID> >(face_grid,u);
}
//#####################################################################
// Function Setup_Viscosity
//#####################################################################
// Configures the heat solver's per-region coefficients for an implicit
// viscous step of size dt, and samples the multiphase levelset onto the
// face grid of this velocity component.
template<class T_GRID> void IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<T_GRID>::
Setup_Viscosity(const T dt)
{
    if(use_variable_viscosity) PHYSBAM_NOT_IMPLEMENTED();
    POISSON_COLLIDABLE_UNIFORM<T_GRID>& heat_poisson=dynamic_cast<POISSON_COLLIDABLE_UNIFORM<T_GRID>&>(*heat_solver);
    PROJECTION_DYNAMICS_UNIFORM<T_GRID>& projection_dynamics=dynamic_cast<PROJECTION_DYNAMICS_UNIFORM<T_GRID>&>(projection);
    heat_poisson.multiphase=true;
    const int number_of_regions=densities.m;
    // set viscosity coefficients: beta_i = dt * mu_i / rho_i (dt times the
    // kinematic viscosity of region i)
    ARRAY<T> dt_times_kinematic_viscosity(number_of_regions);
    for(int i=1;i<=number_of_regions;i++) dt_times_kinematic_viscosity(i)=dt*viscosities(i)/densities(i);
    heat_poisson.Set_Constant_beta(dt_times_kinematic_viscosity);
    // set up internal levelset: average the cell-centered phi of each region
    // onto this component's face positions (2 ghost cells)
    heat_poisson.Use_Internal_Level_Set(number_of_regions);
    T_AVERAGING averaging;const T_LEVELSET_MULTIPLE& cell_centered_levelset_multiple=*projection_dynamics.poisson_collidable->levelset_multiple;
    for(CELL_ITERATOR iterator(face_grid,2);iterator.Valid();iterator.Next()){TV_INT cell_index=iterator.Cell_Index(),p_face_index=cell_index;
        for(int i=1;i<=number_of_regions;i++) heat_poisson.levelset_multiple->phis(i)(cell_index)=averaging.Cell_To_Face(projection.p_grid,axis,p_face_index,cell_centered_levelset_multiple.phis(i));}
    heat_poisson.levelset_multiple->Project_Levelset(2);
    // Combustion runs additionally need the velocity jump across the flame.
    if(projection_dynamics.flame) Calculate_Velocity_Jump();
    heat_poisson.Find_Constant_beta_Multiphase(heat_poisson.levelset_multiple->phis);
}
//#####################################################################
// Function Setup_Boundary_Conditions
//#####################################################################
// Applies the base-class boundary conditions, then marks every face whose
// viscosity coefficient is zero as Neumann so inviscid regions are excluded
// from the implicit solve.
template<class T_GRID> void IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<T_GRID>::
Setup_Boundary_Conditions(const T_FACE_ARRAYS_SCALAR& face_velocities)
{
    IMPLICIT_VISCOSITY_UNIFORM<T_GRID>::Setup_Boundary_Conditions(face_velocities);
    // set neumann b.c. at zero viscosity faces
    POISSON_COLLIDABLE_UNIFORM<T_GRID>& heat_poisson=dynamic_cast<POISSON_COLLIDABLE_UNIFORM<T_GRID>&>(*heat_solver);
    for(FACE_ITERATOR iterator(face_grid);iterator.Valid();iterator.Next()){int face_axis=iterator.Axis();TV_INT face=iterator.Face_Index();
        if(!heat_poisson.beta_face(face_axis,face)) heat_poisson.psi_N(face_axis,face)=true;}
}
//#####################################################################
// Function Calculate_Velocity_Jump
//#####################################################################
// Computes the normal-velocity jump across fuel/product interfaces (flame
// fronts) and stores it per face in the heat solver's u_jump_face, so the
// implicit viscous solve accounts for expansion across the flame.
template<class T_GRID> void IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<T_GRID>::
Calculate_Velocity_Jump()
{
    POISSON_COLLIDABLE_UNIFORM<T_GRID>& heat_poisson=dynamic_cast<POISSON_COLLIDABLE_UNIFORM<T_GRID>&>(*heat_solver);
    PROJECTION_DYNAMICS_UNIFORM<T_GRID>& projection_dynamics=dynamic_cast<PROJECTION_DYNAMICS_UNIFORM<T_GRID>&>(projection);
    heat_poisson.Set_Jump_Multiphase();
    const ARRAY<TRIPLE<T,T,T> ,VECTOR<int,2> >& flame_speed_constants=projection_dynamics.flame_speed_constants;
    TV_INT axis_offset=TV_INT::Axis_Vector(axis);
    for(FACE_ITERATOR iterator(face_grid);iterator.Valid();iterator.Next()){TV_INT face_index=iterator.Face_Index();TV_INT face_axis_offset=TV_INT::Axis_Vector(iterator.Axis());
        // Identify the regions on either side of this face; the denser side
        // is treated as fuel (see comparison below).
        int region_1=heat_poisson.levelset_multiple->Inside_Region(iterator.First_Cell_Index()),region_2=heat_poisson.levelset_multiple->Inside_Region(iterator.Second_Cell_Index());
        // [Vn]=M*[1/density] with M=-density_fuel*flame_speed, [1/density]=(1/density_fuel-1/density_products), flame_speed_constant.z=(-density_fuel*[1/density])
        int fuel_region=region_1,product_region=region_2;
        if(densities(region_1)<densities(region_2)){fuel_region=region_2;product_region=region_1;}
        // constants.z==0 means no flame between this pair of regions.
        const TRIPLE<T,T,T>& constants=flame_speed_constants(fuel_region,product_region);if(constants.z==0)continue;
        const T_LEVELSET& levelset=*projection_dynamics.poisson_collidable->levelset_multiple->levelsets(fuel_region);
        // Base flame speed plus an optional curvature-dependent term
        // (constants.y); curvature/normals are averaged onto this face when
        // the face axis differs from this solver's velocity axis.
        T flame_speed=constants.x;
        if(constants.y){T face_curvature;TV_INT p_face_index=face_index;
            if(iterator.Axis()==axis)face_curvature=(*levelset.curvature)(p_face_index-axis_offset);
            else{face_curvature=(T).25*((*levelset.curvature)(p_face_index-axis_offset-face_axis_offset)+(*levelset.curvature)(p_face_index-face_axis_offset)+
                (*levelset.curvature)(p_face_index)+(*levelset.curvature)(p_face_index-axis_offset));}
            flame_speed+=constants.y*face_curvature;}
        T face_normal;TV_INT p_face_index=face_index;
        if(iterator.Axis()==axis)face_normal=(*levelset.normals)(p_face_index-axis_offset)[axis];
        else{face_normal=((*levelset.normals)(p_face_index-axis_offset-face_axis_offset)+(*levelset.normals)(p_face_index-face_axis_offset)+
            (*levelset.normals)(p_face_index)+(*levelset.normals)(p_face_index-axis_offset)).Normalized()[axis];}
        heat_poisson.u_jump_face.Component(iterator.Axis())(face_index)=LEVELSET_MULTIPLE_UNIFORM<T_GRID>::Sign(product_region,fuel_region)*constants.z*flame_speed*face_normal;}
}
//#####################################################################
// Debug_Write
//#####################################################################
// Dumps the solver's state (grid, boundary-condition masks, region colors,
// beta coefficients, per-region levelsets) to a per-axis debug directory;
// frame counters are kept per axis across calls.
template<class T_GRID> void IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<T_GRID>::
Debug_Write(const std::string& output_directory_input)
{
    POISSON_COLLIDABLE_UNIFORM<T_GRID>& heat_poisson=dynamic_cast<POISSON_COLLIDABLE_UNIFORM<T_GRID>&>(*heat_solver);
    static int frame[3]={0,0,0};  // one frame counter per axis (1..3)
    // Under MPI each rank writes into its own subdirectory.
    std::string output_directory=output_directory_input;if(mpi_grid) output_directory+=STRING_UTILITIES::string_sprintf("/processor%d",mpi_grid->rank);
    FILE_UTILITIES::Create_Directory(output_directory);
    std::string output_directory_axis=STRING_UTILITIES::string_sprintf("%s/%d",output_directory.c_str(),axis);FILE_UTILITIES::Create_Directory(output_directory_axis);
    std::string f=STRING_UTILITIES::Value_To_String(frame[axis-1]);
    FILE_UTILITIES::Write_To_File<T>(output_directory_axis+"/grid",face_grid);
    FILE_UTILITIES::Write_To_File<T>(output_directory_axis+"/psi_N."+f,heat_poisson.psi_N);
    FILE_UTILITIES::Write_To_File<T>(output_directory_axis+"/psi_D."+f,heat_poisson.psi_D);
    FILE_UTILITIES::Write_To_File<T>(output_directory_axis+"/colors."+f,heat_poisson.filled_region_colors);
    FILE_UTILITIES::Write_To_File<T>(output_directory_axis+"/beta_face."+f,heat_poisson.beta_face);
    for(int i=1;i<=densities.m;i++){
        std::string filename=STRING_UTILITIES::string_sprintf("/levelset_%d.%s",i,f.c_str());
        FILE_UTILITIES::Write_To_File<T>(output_directory_axis+filename,*heat_poisson.levelset_multiple->levelsets(i));}
    // NOTE(review): writes into ".../common/" without creating that
    // subdirectory first — confirm Write_To_Text_File creates parents.
    FILE_UTILITIES::Write_To_Text_File(output_directory_axis+"/common/last_frame",frame[axis-1]);frame[axis-1]+=1;
}
//#####################################################################
// Explicit template instantiations for the grid types used by PhysBAM
// (float always; double only when double support is compiled in).
template class IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<GRID<VECTOR<float,1> > >;
template class IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<GRID<VECTOR<float,2> > >;
template class IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<GRID<VECTOR<float,3> > >;
#ifndef COMPILE_WITHOUT_DOUBLE_SUPPORT
template class IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<GRID<VECTOR<double,1> > >;
template class IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<GRID<VECTOR<double,2> > >;
template class IMPLICIT_VISCOSITY_MULTIPHASE_UNIFORM<GRID<VECTOR<double,3> > >;
#endif
| 73.937063 | 288 | 0.720609 | schinmayee |
7b1bcb43e5a3ff72c14e8f82bf2ce5866fbceff2 | 350 | cpp | C++ | CodeForces/Complete/300-399/339B-XeniaAndRingroad.cpp | Ashwanigupta9125/code-DS-ALGO | 49f6cf7d0c682da669db23619aef3f80697b352b | [
"MIT"
] | 36 | 2019-12-27T08:23:08.000Z | 2022-01-24T20:35:47.000Z | CodeForces/Complete/300-399/339B-XeniaAndRingroad.cpp | Ashwanigupta9125/code-DS-ALGO | 49f6cf7d0c682da669db23619aef3f80697b352b | [
"MIT"
] | 10 | 2019-11-13T02:55:18.000Z | 2021-10-13T23:28:09.000Z | CodeForces/Complete/300-399/339B-XeniaAndRingroad.cpp | Ashwanigupta9125/code-DS-ALGO | 49f6cf7d0c682da669db23619aef3f80697b352b | [
"MIT"
] | 53 | 2020-08-15T11:08:40.000Z | 2021-10-09T15:51:38.000Z | #include <cstdio>
int main(){
    // Codeforces 339B: Xenia walks clockwise around n houses completing m
    // tasks in order; accumulate the total distance travelled.
    long n = 0, m = 0;
    scanf("%ld %ld", &n, &m);
    long long total = 0;  // distance can exceed 32 bits
    long prev = 1;        // Xenia starts at house 1
    for (long k = 0; k < m; ++k){
        long task = 0;
        scanf("%ld", &task);
        total += task - prev;
        // Moving clockwise only: wrap once around if the task is behind us.
        if (task < prev) total += n;
        prev = task;
    }
    printf("%lld\n", total);
    return 0;
}
| 19.444444 | 46 | 0.465714 | Ashwanigupta9125 |
7b209de8b83eb3fa2daf1d2844d249798e75317a | 4,221 | cpp | C++ | kxEditor/CmdMgr.cpp | kxkx5150/KXEditor | d131cc6312b5cbff0c297bd269a253c0c7c6380a | [
"MIT"
] | null | null | null | kxEditor/CmdMgr.cpp | kxkx5150/KXEditor | d131cc6312b5cbff0c297bd269a253c0c7c6380a | [
"MIT"
] | null | null | null | kxEditor/CmdMgr.cpp | kxkx5150/KXEditor | d131cc6312b5cbff0c297bd269a253c0c7c6380a | [
"MIT"
] | null | null | null | #include "CmdMgr.h"
#include <nlohmann/json.hpp>
#include <windows.h>
// Command manager: routes keyboard input from the UI to the active document
// and to the node backend (non-owning pointers to the managers).
CmdMgr::CmdMgr(NodeMgr* nodemgr,ContMgr* contmgr)
{
    m_nodemgr = nodemgr;
    m_contmgr = contmgr;
}
CmdMgr::~CmdMgr()
{
}
// Stores the owning window handle for later use.
void CmdMgr::set_hwnd(HWND hwnd)
{
    m_hwnd = hwnd;
}
// WM_KEYDOWN handler for container 'contno'. Letter keys are forwarded to
// the node backend only when a chord modifier (Ctrl/Alt) is held — plain
// letters arrive as text via OnChar. Navigation and editing keys are always
// forwarded. Other keys (including bare Shift/Ctrl) are ignored.
LONG CmdMgr::on_keydown(int contno, UINT nKeyCode, UINT nFlags)
{
    // High bit of GetKeyState is set while the key is currently held down.
    const bool ctrl = GetKeyState(VK_CONTROL) < 0;
    const bool shift = GetKeyState(VK_SHIFT) < 0;
    const bool alt = GetKeyState(VK_MENU) < 0;

    // Virtual-key codes for letters equal their ASCII values, so the 26
    // explicit fallthrough cases of the original collapse to a range check.
    if (nKeyCode >= 'A' && nKeyCode <= 'Z') {
        if (ctrl || alt)
            send_keycode_json(contno, "keydown", nKeyCode, ctrl, shift, alt);
        return 0;
    }

    switch (nKeyCode) {
    case VK_LEFT:
    case VK_RIGHT:
    case VK_UP:
    case VK_DOWN:
    case VK_BACK:
    case VK_DELETE:
        send_keycode_json(contno, "keydown", nKeyCode, ctrl, shift, alt);
        break;
    default:
        // Includes VK_SHIFT/VK_CONTROL, which the original ignored explicitly.
        break;
    }
    return 0;
}
// WM_CHAR handler: inserts printable characters into the active document.
// Carriage return is normalized to line feed by re-entering with '\n'.
LONG CmdMgr::OnChar(int contno, UINT nChar, UINT nFlags)
{
    TCHAR ch = (TCHAR)nChar;
    if (nChar == '\r') {
        // Normalize CR -> LF so Enter is handled by exactly one path.
        OnChar(contno, (UINT)'\n', nFlags);
    } else if (nChar == '\n') {
        m_contmgr->m_containers[contno].tabs->m_active_tab->m_docmgr->keydown_enter(&ch, 1);
    } else if (nChar > 31 || nChar == '\t') {
        // Printable characters plus tab; other control codes are dropped.
        m_contmgr->m_containers[contno].tabs->m_active_tab->m_docmgr->keydown_text(&ch, 1);
    }
    return 0;
}
void CmdMgr::send_keycode_json(int contno, std::string type, UINT nKeyCode, BOOL ctrl, BOOL shift, BOOL alt)
{
nlohmann::json j;
j["contno"] = contno;
j["tabno"] = m_contmgr->m_containers[contno].tabs->m_active_tab_no;
j["mode"] = m_contmgr->m_containers[contno].tabs->m_active_tab->m_docmgr->m_editor_mode;
j["type"] = type;
j["keycode"] = nKeyCode;
j["ctrl"] = ctrl;
j["shift"] = shift;
j["alt"] = alt;
m_nodemgr->beast_ws_write(m_contmgr,j);
}
// Parses a command batch from the node backend and applies each command to
// the document of the container/tab addressed by the message.
LONG CmdMgr::exec(std::string message)
{
    nlohmann::json j = nlohmann::json::parse(message);
    // Resolve the target document lazily so messages with no matching command
    // never index m_containers with a possibly-absent contno/tabno. This
    // replaces the original's repetition of the full access chain per branch.
    auto target_doc = [&]() {
        return m_contmgr->m_containers[j["contno"]].tabs->m_tabs[j["tabno"]]->m_docmgr;
    };
    for (int i = 0; i < j["commands"].size(); ++i) {
        auto& cmd = j["commands"][i];
        if (cmd["type"] == "caret") {
            if (cmd["command"] == "right") target_doc()->move_caret(VK_RIGHT);
            else if (cmd["command"] == "left") target_doc()->move_caret(VK_LEFT);
            else if (cmd["command"] == "up") target_doc()->move_caret(VK_UP);
            else if (cmd["command"] == "down") target_doc()->move_caret(VK_DOWN);
        } else if (cmd["type"] == "clipboard") {
            if (cmd["command"] == "paste") target_doc()->on_paste();
            else if (cmd["command"] == "copy") target_doc()->on_copy();
            else if (cmd["command"] == "cut") target_doc()->on_cut();
        } else if (cmd["type"] == "input") {
            if (cmd["command"] == "backspace") target_doc()->keydown_backspace();
            else if (cmd["command"] == "delete") target_doc()->keydown_delete();
        } else if (j["type"] == "none") {
            // NOTE(review): reproduces the original's check of the TOP-LEVEL
            // j["type"] (not cmd["type"]) — confirm whether that is intended.
            OutputDebugString(L"none\n");
        }
    }
    return 0;
}
| 28.714286 | 110 | 0.548685 | kxkx5150 |
7b2292fa629b25a62f8a6ccb64d25887a6cf388d | 1,757 | cpp | C++ | Codes/Codes_for_xStar/diploidSequence.cpp | CasperLumby/Bottleneck_Size_Estimation | 9f9d81e35c1ac9dc74541401e8da70d428be1ad1 | [
"MIT"
] | null | null | null | Codes/Codes_for_xStar/diploidSequence.cpp | CasperLumby/Bottleneck_Size_Estimation | 9f9d81e35c1ac9dc74541401e8da70d428be1ad1 | [
"MIT"
] | null | null | null | Codes/Codes_for_xStar/diploidSequence.cpp | CasperLumby/Bottleneck_Size_Estimation | 9f9d81e35c1ac9dc74541401e8da70d428be1ad1 | [
"MIT"
] | 1 | 2019-06-12T13:25:36.000Z | 2019-06-12T13:25:36.000Z | //
// diploidSequence.cpp
// TransmissionProject
//
// Created by Casper Lumby on 28/10/2015.
// Copyright © 2015 Casper Lumby. All rights reserved.
//
#include <stdio.h>
#include "iostream"
#include "diploidSequence.h"
using namespace std;
// Default-construct an empty diploid sequence.
DiploidSequence::DiploidSequence() {}
// Construct major/minor sequences of the given length.
DiploidSequence::DiploidSequence(int length) : majorSeq(Sequence(length)), minorSeq(Sequence(length)) { }
// Construct from existing major/minor Sequence objects (copied).
DiploidSequence::DiploidSequence(Sequence & ma, Sequence & mi) : majorSeq(ma), minorSeq(mi) { }
// Construct from major/minor base strings.
DiploidSequence::DiploidSequence(string ma, string mi) : majorSeq(Sequence(ma)), minorSeq(Sequence(mi)) { }
DiploidSequence::~DiploidSequence() {}
//Getters and setters
void DiploidSequence::setMajor(Sequence & ma) { majorSeq = ma; }
void DiploidSequence::setMinor(Sequence & mi){ minorSeq = mi; }
Sequence DiploidSequence::getMajor() { return majorSeq; }
Sequence DiploidSequence::getMinor() { return minorSeq; }
// Set a single base at 'index' in the major/minor sequence respectively.
void DiploidSequence::setMajor(int index, char ma) { majorSeq.setBase(index, ma); }
void DiploidSequence::setMinor(int index, char mi) { minorSeq.setBase(index, mi); }
void DiploidSequence::print() {
	// Emit "major/minor," for every locus (trailing comma kept), then newline.
	const int len = majorSeq.getLength();
	for (int pos = 0; pos < len; pos++) {
		cout << majorSeq.getBase(pos) << "/" << minorSeq.getBase(pos) << ",";
	}
	cout << "\n";
}
string DiploidSequence::printToString() {
	// Build "A/a , C/c , ..." — a " , " separator between pairs, none at the end.
	string result;
	const int len = majorSeq.getLength();
	for (int pos = 0; pos < len; pos++) {
		if (pos > 0) result += " , ";
		result += majorSeq.getBase(pos);
		result += "/";
		result += minorSeq.getBase(pos);
	}
	return result;
}
// Length in loci; major and minor sequences are kept the same length.
int DiploidSequence::getLength() {
	return majorSeq.getLength();
}
// Remove the base at 'index' from both the major and minor sequence.
void DiploidSequence::removeBase(int index) {
	majorSeq.removeBase(index);
	minorSeq.removeBase(index);
}
| 27.888889 | 107 | 0.69152 | CasperLumby |
7b242039bcfcd3edc867bfae06077d799306bcee | 6,764 | cc | C++ | vowpalwabbit/core/tests/cache_test.cc | HollowMan6/vowpal_wabbit | eecdaccce568b53ed195bc4d50a6a582ab9a83d5 | [
"BSD-3-Clause"
] | 4,332 | 2015-01-01T10:26:51.000Z | 2018-10-01T14:05:43.000Z | vowpalwabbit/core/tests/cache_test.cc | HollowMan6/vowpal_wabbit | eecdaccce568b53ed195bc4d50a6a582ab9a83d5 | [
"BSD-3-Clause"
] | 1,004 | 2015-01-01T12:00:54.000Z | 2018-09-30T22:13:42.000Z | vowpalwabbit/core/tests/cache_test.cc | HollowMan6/vowpal_wabbit | eecdaccce568b53ed195bc4d50a6a582ab9a83d5 | [
"BSD-3-Clause"
] | 1,182 | 2015-01-02T20:38:55.000Z | 2018-09-26T02:47:37.000Z | // Copyright (c) by respective owners including Yahoo!, Microsoft, and
// individual contributors. All rights reserved. Released under a BSD (revised)
// license as described in the file LICENSE.
#include "vw/core/cache.h"
#include "vw/core/parse_example.h"
#include "vw/core/vw.h"
#include "vw/core/vw_fwd.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <memory>
using namespace ::testing;
#include <string>
TEST(cache_tests, write_and_read_example)
{
auto& workspace = *VW::initialize("--quiet");
VW::example src_ex;
VW::read_line(workspace, &src_ex, "3.5 |ns1 example value test |ss2 ex:0.5");
auto backing_vector = std::make_shared<std::vector<char>>();
io_buf io_writer;
io_writer.add_file(VW::io::create_vector_writer(backing_vector));
VW::details::cache_temp_buffer temp_buffer;
VW::write_example_to_cache(
io_writer, &src_ex, workspace.example_parser->lbl_parser, workspace.parse_mask, temp_buffer);
io_writer.flush();
io_buf io_reader;
io_reader.add_file(VW::io::create_buffer_view(backing_vector->data(), backing_vector->size()));
VW::multi_ex examples;
VW::example dest_ex;
examples.push_back(&dest_ex);
VW::read_example_from_cache(&workspace, io_reader, examples);
EXPECT_EQ(dest_ex.indices.size(), 2);
EXPECT_EQ(dest_ex.feature_space['n'].size(), 3);
EXPECT_EQ(dest_ex.feature_space['s'].size(), 1);
EXPECT_THAT(src_ex.feature_space['s'].values, Pointwise(FloatNear(1e-3f), dest_ex.feature_space['s'].values));
EXPECT_THAT(src_ex.feature_space['s'].indices, Pointwise(Eq(), dest_ex.feature_space['s'].indices));
EXPECT_THAT(src_ex.feature_space['n'].values, Pointwise(FloatNear(1e-3f), dest_ex.feature_space['n'].values));
EXPECT_THAT(src_ex.feature_space['n'].indices, Pointwise(Eq(), dest_ex.feature_space['n'].indices));
EXPECT_FLOAT_EQ(src_ex.l.simple.label, dest_ex.l.simple.label);
VW::finish(workspace);
}
TEST(cache_tests, write_and_read_large_example)
{
auto& workspace = *VW::initialize("--quiet");
VW::example src_ex;
VW::read_line(workspace, &src_ex,
"| example value test a b:0.3 c:0.1 d e f:0.3 g h i j k l m n o p q r s t u v w x y:5.5 z a1 b1:0.343 c1:0.1 d1 "
"e1 f1:0.3 g1 h1 i1 j1 k1 l1 m1 n1 o1 p1 q1 r1 s1 t1 u1 v1 w1 x1 y1:5.5 z1"
"|a example value test a b:0.3 c:0.1 d e f:0.3 g h i j k l m n o p q r s t u v w x y:5.5 z a1 b1:0.343 c1:0.1 d1 "
"e1 f1:0.3 g1 h1 i1 j1 k1 l1 m1 n1 o1 p1 q1 r1 s1 t1 u1 v1 w1 x1 y1:5.5 z1"
"|b example value test a b:0.3 c:0.1 d e f:0.3 g h i j k l m n o p q r s t u v w x y:5.5 z a1 b1:0.343 c1:0.1 d1 "
"e1 f1:0.3 g1 h1 i1 j1 k1 l1 m1 n1 o1 p1 q1 r1 s1 t1 u1 v1 w1 x1 y1:5.5 z1"
"|c example value test a b:0.3 c:0.1 d e f:0.3 g h i j k l m n o p q r s t u v w x y:5.5 z a1 b1:0.343 c1:0.1 d1 "
"e1 f1:0.3 g1 h1 i1 j1 k1 l1 m1 n1 o1 p1 q1 r1 s1 t1 u1 v1 w1 x1 y1:5.5 z1"
"|d example value test a b:0.3 c:0.1 d e f:0.3 g h i j k l m n o p q r s t u v w x y:5.5 z a1 b1:0.343 c1:0.1 d1 "
"e1 f1:0.3 g1 h1 i1 j1 k1 l1 m1 n1 o1 p1 q1 r1 s1 t1 u1 v1 w1 x1 y1:5.5 z1"
"|e example value test a b:0.3 c:0.1 d e f:0.3 g h i j k l m n o p q r s t u v w x y:5.5 z a1 b1:0.343 c1:0.1 d1 "
"e1 f1:0.3 g1 h1 i1 j1 k1 l1 m1 n1 o1 p1 q1 r1 s1 t1 u1 v1 w1 x1 y1:5.5 z1"
"|f example value test a b:0.3 c:0.1 d e f:0.3 g h i j k l m n o p q r s t u v w x y:5.5 z a1 b1:0.343 c1:0.1 d1 "
"e1 f1:0.3 g1 h1 i1 j1 k1 l1 m1 n1 o1 p1 q1 r1 s1 t1 u1 v1 w1 x1 y1:5.5 z1");
auto backing_vector = std::make_shared<std::vector<char>>();
io_buf io_writer;
io_writer.add_file(VW::io::create_vector_writer(backing_vector));
VW::details::cache_temp_buffer temp_buffer;
VW::write_example_to_cache(
io_writer, &src_ex, workspace.example_parser->lbl_parser, workspace.parse_mask, temp_buffer);
io_writer.flush();
io_buf io_reader;
io_reader.add_file(VW::io::create_buffer_view(backing_vector->data(), backing_vector->size()));
VW::multi_ex examples;
VW::example dest_ex;
examples.push_back(&dest_ex);
VW::read_example_from_cache(&workspace, io_reader, examples);
EXPECT_EQ(src_ex.indices.size(), dest_ex.indices.size());
for (auto idx : {' ', 'a', 'b', 'c', 'd', 'e', 'f'})
{
EXPECT_EQ(src_ex.feature_space[idx].size(), dest_ex.feature_space[idx].size());
EXPECT_THAT(src_ex.feature_space[idx].values, Pointwise(FloatNear(1e-3f), dest_ex.feature_space[idx].values));
EXPECT_THAT(src_ex.feature_space[idx].indices, Pointwise(Eq(), dest_ex.feature_space[idx].indices));
}
VW::finish(workspace);
}
TEST(cache_tests, write_and_read_tag)
{
v_array<char> tag;
tag.push_back('m');
tag.push_back('y');
tag.push_back(' ');
tag.push_back('t');
tag.push_back('a');
tag.push_back('g');
auto backing_vector = std::make_shared<std::vector<char>>();
io_buf io_writer;
io_writer.add_file(VW::io::create_vector_writer(backing_vector));
VW::details::cache_tag(io_writer, tag);
io_writer.flush();
io_buf io_reader;
io_reader.add_file(VW::io::create_buffer_view(backing_vector->data(), backing_vector->size()));
v_array<char> read_tag;
VW::details::read_cached_tag(io_reader, read_tag);
EXPECT_THAT(tag, Pointwise(Eq(), read_tag));
}
TEST(cache_tests, write_and_read_index)
{
auto backing_vector = std::make_shared<std::vector<char>>();
io_buf io_writer;
io_writer.add_file(VW::io::create_vector_writer(backing_vector));
VW::namespace_index index = 79;
VW::details::cache_index(io_writer, index);
io_writer.flush();
io_buf io_reader;
io_reader.add_file(VW::io::create_buffer_view(backing_vector->data(), backing_vector->size()));
VW::namespace_index read_index = 0;
VW::details::read_cached_index(io_reader, read_index);
EXPECT_EQ(index, read_index);
}
TEST(cache_tests, write_and_read_features)
{
auto backing_vector = std::make_shared<std::vector<char>>();
io_buf io_writer;
io_writer.add_file(VW::io::create_vector_writer(backing_vector));
uint64_t mask = (1 << 18) - 1;
features feats;
feats.push_back(1.f, 23424542 & mask);
feats.push_back(4.f, 1231987 & mask);
feats.push_back(1.1f, 675 & mask);
feats.push_back(1.34f, 1 & mask);
feats.push_back(1.1f, 567 & mask);
VW::details::cache_features(io_writer, feats, mask);
io_writer.flush();
io_buf io_reader;
io_reader.add_file(VW::io::create_buffer_view(backing_vector->data(), backing_vector->size()));
features read_feats;
bool sorted = false;
VW::details::read_cached_features(io_reader, read_feats, sorted);
EXPECT_EQ(feats.size(), read_feats.size());
for (auto it = feats.begin(), read_it = read_feats.begin(); it != feats.end(); ++it, ++read_it)
{
EXPECT_EQ(it.index(), read_it.index());
EXPECT_FLOAT_EQ(it.value(), read_it.value());
}
}
| 37.164835 | 120 | 0.689237 | HollowMan6 |
7b28229a8e9f3c34ef1117d7f38cc381de7f3050 | 778 | cpp | C++ | Code/src/engine/graphics/fonts/FontContainer.cpp | Thraix/MasterThesis | 4e4cb94b2a4ee261b2b9974aa4b20f6643eb6595 | [
"MIT"
] | 1 | 2021-04-16T10:54:38.000Z | 2021-04-16T10:54:38.000Z | Code/src/engine/graphics/fonts/FontContainer.cpp | Thraix/MasterThesis | 4e4cb94b2a4ee261b2b9974aa4b20f6643eb6595 | [
"MIT"
] | null | null | null | Code/src/engine/graphics/fonts/FontContainer.cpp | Thraix/MasterThesis | 4e4cb94b2a4ee261b2b9974aa4b20f6643eb6595 | [
"MIT"
] | null | null | null | #include "FontContainer.h"
namespace Greet{
FontContainer::FontContainer(const std::string& filename, const std::string& name)
: m_filename(filename),m_data(NULL),m_datasize(0), m_name(name)
{
}
FontContainer::FontContainer(const byte* data, uint datasize, const std::string& name)
: m_filename(""),m_data(data),m_datasize(datasize), m_name(name)
{
}
FontContainer::~FontContainer()
{
auto it = m_fonts.begin();
while(it != m_fonts.end())
{
delete (*it);
it++;
}
m_fonts.clear();
}
Font* FontContainer::GetSize(uint size)
{
auto it = m_fonts.find(size);
if (it == m_fonts.end())
{
Font* font = new Font(this,size);
m_fonts.emplace(font);
return font;
}
return *it;
}
}
| 19.45 | 88 | 0.611825 | Thraix |
7b2c9e3024ec3150c3e3013fa41163fbcf564d2c | 400 | cpp | C++ | engine/source/private/core/threads.cpp | kociap/GameEngine | ff5f1ca589df5b44887c3383919a73bbe0ab05a0 | [
"MIT"
] | 12 | 2019-01-02T11:13:19.000Z | 2020-06-02T10:58:20.000Z | engine/source/private/core/threads.cpp | kociap/GameEngine | ff5f1ca589df5b44887c3383919a73bbe0ab05a0 | [
"MIT"
] | null | null | null | engine/source/private/core/threads.cpp | kociap/GameEngine | ff5f1ca589df5b44887c3383919a73bbe0ab05a0 | [
"MIT"
] | 1 | 2020-04-03T11:54:53.000Z | 2020-04-03T11:54:53.000Z | #include <core/threads.hpp>
#include <thread>
#if defined(_WIN32) || defined(_WIN64)
extern "C" {
// xthreads.h
struct xtime;
void _Thrd_sleep(const xtime*);
}
namespace anton_engine::threads {
void sleep(Timespec const& duration) {
_Thrd_sleep(reinterpret_cast<xtime const*>(&duration));
}
}
#else
#error threads not implemented for non-windows builds.
#endif
| 16.666667 | 63 | 0.6775 | kociap |
7b2cf1c96808f8d5344dd91b27052c554ab513bd | 1,874 | cpp | C++ | test/smoke/vasp1/vasp1.cpp | raramakr/aomp | 9a224fe01ca8eff4209b8b79aa1fa15a18da65db | [
"Apache-2.0"
] | 106 | 2019-02-05T13:07:36.000Z | 2022-03-20T11:15:03.000Z | test/smoke/vasp1/vasp1.cpp | raramakr/aomp | 9a224fe01ca8eff4209b8b79aa1fa15a18da65db | [
"Apache-2.0"
] | 195 | 2019-02-26T23:42:40.000Z | 2022-03-29T10:08:19.000Z | test/smoke/vasp1/vasp1.cpp | raramakr/aomp | 9a224fe01ca8eff4209b8b79aa1fa15a18da65db | [
"Apache-2.0"
] | 27 | 2019-05-17T10:33:28.000Z | 2022-03-25T16:17:48.000Z | #include <omp.h>
#include <stdio.h>
#include <sched.h>
int main( int argc, char **argv){
int execution_space_gpu = 0;
if (argc > 1){
fprintf(stderr,"argv[0] = %s\n",argv[1]);
execution_space_gpu = atoi(argv[1]);
}
int N = 1024*1024*100;
int Niter = 10;
float *A, *B;
A = new float[N];
B = new float[N];
double GB = (double)N * sizeof(float) / (1024.0*1024.0*1024.0);
int ndevices = omp_get_device_num();
printf("ndevices= %d\n",ndevices);
int cpuid[omp_get_max_threads()];
#pragma omp parallel
{
cpuid[ omp_get_thread_num()] = sched_getcpu();
}
for (int i=0; i < omp_get_max_threads(); ++i)
printf("tid = %d, cpuid = %d\n",i,cpuid[i]);
#pragma omp parallel for
for (int i=0; i < N; ++i){
A[i] = i*0.0001;
B[i] = 0.0;
}
#pragma omp target enter data map(to:A[0:N],B[0:N]) if(execution_space_gpu)
#pragma omp target teams distribute parallel for thread_limit(512) num_teams(120*10) schedule(static,1) if(target:execution_space_gpu)
for (int i=0; i < N; ++i) B[i] = omp_get_thread_num();
#if 1
#pragma omp target update from(B[0:N]) if(execution_space_gpu)
for (int i=0; i < 70*1; i+=1)
printf(" B[%d] = %g\n",i,B[i]);
#endif
double t1 = omp_get_wtime();
for (int iter = 0 ; iter < Niter; ++iter){
#pragma omp target teams distribute parallel for if(target:execution_space_gpu)
for (int i=0; i < N; ++i) B[i] = A[i];
}
double t2 = omp_get_wtime();
printf("memcpy time = %g [s] BW = %g [GB/s]\n",(t2-t1)/Niter, 2.0*GB/((t2-t1)/Niter));
#pragma omp target exit data map(release:A[0:N]) map(from:B[0:N]) if(execution_space_gpu)
for (int i=0; i < 10; ++i)
printf("A[%d] = %g, B[%d] = %g\n",i,A[i],i,B[i]);
delete[] A;
delete[] B;
return 0;
}
| 26.394366 | 142 | 0.566169 | raramakr |
7b316fc3f854138c2fe13cd184a97734b67f18f2 | 14,421 | cc | C++ | ge/graph/manager/graph_caching_allocator.cc | mindspore-ai/graphengine | 460406cbd691b963d125837f022be5d8abd1a637 | [
"Apache-2.0"
] | 207 | 2020-03-28T02:12:50.000Z | 2021-11-23T18:27:45.000Z | ge/graph/manager/graph_caching_allocator.cc | mindspore-ai/graphengine | 460406cbd691b963d125837f022be5d8abd1a637 | [
"Apache-2.0"
] | 4 | 2020-04-17T07:32:44.000Z | 2021-06-26T04:55:03.000Z | ge/graph/manager/graph_caching_allocator.cc | mindspore-ai/graphengine | 460406cbd691b963d125837f022be5d8abd1a637 | [
"Apache-2.0"
] | 13 | 2020-03-28T02:52:26.000Z | 2021-07-03T23:12:54.000Z | /**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "graph/manager/graph_caching_allocator.h"
#include <set>
#include <string>
#include <utility>
#include "graph/manager/graph_mem_manager.h"
namespace ge {
const size_t bin_ranges[kNumBins] = {kRoundBlockSize * kKByteSize,
kBinSizeUnit8 * kMByteSize,
kBinSizeUnit32 * kMByteSize,
kBinSizeUnit128 * kMByteSize,
kBinSizeUnit256 * kMByteSize,
kBinSizeUnit512 * kMByteSize,
kGByteSize};
static bool BlockComparator(const Block *left, const Block *right) {
if (left->size != right->size) {
return left->size < right->size;
}
return reinterpret_cast<uintptr_t>(left->ptr) < reinterpret_cast<uintptr_t>(right->ptr);
}
bool CanMerge(Block *block) {
if ((block == nullptr) || block->allocated || !block->IsSplit()) {
return false;
}
return true;
}
size_t GetBinIndex(size_t size) {
size_t index = 0;
for (auto range : bin_ranges) {
if (size <= range) {
break;
}
index++;
}
if (index > kNumBins - 1) {
index = kNumBins - 1;
}
return index;
}
size_t GetAllocationSize(size_t size) {
size_t index = GetBinIndex(size);
if (bin_ranges[index] >= size) {
return bin_ranges[index];
}
return kGByteSize * ((size + kGByteSize - 1) / kGByteSize);
}
///
/// @ingroup ge_graph
/// @brief block size based on alignment
/// @param [in] original malloc size
/// @return allocation size
///
size_t GetBlockSize(size_t size) {
if (size == 0) {
return kRoundBlockSize;
}
return kRoundBlockSize * ((size + kRoundBlockSize - 1) / kRoundBlockSize);
}
bool ShouldSplit(const Block *block, size_t size) {
return static_cast<double>(size) <= (static_cast<double>(block->size) * kSplitThreshold);
}
void IncreaseCount(std::map<size_t, size_t> &count, size_t size) {
auto it = count.find(size);
if (it == count.end()) {
count.emplace(size, 1);
} else {
it->second++;
}
}
CachingAllocator::CachingAllocator(rtMemType_t memory_type)
: memory_type_(memory_type), memory_allocator_(nullptr), called_malloc_counts_(0), called_free_counts_(0) {
for (uint32_t i = 0; i < kNumBins; i++) {
free_block_bins_[i] = nullptr;
}
}
Status CachingAllocator::Initialize(uint32_t device_id) {
GELOGI("Device id %u", device_id);
// when redo Initialize free old memory
FreeBlocks();
std::lock_guard<std::recursive_mutex> lock(mutex_);
for (uint32_t i = 0; i < kNumBins; i++) {
if (free_block_bins_[i] != nullptr) {
continue;
}
auto bin_ptr = new (std::nothrow) BlockBin(BlockComparator);
if (bin_ptr == nullptr) {
REPORT_CALL_ERROR("E19999", "New BlockBin fail, device_id:%u", device_id);
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[Alloc][BlockBin] failed, device_id:%u", device_id);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
free_block_bins_[i] = bin_ptr;
}
memory_allocator_ = &MemManager::Instance().MemInstance(memory_type_);
if (memory_allocator_ == nullptr) {
return ACL_ERROR_GE_INTERNAL_ERROR;
}
called_malloc_counts_ = 0;
called_free_counts_ = 0;
return ge::SUCCESS;
}
void CachingAllocator::Finalize(uint32_t device_id) {
GELOGI("Device id %u", device_id);
PrintStatics();
FreeBlocks();
FreeBlockBins();
}
uint8_t *CachingAllocator::Malloc(size_t size, uint8_t *org_ptr, uint32_t device_id) {
GELOGI("Start malloc pool memory, size = %zu, device id = %u", size, device_id);
called_malloc_counts_++;
size = GetBlockSize(size);
uint8_t *ptr = nullptr;
Block *block = FindFreeBlock(size, org_ptr, device_id);
if (block == nullptr) {
std::lock_guard<std::recursive_mutex> lock(mutex_);
if (ge::SUCCESS == TryExtendCache(size, device_id)) {
block = FindFreeBlock(size, org_ptr, device_id);
if (block != nullptr) {
ptr = block->ptr;
}
}
} else {
ptr = block->ptr;
}
if (ptr == nullptr) {
REPORT_INNER_ERROR("E19999", "FindFreeBlock fail, size:%zu, device_id:%u", size, device_id);
GELOGE(FAILED, "[Check][Param] FindFreeBlock failed device id = %u, size= %zu", device_id, size);
}
return ptr;
}
Status CachingAllocator::Free(uint8_t *ptr, uint32_t device_id) {
GELOGI("Free device id = %u", device_id);
called_free_counts_++;
if (ptr == nullptr) {
REPORT_INNER_ERROR("E19999", "Param ptr is nullptr, device_id:%u, check invalid", device_id);
GELOGE(PARAM_INVALID, "[Check][Param] Invalid memory pointer, device_id:%u", device_id);
return ge::PARAM_INVALID;
}
std::lock_guard<std::recursive_mutex> lock(mutex_);
auto it = allocated_blocks_.find(ptr);
if (it == allocated_blocks_.end()) {
REPORT_INNER_ERROR("E19999", "Param ptr not allocated before, device_id:%u, check invalid", device_id);
GELOGE(PARAM_INVALID, "[Check][Param] Param ptr not allocated before, device_id:%u", device_id);
return ge::PARAM_INVALID;
}
Block *block = it->second;
allocated_blocks_.erase(it);
FreeBlock(block);
return ge::SUCCESS;
}
void CachingAllocator::FreeBlock(Block *block) {
if ((block == nullptr) || !block->allocated) {
return;
}
GELOGI("Free block size = %zu", block->size);
std::lock_guard<std::recursive_mutex> lock(mutex_);
block->allocated = false;
auto &bin = *block->bin;
Block *merge_blocks[] = {block->prev, block->next};
for (Block *merge_block : merge_blocks) {
MergeBlocks(block, merge_block, bin);
}
bin.insert(block);
}
void CachingAllocator::MergeBlocks(Block *dst, Block *src, BlockBin &bin) {
if (!CanMerge(src) || !CanMerge(dst)) {
return;
}
if (dst->prev == src) {
dst->ptr = src->ptr;
dst->prev = src->prev;
if (dst->prev != nullptr) {
dst->prev->next = dst;
}
} else {
dst->next = src->next;
if (dst->next != nullptr) {
dst->next->prev = dst;
}
}
dst->size += src->size;
bin.erase(src);
delete src;
}
BlockBin *CachingAllocator::GetBlockBin(size_t size) {
size_t index = GetBinIndex(size);
return free_block_bins_[index];
}
Block *CachingAllocator::FindFreeBlock(size_t size, uint8_t *org_ptr, uint32_t device_id) {
Block key(device_id, size, org_ptr);
BlockBin *bin = GetBlockBin(size);
if (bin == nullptr) {
REPORT_INNER_ERROR("E19999", "GetBlockBin fail, size:%zu, device_id:%u", size, device_id);
GELOGE(ge::FAILED, "[Get][BlockBin] failed, size:%zu, device_id:%u", size, device_id);
return nullptr;
}
std::lock_guard<std::recursive_mutex> lock(mutex_);
auto it = bin->lower_bound(&key);
if (it != bin->end()) {
Block *block = *it;
bin->erase(it);
if (block != nullptr) {
GELOGI("Find block size = %zu", block->size);
if (ShouldSplit(block, size)) {
block = SplitBlock(block, size, *bin, device_id);
}
if (block->ptr != nullptr) {
block->allocated = true;
allocated_blocks_[block->ptr] = block;
GELOGI("Malloc device id = %u, size= %zu", device_id, size);
}
}
return block;
}
return nullptr;
}
Block *CachingAllocator::SplitBlock(Block *block, size_t size, BlockBin &bin, uint32_t device_id) {
// block has been checked, should not be nullptr
Block *remaining = block;
Block *new_block = new (std::nothrow) Block(device_id, size, &bin, block->ptr);
if (new_block == nullptr) {
REPORT_CALL_ERROR("E19999", "New Block fail, size:%zu, device_id:%u", size, device_id);
GELOGE(ge::FAILED, "[Alloc][Block] failed, size:%zu, device_id:%u", size, device_id);
return block;
}
new_block->prev = remaining->prev;
if (new_block->prev != nullptr) {
new_block->prev->next = new_block;
}
new_block->next = remaining;
remaining->prev = new_block;
remaining->ptr = remaining->ptr + size;
remaining->size -= size;
bin.insert(remaining);
return new_block;
}
Status CachingAllocator::TryExtendCache(size_t size, uint32_t device_id) {
GELOGI("Try to extend cache. size = %zu, device id = %u", size, device_id);
auto memory_size = GetAllocationSize(size);
const std::string purpose = "Memory for caching.";
auto memory_addr = memory_allocator_->MallocMemory(purpose, memory_size, device_id);
// try to free caches and malloc again when malloc memory failed
if (memory_addr == nullptr) {
size_t free_cached_memory_size = FreeCachedBlocks();
memory_addr = memory_allocator_->MallocMemory(purpose, memory_size, device_id);
if (memory_addr == nullptr) {
GELOGE(ge::FAILED, "[Malloc][Memory] failed, no enough memory for size = %zu, device_id = %u", memory_size,
device_id);
PrintStatics(DLOG_ERROR);
return ge::FAILED;
}
GELOGT(TRACE_RUNNING, "Try to free cached memory size:%zu and malloc memory size:%zu success.",
free_cached_memory_size, memory_size);
}
if (AddToBlockBin(memory_addr, memory_size, device_id) != ge::SUCCESS) {
(void)memory_allocator_->FreeMemory(memory_addr);
return ge::FAILED;
}
PrintStatics();
return ge::SUCCESS;
}
Status CachingAllocator::AddToBlockBin(uint8_t *ptr, size_t size, uint32_t device_id) {
BlockBin *bin = GetBlockBin(size);
if (bin == nullptr) {
REPORT_INNER_ERROR("E19999", "GetBlockBin fail, size:%zu, device_id:%u", size, device_id);
GELOGE(ge::FAILED, "[Get][BlockBin] failed, size:%zu, device_id:%u", size, device_id);
return ge::FAILED;
}
Block *block = new (std::nothrow) Block(device_id, size, bin, nullptr);
if (block == nullptr) {
REPORT_CALL_ERROR("E19999", "New Block fail, size:%zu, device_id:%u", size, device_id);
GELOGE(ge::FAILED, "[Alloc][Block] failed, size:%zu, device_id:%u", size, device_id);
return ge::FAILED;
}
GELOGI("Block size = %zu", size);
block->ptr = ptr;
block->size = size;
std::lock_guard<std::recursive_mutex> lock(mutex_);
IncreaseCount(malloced_memory_, block->size);
bin->insert(block);
return ge::SUCCESS;
}
size_t CachingAllocator::FreeCachedBlocks() {
GELOGI("Free cached blocks");
std::lock_guard<std::recursive_mutex> lock(mutex_);
size_t free_cached_memory_size = 0;
for (uint32_t i = 0; i < kNumBins; i++) {
auto pool = free_block_bins_[i];
if (pool == nullptr) {
continue;
}
for (auto it = pool->begin(); it != pool->end();) {
Block *block = *it;
// free block memory that has not been split
if ((block != nullptr) && (block->ptr != nullptr) &&
(block->prev == nullptr) && (block->next == nullptr) &&
(memory_allocator_->FreeMemory(block->ptr) == ge::SUCCESS)) {
auto itcount = malloced_memory_.find(block->size);
free_cached_memory_size += block->size;
if (itcount != malloced_memory_.end()) {
itcount->second--;
if (itcount->second == 0) {
malloced_memory_.erase(itcount);
}
}
pool->erase(it++);
delete block;
continue;
}
++it;
}
}
return free_cached_memory_size;
}
void CachingAllocator::FreeBlocks() {
GELOGI("Free blocks.");
std::lock_guard<std::recursive_mutex> lock(mutex_);
// free allocated blocks and put to cache
for (auto &it : allocated_blocks_) {
FreeBlock(it.second);
}
allocated_blocks_.clear();
(void) FreeCachedBlocks();
}
void CachingAllocator::TryFreeBlocks() {
GELOGI("Try free blocks.");
std::lock_guard<std::recursive_mutex> lock(mutex_);
if (allocated_blocks_.empty()) {
(void) FreeCachedBlocks();
}
}
void CachingAllocator::FreeBlockBins() {
GELOGI("Free block bins.");
std::lock_guard<std::recursive_mutex> lock(mutex_);
for (uint32_t i = 0; i < kNumBins; i++) {
if (free_block_bins_[i] != nullptr) {
delete free_block_bins_[i];
free_block_bins_[i] = nullptr;
}
}
}
void PrintCount(std::map<size_t, size_t> &count, const std::string &name, size_t total_size, size_t total_count) {
GEEVENT("%6s total[size:%11zu count:%11zu].", name.c_str(), total_size, total_count);
for (auto &it : count) {
GEEVENT(" |- block[size:%11zu count:%11zu].", it.first, it.second);
}
}
void CachingAllocator::PrintStatics(int32_t level) {
if (!IsLogEnable(GE_MODULE_NAME, level)) {
return;
}
size_t total_using_size = 0;
size_t total_using_count = 0;
size_t total_free_size = 0;
size_t total_free_count = 0;
size_t total_malloc_size = 0;
size_t total_malloc_count = 0;
std::map<size_t, size_t> using_block_stat;
std::map<size_t, size_t> free_block_stat;
std::map<size_t, size_t> malloc_block_stat;
do {
std::lock_guard<std::recursive_mutex> lock(mutex_);
for (uint32_t i = 0; i < kNumBins; i++) {
auto pool = free_block_bins_[i];
if (pool == nullptr) {
continue;
}
for (auto it = pool->begin(); it != pool->end(); it++) {
if ((*it) != nullptr) {
total_free_size += (*it)->size;
IncreaseCount(free_block_stat, (*it)->size);
total_free_count++;
}
}
}
for (auto &it : allocated_blocks_) {
if (it.second != nullptr) {
total_using_size += it.second->size;
IncreaseCount(using_block_stat, it.second->size);
total_using_count++;
}
}
for (auto &it : malloced_memory_) {
total_malloc_size += it.first * it.second;
total_malloc_count += it.second;
malloc_block_stat[it.first] = it.second;
}
} while (0);
GEEVENT("Called counts[malloc:%11zu free:%11zu].", called_malloc_counts_.load(), called_free_counts_.load());
PrintCount(malloc_block_stat, "Malloc", total_malloc_size, total_malloc_count);
PrintCount(using_block_stat, "Using", total_using_size, total_using_count);
PrintCount(free_block_stat, "Free", total_free_size, total_free_count);
}
} // namespace ge
| 32.11804 | 114 | 0.654324 | mindspore-ai |
7b343a347a312d83c60063c15f44403f4a925f51 | 22,936 | cpp | C++ | comp477-a1/src/glMain.cpp | aidendeom/comp477-assignment | a2b4d752c40c23416f518047e92c29beaacf5e2c | [
"MIT"
] | null | null | null | comp477-a1/src/glMain.cpp | aidendeom/comp477-assignment | a2b4d752c40c23416f518047e92c29beaacf5e2c | [
"MIT"
] | null | null | null | comp477-a1/src/glMain.cpp | aidendeom/comp477-assignment | a2b4d752c40c23416f518047e92c29beaacf5e2c | [
"MIT"
] | null | null | null | #ifdef __APPLE__
#include <GLUT/glut.h>
#else
#ifdef _WIN32
#include "GL/glut.h"
#else
#include <GL/freeglut.h>
#endif
#endif
#include <iostream>
#include <fstream>
#include <cmath>
#include <cstring>
#include <csignal>
#include <chrono>
#include <functional>
#include <string>
#include <Shlwapi.h>
#include <Windows.h>
#include "skeleton.h"
#include "defMesh.h"
#include "AnimationMode.h"
#include "Quatf.h"
using namespace std;
// Constants
const string ANIMATIONS_PATH{ "resources/animations/" };
const float durationDelta = 0.25f;
const float durationMin = 0.1f;
const float durationMax = 5.0f;
// Function declarations
auto displayInstructions() -> void;
auto nextKeyFrameEdit() -> void;
auto prevKeyFrameEdit() -> void;
auto captureCurrentPose() -> void;
auto nextKeyFramePlayback() -> void;
auto prevKeyFramePlayback() -> void;
auto chooseInterpFunction(char c) -> void;
auto matLerp(const Quatf& from, const Quatf& to, float t) -> Quatf;
auto eurlerAngleLerp(const Quatf& from, const Quatf& to, float t) -> Quatf;
auto increaseSpeed() -> void;
auto decreaseSpeed() -> void;
auto clampSpeed() -> void;
auto saveCurrentAnimation() -> void;
auto loadAnimation() -> void;
auto togglePlayAnimation() -> void;
auto playAnimation() -> void;
auto stopAnimation() -> void;
auto updateCurrentFrame() -> void;
float animDuration = 1.0f;
bool playingAnimation = false;
bool loopAnimation = false;
AnimationMode animationMode{ AnimationMode::Edit };
//Create Mesh
DefMesh myDefMesh;
//Switches
int meshModel=0;
bool drawSkeleton=true;
//Window parameters
int width = 1024;
int height = 768;
///* Ortho (if used) */
double _left = 0.0; /* ortho view volume params */
double _right = 0.0;
double _bottom = 0.0;
double _top = 0.0;
double _zNear = 0.1;
double _zFar = 50.0;
double fovy = 45.0;
double prev_z = 0;
//Model matrices
double _matrix[16];
double _matrixI[16];
/* Mouse Interface */
int _mouseX = 0; /* mouse control variables */
int _mouseY = 0;
bool _mouseLeft = false;
bool _mouseMiddle = false;
bool _mouseRight = false;
double _dragPosX = 0.0;
double _dragPosY = 0.0;
double _dragPosZ = 0.0;
double vlen(double x, double y, double z)
{
return sqrt(x * x + y * y + z * z);
}
float getAngle(Vector2f v1, Vector2f v2)
{
auto l1 = v1.lengthSq();
auto l2 = v2.lengthSq();
// Make sure we don't divide by zero
if (std::abs(l1) < EPSILON || std::abs(l2) < EPSILON)
return 0;
v1.normalize();
v2.normalize();
auto angle = std::acos(Vector2f::dot(v1, v2));
auto orientation = (v1.x * v2.y) - (v2.x * v1.y);
if (orientation > 0)
angle = -angle;
// Radians to degrees
return static_cast<float>(angle * (180.0 / M_PI));
}
Vector3f getEyePosition()
{
auto& m = _matrix;
return -Vector3f
{
static_cast<float>(m[12]),
static_cast<float>(m[13]),
static_cast<float>(m[14])
};
}
Vector3f getEyeDirection()
{
Matrix4d m{ _matrixI };
Vector4d forward{ 0, 0, -1, 0 };
Vector4f f{ m * forward };
return Vector3f{ f.x, f.y, f.z };
}
void invertMatrix(const GLdouble * m, GLdouble * out)
{
/* NB. OpenGL Matrices are COLUMN major. */
#define MAT(m,r,c) (m)[(c)*4+(r)]
/* Here's some shorthand converting standard (row,column) to index. */
#define m11 MAT(m,0,0)
#define m12 MAT(m,0,1)
#define m13 MAT(m,0,2)
#define m14 MAT(m,0,3)
#define m21 MAT(m,1,0)
#define m22 MAT(m,1,1)
#define m23 MAT(m,1,2)
#define m24 MAT(m,1,3)
#define m31 MAT(m,2,0)
#define m32 MAT(m,2,1)
#define m33 MAT(m,2,2)
#define m34 MAT(m,2,3)
#define m41 MAT(m,3,0)
#define m42 MAT(m,3,1)
#define m43 MAT(m,3,2)
#define m44 MAT(m,3,3)
GLdouble det;
GLdouble d12, d13, d23, d24, d34, d41;
GLdouble tmp[16]; /* Allow out == in. */
/* Inverse = adjoint / det. (See linear algebra texts.) */
/* pre-compute 2x2 dets for last two rows when computing */
/* cofactors of first two rows. */
d12 = (m31 * m42 - m41 * m32);
d13 = (m31 * m43 - m41 * m33);
d23 = (m32 * m43 - m42 * m33);
d24 = (m32 * m44 - m42 * m34);
d34 = (m33 * m44 - m43 * m34);
d41 = (m34 * m41 - m44 * m31);
tmp[0] = (m22 * d34 - m23 * d24 + m24 * d23);
tmp[1] = -(m21 * d34 + m23 * d41 + m24 * d13);
tmp[2] = (m21 * d24 + m22 * d41 + m24 * d12);
tmp[3] = -(m21 * d23 - m22 * d13 + m23 * d12);
/* Compute determinant as early as possible using these cofactors. */
det = m11 * tmp[0] + m12 * tmp[1] + m13 * tmp[2] + m14 * tmp[3];
/* Run singularity test. */
if (det == 0.0) {
/* printf("invert_matrix: Warning: Singular matrix.\n"); */
/* memcpy(out,_identity,16*sizeof(double)); */
} else {
GLdouble invDet = 1.0 / det;
/* Compute rest of inverse. */
tmp[0] *= invDet;
tmp[1] *= invDet;
tmp[2] *= invDet;
tmp[3] *= invDet;
tmp[4] = -(m12 * d34 - m13 * d24 + m14 * d23) * invDet;
tmp[5] = (m11 * d34 + m13 * d41 + m14 * d13) * invDet;
tmp[6] = -(m11 * d24 + m12 * d41 + m14 * d12) * invDet;
tmp[7] = (m11 * d23 - m12 * d13 + m13 * d12) * invDet;
/* Pre-compute 2x2 dets for first two rows when computing */
/* cofactors of last two rows. */
d12 = m11 * m22 - m21 * m12;
d13 = m11 * m23 - m21 * m13;
d23 = m12 * m23 - m22 * m13;
d24 = m12 * m24 - m22 * m14;
d34 = m13 * m24 - m23 * m14;
d41 = m14 * m21 - m24 * m11;
tmp[8] = (m42 * d34 - m43 * d24 + m44 * d23) * invDet;
tmp[9] = -(m41 * d34 + m43 * d41 + m44 * d13) * invDet;
tmp[10] = (m41 * d24 + m42 * d41 + m44 * d12) * invDet;
tmp[11] = -(m41 * d23 - m42 * d13 + m43 * d12) * invDet;
tmp[12] = -(m32 * d34 - m33 * d24 + m34 * d23) * invDet;
tmp[13] = (m31 * d34 + m33 * d41 + m34 * d13) * invDet;
tmp[14] = -(m31 * d24 + m32 * d41 + m34 * d12) * invDet;
tmp[15] = (m31 * d23 - m32 * d13 + m33 * d12) * invDet;
memcpy(out, tmp, 16 * sizeof(GLdouble));
}
#undef m11
#undef m12
#undef m13
#undef m14
#undef m21
#undef m22
#undef m23
#undef m24
#undef m31
#undef m32
#undef m33
#undef m34
#undef m41
#undef m42
#undef m43
#undef m44
#undef MAT
}
void screenToWorldPos(double *px, double *py, double *pz, const int x, const int y,
const int *viewport)
{
/*
Use the ortho projection and viewport information
to map from mouse co-ordinates back into world
co-ordinates
*/
*px = (double) (x - viewport[0]) / (double) (viewport[2]);
*py = (double) (y - viewport[1]) / (double) (viewport[3]);
*px = _left + (*px) * (_right - _left);
*py = _top + (*py) * (_bottom - _top);
*pz = _zNear;
}
void getMatrix()
{
glGetDoublev(GL_MODELVIEW_MATRIX, _matrix);
invertMatrix(_matrix, _matrixI);
}
void init()
{
//OpenGL initialize functions goes here
/*glutInitContextVersion(4, 2);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutInitContextFlags(GLUT_DEBUG);
std::cout<<"Vendor: "<<glGetString(GL_VENDOR)<<std::endl;
std::cout<<"Version: "<<glGetString(GL_VERSION)<<std::endl;
std::cout<<"GLSL: "<<glGetString(GL_SHADING_LANGUAGE_VERSION)<<std::endl;*/
//Light values and coordinates
GLfloat ambientLight[] = { 0.3f, 0.3f, 0.3f, 1.0f };
GLfloat diffuseLight[] = { 0.7f, 0.7f, 0.7f, 1.0f };
GLfloat lightPos[] = {20.0f, 20.0f, 50.0f};
glEnable(GL_DEPTH_TEST);
glFrontFace(GL_CCW);
//glEnable(GL_CULL_FACE);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
// Hidden surface removal // Counterclockwise polygons face out // Do not calculate inside of jet // Enable lighting
glEnable(GL_LIGHTING);
// Set up and enable light 0
glLightfv(GL_LIGHT0,GL_AMBIENT,ambientLight);
glLightfv(GL_LIGHT0,GL_DIFFUSE,diffuseLight);
glEnable(GL_LIGHT0);
// Enable color tracking
glEnable(GL_COLOR_MATERIAL);
// Set material properties to follow glColor values
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);
glClearColor(0.2f, 0.2f, 0.2f, 3.0f );
//Rescale normals to unit length
glEnable(GL_NORMALIZE);
glLightfv(GL_LIGHT0,GL_POSITION,lightPos);
glShadeModel(GL_FLAT);
getMatrix(); //Init matrix
//Translate camera
glPushMatrix();
glLoadIdentity();
glTranslatef(0,0,-5.0);
glMultMatrixd(_matrix);
getMatrix();
glPopMatrix();
}
void changeSize(int w, int h)
{
//GLfloat aspectRatio;
//if(h==0)
// h = 1;
//glViewport(0, 0, w, h);
//glMatrixMode(GL_PROJECTION);
//glLoadIdentity();
//aspectRatio = (GLfloat)w / (GLfloat)h;
//gluPerspective(45.0f, aspectRatio, 1.0f, 900.0f); //using perspective
//
//glMatrixMode(GL_MODELVIEW);
//glLoadIdentity();
glViewport(0, 0, w, h);
_top = 1.0;
_bottom = -1.0;
_left = -(double) w / (double) h;
_right = -_left;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
/* glOrtho(_left,_right,_bottom,_top,_zNear,_zFar); Ortho */
gluPerspective(fovy, (double) w / (double) h, _zNear, _zFar); /* PErspective for stereo */
glMatrixMode(GL_MODELVIEW);
}
void timerFunction(int value)
{
glutTimerFunc(10,timerFunction,1);
glutPostRedisplay();
}
void handleKeyPress(unsigned char key, int x, int y)
{
int i{ 0 }; // have to init here, or compiler will complain
switch(key)
{
case 'n':
i = static_cast<int>(animationMode);
i++;
i = i % static_cast<int>(AnimationMode::Count);
animationMode = static_cast<AnimationMode>(i);
cout << getAnimationModeString(animationMode) << " mode activated" << endl;
break;
case '=':
if (animationMode == AnimationMode::Edit)
nextKeyFrameEdit();
else if (animationMode == AnimationMode::Playback)
nextKeyFramePlayback();
break;
case '-':
if (animationMode == AnimationMode::Edit)
prevKeyFrameEdit();
else if (animationMode == AnimationMode::Playback)
prevKeyFramePlayback();
break;
case 13: // Enter key
if (animationMode == AnimationMode::Edit)
captureCurrentPose();
break;
case 'j':
decreaseSpeed();
break;
case 'k':
increaseSpeed();
break;
case '1':
case '2':
case '3':
case '4':
chooseInterpFunction(key);
break;
case 's':
saveCurrentAnimation();
break;
case 'l':
loadAnimation();
break;
case 'p':
togglePlayAnimation();
break;
case 'o':
loopAnimation = !loopAnimation;
cout << "Loop animation: " << (loopAnimation ? "true" : "false") << endl;
break;
case 'm':
meshModel = (meshModel + 1) % 3; break;
case 27: // ESC key
case 'q':
exit(0);
break;
//default:
//cout << key << endl;
}
}
auto nextKeyFrameEdit() -> void
{
// Alias to the list of keyframes
auto& frames = myDefMesh.mySkeleton.animation.keyframes;
auto& idx = myDefMesh.mySkeleton.currentFrameIdx;
// It's at the last, add a new frame
if (idx == frames.size() - 1)
{
idx++;
cout << "Adding new key frame " << idx + 1 << endl;
auto frameCopy = frames.back();
frames.push_back(frameCopy);
}
else
{
idx++;
auto& f = frames[idx];
myDefMesh.mySkeleton.setPose(f);
cout << "Frame " << idx + 1 << "/" << frames.size() << endl;
}
}
auto prevKeyFrameEdit() -> void
{
auto& frames = myDefMesh.mySkeleton.animation.keyframes;
auto& idx = myDefMesh.mySkeleton.currentFrameIdx;
if (idx > 0)
{
idx--;
auto& f = frames[idx];
myDefMesh.mySkeleton.setPose(f);
cout << "Frame " << idx + 1 << "/" << frames.size() << endl;
}
}
auto captureCurrentPose() -> void
{
auto& skel = myDefMesh.mySkeleton;
auto& anim = skel.animation;
auto& idx = skel.currentFrameIdx;
auto& frame = anim.keyframes[idx];
frame.capture(skel);
cout << (idx + 1) << "/" << anim.keyframes.size() << " captured" << endl;
}
auto nextKeyFramePlayback() -> void
{
auto& frames = myDefMesh.mySkeleton.animation.keyframes;
auto& idx = myDefMesh.mySkeleton.currentFrameIdx;
if (idx < frames.size() - 1)
{
auto& skel = myDefMesh.mySkeleton;
skel.from = &frames[idx];
idx++;
skel.to = &frames[idx];
skel.time = 0;
skel.duration = animDuration;
}
}
auto prevKeyFramePlayback() -> void
{
auto& frames = myDefMesh.mySkeleton.animation.keyframes;
auto& idx = myDefMesh.mySkeleton.currentFrameIdx;
if (idx > 0)
{
auto& skel = myDefMesh.mySkeleton;
skel.from = &frames[idx];
idx--;
skel.to = &frames[idx];
skel.time = 0;
skel.duration = animDuration;
}
}
auto increaseSpeed() -> void
{
animDuration -= durationDelta;
clampSpeed();
}
auto decreaseSpeed() -> void
{
animDuration += durationDelta;
clampSpeed();
}
auto clampSpeed() -> void
{
animDuration = max(durationMin, min(animDuration, durationMax));
cout << "Duration is now " << animDuration << endl;
}
auto saveCurrentAnimation() -> void
{
string filename;
cout << "== SAVE ==\n"
<< "Enter filename: ";
cin >> filename;
string filepath = ANIMATIONS_PATH + filename;
wstring wfilepath{ begin(filepath), end(filepath) };
BOOL exists = PathFileExists(wfilepath.c_str());
while (exists)
{
cout << "That file already exists.\nChoose a different name: ";
cin >> filename;
filepath = ANIMATIONS_PATH + filename;
wfilepath = wstring{ begin(filepath), end(filepath) };
exists = PathFileExists(wfilepath.c_str());
}
myDefMesh.mySkeleton.animation.saveToFile(filepath);
cout << "Animation saved to " << filepath << endl;
}
auto loadAnimation() -> void
{
string filename;
cout << "== LOAD ==\n"
<< "Enter filename: ";
cin >> filename;
string filepath = ANIMATIONS_PATH + filename;
wstring wfilepath{ begin(filepath), end(filepath) };
BOOL exists = PathFileExists(wfilepath.c_str());
while (!exists)
{
cout << "That file does not exists.\nEnter filename: ";
cin >> filename;
filepath = ANIMATIONS_PATH + filename;
wfilepath = wstring{ begin(filepath), end(filepath) };
exists = PathFileExists(wfilepath.c_str());
}
Animation anim = Animation::loadFromFile(filepath);
auto& skel = myDefMesh.mySkeleton;
skel.animation = anim;
skel.currentFrameIdx = 0;
skel.resetAnimParams();
cout << "Animation " << filename << " loaded successfully" << endl;
}
auto togglePlayAnimation() -> void
{
if (animationMode != AnimationMode::Playback)
return;
if (!playingAnimation)
playAnimation();
else
stopAnimation();
}
auto playAnimation() -> void
{
auto& skel = myDefMesh.mySkeleton;
skel.currentFrameIdx = 0;
playingAnimation = true;
cout << "Playing animation" << endl;
}
auto stopAnimation() -> void
{
playingAnimation = false;
cout << "Stopping animation" << endl;
}
auto updateCurrentFrame() -> void
{
if (!playingAnimation)
return;
auto& skel = myDefMesh.mySkeleton;
// It's time to play next frame
if (skel.from == nullptr && skel.to == nullptr)
nextKeyFramePlayback();
if (skel.currentFrameIdx >= skel.animation.keyframes.size() - 1
&& skel.from == nullptr && skel.to == nullptr)
{
if (loopAnimation)
{
auto& frames = skel.animation.keyframes;
auto& idx = skel.currentFrameIdx;
skel.from = &frames[idx];
idx = 0;
skel.to = &frames[idx];
skel.time = 0;
skel.duration = animDuration;
skel.currentFrameIdx = 0;
}
else
stopAnimation();
}
}
// Selects the quaternion interpolation routine used during playback.
// Actual key bindings: '1' = matrix lerp, '2' = euler-angle lerp,
// '3' = quaternion lerp, '4' = slerp.
// NOTE(review): this mapping disagrees with the help text printed by
// displayInstructions() — confirm which ordering is intended.
// An unrecognized key leaves the interpolation function unchanged and
// prints an empty line.
auto chooseInterpFunction(char c) -> void
{
    auto& f = myDefMesh.mySkeleton.interpFunction;
    string msg;
    switch (c)
    {
    case '1':
        msg = "Using matrix lerp";
        f = matLerp;
        break;
    case '2':
        msg = "Using euler angle lerp";
        f = eurlerAngleLerp;
        break;
    case '3':
        f = Quatf::lerp;
        msg = "Using lerp";
        break;
    case '4':
        f = Quatf::slerp;
        msg = "Using slerp";
        break;
    }
    cout << msg << endl;
}
// Linear interpolation performed in rotation-matrix space: both quaternions
// are converted to 4x4 matrices, blended component-wise with t clamped to
// [0, 1], and the blend is converted back into a quaternion.
auto matLerp(const Quatf& from, const Quatf& to, float t) -> Quatf
{
    const float w = max(0.0f, min(t, 1.0f));
    const auto blended = from.mat4() * (1 - w) + to.mat4() * w;
    return Quatf::fromMat4(blended);
}
// Linear interpolation performed on the euler-angle representation: angles
// are blended component-wise with t clamped to [0, 1] and a quaternion is
// rebuilt from the result. (Name keeps the original "eurler" spelling so
// existing call sites keep working.)
auto eurlerAngleLerp(const Quatf& from, const Quatf& to, float t) -> Quatf
{
    const float w = max(0.0f, min(t, 1.0f));
    const auto blended = from.toEulerAngles() * (1 - w) + to.toEulerAngles() * w;
    return Quatf::fromEulerAngles(blended);
}
// GLUT mouse button callback. Button releases capture the current pose (if a
// joint was selected) and clear the per-button drag flags; presses select a
// joint / set drag flags, and wheel events (buttons 3/4) zoom the view by
// translating along Z. Finally the drag anchor is re-projected into world
// coordinates for use by mouseMoveEvent().
void mouseEvent(int button, int state, int x, int y)
{
    int viewport[4];
    _mouseX = x;
    _mouseY = y;
    if (state == GLUT_UP)
        switch (button) {
        case GLUT_LEFT_BUTTON:
            // Releasing the left button while a joint is selected stores the
            // pose into the current keyframe.
            if (myDefMesh.mySkeleton.hasJointSelected)
                captureCurrentPose();
            myDefMesh.mySkeleton.release();
            _mouseLeft = false;
            break;
        case GLUT_MIDDLE_BUTTON:
            _mouseMiddle = false;
            break;
        case GLUT_RIGHT_BUTTON:
            _mouseRight = false;
            break;
        } else
        switch (button) {
        case GLUT_LEFT_BUTTON:
            myDefMesh.mySkeleton.selectOrReleaseJoint();
            _mouseLeft = true;
            break;
        case GLUT_MIDDLE_BUTTON:
            _mouseMiddle = true;
            break;
        case GLUT_RIGHT_BUTTON:
            _mouseRight = true;
            break;
        case 4: //Zoomout
            glLoadIdentity();
            glTranslatef(0.0f, 0.0f, -0.1f);
            glMultMatrixd(_matrix);
            getMatrix();
            glutPostRedisplay();
            break;
        case 3: //Zoomin
            glLoadIdentity();
            glTranslatef(0.0f,0.0f,0.1f);
            glMultMatrixd(_matrix);
            getMatrix();
            glutPostRedisplay();
            break;
        default:
            break;
            //std::cout<<button<<std::endl;
        }
    glGetIntegerv(GL_VIEWPORT, viewport);
    screenToWorldPos(&_dragPosX, &_dragPosY, &_dragPosZ, x, y, viewport);
}
// GLUT passive-motion callback: updates which joint (if any) the cursor
// currently hovers over.
void mousePassiveFunc(int x, int y)
{
    auto& skeleton = myDefMesh.mySkeleton;
    skeleton.checkHoveringStatus(x, y);
}
// GLUT active-motion callback. With no joint selected it manipulates the
// camera: middle (or left+right) drag zooms, left drag rotates about an axis
// derived from the inverse view matrix, right drag pans using projected
// world-space drag anchors. With a joint selected (Edit mode) it rotates the
// joint about the eye direction by the angle swept between the mouse and the
// joint's parent on screen.
void mouseMoveEvent(int x, int y)
{
    const int dx = x - _mouseX;
    const int dy = y - _mouseY;
    int viewport[4];
    glGetIntegerv(GL_VIEWPORT, viewport);
    if (!myDefMesh.mySkeleton.hasJointSelected)
    {
        bool changed = false;
        if (dx == 0 && dy == 0)
            return;
        if (_mouseMiddle || (_mouseLeft && _mouseRight)) {
            /* double s = exp((double)dy*0.01); */
            /* glScalef(s,s,s); */
            /* if(abs(prev_z) <= 1.0) */
            // Zoom: translate the view along Z proportionally to vertical drag.
            glLoadIdentity();
            glTranslatef(0.0f, 0.0f, dy * 0.01f);
            glMultMatrixd(_matrix);
            changed = true;
        } else if (_mouseLeft) {
            double ax, ay, az;
            double bx, by, bz;
            double angle;
            ax = dy;
            ay = dx;
            az = 0.0;
            // Rotation amount scales with drag length relative to viewport width.
            angle = vlen(ax, ay, az) / (double) (viewport[2] + 1) * 180.0;
            /* Use inverse matrix to determine local axis of rotation */
            bx = _matrixI[0] * ax + _matrixI[4] * ay + _matrixI[8] * az;
            by = _matrixI[1] * ax + _matrixI[5] * ay + _matrixI[9] * az;
            bz = _matrixI[2] * ax + _matrixI[6] * ay + _matrixI[10] * az;
            glRotated(angle, bx, by, bz);
            changed = true;
        } else if (_mouseRight) {
            // Pan: move by the world-space delta between the previous and
            // current drag anchors.
            double px, py, pz;
            screenToWorldPos(&px, &py, &pz, x, y, viewport);
            glLoadIdentity();
            glTranslated(px - _dragPosX, py - _dragPosY, pz - _dragPosZ);
            glMultMatrixd(_matrix);
            _dragPosX = px;
            _dragPosY = py;
            _dragPosZ = pz;
            changed = true;
        }
        _mouseX = x;
        _mouseY = y;
        if (changed) {
            getMatrix();
            glutPostRedisplay();
        }
    }
    /*
     * Do joint jobs
     */
    else if (animationMode == AnimationMode::Edit)
    {
        Joint* selectedJoint = myDefMesh.mySkeleton.getSelectedJoint();
        // Leave if there is no selected joint or if it is the root
        // Might be fun to translate the model if the root is selected
        if (selectedJoint == nullptr || selectedJoint->transform.getParent() == nullptr)
            return;
        auto& j = *selectedJoint;
        auto& p = *j.transform.getParent()->getJoint();
        Vector2i mousePos{ x, y };
        // Angle between the parent->mouse and parent->joint screen vectors.
        Vector2f v1 = mousePos - p.screenCoord;
        Vector2f v2 = j.screenCoord - p.screenCoord;
        float angle = getAngle(v1, v2);
        Quatf rot = Quatf::angleAxis(angle, getEyeDirection());
        auto newRot = rot * j.transform.getLocalRotation();
        // Set the delta for this frame
        //j.delta.setLocalRotation(rot);
        //j.setDelta(true);
        j.transform.setLocalRotation(newRot);
    }
}
// Timer stuff: frame-delta bookkeeping for animation playback. t1 is sampled
// at the start of display() and t2 at its end; their difference (milliseconds)
// drives the skeleton animation update on the next frame.
using fast_clock = std::chrono::high_resolution_clock;
using time_unit = std::chrono::milliseconds;
auto t1 = fast_clock::now();
auto t2 = t1;
// GLUT display callback: advances the animation by the measured frame delta,
// then renders the ground quad, the deformable mesh, and an RGB reference
// coordinate frame, and finally swaps buffers.
// NOTE(review): delta is computed from the previous frame's t1 (entry) and
// t2 (exit), so it measures only the previous frame's render time, not the
// full frame-to-frame interval — confirm this is intended.
void display()
{
    auto diff = t2 - t1;
    auto delta = std::chrono::duration_cast<time_unit>(diff).count();
    auto deltaSeconds = delta / 1000.0f;
    t1 = fast_clock::now();
    updateCurrentFrame();
    myDefMesh.mySkeleton.updateAnimation(deltaSeconds);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
    glMultMatrixd(_matrix);
    //draw terrain
    //glColor3f(0.5,0.5,0.5);
    glPushMatrix();
    glColor3f(0.7f, 0.7f, 0.7f);
    glBegin(GL_QUADS);
    glVertex3f(-3.0f, -0.85f, 3.0f);
    glVertex3f(3.0f, -0.85f, 3.0f);
    glVertex3f(3.0f, -0.85f, -3.0f);
    glVertex3f(-3.0f, -0.85f, -3.0f);
    glEnd();
    glPopMatrix();
    glPushMatrix();
    myDefMesh.glDraw(meshModel);
    myDefMesh.resetSkeletonDeltas();
    glPopMatrix();
    // Drawing reference coord system (X red, Y green, Z blue)
    const int length = 3;
    const GLfloat r[]{ 1, 0, 0, 1 };
    const GLfloat g[]{ 0, 1, 0, 1 };
    const GLfloat b[]{ 0, 0, 1, 1 };
    glLineWidth(5);
    glBegin(GL_LINES);
    glColor4fv(r);
    glVertex3i(0, 0, 0);
    glVertex3i(length, 0, 0);
    glColor4fv(g);
    glVertex3i(0, 0, 0);
    glVertex3i(0, length, 0);
    glColor4fv(b);
    glVertex3i(0, 0, 0);
    glVertex3i(0, 0, length);
    glEnd();
    glutSwapBuffers();
    t2 = fast_clock::now();
}
// Prints the keyboard controls to stdout at startup.
// Bug fix: the interpolation-key help lines were listed in the reverse order
// of the actual bindings in chooseInterpFunction() ('1' = matrix lerp,
// '2' = euler-angle lerp, '3' = lerp, '4' = slerp); also fixes the
// "redering" typo.
auto displayInstructions() -> void
{
    std::cout << "=== INSTRUCTIONS ===" << '\n'
              << "=: Increment current frame index" << '\n'
              << "-: Decrement current frame index" << '\n'
              << "Enter: Capture current pose and save in current frame" << '\n'
              << "n: Switch between Edit/Playback mode" << '\n'
              << "p: Play current animation" << '\n'
              << "o: Allow animation to loop" << '\n'
              << "m: Switch rendering modes" << '\n'
              << "1: Use lerp with matrix" << '\n'
              << "2: Use lerp with euler angles" << '\n'
              << "3: Use lerp" << '\n'
              << "4: Use slerp" << '\n';
    std::cout << "Starting in Edit Mode" << std::endl;
    // NOTE(review): assumes the skeleton's default interpolation is slerp —
    // confirm against the Skeleton initialization.
    std::cout << "Starting with slerp" << std::endl;
}
// Application entry point: sets up the GLUT window and callbacks, optionally
// pre-loads an animation named on the command line, then enters the GLUT
// main loop (which does not return).
int main(int argc, char **argv)
{
    glutInit(&argc, argv);
    //Print context info
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); //double buffer
    glutInitWindowSize(width, height);
    glutInitWindowPosition(0, 0);
    glutCreateWindow("COMP477");
    glutDisplayFunc(display);
    glutReshapeFunc(changeSize);
    glutTimerFunc(10, timerFunction, 1);
    glutMouseFunc(mouseEvent);
    glutMotionFunc(mouseMoveEvent);
    glutKeyboardFunc(handleKeyPress);
    glutPassiveMotionFunc(mousePassiveFunc);
    displayInstructions();
    if (argc > 1)
    {
        cout << "Attempting to load animation argument" << endl;
        // Bug fix: argv[0] is the program name; the animation file name is
        // the first real argument, argv[1].
        char* arg1 = argv[1];
        string filename{ arg1 };
        string filepath = ANIMATIONS_PATH + filename;
        wstring wfilepath{ begin(filepath), end(filepath) };
        BOOL exists = PathFileExists(wfilepath.c_str());
        // Bug fix: the existence check was inverted — only load the
        // animation when the file actually exists.
        if (exists)
        {
            Animation anim = Animation::loadFromFile(filepath);
            auto& skel = myDefMesh.mySkeleton;
            skel.animation = anim;
            skel.currentFrameIdx = 0;
            skel.resetAnimParams();
            cout << "Animation " << filename << " loaded successfully" << endl;
        }
        else
        {
            cout << "Tried running with load command on file that does not exist" << endl;
        }
    }
    init();
    glutMainLoop();
    //delete something
    return 0;
}
| 23.427988 | 121 | 0.635943 | aidendeom |
7b38e38f837413b6622534a45a79f92246d736f0 | 18,513 | cpp | C++ | test/pt-client/test_pt_client.cpp | costanic/mbed-edge | 4900e950ff67f8974b7aeef955289ef56606c964 | [
"Apache-2.0"
] | 24 | 2018-03-27T16:44:18.000Z | 2020-04-28T15:28:34.000Z | test/pt-client/test_pt_client.cpp | costanic/mbed-edge | 4900e950ff67f8974b7aeef955289ef56606c964 | [
"Apache-2.0"
] | 19 | 2021-01-28T20:14:45.000Z | 2021-11-23T13:08:59.000Z | test/pt-client/test_pt_client.cpp | costanic/mbed-edge | 4900e950ff67f8974b7aeef955289ef56606c964 | [
"Apache-2.0"
] | 28 | 2018-04-02T02:36:48.000Z | 2020-10-13T05:37:16.000Z | #include "CppUTest/TestHarness.h"
#include "CppUTestExt/MockSupport.h"
#include "cpputest-custom-types/value_pointer.h"
extern "C" {
#include "jansson.h"
#include <string.h>
#include <event2/event.h>
#include <event2/bufferevent.h>
#include "pt-client/pt_api.h"
#include "pt-client/pt_api_internal.h"
#include "test-lib/evbase_mock.h"
#include "pt-client/client.h"
#include "common/edge_mutex.h"
#include "common/default_message_id_generator.h"
#include "common/websocket_comm.h"
#include "libwebsockets.h"
#include "libwebsocket-mock/lws_mock.h"
#include "cpputest-custom-types/my_json_frame.h"
#include "test-lib/json_helper.h"
#include "common/edge_trace.h"
}
#define EXPECTED_EDGE_PROTOCOL_API_VERSION "/1/pt"
// No-op libwebsockets protocol callback used as a placeholder in tests;
// always reports success.
int dummy_callback(struct lws *wsi, enum lws_callback_reasons reason, void *user, void *in, size_t len)
{
    (void) wsi;
    (void) reason;
    (void) user;
    (void) in;
    (void) len;
    return 0;
}
struct interrupt_parameter;
// Per-test hook invoked from the helper thread once the (mocked) websocket
// connection has been established.
typedef void (*test_func_t)(struct interrupt_parameter *);
// Bundle of state shared between a test case and the thread driving the
// mocked client event loop.
struct interrupt_parameter {
    struct event_base *base;        // mocked libevent base (may be NULL)
    struct connection **connection; // out-slot for the client connection
    void *userdata;                 // opaque user context passed to callbacks
    short events;                   // NOTE(review): not referenced in this chunk — confirm use
    const char *socket_path;        // edge-core socket path (may be NULL)
    test_func_t test_func;          // extra checks to run while connected
    pthread_t *tester_thread;       // helper thread handle (optional)
    bool connection_fails;          // simulate lws connection failure
    bool expect_evthread_use_pthreads_failure; // simulate evthread init failure
    bool expect_cb_failure;         // expect callback validation to fail
};
// Allocates a zero-initialized interrupt_parameter with an empty connection
// out-slot; released by clean_interrupt_thread_and_parameter().
static struct interrupt_parameter *create_interrupt_parameter(struct event_base *base)
{
    auto *param = (struct interrupt_parameter *) calloc(1, sizeof(struct interrupt_parameter));
    param->base = base;
    param->connection = (struct connection **) calloc(1, sizeof(struct connection *));
    return param;
}
// CppUTest group for the protocol-translator client tests; no shared fixture
// state is required, so setup/teardown are intentionally empty.
TEST_GROUP(pt_client) {
    void setup()
    {
    }
    void teardown()
    {
    }
};
// Queues mock expectations for one rpc_mutex init/destroy pair, matching the
// mutex lifecycle of a full client start/stop cycle.
static void expect_mutex_init_deinit()
{
    mock().expectOneCall("edge_mutex_init")
        .withPointerParameter("mutex", (void *) &rpc_mutex)
        .withUnsignedIntParameter("type", PTHREAD_MUTEX_ERRORCHECK)
        .andReturnValue(0);
    mock().expectOneCall("edge_mutex_destroy").withPointerParameter("mutex", (void *) &rpc_mutex).andReturnValue(0);
}
// Queues mock expectations for one rpc_mutex lock/unlock pair.
static void expect_mutexing()
{
    mock().expectOneCall("edge_mutex_lock").withPointerParameter("mutex", (void *) &rpc_mutex).andReturnValue(0);
    mock().expectOneCall("edge_mutex_unlock").withPointerParameter("mutex", (void *) &rpc_mutex).andReturnValue(0);
}
// Connection-ready callback handed to the client; records the invocation and
// its arguments in the mock framework for later verification.
void test_connection_ready_handler(struct connection *connection, void* userdata)
{
    mock()
        .actualCall("test_connection_ready_handler")
        .withPointerParameter("connection", connection)
        .withPointerParameter("userdata", userdata);
}
// Disconnected callback handed to the client; records the invocation only
// (arguments are not checked).
void disconnected_handler(struct connection *connection, void *userdata)
{
    mock().actualCall("disconnected_handler");
}
// Shutdown callback handed to the client; records the invocation and its
// arguments in the mock framework.
void shutdown_handler(struct connection **connection, void *userdata)
{
    mock().actualCall("shutdown_handler")
        .withPointerParameter("connection", (void *) connection)
        .withPointerParameter("userdata", (void *) userdata);
}
// Write callback handed to the client; records every argument (the value
// buffer is wrapped in a ValuePointer so the mock can compare contents) and
// returns whatever the test programmed via andReturnValue().
static int write_handler(struct connection *connection,
                         const char *device_id, const uint16_t object_id,
                         const uint16_t instance_id,
                         const uint16_t resource_id,
                         const unsigned int operation,
                         const uint8_t *value, const uint32_t value_size,
                         void *userdata)
{
    ValuePointer *value_pointer = new ValuePointer((uint8_t *) value, value_size);
    int ret_val = mock().actualCall("write_handler")
        .withPointerParameter("connnection", connection)
        .withStringParameter("device_id", device_id)
        .withIntParameter("object_id", object_id)
        .withIntParameter("instance_id", instance_id)
        .withIntParameter("resource_id", resource_id)
        .withIntParameter("operation", operation)
        .withParameterOfType("ValuePointer", "value", (void *) value_pointer)
        .withLongIntParameter("value_size", value_size)
        .withPointerParameter("userdata", userdata)
        .returnIntValue();
    delete value_pointer;
    return ret_val;
}
// Clears every protocol-translator callback.
// Bug fix: disconnected_cb was previously left uninitialized even though the
// struct has four callbacks (init_pt_cbs() sets all four), leaving garbage in
// stack-allocated callback structs.
static void init_pt_cbs_to_null(protocol_translator_callbacks_t *pt_cbs)
{
    pt_cbs->connection_ready_cb = NULL;
    pt_cbs->received_write_cb = NULL;
    pt_cbs->connection_shutdown_cb = NULL;
    pt_cbs->disconnected_cb = NULL;
}
// Wires all four protocol-translator callbacks to the mock-recording
// handlers defined above.
static void init_pt_cbs(protocol_translator_callbacks_t *pt_cbs)
{
    pt_cbs->connection_ready_cb = test_connection_ready_handler;
    pt_cbs->received_write_cb = write_handler;
    pt_cbs->connection_shutdown_cb = shutdown_handler;
    pt_cbs->disconnected_cb = disconnected_handler;
}
// pt_client close-condition hook installed by start_client(): always reports
// "close now" so tests never retry the connection.
// Fix: return the bool literal `true` instead of the int literal 1, and mark
// the unused parameter explicitly.
static bool close_condition_check_no_retries(bool close_client) {
    (void) close_client;
    return true; // Always close
}
// Programs the full set of mock expectations for one pt_client_start() run
// (evthread init, event base creation, lws context/connection, dispatch and
// teardown) according to the failure flags in `parameter`, then starts the
// client. The expectation order mirrors the call order inside the client.
static void start_client(struct interrupt_parameter *parameter,
                         protocol_translator_callbacks_t *pt_cbs,
                         int dispatch_return_value)
{
    char *name = (char*) "example_client";
    pt_init_check_close_condition_function(close_condition_check_no_retries);
    // Simulated evthread_use_pthreads() result: -1 forces early failure.
    int evthread_use_pthreads_return_value = parameter->expect_evthread_use_pthreads_failure ? -1 : 0;
    mock().expectOneCall("evthread_use_pthreads").andReturnValue(evthread_use_pthreads_return_value);
    if(evthread_use_pthreads_return_value != -1) {
        mock().expectOneCall("event_base_new")
            .andReturnValue(parameter->base);
    }
    if (!parameter->expect_evthread_use_pthreads_failure && !parameter->expect_cb_failure) {
        expect_mutexing();
        if (parameter->base) {
            mock().expectOneCall("lws_set_log_level");
            mock().expectOneCall("lws_create_context");
            if (parameter->connection_fails) {
                lws_mock_setup_connection_failure();
            }
            mock().expectOneCall("lws_client_connect_via_info")
                .withStringParameter("path", EXPECTED_EDGE_PROTOCOL_API_VERSION);
            if (parameter->connection_fails) {
                // Failed connection: disconnect + loop break + shutdown,
                // with no event loop dispatch.
                mock().expectOneCall("disconnected_handler");
                mock().expectOneCall("event_base_loopbreak").withPointerParameter("base", (void *) parameter->base);
                mock().expectOneCall("shutdown_handler")
                    .withPointerParameter("connection", parameter->connection)
                    .withPointerParameter("userdata", parameter->userdata);
            } else {
                mock().expectOneCall("event_base_dispatch")
                    .withPointerParameter("base", parameter->base)
                    .andReturnValue(dispatch_return_value);
                if (dispatch_return_value != -1) {
                    // Clean dispatch exit: loop break followed by shutdown.
                    mock().expectOneCall("event_base_loopbreak").withPointerParameter("base", (void *) parameter->base);
                    mock().expectOneCall("shutdown_handler")
                        .withPointerParameter("connection", parameter->connection)
                        .withPointerParameter("userdata", parameter->userdata);
                }
            }
            mock().expectOneCall("lws_context_destroy");
        }
    }
    mock().expectOneCall("event_base_free")
        .withPointerParameter("base", parameter->base);
    mock().expectOneCall("libevent_global_shutdown");
    pt_client_start(parameter->socket_path, name, pt_cbs, parameter->userdata, parameter->connection);
}
// Verifies that edge_trace_init()/edge_trace_destroy() create and destroy
// exactly one recursive trace mutex.
TEST(pt_client, test_initialize_and_destroy_trace_api)
{
    mock().expectOneCall("edge_mutex_init")
        .withPointerParameter("mutex", &trace_mutex)
        .withIntParameter("type", PTHREAD_MUTEX_RECURSIVE)
        .andReturnValue(0);
    edge_trace_init(1);
    mock().checkExpectations();
    mock().expectOneCall("edge_mutex_destroy").withPointerParameter("mutex", &trace_mutex).andReturnValue(0);
    edge_trace_destroy();
    mock().checkExpectations();
}
// Helper-thread body: waits for the mocked event loop to start, simulates the
// websocket connection being established (expecting the connection-ready
// callback), runs the optional per-test function, then simulates the close
// (expecting the disconnected callback) and shuts the client down.
static void *client_shutdown_test_thread(void * param)
{
    struct interrupt_parameter *parameter = (struct interrupt_parameter *) param;
    struct event_base *base = parameter->base;
    evbase_mock_wait_until_event_loop(base);
    struct lws *wsi = lws_mock_get_wsi();
    websocket_connection_t *connection = (websocket_connection_t *) wsi->userdata;
    struct connection *conn = connection->conn;
    mock().expectOneCall("test_connection_ready_handler")
        .withPointerParameter("connection", conn)
        .withPointerParameter("userdata", conn->userdata);
    lws_mock_connection_established(lws_mock_get_wsi(), LWS_CALLBACK_CLIENT_ESTABLISHED);
    if (parameter->test_func) {
        parameter->test_func(parameter);
    }
    mock().expectOneCall("disconnected_handler");
    lws_mock_connection_closed(lws_mock_get_wsi());
    pt_client_shutdown(*(parameter->connection));
    return NULL;
}
// Tears down a test scenario: releases the mocked event-loop wait (if any),
// joins the helper thread, deletes the mocked event base and frees the
// parameter bundle.
// NOTE(review): when both a tester thread and wait simulation exist,
// evbase_mock_release_interrupt_thread() is called twice — confirm the mock
// tolerates the second release.
static void clean_interrupt_thread_and_parameter(struct interrupt_parameter *parameter)
{
    if (parameter->base && parameter->base->event_loop_wait_simulation && parameter->tester_thread) {
        evbase_mock_release_interrupt_thread(parameter->base);
        pthread_join(*(parameter->tester_thread), NULL);
    }
    if (parameter->base) {
        if (parameter->base->event_loop_wait_simulation) {
            evbase_mock_release_interrupt_thread(parameter->base);
        }
        evbase_mock_delete(parameter->base);
    }
    free(parameter->tester_thread);
    free(*parameter->connection);
    free(parameter->connection);
    free(parameter);
}
// Shared scenario driver: builds an interrupt parameter, registers all mock
// callbacks, optionally spawns the helper thread that establishes and later
// closes the connection (only when the connection is expected to succeed),
// then runs the client and tears everything down.
static void test_start_and_shutdown_variant_common(const char *socket_path,
                                                   bool connection_fails,
                                                   test_func_t test_func)
{
    struct event_base *base = evbase_mock_new();
    struct interrupt_parameter *parameter = create_interrupt_parameter(base);
    parameter->connection_fails = connection_fails;
    parameter->socket_path = socket_path;
    parameter->test_func = test_func;
    expect_mutex_init_deinit();
    protocol_translator_callbacks_t pt_cbs;
    init_pt_cbs(&pt_cbs);
    if (!connection_fails) {
        // The helper thread drives the connect/disconnect sequence while the
        // client sits in the (mocked) event loop.
        evbase_mock_setup_event_loop_wait(base);
        parameter->tester_thread = (pthread_t *) calloc(1, sizeof(pthread_t));
        pthread_create(parameter->tester_thread, NULL, client_shutdown_test_thread, (void *) parameter);
    }
    start_client(parameter, &pt_cbs, 0);
    clean_interrupt_thread_and_parameter(parameter);
}
// Convenience wrapper: runs the start/shutdown scenario without an extra
// in-connection test function.
static void test_start_and_shutdown_variant(const char *socket_path, bool connection_fails)
{
    test_start_and_shutdown_variant_common(socket_path, connection_fails, nullptr);
}
// Convenience wrapper: runs the start/shutdown scenario with a successful
// connection and the given in-connection test function.
static void test_start_and_shutdown_variant_with_test_func(const char *socket_path, test_func_t test_func)
{
    test_start_and_shutdown_variant_common(socket_path, /* connection_fails */ false, test_func);
}
// Starts the client with an (invalid) partial callback set and expects the
// start to fail during callback validation, before the event base is created.
// NOTE(review): disconnected_cb is left unset here — confirm validation does
// not read it.
static void test_callback_combination(pt_connection_ready_cb connection_ready_cb,
                                      pt_received_write_handler received_write_cb,
                                      pt_connection_shutdown_cb connection_shutdown_cb)
{
    struct interrupt_parameter *parameter = create_interrupt_parameter(NULL);
    parameter->expect_cb_failure = true;
    expect_mutex_init_deinit();
    expect_mutexing();
    protocol_translator_callbacks_t pt_cbs;
    pt_cbs.connection_ready_cb = connection_ready_cb;
    pt_cbs.received_write_cb = received_write_cb;
    pt_cbs.connection_shutdown_cb = connection_shutdown_cb;
    start_client(parameter, &pt_cbs, -1);
    clean_interrupt_thread_and_parameter(parameter);
}
// Full lifecycle with no socket path override: connect, then shut down from
// the helper thread.
TEST(pt_client, test_start_client_and_shutdown)
{
    test_start_and_shutdown_variant(NULL, false);
    mock().checkExpectations();
}
// Full lifecycle using the default protocol-translator socket path.
TEST(pt_client, test_start_client_and_shutdown_with_localhost)
{
    test_start_and_shutdown_variant("default-pt-socket", false);
    mock().checkExpectations();
}
// In-connection check: feeding a JSON-RPC request with an unknown method must
// produce a "Method not found" (-32601) error response written back over the
// websocket.
static void test_lws_client_receive_invalid(struct interrupt_parameter *parameter)
{
    (void) parameter;
    unsigned char *data = (unsigned char *) "{ \"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"id\" : \"1\" }";
    size_t len = 54;
    const char *expected_response =
        "{\"error\":{\"code\":-32601,\"message\":\"Method not found\"},\"id\":\"1\",\"jsonrpc\":\"2.0\"}";
    int32_t response_len = 79;
    MyJsonFrameComparator comparator;
    mock().installComparator("MyJsonFrame", comparator);
    mock().expectOneCall("lws_remaining_packet_payload").andReturnValue(0);
    mock().expectOneCall("lws_callback_on_writable").andReturnValue(0);
    MyJsonFrame *frame = new MyJsonFrame((const char *) expected_response, response_len);
    mock().expectOneCall("lws_write").withParameterOfType("MyJsonFrame", "buf", (const void *) frame).andReturnValue(0);
    lws_mock_callback_client_receive(data, len, 0);
    delete frame;
}
// Lifecycle test running the unknown-method receive scenario while connected.
TEST(pt_client, test_lws_client_receive_callback_invalid_message)
{
    test_start_and_shutdown_variant_with_test_func("default-pt-socket", test_lws_client_receive_invalid);
    mock().checkExpectations();
}
// In-connection check: malformed (non-JSON) input combined with a failing
// lws_callback_on_writable() must close the websocket with a reason.
static void test_lws_client_receive_protocol_error(struct interrupt_parameter *parameter)
{
    (void) parameter;
    // Note: following is not valid json.
    unsigned char *data = (unsigned char *) "[ \"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"id\" : \"1\" }";
    size_t len = 54;
    mock().expectOneCall("lws_remaining_packet_payload").andReturnValue(0);
    mock().expectOneCall("lws_callback_on_writable").andReturnValue(-1);
    mock().expectOneCall("lws_close_reason");
    lws_mock_callback_client_receive(data, len, 0);
}
// Lifecycle test running the protocol-error receive scenario while connected.
TEST(pt_client, test_lws_client_receive_callback_protocol_error)
{
    test_start_and_shutdown_variant_with_test_func("default-pt-socket", test_lws_client_receive_protocol_error);
    mock().checkExpectations();
}
// In-connection check: a valid "write" request loaded from the test JSON file
// must be dispatched to write_handler with the decoded arguments, and a
// JSON-RPC "ok" result must be written back over the websocket.
static void test_lws_client_receive_valid(struct interrupt_parameter *parameter)
{
#define TEST_WRITE_FROM_EDGE_CORE TEST_DATA_DIR "/write_from_edge_core_test.json"
    // Load device registration jsonrpc parameters structure from file
    json_t *request = load_json_params(TEST_WRITE_FROM_EDGE_CORE);
    // Build device registration jsonrpc structure
    unsigned char *data = (unsigned char *) json_dumps(request, JSON_COMPACT);
    size_t len = strlen((char *) data);
    const char *expected_response = "{\"id\":\"1234567890\",\"jsonrpc\":\"2.0\",\"result\":\"ok\"}";
    int32_t response_len = 49;
    MyJsonFrameComparator comparator;
    mock().installComparator("MyJsonFrame", comparator);
    mock().expectOneCall("lws_remaining_packet_payload").andReturnValue(0);
    mock().expectOneCall("lws_callback_on_writable").andReturnValue(0);
    struct connection *connection = *parameter->connection;
    // Expected decoded write payload (8 raw bytes).
    ValuePointer *value_pointer = new ValuePointer((const uint8_t *) "@(=p\243\327\n=", 8);
    mock().expectOneCall("write_handler")
        .withPointerParameter("connnection", connection)
        .withStringParameter("device_id", "device-id-1")
        .withIntParameter("object_id", 3306)
        .withIntParameter("instance_id", 0)
        .withIntParameter("resource_id", 5700)
        .withIntParameter("operation", 2)
        .withParameterOfType("ValuePointer", "value", value_pointer)
        .withLongIntParameter("value_size", 8)
        .withPointerParameter("userdata", 0)
        .andReturnValue(0);
    MyJsonFrame *response_frame = new MyJsonFrame((const char *) expected_response, response_len);
    mock().expectOneCall("lws_write")
        .withParameterOfType("MyJsonFrame", "buf", (const void *) response_frame)
        .andReturnValue(0);
    lws_mock_callback_client_receive(data, len, 0);
    free(data);
    json_decref(request);
    delete response_frame;
    delete value_pointer;
}
// Lifecycle test running the valid-write receive scenario while connected.
TEST(pt_client, test_lws_client_receive_callback_valid_message)
{
    test_start_and_shutdown_variant_with_test_func("default-pt-socket", test_lws_client_receive_valid);
    mock().checkExpectations();
}
// A failing websocket connection must still run the disconnect/shutdown
// sequence and release all resources.
TEST(pt_client, test_lws_connection_fails)
{
    test_start_and_shutdown_variant_common("default-pt-socket", true /* connection fails */, NULL);
    mock().checkExpectations();
}
// Starting the client must fail cleanly when evthread_use_pthreads() reports
// an error; with NULL callbacks, no event base is ever created.
TEST(pt_client, test_start_client_libevent_configuration_fails)
{
    struct interrupt_parameter *parameter = create_interrupt_parameter(NULL);
    // Bug fix: the field is a bool — the previous code assigned -1 and relied
    // on implicit conversion. State the intent directly.
    parameter->expect_evthread_use_pthreads_failure = true;
    expect_mutex_init_deinit();
    protocol_translator_callbacks_t pt_cbs;
    init_pt_cbs_to_null(&pt_cbs);
    expect_mutexing();
    start_client(parameter, &pt_cbs, -1);
    clean_interrupt_thread_and_parameter(parameter);
    mock().checkExpectations();
}
// Every combination with at least one missing mandatory callback must be
// rejected (the all-set combination is intentionally absent).
TEST(pt_client, test_start_client_incorrect_protocol_translator_callbacks)
{
    test_callback_combination(NULL, NULL, NULL);
    test_callback_combination(NULL, NULL, shutdown_handler);
    test_callback_combination(NULL, write_handler, NULL);
    test_callback_combination(NULL, write_handler, shutdown_handler);
    test_callback_combination(test_connection_ready_handler, NULL, NULL);
    test_callback_combination(test_connection_ready_handler, NULL, shutdown_handler);
    test_callback_combination(test_connection_ready_handler, write_handler, NULL);
    mock().checkExpectations();
}
// event_base_new() returning NULL (parameter->base is NULL) must abort the
// start without entering the event loop.
TEST(pt_client, test_start_client_and_shutdown_base_allocation_fails)
{
    struct interrupt_parameter *parameter = create_interrupt_parameter(NULL);
    expect_mutex_init_deinit();
    protocol_translator_callbacks_t pt_cbs;
    init_pt_cbs(&pt_cbs);
    start_client(parameter, &pt_cbs, -1);
    clean_interrupt_thread_and_parameter(parameter);
    mock().checkExpectations();
}
// A failing event_base_dispatch() (-1) must still trigger the shutdown
// callback and release all resources.
TEST(pt_client, test_start_client_and_shutdown_with_failing_dispatch)
{
    struct event_base *base = evbase_mock_new();
    struct interrupt_parameter *parameter = create_interrupt_parameter(base);
    expect_mutex_init_deinit();
    mock().expectOneCall("shutdown_handler")
        .withPointerParameter("connection", parameter->connection)
        .withPointerParameter("userdata", parameter->userdata);
    protocol_translator_callbacks_t pt_cbs;
    init_pt_cbs(&pt_cbs);
    start_client(parameter, &pt_cbs, -1);
    clean_interrupt_thread_and_parameter(parameter);
    mock().checkExpectations();
}
// Installing an explicit message-id generator must be accepted silently.
TEST(pt_client, test_setting_message_id_generator)
{
    pt_client_set_msg_id_generator(edge_default_generate_msg_id);
    mock().checkExpectations();
}
// Passing NULL must be accepted silently — presumably it selects the default
// generator (TODO confirm against pt_client_set_msg_id_generator).
TEST(pt_client, test_setting_default_message_id_generator)
{
    pt_client_set_msg_id_generator(NULL);
    mock().checkExpectations();
}
| 39.056962 | 120 | 0.708097 | costanic |
7b39815fa0ad8c240b0c8acbde068a8017b38d88 | 667 | cpp | C++ | OJ/1423/src/main.cpp | RabitDash/practice | 2becb4367d0fa7d6ad0226dfa9b67765b5c59af9 | [
"MIT"
] | null | null | null | OJ/1423/src/main.cpp | RabitDash/practice | 2becb4367d0fa7d6ad0226dfa9b67765b5c59af9 | [
"MIT"
] | null | null | null | OJ/1423/src/main.cpp | RabitDash/practice | 2becb4367d0fa7d6ad0226dfa9b67765b5c59af9 | [
"MIT"
] | 1 | 2021-04-06T08:11:19.000Z | 2021-04-06T08:11:19.000Z | #include <iostream>
#include <vector>
using namespace std;
int main()
{
int n;
while(cin >> n)
{
vector<int> a;
vector<int> shit;
int max = 0;
a.resize(2 * n + 1);
a[1] = 1;
for(int i = 2; i <= n; i++)
{
if(i % 2 == 0)
a[i] = a[i/2] + 1;
else
a[i] = a[(i-1)/2] + a[(i-1)/2+1];
}
for(int i = 1; i <= n; i++)
{
if(max < a[i])
{
max = a[i];
shit.clear();
shit.push_back(i);
}
else if(max == a[i])
{
shit.push_back(i);
}
}
cout << a[n] << endl;
for(int i = 0; i < shit.size(); i++)
{
cout << shit[i] << " ";
}
cout << endl;
}
return 0;
}
| 15.159091 | 39 | 0.401799 | RabitDash |
7b3caed6220b47186ac5669ad54b9dc8fc26b4ed | 7,993 | cpp | C++ | src/transport/ofed/Utils.cpp | IBM/tulips | 040f79f9a54bb7da85bdb87daefb9e9b2cdeade2 | [
"BSD-2-Clause"
] | 5 | 2020-09-22T06:02:48.000Z | 2021-11-17T09:26:30.000Z | src/transport/ofed/Utils.cpp | xguerin/tulips | 040f79f9a54bb7da85bdb87daefb9e9b2cdeade2 | [
"BSD-2-Clause"
] | null | null | null | src/transport/ofed/Utils.cpp | xguerin/tulips | 040f79f9a54bb7da85bdb87daefb9e9b2cdeade2 | [
"BSD-2-Clause"
] | 4 | 2021-03-22T09:00:24.000Z | 2021-06-28T09:13:44.000Z | /*
* Copyright (c) 2020, International Business Machines
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "Utils.h"
#include <tulips/system/Utils.h>
#include <cstring>
#include <fstream>
#include <stdexcept>
#include <string>
#include <vector>
#include <dirent.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
/*
 * Resolves the kernel driver name backing a network interface by reading the
 * /sys/class/net/<ifn>/device/driver symlink and returning its last path
 * component in drv. Returns false if the link cannot be read or yields no
 * path components.
 */
static bool
getInterfaceDriverName(std::string const& ifn, std::string& drv)
{
  char path[PATH_MAX], target[PATH_MAX];
  memset(path, 0, PATH_MAX);
  memset(target, 0, PATH_MAX);
  snprintf(path, PATH_MAX, "/sys/class/net/%s/device/driver", ifn.c_str());
  /*
   * readlink() does not NUL-terminate: read at most PATH_MAX - 1 bytes so
   * the zeroed buffer always keeps a terminator (the original read up to
   * PATH_MAX, which could leave target unterminated).
   */
  if (readlink(path, target, PATH_MAX - 1) < 0) {
    LOG("OFED", "cannot readlink() " << path);
    return false;
  }
  std::vector<std::string> parts;
  tulips::system::utils::split(std::string(target), '/', parts);
  /*
   * Guard against an empty link target, which would make rbegin() invalid.
   */
  if (parts.empty()) {
    return false;
  }
  drv = *parts.rbegin();
  return true;
}
/*
 * scandir() filter: keep every directory entry except the "." and ".."
 * pseudo-entries.
 */
static int
filterInfinibandEntry(const struct dirent* d)
{
  const std::string name(d->d_name);
  return (name == "." || name == "..") ? 0 : 1;
}
/*
 * Looks up the Infiniband device name and port index backing a network
 * interface: the device name is the single entry expected under
 * /sys/class/net/<ifn>/device/infiniband and the port comes from
 * /sys/class/net/<ifn>/dev_port. Returns false on any failure.
 */
bool
getInterfaceDeviceAndPortIds(std::string const& ifn, std::string& name,
                             int& portid)
{
  std::ifstream ifs;
  char path[PATH_MAX];
  memset(path, 0, PATH_MAX);
  /*
   * Get the device name.
   */
  struct dirent** entries = nullptr;
  snprintf(path, PATH_MAX, "/sys/class/net/%s/device/infiniband", ifn.c_str());
  int count = scandir(path, &entries, filterInfinibandEntry, ::alphasort);
  if (count != 1) {
    /*
     * scandir() hands ownership of the entry array to the caller even when
     * the count is unexpected: release everything before bailing out (the
     * original leaked the entries whenever count != 1). On error (count < 0)
     * entries stays nullptr and free() is a no-op.
     */
    for (int i = 0; i < count; i += 1) {
      free(entries[i]);
    }
    free(entries);
    return false;
  }
  name = std::string(entries[0]->d_name);
  free(entries[0]);
  free(entries);
  /*
   * Get the port ID.
   */
  snprintf(path, PATH_MAX, "/sys/class/net/%s/dev_port", ifn.c_str());
  ifs.open(path);
  if (!ifs.good()) {
    return false;
  }
  ifs >> portid;
  ifs.close();
  return true;
}
/*
 * Checks whether the interface is backed by one of the Mellanox ConnectX
 * driver families (mlx4_core or mlx5_core) supported by this transport.
 */
bool
isSupportedDevice(std::string const& ifn)
{
  std::string driver;
  return getInterfaceDriverName(ifn, driver) &&
         (driver == "mlx4_core" || driver == "mlx5_core");
}
/*
 * scandir() filter: keep interfaces that are neither pseudo-entries nor the
 * loopback device and whose driver is supported.
 */
static int
supportedDeviceFilter(const struct dirent* d)
{
  const std::string ifn(d->d_name);
  if (ifn == "." || ifn == ".." || ifn == "lo") {
    return 0;
  }
  return isSupportedDevice(ifn) ? 1 : 0;
}
/*
 * Scans /sys/class/net for interfaces with a supported driver. On success,
 * ifn receives the alphabetically first matching interface name and the
 * function returns true.
 */
bool
findSupportedInterface(std::string& ifn)
{
  struct dirent** sel = nullptr;
  int count =
    scandir("/sys/class/net", &sel, supportedDeviceFilter, ::alphasort);
  /*
   * Check if any entry is valid.
   */
  if (count <= 0) {
    /*
     * scandir() may allocate an (empty) array even when nothing matched:
     * free it to avoid a leak (the original skipped this). On error sel
     * stays nullptr and free() is a no-op.
     */
    free(sel);
    return false;
  }
  /*
   * Grab the first available entry.
   */
  ifn = std::string(sel[0]->d_name);
  /*
   * Clean-up.
   */
  for (int i = 0; i < count; i += 1) {
    free(sel[i]);
  }
  free(sel);
  return true;
}
/*
 * Builds the full ibverbs data path for one device port: completion channel,
 * timestamped send/receive CQs, a RAW_PACKET queue pair walked through the
 * INIT -> RTR -> RTS states, plus locked, registered send/receive buffer
 * pools (nbuf buffers of sndlen/rcvlen bytes). All out-parameters are set on
 * success; any failure throws std::runtime_error (earlier resources are not
 * released by this function).
 */
void
setup(ibv_context* context, ibv_pd* pd, const uint8_t port, const uint16_t nbuf,
      const size_t sndlen, const size_t rcvlen, ibv_comp_channel*& comp,
      ibv_cq*& sendcq, ibv_cq*& recvcq, ibv_qp*& qp, uint8_t*& sendbuf,
      uint8_t*& recvbuf, ibv_mr*& sendmr, ibv_mr*& recvmr)
{
  /*
   * Create a completion channel for the receive CQ.
   */
  comp = ibv_create_comp_channel(context);
  if (comp == nullptr) {
    throw std::runtime_error("Cannot create receive completion channel");
  }
  /*
   * Create a send/recv completion queues. Both request hardware timestamps;
   * only the receive CQ is bound to the completion channel.
   */
  struct ibv_exp_cq_init_attr cq_attr = {
    .comp_mask = IBV_EXP_CQ_INIT_ATTR_FLAGS,
    .flags = IBV_EXP_CQ_TIMESTAMP,
    .res_domain = nullptr,
    .peer_direct_attrs = nullptr,
  };
  sendcq = ibv_exp_create_cq(context, nbuf, nullptr, nullptr, 0, &cq_attr);
  if (sendcq == nullptr) {
    throw std::runtime_error("Cannot create send completion queue");
  }
  recvcq = ibv_exp_create_cq(context, nbuf, nullptr, comp, 0, &cq_attr);
  if (recvcq == nullptr) {
    throw std::runtime_error("Cannot create receive completion queue");
  }
  /*
   * Change the blocking mode of the completion channel
   */
  int flags = fcntl(comp->fd, F_GETFL);
  if (fcntl(comp->fd, F_SETFL, flags | O_NONBLOCK) < 0) {
    throw std::runtime_error("Cannot make the completion channel async");
  }
  /*
   * Setup the QP attributes.
   */
  struct ibv_exp_qp_init_attr qp_init_attr;
  qp_init_attr.comp_mask = IBV_EXP_QP_INIT_ATTR_PD;
  qp_init_attr.qp_context = nullptr;
  qp_init_attr.send_cq = sendcq;
  qp_init_attr.recv_cq = recvcq;
  qp_init_attr.srq = nullptr;
  qp_init_attr.qp_type = IBV_QPT_RAW_PACKET;
  qp_init_attr.sq_sig_all = 0;
  qp_init_attr.pd = pd;
  /*
   * Setup the TSO header (58 bytes covers Ethernet + IPv4 + TCP headers)
   */
#ifdef TULIPS_HAS_HW_TSO
  qp_init_attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
  qp_init_attr.max_tso_header = 58;
#endif
  /*
   * Setup the QP capabilities (this should be taken from the device).
   */
  qp_init_attr.cap.max_send_wr = nbuf;
  qp_init_attr.cap.max_recv_wr = nbuf;
  qp_init_attr.cap.max_send_sge = 1;
  qp_init_attr.cap.max_recv_sge = 1;
  qp_init_attr.cap.max_inline_data =
    tulips::transport::ofed::Device::INLINE_DATA_THRESHOLD;
  /*
   * Create the queue pair.
   */
  qp = ibv_exp_create_qp(context, &qp_init_attr);
  if (qp == nullptr) {
    throw std::runtime_error("Cannot create queue pair");
  }
  /*
   * Initialize the QP with its ports. Note: ibverbs port numbers are
   * 1-based, hence port + 1.
   */
  int qp_flags = 0;
  struct ibv_qp_attr qp_attr;
  memset(&qp_attr, 0, sizeof(qp_attr));
  qp_flags = IBV_QP_STATE | IBV_QP_PORT;
  qp_attr.qp_state = IBV_QPS_INIT;
  qp_attr.port_num = port + 1;
  if (ibv_modify_qp(qp, &qp_attr, qp_flags) != 0) {
    throw std::runtime_error("Cannot switch QP to INIT state");
  }
  /*
   * Move to ready to receive.
   */
  memset(&qp_attr, 0, sizeof(qp_attr));
  qp_flags = IBV_QP_STATE;
  qp_attr.qp_state = IBV_QPS_RTR;
  if (ibv_modify_qp(qp, &qp_attr, qp_flags) != 0) {
    throw std::runtime_error("Cannot switch QP to RTR state");
  }
  /*
   * Move to ready to send.
   */
  memset(&qp_attr, 0, sizeof(qp_attr));
  qp_flags = IBV_QP_STATE;
  qp_attr.qp_state = IBV_QPS_RTS;
  if (ibv_modify_qp(qp, &qp_attr, qp_flags) != 0) {
    throw std::runtime_error("Cannot switch QP to RTS state");
  }
  /*
   * Create and register send buffers (MAP_LOCKED pins the pages in RAM).
   */
  sendbuf = (uint8_t*)mmap(nullptr, nbuf * sndlen, PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
  if (sendbuf == nullptr) {
    throw std::runtime_error("Cannot MMAP() buffer");
  }
  sendmr = ibv_reg_mr(pd, sendbuf, nbuf * sndlen, IBV_ACCESS_LOCAL_WRITE);
  if (sendmr == nullptr) {
    throw std::runtime_error("Cannot create a memory region");
  }
  /*
   * Create and register receive buffers.
   */
  recvbuf = (uint8_t*)mmap(nullptr, nbuf * rcvlen, PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
  if (recvbuf == nullptr) {
    throw std::runtime_error("Cannot MMAP() buffer");
  }
  recvmr = ibv_reg_mr(pd, recvbuf, nbuf * rcvlen, IBV_ACCESS_LOCAL_WRITE);
  if (recvmr == nullptr) {
    throw std::runtime_error("Cannot create a memory region");
  }
}
| 29.824627 | 80 | 0.676592 | IBM |
7b3d18818aba67a65bf5dcb69d54fcba1aed292f | 21,118 | cpp | C++ | src/engine/render/Model.cpp | italrr/astro | 1c1ac28eacb081eee17a19c833d7866dfb348967 | [
"MIT"
] | 2 | 2020-11-30T23:18:12.000Z | 2021-10-16T05:26:16.000Z | src/engine/render/Model.cpp | italrr/astro | 1c1ac28eacb081eee17a19c833d7866dfb348967 | [
"MIT"
] | null | null | null | src/engine/render/Model.cpp | italrr/astro | 1c1ac28eacb081eee17a19c833d7866dfb348967 | [
"MIT"
] | null | null | null | #include <cmath>
#include <assimp/Importer.hpp>
#include <assimp/scene.h>
#include <assimp/postprocess.h>
#include "../common/Type.hpp"
#include "../common/Result.hpp"
#include "../Core.hpp"
#include "Model.hpp"
static astro::Mat<4,4, float> aiMat2Astro(const aiMatrix4x4 &mat){
    // Convert an Assimp 4x4 matrix (row-major, members a1..d4) into the
    // engine's astro::Mat<4,4> storage, where element (row, col) lives at
    // mat[row + col * 4]. aiMatrix4x4::operator[] yields a row pointer.
    astro::Mat<4,4, float> out;
    for(unsigned int row = 0; row < 4; ++row){
        for(unsigned int col = 0; col < 4; ++col){
            out.mat[row + col * 4] = mat[row][col];
        }
    }
    return out;
}
static astro::Mat<4,4, float> aiMat2Astro(const aiMatrix3x3 &mat){
    // Embed an Assimp 3x3 (rotation) matrix into a 4x4 engine matrix,
    // padding the fourth row and column with identity values.
    astro::Mat<4,4, float> out;
    for(unsigned int row = 0; row < 4; ++row){
        for(unsigned int col = 0; col < 4; ++col){
            if(row < 3 && col < 3){
                out.mat[row + col * 4] = mat[row][col];
            }else{
                out.mat[row + col * 4] = (row == col) ? 1.0f : 0.0f;
            }
        }
    }
    return out;
}
// --- Assimp -> astro import pipeline (definitions below) --------------------
// initScene:     entry point; builds all meshes and reads the animation data.
// initMesh:      copies one aiMesh (vertices, indices, bones, texture deps).
// initBones:     fills the model-wide bone table from one mesh.
// initTexture:   collects texture file dependencies from a mesh's material.
// initAnimation: reads the node hierarchy and the per-bone keyframe channels.
static void initMesh( const unsigned int index,
                        const aiScene* pScene,
                        const aiMesh *mesh,
                        astro::Gfx::Mesh *amesh,
                        astro::Gfx::Model *model,
                        std::vector<astro::Gfx::Vertex> &verts,
                        std::vector<unsigned int> &ind,
                        std::vector<astro::Gfx::TextureDependency> &textures);
static void initScene(const aiScene* pScene, astro::Gfx::Model *model);
static void initBones(const unsigned int index, const aiMesh *mesh, astro::Gfx::Mesh *amesh, astro::Gfx::Model *model);
static void initTexture(const aiScene* scene, const aiMesh *mesh, std::vector<astro::Gfx::TextureDependency> &textures);
static void initAnimation(const aiScene* scene, astro::Gfx::Model *model);
static void initScene(const aiScene* scene, astro::Gfx::Model *model){
    // Builds the engine-side mesh list from an imported Assimp scene:
    //  1) allocate one astro::Gfx::Mesh per aiMesh and record vertex/index
    //     base offsets so the meshes can later share contiguous buffers,
    //  2) fill every mesh with vertex/index/bone/texture data,
    //  3) read the skeletal animation clips.
    const unsigned int nmeshes = scene->mNumMeshes; // was signed int: mixed signed/unsigned loop comparisons
    model->meshes.resize(nmeshes);
    unsigned int nVert = 0;
    unsigned int nIndices = 0;
    // Inverse of the root node's transform; used to bring bone transforms
    // back into model space when animating (see Model::updateAnim).
    model->gInvTrans = aiMat2Astro(scene->mRootNode->mTransformation).inverse();
    // setup
    for (unsigned int i = 0; i < nmeshes; ++i) {
        // create meshes
        auto mesh = std::make_shared<astro::Gfx::Mesh>(astro::Gfx::Mesh());
        model->meshes[i] = mesh;
        model->meshes[i]->mIndex = scene->mMeshes[i]->mMaterialIndex;
        // metadata: the scene is triangulated on import, so 3 indices per face
        model->meshes[i]->nIndices = scene->mMeshes[i]->mNumFaces * 3;
        model->meshes[i]->bVertex = nVert;
        model->meshes[i]->bIndex = nIndices;
        nVert += scene->mMeshes[i]->mNumVertices;
        nIndices += model->meshes[i]->nIndices;
    }
    // init everything
    for(unsigned int i = 0; i < nmeshes; ++i){
        auto &mesh = model->meshes[i];
        initMesh(i, scene, scene->mMeshes[i], mesh.get(), model, mesh->vertices, mesh->indices, model->texDeps);
    }
    // init animation
    initAnimation(scene, model);
}
static void initBones(const unsigned int index, const aiMesh *mesh, astro::Gfx::Mesh *amesh, astro::Gfx::Model *model){
    // Registers this mesh's bones in the model-wide bone table
    // (model->boneMapping / model->boneInfo) and stores per-vertex
    // (bone index, weight) pairs on the mesh's vertices.
    for(unsigned int i = 0; i < mesh->mNumBones; ++i){ // was int: mixed-sign comparison with mNumBones
        auto aiBone = mesh->mBones[i];
        std::string name(aiBone->mName.data);
        // Bones can be shared across meshes: look the name up first and only
        // append a new BoneInfo slot when it has not been seen before.
        int boneIndex = (int)model->boneInfo.size();
        if(model->boneMapping.find(name) == model->boneMapping.end()){
            // map index
            model->boneMapping[name] = boneIndex;
            // add bone info
            astro::Gfx::BoneInfo boneInfo;
            model->boneInfo.push_back(boneInfo);
        }else{
            boneIndex = model->boneMapping[name];
        }
        model->boneInfo[boneIndex].offset = aiMat2Astro(aiBone->mOffsetMatrix);
        model->boneInfo[boneIndex].name = name;
        // add weights
        for(unsigned int j = 0; j < aiBone->mNumWeights; ++j){ // was int: mixed-sign comparison with mNumWeights
            unsigned int vertId = aiBone->mWeights[j].mVertexId;
            float weight = aiBone->mWeights[j].mWeight;
            amesh->vertices[vertId].setBoneData(boneIndex, weight);
        }
    }
}
static void initTexture(const aiScene* pScene, const aiMesh *mesh, std::vector<astro::Gfx::TextureDependency> &textures){
    // Collects the texture files referenced by this mesh's material and
    // appends them to `textures` as dependencies to be loaded later
    // (see Model::load).
    auto indexer = astro::Core::getIndexer();
    // BUGFIX: this lambda used to be declared `static`. A static local is
    // initialized once, so its [&] reference captures (indexer, textures)
    // stayed bound to the *first* call's locals; any later call would read
    // dangling references — undefined behavior. It is now a plain local.
    auto process = [&](aiMaterial *mat, aiTextureType type, int rtypeName){
        for(unsigned int i = 0; i < mat->GetTextureCount(type); ++i){
            aiString str;
            mat->GetTexture(type, i, &str);
            astro::Gfx::TextureDependency dep;
            dep.role = rtypeName;
            dep.file = indexer->findByName(str.C_Str());
            if(dep.file.get() == NULL){
                // Texture file not indexed: stop collecting this texture type
                // (preserves the original early-return behavior).
                return;
            }
            textures.push_back(dep);
        }
    };
    aiMaterial* material = pScene->mMaterials[mesh->mMaterialIndex];
    process(material, aiTextureType_DIFFUSE, astro::Gfx::TextureRole::DIFFUSE);
    process(material, aiTextureType_SPECULAR, astro::Gfx::TextureRole::SPECULAR);
    process(material, aiTextureType_HEIGHT, astro::Gfx::TextureRole::NORMAL);
    process(material, aiTextureType_AMBIENT, astro::Gfx::TextureRole::HEIGHT);
}
static void initMesh( const unsigned int index,
const aiScene* pScene,
const aiMesh *mesh,
astro::Gfx::Mesh *amesh,
astro::Gfx::Model *model,
std::vector<astro::Gfx::Vertex> &verts,
std::vector<unsigned int> &ind,
std::vector<astro::Gfx::TextureDependency> &textures){
// TODO: this can be parallelized
// vertex data
for(unsigned int i = 0; i < mesh->mNumVertices; ++i){
astro::Gfx::Vertex vertex;
// positions
vertex.position.set(mesh->mVertices[i].x, mesh->mVertices[i].y, mesh->mVertices[i].z);
// normals
if (mesh->HasNormals()){
vertex.normal.set(mesh->mNormals[i].x, mesh->mNormals[i].y, mesh->mNormals[i].z);
}
// texture coordinates
if(mesh->HasTextureCoords(0)){
vertex.texCoords.set(mesh->mTextureCoords[0][i].x, mesh->mTextureCoords[0][i].y);
}else{
vertex.texCoords.set(0.0f, 0.0f);
}
// tangent and bittangent
if(mesh->HasTangentsAndBitangents()){
// tangent
vertex.tangent.set(mesh->mTangents[i].x, mesh->mTangents[i].y, mesh->mTangents[i].z);
// bitangent
vertex.bitangent.set(mesh->mBitangents[i].x, mesh->mBitangents[i].y, mesh->mBitangents[i].z);
}else{
// TODO: maybe generate these if not available?
vertex.tangent.set(0);
vertex.bitangent.set(0);
}
verts.push_back(vertex);
}
// indices
for(unsigned int i = 0; i < mesh->mNumFaces; ++i){
aiFace face = mesh->mFaces[i];
for(int j = 0; j < face.mNumIndices; ++j){
ind.push_back(face.mIndices[j]);
}
}
// bones
if(mesh->HasBones()){
initBones(index, mesh, amesh, model);
}
// textures
initTexture(pScene, mesh, textures);
}
// Reads the skeletal data of the scene into the model: mirrors the aiNode
// hierarchy into SkeletalHierarchy objects (indexed by node name), then
// copies the keyframe channels of every animation clip found in the file.
static void initAnimation(const aiScene* scene, astro::Gfx::Model *model){
    auto nanim = scene->mNumAnimations;
    std::unordered_map<std::string, std::shared_ptr<astro::Gfx::SkeletalHierarchy>> skelHierarchy;
    typedef std::shared_ptr<astro::Gfx::SkeletalAnimation> asSkAnim;
    typedef std::shared_ptr<astro::Gfx::SkeletalHierarchy> asSkHier;
    // Recursively mirrors the aiNode tree; every node is also indexed by
    // name in skelHierarchy.
    std::function<asSkHier(const aiNode *node, const asSkHier &parent)> readHierarchy = [&](const aiNode *node, const asSkHier &parent){
        std::string nodeName(node->mName.data);
        // add to hierarchy
        auto current = std::make_shared<astro::Gfx::SkeletalHierarchy>(astro::Gfx::SkeletalHierarchy());
        current->mat = aiMat2Astro(node->mTransformation);
        current->parent = parent;
        current->name = nodeName;
        skelHierarchy[nodeName] = current;
        // iterate children
        for(int i = 0; i < node->mNumChildren; ++i){
            current->children.push_back(readHierarchy(node->mChildren[i], current));
        }
        return current;
    };
    // Walks the node tree; for each node, copies the rotation/scaling/
    // translation keys of the animation channel whose name matches the node.
    std::function<void(const aiNode *node, const aiAnimation *aiAnim, asSkAnim &anim)> readAnimations = [&](const aiNode *node, const aiAnimation *aiAnim, asSkAnim &anim){
        std::string nodeName(node->mName.data);
        // find channels
        for(int i = 0; i < aiAnim->mNumChannels; ++i){
            const aiNodeAnim* pNodeAnim = aiAnim->mChannels[i];
            if(nodeName != std::string(pNodeAnim->mNodeName.data)){
                continue;
            }
            // Fetch (or lazily create) the Bone record for this node.
            auto fbone = anim->bones.find(nodeName);
            if(fbone == anim->bones.end()){
                auto animbone = std::make_shared<astro::Gfx::Bone>(astro::Gfx::Bone());
                animbone->name = nodeName;
                anim->bones[nodeName] = animbone;
                fbone = anim->bones.find(nodeName);
            }
            if(fbone != anim->bones.end()){
                // rotation
                for(unsigned int j = 0; j < pNodeAnim->mNumRotationKeys; ++j){
                    astro::Gfx::SkeletalFrameRotation rot;
                    auto airot = pNodeAnim->mRotationKeys[j];
                    rot.time = (float)airot.mTime;
                    rot.rotation = astro::Vec4<float>(airot.mValue.x, airot.mValue.y, airot.mValue.z, airot.mValue.w);
                    fbone->second->rotations.push_back(rot);
                }
                // scaling
                for (unsigned int j = 0; j < pNodeAnim->mNumScalingKeys; ++j) {
                    astro::Gfx::SkeletalFrameScaling scaling;
                    auto aiscaling = pNodeAnim->mScalingKeys[j];
                    scaling.time = (float)aiscaling.mTime;
                    scaling.scaling = astro::Vec3<float>(aiscaling.mValue.x, aiscaling.mValue.y, aiscaling.mValue.z);
                    fbone->second->scalings.push_back(scaling);
                }
                // translation
                for (unsigned int j = 0; j < pNodeAnim->mNumPositionKeys; ++j) {
                    astro::Gfx::SkeletalFrameTranslation trans;
                    auto aitrans = pNodeAnim->mPositionKeys[j];
                    trans.time = (float)aitrans.mTime;
                    trans.translation = astro::Vec3<float>(aitrans.mValue.x, aitrans.mValue.y, aitrans.mValue.z);
                    fbone->second->translations.push_back(trans);
                }
            }
        }
        // iterate children
        for(int i = 0; i < node->mNumChildren; ++i){
            readAnimations(node->mChildren[i], aiAnim, anim);
        }
    };
    // build hierarchy
    model->skeletonRoot = readHierarchy(scene->mRootNode, asSkHier(NULL));
    model->skeleton = skelHierarchy;
    // read animations
    for(unsigned int i = 0; i < nanim; ++i){
        auto anim = std::make_shared<astro::Gfx::SkeletalAnimation>(astro::Gfx::SkeletalAnimation());
        anim->name = std::string(scene->mAnimations[i]->mName.data);
        anim->ticksPerSecond = scene->mAnimations[i]->mTicksPerSecond;
        anim->duration = scene->mAnimations[i]->mDuration;
        // init bones for this animation
        // NOTE(review): this inner loop shadows the outer animation index `i`;
        // it works, but renaming the inner counter would aid readability.
        for(int i = 0; i < model->boneInfo.size(); ++i){
            auto &bi = model->boneInfo[i];
            auto bone = std::make_shared<astro::Gfx::Bone>(astro::Gfx::Bone());
            bone->name = bi.name;
            anim->bones[bi.name] = bone;
        }
        // read anim keys
        readAnimations(scene->mRootNode, scene->mAnimations[i], anim);
        model->animations[anim->name] = anim;
    }
    // asign default animation (if any)
    if(nanim > 0){
        model->currentAnim = model->animations.begin()->second;
    }
}
// Asynchronously imports the model pointed to by `file`:
//  1) a spawned job parses the file with Assimp and fills this Model,
//  2) every texture dependency is loaded through the resource manager,
//  3) the "astro_gfx" job uploads the meshes to the GPU and marks the
//     returned Result as Success.
// Returns immediately with a Result in the Waiting state; on any error the
// Result is set to failure with a descriptive message.
std::shared_ptr<astro::Result> astro::Gfx::Model::load(const std::shared_ptr<astro::Indexing::Index> &file){
    auto result = astro::makeResult(astro::ResultType::Waiting);
    // only use diffuse for now
    this->transform->resetMatMode();
    this->transform->enMatMode(Gfx::MaterialMode::DIFFUSE);
    result->job = astro::spawn([&, file, result](astro::Job &ctx){
        Assimp::Importer import;
        // Triangulate and pre-process so initScene() can assume 3 indices per
        // face and per-vertex normals.
        const aiScene *scene = import.ReadFile(file->path.c_str(),
                                aiProcess_JoinIdenticalVertices |
                                aiProcess_SortByPType |
                                aiProcess_Triangulate |
                                aiProcess_GenSmoothNormals |
                                aiProcess_FlipUVs |
                                aiProcess_LimitBoneWeights);
        if(!scene || scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE || !scene->mRootNode){
            result->setFailure(astro::String::format("Model::load: failed to load model '%s': %s\n", file->fname.c_str(), import.GetErrorString()));
            return;
        }
        initScene(scene, this);
        auto rscmngr = Core::getResourceMngr();
        // create expect list
        std::vector<std::shared_ptr<astro::Result>> results;
        for(int i = 0; i < this->texDeps.size(); ++i){
            auto &tex = this->texDeps[i];
            results.push_back(rscmngr->load(tex.file, std::make_shared<astro::Gfx::Texture>(astro::Gfx::Texture())));
        }
        // expect all textures to load
        astro::expect(results, [&, result,rscmngr, results](astro::Job &ctx){
            if(!ctx.succDeps){
                // Some textures failed: log which ones, but continue with
                // whatever did load.
                std::string messages = "";
                for(int i =0; i < ctx.listeners.size(); ++i){
                    messages += astro::String::format("%s%s ", results[i]->msg.c_str(), (i < ctx.listeners.size()-1 ? "," : ""));
                }
                astro::log("Model::load: warning: not all textures were loaded succesfully: %s\n", messages.c_str());
            }
            // Bind every successfully indexed texture to this model's transform.
            for(int i = 0; i < this->texDeps.size(); ++i){
                auto &tex = this->texDeps[i];
                if(tex.file.get() == NULL){
                    continue;
                }
                auto texture = rscmngr->findByName(tex.file->fname);
                this->transform->textures.push_back(BindTexture(std::static_pointer_cast<astro::Gfx::Texture>(texture), tex.role));
            }
            auto jgfx = astro::findJob({"astro_gfx"});
            if(jgfx.get() == NULL){
                result->setFailure(astro::String::format("gfx job not found: cannot load model '%s'", file->fname.c_str()));
                return;
            }
            // load meshes to gpu
            jgfx->addBacklog([&, result](astro::Job &ctx){
                auto ren = astro::Gfx::getRenderEngine();
                for(int i = 0; i < meshes.size(); ++i){
                    auto meshres = ren->generateMesh(meshes[i]->vertices, meshes[i]->indices, this->transform->useBones);
                    // The payload carries the generated handles, read back in
                    // vao, vbo, ebo order.
                    meshres->payload->reset();
                    meshres->payload->read(&meshes[i]->vao, sizeof(meshes[i]->vao));
                    meshres->payload->read(&meshes[i]->vbo, sizeof(meshes[i]->vbo));
                    meshres->payload->read(&meshes[i]->ebo, sizeof(meshes[i]->ebo));
                }
                result->set(astro::ResultType::Success);
            });
        }, false);
    }, true, false, true);
    return result;
}
std::shared_ptr<astro::Result> astro::Gfx::Model::unload(){
    // Releasing GPU/CPU resources is not implemented yet (TODO); report
    // success so callers can treat the model as released.
    auto result = astro::makeResult(astro::ResultType::Success);
    return result;
}
void astro::Gfx::Model::updateAnim(float time){
    // Advances the current animation clip to `time` (seconds) and recomputes
    // every bone's final transform (boneInfo[i].transf), ready to be uploaded
    // as `gBones[i]` shader uniforms by render().
    auto &anim = this->currentAnim;
    float tps = time * anim->ticksPerSecond;
    float animTime = std::fmod(tps, anim->duration);
    // BUGFIX: these helper lambdas used to be declared `static` while
    // capturing the surrounding locals (anim, animTime, builtSkel, and each
    // other) by reference. A function-local static is initialized exactly
    // once, so from the second call onward the captures referenced the
    // *first* call's dead stack frame — undefined behavior on every frame.
    // They are now ordinary locals rebuilt on each call.
    //
    // Index of the rotation keyframe pair that brackets animTime.
    auto findRotation = [&](const std::shared_ptr<astro::Gfx::Bone> &bone, float animTime){
        for(int i = 0; i < bone->rotations.size() - 1; i++){
            if(animTime < bone->rotations[i + 1].time){
                return i;
            }
        }
        return 0;
    };
    // Index of the translation keyframe pair that brackets animTime.
    auto findTrans = [&](const std::shared_ptr<astro::Gfx::Bone> &bone, float animTime){
        for(int i = 0; i < bone->translations.size() - 1; i++){
            if(animTime < bone->translations[i + 1].time){
                return i;
            }
        }
        return 0;
    };
    // Spherical interpolation between the two bracketing rotation keys.
    auto interpRotation = [&](const std::shared_ptr<astro::Gfx::Bone> &bone){
        if(bone->rotations.size() == 1){
            auto start = aiQuaternion(bone->rotations[0].rotation.w, bone->rotations[0].rotation.x, bone->rotations[0].rotation.y, bone->rotations[0].rotation.z);
            return aiMat2Astro(start.GetMatrix());
        }
        int current = findRotation(bone, animTime);
        int next = current + 1;
        // NOTE(review): two keys sharing a timestamp would make dt == 0 and
        // divide by zero here — confirm exporters never emit duplicate times.
        float dt = bone->rotations[next].time - bone->rotations[current].time;
        float factor = (animTime - bone->rotations[current].time) / dt;
        auto start = aiQuaternion(bone->rotations[current].rotation.w, bone->rotations[current].rotation.x, bone->rotations[current].rotation.y, bone->rotations[current].rotation.z);
        auto end = aiQuaternion(bone->rotations[next].rotation.w, bone->rotations[next].rotation.x, bone->rotations[next].rotation.y, bone->rotations[next].rotation.z);
        aiQuaternion result;
        aiQuaternion::Interpolate(result, start, end, factor);
        result = result.Normalize();
        return aiMat2Astro(result.GetMatrix());
    };
    // Linear interpolation between the two bracketing translation keys.
    auto interpTrans = [&](const std::shared_ptr<astro::Gfx::Bone> &bone){
        if(bone->translations.size() == 1){
            return astro::MAT4Identity.translate(bone->translations[0].translation);
        }
        int current = findTrans(bone, animTime);
        int next = current + 1;
        float dt = bone->translations[next].time - bone->translations[current].time;
        float factor = (animTime - bone->translations[current].time) / dt;
        const aiVector3D start = aiVector3D(bone->translations[current].translation.x, bone->translations[current].translation.y, bone->translations[current].translation.z);
        const aiVector3D end = aiVector3D(bone->translations[next].translation.x, bone->translations[next].translation.y, bone->translations[next].translation.z);
        aiVector3D delta = end - start;
        auto result = start + factor * delta;
        return astro::MAT4Identity.translate(astro::Vec3<float>(result.x, result.y, result.z));
    };
    // Walk the skeleton depth-first, accumulating parent transforms; nodes
    // animated by the current clip get their local matrix replaced with the
    // interpolated pose.
    std::unordered_map<std::string, astro::Mat<4, 4, float>> builtSkel;
    typedef std::function<void(const std::shared_ptr<astro::Gfx::SkeletalHierarchy> &top, const astro::Mat<4, 4, float> &parent)> bCHType;
    bCHType buildCurrentHirarchy = [&](const std::shared_ptr<astro::Gfx::SkeletalHierarchy> &top, const astro::Mat<4, 4, float> &parent){
        auto node = top->mat;
        auto fbone = anim->bones.find(top->name);
        if(fbone != anim->bones.end() && fbone->second->rotations.size() > 0 && fbone->second->translations.size() > 0){
            auto &bone = fbone->second;
            auto trans = interpTrans(bone);
            auto rot = interpRotation(bone);
            node = trans * rot;
        }
        auto transform = parent * node;
        builtSkel[top->name] = transform;
        for(int i = 0; i < top->children.size(); ++i){
            buildCurrentHirarchy(top->children[i], transform);
        }
    };
    buildCurrentHirarchy(skeletonRoot, astro::MAT4Identity);
    for(int i = 0; i < boneInfo.size(); ++i){
        auto &bi = boneInfo[i];
        // build skeleton based on current animation
        astro::Mat<4, 4, float> globalTrans = builtSkel[bi.name];
        // final transformation: back to model space, then apply the bone offset
        bi.transf = gInvTrans * globalTrans * bi.offset;
    }
}
void astro::Gfx::Model::render(){
auto ren = astro::Gfx::getRenderEngine();
updateAnim(ren->currentTime);
for(int i = 0; i < boneInfo.size(); ++i){
std::string name = astro::String::format("gBones[%i]", i);
this->transform->shAttrs[name] = std::make_shared<astro::Gfx::ShaderAttrMat4>(boneInfo[i].transf);
}
for(int i = 0; i < meshes.size(); ++i){
meshes[i]->transform = this->transform;
ren->renderMesh(meshes[i].get());
}
}
void astro::Gfx::Mesh::render(){
    // Delegate drawing of this single mesh to the active render engine.
    astro::Gfx::getRenderEngine()->renderMesh(this);
}
7b414cb0a59aa01e8d876de89b356fdd33d77f8e | 5,672 | cpp | C++ | DialogSystemSolution/DialogSystem/j1DialogSystem.cpp | RoperoIvan/Dialogue-System | f9619decf859c452addc6a5ba37fcc26c892d9ea | [
"MIT"
] | null | null | null | DialogSystemSolution/DialogSystem/j1DialogSystem.cpp | RoperoIvan/Dialogue-System | f9619decf859c452addc6a5ba37fcc26c892d9ea | [
"MIT"
] | null | null | null | DialogSystemSolution/DialogSystem/j1DialogSystem.cpp | RoperoIvan/Dialogue-System | f9619decf859c452addc6a5ba37fcc26c892d9ea | [
"MIT"
] | null | null | null | #include "j1App.h"
#include "j1UIManager.h"
#include "j1Fonts.h"
#include "j1DialogSystem.h"
#include "j1Input.h"
#include "GUI_Label.h"
#include "GUI_Button.h"
// Default constructor: nothing to set up until Start() loads the dialogue.
j1DialogSystem::j1DialogSystem()
{
}
// Destructor: heap-allocated trees/nodes are released in CleanUp(), not here.
j1DialogSystem::~j1DialogSystem()
{
}
bool j1DialogSystem::Start()
{
	// Parse the dialogue trees from XML, select the first node of the
	// initially chosen tree and render it.
	bool ret = LoadDialogue("Dialog.xml");
	// BUGFIX: the return value of LoadDialogue() used to be ignored; an
	// empty/failed load would then crash on the dialogNodes[0] access below.
	if (ret == false || dialogTrees.empty() || dialogTrees[treeid]->dialogNodes.empty())
		return false;
	currentNode = dialogTrees[treeid]->dialogNodes[0];
	PerformDialogue(treeid);
	return ret;
}
bool j1DialogSystem::Update(float dt)
{
bool ret = true;
if (App->input->GetKey(SDL_SCANCODE_F1) == KEY_DOWN)
{
App->ui_manager->DeleteAllUIElements();
treeid = 0;
currentNode = dialogTrees[treeid]->dialogNodes[0];
input = 7;
PerformDialogue(treeid);
}
if (App->input->GetKey(SDL_SCANCODE_F2) == KEY_DOWN)
{
App->ui_manager->DeleteAllUIElements();
treeid = 1;
currentNode = dialogTrees[treeid]->dialogNodes[0];
input = 7;
PerformDialogue(treeid);
}
if (App->input->GetKey(SDL_SCANCODE_F3) == KEY_DOWN)
{
App->ui_manager->DeleteAllUIElements();
treeid = 2;
currentNode = dialogTrees[treeid]->dialogNodes[0];
input = 7;
PerformDialogue(treeid);
}
if (App->input->GetKey(SDL_SCANCODE_1) == KEY_DOWN)
{
App->ui_manager->DeleteAllUIElements();
input = 0;
PerformDialogue(treeid);
}
if (App->input->GetKey(SDL_SCANCODE_2) == KEY_DOWN)
{
App->ui_manager->DeleteAllUIElements();
input = 1;
PerformDialogue(treeid);
}
if (App->input->GetKey(SDL_SCANCODE_3) == KEY_DOWN)
{
App->ui_manager->DeleteAllUIElements();
input = 2;
PerformDialogue(treeid);
}
if (App->input->GetKey(SDL_SCANCODE_R) == KEY_DOWN)
{
App->ui_manager->DeleteAllUIElements();
dialogTrees[treeid]->karma = 0;
currentNode = dialogTrees[treeid]->dialogNodes[0];
input = 7;
PerformDialogue(treeid);
}
return ret;
}
bool j1DialogSystem::CleanUp()
{
bool ret = true;
for (int j = 0; j < dialogTrees.size(); j++)
{
for (int i = 0; i < dialogTrees[j]->dialogNodes.size(); i++)
delete dialogTrees[j]->dialogNodes[i];
dialogTrees[j]->dialogNodes.clear();
delete dialogTrees[j];
}
dialogTrees.clear();
return ret;
}
// Advances the conversation one step: resolves the node that becomes the
// current one (from the player's last input and the tree's karma), replaces
// the PLAYERNAME placeholder in the line, and redraws the dialog UI.
void j1DialogSystem::PerformDialogue(int tr_id)
{
	if (dialogTrees.empty())
		LOG("TreeEmpty");
	if (CompareKarma() == true)
	{
		//Find the next node
		if (input >= 0 && input < currentNode->dialogOptions.size()) //Only if the input is valid
		{
			for (int j = 0; j < dialogTrees[tr_id]->dialogNodes.size(); j++)
			{
				if (currentNode->dialogOptions[input]->nextnode == dialogTrees[tr_id]->dialogNodes[j]->id) //If the option id is the same as one of the nodes ids in the tree
				{
					// Apply the chosen option's karma before moving on.
					CheckForKarma(currentNode);
					currentNode = dialogTrees[tr_id]->dialogNodes[j]; // we assign our node pointer to the next node in the tree
					break;
				}
			}
		}
	}
	else if (CompareKarma() == false)
	{
		// Negative karma: the NPC answers with the node whose own karma value
		// equals the tree's current (negative) karma.
		for (int i = 0; i < dialogTrees[tr_id]->dialogNodes.size(); i++)
		{
			// We search the mood of the bad response bad response = -1 / neutral = 0
			if (dialogTrees[tr_id]->karma == dialogTrees[tr_id]->dialogNodes[i]->karma)
			{
				currentNode = dialogTrees[tr_id]->dialogNodes[i]; //This node is the bad response from the npc
			}
		}
	}
	//Put the player's name in the lines of the npc dialog
	// NOTE(review): the replacement name is hard-coded; presumably it should
	// come from a player profile — confirm with the callers.
	while(currentNode->text.find("PLAYERNAME") != std::string::npos)
	{
		currentNode->text.replace(currentNode->text.find("PLAYERNAME"), 10, "Ivan");
	}
	// Print the dialog in the screen
	BlitDialog();
}
void j1DialogSystem::BlitDialog()
{
App->ui_manager->AddLabel(150, 180, currentNode->text.c_str(), 50, App->ui_manager->screen, WHITE, "fonts/Final_Fantasy_font.ttf", this);
int space = 200;
for (int i = 0; i < currentNode->dialogOptions.size(); i++)
App->ui_manager->AddLabel(150, space += 30, currentNode->dialogOptions[i]->text.c_str(), 45, App->ui_manager->screen, GREEN, "fonts/Final_Fantasy_font.ttf", this);
}
bool j1DialogSystem::CompareKarma()
{
	// True while the current tree's karma is non-negative; a negative value
	// switches PerformDialogue() to the "bad response" branch.
	return dialogTrees[treeid]->karma >= 0;
}
void j1DialogSystem::CheckForKarma(DialogNode* karmaNode)
{
	// Accumulate the karma delta of the option the player just selected
	// (indexed by the last `input`) into the current tree's karma.
	const int delta = karmaNode->dialogOptions[input]->karma;
	dialogTrees[treeid]->karma += delta;
}
bool j1DialogSystem::LoadDialogue(const char* file)
{
	bool ret = true;
	// Parse the dialogue XML; every <dialogtree> child of <dialogue> becomes
	// one heap-allocated DialogTree (freed in CleanUp()).
	pugi::xml_parse_result result = tree_file.load_file(file);
	// was `result == NULL`: use xml_parse_result's idiomatic bool conversion
	if (!result)
	{
		LOG("Could not load map xml file %s. pugi error: %s", file, result.description());
		ret = false;
	}
	else
		LOG("XML was loaded succesfully!");
	for (pugi::xml_node t = tree_file.child("dialogue").child("dialogtree"); t != NULL; t = t.next_sibling("dialogtree"))
	{
		DialogTree* tr = new DialogTree;
		tr->treeid = t.attribute("treeid").as_int();
		tr->karma = t.attribute("karma").as_int();
		// Fill the tree's nodes and, through them, their options.
		LoadTreeData(t, tr);
		dialogTrees.push_back(tr);
	}
	return ret;
}
bool j1DialogSystem::LoadTreeData(pugi::xml_node& trees, DialogTree* oak)
{
	bool ret = true;
	// Read every <node> child of the tree element and append it (with its
	// line, id and karma attributes) to the tree's node list.
	for (pugi::xml_node nodeXml = trees.child("node"); nodeXml != NULL; nodeXml = nodeXml.next_sibling("node"))
	{
		DialogNode* dialogNode = new DialogNode;
		dialogNode->text.assign(nodeXml.attribute("line").as_string());
		dialogNode->id = nodeXml.attribute("id").as_int();
		dialogNode->karma = nodeXml.attribute("karma").as_int();
		// Each node also carries its selectable <option> children.
		LoadNodesDetails(nodeXml, dialogNode);
		oak->dialogNodes.push_back(dialogNode);
	}
	return ret;
}
bool j1DialogSystem::LoadNodesDetails(pugi::xml_node& text_node, DialogNode* npc)
{
	bool ret = true;
	// Read every <option> child of this node: the answer text, the id of the
	// node the answer leads to, and the karma it grants.
	for (pugi::xml_node optionXml = text_node.child("option"); optionXml != NULL; optionXml = optionXml.next_sibling("option"))
	{
		DialogOption* dialogOption = new DialogOption;
		dialogOption->text.assign(optionXml.attribute("line").as_string());
		dialogOption->nextnode = optionXml.attribute("nextnode").as_int();
		dialogOption->karma = optionXml.attribute("karma").as_int();
		npc->dialogOptions.push_back(dialogOption);
	}
	return ret;
}