code (string, 3-1.05M chars) | repo_name (string, 4-116 chars) | path (string, 4-991 chars) | language (string, 9 classes) | license (string, 15 classes) | size (int32, 3-1.05M)
---|---|---|---|---|---
// Copyright 2014 Samsung Electronics Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
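// Per ECMAScript 5.1 section 15.8.2.5: if y is +0 and x is -Infinity,
// Math.atan2(y, x) returns +PI.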
assert(Math.atan2(+0, -Infinity) === Math.PI);
| tilmannOSG/jerryscript | tests/jerry-test-suite/15/15.08/15.08.02/15.08.02.05/15.08.02.05-010.js | JavaScript | apache-2.0 | 653 |
/** @file
A brief file description
@section license License
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//////////////////////////////////////////////////////////////////////
//
// The EThread Class
//
/////////////////////////////////////////////////////////////////////
#include "P_EventSystem.h"
#if HAVE_EVENTFD
#include <sys/eventfd.h>
#endif
struct AIOCallback;
#define MAX_HEARTBEATS_MISSED 10
#define NO_HEARTBEAT -1
#define THREAD_MAX_HEARTBEAT_MSECONDS 60
volatile bool shutdown_event_system = false;
EThread::EThread()
{
memset(thread_private, 0, PER_THREAD_DATA);
}
EThread::EThread(ThreadType att, int anid) : id(anid), tt(att)
{
ethreads_to_be_signalled = (EThread **)ats_malloc(MAX_EVENT_THREADS * sizeof(EThread *));
memset(ethreads_to_be_signalled, 0, MAX_EVENT_THREADS * sizeof(EThread *));
memset(thread_private, 0, PER_THREAD_DATA);
#if HAVE_EVENTFD
evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (evfd < 0) {
if (errno == EINVAL) { // flags invalid for kernel <= 2.6.26
evfd = eventfd(0, 0);
if (evfd < 0) {
Fatal("EThread::EThread: %d=eventfd(0,0),errno(%d)", evfd, errno);
}
} else {
Fatal("EThread::EThread: %d=eventfd(0,EFD_NONBLOCK | EFD_CLOEXEC),errno(%d)", evfd, errno);
}
}
#elif TS_USE_PORT
/* Solaris ports requires no crutches to do cross thread signaling.
* We'll just port_send the event straight over the port.
*/
#else
ink_release_assert(pipe(evpipe) >= 0);
fcntl(evpipe[0], F_SETFD, FD_CLOEXEC);
fcntl(evpipe[0], F_SETFL, O_NONBLOCK);
fcntl(evpipe[1], F_SETFD, FD_CLOEXEC);
fcntl(evpipe[1], F_SETFL, O_NONBLOCK);
#endif
}
EThread::EThread(ThreadType att, Event *e) : tt(att), start_event(e)
{
ink_assert(att == DEDICATED);
memset(thread_private, 0, PER_THREAD_DATA);
}
// Provide a destructor so that SDK functions which create and destroy
// threads won't have to deal with EThread memory deallocation.
EThread::~EThread()
{
if (n_ethreads_to_be_signalled > 0) {
flush_signals(this);
}
ats_free(ethreads_to_be_signalled);
// TODO: This can't be deleted ....
// delete[]l1_hash;
}
bool
EThread::is_event_type(EventType et)
{
return (event_types & (1 << static_cast<int>(et))) != 0;
}
void
EThread::set_event_type(EventType et)
{
event_types |= (1 << static_cast<int>(et));
}
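// Dispatch a single event: try to take the continuation's lock, re-queueing the
// event with a short retry delay if the lock is contended; otherwise invoke the
// continuation and either reschedule the event (periodic) or free it.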
void
EThread::process_event(Event *e, int calling_code)
{
ink_assert((!e->in_the_prot_queue && !e->in_the_priority_queue));
MUTEX_TRY_LOCK_FOR(lock, e->mutex, this, e->continuation);
if (!lock.is_locked()) {
e->timeout_at = cur_time + DELAY_FOR_RETRY;
EventQueueExternal.enqueue_local(e);
} else {
if (e->cancelled) {
free_event(e);
return;
}
Continuation *c_temp = e->continuation;
e->continuation->handleEvent(calling_code, e);
ink_assert(!e->in_the_priority_queue);
ink_assert(c_temp == e->continuation);
MUTEX_RELEASE(lock);
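    // Periodic event: reschedule it. Negative periods keep their "negative event"
    // (polling) semantics; positive periods are re-queued relative to the current time.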
if (e->period) {
if (!e->in_the_prot_queue && !e->in_the_priority_queue) {
if (e->period < 0) {
e->timeout_at = e->period;
} else {
this->get_hrtime_updated();
e->timeout_at = cur_time + e->period;
if (e->timeout_at < cur_time) {
e->timeout_at = cur_time;
}
}
EventQueueExternal.enqueue_local(e);
}
} else if (!e->in_the_prot_queue && !e->in_the_priority_queue) {
free_event(e);
}
}
}
//
// void EThread::execute()
//
// Execute loops forever on:
// Find the earliest event.
// Sleep until the event time or until an earlier event is inserted
// When it's time for the event, try to get the appropriate continuation
// lock. If successful, call the continuation, otherwise put the event back
// into the queue.
//
void
EThread::execute()
{
// Do the start event first.
// coverity[lock]
if (start_event) {
MUTEX_TAKE_LOCK_FOR(start_event->mutex, this, start_event->continuation);
start_event->continuation->handleEvent(EVENT_IMMEDIATE, start_event);
MUTEX_UNTAKE_LOCK(start_event->mutex, this);
free_event(start_event);
start_event = nullptr;
}
switch (tt) {
case REGULAR: {
Event *e;
Que(Event, link) NegativeQueue;
ink_hrtime next_time = 0;
// give priority to immediate events
for (;;) {
if (unlikely(shutdown_event_system == true)) {
return;
}
// execute all the available external events that have
// already been dequeued
cur_time = Thread::get_hrtime_updated();
while ((e = EventQueueExternal.dequeue_local())) {
if (e->cancelled) {
free_event(e);
} else if (!e->timeout_at) { // IMMEDIATE
ink_assert(e->period == 0);
process_event(e, e->callback_event);
} else if (e->timeout_at > 0) { // INTERVAL
EventQueue.enqueue(e, cur_time);
} else { // NEGATIVE
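        // Ordered insert: keep the negative queue sorted by timeout_at in
        // descending order.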
Event *p = nullptr;
Event *a = NegativeQueue.head;
while (a && a->timeout_at > e->timeout_at) {
p = a;
a = a->link.next;
}
if (!a) {
NegativeQueue.enqueue(e);
} else {
NegativeQueue.insert(e, p);
}
}
}
bool done_one;
do {
done_one = false;
// execute all the eligible internal events
EventQueue.check_ready(cur_time, this);
while ((e = EventQueue.dequeue_ready(cur_time))) {
ink_assert(e);
ink_assert(e->timeout_at > 0);
if (e->cancelled) {
free_event(e);
} else {
done_one = true;
process_event(e, e->callback_event);
}
}
} while (done_one);
// execute any negative (poll) events
if (NegativeQueue.head) {
if (n_ethreads_to_be_signalled) {
flush_signals(this);
}
// dequeue all the external events and put them in a local
// queue. If there are no external events available, don't
// do a cond_timedwait.
if (!INK_ATOMICLIST_EMPTY(EventQueueExternal.al)) {
EventQueueExternal.dequeue_timed(cur_time, next_time, false);
}
while ((e = EventQueueExternal.dequeue_local())) {
if (!e->timeout_at) {
process_event(e, e->callback_event);
} else {
if (e->cancelled) {
free_event(e);
} else {
            // If it's a negative event, it must be a result of
// a negative event, which has been turned into a
// timed-event (because of a missed lock), executed
// before the poll. So, it must
// be executed in this round (because you can't have
// more than one poll between two executions of a
// negative event)
if (e->timeout_at < 0) {
Event *p = nullptr;
Event *a = NegativeQueue.head;
while (a && a->timeout_at > e->timeout_at) {
p = a;
a = a->link.next;
}
if (!a) {
NegativeQueue.enqueue(e);
} else {
NegativeQueue.insert(e, p);
}
} else {
EventQueue.enqueue(e, cur_time);
}
}
}
}
// execute poll events
while ((e = NegativeQueue.dequeue())) {
process_event(e, EVENT_POLL);
}
if (!INK_ATOMICLIST_EMPTY(EventQueueExternal.al)) {
EventQueueExternal.dequeue_timed(cur_time, next_time, false);
}
} else { // Means there are no negative events
next_time = EventQueue.earliest_timeout();
ink_hrtime sleep_time = next_time - cur_time;
if (sleep_time > THREAD_MAX_HEARTBEAT_MSECONDS * HRTIME_MSECOND) {
next_time = cur_time + THREAD_MAX_HEARTBEAT_MSECONDS * HRTIME_MSECOND;
}
// dequeue all the external events and put them in a local
// queue. If there are no external events available, do a
// cond_timedwait.
if (n_ethreads_to_be_signalled) {
flush_signals(this);
}
EventQueueExternal.dequeue_timed(cur_time, next_time, true);
}
}
}
case DEDICATED: {
break;
}
default:
ink_assert(!"bad case value (execute)");
break;
} /* End switch */
// coverity[missing_unlock]
}
| rahmalik/trafficserver | iocore/eventsystem/UnixEThread.cc | C++ | apache-2.0 | 9,183 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <mesos/log/log.hpp>
#include <process/clock.hpp>
#include <process/future.hpp>
#include <process/process.hpp>
#include <process/time.hpp>
#include <stout/bytes.hpp>
#include <stout/error.hpp>
#include <stout/foreach.hpp>
#include <stout/os.hpp>
#include <stout/stopwatch.hpp>
#include <stout/strings.hpp>
#include <stout/os/read.hpp>
#include "log/tool/initialize.hpp"
#include "log/tool/benchmark.hpp"
#include "logging/logging.hpp"
using namespace process;
using std::cout;
using std::endl;
using std::ifstream;
using std::ofstream;
using std::string;
using std::vector;
using mesos::log::Log;
namespace mesos {
namespace internal {
namespace log {
namespace tool {
Benchmark::Flags::Flags()
{
add(&Flags::quorum,
"quorum",
"Quorum size");
add(&Flags::path,
"path",
"Path to the log");
add(&Flags::servers,
"servers",
"ZooKeeper servers");
add(&Flags::znode,
"znode",
"ZooKeeper znode");
add(&Flags::input,
"input",
"Path to the input trace file. Each line in the trace file\n"
"specifies the size of the append (e.g. 100B, 2MB, etc.)");
add(&Flags::output,
"output",
"Path to the output file");
add(&Flags::type,
"type",
"Type of data to be written (zero, one, random)\n"
" zero: all bits are 0\n"
" one: all bits are 1\n"
" random: all bits are randomly chosen\n",
"random");
add(&Flags::initialize,
"initialize",
"Whether to initialize the log",
true);
}
Try<Nothing> Benchmark::execute(int argc, char** argv)
{
flags.setUsageMessage(
"Usage: " + name() + " [options]\n"
"\n"
"This command is used to do performance test on the\n"
"replicated log. It takes a trace file of write sizes\n"
"and replay that trace to measure the latency of each\n"
"write. The data to be written for each write can be\n"
"specified using the --type flag.\n"
"\n");
// Configure the tool by parsing command line arguments.
if (argc > 0 && argv != nullptr) {
Try<flags::Warnings> load = flags.load(None(), argc, argv);
if (load.isError()) {
return Error(flags.usage(load.error()));
}
if (flags.help) {
return Error(flags.usage());
}
process::initialize();
logging::initialize(argv[0], false, flags);
// Log any flag warnings (after logging is initialized).
foreach (const flags::Warning& warning, load->warnings) {
LOG(WARNING) << warning.message;
}
}
if (flags.quorum.isNone()) {
return Error(flags.usage("Missing required option --quorum"));
}
if (flags.path.isNone()) {
return Error(flags.usage("Missing required option --path"));
}
if (flags.servers.isNone()) {
return Error(flags.usage("Missing required option --servers"));
}
if (flags.znode.isNone()) {
return Error(flags.usage("Missing required option --znode"));
}
if (flags.input.isNone()) {
return Error(flags.usage("Missing required option --input"));
}
if (flags.output.isNone()) {
return Error(flags.usage("Missing required option --output"));
}
// Initialize the log.
if (flags.initialize) {
Initialize initialize;
initialize.flags.path = flags.path;
Try<Nothing> execution = initialize.execute();
if (execution.isError()) {
return Error(execution.error());
}
}
// Create the log.
Log log(
flags.quorum.get(),
flags.path.get(),
flags.servers.get(),
Seconds(10),
flags.znode.get());
// Create the log writer.
Log::Writer writer(&log);
Future<Option<Log::Position>> position = writer.start();
if (!position.await(Seconds(15))) {
return Error("Failed to start a log writer: timed out");
} else if (!position.isReady()) {
return Error("Failed to start a log writer: " +
(position.isFailed()
? position.failure()
: "Discarded future"));
}
// Statistics to output.
vector<Bytes> sizes;
vector<Duration> durations;
vector<Time> timestamps;
// Read sizes from the input trace file.
ifstream input(flags.input.get().c_str());
if (!input.is_open()) {
return Error("Failed to open the trace file " + flags.input.get());
}
string line;
while (getline(input, line)) {
Try<Bytes> size = Bytes::parse(strings::trim(line));
if (size.isError()) {
return Error("Failed to parse the trace file: " + size.error());
}
sizes.push_back(size.get());
}
input.close();
// Generate the data to be written.
vector<string> data;
for (size_t i = 0; i < sizes.size(); i++) {
if (flags.type == "one") {
data.push_back(string(sizes[i].bytes(), static_cast<char>(0xff)));
} else if (flags.type == "random") {
data.push_back(string(sizes[i].bytes(), os::random() % 256));
} else {
data.push_back(string(sizes[i].bytes(), 0));
}
}
Stopwatch stopwatch;
stopwatch.start();
for (size_t i = 0; i < sizes.size(); i++) {
Stopwatch stopwatch;
stopwatch.start();
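    // This inner stopwatch shadows the outer one and times a single append; the
    // outer stopwatch still measures the total time reported after the loop.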
position = writer.append(data[i]);
if (!position.await(Seconds(10))) {
return Error("Failed to append: timed out");
} else if (!position.isReady()) {
return Error("Failed to append: " +
(position.isFailed()
? position.failure()
: "Discarded future"));
} else if (position.get().isNone()) {
return Error("Failed to append: exclusive write promise lost");
}
durations.push_back(stopwatch.elapsed());
timestamps.push_back(Clock::now());
}
cout << "Total number of appends: " << sizes.size() << endl;
cout << "Total time used: " << stopwatch.elapsed() << endl;
  // Output statistics.
ofstream output(flags.output.get().c_str());
if (!output.is_open()) {
return Error("Failed to open the output file " + flags.output.get());
}
for (size_t i = 0; i < sizes.size(); i++) {
output << timestamps[i]
<< " Appended " << sizes[i].bytes() << " bytes"
<< " in " << durations[i].ms() << " ms" << endl;
}
return Nothing();
}
} // namespace tool {
} // namespace log {
} // namespace internal {
} // namespace mesos {
| shakamunyi/mesos | src/log/tool/benchmark.cpp | C++ | apache-2.0 | 7,142 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
namespace Microsoft.Azure.Management.Automation.Models
{
using Microsoft.Azure;
using Microsoft.Azure.Management;
using Microsoft.Azure.Management.Automation;
using Newtonsoft.Json;
using System.Linq;
/// <summary>
/// The connection type property associated with the entity.
/// </summary>
public partial class ConnectionTypeAssociationProperty
{
/// <summary>
/// Initializes a new instance of the ConnectionTypeAssociationProperty
/// class.
/// </summary>
public ConnectionTypeAssociationProperty()
{
CustomInit();
}
/// <summary>
/// Initializes a new instance of the ConnectionTypeAssociationProperty
/// class.
/// </summary>
/// <param name="name">Gets or sets the name of the connection
/// type.</param>
public ConnectionTypeAssociationProperty(string name = default(string))
{
Name = name;
CustomInit();
}
/// <summary>
/// An initialization method that performs custom operations like setting defaults
/// </summary>
partial void CustomInit();
/// <summary>
/// Gets or sets the name of the connection type.
/// </summary>
[JsonProperty(PropertyName = "name")]
public string Name { get; set; }
}
}
| SiddharthChatrolaMs/azure-sdk-for-net | src/SDKs/Automation/Management.Automation/Generated/Models/ConnectionTypeAssociationProperty.cs | C# | apache-2.0 | 1,705 |
/*
Copyright 2007 Brian Tanner
brian@tannerpages.com
http://brian.tannerpages.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rlVizLib.visualization;
import java.util.Observable;
import rlVizLib.visualization.interfaces.AgentOnValueFunctionDataProvider;
import rlVizLib.utilities.UtilityShop;
import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.geom.Rectangle2D;
import java.util.Observer;
import org.rlcommunity.rlglue.codec.types.Reward_observation_action_terminal;
import rlVizLib.general.TinyGlue;
import rlVizLib.visualization.interfaces.GlueStateProvider;
public class AgentOnValueFunctionVizComponent implements SelfUpdatingVizComponent, Observer {
private VizComponentChangeListener theChangeListener;
private AgentOnValueFunctionDataProvider dataProvider;
private boolean enabled = true;
public AgentOnValueFunctionVizComponent(AgentOnValueFunctionDataProvider dataProvider, TinyGlue theGlueState) {
this.dataProvider = dataProvider;
theGlueState.addObserver(this);
}
public void setEnabled(boolean newEnableValue) {
if (newEnableValue == false && this.enabled) {
disable();
}
if (newEnableValue == true && !this.enabled) {
enable();
}
}
private void disable() {
enabled = false;
theChangeListener.vizComponentChanged(this);
}
private void enable() {
enabled = true;
}
public void render(Graphics2D g) {
if (!enabled) {
Color myClearColor = new Color(0.0f, 0.0f, 0.0f, 0.0f);
g.setColor(myClearColor);
g.setBackground(myClearColor);
g.clearRect(0, 0, 1, 1);
return;
}
dataProvider.updateAgentState();
g.setColor(Color.BLUE);
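        // Normalize the agent's (x, y) state into [0, 1] coordinates and draw a
        // small square centered on that point.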
double transX = UtilityShop.normalizeValue(dataProvider.getCurrentStateInDimension(0),
dataProvider.getMinValueForDim(0),
dataProvider.getMaxValueForDim(0));
double transY = UtilityShop.normalizeValue(dataProvider.getCurrentStateInDimension(1),
dataProvider.getMinValueForDim(1),
dataProvider.getMaxValueForDim(1));
Rectangle2D agentRect = new Rectangle2D.Double(transX-.01, transY-.01, .02, .02);
g.fill(agentRect);
}
public void setVizComponentChangeListener(VizComponentChangeListener theChangeListener) {
this.theChangeListener = theChangeListener;
}
public void update(Observable o, Object theEvent) {
if (theChangeListener != null) {
theChangeListener.vizComponentChanged(this);
}
}
}
| cosmoharrigan/rl-viz | projects/rlVizLibJava/src/rlVizLib/visualization/AgentOnValueFunctionVizComponent.java | Java | apache-2.0 | 3,132 |
package junit.runner;
/**
* This class defines the current version of JUnit
*/
public class Version {
private Version() {
// don't instantiate
}
public static String id() {
return "4.12";
}
public static void main(String[] args) {
System.out.println(id());
}
}
| tascape/th-junit4 | src/main/java/junit/runner/Version.java | Java | apache-2.0 | 279 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/importexport/model/GetStatusResult.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <utility>
using namespace Aws::ImportExport::Model;
using namespace Aws::Utils::Xml;
using namespace Aws::Utils::Logging;
using namespace Aws::Utils;
using namespace Aws;
GetStatusResult::GetStatusResult() :
m_jobType(JobType::NOT_SET),
m_errorCount(0)
{
}
GetStatusResult::GetStatusResult(const Aws::AmazonWebServiceResult<XmlDocument>& result) :
m_jobType(JobType::NOT_SET),
m_errorCount(0)
{
*this = result;
}
GetStatusResult& GetStatusResult::operator =(const Aws::AmazonWebServiceResult<XmlDocument>& result)
{
const XmlDocument& xmlDocument = result.GetPayload();
XmlNode rootNode = xmlDocument.GetRootElement();
XmlNode resultNode = rootNode;
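  // The payload may arrive wrapped in a response element; descend to the
  // GetStatusResult node if the root is not already it.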
if (!rootNode.IsNull() && (rootNode.GetName() != "GetStatusResult"))
{
resultNode = rootNode.FirstChild("GetStatusResult");
}
if(!resultNode.IsNull())
{
XmlNode jobIdNode = resultNode.FirstChild("JobId");
if(!jobIdNode.IsNull())
{
m_jobId = Aws::Utils::Xml::DecodeEscapedXmlText(jobIdNode.GetText());
}
XmlNode jobTypeNode = resultNode.FirstChild("JobType");
if(!jobTypeNode.IsNull())
{
m_jobType = JobTypeMapper::GetJobTypeForName(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(jobTypeNode.GetText()).c_str()).c_str());
}
XmlNode locationCodeNode = resultNode.FirstChild("LocationCode");
if(!locationCodeNode.IsNull())
{
m_locationCode = Aws::Utils::Xml::DecodeEscapedXmlText(locationCodeNode.GetText());
}
XmlNode locationMessageNode = resultNode.FirstChild("LocationMessage");
if(!locationMessageNode.IsNull())
{
m_locationMessage = Aws::Utils::Xml::DecodeEscapedXmlText(locationMessageNode.GetText());
}
XmlNode progressCodeNode = resultNode.FirstChild("ProgressCode");
if(!progressCodeNode.IsNull())
{
m_progressCode = Aws::Utils::Xml::DecodeEscapedXmlText(progressCodeNode.GetText());
}
XmlNode progressMessageNode = resultNode.FirstChild("ProgressMessage");
if(!progressMessageNode.IsNull())
{
m_progressMessage = Aws::Utils::Xml::DecodeEscapedXmlText(progressMessageNode.GetText());
}
XmlNode carrierNode = resultNode.FirstChild("Carrier");
if(!carrierNode.IsNull())
{
m_carrier = Aws::Utils::Xml::DecodeEscapedXmlText(carrierNode.GetText());
}
XmlNode trackingNumberNode = resultNode.FirstChild("TrackingNumber");
if(!trackingNumberNode.IsNull())
{
m_trackingNumber = Aws::Utils::Xml::DecodeEscapedXmlText(trackingNumberNode.GetText());
}
XmlNode logBucketNode = resultNode.FirstChild("LogBucket");
if(!logBucketNode.IsNull())
{
m_logBucket = Aws::Utils::Xml::DecodeEscapedXmlText(logBucketNode.GetText());
}
XmlNode logKeyNode = resultNode.FirstChild("LogKey");
if(!logKeyNode.IsNull())
{
m_logKey = Aws::Utils::Xml::DecodeEscapedXmlText(logKeyNode.GetText());
}
XmlNode errorCountNode = resultNode.FirstChild("ErrorCount");
if(!errorCountNode.IsNull())
{
m_errorCount = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(errorCountNode.GetText()).c_str()).c_str());
}
XmlNode signatureNode = resultNode.FirstChild("Signature");
if(!signatureNode.IsNull())
{
m_signature = Aws::Utils::Xml::DecodeEscapedXmlText(signatureNode.GetText());
}
XmlNode signatureFileContentsNode = resultNode.FirstChild("SignatureFileContents");
if(!signatureFileContentsNode.IsNull())
{
m_signatureFileContents = Aws::Utils::Xml::DecodeEscapedXmlText(signatureFileContentsNode.GetText());
}
XmlNode currentManifestNode = resultNode.FirstChild("CurrentManifest");
if(!currentManifestNode.IsNull())
{
m_currentManifest = Aws::Utils::Xml::DecodeEscapedXmlText(currentManifestNode.GetText());
}
XmlNode creationDateNode = resultNode.FirstChild("CreationDate");
if(!creationDateNode.IsNull())
{
m_creationDate = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(creationDateNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
}
XmlNode artifactListNode = resultNode.FirstChild("ArtifactList");
if(!artifactListNode.IsNull())
{
XmlNode artifactListMember = artifactListNode.FirstChild("member");
while(!artifactListMember.IsNull())
{
m_artifactList.push_back(artifactListMember);
artifactListMember = artifactListMember.NextNode("member");
}
}
}
if (!rootNode.IsNull()) {
XmlNode responseMetadataNode = rootNode.FirstChild("ResponseMetadata");
m_responseMetadata = responseMetadataNode;
AWS_LOGSTREAM_DEBUG("Aws::ImportExport::Model::GetStatusResult", "x-amzn-request-id: " << m_responseMetadata.GetRequestId() );
}
return *this;
}
| jt70471/aws-sdk-cpp | aws-cpp-sdk-importexport/source/model/GetStatusResult.cpp | C++ | apache-2.0 | 5,140 |
/*
* Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
using System.IO;
using System.Xml;
using System.Text;
using Amazon.S3.Util;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.Internal.Transform;
using System.Globalization;
using Amazon.Util;
#pragma warning disable 1591
namespace Amazon.S3.Model.Internal.MarshallTransformations
{
/// <summary>
/// Put Object Acl Request Marshaller
/// </summary>
public class PutACLRequestMarshaller : IMarshaller<IRequest, PutACLRequest> ,IMarshaller<IRequest,Amazon.Runtime.AmazonWebServiceRequest>
{
public IRequest Marshall(Amazon.Runtime.AmazonWebServiceRequest input)
{
return this.Marshall((PutACLRequest)input);
}
public IRequest Marshall(PutACLRequest putObjectAclRequest)
{
IRequest request = new DefaultRequest(putObjectAclRequest, "AmazonS3");
request.HttpMethod = "PUT";
if (putObjectAclRequest.IsSetCannedACL())
request.Headers.Add(HeaderKeys.XAmzAclHeader, S3Transforms.ToStringValue(putObjectAclRequest.CannedACL));
// if we are putting the acl onto the bucket, the keyname component will collapse to empty string
request.ResourcePath = string.Format(CultureInfo.InvariantCulture, "/{0}/{1}",
S3Transforms.ToStringValue(putObjectAclRequest.BucketName),
S3Transforms.ToStringValue(putObjectAclRequest.Key));
request.AddSubResource("acl");
if (putObjectAclRequest.IsSetVersionId())
request.AddSubResource("versionId", S3Transforms.ToStringValue(putObjectAclRequest.VersionId));
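            // Serialize the AccessControlPolicy (grants and owner) into the XML request body.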
var stringWriter = new StringWriter(System.Globalization.CultureInfo.InvariantCulture);
using (
var xmlWriter = XmlWriter.Create(stringWriter,
new XmlWriterSettings()
{
Encoding = Encoding.UTF8,
OmitXmlDeclaration = true
}))
{
var accessControlPolicyAccessControlPolicy = putObjectAclRequest.AccessControlList;
if (accessControlPolicyAccessControlPolicy != null)
{
xmlWriter.WriteStartElement("AccessControlPolicy", "");
var accessControlPolicyAccessControlPolicygrantsList = accessControlPolicyAccessControlPolicy.Grants;
if (accessControlPolicyAccessControlPolicygrantsList != null &&
accessControlPolicyAccessControlPolicygrantsList.Count > 0)
{
xmlWriter.WriteStartElement("AccessControlList", "");
foreach (
var accessControlPolicyAccessControlPolicygrantsListValue in
accessControlPolicyAccessControlPolicygrantsList)
{
xmlWriter.WriteStartElement("Grant", "");
if (accessControlPolicyAccessControlPolicygrantsListValue != null)
{
var granteeGrantee = accessControlPolicyAccessControlPolicygrantsListValue.Grantee;
if (granteeGrantee != null)
{
xmlWriter.WriteStartElement("Grantee", "");
if (granteeGrantee.IsSetType())
{
xmlWriter.WriteAttributeString("xsi", "type",
"http://www.w3.org/2001/XMLSchema-instance",
granteeGrantee.Type.ToString());
}
if (granteeGrantee.IsSetDisplayName())
{
xmlWriter.WriteElementString("DisplayName", "",
S3Transforms.ToXmlStringValue(
granteeGrantee.DisplayName));
}
if (granteeGrantee.IsSetEmailAddress())
{
xmlWriter.WriteElementString("EmailAddress", "",
S3Transforms.ToXmlStringValue(
granteeGrantee.EmailAddress));
}
if (granteeGrantee.IsSetCanonicalUser())
{
xmlWriter.WriteElementString("ID", "",
S3Transforms.ToXmlStringValue(
granteeGrantee.CanonicalUser));
}
if (granteeGrantee.IsSetURI())
{
xmlWriter.WriteElementString("URI", "",
S3Transforms.ToXmlStringValue(
granteeGrantee.URI));
}
xmlWriter.WriteEndElement();
}
if (accessControlPolicyAccessControlPolicygrantsListValue.IsSetPermission())
{
xmlWriter.WriteElementString("Permission", "",
S3Transforms.ToXmlStringValue(
accessControlPolicyAccessControlPolicygrantsListValue
.Permission));
}
}
xmlWriter.WriteEndElement();
}
xmlWriter.WriteEndElement();
var ownerOwner = accessControlPolicyAccessControlPolicy.Owner;
if (ownerOwner != null)
{
xmlWriter.WriteStartElement("Owner", "");
if (ownerOwner.IsSetDisplayName())
{
xmlWriter.WriteElementString("DisplayName", "",
S3Transforms.ToXmlStringValue(ownerOwner.DisplayName));
}
if (ownerOwner.IsSetId())
{
xmlWriter.WriteElementString("ID", "", S3Transforms.ToXmlStringValue(ownerOwner.Id));
}
xmlWriter.WriteEndElement();
}
}
xmlWriter.WriteEndElement();
}
}
try
{
var content = stringWriter.ToString();
request.Content = Encoding.UTF8.GetBytes(content);
request.Headers[HeaderKeys.ContentTypeHeader] = "application/xml";
string checksum = AmazonS3Util.GenerateChecksumForContent(content, true);
request.Headers[HeaderKeys.ContentMD5Header] = checksum;
}
catch (EncoderFallbackException e)
{
throw new AmazonServiceException("Unable to marshall request to XML", e);
}
return request;
}
}
}
| rafd123/aws-sdk-net | sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/PutACLRequestMarshaller.cs | C# | apache-2.0 | 8,804 |
/*
* Copyright 2014 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License, version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.netty.handler.codec.http2;
import java.util.Collection;
/**
* Manager for the state of an HTTP/2 connection with the remote end-point.
*/
public interface Http2Connection {
/**
* Listener for life-cycle events for streams in this connection.
*/
interface Listener {
/**
* Notifies the listener that the given stream was added to the connection. This stream may
* not yet be active (i.e. open/half-closed).
*/
void streamAdded(Http2Stream stream);
/**
* Notifies the listener that the given stream was made active (i.e. open in at least one
* direction).
*/
void streamActive(Http2Stream stream);
/**
* Notifies the listener that the given stream is now half-closed. The stream can be
* inspected to determine which side is closed.
*/
void streamHalfClosed(Http2Stream stream);
/**
* Notifies the listener that the given stream is now closed in both directions.
*/
void streamInactive(Http2Stream stream);
/**
* Notifies the listener that the given stream has now been removed from the connection and
* will no longer be returned via {@link Http2Connection#stream(int)}. The connection may
* maintain inactive streams for some time before removing them.
*/
void streamRemoved(Http2Stream stream);
/**
* Notifies the listener that a priority tree parent change has occurred. This method will be invoked
* in a top down order relative to the priority tree. This method will also be invoked after all tree
* structure changes have been made and the tree is in steady state relative to the priority change
* which caused the tree structure to change.
* @param stream The stream which had a parent change (new parent and children will be steady state)
* @param oldParent The old parent which {@code stream} used to be a child of (may be {@code null})
*/
void priorityTreeParentChanged(Http2Stream stream, Http2Stream oldParent);
/**
* Notifies the listener that a parent dependency is about to change
* This is called while the tree is being restructured and so the tree
* structure is not necessarily steady state.
* @param stream The stream which the parent is about to change to {@code newParent}
* @param newParent The stream which will be the parent of {@code stream}
*/
void priorityTreeParentChanging(Http2Stream stream, Http2Stream newParent);
/**
* Notifies the listener that the weight has changed for {@code stream}
* @param stream The stream which the weight has changed
* @param oldWeight The old weight for {@code stream}
*/
void onWeightChanged(Http2Stream stream, short oldWeight);
/**
* Called when a GO_AWAY frame has either been sent or received for the connection.
*/
void goingAway();
}
/**
* A view of the connection from one endpoint (local or remote).
*/
interface Endpoint<F extends Http2FlowController> {
/**
* Returns the next valid streamId for this endpoint. If negative, the stream IDs are
* exhausted for this endpoint an no further streams may be created.
*/
int nextStreamId();
/**
* Indicates whether the given streamId is from the set of IDs used by this endpoint to
* create new streams.
*/
boolean createdStreamId(int streamId);
/**
* Indicates whether or not this endpoint is currently accepting new streams. This will be
         * false if {@link #numActiveStreams()} + 1 >= {@link #maxStreams()} or if the stream IDs
* for this endpoint have been exhausted (i.e. {@link #nextStreamId()} < 0).
*/
boolean acceptingNewStreams();
/**
* Creates a stream initiated by this endpoint. This could fail for the following reasons:
* <ul>
* <li>The requested stream ID is not the next sequential ID for this endpoint.</li>
* <li>The stream already exists.</li>
* <li>The number of concurrent streams is above the allowed threshold for this endpoint.</li>
* <li>The connection is marked as going away.</li>
* </ul>
* <p>
* The caller is expected to {@link Http2Stream#open()} the stream.
* @param streamId The ID of the stream
* @see Http2Stream#open()
* @see Http2Stream#open(boolean)
*/
Http2Stream createStream(int streamId) throws Http2Exception;
/**
* Creates a push stream in the reserved state for this endpoint and notifies all listeners.
* This could fail for the following reasons:
* <ul>
* <li>Server push is not allowed to the opposite endpoint.</li>
* <li>The requested stream ID is not the next sequential stream ID for this endpoint.</li>
* <li>The number of concurrent streams is above the allowed threshold for this endpoint.</li>
* <li>The connection is marked as going away.</li>
* <li>The parent stream ID does not exist or is not open from the side sending the push
* promise.</li>
* <li>Could not set a valid priority for the new stream.</li>
* </ul>
*
* @param streamId the ID of the push stream
* @param parent the parent stream used to initiate the push stream.
*/
Http2Stream reservePushStream(int streamId, Http2Stream parent) throws Http2Exception;
/**
* Indicates whether or not this endpoint is the server-side of the connection.
*/
boolean isServer();
/**
* Sets whether server push is allowed to this endpoint.
*/
void allowPushTo(boolean allow);
/**
* Gets whether or not server push is allowed to this endpoint. This is always false
* for a server endpoint.
*/
boolean allowPushTo();
/**
* Gets the number of currently active streams that were created by this endpoint.
*/
int numActiveStreams();
/**
* Gets the maximum number of concurrent streams allowed by this endpoint.
*/
int maxStreams();
/**
* Sets the maximum number of concurrent streams allowed by this endpoint.
*/
void maxStreams(int maxStreams);
/**
* Gets the ID of the stream last successfully created by this endpoint.
*/
int lastStreamCreated();
/**
* Gets the last stream created by this endpoint that is "known" by the opposite endpoint.
* If a GOAWAY was received for this endpoint, this will be the last stream ID from the
* GOAWAY frame. Otherwise, this will be same as {@link #lastStreamCreated()}.
*/
int lastKnownStream();
/**
* Gets the flow controller for this endpoint.
*/
F flowController();
/**
* Sets the flow controller for this endpoint.
*/
void flowController(F flowController);
/**
* Gets the {@link Endpoint} opposite this one.
*/
Endpoint<? extends Http2FlowController> opposite();
}
/**
* Adds a listener of stream life-cycle events. Adding the same listener multiple times has no effect.
*/
void addListener(Listener listener);
/**
* Removes a listener of stream life-cycle events.
*/
void removeListener(Listener listener);
/**
* Attempts to get the stream for the given ID. If it doesn't exist, throws.
*/
Http2Stream requireStream(int streamId) throws Http2Exception;
/**
* Gets the stream if it exists. If not, returns {@code null}.
*/
Http2Stream stream(int streamId);
/**
* Gets the stream object representing the connection, itself (i.e. stream zero). This object
* always exists.
*/
Http2Stream connectionStream();
/**
* Gets the number of streams that are currently either open or half-closed.
*/
int numActiveStreams();
/**
* Gets all streams that are currently either open or half-closed. The returned collection is
* sorted by priority.
*/
Collection<Http2Stream> activeStreams();
/**
* Indicates whether or not the local endpoint for this connection is the server.
*/
boolean isServer();
/**
* Gets a view of this connection from the local {@link Endpoint}.
*/
Endpoint<Http2LocalFlowController> local();
/**
* Creates a new stream initiated by the local endpoint
* @see Endpoint#createStream(int)
*/
Http2Stream createLocalStream(int streamId) throws Http2Exception;
/**
* Gets a view of this connection from the remote {@link Endpoint}.
*/
Endpoint<Http2RemoteFlowController> remote();
/**
* Creates a new stream initiated by the remote endpoint.
* @see Endpoint#createStream(int)
*/
Http2Stream createRemoteStream(int streamId) throws Http2Exception;
/**
* Indicates whether or not a {@code GOAWAY} was received from the remote endpoint.
*/
boolean goAwayReceived();
/**
* Indicates that a {@code GOAWAY} was received from the remote endpoint and sets the last known stream.
*/
void goAwayReceived(int lastKnownStream);
/**
* Indicates whether or not a {@code GOAWAY} was sent to the remote endpoint.
*/
boolean goAwaySent();
/**
* Indicates that a {@code GOAWAY} was sent to the remote endpoint and sets the last known stream.
*/
void goAwaySent(int lastKnownStream);
/**
* Indicates whether or not either endpoint has received a GOAWAY.
*/
boolean isGoAway();
}
| nat2013/netty | codec-http2/src/main/java/io/netty/handler/codec/http2/Http2Connection.java | Java | apache-2.0 | 10,653 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.util.io;
import com.intellij.openapi.util.ThreadLocalCachedValue;
import com.intellij.openapi.util.ThrowableComputable;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.vfs.CharsetToolkit;
import com.intellij.util.SystemProperties;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import java.io.*;
import java.lang.reflect.Field;
import java.nio.charset.Charset;
public class IOUtil {
public static final boolean ourByteBuffersUseNativeByteOrder = SystemProperties.getBooleanProperty("idea.bytebuffers.use.native.byte.order", true);
private static final int STRING_HEADER_SIZE = 1;
private static final int STRING_LENGTH_THRESHOLD = 255;
@NonNls private static final String LONGER_THAN_64K_MARKER = "LONGER_THAN_64K";
private IOUtil() {}
public static String readString(@NotNull DataInput stream) throws IOException {
int length = stream.readInt();
if (length == -1) return null;
if (length == 0) return "";
byte[] bytes = new byte[length*2];
stream.readFully(bytes);
return new String(bytes, 0, length*2, CharsetToolkit.UTF_16BE_CHARSET);
}
public static void writeString(String s, @NotNull DataOutput stream) throws IOException {
if (s == null) {
stream.writeInt(-1);
return;
}
stream.writeInt(s.length());
if (s.isEmpty()) {
return;
}
char[] chars = s.toCharArray();
byte[] bytes = new byte[chars.length * 2];
for (int i = 0, i2 = 0; i < chars.length; i++, i2 += 2) {
char aChar = chars[i];
bytes[i2] = (byte)(aChar >>> 8 & 0xFF);
bytes[i2 + 1] = (byte)(aChar & 0xFF);
}
stream.write(bytes);
}
public static void writeUTFTruncated(@NotNull DataOutput stream, @NotNull String text) throws IOException {
    // we should not compare the number of symbols to 65535 -> it is the number of bytes that should be compared
    // (4 bytes per symbol is a rough estimation)
if (text.length() > 16383) {
stream.writeUTF(text.substring(0, 16383));
}
else {
stream.writeUTF(text);
}
}
private static final ThreadLocalCachedValue<byte[]> ourReadWriteBuffersCache = new ThreadLocalCachedValue<byte[]>() {
@Override
protected byte[] create() {
return allocReadWriteUTFBuffer();
}
};
public static void writeUTF(@NotNull DataOutput storage, @NotNull final String value) throws IOException {
writeUTFFast(ourReadWriteBuffersCache.getValue(), storage, value);
}
public static String readUTF(@NotNull DataInput storage) throws IOException {
return readUTFFast(ourReadWriteBuffersCache.getValue(), storage);
}
@NotNull
public static byte[] allocReadWriteUTFBuffer() {
return new byte[STRING_LENGTH_THRESHOLD + STRING_HEADER_SIZE];
}
public static void writeUTFFast(@NotNull byte[] buffer, @NotNull DataOutput storage, @NotNull final String value) throws IOException {
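    // Fast path: ASCII strings shorter than 255 chars are written as a single length
    // byte followed by the raw bytes; everything else is marked with 0xFF and falls
    // back to writeUTF (or to writeString via the 64K marker for very long strings).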
int len = value.length();
if (len < STRING_LENGTH_THRESHOLD) {
buffer[0] = (byte)len;
boolean isAscii = true;
for (int i = 0; i < len; i++) {
char c = value.charAt(i);
if (c >= 128) {
isAscii = false;
break;
}
buffer[i + STRING_HEADER_SIZE] = (byte)c;
}
if (isAscii) {
storage.write(buffer, 0, len + STRING_HEADER_SIZE);
return;
}
}
storage.writeByte((byte)0xFF);
try {
storage.writeUTF(value);
}
catch (UTFDataFormatException e) {
storage.writeUTF(LONGER_THAN_64K_MARKER);
writeString(value, storage);
}
}
public static final Charset US_ASCII = Charset.forName("US-ASCII");
private static final ThreadLocalCachedValue<char[]> spareBufferLocal = new ThreadLocalCachedValue<char[]>() {
@Override
protected char[] create() {
return new char[STRING_LENGTH_THRESHOLD];
}
};
public static String readUTFFast(@NotNull byte[] buffer, @NotNull DataInput storage) throws IOException {
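    // Mirror of writeUTFFast: a first byte of 0xFF selects the writeUTF/writeString
    // fallback, any other value is the length of an inline ASCII string.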
int len = 0xFF & (int)storage.readByte();
if (len == 0xFF) {
String result = storage.readUTF();
if (LONGER_THAN_64K_MARKER.equals(result)) {
return readString(storage);
}
return result;
}
if (len == 0) return "";
storage.readFully(buffer, 0, len);
char[] chars = spareBufferLocal.getValue();
for(int i = 0; i < len; ++i) chars[i] = (char)(buffer[i] &0xFF);
return new String(chars, 0, len);
}
public static boolean isAscii(@NotNull String str) {
for (int i = 0, length = str.length(); i < length; ++ i) {
if (str.charAt(i) >= 128) return false;
}
return true;
}
public static boolean isAscii(char c) {
return c < 128;
}
public static boolean deleteAllFilesStartingWith(@NotNull File file) {
final String baseName = file.getName();
File parentFile = file.getParentFile();
final File[] files = parentFile != null ? parentFile.listFiles(new FileFilter() {
@Override
public boolean accept(final File pathname) {
return pathname.getName().startsWith(baseName);
}
}): null;
boolean ok = true;
if (files != null) {
for (File f : files) {
ok &= FileUtil.delete(f);
}
}
return ok;
}
public static void syncStream(OutputStream stream) throws IOException {
stream.flush();
try {
Field outField = FilterOutputStream.class.getDeclaredField("out");
outField.setAccessible(true);
while (stream instanceof FilterOutputStream) {
Object o = outField.get(stream);
if (o instanceof OutputStream) {
stream = (OutputStream)o;
} else {
break;
}
}
if (stream instanceof FileOutputStream) {
((FileOutputStream)stream).getFD().sync();
}
}
catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
public static <T> T openCleanOrResetBroken(@NotNull ThrowableComputable<T, IOException> factoryComputable, final File file) throws IOException {
return openCleanOrResetBroken(factoryComputable, new Runnable() {
@Override
public void run() {
deleteAllFilesStartingWith(file);
}
});
}
public static <T> T openCleanOrResetBroken(@NotNull ThrowableComputable<T, IOException> factoryComputable, Runnable cleanupCallback) throws IOException {
for(int i = 0; i < 2; ++i) {
try {
return factoryComputable.compute();
} catch (IOException ex) {
if (i == 1) throw ex;
cleanupCallback.run();
}
}
return null;
}
}
| diorcety/intellij-community | platform/util/src/com/intellij/util/io/IOUtil.java | Java | apache-2.0 | 7,277 |
/*
Copyright 2006 Jerry Huxtable
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.jhlabs.image;
import java.awt.*;
import java.awt.image.*;
/**
* A Filter to pixellate images.
*/
public class BlockFilter extends AbstractBufferedImageOp {
private int blockSize = 2;
/**
* Construct a BlockFilter.
*/
public BlockFilter() {
}
/**
* Construct a BlockFilter.
* @param blockSize the number of pixels along each block edge
*/
public BlockFilter( int blockSize ) {
this.blockSize = blockSize;
}
/**
* Set the pixel block size.
* @param blockSize the number of pixels along each block edge
* @min-value 1
* @max-value 100+
* @see #getBlockSize
*/
public void setBlockSize(int blockSize) {
this.blockSize = blockSize;
}
/**
* Get the pixel block size.
* @return the number of pixels along each block edge
* @see #setBlockSize
*/
public int getBlockSize() {
return blockSize;
}
public BufferedImage filter( BufferedImage src, BufferedImage dst ) {
int width = src.getWidth();
int height = src.getHeight();
int type = src.getType();
WritableRaster srcRaster = src.getRaster();
if ( dst == null )
dst = createCompatibleDestImage( src, null );
int[] pixels = new int[blockSize * blockSize];
for ( int y = 0; y < height; y += blockSize ) {
for ( int x = 0; x < width; x += blockSize ) {
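            // Average the RGB channels over this block, then write the averaged
            // color back to every pixel in the block, preserving each pixel's alpha.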
int w = Math.min( blockSize, width-x );
int h = Math.min( blockSize, height-y );
int t = w*h;
getRGB( src, x, y, w, h, pixels );
int r = 0, g = 0, b = 0;
int argb;
int i = 0;
for ( int by = 0; by < h; by++ ) {
for ( int bx = 0; bx < w; bx++ ) {
argb = pixels[i];
r += (argb >> 16) & 0xff;
g += (argb >> 8) & 0xff;
b += argb & 0xff;
i++;
}
}
argb = ((r/t) << 16) | ((g/t) << 8) | (b/t);
i = 0;
for ( int by = 0; by < h; by++ ) {
for ( int bx = 0; bx < w; bx++ ) {
pixels[i] = (pixels[i] & 0xff000000) | argb;
i++;
}
}
setRGB( dst, x, y, w, h, pixels );
}
}
return dst;
}
public String toString() {
return "Pixellate/Mosaic...";
}
}
| svn2github/pixels | src/main/java/com/jhlabs/image/BlockFilter.java | Java | apache-2.0 | 3,064 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* $Id$ */
package org.apache.fop.pdf;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.commons.io.output.CountingOutputStream;
/**
* Class representing a PDF name object.
*/
public class PDFName extends PDFObject {
private String name;
/**
* Creates a new PDF name object.
* @param name the name value
*/
public PDFName(String name) {
super();
this.name = escapeName(name);
}
private static final String ESCAPED_NAME_CHARS = "/()<>[]%#";
/**
* Escapes a PDF name. It adds the leading slash and escapes characters as necessary.
* @param name the name
* @return the escaped name
*/
static String escapeName(String name) {
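        // e.g. "My Name" becomes "/My#20Name": characters outside 33..126 and the
        // delimiters "/()<>[]%#" are written as #XX hex escapes.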
StringBuilder sb = new StringBuilder(Math.min(16, name.length() + 4));
boolean skipFirst = false;
sb.append('/');
if (name.startsWith("/")) {
skipFirst = true;
}
for (int i = (skipFirst ? 1 : 0), c = name.length(); i < c; i++) {
char ch = name.charAt(i);
if (ch < 33 || ch > 126 || ESCAPED_NAME_CHARS.indexOf(ch) >= 0) {
sb.append('#');
toHex(ch, sb);
} else {
sb.append(ch);
}
}
return sb.toString();
}
private static final char[] DIGITS
= {'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
private static void toHex(char ch, StringBuilder sb) {
if (ch >= 256) {
throw new IllegalArgumentException(
"Only 8-bit characters allowed by this implementation");
}
sb.append(DIGITS[ch >>> 4 & 0x0F]);
sb.append(DIGITS[ch & 0x0F]);
}
/** {@inheritDoc} */
@Override
public String toString() {
return this.name;
}
/**
* Returns the name without the leading slash.
* @return the name without the leading slash
*/
public String getName() {
return this.name.substring(1);
}
/** {@inheritDoc} */
public boolean equals(Object obj) {
if (!(obj instanceof PDFName)) {
return false;
}
PDFName other = (PDFName)obj;
return this.name.equals(other.name);
}
/** {@inheritDoc} */
public int hashCode() {
return name.hashCode();
}
@Override
public int output(OutputStream stream) throws IOException {
CountingOutputStream cout = new CountingOutputStream(stream);
StringBuilder textBuffer = new StringBuilder(64);
textBuffer.append(toString());
PDFDocument.flushTextBuffer(textBuffer, cout);
return cout.getCount();
}
@Override
public void outputInline(OutputStream out, StringBuilder textBuffer) throws IOException {
if (hasObjectNumber()) {
textBuffer.append(referencePDF());
} else {
textBuffer.append(toString());
}
}
}
| Distrotech/fop | src/java/org/apache/fop/pdf/PDFName.java | Java | apache-2.0 | 3,799 |
package gov.hhs.onc.sdcct.ws.impl;
import gov.hhs.onc.sdcct.logging.impl.TxTaskExecutor;
import org.apache.cxf.endpoint.DeferredConduitSelector;
public class SdcctConduitSelector extends DeferredConduitSelector {
private TxTaskExecutor taskExec;
public SdcctConduitSelector(TxTaskExecutor taskExec) {
super();
this.taskExec = taskExec;
}
public TxTaskExecutor getTaskExecutor() {
return this.taskExec;
}
}
| elizabethso/sdcct | sdcct-core/src/main/java/gov/hhs/onc/sdcct/ws/impl/SdcctConduitSelector.java | Java | apache-2.0 | 455 |
//
// CipherReference.cs - CipherReference implementation for XML Encryption
// http://www.w3.org/2001/04/xmlenc#sec-CipherReference
//
// Author:
// Tim Coleman (tim@timcoleman.com)
//
// Copyright (C) Tim Coleman, 2004
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
#if NET_2_0
using System.Xml;
namespace System.Security.Cryptography.Xml {
public sealed class CipherReference : EncryptedReference {
#region Constructors
public CipherReference ()
: base ()
{
}
public CipherReference (string uri)
: base (uri)
{
}
public CipherReference (string uri, TransformChain tc)
: base (uri, tc)
{
}
#endregion // Constructors
#region Methods
public override XmlElement GetXml ()
{
return GetXml (new XmlDocument ());
}
internal override XmlElement GetXml (XmlDocument document)
{
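			// Emits <CipherReference URI="..."> with an optional <Transforms> child
			// holding each transform in the chain.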
XmlElement xel = document.CreateElement (XmlEncryption.ElementNames.CipherReference, EncryptedXml.XmlEncNamespaceUrl);
xel.SetAttribute (XmlEncryption.AttributeNames.URI, Uri);
if (TransformChain != null && TransformChain.Count > 0) {
XmlElement xtr = document.CreateElement (XmlEncryption.ElementNames.Transforms, EncryptedXml.XmlEncNamespaceUrl);
foreach (Transform t in TransformChain)
xtr.AppendChild (document.ImportNode (t.GetXml (), true));
xel.AppendChild (xtr);
}
return xel;
}
public override void LoadXml (XmlElement value)
{
if (value == null)
throw new ArgumentNullException ("value");
if ((value.LocalName != XmlEncryption.ElementNames.CipherReference) || (value.NamespaceURI != EncryptedXml.XmlEncNamespaceUrl))
throw new CryptographicException ("Malformed CipherReference element.");
base.LoadXml (value);
}
#endregion // Methods
}
}
#endif
| symplified/Symplified.Auth | lib/mono/mcs/class/System.Security/System.Security.Cryptography.Xml/CipherReference.cs | C# | apache-2.0 | 2,802 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.curator.x.async.modeled.details;
import org.apache.curator.x.async.AsyncStage;
import org.apache.zookeeper.WatchedEvent;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
class ModelStage<T> extends CompletableFuture<T> implements AsyncStage<T>
{
private final CompletionStage<WatchedEvent> event;
static <U> ModelStage<U> make()
{
return new ModelStage<>(null);
}
static <U> ModelStage<U> make(CompletionStage<WatchedEvent> event)
{
return new ModelStage<>(event);
}
static <U> ModelStage<U> completed(U value)
{
ModelStage<U> stage = new ModelStage<>(null);
stage.complete(value);
return stage;
}
static <U> ModelStage<U> exceptionally(Exception e)
{
ModelStage<U> stage = new ModelStage<>(null);
stage.completeExceptionally(e);
return stage;
}
static <U> ModelStage<U> async(Executor executor)
{
return new AsyncModelStage<>(executor);
}
static <U> ModelStage<U> asyncCompleted(U value, Executor executor)
{
ModelStage<U> stage = new AsyncModelStage<>(executor);
stage.complete(value);
return stage;
}
static <U> ModelStage<U> asyncExceptionally(Exception e, Executor executor)
{
ModelStage<U> stage = new AsyncModelStage<>(executor);
stage.completeExceptionally(e);
return stage;
}
@Override
public CompletionStage<WatchedEvent> event()
{
return event;
}
private ModelStage(CompletionStage<WatchedEvent> event)
{
this.event = event;
}
private static class AsyncModelStage<U> extends ModelStage<U>
{
private final Executor executor;
public AsyncModelStage(Executor executor)
{
super(null);
this.executor = executor;
}
@Override
public <U1> CompletableFuture<U1> thenApplyAsync(Function<? super U, ? extends U1> fn)
{
return super.thenApplyAsync(fn, executor);
}
@Override
public CompletableFuture<Void> thenAcceptAsync(Consumer<? super U> action)
{
return super.thenAcceptAsync(action, executor);
}
@Override
public CompletableFuture<Void> thenRunAsync(Runnable action)
{
return super.thenRunAsync(action, executor);
}
@Override
public <U1, V> CompletableFuture<V> thenCombineAsync(CompletionStage<? extends U1> other, BiFunction<? super U, ? super U1, ? extends V> fn)
{
return super.thenCombineAsync(other, fn, executor);
}
@Override
public <U1> CompletableFuture<Void> thenAcceptBothAsync(CompletionStage<? extends U1> other, BiConsumer<? super U, ? super U1> action)
{
return super.thenAcceptBothAsync(other, action, executor);
}
@Override
public CompletableFuture<Void> runAfterBothAsync(CompletionStage<?> other, Runnable action)
{
return super.runAfterBothAsync(other, action, executor);
}
@Override
public <U1> CompletableFuture<U1> applyToEitherAsync(CompletionStage<? extends U> other, Function<? super U, U1> fn)
{
return super.applyToEitherAsync(other, fn, executor);
}
@Override
public CompletableFuture<Void> acceptEitherAsync(CompletionStage<? extends U> other, Consumer<? super U> action)
{
return super.acceptEitherAsync(other, action, executor);
}
@Override
public CompletableFuture<Void> runAfterEitherAsync(CompletionStage<?> other, Runnable action)
{
return super.runAfterEitherAsync(other, action, executor);
}
@Override
public <U1> CompletableFuture<U1> thenComposeAsync(Function<? super U, ? extends CompletionStage<U1>> fn)
{
return super.thenComposeAsync(fn, executor);
}
@Override
public CompletableFuture<U> whenCompleteAsync(BiConsumer<? super U, ? super Throwable> action)
{
return super.whenCompleteAsync(action, executor);
}
@Override
public <U1> CompletableFuture<U1> handleAsync(BiFunction<? super U, Throwable, ? extends U1> fn)
{
return super.handleAsync(fn, executor);
}
}
}
| apache/curator | curator-x-async/src/main/java/org/apache/curator/x/async/modeled/details/ModelStage.java | Java | apache-2.0 | 5,444 |
<?php
/**
* Copyright 2016 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace Google\Cloud\Storage\Tests\System;
use Google\Cloud\Core\Exception\BadRequestException;
use Google\Cloud\Storage\Bucket;
/**
* @group storage
* @group storage-bucket
*/
class ManageBucketsTest extends StorageTestCase
{
public function testListsBuckets()
{
$foundBuckets = [];
$bucketsToCreate = [
uniqid(self::TESTING_PREFIX),
uniqid(self::TESTING_PREFIX)
];
foreach ($bucketsToCreate as $bucketToCreate) {
self::createBucket(self::$client, $bucketToCreate);
}
$buckets = self::$client->buckets(['prefix' => self::TESTING_PREFIX]);
foreach ($buckets as $bucket) {
foreach ($bucketsToCreate as $key => $bucketToCreate) {
if ($bucket->name() === $bucketToCreate) {
$foundBuckets[$key] = $bucket->name();
}
}
}
$this->assertEquals($bucketsToCreate, $foundBuckets);
}
public function testCreatesBucket()
{
$name = uniqid(self::TESTING_PREFIX);
$options = [
'location' => 'ASIA',
'storageClass' => 'NEARLINE',
'versioning' => [
'enabled' => true
]
];
$this->assertFalse(self::$client->bucket($name)->exists());
$bucket = self::createBucket(self::$client, $name, $options);
$this->assertTrue(self::$client->bucket($name)->exists());
$this->assertEquals($name, $bucket->name());
$this->assertEquals($options['location'], $bucket->info()['location']);
$this->assertEquals($options['storageClass'], $bucket->info()['storageClass']);
$this->assertEquals($options['versioning'], $bucket->info()['versioning']);
$this->assertEquals('multi-region', $bucket->info()['locationType']);
}
public function testUpdateBucket()
{
$options = [
'website' => [
'mainPageSuffix' => 'index.html',
'notFoundPage' => '404.html'
]
];
$info = self::$bucket->update($options);
$this->assertEquals($options['website'], $info['website']);
}
/**
* @group storage-bucket-lifecycle
* @dataProvider lifecycleRules
*/
public function testCreateBucketWithLifecycleDeleteRule(array $rule, $isError = false)
{
if ($isError) {
$this->setExpectedException(BadRequestException::class);
}
$lifecycle = Bucket::lifecycle();
$lifecycle->addDeleteRule($rule);
$bucket = self::createBucket(self::$client, uniqid(self::TESTING_PREFIX), [
'lifecycle' => $lifecycle
]);
$this->assertEquals($lifecycle->toArray(), $bucket->info()['lifecycle']);
}
/**
* @group storage-bucket-lifecycle
* @dataProvider lifecycleRules
*/
public function testUpdateBucketWithLifecycleDeleteRule(array $rule, $isError = false)
{
if ($isError) {
$this->setExpectedException(BadRequestException::class);
}
$lifecycle = Bucket::lifecycle();
$lifecycle->addDeleteRule($rule);
$bucket = self::createBucket(self::$client, uniqid(self::TESTING_PREFIX));
$this->assertArrayNotHasKey('lifecycle', $bucket->info());
$bucket->update([
'lifecycle' => $lifecycle
]);
$this->assertEquals($lifecycle->toArray(), $bucket->info()['lifecycle']);
}
public function lifecycleRules()
{
return [
[['age' => 1000]],
[['daysSinceNoncurrentTime' => 25]],
            [['daysSinceNoncurrentTime' => -5], true], // error case
[['noncurrentTimeBefore' => (new \DateTime)->format("Y-m-d")]],
[['noncurrentTimeBefore' => new \DateTime]],
[['noncurrentTimeBefore' => 'this is not a timestamp'], true], // error case
[['customTimeBefore' => (new \DateTime)->format("Y-m-d")]],
[['customTimeBefore' => new \DateTime]],
[['customTimeBefore' => 'this is not a timestamp'], true], // error case
];
}
/**
* @group storage-bucket-lifecycle
*/
public function testUpdateAndClearLifecycle()
{
$lifecycle = self::$bucket->currentLifecycle()
->addDeleteRule([
'age' => 500
]);
$info = self::$bucket->update(['lifecycle' => $lifecycle]);
$this->assertEquals($lifecycle->toArray(), $info['lifecycle']);
$lifecycle = self::$bucket->currentLifecycle()
->clearRules('Delete');
$info = self::$bucket->update(['lifecycle' => $lifecycle]);
$this->assertEmpty($lifecycle->toArray());
$this->assertArrayNotHasKey('lifecycle', $info);
}
public function testReloadBucket()
{
$this->assertEquals('storage#bucket', self::$bucket->reload()['kind']);
}
/**
* @group storageiam
*/
public function testIam()
{
$iam = self::$bucket->iam();
$policy = $iam->policy();
// pop the version off the resourceId to make the assertion below more robust.
$resourceId = explode('#', $policy['resourceId'])[0];
$bucketName = self::$bucket->name();
$this->assertEquals($resourceId, sprintf('projects/_/buckets/%s', $bucketName));
$role = 'roles/storage.admin';
$policy['bindings'][] = [
'role' => $role,
'members' => ['projectOwner:gcloud-php-integration-tests']
];
$iam->setPolicy($policy);
$policy = $iam->reload();
$newBinding = array_filter($policy['bindings'], function ($binding) use ($role) {
return ($binding['role'] === $role);
});
$this->assertCount(1, $newBinding);
$permissions = ['storage.buckets.get'];
$test = $iam->testPermissions($permissions);
$this->assertEquals($permissions, $test);
}
public function testLabels()
{
$bucket = self::$bucket;
$bucket->update([
'labels' => [
'foo' => 'bar'
]
]);
$bucket->reload();
$this->assertEquals($bucket->info()['labels']['foo'], 'bar');
$bucket->update([
'labels' => [
'foo' => 'bat'
]
]);
$bucket->reload();
$this->assertEquals($bucket->info()['labels']['foo'], 'bat');
$bucket->update([
'labels' => [
'foo' => null
]
]);
$bucket->reload();
$this->assertFalse(isset($bucket->info()['labels']['foo']));
}
/**
* @group storage-bucket-location
* @dataProvider locationTypes
*/
public function testBucketLocationType($storageClass, $location, $expectedLocationType, $updateStorageClass)
{
$bucketName = uniqid(self::TESTING_PREFIX);
$bucket = self::createBucket(self::$client, $bucketName, [
'storageClass' => $storageClass,
'location' => $location,
'retentionPolicy' => [
'retentionPeriod' => 1
]
]);
// Test create bucket response
$this->assertEquals($expectedLocationType, $bucket->info()['locationType']);
// Test get bucket response
$this->assertEquals($expectedLocationType, $bucket->reload()['locationType']);
// Test update bucket.
$bucket->update(['storageClass' => $updateStorageClass]);
$bucket->update(['storageClass' => $storageClass]);
$this->assertEquals($expectedLocationType, $bucket->info()['locationType']);
// Test list bucket response
$buckets = iterator_to_array(self::$client->buckets());
$listBucketBucket = current(array_filter($buckets, function ($bucket) use ($bucketName) {
return $bucket->name() === $bucketName;
}));
$this->assertEquals($expectedLocationType, $listBucketBucket->info()['locationType']);
// Test lock retention policy response
$bucket->lockRetentionPolicy();
$this->assertEquals($expectedLocationType, $bucket->info()['locationType']);
}
public function locationTypes()
{
return [
[
'STANDARD',
'us',
'multi-region',
'NEARLINE'
], [
'STANDARD',
'us-central1',
'region',
'NEARLINE'
], [
'COLDLINE',
'nam4',
'dual-region',
'STANDARD'
], [
'ARCHIVE',
'nam4',
'dual-region',
'STANDARD'
]
];
}
}
| googleapis/google-cloud-php-storage | tests/System/ManageBucketsTest.php | PHP | apache-2.0 | 9,520 |
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
// Copyright (c) 2011, 2012 Open Networking Foundation
// Copyright (c) 2012, 2013 Big Switch Networks, Inc.
// This library was generated by the LoxiGen Compiler.
// See the file LICENSE.txt which should have been included in the source distribution
// Automatically generated by LOXI from template of_virtual_class.java
// Do not modify
package org.projectfloodlight.openflow.protocol.ver15;
import org.projectfloodlight.openflow.protocol.*;
import org.projectfloodlight.openflow.protocol.action.*;
import org.projectfloodlight.openflow.protocol.actionid.*;
import org.projectfloodlight.openflow.protocol.bsntlv.*;
import org.projectfloodlight.openflow.protocol.errormsg.*;
import org.projectfloodlight.openflow.protocol.meterband.*;
import org.projectfloodlight.openflow.protocol.instruction.*;
import org.projectfloodlight.openflow.protocol.instructionid.*;
import org.projectfloodlight.openflow.protocol.match.*;
import org.projectfloodlight.openflow.protocol.stat.*;
import org.projectfloodlight.openflow.protocol.oxm.*;
import org.projectfloodlight.openflow.protocol.oxs.*;
import org.projectfloodlight.openflow.protocol.queueprop.*;
import org.projectfloodlight.openflow.types.*;
import org.projectfloodlight.openflow.util.*;
import org.projectfloodlight.openflow.exceptions.*;
import io.netty.buffer.ByteBuf;
abstract class OFGroupModVer15 {
// version: 1.5
final static byte WIRE_VERSION = 6;
final static int MINIMUM_LENGTH = 24;
public final static OFGroupModVer15.Reader READER = new Reader();
static class Reader implements OFMessageReader<OFGroupMod> {
@Override
public OFGroupMod readFrom(ByteBuf bb) throws OFParseError {
if(bb.readableBytes() < MINIMUM_LENGTH)
return null;
int start = bb.readerIndex();
// fixed value property version == 6
byte version = bb.readByte();
if(version != (byte) 0x6)
throw new OFParseError("Wrong version: Expected=OFVersion.OF_15(6), got="+version);
// fixed value property type == 15
byte type = bb.readByte();
if(type != (byte) 0xf)
throw new OFParseError("Wrong type: Expected=OFType.GROUP_MOD(15), got="+type);
int length = U16.f(bb.readShort());
if(length < MINIMUM_LENGTH)
throw new OFParseError("Wrong length: Expected to be >= " + MINIMUM_LENGTH + ", was: " + length);
U32.f(bb.readInt());
short command = bb.readShort();
bb.readerIndex(start);
switch(command) {
case (short) 0x0:
// discriminator value OFGroupModCommand.ADD=0 for class OFGroupAddVer15
return OFGroupAddVer15.READER.readFrom(bb);
case (short) 0x2:
// discriminator value OFGroupModCommand.DELETE=2 for class OFGroupDeleteVer15
return OFGroupDeleteVer15.READER.readFrom(bb);
case (short) 0x1:
// discriminator value OFGroupModCommand.MODIFY=1 for class OFGroupModifyVer15
return OFGroupModifyVer15.READER.readFrom(bb);
case (short) 0x3:
// discriminator value OFGroupModCommand.INSERT_BUCKET=3 for class OFGroupInsertBucketVer15
return OFGroupInsertBucketVer15.READER.readFrom(bb);
case (short) 0x5:
// discriminator value OFGroupModCommand.REMOVE_BUCKET=5 for class OFGroupRemoveBucketVer15
return OFGroupRemoveBucketVer15.READER.readFrom(bb);
default:
throw new OFParseError("Unknown value for discriminator command of class OFGroupModVer15: " + command);
}
}
}
}
| floodlight/loxigen-artifacts | openflowj/gen-src/main/java/org/projectfloodlight/openflow/protocol/ver15/OFGroupModVer15.java | Java | apache-2.0 | 3,873 |
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2019 Serge Rider (serge@jkiss.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.registry.formatter;
import org.jkiss.dbeaver.model.data.DBDDataFormatter;
import org.jkiss.dbeaver.model.data.DBDDataFormatterProfile;
import org.jkiss.dbeaver.model.impl.preferences.SimplePreferenceStore;
import org.jkiss.dbeaver.model.preferences.DBPPreferenceListener;
import org.jkiss.dbeaver.model.preferences.DBPPreferenceStore;
import org.jkiss.dbeaver.model.preferences.DBPPropertyDescriptor;
import org.jkiss.dbeaver.model.struct.DBSTypedObject;
import org.jkiss.dbeaver.utils.PrefUtils;
import org.jkiss.utils.CommonUtils;
import java.io.IOException;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
/**
* DataFormatterProfile
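 * <p>
 * Persists the profile's locale and per-formatter properties in a {@link DBPPreferenceStore}
 * under the {@code dataformat.} key prefix, and rebuilds {@link DBDDataFormatter} instances
 * from those settings.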
*/
public class DataFormatterProfile implements DBDDataFormatterProfile, DBPPreferenceListener {
private static final String PROP_LANGUAGE = "dataformat.profile.language"; //$NON-NLS-1$
private static final String PROP_COUNTRY = "dataformat.profile.country"; //$NON-NLS-1$
private static final String PROP_VARIANT = "dataformat.profile.variant"; //$NON-NLS-1$
public static final String DATAFORMAT_PREFIX = "dataformat."; //$NON-NLS-1$
public static final String DATAFORMAT_TYPE_PREFIX = DATAFORMAT_PREFIX + "type."; //$NON-NLS-1$
private DBPPreferenceStore store;
private String name;
private Locale locale;
public DataFormatterProfile(String profileName, DBPPreferenceStore store)
{
this.name = profileName;
this.store = store;
loadProfile();
}
private void loadProfile()
{
{
String language = store.getString(PROP_LANGUAGE);
String country = store.getString(PROP_COUNTRY);
String variant = store.getString(PROP_VARIANT);
if (CommonUtils.isEmpty(language)) {
this.locale = Locale.getDefault();
} else if (CommonUtils.isEmpty(country)) {
this.locale = new Locale(language);
} else if (CommonUtils.isEmpty(variant)) {
this.locale = new Locale(language, country);
} else {
this.locale = new Locale(language, country, variant);
}
}
}
@Override
public void saveProfile() throws IOException
{
store.setValue(PROP_LANGUAGE, locale.getLanguage());
store.setValue(PROP_COUNTRY, locale.getCountry());
store.setValue(PROP_VARIANT, locale.getVariant());
PrefUtils.savePreferenceStore(store);
}
@Override
public DBPPreferenceStore getPreferenceStore()
{
return store;
}
@Override
public String getProfileName()
{
return name;
}
@Override
public void setProfileName(String name)
{
this.name = name;
}
@Override
public Locale getLocale()
{
return locale;
}
@Override
public void setLocale(Locale locale)
{
this.locale = locale;
}
@Override
public Map<Object, Object> getFormatterProperties(String typeId)
{
DataFormatterDescriptor formatter = DataFormatterRegistry.getInstance().getDataFormatter(typeId);
Map<Object, Object> defaultProperties = formatter.getSample().getDefaultProperties(locale);
Map<Object, Object> formatterProps = new HashMap<>();
for (DBPPropertyDescriptor prop : formatter.getProperties()) {
Object defaultValue = defaultProperties.get(prop.getId());
Object propValue = PrefUtils.getPreferenceValue(
store,
DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId(), prop.getDataType());
if (propValue != null && !CommonUtils.equalObjects(defaultValue, propValue)) {
formatterProps.put(prop.getId(), propValue);
}
}
return formatterProps;
}
@Override
public void setFormatterProperties(String typeId, Map<Object, Object> formatterProps)
{
DataFormatterDescriptor formatter = DataFormatterRegistry.getInstance().getDataFormatter(typeId);
for (DBPPropertyDescriptor prop : formatter.getProperties()) {
Object propValue = formatterProps == null ? null : formatterProps.get(prop.getId());
if (propValue != null) {
PrefUtils.setPreferenceValue(store, DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId(), propValue);
} else {
store.setToDefault(DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId());
}
}
}
@Override
public boolean isOverridesParent()
{
if (store instanceof SimplePreferenceStore) {
SimplePreferenceStore prefStore = (SimplePreferenceStore) store;
if (prefStore.isSet(PROP_LANGUAGE) || prefStore.isSet(PROP_COUNTRY) || prefStore.isSet(PROP_VARIANT)) {
return true;
}
for (DataFormatterDescriptor formatter : DataFormatterRegistry.getInstance().getDataFormatters()) {
for (DBPPropertyDescriptor prop : formatter.getProperties()) {
if (prefStore.isSet(DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId())) {
return true;
}
}
}
return false;
}
return true;
}
@Override
public void reset()
{
if (store instanceof SimplePreferenceStore) {
// Set all formatter properties to default
store.setToDefault(PROP_LANGUAGE);
store.setToDefault(PROP_COUNTRY);
store.setToDefault(PROP_VARIANT);
for (DataFormatterDescriptor formatter : DataFormatterRegistry.getInstance().getDataFormatters()) {
for (DBPPropertyDescriptor prop : formatter.getProperties()) {
store.setToDefault(DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId());
}
}
}
loadProfile();
}
@Override
public DBDDataFormatter createFormatter(String typeId, DBSTypedObject type)
throws IllegalAccessException, InstantiationException, IllegalArgumentException
{
DataFormatterDescriptor descriptor = DataFormatterRegistry.getInstance().getDataFormatter(typeId);
if (descriptor == null) {
throw new IllegalArgumentException("Formatter '" + typeId + "' not found");
}
DBDDataFormatter formatter = descriptor.createFormatter();
Map<Object, Object> defProps = descriptor.getSample().getDefaultProperties(locale);
Map<Object, Object> props = getFormatterProperties(typeId);
Map<Object, Object> formatterProps = new HashMap<>();
if (defProps != null && !defProps.isEmpty()) {
formatterProps.putAll(defProps);
}
if (props != null && !props.isEmpty()) {
formatterProps.putAll(props);
}
formatter.init(type, locale, formatterProps);
return formatter;
}
public static void initDefaultPreferences(DBPPreferenceStore store, Locale locale)
{
for (DataFormatterDescriptor formatter : DataFormatterRegistry.getInstance().getDataFormatters()) {
Map<Object, Object> defaultProperties = formatter.getSample().getDefaultProperties(locale);
Map<Object, Object> formatterProps = new HashMap<>();
for (DBPPropertyDescriptor prop : formatter.getProperties()) {
Object defaultValue = defaultProperties.get(prop.getId());
if (defaultValue != null) {
PrefUtils.setPreferenceDefaultValue(store, DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId(), defaultValue);
}
}
}
}
@Override
public void preferenceChange(PreferenceChangeEvent event) {
if (event.getProperty() != null && event.getProperty().startsWith(DATAFORMAT_PREFIX)) {
// Reload this profile
loadProfile();
}
}
}
| liuyuanyuan/dbeaver | plugins/org.jkiss.dbeaver.model/src/org/jkiss/dbeaver/registry/formatter/DataFormatterProfile.java | Java | apache-2.0 | 8,702 |
package com.planet_ink.coffee_mud.Races;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2004-2015 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
public class GiantAmphibian extends GreatAmphibian
{
@Override public String ID(){ return "GiantAmphibian"; }
@Override public String name(){ return "Giant Amphibian"; }
@Override public int shortestMale(){return 50;}
@Override public int shortestFemale(){return 55;}
@Override public int heightVariance(){return 20;}
@Override public int lightestWeight(){return 1955;}
@Override public int weightVariance(){return 405;}
@Override public long forbiddenWornBits(){return ~(Wearable.WORN_EYES);}
@Override public String racialCategory(){return "Amphibian";}
protected static Vector<RawMaterial> resources=new Vector<RawMaterial>();
@Override
public List<RawMaterial> myResources()
{
synchronized(resources)
{
if(resources.size()==0)
{
for(int i=0;i<25;i++)
resources.addElement(makeResource
("some "+name().toLowerCase(),RawMaterial.RESOURCE_FISH));
for(int i=0;i<15;i++)
resources.addElement(makeResource
("a "+name().toLowerCase()+" hide",RawMaterial.RESOURCE_HIDE));
resources.addElement(makeResource
("some "+name().toLowerCase()+" blood",RawMaterial.RESOURCE_BLOOD));
}
}
return resources;
}
}
| MaxRau/CoffeeMud | com/planet_ink/coffee_mud/Races/GiantAmphibian.java | Java | apache-2.0 | 2,689 |
class Catalog::PlatformsController < Base::PlatformsController
before_filter :find_catalog_and_platform
def index
@platforms = Cms::Relation.all(:params => {:ciId => @catalog.ciId,
:direction => 'from',
:targetClassName => 'catalog.Platform',
                                               :relationShortName => 'ComposedOf'}).map(&:toCi)
render :json => @platforms
end
private
def find_catalog_and_platform
@catalog = Cms::Ci.locate(params[:catalog_id], catalogs_ns_path, 'account.Design') ||
Cms::Ci.locate(params[:catalog_id], private_catalogs_ns_path, 'account.Design')
platform_id = params[:id]
if platform_id.present?
@platform = Cms::Ci.locate(platform_id, catalog_ns_path(@catalog), 'catalog.Platform')
@platform = nil if @platform && @platform.ciClassName != 'catalog.Platform'
end
end
end
| subaan/display | app/controllers/catalog/platforms_controller.rb | Ruby | apache-2.0 | 991 |
package com.mesosphere.sdk.specification.yaml;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
* Raw YAML port.
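 *
 * <p>Illustrative YAML shape (the keys mirror the {@code @JsonProperty} names declared below;
 * the values and the nested vip body are hypothetical):
 * <pre>
 * port: 8080
 * env-key: PORT_API
 * vip:
 *   ...
 * </pre>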
*/
public class RawPort {
private final Integer port;
private final String envKey;
private final RawVip vip;
private RawPort(
@JsonProperty("port") Integer port,
@JsonProperty("env-key") String envKey,
@JsonProperty("vip") RawVip vip) {
this.port = port;
this.envKey = envKey;
this.vip = vip;
}
public Integer getPort() {
return port;
}
public String getEnvKey() {
return envKey;
}
public RawVip getVip() {
return vip;
}
}
| adragomir/dcos-commons | sdk/scheduler/src/main/java/com/mesosphere/sdk/specification/yaml/RawPort.java | Java | apache-2.0 | 682 |
// Package customimagesearch implements the Azure ARM Customimagesearch service API version 1.0.
//
// The Bing Custom Image Search API lets you send an image search query to Bing and get back image search results
// customized to meet your custom search definition.
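//
// A minimal client value can be constructed with New or NewWithBaseURI defined below, for
// example (illustrative only):
//
//   client := customimagesearch.New()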
package customimagesearch
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/Azure/go-autorest/autorest"
)
const (
// DefaultBaseURI is the default URI used for the service Customimagesearch
DefaultBaseURI = "https://api.cognitive.microsoft.com/bingcustomsearch/v7.0"
)
// BaseClient is the base client for Customimagesearch.
type BaseClient struct {
autorest.Client
BaseURI string
}
// New creates an instance of the BaseClient client.
func New() BaseClient {
return NewWithBaseURI(DefaultBaseURI)
}
// NewWithBaseURI creates an instance of the BaseClient client.
func NewWithBaseURI(baseURI string) BaseClient {
return BaseClient{
Client: autorest.NewClientWithUserAgent(UserAgent()),
BaseURI: baseURI,
}
}
| linzhaoming/origin | vendor/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/customimagesearch/client.go | GO | apache-2.0 | 1,734 |
"""Tests for fan platforms."""
import pytest
from homeassistant.components.fan import FanEntity
class BaseFan(FanEntity):
"""Implementation of the abstract FanEntity."""
def __init__(self):
"""Initialize the fan."""
def test_fanentity():
"""Test fan entity methods."""
fan = BaseFan()
assert fan.state == "off"
assert len(fan.speed_list) == 0
assert fan.supported_features == 0
assert fan.capability_attributes == {}
    # Methods not implemented by the base entity should raise NotImplementedError
with pytest.raises(NotImplementedError):
fan.oscillate(True)
with pytest.raises(NotImplementedError):
fan.set_speed("slow")
with pytest.raises(NotImplementedError):
fan.turn_on()
with pytest.raises(NotImplementedError):
fan.turn_off()
| tboyce021/home-assistant | tests/components/fan/test_init.py | Python | apache-2.0 | 781 |
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { Disposable, Command, EventEmitter, Event, workspace, Uri } from 'vscode';
import { Repository, Operation } from './repository';
import { anyEvent, dispose, filterEvent } from './util';
import * as nls from 'vscode-nls';
import { Branch } from './api/git';
const localize = nls.loadMessageBundle();
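/**
 * Status bar entry showing the currently checked out branch (with a rebasing hint when a rebase
 * is in progress); activating it runs the `git.checkout` command for this repository.
 */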
class CheckoutStatusBar {
private _onDidChange = new EventEmitter<void>();
get onDidChange(): Event<void> { return this._onDidChange.event; }
private disposables: Disposable[] = [];
constructor(private repository: Repository) {
repository.onDidRunGitStatus(this._onDidChange.fire, this._onDidChange, this.disposables);
}
get command(): Command | undefined {
const rebasing = !!this.repository.rebaseCommit;
const title = `$(git-branch) ${this.repository.headLabel}${rebasing ? ` (${localize('rebasing', 'Rebasing')})` : ''}`;
return {
command: 'git.checkout',
tooltip: `${this.repository.headLabel}`,
title,
arguments: [this.repository.sourceControl]
};
}
dispose(): void {
this.disposables.forEach(d => d.dispose());
}
}
interface SyncStatusBarState {
enabled: boolean;
isSyncRunning: boolean;
hasRemotes: boolean;
HEAD: Branch | undefined;
}
class SyncStatusBar {
private static StartState: SyncStatusBarState = {
enabled: true,
isSyncRunning: false,
hasRemotes: false,
HEAD: undefined
};
private _onDidChange = new EventEmitter<void>();
get onDidChange(): Event<void> { return this._onDidChange.event; }
private disposables: Disposable[] = [];
private _state: SyncStatusBarState = SyncStatusBar.StartState;
private get state() { return this._state; }
private set state(state: SyncStatusBarState) {
this._state = state;
this._onDidChange.fire();
}
constructor(private repository: Repository) {
repository.onDidRunGitStatus(this.onModelChange, this, this.disposables);
repository.onDidChangeOperations(this.onOperationsChange, this, this.disposables);
const onEnablementChange = filterEvent(workspace.onDidChangeConfiguration, e => e.affectsConfiguration('git.enableStatusBarSync'));
onEnablementChange(this.updateEnablement, this, this.disposables);
this.updateEnablement();
this._onDidChange.fire();
}
private updateEnablement(): void {
const config = workspace.getConfiguration('git', Uri.file(this.repository.root));
const enabled = config.get<boolean>('enableStatusBarSync', true);
this.state = { ... this.state, enabled };
}
private onOperationsChange(): void {
const isSyncRunning = this.repository.operations.isRunning(Operation.Sync) ||
this.repository.operations.isRunning(Operation.Push) ||
this.repository.operations.isRunning(Operation.Pull);
this.state = { ...this.state, isSyncRunning };
}
private onModelChange(): void {
this.state = {
...this.state,
hasRemotes: this.repository.remotes.length > 0,
HEAD: this.repository.HEAD
};
}
get command(): Command | undefined {
if (!this.state.enabled || !this.state.hasRemotes) {
return undefined;
}
const HEAD = this.state.HEAD;
let icon = '$(sync)';
let text = '';
let command = '';
let tooltip = '';
if (HEAD && HEAD.name && HEAD.commit) {
if (HEAD.upstream) {
if (HEAD.ahead || HEAD.behind) {
text += this.repository.syncLabel;
}
const config = workspace.getConfiguration('git', Uri.file(this.repository.root));
const rebaseWhenSync = config.get<string>('rebaseWhenSync');
command = rebaseWhenSync ? 'git.syncRebase' : 'git.sync';
tooltip = localize('sync changes', "Synchronize Changes");
} else {
icon = '$(cloud-upload)';
command = 'git.publish';
tooltip = localize('publish changes', "Publish Changes");
}
} else {
command = '';
tooltip = '';
}
if (this.state.isSyncRunning) {
icon = '$(sync~spin)';
command = '';
tooltip = localize('syncing changes', "Synchronizing Changes...");
}
return {
command,
title: [icon, text].join(' ').trim(),
tooltip,
arguments: [this.repository.sourceControl]
};
}
dispose(): void {
this.disposables.forEach(d => d.dispose());
}
}
export class StatusBarCommands {
private syncStatusBar: SyncStatusBar;
private checkoutStatusBar: CheckoutStatusBar;
private disposables: Disposable[] = [];
constructor(repository: Repository) {
this.syncStatusBar = new SyncStatusBar(repository);
this.checkoutStatusBar = new CheckoutStatusBar(repository);
}
get onDidChange(): Event<void> {
return anyEvent(
this.syncStatusBar.onDidChange,
this.checkoutStatusBar.onDidChange
);
}
get commands(): Command[] {
const result: Command[] = [];
const checkout = this.checkoutStatusBar.command;
if (checkout) {
result.push(checkout);
}
const sync = this.syncStatusBar.command;
if (sync) {
result.push(sync);
}
return result;
}
dispose(): void {
this.syncStatusBar.dispose();
this.checkoutStatusBar.dispose();
this.disposables = dispose(this.disposables);
}
}
| leafclick/intellij-community | plugins/textmate/lib/bundles/git/src/statusbar.ts | TypeScript | apache-2.0 | 5,303 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.distributed;
import static java.util.concurrent.TimeUnit.MINUTES;
import static org.apache.geode.distributed.AbstractLauncher.Status.STOPPED;
import static org.apache.geode.distributed.ConfigurationProperties.CLUSTER_CONFIGURATION_DIR;
import static org.apache.geode.distributed.ConfigurationProperties.DISABLE_AUTO_RECONNECT;
import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
import static org.apache.geode.distributed.internal.ClusterConfigurationService.CLUSTER_CONFIG_DISK_DIR_PREFIX;
import static org.apache.geode.distributed.internal.DistributionConfig.GEMFIRE_PREFIX;
import static org.apache.geode.internal.AvailablePortHelper.getRandomAvailableTCPPorts;
import static org.apache.geode.internal.DistributionLocator.TEST_OVERRIDE_DEFAULT_PORT_PROPERTY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertNotNull;
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.ErrorCollector;
import org.apache.geode.distributed.AbstractLauncher.Status;
import org.apache.geode.distributed.LocatorLauncher.Builder;
import org.apache.geode.distributed.LocatorLauncher.LocatorState;
import org.apache.geode.internal.process.ProcessType;
/**
* Abstract base class for integration tests of {@link LocatorLauncher}.
*
* @since GemFire 8.0
*/
public abstract class LocatorLauncherIntegrationTestCase extends LauncherIntegrationTestCase {
protected volatile int defaultLocatorPort;
protected volatile int nonDefaultLocatorPort;
protected volatile LocatorLauncher launcher;
private volatile File clusterConfigDirectory;
@Rule
public ErrorCollector errorCollector = new ErrorCollector();
@Before
public void setUpAbstractLocatorLauncherIntegrationTestCase() throws Exception {
System.setProperty(GEMFIRE_PREFIX + MCAST_PORT, Integer.toString(0));
clusterConfigDirectory =
temporaryFolder.newFolder(CLUSTER_CONFIG_DISK_DIR_PREFIX + getUniqueName());
int[] ports = getRandomAvailableTCPPorts(2);
defaultLocatorPort = ports[0];
nonDefaultLocatorPort = ports[1];
System.setProperty(TEST_OVERRIDE_DEFAULT_PORT_PROPERTY, String.valueOf(defaultLocatorPort));
}
@After
public void tearDownAbstractLocatorLauncherIntegrationTestCase() throws Exception {
if (launcher != null) {
launcher.stop();
}
}
@Override
protected ProcessType getProcessType() {
return ProcessType.LOCATOR;
}
@Override
protected void givenEmptyWorkingDirectory() {
File[] files = getWorkingDirectory().listFiles();
assertThat(files).hasSize(1);
assertThat(files[0]).isDirectory().isEqualTo(getClusterConfigDirectory());
}
protected LocatorLauncher givenLocatorLauncher() {
return givenLocatorLauncher(newBuilder());
}
private LocatorLauncher givenLocatorLauncher(final Builder builder) {
return builder.build();
}
protected LocatorLauncher givenRunningLocator() {
return givenRunningLocator(newBuilder());
}
protected LocatorLauncher givenRunningLocator(final Builder builder) {
return awaitStart(builder);
}
protected LocatorLauncher awaitStart(final LocatorLauncher launcher) {
await().atMost(2, MINUTES).until(() -> assertThat(isLauncherOnline()).isTrue());
return launcher;
}
protected Locator getLocator() {
return launcher.getLocator();
}
/**
* Returns a new Builder with helpful defaults for safe testing. If you need a Builder in a test
* without any of these defaults then simply use {@code new Builder()} instead.
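   * <p>
   * Illustrative usage only (the port shown is the non-default test port owned by this test case):
   * <pre>
   * LocatorLauncher launcher = newBuilder().setPort(nonDefaultLocatorPort).build();
   * </pre>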
*/
protected Builder newBuilder() {
return new Builder().setMemberName(getUniqueName()).setRedirectOutput(true)
.setWorkingDirectory(getWorkingDirectoryPath())
.set(CLUSTER_CONFIGURATION_DIR, getClusterConfigDirectoryPath())
.set(DISABLE_AUTO_RECONNECT, "true").set(LOG_LEVEL, "config").set(MCAST_PORT, "0");
}
protected LocatorLauncher startLocator() {
return awaitStart(newBuilder());
}
protected LocatorLauncher startLocator(final Builder builder) {
return awaitStart(builder);
}
protected void stopLocator() {
assertThat(launcher.stop().getStatus()).isEqualTo(STOPPED);
}
private LocatorLauncher awaitStart(final Builder builder) {
launcher = builder.build();
assertThat(launcher.start().getStatus()).isEqualTo(Status.ONLINE);
return awaitStart(launcher);
}
private File getClusterConfigDirectory() {
return clusterConfigDirectory;
}
private String getClusterConfigDirectoryPath() {
try {
return clusterConfigDirectory.getCanonicalPath();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private boolean isLauncherOnline() {
LocatorState locatorState = launcher.status();
assertNotNull(locatorState);
return Status.ONLINE.equals(locatorState.getStatus());
}
}
| charliemblack/geode | geode-core/src/test/java/org/apache/geode/distributed/LocatorLauncherIntegrationTestCase.java | Java | apache-2.0 | 5,865 |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.activiti5.engine.impl.json;
import java.io.Reader;
import java.io.Writer;
import org.activiti5.engine.impl.util.json.JSONObject;
/**
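 * Converts objects of type {@code T} to and from JSON; concrete subclasses supply
 * {@link #toJsonObject(Object)} and {@link #toObject(Reader)}.
 *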
* @author Tom Baeyens
*/
public abstract class JsonObjectConverter <T> {
public void toJson(T object, Writer writer) {
toJsonObject(object).write(writer);
}
public String toJson(T object) {
return toJsonObject(object).toString();
}
public String toJson(T object, int indentFactor) {
return toJsonObject(object).toString(indentFactor);
}
public abstract JSONObject toJsonObject(T object);
public abstract T toObject(Reader reader);
}
| roberthafner/flowable-engine | modules/flowable5-engine/src/main/java/org/activiti5/engine/impl/json/JsonObjectConverter.java | Java | apache-2.0 | 1,184 |
/**
* Copyright 2012 Comcast Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.cns.controller;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Map;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
import com.amazonaws.services.sns.model.GetSubscriptionAttributesRequest;
import com.amazonaws.services.sns.model.GetSubscriptionAttributesResult;
import com.amazonaws.services.sns.model.SetSubscriptionAttributesRequest;
import com.comcast.cmb.common.controller.AdminServletBase;
import com.comcast.cmb.common.controller.CMBControllerServlet;
/**
 * Admin page for editing the raw message delivery policy of a subscription
* @author tina, aseem, bwolf
*
*/
public class CNSRawMessageDeliveryPolicyPage extends AdminServletBase {
private static final long serialVersionUID = 1L;
private static Logger logger = Logger.getLogger(CNSRawMessageDeliveryPolicyPage.class);
public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
if (redirectUnauthenticatedUser(request, response)) {
return;
}
CMBControllerServlet.valueAccumulator.initializeAllCounters();
response.setContentType("text/html");
PrintWriter out = response.getWriter();
String subArn = request.getParameter("subscriptionArn");
String userId = request.getParameter("userId");
Map<?, ?> params = request.getParameterMap();
connect(request);
out.println("<html>");
simpleHeader(request, out, "Raw Message Delivery Policy");
if (params.containsKey("Update")) {
String rawMessageDeliveryParam = request.getParameter("rawmessage");
Boolean rawMessageDelivery = false;
if (rawMessageDeliveryParam.trim().length() > 0) {
rawMessageDelivery = Boolean.parseBoolean(rawMessageDeliveryParam.trim());
}
try {
SetSubscriptionAttributesRequest setSubscriptionAttributesRequest = new SetSubscriptionAttributesRequest(subArn, "RawMessageDelivery", rawMessageDelivery.toString());
sns.setSubscriptionAttributes(setSubscriptionAttributesRequest);
logger.debug("event=set_raw_message_delivery_policy sub_arn=" + subArn + " user_id= " + userId);
} catch (Exception ex) {
logger.error("event=set_raw_message_delivery_policy sub_arn=" + subArn + " user_id= " + userId, ex);
throw new ServletException(ex);
}
out.println("<body onload='javascript:window.opener.location.reload();window.close();'>");
} else {
Boolean rawMessageDelivery = false;
if (subArn != null) {
Map<String, String> attributes = null;
try {
GetSubscriptionAttributesRequest getSubscriptionAttributesRequest = new GetSubscriptionAttributesRequest(subArn);
GetSubscriptionAttributesResult getSubscriptionAttributesResult = sns.getSubscriptionAttributes(getSubscriptionAttributesRequest);
attributes = getSubscriptionAttributesResult.getAttributes();
String rawMessageDeliveryStr = attributes.get("RawMessageDelivery");
if(rawMessageDeliveryStr != null && !rawMessageDeliveryStr.isEmpty()){
rawMessageDelivery = Boolean.parseBoolean(rawMessageDeliveryStr);
}
} catch (Exception ex) {
logger.error("event=get_raw_message_delivery_attribute sub_arn=" + subArn + " user_id= " + userId, ex);
throw new ServletException(ex);
}
}
out.println("<body>");
out.println("<h1>Raw Message Delivery Policy</h1>");
out.println("<form action=\"/webui/cnsuser/subscription/rawmessagedeliverypolicy?subscriptionArn="+subArn+"\" method=POST>");
out.println("<input type='hidden' name='userId' value='"+ userId +"'>");
out.println("<table width='98%'");
out.println("<tr><td colspan=2><b><font color='orange'>Raw Message Delivery</font></b></td></tr>");
out.println("<tr><td ><input type='radio' name='rawmessage' value='true' " + (rawMessageDelivery?"checked='true'":"") + ">True</td>");
out.println("<td ><input type='radio' name='rawmessage' value='false' " + (rawMessageDelivery?"":"checked='true'") + ">False</td></tr>");
out.println("<tr><td> </td><td> </td></tr>");
out.println("<tr><td colspan=2><hr/></td></tr>");
out.println("<tr><td colspan=2 align=right><input type='button' onclick='window.close()' value='Cancel'><input type='submit' name='Update' value='Update'></td></tr></table></form>");
}
out.println("</body></html>");
CMBControllerServlet.valueAccumulator.deleteAllCounters();
}
public void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
doGet(request, response);
}
}
| KrithikaGanesh/cmb | src/com/comcast/cns/controller/CNSRawMessageDeliveryPolicyPage.java | Java | apache-2.0 | 5,387 |
package org.apereo.cas.ticket;
import org.apereo.cas.support.oauth.OAuth20Constants;
import org.apereo.cas.support.oauth.services.OAuthRegisteredService;
import org.apereo.cas.token.JwtBuilder;
import org.apereo.cas.util.EncodingUtils;
import com.nimbusds.jwt.JWTClaimsSet;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import lombok.val;
import org.apache.commons.lang3.StringUtils;
import org.jose4j.jwk.PublicJsonWebKey;
import org.jose4j.jwt.JwtClaims;
import java.nio.charset.StandardCharsets;
import java.security.Key;
import java.util.HashMap;
import java.util.Objects;
import java.util.Optional;
/**
* This is {@link BaseTokenSigningAndEncryptionService}.
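 * Base class for OAuth token signing and encryption services: it decodes tokens by verifying the
 * JWS signature against the configured JSON web key, validates the issuer and client id claims,
 * and offers helpers to sign or encrypt payloads for registered services.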
*
* @author Misagh Moayyed
* @since 6.0.0
*/
@Slf4j
@NoArgsConstructor(force = true)
@RequiredArgsConstructor(access = AccessLevel.PROTECTED)
@Getter
public abstract class BaseTokenSigningAndEncryptionService implements OAuth20TokenSigningAndEncryptionService {
private final String issuer;
/**
     * Encrypt the given payload as a JSON web encryption (JWE) token.
*
* @param encryptionAlg the encryption alg
* @param encryptionEncoding the encryption encoding
* @param keyIdHeaderValue the key id header value
* @param publicKey the public key
* @param payload the payload
     * @return the encrypted payload as a JWE string
*/
@SneakyThrows
protected static String encryptToken(final String encryptionAlg,
final String encryptionEncoding,
final String keyIdHeaderValue,
final Key publicKey,
final String payload) {
return EncodingUtils.encryptValueAsJwt(publicKey, payload, encryptionAlg,
encryptionEncoding, keyIdHeaderValue, new HashMap<>(0));
}
@Override
@SneakyThrows
public JwtClaims decode(final String token, final Optional<OAuthRegisteredService> service) {
val jsonWebKey = getJsonWebKeySigningKey();
if (jsonWebKey.getPublicKey() == null) {
throw new IllegalArgumentException("JSON web key used to validate the id token signature has no associated public key");
}
val jwt = verifySignature(token, jsonWebKey);
if (jwt == null) {
throw new IllegalArgumentException("Unable to verify signature of the token using the JSON web key public key");
}
val result = new String(jwt, StandardCharsets.UTF_8);
val claims = JwtBuilder.parse(result);
if (StringUtils.isBlank(claims.getIssuer())) {
throw new IllegalArgumentException("Claims do not container an issuer");
}
validateIssuerClaim(claims);
if (StringUtils.isBlank(claims.getStringClaim(OAuth20Constants.CLIENT_ID))) {
throw new IllegalArgumentException("Claims do not contain a client id claim");
}
return JwtClaims.parse(claims.toString());
}
/**
* Validate issuer claim.
*
* @param claims the claims
*/
protected void validateIssuerClaim(final JWTClaimsSet claims) {
LOGGER.debug("Validating claims as [{}] with issuer [{}]", claims, claims.getIssuer());
val iss = determineIssuer(claims);
Objects.requireNonNull(iss, "Issuer cannot be null or undefined");
if (!claims.getIssuer().equalsIgnoreCase(iss)) {
throw new IllegalArgumentException("Issuer assigned to claims " + claims.getIssuer() + " does not match " + iss);
}
}
/**
* Determine issuer.
*
* @param claims the claims
* @return the string
*/
protected String determineIssuer(final JWTClaimsSet claims) {
return getIssuer();
}
/**
* Configure json web signature for id token signing.
*
* @param svc the svc
* @param claims the claims
* @param jsonWebKey the json web key
* @return the json web signature
*/
protected String signToken(final OAuthRegisteredService svc,
final JwtClaims claims,
final PublicJsonWebKey jsonWebKey) {
LOGGER.debug("Service [{}] is set to sign id tokens", svc.getServiceId());
return EncodingUtils.signJws(claims, jsonWebKey, getJsonWebKeySigningAlgorithm(svc), new HashMap<>(0));
}
/**
* Verify signature.
*
* @param token the token
* @param jsonWebKey the json web key
* @return the byte []
*/
protected byte[] verifySignature(final String token, final PublicJsonWebKey jsonWebKey) {
return EncodingUtils.verifyJwsSignature(jsonWebKey.getPublicKey(), token);
}
/**
* Gets signing key.
*
* @return the signing key
*/
protected abstract PublicJsonWebKey getJsonWebKeySigningKey();
}
| fogbeam/cas_mirror | support/cas-server-support-oauth-core-api/src/main/java/org/apereo/cas/ticket/BaseTokenSigningAndEncryptionService.java | Java | apache-2.0 | 5,013 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.ml;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SQLContext;
// $example on$
import java.util.Arrays;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.ml.feature.PolynomialExpansion;
import org.apache.spark.mllib.linalg.VectorUDT;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
// $example off$
public class JavaPolynomialExpansionExample {
public static void main(String[] args) {
SparkConf conf = new SparkConf().setAppName("JavaPolynomialExpansionExample");
JavaSparkContext jsc = new JavaSparkContext(conf);
SQLContext jsql = new SQLContext(jsc);
// $example on$
PolynomialExpansion polyExpansion = new PolynomialExpansion()
.setInputCol("features")
.setOutputCol("polyFeatures")
.setDegree(3);
JavaRDD<Row> data = jsc.parallelize(Arrays.asList(
RowFactory.create(Vectors.dense(-2.0, 2.3)),
RowFactory.create(Vectors.dense(0.0, 0.0)),
RowFactory.create(Vectors.dense(0.6, -1.1))
));
StructType schema = new StructType(new StructField[]{
new StructField("features", new VectorUDT(), false, Metadata.empty()),
});
DataFrame df = jsql.createDataFrame(data, schema);
DataFrame polyDF = polyExpansion.transform(df);
Row[] row = polyDF.select("polyFeatures").take(3);
for (Row r : row) {
System.out.println(r.get(0));
}
// $example off$
jsc.stop();
}
} | chenc10/Spark-PAF | examples/src/main/java/org/apache/spark/examples/ml/JavaPolynomialExpansionExample.java | Java | apache-2.0 | 2,545 |
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package versioning.newWan;
import versioning.VersionBB;
import newWan.WANOperationsClientBB;
import hydra.Log;
import com.gemstone.gemfire.LogWriter;
import com.gemstone.gemfire.cache.util.GatewayConflictHelper;
import com.gemstone.gemfire.cache.util.GatewayConflictResolver;
import com.gemstone.gemfire.cache.util.TimestampedEntryEvent;
import com.gemstone.gemfire.pdx.PdxInstance;
/**
* Custom wan conflict resolver
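 * Disallows an incoming WAN event when it is older than the existing entry, or when the
 * timestamps are equal and the incoming event's distributed system id is lower; all other
 * events are allowed through (see {@link #onEvent}).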
* @author rdiyewar
*
*/
public class WANConflictResolver implements GatewayConflictResolver {
LogWriter log = Log.getLogWriter();
WANOperationsClientBB bb = WANOperationsClientBB.getBB();
public void onEvent(TimestampedEntryEvent event, GatewayConflictHelper helper) {
bb.getSharedCounters().increment(WANOperationsClientBB.WanEventResolved);
log.info("WANConflictResolver: existing timestamp=" + event.getOldTimestamp() + " existing value=" + event.getOldValue()
+ "\n proposed timestamp=" + event.getNewTimestamp() + " proposed value=" + event.getNewValue());
// use the default timestamp and ID based resolution
if (event.getOldTimestamp() > event.getNewTimestamp()) {
log.info("New event is older, disallow the event " + event);
helper.disallowEvent();
}
if (event.getOldTimestamp() == event.getNewTimestamp()
&& event.getOldDistributedSystemID() > event.getNewDistributedSystemID()) {
log.info("Both event has same timestamp, but new event's ds id small. Thus dissallow the event " + event);
helper.disallowEvent();
}
}
}
| papicella/snappy-store | tests/core/src/main/java/versioning/newWan/WANConflictResolver.java | Java | apache-2.0 | 2,212 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.io.kinesis;
import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
import com.amazonaws.services.kinesis.producer.IKinesisProducer;
import com.amazonaws.services.kinesis.producer.KinesisProducer;
import com.amazonaws.services.kinesis.producer.KinesisProducerConfiguration;
import java.net.URI;
import org.checkerframework.checker.nullness.qual.Nullable;
/** Basic implementation of {@link AWSClientsProvider} used by default in {@link KinesisIO}. */
class BasicKinesisProvider implements AWSClientsProvider {
private final String accessKey;
private final String secretKey;
private final Regions region;
private final @Nullable String serviceEndpoint;
private final boolean verifyCertificate;
BasicKinesisProvider(
String accessKey,
String secretKey,
Regions region,
@Nullable String serviceEndpoint,
boolean verifyCertificate) {
checkArgument(accessKey != null, "accessKey can not be null");
checkArgument(secretKey != null, "secretKey can not be null");
checkArgument(region != null, "region can not be null");
this.accessKey = accessKey;
this.secretKey = secretKey;
this.region = region;
this.serviceEndpoint = serviceEndpoint;
this.verifyCertificate = verifyCertificate;
}
BasicKinesisProvider(
String accessKey, String secretKey, Regions region, @Nullable String serviceEndpoint) {
this(accessKey, secretKey, region, serviceEndpoint, true);
}
private AWSCredentialsProvider getCredentialsProvider() {
return new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey));
}
@Override
public AmazonKinesis getKinesisClient() {
AmazonKinesisClientBuilder clientBuilder =
AmazonKinesisClientBuilder.standard().withCredentials(getCredentialsProvider());
if (serviceEndpoint == null) {
clientBuilder.withRegion(region);
} else {
clientBuilder.withEndpointConfiguration(
new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region.getName()));
}
return clientBuilder.build();
}
@Override
public AmazonCloudWatch getCloudWatchClient() {
AmazonCloudWatchClientBuilder clientBuilder =
AmazonCloudWatchClientBuilder.standard().withCredentials(getCredentialsProvider());
if (serviceEndpoint == null) {
clientBuilder.withRegion(region);
} else {
clientBuilder.withEndpointConfiguration(
new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region.getName()));
}
return clientBuilder.build();
}
@Override
public IKinesisProducer createKinesisProducer(KinesisProducerConfiguration config) {
config.setRegion(region.getName());
config.setCredentialsProvider(getCredentialsProvider());
if (serviceEndpoint != null) {
URI uri = URI.create(serviceEndpoint);
config.setKinesisEndpoint(uri.getHost());
config.setKinesisPort(uri.getPort());
}
config.setVerifyCertificate(verifyCertificate);
return new KinesisProducer(config);
}
}
| lukecwik/incubator-beam | sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/BasicKinesisProvider.java | Java | apache-2.0 | 4,391 |
<?php
/**
* Created by IntelliJ IDEA.
* User: Nikolay Chervyakov
* Date: 02.12.2014
* Time: 18:21
*/
namespace VulnModule\Vulnerability;
use VulnModule\Vulnerability;
/**
 * Class CSRF
* @package VulnModule\Vulnerability
* @Vuln\Vulnerability(name="Cross-site request forgery", "A type of malicious exploit of a website whereby
* unauthorized commands are transmitted from a user that the website trusts. Unlike cross-site scripting (XSS),
* which exploits the trust a user has for a particular site, CSRF exploits the trust that a site has in a user's browser.")
*/
class CSRF extends Vulnerability
{
public static $targets = [self::TARGET_CONTEXT];
} | AdrienKuhn/hackazon | modules/vulninjection/classes/VulnModule/Vulnerability/CSRF.php | PHP | apache-2.0 | 695 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package org.apache.polygene.api.service.qualifier;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.function.Predicate;
import org.apache.polygene.api.service.ServiceReference;
/**
* Filter services based on whether they are available or not.
*
* At an injection point you can do this:
*
* <pre><code>
* @Service @Available MyService service;
* </code></pre>
* to get only a service that is currently available.
*/
@Retention( RetentionPolicy.RUNTIME )
@Qualifier( Available.AvailableQualifier.class )
public @interface Available
{
/**
* Available Annotation Qualifier.
* See {@link Available}.
*/
final class AvailableQualifier
implements AnnotationQualifier<Available>
{
@Override
public <T> Predicate<ServiceReference<?>> qualifier( Available active )
{
return ServiceQualifier.whereAvailable();
}
}
}
| Qi4j/qi4j-sdk | core/api/src/main/java/org/apache/polygene/api/service/qualifier/Available.java | Java | apache-2.0 | 1,786 |
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.michelboudreau.test;
import com.amazonaws.services.dynamodb.datamodeling.DynamoDBAttribute;
import com.amazonaws.services.dynamodb.datamodeling.DynamoDBHashKey;
import com.amazonaws.services.dynamodb.datamodeling.DynamoDBRangeKey;
import com.amazonaws.services.dynamodb.datamodeling.DynamoDBTable;
@DynamoDBTable(tableName = "mapper.TestClassWithRangeHashKey")
public class TestClassWithHashRangeKey
{
private String hashCode;
private String rangeCode;
private String stringData;
private int intData;
@DynamoDBHashKey(attributeName = "hashCode")
public final String getHashCode()
{
return hashCode;
}
public final void setHashCode(String hashCode)
{
this.hashCode = hashCode;
}
@DynamoDBRangeKey(attributeName = "rangeCode")
public final String getRangeCode()
{
return rangeCode;
}
public final void setRangeCode(String rangeCode)
{
this.rangeCode = rangeCode;
}
@DynamoDBAttribute(attributeName = "stringData")
public String getStringData()
{
return stringData;
}
public void setStringData(String stringData)
{
this.stringData = stringData;
}
@DynamoDBAttribute(attributeName = "intData")
public int getIntData()
{
return intData;
}
public void setIntData(int intData)
{
this.intData = intData;
}
}
| mboudreau/Alternator | src/test/java/com/michelboudreau/test/TestClassWithHashRangeKey.java | Java | apache-2.0 | 1,515 |
package socialite.collection;
import gnu.trove.map.hash.TLongIntHashMap;
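/**
 * A TLongIntHashMap that remembers the most recently accessed key/value pair,
 * so repeated get() calls for the same key skip a second hash lookup.
 * put(), remove() and clear() keep this one-entry cache consistent with the map.
 */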
public class SLongIntHashMap extends TLongIntHashMap {
long prevKey;
int prevVal;
public SLongIntHashMap(int initCapacity, float f, long noEntryKey, int noEntryValue) {
super(initCapacity, f, noEntryKey, noEntryValue);
        prevKey = noEntryKey;
prevVal = noEntryValue;
}
@Override
public int get(long key) {
if (prevKey!=super.no_entry_key && key==prevKey) {
return prevVal;
}
prevKey = key;
prevVal = super.get(key);
return prevVal;
}
@Override
public int put(long key, int value) {
prevKey = key;
prevVal = value;
return super.put(key, value);
}
@Override
public int remove(long key) {
prevKey = super.no_entry_key;
prevVal = super.no_entry_value;
return super.remove(key);
}
@Override
public void clear() {
prevKey=super.no_entry_key;
super.clear();
}
}
| nvoron23/socialite | src/socialite/collection/SLongIntHashMap.java | Java | apache-2.0 | 886 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.syncope.client.console.wicket.markup.html.form;
import org.apache.syncope.client.console.panels.AbstractModalPanel;
import org.apache.syncope.client.console.wicket.markup.html.bootstrap.dialog.BaseModal;
import org.apache.wicket.PageReference;
import org.apache.wicket.markup.head.IHeaderResponse;
import org.apache.wicket.markup.head.OnLoadHeaderItem;
import org.apache.wicket.markup.html.form.TextArea;
import org.apache.wicket.model.IModel;
public class XMLEditorPanel extends AbstractModalPanel<String> {
private static final long serialVersionUID = -5110368813584745668L;
private final IModel<String> content;
private final boolean readOnly;
public XMLEditorPanel(final IModel<String> content) {
this(null, content, false, null);
}
public XMLEditorPanel(
final BaseModal<String> modal,
final IModel<String> content,
final boolean readOnly,
final PageReference pageRef) {
super(modal, pageRef);
this.content = content;
this.readOnly = readOnly;
final TextArea<String> xmlEditorInfoDefArea = new TextArea<>("xmlEditorInfo", this.content);
xmlEditorInfoDefArea.setMarkupId("xmlEditorInfo").setOutputMarkupPlaceholderTag(true);
add(xmlEditorInfoDefArea);
}
@Override
public void renderHead(final IHeaderResponse response) {
super.renderHead(response);
response.render(OnLoadHeaderItem.forScript(
"CodeMirror.fromTextArea(document.getElementById('xmlEditorInfo'), {"
+ " readOnly: " + readOnly + ", "
+ " lineNumbers: true, "
+ " lineWrapping: true, "
+ " autoCloseTags: true, "
+ " mode: 'text/html', "
+ " autoRefresh: true"
+ "}).on('change', updateTextArea);"));
}
}
| apache/syncope | client/idrepo/console/src/main/java/org/apache/syncope/client/console/wicket/markup/html/form/XMLEditorPanel.java | Java | apache-2.0 | 2,701 |
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.pivotal.gemfirexd.internal.engine.management;
/**
 * This MBean provides different statistics aggregates for StatementStats. It only
 * provides details about the query node.
 *
 * This MBean shows only the rate between two sampling intervals, the sampling
 * interval being DistributionConfig.JMX_MANAGER_UPDATE_RATE_NAME
 * {jmx-manager-update-rate}. The default interval is 2 secs.
 *
 * e.g. if Sample:1 {NumExecution = 2} & Sample:2 {NumExecution = 5}, this MBean
 * will show a value of 3 if queried after Sample:2 & before Sample:3.
*
*
* @author rishim
*
*/
public interface AggregateStatementMXBean {
/**
* Number of times this statement is compiled (including recompilations)
 * between two sampling intervals.
*
*/
public long getNumTimesCompiled();
/**
 * Number of times this statement is executed between two sampling intervals.
*/
public long getNumExecution();
/**
* Statements that are actively being processed during the statistics snapshot
 * between two sampling intervals.
*/
public long getNumExecutionsInProgress();
/**
* Number of times global index lookup message exchanges occurred between two
 * sampling intervals.
*
*/
public long getNumTimesGlobalIndexLookup();
/**
* Number of rows modified by DML operation of insert/delete/update between
 * two sampling intervals.
*
*/
public long getNumRowsModified();
/**
 * Time spent (in milliseconds) in parsing the query string between two
 * sampling intervals.
*
*/
public long getParseTime();
/**
* Time spent (in milliseconds) mapping this statement with database object's
 * metadata (bind) between two sampling intervals.
*
*/
public long getBindTime();
/**
* Time spent (in milliseconds) determining the best execution path for this
 * statement (optimize) between two sampling intervals.
*
*/
public long getOptimizeTime();
/**
* Time spent (in milliseconds) compiling details about routing information of
 * query strings to data node(s) (processQueryInfo) between two sampling intervals.
*
*/
public long getRoutingInfoTime();
/**
* Time spent (in milliseconds) to generate query execution plan definition
 * (activation class) between two sampling intervals.
*
*/
public long getGenerateTime();
/**
* Total compilation time (in milliseconds) of the statement on this node
 * (prepMinion) between two sampling intervals.
*
*/
public long getTotalCompilationTime();
/**
* Time spent (in nanoseconds) in creation of all the layers of query
 * processing (ac.execute) between two sampling intervals.
*
*/
public long getExecutionTime();
/**
* Time to apply (in nanoseconds) the projection and additional filters
 * between two sampling intervals.
*
*/
public long getProjectionTime();
/**
* Total execution time (in nanoseconds) taken to process the statement on
 * this node (execute/open/next/close) between two sampling intervals.
*
*/
public long getTotalExecutionTime();
/**
* Time taken (in nanoseconds) to modify rows by DML operation of
 * insert/delete/update between two sampling intervals.
*
*/
public long getRowsModificationTime();
/**
* Number of rows returned from remote nodes (ResultHolder/Get convertibles)
 * between two sampling intervals.
*
*/
public long getQNNumRowsSeen();
/**
* TCP send time (in nanoseconds) of all the messages including serialization
 * time and queue wait time between two sampling intervals.
*
*/
public long getQNMsgSendTime();
/**
* Serialization time (in nanoseconds) for all the messages while sending to
 * remote node(s) between two sampling intervals.
*
*/
public long getQNMsgSerTime();
/**
 * Response message deserialization time (in nanoseconds) from remote
 * node(s) excluding resultset deserialization between two sampling intervals.
*
*/
public long getQNRespDeSerTime();
}
| papicella/snappy-store | gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/management/AggregateStatementMXBean.java | Java | apache-2.0 | 4,681 |
# line 1
'A module docstring.'
import sys, inspect
# line 5
# line 7
def spam(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h):
eggs(b + d, c + f)
# line 11
def eggs(x, y):
"A docstring."
global fr, st
fr = inspect.currentframe()
st = inspect.stack()
p = x
q = y // 0
# line 20
class StupidGit:
"""A longer,
indented
docstring."""
# line 27
def abuse(self, a, b, c):
"""Another
\tdocstring
containing
\ttabs
\t
"""
self.argue(a, b, c)
# line 40
def argue(self, a, b, c):
try:
spam(a, b, c)
except:
self.ex = sys.exc_info()
self.tr = inspect.trace()
# line 48
class MalodorousPervert(StupidGit):
pass
Tit = MalodorousPervert
class ParrotDroppings:
pass
class FesteringGob(MalodorousPervert, ParrotDroppings):
pass
currentframe = inspect.currentframe()
try:
raise Exception()
except:
tb = sys.exc_info()[2]
| slozier/ironpython2 | Src/StdLib/Lib/test/inspect_fodder.py | Python | apache-2.0 | 967 |
#!/usr/bin/env python
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import maas_common
import subprocess
STATUSES = {'HEALTH_OK': 2, 'HEALTH_WARN': 1, 'HEALTH_ERR': 0}
def check_command(command):
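    """Run the given command and parse the last line of its output as JSON."""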
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
lines = output.strip().split('\n')
return json.loads(lines[-1])
def get_ceph_status(client, keyring, fmt='json'):
return check_command(('ceph', '--format', fmt, '--name', client,
'--keyring', keyring, 'status'))
def get_ceph_pg_dump_osds(client, keyring, fmt='json'):
return check_command(('ceph', '--format', fmt, '--name', client,
'--keyring', keyring, 'pg', 'dump', 'osds'))
def get_ceph_osd_dump(client, keyring, fmt='json'):
return check_command(('ceph', '--format', fmt, '--name', client,
'--keyring', keyring, 'osd', 'dump'))
def get_mon_statistics(client=None, keyring=None, host=None):
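    """Report quorum membership and health status metrics for the named mon host."""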
ceph_status = get_ceph_status(client=client, keyring=keyring)
mon = [m for m in ceph_status['monmap']['mons']
if m['name'] == host]
mon_in = mon[0]['rank'] in ceph_status['quorum']
maas_common.metric_bool('mon_in_quorum', mon_in)
health_status = 0
for each in ceph_status['health']['health']['health_services'][0]['mons']:
if each['name'] == host:
health_status = STATUSES[each['health']]
break
maas_common.metric('mon_health', 'uint32', health_status)
def get_osd_statistics(client=None, keyring=None, osd_ids=None):
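    """Report up/in state and kb/kb_used/kb_avail metrics for each requested OSD id."""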
osd_dump = get_ceph_osd_dump(client=client, keyring=keyring)
pg_osds_dump = get_ceph_pg_dump_osds(client=client, keyring=keyring)
for osd_id in osd_ids:
osd_ref = 'osd.%s' % osd_id
for _osd in osd_dump['osds']:
if _osd['osd'] == osd_id:
osd = _osd
break
else:
msg = 'The OSD ID %s does not exist.' % osd_id
raise maas_common.MaaSException(msg)
for key in ('up', 'in'):
name = '_'.join((osd_ref, key))
maas_common.metric_bool(name, osd[key])
for _osd in pg_osds_dump:
if _osd['osd'] == osd_id:
osd = _osd
break
for key in ('kb', 'kb_used', 'kb_avail'):
name = '_'.join((osd_ref, key))
maas_common.metric(name, 'uint64', osd[key])
def get_cluster_statistics(client=None, keyring=None):
metrics = []
ceph_status = get_ceph_status(client=client, keyring=keyring)
# Get overall cluster health
metrics.append({
'name': 'cluster_health',
'type': 'uint32',
'value': STATUSES[ceph_status['health']['overall_status']]})
# Collect epochs for the mon and osd maps
metrics.append({'name': "monmap_epoch",
'type': 'uint32',
'value': ceph_status['monmap']['epoch']})
metrics.append({'name': "osdmap_epoch",
'type': 'uint32',
'value': ceph_status['osdmap']['osdmap']['epoch']})
# Collect OSDs per state
osds = {'total': ceph_status['osdmap']['osdmap']['num_osds'],
'up': ceph_status['osdmap']['osdmap']['num_up_osds'],
'in': ceph_status['osdmap']['osdmap']['num_in_osds']}
for k in osds:
metrics.append({'name': 'osds_%s' % k,
'type': 'uint32',
'value': osds[k]})
# Collect cluster size & utilisation
metrics.append({'name': 'osds_kb_used',
'type': 'uint64',
'value': ceph_status['pgmap']['bytes_used'] / 1024})
metrics.append({'name': 'osds_kb_avail',
'type': 'uint64',
'value': ceph_status['pgmap']['bytes_avail'] / 1024})
metrics.append({'name': 'osds_kb',
'type': 'uint64',
'value': ceph_status['pgmap']['bytes_total'] / 1024})
# Collect num PGs and num healthy PGs
pgs = {'total': ceph_status['pgmap']['num_pgs'], 'active_clean': 0}
for state in ceph_status['pgmap']['pgs_by_state']:
if state['state_name'] == 'active+clean':
pgs['active_clean'] = state['count']
break
for k in pgs:
metrics.append({'name': 'pgs_%s' % k,
'type': 'uint32',
'value': pgs[k]})
# Submit gathered metrics
for m in metrics:
maas_common.metric(m['name'], m['type'], m['value'])
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--name', required=True, help='Ceph client name')
parser.add_argument('--keyring', required=True, help='Ceph client keyring')
subparsers = parser.add_subparsers(dest='subparser_name')
parser_mon = subparsers.add_parser('mon')
parser_mon.add_argument('--host', required=True, help='Mon hostname')
parser_osd = subparsers.add_parser('osd')
parser_osd.add_argument('--osd_ids', required=True,
help='Space separated list of OSD IDs')
subparsers.add_parser('cluster')
return parser.parse_args()
def main(args):
get_statistics = {'cluster': get_cluster_statistics,
'mon': get_mon_statistics,
'osd': get_osd_statistics}
kwargs = {'client': args.name, 'keyring': args.keyring}
if args.subparser_name == 'osd':
kwargs['osd_ids'] = [int(i) for i in args.osd_ids.split(' ')]
if args.subparser_name == 'mon':
kwargs['host'] = args.host
get_statistics[args.subparser_name](**kwargs)
maas_common.status_ok()
if __name__ == '__main__':
with maas_common.print_output():
args = get_args()
main(args)
| byronmccollum/rpc-openstack | maas/plugins/ceph_monitoring.py | Python | apache-2.0 | 6,319 |
/*
Copyright (C) 2010,2011 Wei Dong <wdong.pku@gmail.com>. All Rights Reserved.
DISTRIBUTION OF THIS PROGRAM IN EITHER BINARY OR SOURCE CODE FORM MUST BE
PERMITTED BY THE AUTHOR.
*/
#ifndef KGRAPH_VALUE_TYPE
#define KGRAPH_VALUE_TYPE float
#endif
#include <cctype>
#include <type_traits>
#include <iostream>
#include <boost/timer/timer.hpp>
#include <boost/program_options.hpp>
#include <kgraph.h>
#include <kgraph-data.h>
using namespace std;
using namespace boost;
using namespace kgraph;
namespace po = boost::program_options;
typedef KGRAPH_VALUE_TYPE value_type;
int main (int argc, char *argv[]) {
string input_path;
string query_path;
string output_path;
string eval_path;
unsigned K, P;
po::options_description desc_visible("General options");
desc_visible.add_options()
("help,h", "produce help message.")
("data", po::value(&input_path), "input path")
("query", po::value(&query_path), "query path")
("eval", po::value(&eval_path), "eval path")
(",K", po::value(&K)->default_value(default_K), "")
(",P", po::value(&P)->default_value(default_P), "")
;
po::options_description desc("Allowed options");
desc.add(desc_visible);
po::positional_options_description p;
p.add("data", 1);
p.add("query", 1);
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);
po::notify(vm);
if (vm.count("help") || vm.count("data") == 0 || vm.count("query") == 0) {
cout << "search <data> <index> <query> [output]" << endl;
cout << desc_visible << endl;
return 0;
}
if (P < K) {
P = K;
}
Matrix<value_type> data;
Matrix<value_type> query;
Matrix<unsigned> result;
data.load_lshkit(input_path);
query.load_lshkit(query_path);
unsigned dim = data.dim();
VectorOracle<Matrix<value_type>, value_type const*> oracle(data,
[dim](value_type const *a, value_type const *b)
{
float r = 0;
for (unsigned i = 0; i < dim; ++i) {
float v = float(a[i]) - (b[i]);
r += v * v;
}
return r;
});
float recall = 0;
float cost = 0;
float time = 0;
result.resize(query.size(), K);
KGraph::SearchParams params;
params.K = K;
params.P = P;
KGraph *kgraph = KGraph::create();
{
KGraph::IndexParams params;
kgraph->build(oracle, params, NULL);
}
boost::timer::auto_cpu_timer timer;
cerr << "Searching..." << endl;
#pragma omp parallel for reduction(+:cost)
for (unsigned i = 0; i < query.size(); ++i) {
KGraph::SearchInfo info;
kgraph->search(oracle.query(query[i]), params, result[i], &info);
cost += info.cost;
}
cost /= query.size();
time = timer.elapsed().wall / 1e9;
delete kgraph;
if (eval_path.size()) {
Matrix<unsigned> gs;
gs.load_lshkit(eval_path);
BOOST_VERIFY(gs.dim() >= K);
BOOST_VERIFY(gs.size() >= query.size());
kgraph::Matrix<float> gs_dist(query.size(), K);
kgraph::Matrix<float> result_dist(query.size(), K);
#pragma omp parallel for
for (unsigned i = 0; i < query.size(); ++i) {
auto const Q = oracle.query(query[i]);
float *gs_dist_row = gs_dist[i];
float *result_dist_row = result_dist[i];
unsigned const *gs_row = gs[i];
unsigned const *result_row = result[i];
for (unsigned k = 0; k < K; ++k) {
gs_dist_row[k] = Q(gs_row[k]);
result_dist_row[k] = Q(result_row[k]);
}
sort(gs_dist_row, gs_dist_row + K);
sort(result_dist_row, result_dist_row + K);
}
recall = AverageRecall(gs_dist, result_dist, K);
}
cout << "Time: " << time << " Recall: " << recall << " Cost: " << cost << endl;
return 0;
}
| jonbakerfish/kgraph | test.cpp | C++ | bsd-2-clause | 4,008 |
class SpeedtestCli < Formula
desc "Command-line interface for https://speedtest.net bandwidth tests"
homepage "https://github.com/sivel/speedtest-cli"
url "https://github.com/sivel/speedtest-cli/archive/v1.0.7.tar.gz"
sha256 "3853bb7a3d16f686441d0d10ebbfc1fd49e974ecf17248ce03456ad4ef2478b9"
head "https://github.com/sivel/speedtest-cli.git"
bottle :unneeded
def install
bin.install "speedtest.py" => "speedtest"
bin.install_symlink "speedtest" => "speedtest-cli"
man1.install "speedtest-cli.1"
end
test do
system bin/"speedtest"
end
end
| robohack/homebrew-core | Formula/speedtest-cli.rb | Ruby | bsd-2-clause | 577 |
/*
* Copyright 2011-2019 Branimir Karadzic. All rights reserved.
* License: https://github.com/bkaradzic/bgfx#license-bsd-2-clause
*/
#include <bx/bx.h>
#include <bx/file.h>
#include <bx/sort.h>
#include <bgfx/bgfx.h>
#include <time.h>
#if BX_PLATFORM_EMSCRIPTEN
# include <emscripten.h>
#endif // BX_PLATFORM_EMSCRIPTEN
#include "entry_p.h"
#include "cmd.h"
#include "input.h"
extern "C" int32_t _main_(int32_t _argc, char** _argv);
namespace entry
{
static uint32_t s_debug = BGFX_DEBUG_NONE;
static uint32_t s_reset = BGFX_RESET_NONE;
static uint32_t s_width = ENTRY_DEFAULT_WIDTH;
static uint32_t s_height = ENTRY_DEFAULT_HEIGHT;
static bool s_exit = false;
static bx::FileReaderI* s_fileReader = NULL;
static bx::FileWriterI* s_fileWriter = NULL;
extern bx::AllocatorI* getDefaultAllocator();
bx::AllocatorI* g_allocator = getDefaultAllocator();
typedef bx::StringT<&g_allocator> String;
static String s_currentDir;
class FileReader : public bx::FileReader
{
typedef bx::FileReader super;
public:
virtual bool open(const bx::FilePath& _filePath, bx::Error* _err) override
{
String filePath(s_currentDir);
filePath.append(_filePath);
return super::open(filePath.getPtr(), _err);
}
};
class FileWriter : public bx::FileWriter
{
typedef bx::FileWriter super;
public:
virtual bool open(const bx::FilePath& _filePath, bool _append, bx::Error* _err) override
{
String filePath(s_currentDir);
filePath.append(_filePath);
return super::open(filePath.getPtr(), _append, _err);
}
};
void setCurrentDir(const char* _dir)
{
s_currentDir.set(_dir);
}
#if ENTRY_CONFIG_IMPLEMENT_DEFAULT_ALLOCATOR
bx::AllocatorI* getDefaultAllocator()
{
BX_PRAGMA_DIAGNOSTIC_PUSH();
BX_PRAGMA_DIAGNOSTIC_IGNORED_MSVC(4459); // warning C4459: declaration of 's_allocator' hides global declaration
BX_PRAGMA_DIAGNOSTIC_IGNORED_CLANG_GCC("-Wshadow");
static bx::DefaultAllocator s_allocator;
return &s_allocator;
BX_PRAGMA_DIAGNOSTIC_POP();
}
#endif // ENTRY_CONFIG_IMPLEMENT_DEFAULT_ALLOCATOR
static const char* s_keyName[] =
{
"None",
"Esc",
"Return",
"Tab",
"Space",
"Backspace",
"Up",
"Down",
"Left",
"Right",
"Insert",
"Delete",
"Home",
"End",
"PageUp",
"PageDown",
"Print",
"Plus",
"Minus",
"LeftBracket",
"RightBracket",
"Semicolon",
"Quote",
"Comma",
"Period",
"Slash",
"Backslash",
"Tilde",
"F1",
"F2",
"F3",
"F4",
"F5",
"F6",
"F7",
"F8",
"F9",
"F10",
"F11",
"F12",
"NumPad0",
"NumPad1",
"NumPad2",
"NumPad3",
"NumPad4",
"NumPad5",
"NumPad6",
"NumPad7",
"NumPad8",
"NumPad9",
"Key0",
"Key1",
"Key2",
"Key3",
"Key4",
"Key5",
"Key6",
"Key7",
"Key8",
"Key9",
"KeyA",
"KeyB",
"KeyC",
"KeyD",
"KeyE",
"KeyF",
"KeyG",
"KeyH",
"KeyI",
"KeyJ",
"KeyK",
"KeyL",
"KeyM",
"KeyN",
"KeyO",
"KeyP",
"KeyQ",
"KeyR",
"KeyS",
"KeyT",
"KeyU",
"KeyV",
"KeyW",
"KeyX",
"KeyY",
"KeyZ",
"GamepadA",
"GamepadB",
"GamepadX",
"GamepadY",
"GamepadThumbL",
"GamepadThumbR",
"GamepadShoulderL",
"GamepadShoulderR",
"GamepadUp",
"GamepadDown",
"GamepadLeft",
"GamepadRight",
"GamepadBack",
"GamepadStart",
"GamepadGuide",
};
BX_STATIC_ASSERT(Key::Count == BX_COUNTOF(s_keyName) );
const char* getName(Key::Enum _key)
{
BX_CHECK(_key < Key::Count, "Invalid key %d.", _key);
return s_keyName[_key];
}
char keyToAscii(Key::Enum _key, uint8_t _modifiers)
{
const bool isAscii = (Key::Key0 <= _key && _key <= Key::KeyZ)
|| (Key::Esc <= _key && _key <= Key::Minus);
if (!isAscii)
{
return '\0';
}
const bool isNumber = (Key::Key0 <= _key && _key <= Key::Key9);
if (isNumber)
{
return '0' + char(_key - Key::Key0);
}
const bool isChar = (Key::KeyA <= _key && _key <= Key::KeyZ);
if (isChar)
{
enum { ShiftMask = Modifier::LeftShift|Modifier::RightShift };
const bool shift = !!(_modifiers&ShiftMask);
return (shift ? 'A' : 'a') + char(_key - Key::KeyA);
}
switch (_key)
{
case Key::Esc: return 0x1b;
case Key::Return: return '\n';
case Key::Tab: return '\t';
case Key::Space: return ' ';
case Key::Backspace: return 0x08;
case Key::Plus: return '+';
case Key::Minus: return '-';
default: break;
}
return '\0';
}
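	// Handles a "<name> [0|1]" command argument: with an explicit value the bit is
	// set or cleared, without one it is toggled. Returns true if _name matched.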
bool setOrToggle(uint32_t& _flags, const char* _name, uint32_t _bit, int _first, int _argc, char const* const* _argv)
{
if (0 == bx::strCmp(_argv[_first], _name) )
{
int arg = _first+1;
if (_argc > arg)
{
_flags &= ~_bit;
bool set = false;
bx::fromString(&set, _argv[arg]);
_flags |= set ? _bit : 0;
}
else
{
_flags ^= _bit;
}
return true;
}
return false;
}
int cmdMouseLock(CmdContext* /*_context*/, void* /*_userData*/, int _argc, char const* const* _argv)
{
if (1 < _argc)
{
bool set = false;
if (2 < _argc)
{
bx::fromString(&set, _argv[1]);
inputSetMouseLock(set);
}
else
{
inputSetMouseLock(!inputIsMouseLocked() );
}
return bx::kExitSuccess;
}
return bx::kExitFailure;
}
int cmdGraphics(CmdContext* /*_context*/, void* /*_userData*/, int _argc, char const* const* _argv)
{
if (_argc > 1)
{
if (setOrToggle(s_reset, "vsync", BGFX_RESET_VSYNC, 1, _argc, _argv)
|| setOrToggle(s_reset, "maxaniso", BGFX_RESET_MAXANISOTROPY, 1, _argc, _argv)
|| setOrToggle(s_reset, "msaa", BGFX_RESET_MSAA_X16, 1, _argc, _argv)
|| setOrToggle(s_reset, "flush", BGFX_RESET_FLUSH_AFTER_RENDER, 1, _argc, _argv)
|| setOrToggle(s_reset, "flip", BGFX_RESET_FLIP_AFTER_RENDER, 1, _argc, _argv)
|| setOrToggle(s_reset, "hidpi", BGFX_RESET_HIDPI, 1, _argc, _argv)
|| setOrToggle(s_reset, "depthclamp", BGFX_RESET_DEPTH_CLAMP, 1, _argc, _argv)
)
{
return bx::kExitSuccess;
}
else if (setOrToggle(s_debug, "stats", BGFX_DEBUG_STATS, 1, _argc, _argv)
|| setOrToggle(s_debug, "ifh", BGFX_DEBUG_IFH, 1, _argc, _argv)
|| setOrToggle(s_debug, "text", BGFX_DEBUG_TEXT, 1, _argc, _argv)
|| setOrToggle(s_debug, "wireframe", BGFX_DEBUG_WIREFRAME, 1, _argc, _argv)
|| setOrToggle(s_debug, "profiler", BGFX_DEBUG_PROFILER, 1, _argc, _argv)
)
{
bgfx::setDebug(s_debug);
return bx::kExitSuccess;
}
else if (0 == bx::strCmp(_argv[1], "screenshot") )
{
bgfx::FrameBufferHandle fbh = BGFX_INVALID_HANDLE;
if (_argc > 2)
{
bgfx::requestScreenShot(fbh, _argv[2]);
}
else
{
time_t tt;
time(&tt);
char filePath[256];
bx::snprintf(filePath, sizeof(filePath), "temp/screenshot-%d", tt);
bgfx::requestScreenShot(fbh, filePath);
}
return bx::kExitSuccess;
}
else if (0 == bx::strCmp(_argv[1], "fullscreen") )
{
WindowHandle window = { 0 };
toggleFullscreen(window);
return bx::kExitSuccess;
}
}
return bx::kExitFailure;
}
int cmdExit(CmdContext* /*_context*/, void* /*_userData*/, int /*_argc*/, char const* const* /*_argv*/)
{
s_exit = true;
return bx::kExitSuccess;
}
static const InputBinding s_bindings[] =
{
{ entry::Key::KeyQ, entry::Modifier::LeftCtrl, 1, NULL, "exit" },
{ entry::Key::KeyQ, entry::Modifier::RightCtrl, 1, NULL, "exit" },
{ entry::Key::KeyF, entry::Modifier::LeftCtrl, 1, NULL, "graphics fullscreen" },
{ entry::Key::KeyF, entry::Modifier::RightCtrl, 1, NULL, "graphics fullscreen" },
{ entry::Key::Return, entry::Modifier::RightAlt, 1, NULL, "graphics fullscreen" },
{ entry::Key::F1, entry::Modifier::None, 1, NULL, "graphics stats" },
{ entry::Key::F1, entry::Modifier::LeftCtrl, 1, NULL, "graphics ifh" },
{ entry::Key::GamepadStart, entry::Modifier::None, 1, NULL, "graphics stats" },
{ entry::Key::F1, entry::Modifier::LeftShift, 1, NULL, "graphics stats 0\ngraphics text 0" },
{ entry::Key::F3, entry::Modifier::None, 1, NULL, "graphics wireframe" },
{ entry::Key::F4, entry::Modifier::None, 1, NULL, "graphics hmd" },
{ entry::Key::F4, entry::Modifier::LeftShift, 1, NULL, "graphics hmdrecenter" },
{ entry::Key::F4, entry::Modifier::LeftCtrl, 1, NULL, "graphics hmddbg" },
{ entry::Key::F6, entry::Modifier::None, 1, NULL, "graphics profiler" },
{ entry::Key::F7, entry::Modifier::None, 1, NULL, "graphics vsync" },
{ entry::Key::F8, entry::Modifier::None, 1, NULL, "graphics msaa" },
{ entry::Key::F9, entry::Modifier::None, 1, NULL, "graphics flush" },
{ entry::Key::F10, entry::Modifier::None, 1, NULL, "graphics hidpi" },
{ entry::Key::Print, entry::Modifier::None, 1, NULL, "graphics screenshot" },
{ entry::Key::KeyP, entry::Modifier::LeftCtrl, 1, NULL, "graphics screenshot" },
INPUT_BINDING_END
};
#if BX_PLATFORM_EMSCRIPTEN
static AppI* s_app;
static void updateApp()
{
s_app->update();
}
#endif // BX_PLATFORM_EMSCRIPTEN
static AppI* s_currentApp = NULL;
static AppI* s_apps = NULL;
static uint32_t s_numApps = 0;
static char s_restartArgs[1024] = { '\0' };
static AppI* getCurrentApp(AppI* _set = NULL)
{
if (NULL != _set)
{
s_currentApp = _set;
}
else if (NULL == s_currentApp)
{
s_currentApp = getFirstApp();
}
return s_currentApp;
}
static AppI* getNextWrap(AppI* _app)
{
AppI* next = _app->getNext();
if (NULL != next)
{
return next;
}
return getFirstApp();
}
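	// Console command "app restart [next|prev|<name>]": records which example app
	// should be started next; the current app's update loop then exits and restarts.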
int cmdApp(CmdContext* /*_context*/, void* /*_userData*/, int _argc, char const* const* _argv)
{
if (0 == bx::strCmp(_argv[1], "restart") )
{
if (2 == _argc)
{
bx::strCopy(s_restartArgs, BX_COUNTOF(s_restartArgs), getCurrentApp()->getName() );
return bx::kExitSuccess;
}
if (0 == bx::strCmp(_argv[2], "next") )
{
AppI* next = getNextWrap(getCurrentApp() );
bx::strCopy(s_restartArgs, BX_COUNTOF(s_restartArgs), next->getName() );
return bx::kExitSuccess;
}
else if (0 == bx::strCmp(_argv[2], "prev") )
{
AppI* prev = getCurrentApp();
for (AppI* app = getNextWrap(prev); app != getCurrentApp(); app = getNextWrap(app) )
{
prev = app;
}
bx::strCopy(s_restartArgs, BX_COUNTOF(s_restartArgs), prev->getName() );
return bx::kExitSuccess;
}
for (AppI* app = getFirstApp(); NULL != app; app = app->getNext() )
{
if (0 == bx::strCmp(_argv[2], app->getName() ) )
{
bx::strCopy(s_restartArgs, BX_COUNTOF(s_restartArgs), app->getName() );
return bx::kExitSuccess;
}
}
}
return bx::kExitFailure;
}
AppI::AppI(const char* _name, const char* _description)
{
m_name = _name;
m_description = _description;
m_next = s_apps;
s_apps = this;
s_numApps++;
}
AppI::~AppI()
{
for (AppI* prev = NULL, *app = s_apps, *next = app->getNext()
; NULL != app
; prev = app, app = next, next = app->getNext() )
{
if (app == this)
{
if (NULL != prev)
{
prev->m_next = next;
}
else
{
s_apps = next;
}
--s_numApps;
break;
}
}
}
const char* AppI::getName() const
{
return m_name;
}
const char* AppI::getDescription() const
{
return m_description;
}
AppI* AppI::getNext()
{
return m_next;
}
AppI* getFirstApp()
{
return s_apps;
}
uint32_t getNumApps()
{
return s_numApps;
}
int runApp(AppI* _app, int _argc, const char* const* _argv)
{
_app->init(_argc, _argv, s_width, s_height);
bgfx::frame();
WindowHandle defaultWindow = { 0 };
setWindowSize(defaultWindow, s_width, s_height);
#if BX_PLATFORM_EMSCRIPTEN
s_app = _app;
emscripten_set_main_loop(&updateApp, -1, 1);
#else
while (_app->update() )
{
if (0 != bx::strLen(s_restartArgs) )
{
break;
}
}
#endif // BX_PLATFORM_EMSCRIPTEN
return _app->shutdown();
}
static int32_t sortApp(const void* _lhs, const void* _rhs)
{
const AppI* lhs = *(const AppI**)_lhs;
const AppI* rhs = *(const AppI**)_rhs;
return bx::strCmpI(lhs->getName(), rhs->getName() );
}
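	// Sorts the registered example apps alphabetically by name and relinks the list.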
static void sortApps()
{
if (2 > s_numApps)
{
return;
}
AppI** apps = (AppI**)BX_ALLOC(g_allocator, s_numApps*sizeof(AppI*) );
uint32_t ii = 0;
for (AppI* app = getFirstApp(); NULL != app; app = app->getNext() )
{
apps[ii++] = app;
}
bx::quickSort(apps, s_numApps, sizeof(AppI*), sortApp);
s_apps = apps[0];
for (ii = 1; ii < s_numApps; ++ii)
{
AppI* app = apps[ii-1];
app->m_next = apps[ii];
}
apps[s_numApps-1]->m_next = NULL;
BX_FREE(g_allocator, apps);
}
int main(int _argc, const char* const* _argv)
{
//DBG(BX_COMPILER_NAME " / " BX_CPU_NAME " / " BX_ARCH_NAME " / " BX_PLATFORM_NAME);
s_fileReader = BX_NEW(g_allocator, FileReader);
s_fileWriter = BX_NEW(g_allocator, FileWriter);
cmdInit();
cmdAdd("mouselock", cmdMouseLock);
cmdAdd("graphics", cmdGraphics );
cmdAdd("exit", cmdExit );
cmdAdd("app", cmdApp );
inputInit();
inputAddBindings("bindings", s_bindings);
entry::WindowHandle defaultWindow = { 0 };
bx::FilePath fp(_argv[0]);
char title[bx::kMaxFilePath];
bx::strCopy(title, BX_COUNTOF(title), fp.getBaseName() );
entry::setWindowTitle(defaultWindow, title);
setWindowSize(defaultWindow, ENTRY_DEFAULT_WIDTH, ENTRY_DEFAULT_HEIGHT);
sortApps();
const char* find = "";
if (1 < _argc)
{
find = _argv[_argc-1];
}
restart:
AppI* selected = NULL;
for (AppI* app = getFirstApp(); NULL != app; app = app->getNext() )
{
if (NULL == selected
&& !bx::strFindI(app->getName(), find).isEmpty() )
{
selected = app;
}
#if 0
DBG("%c %s, %s"
, app == selected ? '>' : ' '
, app->getName()
, app->getDescription()
);
#endif // 0
}
int32_t result = bx::kExitSuccess;
s_restartArgs[0] = '\0';
if (0 == s_numApps)
{
result = ::_main_(_argc, (char**)_argv);
}
else
{
result = runApp(getCurrentApp(selected), _argc, _argv);
}
if (0 != bx::strLen(s_restartArgs) )
{
find = s_restartArgs;
goto restart;
}
setCurrentDir("");
inputRemoveBindings("bindings");
inputShutdown();
cmdShutdown();
BX_DELETE(g_allocator, s_fileReader);
s_fileReader = NULL;
BX_DELETE(g_allocator, s_fileWriter);
s_fileWriter = NULL;
return result;
}
WindowState s_window[ENTRY_CONFIG_MAX_WINDOWS];
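	// Drains pending platform events, updating input state, window size and reset
	// flags; returns true when the application should exit.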
bool processEvents(uint32_t& _width, uint32_t& _height, uint32_t& _debug, uint32_t& _reset, MouseState* _mouse)
{
s_debug = _debug;
s_reset = _reset;
WindowHandle handle = { UINT16_MAX };
bool mouseLock = inputIsMouseLocked();
const Event* ev;
do
{
struct SE { const Event* m_ev; SE() : m_ev(poll() ) {} ~SE() { if (NULL != m_ev) { release(m_ev); } } } scopeEvent;
ev = scopeEvent.m_ev;
if (NULL != ev)
{
switch (ev->m_type)
{
case Event::Axis:
{
const AxisEvent* axis = static_cast<const AxisEvent*>(ev);
inputSetGamepadAxis(axis->m_gamepad, axis->m_axis, axis->m_value);
}
break;
case Event::Char:
{
const CharEvent* chev = static_cast<const CharEvent*>(ev);
inputChar(chev->m_len, chev->m_char);
}
break;
case Event::Exit:
return true;
case Event::Gamepad:
{
// const GamepadEvent* gev = static_cast<const GamepadEvent*>(ev);
// DBG("gamepad %d, %d", gev->m_gamepad.idx, gev->m_connected);
}
break;
case Event::Mouse:
{
const MouseEvent* mouse = static_cast<const MouseEvent*>(ev);
handle = mouse->m_handle;
inputSetMousePos(mouse->m_mx, mouse->m_my, mouse->m_mz);
if (!mouse->m_move)
{
inputSetMouseButtonState(mouse->m_button, mouse->m_down);
}
if (NULL != _mouse
&& !mouseLock)
{
_mouse->m_mx = mouse->m_mx;
_mouse->m_my = mouse->m_my;
_mouse->m_mz = mouse->m_mz;
if (!mouse->m_move)
{
_mouse->m_buttons[mouse->m_button] = mouse->m_down;
}
}
}
break;
case Event::Key:
{
const KeyEvent* key = static_cast<const KeyEvent*>(ev);
handle = key->m_handle;
inputSetKeyState(key->m_key, key->m_modifiers, key->m_down);
}
break;
case Event::Size:
{
const SizeEvent* size = static_cast<const SizeEvent*>(ev);
WindowState& win = s_window[0];
win.m_handle = size->m_handle;
win.m_width = size->m_width;
win.m_height = size->m_height;
handle = size->m_handle;
_width = size->m_width;
_height = size->m_height;
_reset = !s_reset; // force reset
}
break;
case Event::Window:
break;
case Event::Suspend:
break;
case Event::DropFile:
{
const DropFileEvent* drop = static_cast<const DropFileEvent*>(ev);
DBG("%s", drop->m_filePath.getCPtr() );
}
break;
default:
break;
}
}
inputProcess();
} while (NULL != ev);
if (handle.idx == 0
&& _reset != s_reset)
{
_reset = s_reset;
bgfx::reset(_width, _height, _reset);
inputSetMouseResolution(uint16_t(_width), uint16_t(_height) );
}
_debug = s_debug;
s_width = _width;
s_height = _height;
return s_exit;
}
bool processWindowEvents(WindowState& _state, uint32_t& _debug, uint32_t& _reset)
{
s_debug = _debug;
s_reset = _reset;
WindowHandle handle = { UINT16_MAX };
bool mouseLock = inputIsMouseLocked();
bool clearDropFile = true;
const Event* ev;
do
{
struct SE
{
SE(WindowHandle _handle)
: m_ev(poll(_handle) )
{
}
~SE()
{
if (NULL != m_ev)
{
release(m_ev);
}
}
const Event* m_ev;
} scopeEvent(handle);
ev = scopeEvent.m_ev;
if (NULL != ev)
{
handle = ev->m_handle;
WindowState& win = s_window[handle.idx];
switch (ev->m_type)
{
case Event::Axis:
{
const AxisEvent* axis = static_cast<const AxisEvent*>(ev);
inputSetGamepadAxis(axis->m_gamepad, axis->m_axis, axis->m_value);
}
break;
case Event::Char:
{
const CharEvent* chev = static_cast<const CharEvent*>(ev);
win.m_handle = chev->m_handle;
inputChar(chev->m_len, chev->m_char);
}
break;
case Event::Exit:
return true;
case Event::Gamepad:
{
const GamepadEvent* gev = static_cast<const GamepadEvent*>(ev);
DBG("gamepad %d, %d", gev->m_gamepad.idx, gev->m_connected);
}
break;
case Event::Mouse:
{
const MouseEvent* mouse = static_cast<const MouseEvent*>(ev);
win.m_handle = mouse->m_handle;
if (mouse->m_move)
{
inputSetMousePos(mouse->m_mx, mouse->m_my, mouse->m_mz);
}
else
{
inputSetMouseButtonState(mouse->m_button, mouse->m_down);
}
if (!mouseLock)
{
if (mouse->m_move)
{
win.m_mouse.m_mx = mouse->m_mx;
win.m_mouse.m_my = mouse->m_my;
win.m_mouse.m_mz = mouse->m_mz;
}
else
{
win.m_mouse.m_buttons[mouse->m_button] = mouse->m_down;
}
}
}
break;
case Event::Key:
{
const KeyEvent* key = static_cast<const KeyEvent*>(ev);
win.m_handle = key->m_handle;
inputSetKeyState(key->m_key, key->m_modifiers, key->m_down);
}
break;
case Event::Size:
{
const SizeEvent* size = static_cast<const SizeEvent*>(ev);
win.m_handle = size->m_handle;
win.m_width = size->m_width;
win.m_height = size->m_height;
_reset = win.m_handle.idx == 0
? !s_reset
: _reset
; // force reset
}
break;
case Event::Window:
{
const WindowEvent* window = static_cast<const WindowEvent*>(ev);
win.m_handle = window->m_handle;
win.m_nwh = window->m_nwh;
ev = NULL;
}
break;
case Event::Suspend:
break;
case Event::DropFile:
{
const DropFileEvent* drop = static_cast<const DropFileEvent*>(ev);
win.m_dropFile = drop->m_filePath;
clearDropFile = false;
}
break;
default:
break;
}
}
inputProcess();
} while (NULL != ev);
if (isValid(handle) )
{
WindowState& win = s_window[handle.idx];
if (clearDropFile)
{
win.m_dropFile.clear();
}
_state = win;
if (handle.idx == 0)
{
inputSetMouseResolution(uint16_t(win.m_width), uint16_t(win.m_height) );
}
}
if (_reset != s_reset)
{
_reset = s_reset;
bgfx::reset(s_window[0].m_width, s_window[0].m_height, _reset);
inputSetMouseResolution(uint16_t(s_window[0].m_width), uint16_t(s_window[0].m_height) );
}
_debug = s_debug;
return s_exit;
}
bx::FileReaderI* getFileReader()
{
return s_fileReader;
}
bx::FileWriterI* getFileWriter()
{
return s_fileWriter;
}
bx::AllocatorI* getAllocator()
{
if (NULL == g_allocator)
{
g_allocator = getDefaultAllocator();
}
return g_allocator;
}
void* TinyStlAllocator::static_allocate(size_t _bytes)
{
return BX_ALLOC(getAllocator(), _bytes);
}
void TinyStlAllocator::static_deallocate(void* _ptr, size_t /*_bytes*/)
{
if (NULL != _ptr)
{
BX_FREE(getAllocator(), _ptr);
}
}
} // namespace entry
extern "C" bool entry_process_events(uint32_t* _width, uint32_t* _height, uint32_t* _debug, uint32_t* _reset)
{
return entry::processEvents(*_width, *_height, *_debug, *_reset, NULL);
}
| attilaz/bgfx | examples/common/entry/entry.cpp | C++ | bsd-2-clause | 21,992 |
/*************************************************************************
* Copyright (c) 2015, Synopsys, Inc. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following conditions are *
* met: *
* *
* 1. Redistributions of source code must retain the above copyright *
* notice, this list of conditions and the following disclaimer. *
* *
* 2. Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in the *
* documentation and/or other materials provided with the distribution. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
*************************************************************************/
#ifndef _application_h_
#define _application_h_
// standard
#include <string.h>
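// Simple named base object with virtual hooks for saving/restoring its state.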
class application {
public:
application(char* name = " ");
virtual ~application();
virtual void giveName(char* name);
virtual char* getName();
virtual void save(char* );
virtual void restore(char* );
protected:
char* name;
};
#endif
| kit-transue/software-emancipation-discover | psethome/lib/Learn/src/ttt/include/application.H | C++ | bsd-2-clause | 2,370 |
// Copyright WRLD Ltd (2018-), All Rights Reserved
#include "SearchWidgetController.h"
#include "SearchResultSectionItemSelectedMessage.h"
#include "SearchServicesResult.h"
#include "IMenuSectionViewModel.h"
#include "NavigateToMessage.h"
#include "IMenuView.h"
#include "IMenuOption.h"
#include "MenuItemModel.h"
#include "IMenuModel.h"
#include "ISearchResultsRepository.h"
#include "SearchNavigationData.h"
namespace ExampleApp
{
namespace SearchMenu
{
namespace View
{
SearchWidgetController::SearchWidgetController(ISearchWidgetView& view,
ISearchResultsRepository& resultsRepository,
Modality::View::IModalBackgroundView& modalBackgroundView,
Menu::View::IMenuViewModel& viewModel,
ExampleAppMessaging::TMessageBus& messageBus,
ISearchProvider& searchProvider)
: m_view(view)
, m_modalBackgroundView(modalBackgroundView)
, m_viewModel(viewModel)
, m_messageBus(messageBus)
, m_resultsRepository(resultsRepository)
, m_onSearchResultsClearedCallback(this, &SearchWidgetController::OnSearchResultsCleared)
, m_onSearchResultSelectedCallback(this, &SearchWidgetController::OnSearchResultSelected)
, m_onNavigationRequestedCallback(this, &SearchWidgetController::OnNavigationRequested)
, m_onSearchQueryClearRequestHandler(this, &SearchWidgetController::OnSearchQueryClearRequest)
, m_onSearchQueryRefreshedHandler(this, &SearchWidgetController::OnSearchQueryRefreshedMessage)
, m_onSearchQueryResultsLoadedHandler(this, &SearchWidgetController::OnSearchResultsLoaded)
, m_deepLinkRequestedHandler(this, &SearchWidgetController::OnSearchRequestedMessage)
, m_menuContentsChanged(true)
, m_inInteriorMode(false)
, m_onAppModeChanged(this, &SearchWidgetController::OnAppModeChanged)
, m_onItemSelectedCallback(this, &SearchWidgetController::OnItemSelected)
, m_onItemAddedCallback(this, &SearchWidgetController::OnItemAdded)
, m_onItemRemovedCallback(this, &SearchWidgetController::OnItemRemoved)
, m_onScreenStateChanged(this, &SearchWidgetController::OnScreenControlStateChanged)
, m_onOpenableStateChanged(this, &SearchWidgetController::OnOpenableStateChanged)
, m_onModalBackgroundTouchCallback(this, &SearchWidgetController::OnModalBackgroundTouch)
, m_onViewOpenedCallback(this, &SearchWidgetController::OnViewOpened)
, m_onViewClosedCallback(this, &SearchWidgetController::OnViewClosed)
, m_tagCollection(m_messageBus)
, m_previousVisibleTextFromTagSearch("")
, m_shouldSelectFirstResult(false)
{
m_view.InsertSearchClearedCallback(m_onSearchResultsClearedCallback);
m_view.InsertResultSelectedCallback(m_onSearchResultSelectedCallback);
m_view.InsertOnItemSelected(m_onItemSelectedCallback);
m_view.InsertOnViewClosed(m_onViewClosedCallback);
m_view.InsertOnViewOpened(m_onViewOpenedCallback);
m_view.InsertOnNavigationRequestedCallback(m_onNavigationRequestedCallback);
m_viewModel.InsertOpenStateChangedCallback(m_onOpenableStateChanged);
m_viewModel.InsertOnScreenStateChangedCallback(m_onScreenStateChanged);
m_modalBackgroundView.InsertTouchCallback(m_onModalBackgroundTouchCallback);
m_messageBus.SubscribeUi(m_onSearchQueryRefreshedHandler);
m_messageBus.SubscribeUi(m_onSearchQueryResultsLoadedHandler);
m_messageBus.SubscribeUi(m_onSearchQueryClearRequestHandler);
m_messageBus.SubscribeUi(m_deepLinkRequestedHandler);
m_messageBus.SubscribeUi(m_onAppModeChanged);
for(size_t i = 0; i < m_viewModel.SectionsCount(); ++ i)
{
Menu::View::IMenuSectionViewModel& section(m_viewModel.GetMenuSection(static_cast<int>(i)));
SetGroupStart(section);
Menu::View::IMenuModel& menuModel = section.GetModel();
menuModel.InsertItemAddedCallback(m_onItemAddedCallback);
menuModel.InsertItemRemovedCallback(m_onItemRemovedCallback);
}
searchProvider.InsertSearchPerformedCallback(m_modalBackgroundView.GetSearchPerformedCallback());
}
SearchWidgetController::~SearchWidgetController()
{
for(int i = static_cast<int>(m_viewModel.SectionsCount()); --i >= 0;)
{
Menu::View::IMenuSectionViewModel& section(m_viewModel.GetMenuSection(i));
Menu::View::IMenuModel& menuModel = section.GetModel();
menuModel.RemoveItemAddedCallback(m_onItemAddedCallback);
menuModel.RemoveItemRemovedCallback(m_onItemRemovedCallback);
}
m_messageBus.UnsubscribeUi(m_onAppModeChanged);
m_messageBus.UnsubscribeUi(m_onSearchQueryRefreshedHandler);
m_messageBus.UnsubscribeUi(m_onSearchQueryClearRequestHandler);
m_messageBus.UnsubscribeUi(m_onSearchQueryResultsLoadedHandler);
m_messageBus.UnsubscribeUi(m_deepLinkRequestedHandler);
m_modalBackgroundView.RemoveTouchCallback(m_onModalBackgroundTouchCallback);
m_viewModel.RemoveOnScreenStateChangedCallback(m_onScreenStateChanged);
m_viewModel.RemoveOpenStateChangedCallback(m_onOpenableStateChanged);
m_view.RemoveOnNavigationRequestedCallback(m_onNavigationRequestedCallback);
m_view.RemoveOnViewClosed(m_onViewClosedCallback);
m_view.RemoveResultSelectedCallback(m_onSearchResultSelectedCallback);
m_view.RemoveSearchClearedCallback(m_onSearchResultsClearedCallback);
                m_view.RemoveOnItemSelected(m_onItemSelectedCallback);
}
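            // Flags the sections that should start a new visual group in the search menu.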
void SearchWidgetController::SetGroupStart(Menu::View::IMenuSectionViewModel& section)
{
if (section.Name() == "Find" ||
section.Name() == "Drop Pin" ||
section.Name() == "Weather")
{
section.SetGroupStart(true);
}
}
void SearchWidgetController::OnItemAdded(Menu::View::MenuItemModel& item) {
m_menuContentsChanged = true;
}
void SearchWidgetController::OnItemRemoved(Menu::View::MenuItemModel& item){
m_menuContentsChanged = true;
}
void SearchWidgetController::OnSearchResultsCleared()
{
m_messageBus.Publish(SearchResultSection::SearchResultViewClearedMessage());
}
void SearchWidgetController::OnSearchResultSelected(int& index)
{
const SearchServicesResult::TSdkSearchResult& sdkSearchResult = m_resultsRepository.GetSdkSearchResultByIndex(index);
m_messageBus.Publish(SearchResultSection::SearchResultSectionItemSelectedMessage(
sdkSearchResult.GetLocation().ToECEF(),
sdkSearchResult.IsInterior(),
sdkSearchResult.GetBuildingId(),
sdkSearchResult.GetFloor(),
m_resultsRepository.GetResultOriginalIndexFromCurrentIndex(index),
sdkSearchResult.GetIdentifier()));
}
void SearchWidgetController::OnNavigationRequested(const int& index)
{
const SearchServicesResult::TSdkSearchResult& sdkSearchResult = m_resultsRepository.GetSdkSearchResultByIndex(index);
const NavRouting::SearchNavigationData searchNavigationData(sdkSearchResult);
m_messageBus.Publish(NavRouting::NavigateToMessage(searchNavigationData));
}
void SearchWidgetController::OnSearchResultsLoaded(const Search::SearchQueryResponseReceivedMessage& message)
{
if (m_shouldSelectFirstResult && message.GetResults().size() > 0){
int val = 0;
OnSearchResultSelected(val);
m_shouldSelectFirstResult = false;
}
}
void SearchWidgetController::OnSearchQueryRefreshedMessage(const Search::SearchQueryRefreshedMessage& message)
{
const Search::SdkModel::SearchQuery &query = message.Query();
std::string visibleText = query.Query();
std::string tagText = "";
if (query.IsTag())
{
tagText = visibleText;
visibleText = m_previousVisibleTextFromTagSearch;
}
m_view.PerformSearch(visibleText,
QueryContext(false,
query.IsTag(),
tagText,
query.ShouldTryInteriorSearch(),
message.Location(),
message.Radius()));
}
void SearchWidgetController::OnSearchQueryClearRequest(const Search::SearchQueryClearRequestMessage &message)
{
m_view.ClearSearchResults();
}
void SearchWidgetController::OnSearchRequestedMessage(const Search::SearchQueryRequestMessage& message)
{
// needed to avoid a reentrant call on the reactor logic on startup queries / deeplinks
m_view.CloseMenu();
auto query = message.Query();
auto clearPreviousResults = false;
std::string visibleText = query.Query();
std::string tagText = "";
if (query.IsTag())
{
tagText = visibleText;
if (m_tagCollection.HasText(tagText))
{
const TagCollection::TagInfo& tagInfo = m_tagCollection.GetInfoByTag(tagText);
visibleText = tagInfo.VisibleText();
}
}
m_previousVisibleTextFromTagSearch = visibleText;
auto queryContext = QueryContext(clearPreviousResults,
query.IsTag(),
tagText,
query.ShouldTryInteriorSearch(),
query.Location(),
query.Radius());
m_shouldSelectFirstResult = query.SelectFirstResult();
m_view.PerformSearch(visibleText, queryContext);
}
void SearchWidgetController::UpdateUiThread(float dt)
{
RefreshPresentation();
}
void SearchWidgetController::OnAppModeChanged(const AppModes::AppModeChangedMessage &message)
{
m_menuContentsChanged = true;
m_inInteriorMode = message.GetAppMode() == AppModes::SdkModel::AppMode::InteriorMode;
RefreshPresentation();
}
void SearchWidgetController::OnItemSelected(const std::string& menuText, int& sectionIndex, int& itemIndex)
{
Menu::View::IMenuSectionViewModel& section = m_viewModel.GetMenuSection(sectionIndex);
if (m_tagCollection.HasTag(menuText))
{
m_view.ClearSearchResults();
TagCollection::TagInfo tagInfo = m_tagCollection.GetInfoByText(menuText);
m_previousVisibleTextFromTagSearch = menuText;
m_view.PerformSearch(menuText,
QueryContext(true, true, tagInfo.Tag(),
tagInfo.ShouldTryInterior()));
}
else if(!section.IsExpandable() || section.GetTotalItemCount()>0)
{
section.GetItemAtIndex(itemIndex).MenuOption().Select();
}
}
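            // Rebuilds the menu sections shown by the view when the contents have changed,
            // omitting the "Discover" section while in interior mode.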
void SearchWidgetController::RefreshPresentation()
{
if (!m_menuContentsChanged)
{
return;
}
m_menuContentsChanged = false;
const size_t numSections = m_viewModel.SectionsCount();
Menu::View::TSections sections;
sections.reserve(numSections);
for(size_t groupIndex = 0; groupIndex < numSections; groupIndex++)
{
Menu::View::IMenuSectionViewModel& section = m_viewModel.GetMenuSection(static_cast<int>(groupIndex));
if (section.Name() != "Discover" || !m_inInteriorMode)
{
sections.push_back(§ion);
}
}
m_view.UpdateMenuSectionViews(sections);
}
void SearchWidgetController::OnOpenableStateChanged(OpenableControl::View::IOpenableControlViewModel& viewModel)
{
if(m_viewModel.IsOnScreen())
{
if (m_viewModel.IsOpen())
{
m_view.SetOnScreen();
}
}
else
{
m_view.SetOffScreen();
}
}
void SearchWidgetController::OnScreenControlStateChanged(ScreenControl::View::IScreenControlViewModel& viewModel)
{
if (m_viewModel.IsOnScreen())
{
m_view.SetOnScreen();
}
else if (m_viewModel.IsOffScreen())
{
m_view.SetOffScreen();
}
}
void SearchWidgetController::OnViewOpened()
{
if(!m_viewModel.IsOpen())
{
m_viewModel.Open();
}
}
void SearchWidgetController::OnViewClosed()
{
if(!m_viewModel.IsClosed())
{
m_viewModel.Close();
}
}
void SearchWidgetController::OnModalBackgroundTouch()
{
// the modal background goes away after the first touch, so no need to throttle
m_view.CloseMenu();
}
}
}
}
| eegeo/eegeo-example-app | src/SearchMenu/View/SearchWidgetController.cpp | C++ | bsd-2-clause | 14,669 |
cask :v1 => 'delivery-status' do
version '6.1.2'
sha256 'f39afd137c99df16baf149c60f1a982edb9485f6211f4aefb9cad19af7a51514'
url "http://junecloud.com/get/delivery-status-widget?#{version}"
homepage 'http://junecloud.com/software/mac/delivery-status.html'
license :oss
widget 'Delivery Status.wdgt'
caveats <<-EOS.undent
Currently, Dashboard Widgets such as '#{title}' do NOT work correctly
when installed via brew-cask. The bug is being tracked here:
https://github.com/caskroom/homebrew-cask/issues/2206
It is recommended that you do not install this Cask unless you are
a developer working on the problem.
EOS
end
| L2G/homebrew-cask | Casks/delivery-status.rb | Ruby | bsd-2-clause | 658 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.metbk_a_dcl
@file marine-integrations/mi/dataset/parser/metbk_a_dcl.py
@author Ronald Ronquillo
@brief Parser for the metbk_a_dcl dataset driver
This file contains code for the metbk_a_dcl parsers and code to produce data particles.
For telemetered data, there is one parser which produces one type of data particle.
For recovered data, there is one parser which produces one type of data particle.
The input files and the content of the data particles are the same for both
recovered and telemetered.
Only the names of the output particle streams are different.
The input file is ASCII and contains 2 types of records.
Records are separated by a newline.
All records start with a timestamp.
Metadata records: timestamp [text] more text newline.
Sensor Data records: timestamp sensor_data newline.
Only sensor data records produce particles if properly formed.
Mal-formed sensor data records and all metadata records produce no particles.
Release notes:
Initial Release
"""
import re
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.dataset.parser.dcl_file_common import \
DclInstrumentDataParticle, \
DclFileCommonParser
from mi.core.instrument.dataset_data_particle import DataParticleKey
from mi.core.exceptions import UnexpectedDataException
log = get_logger()
__author__ = 'Phillip Tran'
__license__ = 'Apache 2.0'
# SENSOR_DATA_MATCHER produces the following groups.
# The following are indices into groups() produced by SENSOR_DATA_MATCHER
# incremented after common timestamp values.
# i.e, match.groups()[INDEX]
SENSOR_GROUP_BAROMETRIC_PRESSURE = 1
SENSOR_GROUP_RELATIVE_HUMIDITY = 2
SENSOR_GROUP_AIR_TEMPERATURE = 3
SENSOR_GROUP_LONGWAVE_IRRADIANCE = 4
SENSOR_GROUP_PRECIPITATION = 5
SENSOR_GROUP_SEA_SURFACE_TEMPERATURE = 6
SENSOR_GROUP_SEA_SURFACE_CONDUCTIVITY = 7
SENSOR_GROUP_SHORTWAVE_IRRADIANCE = 8
SENSOR_GROUP_EASTWARD_WIND_VELOCITY = 9
SENSOR_GROUP_NORTHWARD_WIND_VELOCITY = 10
# This table is used in the generation of the instrument data particle.
# This will be a list of tuples with the following columns.
# Column 1 - particle parameter name
# Column 2 - group number (index into raw_data)
# Column 3 - data encoding function (conversion required - int, float, etc)
INSTRUMENT_PARTICLE_MAP = [
('barometric_pressure', SENSOR_GROUP_BAROMETRIC_PRESSURE, float),
('relative_humidity', SENSOR_GROUP_RELATIVE_HUMIDITY, float),
('air_temperature', SENSOR_GROUP_AIR_TEMPERATURE, float),
('longwave_irradiance', SENSOR_GROUP_LONGWAVE_IRRADIANCE, float),
('precipitation', SENSOR_GROUP_PRECIPITATION, float),
('sea_surface_temperature', SENSOR_GROUP_SEA_SURFACE_TEMPERATURE, float),
('sea_surface_conductivity', SENSOR_GROUP_SEA_SURFACE_CONDUCTIVITY, float),
('shortwave_irradiance', SENSOR_GROUP_SHORTWAVE_IRRADIANCE, float),
('eastward_wind_velocity', SENSOR_GROUP_EASTWARD_WIND_VELOCITY, float),
('northward_wind_velocity', SENSOR_GROUP_NORTHWARD_WIND_VELOCITY, float)
]
class DataParticleType(BaseEnum):
REC_INSTRUMENT_PARTICLE = 'metbk_a_dcl_instrument_recovered'
TEL_INSTRUMENT_PARTICLE = 'metbk_a_dcl_instrument'
class MetbkADclInstrumentDataParticle(DclInstrumentDataParticle):
"""
Class for generating the Metbk_a instrument particle.
"""
def __init__(self, raw_data, *args, **kwargs):
super(MetbkADclInstrumentDataParticle, self).__init__(
raw_data,
INSTRUMENT_PARTICLE_MAP,
*args, **kwargs)
def _build_parsed_values(self):
"""
Build parsed values for Recovered and Telemetered Instrument Data Particle.
Will only append float values and ignore strings.
Returns the list.
"""
data_list = []
for name, group, func in INSTRUMENT_PARTICLE_MAP:
if isinstance(self.raw_data[group], func):
data_list.append(self._encode_value(name, self.raw_data[group], func))
return data_list
class MetbkADclRecoveredInstrumentDataParticle(MetbkADclInstrumentDataParticle):
"""
Class for generating Offset Data Particles from Recovered data.
"""
_data_particle_type = DataParticleType.REC_INSTRUMENT_PARTICLE
class MetbkADclTelemeteredInstrumentDataParticle(MetbkADclInstrumentDataParticle):
"""
Class for generating Offset Data Particles from Telemetered data.
"""
_data_particle_type = DataParticleType.TEL_INSTRUMENT_PARTICLE
class MetbkADclParser(DclFileCommonParser):
"""
This is the entry point for the Metbk_a_dcl parser.
"""
def __init__(self,
config,
stream_handle,
exception_callback):
super(MetbkADclParser, self).__init__(config,
stream_handle,
exception_callback,
'',
'')
self.particle_classes = None
self.instrument_particle_map = INSTRUMENT_PARTICLE_MAP
self.raw_data_length = 14
def parse_file(self):
"""
This method reads the file and parses the data within, and at
the end of this method self._record_buffer will be filled with all the particles in the file.
"""
# If not set from config & no InstrumentParameterException error from constructor
if self.particle_classes is None:
self.particle_classes = (self._particle_class,)
for particle_class in self.particle_classes:
for line in self._stream_handle:
if not re.findall(r'.*\[.*\]:\b[^\W\d_]+\b', line) and line is not None: # Disregard anything that has a word after [metbk2:DLOGP6]:
line = re.sub(r'\[.*\]:', '', line)
raw_data = line.split()
if len(raw_data) != self.raw_data_length: # The raw data should have a length of 14
self.handle_unknown_data(line)
continue
if re.findall(r'[a-zA-Z][0-9]|[0-9][a-zA-Z]', line):
self.handle_unknown_data(line)
continue
raw_data[0:2] = [' '.join(raw_data[0:2])] # Merge the first and second elements to form a timestamp
if raw_data is not None:
for i in range(1, len(raw_data)): # Ignore 0th element, because that is the timestamp
raw_data[i] = self.select_type(raw_data[i])
particle = self._extract_sample(particle_class,
None,
raw_data,
preferred_ts=DataParticleKey.PORT_TIMESTAMP)
self._record_buffer.append(particle)
def handle_unknown_data(self, line):
# Otherwise generate warning for unknown data.
error_message = 'Unknown data found in chunk %s' % line
log.warn(error_message)
self._exception_callback(UnexpectedDataException(error_message))
@staticmethod
def select_type(raw_list_element):
"""
This function will return the float value if possible
"""
try:
return float(raw_list_element)
except ValueError:
return None
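

# --- Illustrative usage sketch (not part of the driver). The file name, the
# --- config contents and the way records are retrieved below are assumptions;
# --- in practice the parser is constructed and driven by the dataset driver
# --- framework.
#
#   def error_callback(exception):
#       log.warn('parser exception: %s', exception)
#
#   config = {}  # particle module/class wiring is supplied by the dataset driver
#   with open('metbk_a_dcl_sample.log') as stream:
#       parser = MetbkADclParser(config, stream, error_callback)
#       parser.parse_file()
#       # parse_file() fills parser._record_buffer with instrument data
#       # particles, one per well-formed sensor record (14 whitespace-separated
#       # fields: a DCL date and time followed by 12 values, the first 10 of
#       # which are mapped via INSTRUMENT_PARTICLE_MAP).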
| renegelinas/mi-instrument | mi/dataset/parser/metbk_a_dcl.py | Python | bsd-2-clause | 7,536 |
// Package csr implements certificate requests for CFSSL.
package csr
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"net"
"net/mail"
"strings"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
)
const (
curveP256 = 256
curveP384 = 384
curveP521 = 521
)
// A Name contains the SubjectInfo fields.
type Name struct {
C string // Country
ST string // State
L string // Locality
O string // OrganisationName
OU string // OrganisationalUnitName
}
// A KeyRequest is a generic request for a new key.
type KeyRequest interface {
Algo() string
Size() int
Generate() (crypto.PrivateKey, error)
SigAlgo() x509.SignatureAlgorithm
}
// A BasicKeyRequest contains the algorithm and key size for a new private key.
type BasicKeyRequest struct {
A string `json:"algo"`
S int `json:"size"`
}
// NewBasicKeyRequest returns a default BasicKeyRequest.
func NewBasicKeyRequest() *BasicKeyRequest {
return &BasicKeyRequest{"ecdsa", curveP256}
}
// Algo returns the requested key algorithm represented as a string.
func (kr *BasicKeyRequest) Algo() string {
return kr.A
}
// Size returns the requested key size.
func (kr *BasicKeyRequest) Size() int {
return kr.S
}
// Generate generates a key as specified in the request. Currently,
// only ECDSA and RSA are supported.
func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) {
log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size())
switch kr.Algo() {
case "rsa":
if kr.Size() < 2048 {
return nil, errors.New("RSA key is too weak")
}
if kr.Size() > 8192 {
return nil, errors.New("RSA key size too large")
}
return rsa.GenerateKey(rand.Reader, kr.Size())
case "ecdsa":
var curve elliptic.Curve
switch kr.Size() {
case curveP256:
curve = elliptic.P256()
case curveP384:
curve = elliptic.P384()
case curveP521:
curve = elliptic.P521()
default:
return nil, errors.New("invalid curve")
}
return ecdsa.GenerateKey(curve, rand.Reader)
default:
return nil, errors.New("invalid algorithm")
}
}
// SigAlgo returns an appropriate X.509 signature algorithm given the
// key request's type and size.
func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm {
switch kr.Algo() {
case "rsa":
switch {
case kr.Size() >= 4096:
return x509.SHA512WithRSA
case kr.Size() >= 3072:
return x509.SHA384WithRSA
case kr.Size() >= 2048:
return x509.SHA256WithRSA
default:
return x509.SHA1WithRSA
}
case "ecdsa":
switch kr.Size() {
case curveP521:
return x509.ECDSAWithSHA512
case curveP384:
return x509.ECDSAWithSHA384
case curveP256:
return x509.ECDSAWithSHA256
default:
return x509.ECDSAWithSHA1
}
default:
return x509.UnknownSignatureAlgorithm
}
}
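
// Illustrative key-request to signature-algorithm mapping, derived from the
// switch statements above (examples only, not an exhaustive table):
//
//	NewBasicKeyRequest().SigAlgo()                    // x509.ECDSAWithSHA256 (ecdsa, P-256)
//	(&BasicKeyRequest{A: "rsa", S: 2048}).SigAlgo()   // x509.SHA256WithRSA
//	(&BasicKeyRequest{A: "ecdsa", S: 384}).SigAlgo()  // x509.ECDSAWithSHA384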
// CAConfig is a section used in the requests initialising a new CA.
type CAConfig struct {
PathLength int `json:"pathlen"`
Expiry string `json:"expiry"`
}
// A CertificateRequest encapsulates the API interface to the
// certificate request functionality.
type CertificateRequest struct {
CN string
Names []Name `json:"names"`
Hosts []string `json:"hosts"`
KeyRequest KeyRequest `json:"key,omitempty"`
CA *CAConfig `json:"ca,omitempty"`
}
// New returns a new, empty CertificateRequest with a
// BasicKeyRequest.
func New() *CertificateRequest {
return &CertificateRequest{
KeyRequest: NewBasicKeyRequest(),
}
}
// appendIf appends s to a if s is not an empty string.
func appendIf(s string, a *[]string) {
if s != "" {
*a = append(*a, s)
}
}
// Name returns the PKIX name for the request.
func (cr *CertificateRequest) Name() pkix.Name {
var name pkix.Name
name.CommonName = cr.CN
for _, n := range cr.Names {
appendIf(n.C, &name.Country)
appendIf(n.ST, &name.Province)
appendIf(n.L, &name.Locality)
appendIf(n.O, &name.Organization)
appendIf(n.OU, &name.OrganizationalUnit)
}
return name
}
// ParseRequest takes a certificate request and generates a key and
// CSR from it. It does no validation -- caveat emptor. It will,
// however, fail if the key request is not valid (i.e., an unsupported
// curve or RSA key size). The lack of validation was specifically
// chosen to allow the end user to define a policy and validate the
// request appropriately before calling this function.
func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
log.Info("received CSR")
if req.KeyRequest == nil {
req.KeyRequest = NewBasicKeyRequest()
}
log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
priv, err := req.KeyRequest.Generate()
if err != nil {
err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
return
}
switch priv := priv.(type) {
case *rsa.PrivateKey:
key = x509.MarshalPKCS1PrivateKey(priv)
block := pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: key,
}
key = pem.EncodeToMemory(&block)
case *ecdsa.PrivateKey:
key, err = x509.MarshalECPrivateKey(priv)
if err != nil {
err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
return
}
block := pem.Block{
Type: "EC PRIVATE KEY",
Bytes: key,
}
key = pem.EncodeToMemory(&block)
default:
panic("Generate should have failed to produce a valid key.")
}
var tpl = x509.CertificateRequest{
Subject: req.Name(),
SignatureAlgorithm: req.KeyRequest.SigAlgo(),
}
for i := range req.Hosts {
if ip := net.ParseIP(req.Hosts[i]); ip != nil {
tpl.IPAddresses = append(tpl.IPAddresses, ip)
} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
tpl.EmailAddresses = append(tpl.EmailAddresses, req.Hosts[i])
} else {
tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
}
}
csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
return
}
block := pem.Block{
Type: "CERTIFICATE REQUEST",
Bytes: csr,
}
log.Info("encoded CSR")
csr = pem.EncodeToMemory(&block)
return
}
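
// Illustrative use of ParseRequest (the subject and hosts are hypothetical and
// error handling is reduced to a single check):
//
//	req := New()
//	req.CN = "www.example.net"
//	req.Hosts = []string{"www.example.net", "192.0.2.10", "admin@example.net"}
//	csrPEM, keyPEM, err := ParseRequest(req)
//	if err != nil {
//		// handle the error
//	}
//	// csrPEM now holds a PEM "CERTIFICATE REQUEST" block and keyPEM the
//	// PEM-encoded private key (ECDSA P-256 by default via NewBasicKeyRequest).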
// ExtractCertificateRequest extracts a CertificateRequest from
// x509.Certificate. It is aimed to used for generating a new certificate
// from an existing certificate. For a root certificate, the CA expiry
// length is calculated as the duration between cert.NotAfter and cert.NotBefore.
func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
req := New()
req.CN = cert.Subject.CommonName
req.Names = getNames(cert.Subject)
req.Hosts = getHosts(cert)
if cert.IsCA {
req.CA = new(CAConfig)
// CA expiry length is calculated based on the input cert
// issue date and expiry date.
req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
req.CA.PathLength = cert.MaxPathLen
}
return req
}
func getHosts(cert *x509.Certificate) []string {
var hosts []string
for _, ip := range cert.IPAddresses {
hosts = append(hosts, ip.String())
}
for _, dns := range cert.DNSNames {
hosts = append(hosts, dns)
}
for _, email := range cert.EmailAddresses {
hosts = append(hosts, email)
}
return hosts
}
// getNames returns an array of Names from the certificate.
// It only cares about Country, Organization, OrganizationalUnit, Locality and Province.
func getNames(sub pkix.Name) []Name {
	// anonymous func for finding the max of a list of integers
max := func(v1 int, vn ...int) (max int) {
max = v1
for i := 0; i < len(vn); i++ {
if vn[i] > max {
max = vn[i]
}
}
return max
}
nc := len(sub.Country)
norg := len(sub.Organization)
nou := len(sub.OrganizationalUnit)
nl := len(sub.Locality)
np := len(sub.Province)
n := max(nc, norg, nou, nl, np)
names := make([]Name, n)
for i := range names {
if i < nc {
names[i].C = sub.Country[i]
}
if i < norg {
names[i].O = sub.Organization[i]
}
if i < nou {
names[i].OU = sub.OrganizationalUnit[i]
}
if i < nl {
names[i].L = sub.Locality[i]
}
if i < np {
names[i].ST = sub.Province[i]
}
}
return names
}
// A Generator is responsible for validating certificate requests.
type Generator struct {
Validator func(*CertificateRequest) error
}
// ProcessRequest validates and processes the incoming request. It is
// a wrapper around a validator and the ParseRequest function.
func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
log.Info("generate received request")
err = g.Validator(req)
if err != nil {
log.Warningf("invalid request: %v", err)
return
}
csr, key, err = ParseRequest(req)
if err != nil {
return nil, nil, err
}
return
}
// IsNameEmpty returns true if the name has no identifying information in it.
func IsNameEmpty(n Name) bool {
empty := func(s string) bool { return strings.TrimSpace(s) == "" }
if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
return true
}
return false
}
// Regenerate uses the provided CSR as a template for signing a new
// CSR using priv.
func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
req, extra, err := helpers.ParseCSR(csr)
if err != nil {
return nil, err
} else if len(extra) > 0 {
return nil, errors.New("csr: trailing data in certificate request")
}
return x509.CreateCertificateRequest(rand.Reader, req, priv)
}
// Generate creates a new CSR from a CertificateRequest structure and
// an existing key. The KeyRequest field is ignored.
func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
sigAlgo := helpers.SignerAlgo(priv, crypto.SHA256)
if sigAlgo == x509.UnknownSignatureAlgorithm {
return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
}
var tpl = x509.CertificateRequest{
Subject: req.Name(),
SignatureAlgorithm: sigAlgo,
}
for i := range req.Hosts {
if ip := net.ParseIP(req.Hosts[i]); ip != nil {
tpl.IPAddresses = append(tpl.IPAddresses, ip)
} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)
} else {
tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
}
}
csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
return
}
block := pem.Block{
Type: "CERTIFICATE REQUEST",
Bytes: csr,
}
log.Info("encoded CSR")
csr = pem.EncodeToMemory(&block)
return
}
| rolandshoemaker/cfssl | csr/csr.go | GO | bsd-2-clause | 10,628 |
cask 'gitahead' do
version '2.6.1'
sha256 'a4dbbbd7c72c34acfdcad94d5217dfba00a16c64440d3a2f155a937a94d87fff'
url "https://github.com/gitahead/gitahead/releases/download/v#{version}/GitAhead-#{version}.dmg"
appcast 'https://github.com/gitahead/gitahead/releases.atom'
name 'GitAhead'
homepage 'https://github.com/gitahead/gitahead'
depends_on macos: '>= :sierra'
app 'GitAhead.app'
end
| troyxmccall/homebrew-cask | Casks/gitahead.rb | Ruby | bsd-2-clause | 404 |
<?php
/**
* PHPUnit
*
* Copyright (c) 2002-2009, Sebastian Bergmann <sb@sebastian-bergmann.de>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Sebastian Bergmann nor the names of his
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @category Testing
* @package PHPUnit
* @author Sebastian Bergmann <sb@sebastian-bergmann.de>
* @copyright 2002-2009 Sebastian Bergmann <sb@sebastian-bergmann.de>
* @license http://www.opensource.org/licenses/bsd-license.php BSD License
* @version SVN: $Id: AllTests.php 4404 2008-12-31 09:27:18Z sb $
* @link http://www.phpunit.de/
* @since File available since Release 3.2.0
*/
error_reporting(E_ALL | E_STRICT);
require_once 'PHPUnit/Util/Filter.php';
PHPUnit_Util_Filter::addFileToFilter(__FILE__);
require_once 'PHPUnit/Framework/TestSuite.php';
require_once dirname(__FILE__) . DIRECTORY_SEPARATOR . 'DataSet' . DIRECTORY_SEPARATOR . 'AllTests.php';
require_once dirname(__FILE__) . DIRECTORY_SEPARATOR . 'Operation' . DIRECTORY_SEPARATOR . 'AllTests.php';
PHPUnit_Util_Filter::$filterPHPUnit = FALSE;
/**
*
*
* @category Testing
* @package PHPUnit
* @author Sebastian Bergmann <sb@sebastian-bergmann.de>
* @copyright 2002-2009 Sebastian Bergmann <sb@sebastian-bergmann.de>
* @license http://www.opensource.org/licenses/bsd-license.php BSD License
* @version Release: @package_version@
* @link http://www.phpunit.de/
* @since Class available since Release 3.2.0
*/
class Extensions_Database_AllTests
{
public static function suite()
{
$suite = new PHPUnit_Framework_TestSuite('PHPUnit_Extensions_Database');
$suite->addTest(Extensions_Database_Operation_AllTests::suite());
$suite->addTest(Extensions_Database_DataSet_AllTests::suite());
return $suite;
}
}
?>
| rogeriopradoj/frapi-without-vhost | tests/phpunit/PHPUnit/Tests/Extensions/Database/AllTests.php | PHP | bsd-2-clause | 3,257 |
/**
* Copyright 2013-2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @emails oncall+relay
*/
'use strict';
require('configureForRelayOSS');
const Relay = require('Relay');
const RelayTestUtils = require('RelayTestUtils');
const generateRQLFieldAlias = require('generateRQLFieldAlias');
const transformRelayQueryPayload = require('transformRelayQueryPayload');
describe('transformClientPayload()', () => {
var {getNode} = RelayTestUtils;
it('transforms singular root payloads', () => {
var query = getNode(Relay.QL`
query {
node(id: "123") {
friends(first:"1") {
count,
edges {
node {
id,
... on User {
profilePicture(size: "32") {
uri,
},
},
},
},
},
}
}
`);
var payload = {
node: {
id: '123',
friends: {
count: 1,
edges: [
{
cursor: 'friend:cursor',
node: {
id: 'client:1',
profilePicture: {
uri: 'friend.jpg',
},
},
},
],
},
},
};
expect(transformRelayQueryPayload(query, payload)).toEqual({
node: {
__typename: undefined,
id: '123',
[generateRQLFieldAlias('friends.first(1)')]: {
count: 1,
edges: [
{
cursor: 'friend:cursor',
node: {
id: 'client:1',
[generateRQLFieldAlias('profilePicture.size(32)')]: {
uri: 'friend.jpg',
},
},
},
],
pageInfo: undefined,
},
},
});
});
it('transforms plural root payloads of arrays', () => {
var query = getNode(Relay.QL`
query {
nodes(ids: ["123", "456"]) {
... on User {
profilePicture(size: "32") {
uri,
},
},
},
}
`);
var payload = {
123: {
id: '123',
profilePicture: {
uri: '123.jpg',
},
},
456: {
id: '456',
profilePicture: {
uri: '456.jpg',
},
},
};
expect(transformRelayQueryPayload(query, payload)).toEqual({
123: {
__typename: undefined,
id: '123',
[generateRQLFieldAlias('profilePicture.size(32)')]: {
uri: '123.jpg',
},
},
456: {
__typename: undefined,
id: '456',
[generateRQLFieldAlias('profilePicture.size(32)')]: {
uri: '456.jpg',
},
},
});
});
it('transforms plural root payloads of objects (OSS)', () => {
var query = getNode(Relay.QL`
query {
nodes(ids: ["123", "456"]) {
... on User {
profilePicture(size: "32") {
uri,
},
},
},
}
`);
var payload = [
{
id: '123',
profilePicture: {
uri: '123.jpg',
},
},
{
id: '456',
profilePicture: {
uri: '456.jpg',
},
},
];
expect(transformRelayQueryPayload(query, payload)).toEqual([
{
__typename: undefined,
id: '123',
[generateRQLFieldAlias('profilePicture.size(32)')]: {
uri: '123.jpg',
},
},
{
__typename: undefined,
id: '456',
[generateRQLFieldAlias('profilePicture.size(32)')]: {
uri: '456.jpg',
},
},
]);
});
it('transforms plural root payloads of objects (FB)', () => {
var query = getNode(Relay.QL`
query {
nodes(ids: ["123", "456"]) {
... on User {
profilePicture(size: "32") {
uri,
},
},
},
}
`);
var payload = {
nodes: [
{
id: '123',
profilePicture: {
uri: '123.jpg',
},
},
{
id: '456',
profilePicture: {
uri: '456.jpg',
},
},
],
};
expect(transformRelayQueryPayload(query, payload)).toEqual({
nodes: [
{
__typename: undefined,
id: '123',
[generateRQLFieldAlias('profilePicture.size(32)')]: {
uri: '123.jpg',
},
},
{
__typename: undefined,
id: '456',
[generateRQLFieldAlias('profilePicture.size(32)')]: {
uri: '456.jpg',
},
},
],
});
});
});
| Aweary/relay | src/tools/__tests__/transformRelayQueryPayload-test.js | JavaScript | bsd-3-clause | 4,969 |
import glob
import os
import sys
from jedi.evaluate.site import addsitedir
from jedi._compatibility import exec_function, unicode
from jedi.parser import tree
from jedi.parser import ParserWithRecovery
from jedi.evaluate.cache import memoize_default
from jedi import debug
from jedi import common
from jedi.evaluate.compiled import CompiledObject
from jedi.parser.utils import load_parser, save_parser
def get_venv_path(venv):
"""Get sys.path for specified virtual environment."""
sys_path = _get_venv_path_dirs(venv)
with common.ignored(ValueError):
sys_path.remove('')
sys_path = _get_sys_path_with_egglinks(sys_path)
    # As of now, _get_venv_path_dirs does not scan the built-in pythonpath and
    # user-local site-packages, so we approximate them using the sys.path of
    # the interpreter running Jedi.
return sys_path + sys.path
def _get_sys_path_with_egglinks(sys_path):
"""Find all paths including those referenced by egg-links.
Egg-link-referenced directories are inserted into path immediately before
the directory on which their links were found. Such directories are not
taken into consideration by normal import mechanism, but they are traversed
when doing pkg_resources.require.
"""
result = []
for p in sys_path:
        # pkg_resources does not define a specific order for egg-link files
        # enumerated via os.listdir, so we sort them to keep the tests
        # reproducible.
for egg_link in sorted(glob.glob(os.path.join(p, '*.egg-link'))):
with open(egg_link) as fd:
for line in fd:
line = line.strip()
if line:
result.append(os.path.join(p, line))
# pkg_resources package only interprets the first
# non-empty line in egg-link files.
break
result.append(p)
return result
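
# Illustrative egg-link expansion (paths are hypothetical): given
# '/venv/site-packages/foo.egg-link' whose first non-empty line is
# '/home/dev/foo', _get_sys_path_with_egglinks(['/venv/site-packages'])
# returns ['/home/dev/foo', '/venv/site-packages'].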
def _get_venv_path_dirs(venv):
"""Get sys.path for venv without starting up the interpreter."""
venv = os.path.abspath(venv)
sitedir = _get_venv_sitepackages(venv)
sys_path = []
addsitedir(sys_path, sitedir)
return sys_path
def _get_venv_sitepackages(venv):
if os.name == 'nt':
p = os.path.join(venv, 'lib', 'site-packages')
else:
p = os.path.join(venv, 'lib', 'python%d.%d' % sys.version_info[:2],
'site-packages')
return p
def _execute_code(module_path, code):
c = "import os; from os.path import *; result=%s"
variables = {'__file__': module_path}
try:
exec_function(c % code, variables)
except Exception:
debug.warning('sys.path manipulation detected, but failed to evaluate.')
else:
try:
res = variables['result']
if isinstance(res, str):
return [os.path.abspath(res)]
except KeyError:
pass
return []
def _paths_from_assignment(module_context, expr_stmt):
"""
Extracts the assigned strings from an assignment that looks as follows::
>>> sys.path[0:0] = ['module/path', 'another/module/path']
This function is in general pretty tolerant (and therefore 'buggy').
However, it's not a big issue usually to add more paths to Jedi's sys_path,
because it will only affect Jedi in very random situations and by adding
more paths than necessary, it usually benefits the general user.
"""
for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]):
try:
assert operator in ['=', '+=']
assert assignee.type in ('power', 'atom_expr') and \
len(assignee.children) > 1
c = assignee.children
assert c[0].type == 'name' and c[0].value == 'sys'
trailer = c[1]
assert trailer.children[0] == '.' and trailer.children[1].value == 'path'
# TODO Essentially we're not checking details on sys.path
            # manipulation. Both assignment of the sys.path and changing/adding
            # parts of the sys.path are the same: they get added to the current
# sys.path.
"""
execution = c[2]
assert execution.children[0] == '['
subscript = execution.children[1]
assert subscript.type == 'subscript'
assert ':' in subscript.children
"""
except AssertionError:
continue
from jedi.evaluate.iterable import py__iter__
from jedi.evaluate.precedence import is_string
types = module_context.create_context(expr_stmt).eval_node(expr_stmt)
for lazy_context in py__iter__(module_context.evaluator, types, expr_stmt):
for context in lazy_context.infer():
if is_string(context):
yield context.obj
def _paths_from_list_modifications(module_path, trailer1, trailer2):
""" extract the path from either "sys.path.append" or "sys.path.insert" """
# Guarantee that both are trailers, the first one a name and the second one
# a function execution with at least one param.
if not (trailer1.type == 'trailer' and trailer1.children[0] == '.'
and trailer2.type == 'trailer' and trailer2.children[0] == '('
and len(trailer2.children) == 3):
return []
name = trailer1.children[1].value
if name not in ['insert', 'append']:
return []
arg = trailer2.children[1]
if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma.
arg = arg.children[2]
return _execute_code(module_path, arg.get_code())
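
# Illustrative statement handled by _paths_from_list_modifications (the module
# content is hypothetical):
#   sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))
# Here trailer1 is the '.insert' trailer, trailer2 is the '(...)' trailer, and
# the second argument is evaluated by _execute_code() with __file__ bound to
# module_path.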
def _check_module(module_context):
"""
Detect sys.path modifications within module.
"""
def get_sys_path_powers(names):
for name in names:
power = name.parent.parent
if power.type in ('power', 'atom_expr'):
c = power.children
if isinstance(c[0], tree.Name) and c[0].value == 'sys' \
and c[1].type == 'trailer':
n = c[1].children[1]
if isinstance(n, tree.Name) and n.value == 'path':
yield name, power
sys_path = list(module_context.evaluator.sys_path) # copy
if isinstance(module_context, CompiledObject):
return sys_path
try:
possible_names = module_context.tree_node.used_names['path']
except KeyError:
        # module.used_names is a MergedNamesDict whose __getitem__ never raises
        # KeyError, so this branch is superfluous.
pass
else:
for name, power in get_sys_path_powers(possible_names):
stmt = name.get_definition()
if len(power.children) >= 4:
sys_path.extend(
_paths_from_list_modifications(
module_context.py__file__(), *power.children[2:4]
)
)
elif name.get_definition().type == 'expr_stmt':
sys_path.extend(_paths_from_assignment(module_context, stmt))
return sys_path
@memoize_default(evaluator_is_first_arg=True, default=[])
def sys_path_with_modifications(evaluator, module_context):
path = module_context.py__file__()
if path is None:
# Support for modules without a path is bad, therefore return the
# normal path.
return list(evaluator.sys_path)
curdir = os.path.abspath(os.curdir)
#TODO why do we need a chdir?
with common.ignored(OSError):
os.chdir(os.path.dirname(path))
buildout_script_paths = set()
result = _check_module(module_context)
result += _detect_django_path(path)
for buildout_script in _get_buildout_scripts(path):
for path in _get_paths_from_buildout_script(evaluator, buildout_script):
buildout_script_paths.add(path)
# cleanup, back to old directory
os.chdir(curdir)
return list(result) + list(buildout_script_paths)
def _get_paths_from_buildout_script(evaluator, buildout_script):
def load(buildout_script):
try:
with open(buildout_script, 'rb') as f:
source = common.source_to_unicode(f.read())
except IOError:
debug.dbg('Error trying to read buildout_script: %s', buildout_script)
return
p = ParserWithRecovery(evaluator.grammar, source, buildout_script)
save_parser(buildout_script, p)
return p.module
cached = load_parser(buildout_script)
module_node = cached and cached.module or load(buildout_script)
if module_node is None:
return
from jedi.evaluate.representation import ModuleContext
for path in _check_module(ModuleContext(evaluator, module_node)):
yield path
def traverse_parents(path):
while True:
new = os.path.dirname(path)
if new == path:
return
path = new
yield path
def _get_parent_dir_with_file(path, filename):
for parent in traverse_parents(path):
if os.path.isfile(os.path.join(parent, filename)):
return parent
return None
def _detect_django_path(module_path):
""" Detects the path of the very well known Django library (if used) """
result = []
for parent in traverse_parents(module_path):
with common.ignored(IOError):
with open(parent + os.path.sep + 'manage.py'):
debug.dbg('Found django path: %s', module_path)
result.append(parent)
return result
def _get_buildout_scripts(module_path):
"""
    If there is a 'buildout.cfg' file in one of the parent directories of the
    given module, return a list of all files in the buildout bin directory
    that look like Python files.
:param module_path: absolute path to the module.
:type module_path: str
"""
project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg')
if not project_root:
return []
bin_path = os.path.join(project_root, 'bin')
if not os.path.exists(bin_path):
return []
extra_module_paths = []
for filename in os.listdir(bin_path):
try:
filepath = os.path.join(bin_path, filename)
with open(filepath, 'r') as f:
firstline = f.readline()
if firstline.startswith('#!') and 'python' in firstline:
extra_module_paths.append(filepath)
except (UnicodeDecodeError, IOError) as e:
            # Probably a binary file, a permission error, or a race condition
            # because the file got deleted; ignore it.
debug.warning(unicode(e))
continue
return extra_module_paths
| tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/jedi/evaluate/sys_path.py | Python | bsd-3-clause | 10,686 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/base/ui_base_switches.h"
namespace switches {
// Disables use of DWM composition for top level windows.
const char kDisableDwmComposition[] = "disable-dwm-composition";
// Disables the new visual style for application dialogs and controls.
const char kDisableNewDialogStyle[] = "disable-new-dialog-style";
// Disables touch adjustment.
const char kDisableTouchAdjustment[] = "disable-touch-adjustment";
// Disables touch event based drag and drop.
const char kDisableTouchDragDrop[] = "disable-touch-drag-drop";
// Disables controls that support touch base text editing.
const char kDisableTouchEditing[] = "disable-touch-editing";
// Disables the Views textfield on Windows.
const char kDisableViewsTextfield[] = "disable-views-textfield";
// Enables the new visual style for application dialogs and controls.
const char kEnableNewDialogStyle[] = "enable-new-dialog-style";
// Enable scroll prediction for scroll update events.
const char kEnableScrollPrediction[] = "enable-scroll-prediction";
// Enables touch event based drag and drop.
const char kEnableTouchDragDrop[] = "enable-touch-drag-drop";
// Enables controls that support touch base text editing.
const char kEnableTouchEditing[] = "enable-touch-editing";
// Enables the Views textfield on Windows.
const char kEnableViewsTextfield[] = "enable-views-textfield";
// Enables/Disables High DPI support (windows)
const char kHighDPISupport[] = "high-dpi-support";
// Overrides the device scale factor for the browser UI and the contents.
const char kForceDeviceScaleFactor[] = "force-device-scale-factor";
// If a resource is requested at a scale factor at which it is not available
// or the resource is the incorrect size (based on the size of the 1x resource),
// generates the missing resource and applies a red mask to the generated
// resource. Resources for which hidpi is not supported because of software
// reasons will show up pixelated.
const char kHighlightMissingScaledResources[] =
"highlight-missing-scaled-resources";
// The language file that we want to try to open. Of the form
// language[-country] where language is the 2 letter code from ISO-639.
const char kLang[] = "lang";
// Load the locale resources from the given path. When running on Mac/Unix the
// path should point to a locale.pak file.
const char kLocalePak[] = "locale_pak";
// Disable ui::MessageBox. This is useful when running as part of scripts that
// do not have a user interface.
const char kNoMessageBox[] = "no-message-box";
// Enable support for touch events.
const char kTouchEvents[] = "touch-events";
// The values the kTouchEvents switch may have, as in --touch-events=disabled.
// auto: enabled at startup when an attached touchscreen is present.
const char kTouchEventsAuto[] = "auto";
// enabled: touch events always enabled.
const char kTouchEventsEnabled[] = "enabled";
// disabled: touch events are disabled.
const char kTouchEventsDisabled[] = "disabled";
// Enables UI changes that make it easier to use with a touchscreen.
// WARNING: Do not check this flag directly when deciding what UI to draw,
// instead you must call ui::GetDisplayLayout
const char kTouchOptimizedUI[] = "touch-optimized-ui";
// The values the kTouchOptimizedUI switch may have, as in
// "--touch-optimized-ui=disabled".
// auto: Enabled on monitors which have touchscreen support (default).
const char kTouchOptimizedUIAuto[] = "auto";
// enabled: always optimized for touch (even if no touch support).
const char kTouchOptimizedUIEnabled[] = "enabled";
// disabled: never optimized for touch.
const char kTouchOptimizedUIDisabled[] = "disabled";
#if defined(USE_XI2_MT)
// The calibration factors given as "<left>,<right>,<top>,<bottom>".
const char kTouchCalibration[] = "touch-calibration";
#endif
#if defined(OS_MACOSX)
// Disables support for Core Animation plugins. This is triggered when
// accelerated compositing is disabled. See http://crbug.com/122430 .
const char kDisableCoreAnimationPlugins[] =
"disable-core-animation-plugins";
#endif
#if defined(TOOLKIT_VIEWS) && defined(OS_LINUX)
// Tells chrome to interpret events from these devices as touch events. Only
// available with XInput 2 (i.e. X server 1.8 or above). The id's of the
// devices can be retrieved from 'xinput list'.
const char kTouchDevices[] = "touch-devices";
#endif
} // namespace switches
| pozdnyakov/chromium-crosswalk | ui/base/ui_base_switches.cc | C++ | bsd-3-clause | 4,544 |
from __future__ import absolute_import
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from . import views
from . import settings as wooey_settings
wooey_patterns = [
url(r'^jobs/command$', views.celery_task_command, name='celery_task_command'),
url(r'^jobs/queue/global/json$', views.global_queue_json, name='global_queue_json'),
url(r'^jobs/queue/user/json$', views.user_queue_json, name='user_queue_json'),
url(r'^jobs/results/user/json$', views.user_results_json, name='user_results_json'),
url(r'^jobs/queue/all/json', views.all_queues_json, name='all_queues_json'),
url(r'^jobs/queue/global', views.GlobalQueueView.as_view(), name='global_queue'),
url(r'^jobs/queue/user', views.UserQueueView.as_view(), name='user_queue'),
url(r'^jobs/results/user', views.UserResultsView.as_view(), name='user_results'),
url(r'^jobs/(?P<job_id>[0-9\-]+)/$', views.JobView.as_view(), name='celery_results'),
url(r'^jobs/(?P<job_id>[0-9\-]+)/json$', views.JobJSON.as_view(), name='celery_results_json'),
# Global public access via uuid
url(r'^jobs/(?P<uuid>[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12})/$', views.JobView.as_view(), name='celery_results_uuid'),
url(r'^jobs/(?P<uuid>[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12})/json$', views.JobJSON.as_view(), name='celery_results_json_uuid'),
url(r'^scripts/(?P<slug>[a-zA-Z0-9\-\_]+)/$', views.WooeyScriptView.as_view(), name='wooey_script'),
url(r'^scripts/(?P<slug>[a-zA-Z0-9\-\_]+)/version/(?P<script_version>[A-Za-z\.0-9]+)$', views.WooeyScriptView.as_view(), name='wooey_script'),
url(r'^scripts/(?P<slug>[a-zA-Z0-9\-\_]+)/version/(?P<script_version>[A-Za-z\.0-9]+)/iteration/(?P<script_iteration>\d+)$', views.WooeyScriptView.as_view(), name='wooey_script'),
url(r'^scripts/(?P<slug>[a-zA-Z0-9\-\_]+)/jobs/(?P<job_id>[a-zA-Z0-9\-]+)$', views.WooeyScriptView.as_view(), name='wooey_script_clone'),
url(r'^scripts/(?P<slug>[a-zA-Z0-9\-\_]+)/$', views.WooeyScriptJSON.as_view(), name='wooey_script_json'),
url(r'^scripts/search/json$', views.WooeyScriptSearchJSON.as_view(), name='wooey_search_script_json'),
url(r'^scripts/search/jsonhtml$', views.WooeyScriptSearchJSONHTML.as_view(), name='wooey_search_script_jsonhtml'),
url(r'^profile/$', views.WooeyProfileView.as_view(), name='profile_home'),
url(r'^profile/(?P<username>[a-zA-Z0-9\-]+)$', views.WooeyProfileView.as_view(), name='profile'),
url(r'^$', views.WooeyHomeView.as_view(), name='wooey_home'),
url(r'^$', views.WooeyHomeView.as_view(), name='wooey_job_launcher'),
url('^{}'.format(wooey_settings.WOOEY_LOGIN_URL.lstrip('/')), views.wooey_login, name='wooey_login'),
url('^{}'.format(wooey_settings.WOOEY_REGISTER_URL.lstrip('/')), views.WooeyRegister.as_view(), name='wooey_register'),
url(r'^favorite/toggle$', views.toggle_favorite, name='toggle_favorite'),
url(r'^scrapbook$', views.WooeyScrapbookView.as_view(), name='scrapbook'),
]
urlpatterns = [
url('^', include(wooey_patterns, namespace='wooey')),
url('^', include('django.contrib.auth.urls')),
]
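
# Illustrative reverse() lookups under the 'wooey' namespace (the job id and
# slug are hypothetical values matching the patterns above):
#   reverse('wooey:wooey_home')
#   reverse('wooey:celery_results', kwargs={'job_id': 42})
#   reverse('wooey:wooey_script', kwargs={'slug': 'my-script'})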
| alexkolar/Wooey | wooey/urls.py | Python | bsd-3-clause | 3,225 |
/*
* Copyright (c) 1999-2015, Ecole des Mines de Nantes
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Ecole des Mines de Nantes nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.chocosolver.solver.search;
import org.chocosolver.memory.copy.EnvironmentCopying;
import org.chocosolver.solver.ResolutionPolicy;
import org.chocosolver.solver.Solver;
import org.chocosolver.solver.constraints.IntConstraintFactory;
import org.chocosolver.solver.search.loop.monitors.IMonitorSolution;
import org.chocosolver.solver.variables.IntVar;
import org.chocosolver.solver.variables.VariableFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Created by cprudhom on 18/02/15.
* Project: choco.
*/
public class ParetoTest {
//******************************************************************
// CP VARIABLES
//******************************************************************
// --- CP Solver
private Solver s;
// --- Decision variables
private IntVar[] occurrences;
// --- Cumulated profit_1 of selected items
private IntVar totalProfit_1;
// --- Cumulated profit_2 of selected items
private IntVar totalProfit_2;
// --- Cumulated weight of selected items
private IntVar totalWeight;
//******************************************************************
// DATA
//******************************************************************
// --- Capacity of the knapsack
private final int capacity;
// --- Maximal profit_1 of the knapsack
private int maxProfit_1;
// --- Maximal profit_2 of the knapsack
private int maxProfit_2;
// --- Number of items in each category
private final int[] nbItems;
// --- Weight of items in each category
private final int[] weights;
// --- Profit_1 of items in each category
private final int[] profits_1;
// --- Profit_2 of items in each category
private final int[] profits_2;
//******************************************************************
// CONSTRUCTOR
//******************************************************************
public ParetoTest(final int capacity, final String... items) {
this.capacity = capacity;
this.nbItems = new int[items.length];
this.weights = new int[items.length];
this.profits_1 = new int[items.length];
this.profits_2 = new int[items.length];
this.maxProfit_1 = 0;
this.maxProfit_2 = 0;
for (int it = 0; it < items.length; it++) {
String item = items[it];
item = item.trim();
final String[] itemData = item.split(";");
this.nbItems[it] = Integer.parseInt(itemData[0]);
this.weights[it] = Integer.parseInt(itemData[1]);
this.profits_1[it] = Integer.parseInt(itemData[2]);
this.profits_2[it] = Integer.parseInt(itemData[3]);
this.maxProfit_1 += this.nbItems[it] * this.profits_1[it];
this.maxProfit_2 += this.nbItems[it] * this.profits_2[it];
}
}
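
    // Illustrative reading of the item format (this mirrors the instance built
    // in main() below): new ParetoTest(30, "10;1;2;5") declares a knapsack of
    // capacity 30 that may hold up to 10 copies of an item of weight 1, each
    // copy contributing 2 to profit_1 and 5 to profit_2.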
//******************************************************************
// METHODS
//******************************************************************
private void createSolver() {
// --- Creates a solver
s = new Solver(new EnvironmentCopying(), "Knapsack");
}
private void buildModel() {
createVariables();
postConstraints();
}
private void createVariables() {
// --- Creates decision variables
occurrences = new IntVar[nbItems.length];
for (int i = 0; i < nbItems.length; i++) {
occurrences[i] = VariableFactory.bounded("occurrences_" + i, 0, nbItems[i], s);
}
totalWeight = VariableFactory.bounded("totalWeight", 0, capacity, s);
totalProfit_1 = VariableFactory.bounded("totalProfit_1", 0, maxProfit_1, s);
totalProfit_2 = VariableFactory.bounded("totalProfit_2", 0, maxProfit_2, s);
}
private void postConstraints() {
// --- Posts a knapsack constraint on profit_1
s.post(IntConstraintFactory.knapsack(occurrences, totalWeight, totalProfit_1, weights, profits_1));
// --- Posts a knapsack constraint on profit_2
s.post(IntConstraintFactory.knapsack(occurrences, totalWeight, totalProfit_2, weights, profits_2));
}
static int bestProfit1 = 0;
private void solve() {
// --- Solves the problem
s.plugMonitor((IMonitorSolution) () -> bestProfit1 = Math.max(bestProfit1, totalProfit_1.getValue()));
// Chatterbox.showSolutions(s);
s.findParetoFront(ResolutionPolicy.MAXIMIZE, totalProfit_1, totalProfit_2);
}
//******************************************************************
// MAIN
//******************************************************************
@Test
public static void main() {
ParetoTest instance = new ParetoTest(30, "10;1;2;5", "5;3;7;4", "2;5;11;3");
instance.createSolver();
instance.buildModel();
instance.solve();
Assert.assertTrue(bestProfit1 > 60);
}
}
| piyushsh/choco3 | choco-solver/src/test/java/org/chocosolver/solver/search/ParetoTest.java | Java | bsd-3-clause | 6,565 |
<?php
/*
* Phake - Mocking Framework
*
* Copyright (c) 2010-2012, Mike Lively <m@digitalsandwich.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Mike Lively nor the names of his
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @category Testing
* @package Phake
* @author Mike Lively <m@digitalsandwich.com>
* @copyright 2010 Mike Lively <m@digitalsandwich.com>
* @license http://www.opensource.org/licenses/bsd-license.php BSD License
* @link http://www.digitalsandwich.com/
*/
/**
 * Allows providing an exception to be thrown by a stubbed method call.
*
* @author Brian Feaver <brian.feaver@gmail.com>
*/
class Phake_Stubber_Answers_ExceptionAnswer implements Phake_Stubber_IAnswer
{
/**
     * @var Exception
*/
private $answer;
/**
     * @param Exception $answer
*/
public function __construct(Exception $answer)
{
$this->answer = $answer;
}
public function getAnswerCallback($context, $method)
{
$answer = $this->answer;
return function () use ($answer) {
throw $answer;
};
}
public function processAnswer($answer)
{
}
}
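
// Illustrative sketch of how this answer class is typically reached (the
// Phake::when()/thenThrow() wiring shown is an assumption about the wider
// Phake API and is not defined in this file):
//
//   Phake::when($mock)->save()->thenThrow(new RuntimeException('boom'));
//   $mock->save(); // the callback built by getAnswerCallback() throws the exception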
| kore/Phake | src/Phake/Stubber/Answers/ExceptionAnswer.php | PHP | bsd-3-clause | 2,595 |
/*
* Copyright 2001-2005 Internet2
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* File: SAMLArtifactType0002.java
*
*/
package gov.nih.nci.cagrid.opensaml.artifact;
import gov.nih.nci.cagrid.opensaml.SAMLConfig;
import gov.nih.nci.cagrid.opensaml.artifact.Artifact;
import gov.nih.nci.cagrid.opensaml.artifact.ArtifactParseException;
import gov.nih.nci.cagrid.opensaml.artifact.SAMLArtifact;
import gov.nih.nci.cagrid.opensaml.artifact.Util;
import java.io.UnsupportedEncodingException;
import org.apache.commons.codec.binary.Base64;
/**
* <p>This class implements a type 0x0002 artifact as
* specified by SAML V1.1.</p>
*
* <pre>TypeCode := 0x0002
*RemainingArtifact := AssertionHandle SourceLocation
*AssertionHandle := 20-byte_sequence
*SourceLocation := URI</pre>
*
* <p>Since the URI is arbitrary, a type 0x0002
* artifact is of indeterminate size.</p>
*
* <p>The <code>AssertionHandle</code> is a sequence
* of random bytes that points to an
* authentication assertion at the IdP.</p>
*
* <p>Before the artifact is base64-encoded, the URI
* is converted to a sequence of bytes based on UTF-8.
* While parsing an encoded artifact, this encoding
* process is reversed.</p>
*
* @author Tom Scavo
*/
public class SAMLArtifactType0002 extends SAMLArtifact {
/**
* The type code of this <code>Artifact</code> object.
*/
public static final Artifact.TypeCode TYPE_CODE =
new TypeCode( (byte) 0x00, (byte) 0x02 );
/**
* This constructor initializes the
* <code>remainingArtifact</code> property by calling
* the corresponding constructor of this implementation
* of <code>Artifact.RemainingArtifact</code>.
* <p>
* This constructor throws an (unchecked)
* <code>NullArgumentException</code> if its argument is null.
*
* @param sourceLocation the desired source location
* of this <code>SAMLArtifactType0002</code> object
*
* @see SAMLArtifactType0002.RemainingArtifact
* @see gov.nih.nci.cagrid.opensaml.artifact.NullArgumentException
*/
public SAMLArtifactType0002( URI sourceLocation ) {
checkNullArg( sourceLocation );
this.typeCode = TYPE_CODE;
this.remainingArtifact = new RemainingArtifact( sourceLocation );
}
/**
* This constructor initializes the
* <code>remainingArtifact</code> property by calling
* the corresponding constructor of this implementation
* of <code>Artifact.RemainingArtifact</code>.
* <p>
* This constructor throws a <code>NullArgumentException</code>
* or <code>InvalidArgumentException</code> if any of its
* arguments are null or invalid, respectively.
* These exceptions are unchecked.
*
* @param assertionHandle the desired assertion handle
* of this <code>SAMLArtifactType0002</code> object
*
* @param sourceLocation the desired source location
* of this <code>SAMLArtifactType0002</code> object
*
* @see SAMLArtifactType0002.RemainingArtifact
* @see gov.nih.nci.cagrid.opensaml.artifact.NullArgumentException
* @see gov.nih.nci.cagrid.opensaml.artifact.InvalidArgumentException
*/
public SAMLArtifactType0002( byte[] assertionHandle, URI sourceLocation ) {
checkHandleArg( assertionHandle );
checkNullArg( sourceLocation );
this.typeCode = TYPE_CODE;
this.remainingArtifact =
new RemainingArtifact( assertionHandle, sourceLocation );
}
/**
* This constructor initializes the
* <code>remainingArtifact</code> property to the
* given value.
* <p>
* This constructor throws an (unchecked)
* <code>NullArgumentException</code> if its argument is null.
*
* @param remainingArtifact the desired value of
* the <code>remainingArtifact</code> property
* of this <code>SAMLArtifactType0002</code> object
*
* @see SAMLArtifactType0002.RemainingArtifact
* @see gov.nih.nci.cagrid.opensaml.artifact.NullArgumentException
*/
public SAMLArtifactType0002( Artifact.RemainingArtifact remainingArtifact ) {
checkNullArg( remainingArtifact );
this.typeCode = TYPE_CODE;
this.remainingArtifact = remainingArtifact;
}
/**
* A convenience method that returns the
* <code>assertionHandle</code> property of this implementation
* of <code>Artifact.RemainingArtifact</code>.
*
* @return the <code>assertionHandle</code> property
*
* @see SAMLArtifactType0002.RemainingArtifact
*/
public byte[] getAssertionHandle() {
return ((RemainingArtifact) this.remainingArtifact).getAssertionHandle();
}
/**
* A convenience method that returns the
* <code>sourceLocation</code> property of this implementation
* of <code>Artifact.RemainingArtifact</code>.
*
* @return the <code>sourceLocation</code> property
*
* @see SAMLArtifactType0002.RemainingArtifact
*/
public URI getSourceLocation() {
return ((RemainingArtifact) this.remainingArtifact).getSourceLocation();
}
/**
* An implementation of <code>Artifact.RemainingArtifact</code>
* for type 0x0002 artifacts (via extension of
* <code>SAMLArtifact.RemainingArtifact</code>).
* This class defines two properties
* (<code>assertionHandle</code> and <code>sourceLocation</code>).
*/
public static final class RemainingArtifact
extends SAMLArtifact.RemainingArtifact {
private byte[] assertionHandle;
private URI sourceLocation;
private byte[] sourceLocationBytes;
/**
* This constructor initializes the <code>sourceLocation</code>
* property of this <code>RemainingArtifact</code>
* object to the given value. The <code>assertionHandle</code>
* property is initialized to a sequence of random bytes.
*
* @param sourceLocation a source location
*/
public RemainingArtifact( URI sourceLocation ) {
byte[] assertionHandle = SAMLConfig.instance().getDefaultIDProvider().generateRandomBytes( HANDLE_LENGTH );
RemainingArtifact ra;
ra = new RemainingArtifact( assertionHandle, sourceLocation );
this.assertionHandle = ra.assertionHandle;
this.sourceLocation = ra.sourceLocation;
this.sourceLocationBytes = ra.sourceLocationBytes;
}
/**
* This constructor initializes the properties
* of this <code>RemainingArtifact</code>
* object to the given values.
* <p>
* This constructor throws a <code>NullArgumentException</code>
* or <code>InvalidArgumentException</code> if any of its
* arguments are null or invalid, respectively.
* These exceptions are unchecked.
*
* @param assertionHandle an assertion handle
*
* @param sourceLocation a source location
*
* @see gov.nih.nci.cagrid.opensaml.artifact.NullArgumentException
* @see gov.nih.nci.cagrid.opensaml.artifact.InvalidArgumentException
*/
public RemainingArtifact( byte[] assertionHandle, URI sourceLocation ) {
checkHandleArg( assertionHandle );
checkNullArg( sourceLocation );
this.assertionHandle = assertionHandle;
this.sourceLocation = sourceLocation;
this.sourceLocationBytes = sourceLocation.toBytes();
}
/**
* Get the <code>assertionHandle</code> property of this
* <code>Artifact.RemainingArtifact</code> object.
*
         * @return the <code>assertionHandle</code> property
*/
public byte[] getAssertionHandle() { return this.assertionHandle; }
/**
* Get the <code>sourceLocation</code> property of this
* <code>Artifact.RemainingArtifact</code> object.
*
         * @return the <code>sourceLocation</code> property
*/
public URI getSourceLocation() { return this.sourceLocation; }
public int size() {
return this.assertionHandle.length + this.sourceLocationBytes.length;
}
public byte[] getBytes() {
byte[] bytes0 = this.assertionHandle;
byte[] bytes1 = this.sourceLocationBytes;
return Util.concat( bytes0, bytes1 );
}
public int hashCode() {
return this.assertionHandle.hashCode() &
this.sourceLocationBytes.hashCode();
}
}
/**
* An implementation of <code>Artifact.Parser</code>
* for type 0x0002 artifacts.
*/
public static final class Parser implements Artifact.Parser {
/**
* Parse the given encoded string.
*
* @param s the encoded string
*
* @return an artifact that may be cast to type
* <code>SAMLArtifactType0002</code>
*
* @exception gov.nih.nci.cagrid.opensaml.artifact.ArtifactParseException
* if the length of the decoded string is
* less than the minimum length, or the
* type code is incorrect, or
* the tail portion of the parsed string
* is not a valid URI
*
* @see org.apache.commons.codec.binary.Base64
*/
public Artifact parse( String s ) throws ArtifactParseException {
// check total length:
byte[] bytes = Base64.decodeBase64( s.getBytes() );
int minLength = 2 + HANDLE_LENGTH;
if ( bytes.length < minLength ) {
throw new ArtifactParseException( bytes.length, minLength );
}
// check type code:
TypeCode typeCode =
new TypeCode( bytes[0], bytes[1] );
if ( ! typeCode.equals( TYPE_CODE ) ) {
throw new ArtifactParseException( typeCode, TYPE_CODE );
}
// extract the assertion handle:
byte[] assertionHandle = new byte[ HANDLE_LENGTH ];
System.arraycopy( bytes, 2, assertionHandle, 0, HANDLE_LENGTH );
// extract the remaining bytes:
int length = bytes.length - minLength;
byte[] remainingBytes = new byte[ length ];
System.arraycopy( bytes, minLength, remainingBytes, 0, length );
// convert the remaining bytes to a string:
URI uri;
try {
uri = new URI( remainingBytes, "UTF-8" );
}
catch (UnsupportedEncodingException e) {
throw new ArtifactParseException("UTF-8 unsupported string format, can not create artifact URI");
}
return new SAMLArtifactType0002( assertionHandle, uri );
}
}
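
    // Illustrative use of the parser (the encoded string is a hypothetical
    // base64-encoded artifact; parse() throws ArtifactParseException on
    // malformed input):
    //
    //   Artifact artifact = new SAMLArtifactType0002.Parser().parse(encoded);
    //   byte[] handle = ((SAMLArtifactType0002) artifact).getAssertionHandle();
    //   URI location = ((SAMLArtifactType0002) artifact).getSourceLocation();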
}
| NCIP/cagrid | cagrid/Software/core/caGrid/projects/opensaml/src/gov/nih/nci/cagrid/opensaml/artifact/SAMLArtifactType0002.java | Java | bsd-3-clause | 10,727 |
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1">
<context>
<name>FancySwitcher</name>
<message>
<source>Alt+Left</source>
<translation>Alt+Left</translation>
</message>
<message>
<source>Alt+Right</source>
<translation>Alt+Right</translation>
</message>
<message>
<source>Alt+Up</source>
<translation>Alt+Up</translation>
</message>
<message>
<source>Alt+Down</source>
<translation>Alt+Down</translation>
</message>
</context>
<context>
<name>LoginWidget</name>
<message>
<source>Select</source>
<translation>Select</translation>
</message>
<message>
<source>Select an alternate user and clear the password field</source>
<translation>Select an alternate user and clear the password field</translation>
</message>
<message>
<source>Select this user</source>
<translation>Select this user</translation>
</message>
<message>
<source>Login</source>
<translation>Login</translation>
</message>
<message>
<source>Login to the system with the current user and password</source>
<translation>Login to the system with the current user and password</translation>
</message>
<message>
<source>Password</source>
<translation>Password</translation>
</message>
<message>
<source>Hold to view the currently entered password</source>
<translation>Hold to view the currently entered password</translation>
</message>
<message>
<source>Login password for the selected user</source>
<translation>Login password for the selected user</translation>
</message>
<message>
<source>Available users</source>
<translation>Available users</translation>
</message>
<message>
<source>Login to %1</source>
<translation>Login to %1</translation>
</message>
<message>
<source>Available desktop environments</source>
<translation>Available desktop environments</translation>
</message>
<message>
<source>Please connect your PersonaCrypt device to start login procedures.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Stealth Session</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Use a temporary home directory which is deleted on log out)</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Refresh</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Refresh available users</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Device encryption key</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Device encryption key (personacrypt users only)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>PCDMgui</name>
<message>
<source>Virtual Keyboard</source>
<translation>Virtual Keyboard</translation>
</message>
<message>
<source>Locale</source>
<translation>Locale</translation>
</message>
<message>
<source>Keyboard Layout</source>
<translation>Keyboard Layout</translation>
</message>
<message>
<source>Change Keyboard Layout</source>
<translation>Change Keyboard Layout</translation>
</message>
<message>
<source>System</source>
<translation>System</translation>
</message>
<message>
<source>Tip: Make sure that caps-lock is turned off.</source>
<translation>Tip: Make sure that caps-lock is turned off.</translation>
</message>
<message>
<source>Restart</source>
<translation>Restart</translation>
</message>
<message>
<source>Shut Down</source>
<translation>Shut Down</translation>
</message>
<message>
<source>Close PCDM</source>
<translation>Close PCDM</translation>
</message>
<message>
<source>Shutdown the computer</source>
<translation>Shutdown the computer</translation>
</message>
<message>
<source>Invalid Username/Password</source>
<translation>Invalid Username/Password</translation>
</message>
<message>
<source>Username/Password combination is invalid, please try again.</source>
<translation>Username/Password combination is invalid, please try again.</translation>
</message>
<message>
<source>System Shutdown</source>
<translation>System Shutdown</translation>
</message>
<message>
<source>You are about to shut down the system.</source>
<translation>You are about to shut down the system.</translation>
</message>
<message>
<source>Are you sure?</source>
<translation>Are you sure?</translation>
</message>
<message>
<source>System Restart</source>
<translation>System Restart</translation>
</message>
<message>
<source>You are about to restart the system.</source>
<translation>You are about to restart the system.</translation>
</message>
<message>
<source>Change locale (%1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Change DPI</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>High (4K)</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Medium</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Standard</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Low </source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Refresh PCDM</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>loginDelay</name>
<message>
<source>Dialog</source>
<translation>Dialog</translation>
</message>
<message>
<source>%v/%m seconds</source>
<translation>%v/%m seconds</translation>
</message>
<message>
<source>Cancel Login</source>
<translation>Cancel Login</translation>
</message>
<message>
<source>Login Now</source>
<translation>Login Now</translation>
</message>
<message>
<source>PCDM Automatic Login</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>widgetKeyboard</name>
<message>
<source>Keyboard Settings</source>
<translation>Keyboard Settings</translation>
</message>
<message>
<source>Key Layout</source>
<translation>Key Layout</translation>
</message>
<message>
<source>Variant</source>
<translation>Variant</translation>
</message>
<message>
<source>Keyboard Model</source>
<translation>Keyboard Model</translation>
</message>
<message>
<source>( you may type into the space below to test your selected settings. )</source>
<translation>( you may type into the space below to test your selected settings. )</translation>
</message>
<message>
<source>&Apply</source>
<translation>&Apply</translation>
</message>
<message>
<source>&Close</source>
<translation>&Close</translation>
</message>
</context>
<context>
<name>widgetLocale</name>
<message>
<source>Select Locale</source>
<translation>Select Locale</translation>
</message>
<message>
<source>Current Locale</source>
<translation>Current Locale</translation>
</message>
<message>
<source>Apply</source>
<translation>Apply</translation>
</message>
<message>
<source>Cancel</source>
<translation>Cancel</translation>
</message>
</context>
</TS>
| trueos/pcdm | src-qt5/PCDM/i18n/PCDM_en_ZA.ts | TypeScript | bsd-3-clause | 8,374 |
<?php
use yii\helpers\Html;
use yii\widgets\DetailView;
/* @var $this yii\web\View */
/* @var $model frontend\models\Penjualan */
$this->title = $model->idpenjualan;
$this->params['breadcrumbs'][] = ['label' => 'Penjualans', 'url' => ['index']];
$this->params['breadcrumbs'][] = $this->title;
?>
<div class="penjualan-view">
<h1><?= Html::encode($this->title) ?></h1>
<p>
<?= Html::a('Update', ['update', 'id' => $model->idpenjualan], ['class' => 'btn btn-primary']) ?>
<?= Html::a('Delete', ['delete', 'id' => $model->idpenjualan], [
'class' => 'btn btn-danger',
'data' => [
'confirm' => 'Are you sure you want to delete this item?',
'method' => 'post',
],
]) ?>
</p>
<?= DetailView::widget([
'model' => $model,
'attributes' => [
'idpenjualan',
'tglpenjualan',
'jmlbarang',
'ttlbayar',
'idpelanggan',
],
]) ?>
</div>
| programerjakarta/tokojaya | frontend/views/transaksi/view.php | PHP | bsd-3-clause | 1,021 |
import _forEachInstanceProperty from "@babel/runtime-corejs3/core-js/instance/for-each";
import _Object$getOwnPropertyDescriptor from "@babel/runtime-corejs3/core-js/object/get-own-property-descriptor";
import _filterInstanceProperty from "@babel/runtime-corejs3/core-js/instance/filter";
import _concatInstanceProperty from "@babel/runtime-corejs3/core-js/instance/concat";
import _Object$getOwnPropertySymbols from "@babel/runtime-corejs3/core-js/object/get-own-property-symbols";
import _Object$keys from "@babel/runtime-corejs3/core-js/object/keys";
import defineProperty from "@babel/runtime-corejs3/helpers/esm/defineProperty";
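// Rough behaviour sketch: _objectSpread(target, a, b) copies each source's
// own enumerable string keys and, when Object.getOwnPropertySymbols is
// available, its own enumerable symbol keys onto target via defineProperty,
// roughly mirroring object-spread semantics.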
export default function _objectSpread(target) {
for (var i = 1; i < arguments.length; i++) {
var source = arguments[i] != null ? Object(arguments[i]) : {};
var ownKeys = _Object$keys(source);
if (typeof _Object$getOwnPropertySymbols === 'function') {
var _context;
ownKeys = _concatInstanceProperty(ownKeys).call(ownKeys, _filterInstanceProperty(_context = _Object$getOwnPropertySymbols(source)).call(_context, function (sym) {
return _Object$getOwnPropertyDescriptor(source, sym).enumerable;
}));
}
_forEachInstanceProperty(ownKeys).call(ownKeys, function (key) {
defineProperty(target, key, source[key]);
});
}
return target;
} | ChromeDevTools/devtools-frontend | node_modules/@babel/runtime-corejs3/helpers/esm/objectSpread.js | JavaScript | bsd-3-clause | 1,330 |
<?php
use yii\widgets\Menu;
/* @var $this yii\web\View */
/* @var $items array */
?>
<div class="panel panel-default">
<div class="panel-heading">Категории</div>
<?= Menu::widget([
'options' => ['class' => 'nav nav-pills nav-stacked'],
'items' => $items,
'submenuTemplate' => "\n<ul class='nav nav-pills nav-stacked'>\n{items}\n</ul>\n"
]); ?>
</div>
| peskovsb/shopgit | widgets/views/categories.php | PHP | bsd-3-clause | 404 |
# -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from nagare import log
import pkg_resources
from ..assetsmanager import AssetsManager
class DummyAssetsManager(AssetsManager):
def __init__(self):
super(DummyAssetsManager, self).__init__('', None)
def save(self, data, file_id=None, metadata={}):
log.debug("Save Image")
log.debug("%s" % metadata)
return 'mock_id'
def load(self, file_id):
log.debug("Load Image")
package = pkg_resources.Requirement.parse('kansha')
fname = pkg_resources.resource_filename(package, 'kansha/services/dummyassetsmanager/tie.jpg')
with open(fname, 'r') as f:
data = f.read()
return data, {}
def update_metadata(self, file_id, metadata):
pass
def get_metadata(self, file_id):
pass
| bcroq/kansha | kansha/services/dummyassetsmanager/dummyassetsmanager.py | Python | bsd-3-clause | 1,033 |
from __future__ import absolute_import
import logging
from rest_framework import serializers
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from uuid import uuid4
from sentry.api.base import Endpoint, SessionAuthentication
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.rest_framework import ListField
from sentry.models import ApiApplication, ApiApplicationStatus
from sentry.tasks.deletion import delete_api_application
delete_logger = logging.getLogger("sentry.deletions.api")
class ApiApplicationSerializer(serializers.Serializer):
name = serializers.CharField(max_length=64)
redirectUris = ListField(child=serializers.URLField(max_length=255), required=False)
allowedOrigins = ListField(
# TODO(dcramer): make this validate origins
child=serializers.CharField(max_length=255),
required=False,
)
homepageUrl = serializers.URLField(
max_length=255, required=False, allow_null=True, allow_blank=True
)
termsUrl = serializers.URLField(
max_length=255, required=False, allow_null=True, allow_blank=True
)
privacyUrl = serializers.URLField(
max_length=255, required=False, allow_null=True, allow_blank=True
)
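# Illustrative partial-update payload accepted by ApiApplicationSerializer
# above (keys are the camelCase fields declared on the class; the values are
# made-up placeholders):
# {"name": "My App",
#  "redirectUris": ["https://example.com/oauth/complete"],
#  "allowedOrigins": ["example.com"],
#  "homepageUrl": "https://example.com"}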
class ApiApplicationDetailsEndpoint(Endpoint):
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request, app_id):
try:
instance = ApiApplication.objects.get(
owner=request.user, client_id=app_id, status=ApiApplicationStatus.active
)
except ApiApplication.DoesNotExist:
raise ResourceDoesNotExist
return Response(serialize(instance, request.user))
def put(self, request, app_id):
try:
instance = ApiApplication.objects.get(
owner=request.user, client_id=app_id, status=ApiApplicationStatus.active
)
except ApiApplication.DoesNotExist:
raise ResourceDoesNotExist
serializer = ApiApplicationSerializer(data=request.data, partial=True)
if serializer.is_valid():
result = serializer.validated_data
kwargs = {}
if "name" in result:
kwargs["name"] = result["name"]
if "allowedOrigins" in result:
kwargs["allowed_origins"] = "\n".join(result["allowedOrigins"])
if "redirectUris" in result:
kwargs["redirect_uris"] = "\n".join(result["redirectUris"])
if "homepageUrl" in result:
kwargs["homepage_url"] = result["homepageUrl"]
if "privacyUrl" in result:
kwargs["privacy_url"] = result["privacyUrl"]
if "termsUrl" in result:
kwargs["terms_url"] = result["termsUrl"]
if kwargs:
instance.update(**kwargs)
return Response(serialize(instance, request.user), status=200)
return Response(serializer.errors, status=400)
def delete(self, request, app_id):
try:
instance = ApiApplication.objects.get(
owner=request.user, client_id=app_id, status=ApiApplicationStatus.active
)
except ApiApplication.DoesNotExist:
raise ResourceDoesNotExist
updated = ApiApplication.objects.filter(id=instance.id).update(
status=ApiApplicationStatus.pending_deletion
)
if updated:
transaction_id = uuid4().hex
delete_api_application.apply_async(
kwargs={"object_id": instance.id, "transaction_id": transaction_id}, countdown=3600
)
delete_logger.info(
"object.delete.queued",
extra={
"object_id": instance.id,
"transaction_id": transaction_id,
"model": type(instance).__name__,
},
)
return Response(status=204)
| mvaled/sentry | src/sentry/api/endpoints/api_application_details.py | Python | bsd-3-clause | 4,091 |
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import get_object_or_404
from pdfdocument.utils import pdf_response
import plata
import plata.reporting.product
import plata.reporting.order
@staff_member_required
def product_xls(request):
"""
Returns an XLS containing product information
"""
return plata.reporting.product.product_xls().to_response('products.xlsx')
@staff_member_required
def invoice_pdf(request, order_id):
"""
Returns the invoice PDF
"""
order = get_object_or_404(plata.shop_instance().order_model, pk=order_id)
pdf, response = pdf_response('invoice-%09d' % order.id)
plata.reporting.order.invoice_pdf(pdf, order)
return response
@staff_member_required
def packing_slip_pdf(request, order_id):
"""
Returns the packing slip PDF
"""
order = get_object_or_404(plata.shop_instance().order_model, pk=order_id)
pdf, response = pdf_response('packing-slip-%09d' % order.id)
plata.reporting.order.packing_slip_pdf(pdf, order)
return response
| ixc/plata | plata/reporting/views.py | Python | bsd-3-clause | 1,080 |
/*
* Copyright (C) 2007-2011 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bindings/core/v8/V8CSSStyleDeclaration.h"
#include "bindings/core/v8/ExceptionState.h"
#include "bindings/core/v8/V8Binding.h"
#include "core/CSSPropertyNames.h"
#include "core/css/CSSPrimitiveValue.h"
#include "core/css/CSSPropertyMetadata.h"
#include "core/css/CSSStyleDeclaration.h"
#include "core/css/CSSValue.h"
#include "core/css/parser/CSSParser.h"
#include "core/events/EventTarget.h"
#include "wtf/ASCIICType.h"
#include "wtf/PassRefPtr.h"
#include "wtf/RefPtr.h"
#include "wtf/StdLibExtras.h"
#include "wtf/Vector.h"
#include "wtf/text/StringBuilder.h"
#include "wtf/text/StringConcatenate.h"
#include <algorithm>
using namespace WTF;
namespace blink {
// Check for a CSS prefix.
// Passed prefix is all lowercase.
// First character of the prefix within the property name may be upper or lowercase.
// Other characters in the prefix within the property name must be lowercase.
// The prefix within the property name must be followed by a capital letter.
static bool hasCSSPropertyNamePrefix(const String& propertyName, const char* prefix)
{
#if ENABLE(ASSERT)
ASSERT(*prefix);
for (const char* p = prefix; *p; ++p)
ASSERT(isASCIILower(*p));
ASSERT(propertyName.length());
#endif
if (toASCIILower(propertyName[0]) != prefix[0])
return false;
unsigned length = propertyName.length();
for (unsigned i = 1; i < length; ++i) {
if (!prefix[i])
return isASCIIUpper(propertyName[i]);
if (propertyName[i] != prefix[i])
return false;
}
return false;
}
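// For illustration of the rules above: hasCSSPropertyNamePrefix("webkitTransform", "webkit")
// and hasCSSPropertyNamePrefix("WebkitTransform", "webkit") both return true, while
// hasCSSPropertyNamePrefix("webkit", "webkit") returns false because no capital letter
// follows the prefix.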
static CSSPropertyID parseCSSPropertyID(const String& propertyName)
{
unsigned length = propertyName.length();
if (!length)
return CSSPropertyInvalid;
StringBuilder builder;
builder.reserveCapacity(length);
unsigned i = 0;
bool hasSeenDash = false;
if (hasCSSPropertyNamePrefix(propertyName, "webkit"))
builder.append('-');
else if (isASCIIUpper(propertyName[0]))
return CSSPropertyInvalid;
bool hasSeenUpper = isASCIIUpper(propertyName[i]);
builder.append(toASCIILower(propertyName[i++]));
for (; i < length; ++i) {
UChar c = propertyName[i];
if (!isASCIIUpper(c)) {
if (c == '-')
hasSeenDash = true;
builder.append(c);
} else {
hasSeenUpper = true;
builder.append('-');
builder.append(toASCIILower(c));
}
}
// Reject names containing both dashes and upper-case characters, such as "border-rightColor".
if (hasSeenDash && hasSeenUpper)
return CSSPropertyInvalid;
String propName = builder.toString();
return unresolvedCSSPropertyID(propName);
}
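// For illustration, parseCSSPropertyID() maps "webkitTransform" to
// "-webkit-transform" (note the leading dash added for the vendor prefix)
// before the unresolvedCSSPropertyID() lookup.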
// When getting properties on CSSStyleDeclarations, the name used from
// Javascript and the actual name of the property are not the same, so
// we have to do the following translation. The translation turns upper
// case characters into lower case characters and inserts dashes to
// separate words.
//
// Example: 'backgroundPositionY' -> 'background-position-y'
//
// Also, certain prefixes such as 'css-' are stripped.
static CSSPropertyID cssPropertyInfo(v8::Local<v8::String> v8PropertyName)
{
String propertyName = toCoreString(v8PropertyName);
typedef HashMap<String, CSSPropertyID> CSSPropertyIDMap;
DEFINE_STATIC_LOCAL(CSSPropertyIDMap, map, ());
CSSPropertyIDMap::iterator iter = map.find(propertyName);
if (iter != map.end())
return iter->value;
CSSPropertyID unresolvedProperty = parseCSSPropertyID(propertyName);
map.add(propertyName, unresolvedProperty);
ASSERT(!unresolvedProperty || CSSPropertyMetadata::isEnabledProperty(unresolvedProperty));
return unresolvedProperty;
}
void V8CSSStyleDeclaration::namedPropertyEnumeratorCustom(const v8::PropertyCallbackInfo<v8::Array>& info)
{
typedef Vector<String, numCSSProperties - 1> PreAllocatedPropertyVector;
DEFINE_STATIC_LOCAL(PreAllocatedPropertyVector, propertyNames, ());
static unsigned propertyNamesLength = 0;
if (propertyNames.isEmpty()) {
for (int id = firstCSSProperty; id <= lastCSSProperty; ++id) {
CSSPropertyID propertyId = static_cast<CSSPropertyID>(id);
if (CSSPropertyMetadata::isEnabledProperty(propertyId))
propertyNames.append(getJSPropertyName(propertyId));
}
std::sort(propertyNames.begin(), propertyNames.end(), codePointCompareLessThan);
propertyNamesLength = propertyNames.size();
}
v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
v8::Local<v8::Array> properties = v8::Array::New(info.GetIsolate(), propertyNamesLength);
for (unsigned i = 0; i < propertyNamesLength; ++i) {
String key = propertyNames.at(i);
ASSERT(!key.isNull());
if (!v8CallBoolean(properties->CreateDataProperty(context, i, v8String(info.GetIsolate(), key))))
return;
}
v8SetReturnValue(info, properties);
}
void V8CSSStyleDeclaration::namedPropertyQueryCustom(v8::Local<v8::Name> v8Name, const v8::PropertyCallbackInfo<v8::Integer>& info)
{
// NOTE: cssPropertyInfo lookups incur several mallocs.
// Successful lookups have the same cost the first time, but are cached.
if (cssPropertyInfo(v8Name.As<v8::String>())) {
v8SetReturnValueInt(info, 0);
return;
}
}
void V8CSSStyleDeclaration::namedPropertyGetterCustom(v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info)
{
// Search the style declaration.
CSSPropertyID unresolvedProperty = cssPropertyInfo(name.As<v8::String>());
// Do not handle non-property names.
if (!unresolvedProperty)
return;
CSSPropertyID resolvedProperty = resolveCSSPropertyID(unresolvedProperty);
CSSStyleDeclaration* impl = V8CSSStyleDeclaration::toImpl(info.Holder());
// TODO(leviw): This API doesn't support custom properties.
CSSValue* cssValue = impl->getPropertyCSSValueInternal(resolvedProperty);
if (cssValue) {
v8SetReturnValueStringOrNull(info, cssValue->cssText(), info.GetIsolate());
return;
}
String result = impl->getPropertyValueInternal(resolvedProperty);
v8SetReturnValueString(info, result, info.GetIsolate());
}
void V8CSSStyleDeclaration::namedPropertySetterCustom(v8::Local<v8::Name> name, v8::Local<v8::Value> value, const v8::PropertyCallbackInfo<v8::Value>& info)
{
CSSStyleDeclaration* impl = V8CSSStyleDeclaration::toImpl(info.Holder());
CSSPropertyID unresolvedProperty = cssPropertyInfo(name.As<v8::String>());
if (!unresolvedProperty)
return;
TOSTRING_VOID(V8StringResource<TreatNullAsNullString>, propertyValue, value);
ExceptionState exceptionState(ExceptionState::SetterContext, getPropertyName(resolveCSSPropertyID(unresolvedProperty)), "CSSStyleDeclaration", info.Holder(), info.GetIsolate());
// TODO(leviw): This API doesn't support custom properties.
impl->setPropertyInternal(unresolvedProperty, String(), propertyValue, false, exceptionState);
if (exceptionState.throwIfNeeded())
return;
v8SetReturnValue(info, value);
}
} // namespace blink
| axinging/chromium-crosswalk | third_party/WebKit/Source/bindings/core/v8/custom/V8CSSStyleDeclarationCustom.cpp | C++ | bsd-3-clause | 8,762 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/message_loop.h"
#include "base/shared_memory.h"
#include "content/common/media/video_capture_messages.h"
#include "content/renderer/media/video_capture_message_filter.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
class MockVideoCaptureDelegate : public VideoCaptureMessageFilter::Delegate {
public:
MockVideoCaptureDelegate() {
Reset();
device_id_received_ = false;
device_id_ = 0;
}
virtual void OnBufferCreated(base::SharedMemoryHandle handle,
int length, int buffer_id) {
buffer_created_ = true;
handle_ = handle;
}
// Called when a video frame buffer is received from the browser process.
virtual void OnBufferReceived(int buffer_id, base::Time timestamp) {
buffer_received_ = true;
buffer_id_ = buffer_id;
timestamp_ = timestamp;
}
virtual void OnStateChanged(video_capture::State state) {
state_changed_received_ = true;
state_ = state;
}
virtual void OnDeviceInfoReceived(const media::VideoCaptureParams& params) {
device_info_received_ = true;
params_.width = params.width;
params_.height = params.height;
params_.frame_per_second = params.frame_per_second;
}
virtual void OnDelegateAdded(int32 device_id) {
device_id_received_ = true;
device_id_ = device_id;
}
void Reset() {
buffer_created_ = false;
handle_ = base::SharedMemory::NULLHandle();
buffer_received_ = false;
buffer_id_ = -1;
timestamp_ = base::Time();
state_changed_received_ = false;
state_ = video_capture::kError;
device_info_received_ = false;
params_.width = 0;
params_.height = 0;
params_.frame_per_second = 0;
}
bool buffer_created() { return buffer_created_; }
base::SharedMemoryHandle received_buffer_handle() { return handle_; }
bool buffer_received() { return buffer_received_; }
int received_buffer_id() { return buffer_id_; }
base::Time received_buffer_ts() { return timestamp_; }
bool state_changed_received() { return state_changed_received_; }
video_capture::State state() { return state_; }
bool device_info_receive() { return device_info_received_; }
const media::VideoCaptureParams& received_device_info() { return params_; }
int32 device_id() { return device_id_; }
private:
bool buffer_created_;
base::SharedMemoryHandle handle_;
bool buffer_received_;
int buffer_id_;
base::Time timestamp_;
bool state_changed_received_;
video_capture::State state_;
bool device_info_received_;
media::VideoCaptureParams params_;
bool device_id_received_;
int32 device_id_;
DISALLOW_COPY_AND_ASSIGN(MockVideoCaptureDelegate);
};
} // namespace
TEST(VideoCaptureMessageFilterTest, Basic) {
MessageLoop message_loop(MessageLoop::TYPE_IO);
scoped_refptr<VideoCaptureMessageFilter> filter(
new VideoCaptureMessageFilter());
filter->channel_ = reinterpret_cast<IPC::Channel*>(1);
MockVideoCaptureDelegate delegate;
filter->AddDelegate(&delegate);
// VideoCaptureMsg_StateChanged
EXPECT_FALSE(delegate.state_changed_received());
filter->OnMessageReceived(
VideoCaptureMsg_StateChanged(delegate.device_id(),
video_capture::kStarted));
EXPECT_TRUE(delegate.state_changed_received());
EXPECT_TRUE(video_capture::kStarted == delegate.state());
delegate.Reset();
// VideoCaptureMsg_NewBuffer
const base::SharedMemoryHandle handle =
#if defined(OS_WIN)
reinterpret_cast<base::SharedMemoryHandle>(10);
#else
base::SharedMemoryHandle(10, true);
#endif
EXPECT_FALSE(delegate.buffer_created());
filter->OnMessageReceived(VideoCaptureMsg_NewBuffer(
delegate.device_id(), handle, 1, 1));
EXPECT_TRUE(delegate.buffer_created());
EXPECT_EQ(handle, delegate.received_buffer_handle());
delegate.Reset();
// VideoCaptureMsg_BufferReady
int buffer_id = 1;
base::Time timestamp = base::Time::FromInternalValue(1);
EXPECT_FALSE(delegate.buffer_received());
filter->OnMessageReceived(VideoCaptureMsg_BufferReady(
delegate.device_id(), buffer_id, timestamp));
EXPECT_TRUE(delegate.buffer_received());
EXPECT_EQ(buffer_id, delegate.received_buffer_id());
EXPECT_TRUE(timestamp == delegate.received_buffer_ts());
delegate.Reset();
// VideoCaptureMsg_DeviceInfo
media::VideoCaptureParams params;
params.width = 320;
params.height = 240;
params.frame_per_second = 30;
EXPECT_FALSE(delegate.device_info_receive());
filter->OnMessageReceived(VideoCaptureMsg_DeviceInfo(
delegate.device_id(), params));
EXPECT_TRUE(delegate.device_info_receive());
EXPECT_EQ(params.width, delegate.received_device_info().width);
EXPECT_EQ(params.height, delegate.received_device_info().height);
EXPECT_EQ(params.frame_per_second,
delegate.received_device_info().frame_per_second);
delegate.Reset();
message_loop.RunAllPending();
}
TEST(VideoCaptureMessageFilterTest, Delegates) {
MessageLoop message_loop(MessageLoop::TYPE_IO);
scoped_refptr<VideoCaptureMessageFilter> filter(
new VideoCaptureMessageFilter());
filter->channel_ = reinterpret_cast<IPC::Channel*>(1);
MockVideoCaptureDelegate delegate1;
MockVideoCaptureDelegate delegate2;
filter->AddDelegate(&delegate1);
filter->AddDelegate(&delegate2);
// Send an IPC message. Make sure the correct delegate gets called.
EXPECT_FALSE(delegate1.state_changed_received());
EXPECT_FALSE(delegate2.state_changed_received());
filter->OnMessageReceived(
VideoCaptureMsg_StateChanged(delegate1.device_id(),
video_capture::kStarted));
EXPECT_TRUE(delegate1.state_changed_received());
EXPECT_FALSE(delegate2.state_changed_received());
delegate1.Reset();
EXPECT_FALSE(delegate1.state_changed_received());
EXPECT_FALSE(delegate2.state_changed_received());
filter->OnMessageReceived(
VideoCaptureMsg_StateChanged(delegate2.device_id(),
video_capture::kStarted));
EXPECT_FALSE(delegate1.state_changed_received());
EXPECT_TRUE(delegate2.state_changed_received());
delegate2.Reset();
// Remove the delegates. Make sure they won't get called.
filter->RemoveDelegate(&delegate1);
EXPECT_FALSE(delegate1.state_changed_received());
filter->OnMessageReceived(
VideoCaptureMsg_StateChanged(delegate1.device_id(),
video_capture::kStarted));
EXPECT_FALSE(delegate1.state_changed_received());
filter->RemoveDelegate(&delegate2);
EXPECT_FALSE(delegate2.state_changed_received());
filter->OnMessageReceived(
VideoCaptureMsg_StateChanged(delegate2.device_id(),
video_capture::kStarted));
EXPECT_FALSE(delegate2.state_changed_received());
message_loop.RunAllPending();
}
| Crystalnix/BitPop | content/renderer/media/video_capture_message_filter_unittest.cc | C++ | bsd-3-clause | 6,977 |
// d. ii. If k + 2 is greater than or equal to strLen, throw a URIError exception.
decodeURI('%1');
| daejunpark/jsaf | tests/bug_detector_tests/urierror1.js | JavaScript | bsd-3-clause | 100 |
/*
* Copyright (c) 2008-present The Open Source Geospatial Foundation
*
* Published under the BSD license.
* See https://github.com/geoext/geoext2/blob/master/license.txt for the full
* text of the license.
*/
/** api: example[layeropacityslider]
* Layer Opacity Slider
* --------------------
* Use a slider to control layer opacity.
*/
var panel1, panel2, wms, slider;
Ext.require([
'Ext.container.Viewport',
'Ext.layout.container.Border',
'GeoExt.panel.Map',
'GeoExt.slider.LayerOpacity',
'GeoExt.slider.Tip'
]);
Ext.application({
name: 'LayerOpacitySlider GeoExt2',
launch: function() {
wms = new OpenLayers.Layer.WMS(
"Global Imagery",
"http://maps.opengeo.org/geowebcache/service/wms",
{layers: "bluemarble"}
);
// create a map panel with an embedded slider
panel1 = Ext.create('GeoExt.panel.Map', {
title: "Map 1",
renderTo: "map1-container",
height: 300,
width: 400,
map: {
controls: [new OpenLayers.Control.Navigation()]
},
layers: [wms],
extent: [-5, 35, 15, 55],
items: [{
xtype: "gx_opacityslider",
layer: wms,
vertical: true,
height: 120,
x: 10,
y: 10,
plugins: Ext.create("GeoExt.slider.Tip", {
getText: function(thumb) {
return Ext.String.format('Opacity: {0}%', thumb.value);
}
})
}]
});
// create a separate slider bound to the map but displayed elsewhere
slider = Ext.create('GeoExt.slider.LayerOpacity', {
layer: wms,
aggressive: true,
width: 200,
isFormField: true,
inverse: true,
fieldLabel: "opacity",
renderTo: "slider",
plugins: Ext.create("GeoExt.slider.Tip", {
getText: function(thumb) {
return Ext.String.format('Transparency: {0}%', thumb.value);
}
})
});
var clone = wms.clone();
var wms2 = new OpenLayers.Layer.WMS(
"OpenStreetMap WMS",
"http://ows.terrestris.de/osm/service?",
{layers: 'OSM-WMS'},
{
attribution: '© terrestris GmbH & Co. KG <br>' +
'Data © OpenStreetMap ' +
'<a href="http://www.openstreetmap.org/copyright/en"' +
'target="_blank">contributors<a>'
}
);
panel2 = Ext.create('GeoExt.panel.Map', {
title: "Map 2",
renderTo: "map2-container",
height: 300,
width: 400,
map: {
controls: [new OpenLayers.Control.Navigation()]
},
layers: [wms2, clone],
extent: [-5, 35, 15, 55],
items: [{
xtype: "gx_opacityslider",
layer: clone,
complementaryLayer: wms2,
changeVisibility: true,
aggressive: true,
vertical: true,
height: 120,
x: 10,
y: 10,
plugins: Ext.create("GeoExt.slider.Tip", {
getText: function(thumb) {
return Ext.String.format('{0}%', thumb.value);
}
})
}]
});
}
});
| chrismayer/geoext2 | examples/layeropacityslider/layeropacityslider.js | JavaScript | bsd-3-clause | 3,626 |
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#include "db/merge_helper.h"
#include <stdio.h>
#include <string>
#include "db/dbformat.h"
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/merge_operator.h"
#include "table/internal_iterator.h"
#include "util/perf_context_imp.h"
#include "util/statistics.h"
namespace rocksdb {
Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator,
const Slice& key, const Slice* value,
const std::vector<Slice>& operands,
std::string* result, Logger* logger,
Statistics* statistics, Env* env,
Slice* result_operand) {
assert(merge_operator != nullptr);
if (operands.size() == 0) {
assert(value != nullptr && result != nullptr);
result->assign(value->data(), value->size());
return Status::OK();
}
bool success;
Slice tmp_result_operand(nullptr, 0);
const MergeOperator::MergeOperationInput merge_in(key, value, operands,
logger);
MergeOperator::MergeOperationOutput merge_out(*result, tmp_result_operand);
{
// Setup to time the merge
StopWatchNano timer(env, statistics != nullptr);
PERF_TIMER_GUARD(merge_operator_time_nanos);
// Do the merge
success = merge_operator->FullMergeV2(merge_in, &merge_out);
if (tmp_result_operand.data()) {
// FullMergeV2 result is an existing operand
if (result_operand != nullptr) {
*result_operand = tmp_result_operand;
} else {
result->assign(tmp_result_operand.data(), tmp_result_operand.size());
}
} else if (result_operand) {
*result_operand = Slice(nullptr, 0);
}
RecordTick(statistics, MERGE_OPERATION_TOTAL_TIME,
statistics ? timer.ElapsedNanos() : 0);
}
if (!success) {
RecordTick(statistics, NUMBER_MERGE_FAILURES);
return Status::Corruption("Error: Could not perform merge.");
}
return Status::OK();
}
// PRE: iter points to the first merge type entry
// POST: iter points to the first entry beyond the merge process (or the end)
// keys_, operands_ are updated to reflect the merge result.
// keys_ stores the list of keys encountered while merging.
// operands_ stores the list of merge operands encountered while merging.
// keys_[i] corresponds to operands_[i] for each i.
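// Conceptual illustration (operand ordering deliberately left unspecified
// here): if the entries seen for a user key are Merge(m2), Merge(m1), Put(v0),
// newest first, MergeUntil issues one FullMergeV2 call with v0 as the existing
// value (nullptr for a delete) and the collected operands {m1, m2}, replaces
// keys_/operands_ with a single kTypeValue entry holding the result, and
// advances the iterator past the Put.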
Status MergeHelper::MergeUntil(InternalIterator* iter,
RangeDelAggregator* range_del_agg,
const SequenceNumber stop_before,
const bool at_bottom) {
// Get a copy of the internal key, before it's invalidated by iter->Next()
// Also maintain the list of merge operands seen.
assert(HasOperator());
keys_.clear();
merge_context_.Clear();
assert(user_merge_operator_);
bool first_key = true;
// We need to parse the internal key again as the parsed key is
// backed by the internal key!
// Assume no internal key corruption as it has been successfully parsed
// by the caller.
// original_key_is_iter variable is just caching the information:
// original_key_is_iter == (iter->key().ToString() == original_key)
bool original_key_is_iter = true;
std::string original_key = iter->key().ToString();
// Important:
// orig_ikey is backed by original_key if keys_.empty()
// orig_ikey is backed by keys_.back() if !keys_.empty()
ParsedInternalKey orig_ikey;
ParseInternalKey(original_key, &orig_ikey);
Status s;
bool hit_the_next_user_key = false;
for (; iter->Valid(); iter->Next(), original_key_is_iter = false) {
ParsedInternalKey ikey;
assert(keys_.size() == merge_context_.GetNumOperands());
if (!ParseInternalKey(iter->key(), &ikey)) {
// stop at corrupted key
if (assert_valid_internal_key_) {
assert(!"Corrupted internal key not expected.");
return Status::Corruption("Corrupted internal key not expected.");
}
break;
} else if (first_key) {
assert(user_comparator_->Equal(ikey.user_key, orig_ikey.user_key));
first_key = false;
} else if (!user_comparator_->Equal(ikey.user_key, orig_ikey.user_key)) {
// hit a different user key, stop right here
hit_the_next_user_key = true;
break;
} else if (stop_before && ikey.sequence <= stop_before) {
// hit an entry that's visible by the previous snapshot, can't touch that
break;
}
// At this point we are guaranteed that we need to process this key.
assert(IsValueType(ikey.type));
if (ikey.type != kTypeMerge) {
// hit a put/delete/single delete
// => merge the put value or a nullptr with operands_
// => store result in operands_.back() (and update keys_.back())
// => change the entry type to kTypeValue for keys_.back()
// We are done! Success!
// If there are no operands, just return the Status::OK(). That will cause
// the compaction iterator to write out the key we're currently at, which
// is the put/delete we just encountered.
if (keys_.empty()) {
return Status::OK();
}
// TODO(noetzli) If the merge operator returns false, we are currently
// (almost) silently dropping the put/delete. That's probably not what we
// want.
const Slice val = iter->value();
const Slice* val_ptr = (kTypeValue == ikey.type) ? &val : nullptr;
std::string merge_result;
s = TimedFullMerge(user_merge_operator_, ikey.user_key, val_ptr,
merge_context_.GetOperands(), &merge_result, logger_,
stats_, env_);
// We store the result in keys_.back() and operands_.back()
// if nothing went wrong (i.e.: no operand corruption on disk)
if (s.ok()) {
// The original key encountered
original_key = std::move(keys_.back());
orig_ikey.type = kTypeValue;
UpdateInternalKey(&original_key, orig_ikey.sequence, orig_ikey.type);
keys_.clear();
merge_context_.Clear();
keys_.emplace_front(std::move(original_key));
merge_context_.PushOperand(merge_result);
}
// move iter to the next entry
iter->Next();
return s;
} else {
// hit a merge
// => if there is a compaction filter, apply it.
// => check for range tombstones covering the operand
// => merge the operand into the front of the operands_ list
// if not filtered
// => then continue because we haven't yet seen a Put/Delete.
//
// Keep queuing keys and operands until we either meet a put / delete
// request or later did a partial merge.
Slice value_slice = iter->value();
// add an operand to the list if:
// 1) it's included in one of the snapshots. in that case we *must* write
// it out, no matter what compaction filter says
// 2) it's not filtered by a compaction filter
if ((ikey.sequence <= latest_snapshot_ ||
!FilterMerge(orig_ikey.user_key, value_slice)) &&
(range_del_agg == nullptr ||
!range_del_agg->ShouldDelete(iter->key()))) {
if (original_key_is_iter) {
// this is just an optimization that saves us one memcpy
keys_.push_front(std::move(original_key));
} else {
keys_.push_front(iter->key().ToString());
}
if (keys_.size() == 1) {
// we need to re-anchor the orig_ikey because it was anchored by
// original_key before
ParseInternalKey(keys_.back(), &orig_ikey);
}
merge_context_.PushOperand(value_slice,
iter->IsValuePinned() /* operand_pinned */);
}
}
}
if (merge_context_.GetNumOperands() == 0) {
// we filtered out all the merge operands
return Status::OK();
}
// We are sure we have seen this key's entire history if we are at the
// last level and exhausted all internal keys of this user key.
// NOTE: !iter->Valid() does not necessarily mean we hit the
// beginning of a user key, as versions of a user key might be
// split into multiple files (even files on the same level)
// and some files might not be included in the compaction/merge.
//
// There are also cases where we have seen the root of history of this
// key without being sure of it. Then, we simply miss the opportunity
// to combine the keys. Since VersionSet::SetupOtherInputs() always makes
// sure that all merge-operands on the same level get compacted together,
// this will simply lead to these merge operands moving to the next level.
//
// So, we only perform the following logic (to merge all operands together
// without a Put/Delete) if we are certain that we have seen the end of key.
bool surely_seen_the_beginning = hit_the_next_user_key && at_bottom;
if (surely_seen_the_beginning) {
// do a final merge with nullptr as the existing value and say
// bye to the merge type (it's now converted to a Put)
assert(kTypeMerge == orig_ikey.type);
assert(merge_context_.GetNumOperands() >= 1);
assert(merge_context_.GetNumOperands() == keys_.size());
std::string merge_result;
s = TimedFullMerge(user_merge_operator_, orig_ikey.user_key, nullptr,
merge_context_.GetOperands(), &merge_result, logger_,
stats_, env_);
if (s.ok()) {
// The original key encountered
// We are certain that keys_ is not empty here (see assertions couple of
// lines before).
original_key = std::move(keys_.back());
orig_ikey.type = kTypeValue;
UpdateInternalKey(&original_key, orig_ikey.sequence, orig_ikey.type);
keys_.clear();
merge_context_.Clear();
keys_.emplace_front(std::move(original_key));
merge_context_.PushOperand(merge_result);
}
} else {
// We haven't seen the beginning of the key nor a Put/Delete.
// Attempt to use the user's associative merge function to
// merge the stacked merge operands into a single operand.
//
// TODO(noetzli) The docblock of MergeUntil suggests that a successful
// partial merge returns Status::OK(). Should we change the status code
// after a successful partial merge?
s = Status::MergeInProgress();
if (merge_context_.GetNumOperands() >= 2 &&
merge_context_.GetNumOperands() >= min_partial_merge_operands_) {
bool merge_success = false;
std::string merge_result;
{
StopWatchNano timer(env_, stats_ != nullptr);
PERF_TIMER_GUARD(merge_operator_time_nanos);
merge_success = user_merge_operator_->PartialMergeMulti(
orig_ikey.user_key,
std::deque<Slice>(merge_context_.GetOperands().begin(),
merge_context_.GetOperands().end()),
&merge_result, logger_);
RecordTick(stats_, MERGE_OPERATION_TOTAL_TIME,
stats_ ? timer.ElapsedNanosSafe() : 0);
}
if (merge_success) {
// Merging of operands (associative merge) was successful.
// Replace operands with the merge result
merge_context_.Clear();
merge_context_.PushOperand(merge_result);
keys_.erase(keys_.begin(), keys_.end() - 1);
}
}
}
return s;
}
MergeOutputIterator::MergeOutputIterator(const MergeHelper* merge_helper)
: merge_helper_(merge_helper) {
it_keys_ = merge_helper_->keys().rend();
it_values_ = merge_helper_->values().rend();
}
void MergeOutputIterator::SeekToFirst() {
const auto& keys = merge_helper_->keys();
const auto& values = merge_helper_->values();
assert(keys.size() == values.size());
it_keys_ = keys.rbegin();
it_values_ = values.rbegin();
}
void MergeOutputIterator::Next() {
++it_keys_;
++it_values_;
}
bool MergeHelper::FilterMerge(const Slice& user_key, const Slice& value_slice) {
if (compaction_filter_ == nullptr) {
return false;
}
if (stats_ != nullptr) {
filter_timer_.Start();
}
bool to_delete =
compaction_filter_->FilterMergeOperand(level_, user_key, value_slice);
total_filter_time_ += filter_timer_.ElapsedNanosSafe();
return to_delete;
}
} // namespace rocksdb
| tsheasha/rocksdb | db/merge_helper.cc | C++ | bsd-3-clause | 12,636 |
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/interest_group/storage_interest_group.h"
#include "content/services/auction_worklet/public/mojom/bidder_worklet.mojom.h"
namespace content {
StorageInterestGroup::StorageInterestGroup() = default;
StorageInterestGroup::StorageInterestGroup(
auction_worklet::mojom::BiddingInterestGroupPtr group) {
this->bidding_group = std::move(group);
}
StorageInterestGroup::StorageInterestGroup(StorageInterestGroup&&) = default;
StorageInterestGroup::~StorageInterestGroup() = default;
std::ostream& operator<<(std::ostream& out,
const StorageInterestGroup::KAnonymityData& kanon) {
return out << "KAnonymityData[key=`" << kanon.key << "`, k=" << kanon.k
<< ", last_updated=`" << kanon.last_updated << "`]";
}
} // namespace content
| scheib/chromium | content/browser/interest_group/storage_interest_group.cc | C++ | bsd-3-clause | 966 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-29 12:16
from __future__ import unicode_literals
from django.contrib.postgres.operations import BtreeGinExtension
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0037_auto_20171124_0847'),
]
operations = [
BtreeGinExtension()
]
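    # The BtreeGinExtension() operation above issues a CREATE EXTENSION
    # statement for PostgreSQL's btree_gin module, so the database role
    # applying this migration needs permission to install extensions.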
| UITools/saleor | saleor/product/migrations/0038_auto_20171129_0616.py | Python | bsd-3-clause | 383 |
#ifndef BOOST_SIMD_INCLUDE_FUNCTIONS_SIMD_INSERT_HPP_INCLUDED
#define BOOST_SIMD_INCLUDE_FUNCTIONS_SIMD_INSERT_HPP_INCLUDED
#include <boost/simd/memory/include/functions/simd/insert.hpp>
#endif
| hainm/pythran | third_party/boost/simd/include/functions/simd/insert.hpp | C++ | bsd-3-clause | 196 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Afrikaans language.
.. seealso:: http://en.wikipedia.org/wiki/Afrikaans_language
"""
import re
from translate.lang import common
articlere = re.compile(r"'n\b")
class af(common.Common):
"""This class represents Afrikaans."""
validdoublewords = [u"u"]
punctuation = u"".join([common.Common.commonpunc, common.Common.quotes,
common.Common.miscpunc])
sentenceend = u".!?…"
sentencere = re.compile(r"""
(?s) # make . also match newlines
.*? # anything, but match non-greedy
[%s] # the puntuation for sentence ending
\s+ # the spacing after the puntuation
(?='n\s[A-Z]|[^'a-z\d]|'[^n])
# lookahead that next part starts with caps or 'n followed by caps
""" % sentenceend, re.VERBOSE
)
specialchars = u"ëïêôûáéíóúý"
def capsstart(cls, text):
"""Modify this for the indefinite article ('n)."""
match = articlere.search(text, 0, 20)
if match:
#construct a list of non-apostrophe punctuation:
nonapos = u"".join(cls.punctuation.split(u"'"))
stripped = text.lstrip().lstrip(nonapos)
match = articlere.match(stripped)
if match:
return common.Common.capsstart(stripped[match.end():])
return common.Common.capsstart(text)
capsstart = classmethod(capsstart)
cyr2lat = {
u"А": "A", u"а": "a",
u"Б": "B", u"б": "b",
u"В": "W", u"в": "w", # Different if at the end of a syllable see rule 2.
u"Г": "G", u"г": "g", # see rule 3 and 4
u"Д": "D", u"д": "d",
u"ДЖ": "Dj", u"дж": "dj",
u"Е": "Je", u"е": "je", # Sometimes e need to check when/why see rule 5.
u"Ё": "Jo", u"ё": "jo", # see rule 6
u"ЕЙ": "Ei", u"ей": "ei",
u"Ж": "Zj", u"ж": "zj",
u"З": "Z", u"з": "z",
u"И": "I", u"и": "i",
u"Й": "J", u"й": "j", # see rule 9 and 10
u"К": "K", u"к": "k", # see note 11
u"Л": "L", u"л": "l",
u"М": "M", u"м": "m",
u"Н": "N", u"н": "n",
u"О": "O", u"о": "o",
u"П": "P", u"п": "p",
u"Р": "R", u"р": "r",
u"С": "S", u"с": "s", # see note 12
u"Т": "T", u"т": "t",
u"У": "Oe", u"у": "oe",
u"Ф": "F", u"ф": "f",
u"Х": "Ch", u"х": "ch", # see rule 12
u"Ц": "Ts", u"ц": "ts",
u"Ч": "Tj", u"ч": "tj",
u"Ш": "Sj", u"ш": "sj",
u"Щ": "Sjtsj", u"щ": "sjtsj",
u"Ы": "I", u"ы": "i", # see note 13
u"Ъ": "", u"ъ": "", # See note 14
u"Ь": "", u"ь": "", # this letter is not in the AWS we assume it is left out as in the previous letter
u"Э": "E", u"э": "e",
u"Ю": "Joe", u"ю": "joe",
u"Я": "Ja", u"я": "ja",
}
"""Mapping of Cyrillic to Latin letters for transliteration in Afrikaans"""
cyr_vowels = u"аеёиоуыэюя"
def tranliterate_cyrillic(text):
"""Convert Cyrillic text to Latin according to the AWS transliteration rules."""
trans = u""
for i in text:
trans += cyr2lat.get(i, i)
return trans
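# Illustrative use of the mapping above. The function is a plain per-character
# lookup, so contextual notes in the table comments (e.g. "see rule 2") are not
# applied: tranliterate_cyrillic(u"Пушкин") returns u"Poesjkin".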
| staranjeet/fjord | vendor/packages/translate-toolkit/translate/lang/af.py | Python | bsd-3-clause | 3,846 |
<?php
/**
* Phergie
*
* PHP version 5
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.
* It is also available through the world-wide-web at this URL:
* http://phergie.org/license
*
* @category Phergie
* @package Phergie_Tests
* @author Phergie Development Team <team@phergie.org>
* @copyright 2008-2012 Phergie Development Team (http://phergie.org)
* @license http://phergie.org/license New BSD License
* @link http://pear.phergie.org/package/Phergie_Tests
*/
/**
* Unit test suite for Phergie_Event_Handler.
*
* @category Phergie
* @package Phergie_Tests
* @author Phergie Development Team <team@phergie.org>
* @license http://phergie.org/license New BSD License
* @link http://pear.phergie.org/package/Phergie_Tests
*/
class Phergie_Event_HandlerTest extends PHPUnit_Framework_TestCase
{
/**
* Instance of the class to test
*
* @var Phergie_Event_Handler
*/
private $events;
/**
* Plugin associated with an event added to the handler
*
* @var Phergie_Plugin_Abstract
*/
private $plugin;
/**
* Type of event added to the handler
*
* @var string
*/
private $type = 'privmsg';
/**
* Arguments for an event added to the handler
*
* @var array
*/
private $args = array('#channel', 'text');
/**
* Instantiates the class to test.
*
* @return void
*/
public function setUp()
{
$this->events = new Phergie_Event_Handler;
$this->plugin = $this->getMockForAbstractClass('Phergie_Plugin_Abstract');
}
/**
* Tests that the handler contains no events by default.
*
* @return void
*/
public function testGetEvents()
{
$expected = array();
$actual = $this->events->getEvents();
$this->assertEquals($expected, $actual);
}
/**
* Adds a mock event to the handler.
*
* @return void
*/
private function addMockEvent($type = null, $args = null)
{
if (!$type) {
$type = $this->type;
$args = $this->args;
}
$this->events->addEvent($this->plugin, $type, $args);
}
/**
* Data provider for methods requiring a valid event type and a
* corresponding set of arguments.
*
* @return array Enumerated array of enumerated arrays each containing
* a string for an event type and an enumerated array of
* arguments for that event type
*/
public function dataProviderEventTypesAndArguments()
{
return array(
array('nick', array('nickname')),
array('oper', array('username', 'password')),
array('quit', array()),
array('quit', array('message')),
array('join', array('#channel1,#channel2')),
array('join', array('#channel1,#channel2', 'key1,key2')),
array('part', array('#channel1,#channel2')),
array('mode', array('#channel', '-l', '20')),
array('topic', array('#channel', 'message')),
array('names', array('#channel1,#channel2')),
array('list', array('#channel1,#channel2')),
array('invite', array('nickname', '#channel')),
array('kick', array('#channel', 'username1,username2')),
array('kick', array('#channel', 'username', 'comment')),
array('version', array('nick_or_server')),
array('version', array('nick', 'reply')),
array('stats', array('c')),
array('stats', array('c', 'server')),
array('links', array('mask')),
array('links', array('server', 'mask')),
array('time', array('nick_or_server')),
array('time', array('nick', 'reply')),
array('connect', array('server')),
array('connect', array('server', '6667')),
array('connect', array('target', '6667', 'remote')),
array('trace', array()),
array('trace', array('server')),
array('admin', array()),
array('admin', array('server')),
array('info', array()),
array('info', array('server')),
array('privmsg', array('receiver1,receiver2', 'text')),
array('notice', array('nickname', 'text')),
array('who', array('name')),
array('who', array('name', 'o')),
array('whois', array('mask1,mask2')),
array('whois', array('server', 'mask')),
array('whowas', array('nickname')),
array('whowas', array('nickname', '9')),
array('whowas', array('nickname', '9', 'server')),
array('kill', array('nickname', 'comment')),
array('ping', array('server1')),
array('ping', array('server1', 'server2')),
array('pong', array('daemon')),
array('pong', array('daemon', 'daemon2')),
array('finger', array('nick')),
array('finger', array('nick', 'reply')),
array('error', array('message')),
);
}
/**
* Tests that the handler can receive a new event.
*
* @param string $type Event type
* @param array $args Event arguments
* @dataProvider dataProviderEventTypesAndArguments
* @return void
*/
public function testAddEventWithValidData($type, array $args)
{
$this->addMockEvent($type, $args);
$events = $this->events->getEvents();
$event = array_shift($events);
$this->assertInstanceOf('Phergie_Event_Command', $event);
$this->assertSame($this->plugin, $event->getPlugin());
$this->assertSame($type, $event->getType());
$this->assertSame($args, $event->getArguments());
}
/**
* Tests that attempting to add an event to the handler with an invalid
* type results in an exception.
*
* @return void
*/
public function testAddEventWithInvalidType()
{
$type = 'foo';
try {
$this->events->addEvent($this->plugin, $type);
$this->fail('Expected exception was not thrown');
} catch (Phergie_Event_Exception $e) {
if ($e->getCode() != Phergie_Event_Exception::ERR_UNKNOWN_EVENT_TYPE) {
$this->fail('Unexpected exception code ' . $e->getCode());
}
}
}
/**
* Tests that the events contained within the handler can be
* collectively removed.
*
* @return void
* @depends testGetEvents
* @depends testAddEventWithValidData
*/
public function testClearEvents()
{
$this->addMockEvent();
$this->events->clearEvents();
$expected = array();
$actual = $this->events->getEvents();
$this->assertSame($expected, $actual);
}
/**
* Tests that the events contained within the handler can be replaced
* with a different set of events.
*
* @return void
* @depends testAddEventWithValidData
*/
public function testReplaceEvents()
{
$this->addMockEvent();
$expected = array();
$this->events->replaceEvents($expected);
$actual = $this->events->getEvents();
$this->assertSame($expected, $actual);
}
/**
* Tests that the handler can accurately identify whether it has an
* event of a specified type.
*
* @return void
* @depends testAddEventWithValidData
*/
public function testHasEventOfType()
{
$this->assertFalse($this->events->hasEventOfType($this->type));
$this->addMockEvent();
$this->assertTrue($this->events->hasEventOfType($this->type));
}
/**
* Tests that the handler can return events it contains that are of a
* specified type.
*
* @return void
* @depends testAddEventWithValidData
*/
public function testGetEventsOfType()
{
$expected = array();
$actual = $this->events->getEventsOfType($this->type);
$this->assertSame($expected, $actual);
$this->addMockEvent();
$expected = $this->events->getEvents();
$actual = $this->events->getEventsOfType($this->type);
$this->assertSame($expected, $actual);
}
/**
* Tests that an event can be removed from the handler.
*
* @return void
* @depends testAddEventWithValidData
*/
public function testRemoveEvent()
{
$this->addMockEvent();
$events = $this->events->getEvents();
$event = array_shift($events);
$this->events->removeEvent($event);
$expected = array();
$actual = $this->events->getEvents();
$this->assertSame($expected, $actual);
}
/**
* Tests that the handler supports iteration of the events it contains.
*
* @return void
* @depends testAddEventWithValidData
*/
public function testImplementsGetIterator()
{
$reflector = new ReflectionClass('Phergie_Event_Handler');
$this->assertTrue($reflector->implementsInterface('IteratorAggregate'));
$this->addMockEvent();
$events = $this->events->getEvents();
$expected = array_shift($events);
foreach ($this->events as $actual) {
$this->assertSame($expected, $actual);
}
}
/**
* Tests that the handler supports returning a count of the events it
* contains.
*
* @return void
* @depends testAddEventWithValidData
*/
public function testImplementsCountable()
{
$reflector = new ReflectionClass('Phergie_Event_Handler');
$this->assertTrue($reflector->implementsInterface('Countable'));
$expected = 0;
$actual = count($this->events);
$this->assertSame($expected, $actual);
$this->addMockEvent();
$expected = 1;
$actual = count($this->events);
$this->assertSame($expected, $actual);
}
}
| sudounlv/ircbot-php | Tests/Phergie/Event/HandlerTest.php | PHP | bsd-3-clause | 10,066 |
<?php
namespace Symbiote\QueuedJobs\Services;
use Symbiote\QueuedJobs\DataObjects\QueuedJobDescriptor;
/**
* Default method for handling items run via the cron
*
* @author marcus@symbiote.com.au
* @license BSD License http://silverstripe.org/bsd-license/
*/
class DefaultQueueHandler
{
public function startJobOnQueue(QueuedJobDescriptor $job)
{
$job->activateOnQueue();
}
public function scheduleJob(QueuedJobDescriptor $job, $date)
{
// noop
}
}
| nyeholt/silverstripe-queuedjobs | src/Services/DefaultQueueHandler.php | PHP | bsd-3-clause | 497 |
<?php
namespace Payment\Controller;
use Eva\Mvc\Controller\ActionController,
Payment\Service\Exception,
Eva\View\Model\ViewModel;
class ResponseController extends ActionController
{
protected $addResources = array(
);
public function indexAction()
{
$adapter = $this->params()->fromQuery('adapter');
$callback = $this->params()->fromQuery('callback');
$amount = $this->params()->fromQuery('amount');
$secretKey = $this->params()->fromQuery('secretKey');
$requestTime = $this->params()->fromQuery('time');
$signed = $this->params()->fromQuery('signed');
$responseData = $this->params()->fromQuery();
if (!$responseData) {
$responseData = $this->params()->fromPost();
}
if (isset($responseData['notify_id']) && isset($responseData['trade_status'])) {
return $this->alipayResponse();
}
if(!$amount){
throw new Exception\InvalidArgumentException(sprintf(
'No payment amount found'
));
}
if(!$adapter){
throw new Exception\InvalidArgumentException(sprintf(
'No payment adapter key found'
));
}
if(!$callback){
throw new Exception\InvalidArgumentException(sprintf(
'No payment callback found'
));
}
if(!$secretKey){
throw new Exception\InvalidArgumentException(sprintf(
'No payment secretKey found'
));
}
if(!$requestTime){
throw new Exception\InvalidArgumentException(sprintf(
'No payment request time found'
));
}
if(!$signed){
throw new Exception\InvalidArgumentException(sprintf(
                'No payment signed value found'
));
}
if (!$this->authenticate($this->params()->fromQuery())) {
throw new Exception\InvalidArgumentException(sprintf(
                'Signed value does not match'
            ));
}
$adapter = $adapter == 'paypalec' ? 'PaypalEc' : 'AlipayEc';
$pay = new \Payment\Service\Payment($adapter);
$pay->setServiceLocator($this->getServiceLocator());
$pay->setStep('response');
$pay->saveResponseLog($secretKey, $responseData);
if ($callback == 'notify') {
return;
}
if($callback){
return $this->redirect()->toUrl($callback);
}
}
public function authenticate($params)
{
$adapter = $params['adapter'];
$callback = $params['callback'];
$amount = $params['amount'];
$secretKey = $params['secretKey'];
$requestTime = $params['time'];
$signed = $params['signed'];
$itemModel = \Eva\Api::_()->getModel('Payment\Model\Log');
$log = $itemModel->getLog($secretKey, array(
'self' => array(
'*',
'unserializeRequestData()',
'unserializeResponseData()',
),
));
if (!$log) {
return false;
}
$adapter = $adapter == 'paypalec' ? 'PaypalEc' : 'AlipayEc';
$pay = new \Payment\Service\Payment($adapter);
$pay->setServiceLocator($this->getServiceLocator());
$authenticate = $pay->setAmount($amount)
->setRequestTime($requestTime)
->setlogData($log['requestData'])
->setStep('response')
->getSigned();
if ($authenticate !== $signed) {
return false;
}
return true;
}
public function alipayResponse()
{
$callback = $this->params()->fromQuery('callback');
$responseData = $this->params()->fromQuery();
if (!isset($responseData['notify_id'])) {
$responseData = $this->params()->fromPost();
$method = 'notify';
}
$config = \Eva\Api::_()->getModuleConfig('Payment');
$options = $config['payment']['alipay'];
$pay = new \Payment\Service\Payment('AlipayEc', false ,$options);
$verify_result = $pay->verify();
if ($verify_result) {
$pay->setStep('response');
$pay->saveResponseLog($responseData['out_trade_no'], $responseData);
}
if ($callback == 'notify') {
return;
}
if($callback){
return $this->redirect()->toUrl($callback);
}
}
}
| Brother-Simon/eva-engine | module/Payment/src/Payment/Controller/ResponseController.php | PHP | bsd-3-clause | 4,610 |
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/update_client/updater_state.h"
#include <string>
#include <utility>
#include "base/enterprise_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/utf_string_conversions.h"
#include "build/branding_buildflags.h"
#include "build/build_config.h"
namespace update_client {
// The value of this constant does not reflect its name (i.e. "domainjoined"
// vs something like "isenterprisemanaged") because it is used with omaha.
// After discussion with omaha team it was decided to leave the value as is to
// keep continuity with previous chrome versions.
const char UpdaterState::kIsEnterpriseManaged[] = "domainjoined";
UpdaterState::UpdaterState(bool is_machine) : is_machine_(is_machine) {}
UpdaterState::~UpdaterState() = default;
std::unique_ptr<UpdaterState::Attributes> UpdaterState::GetState(
bool is_machine) {
#if defined(OS_WIN) || defined(OS_MAC)
UpdaterState updater_state(is_machine);
updater_state.ReadState();
return std::make_unique<Attributes>(updater_state.BuildAttributes());
#else
return nullptr;
#endif // OS_WIN or Mac
}
#if defined(OS_WIN) || defined(OS_MAC)
void UpdaterState::ReadState() {
is_enterprise_managed_ = base::IsMachineExternallyManaged();
#if BUILDFLAG(GOOGLE_CHROME_BRANDING)
updater_name_ = GetUpdaterName();
updater_version_ = GetUpdaterVersion(is_machine_);
last_autoupdate_started_ = GetUpdaterLastStartedAU(is_machine_);
last_checked_ = GetUpdaterLastChecked(is_machine_);
is_autoupdate_check_enabled_ = IsAutoupdateCheckEnabled();
update_policy_ = GetUpdatePolicy();
#endif // BUILDFLAG(GOOGLE_CHROME_BRANDING)
}
#endif // OS_WIN or Mac
UpdaterState::Attributes UpdaterState::BuildAttributes() const {
Attributes attributes;
#if defined(OS_WIN)
// Only Windows implements this attribute in a meaningful way.
attributes["ismachine"] = is_machine_ ? "1" : "0";
#endif // OS_WIN
attributes[kIsEnterpriseManaged] = is_enterprise_managed_ ? "1" : "0";
attributes["name"] = updater_name_;
if (updater_version_.IsValid())
attributes["version"] = updater_version_.GetString();
const base::Time now = base::Time::NowFromSystemTime();
if (!last_autoupdate_started_.is_null())
attributes["laststarted"] =
NormalizeTimeDelta(now - last_autoupdate_started_);
if (!last_checked_.is_null())
attributes["lastchecked"] = NormalizeTimeDelta(now - last_checked_);
attributes["autoupdatecheckenabled"] =
is_autoupdate_check_enabled_ ? "1" : "0";
DCHECK((update_policy_ >= 0 && update_policy_ <= 3) || update_policy_ == -1);
attributes["updatepolicy"] = base::NumberToString(update_policy_);
return attributes;
}
std::string UpdaterState::NormalizeTimeDelta(const base::TimeDelta& delta) {
const base::TimeDelta two_weeks = base::Days(14);
const base::TimeDelta two_months = base::Days(56);
std::string val; // Contains the value to return in hours.
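  // For example: a 20-day delta exceeds two_weeks but not two_months, so
  // "336" (two weeks in hours) is reported; anything beyond two_months
  // reports "1344" (56 days in hours).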
if (delta <= two_weeks) {
val = "0";
} else if (two_weeks < delta && delta <= two_months) {
val = "336"; // 2 weeks in hours.
} else {
val = "1344"; // 2*28 days in hours.
}
DCHECK(!val.empty());
return val;
}
} // namespace update_client
| scheib/chromium | components/update_client/updater_state.cc | C++ | bsd-3-clause | 3,368 |
import sys
import os
import glob
import shutil
import datetime
assert 'pymel' not in sys.modules or 'PYMEL_INCLUDE_EXAMPLES' in os.environ, "to generate docs PYMEL_INCLUDE_EXAMPLES env var must be set before pymel is imported"
# remember, the processed command examples are not version specific. you must
# run cmdcache.fixCodeExamples() to bring processed examples in from the raw
# version-specific example caches
os.environ['PYMEL_INCLUDE_EXAMPLES'] = 'True'
pymel_root = os.path.dirname(os.path.dirname(sys.modules[__name__].__file__))
docsdir = os.path.join(pymel_root, 'docs')
stubdir = os.path.join(pymel_root, 'extras', 'completion', 'py')
useStubs = False
if useStubs:
sys.path.insert(0, stubdir)
import pymel
print pymel.__file__
else:
import pymel
# make sure dynamic modules are fully loaded
from pymel.core.uitypes import *
from pymel.core.nodetypes import *
version = pymel.__version__.rsplit('.',1)[0]
SOURCE = 'source'
BUILD_ROOT = 'build'
BUILD = os.path.join(BUILD_ROOT, version)
sourcedir = os.path.join(docsdir, SOURCE)
gendir = os.path.join(sourcedir, 'generated')
buildrootdir = os.path.join(docsdir, BUILD_ROOT)
builddir = os.path.join(docsdir, BUILD)
from pymel.internal.cmdcache import fixCodeExamples
def generate(clean=True):
"delete build and generated directories and generate a top-level documentation source file for each module."
print "generating %s - %s" % (docsdir, datetime.datetime.now())
from sphinx.ext.autosummary.generate import main as sphinx_autogen
if clean:
clean_build()
clean_generated()
os.chdir(sourcedir)
sphinx_autogen( [''] + '--templates ../templates modules.rst'.split() )
sphinx_autogen( [''] + '--templates ../templates'.split() + glob.glob('generated/pymel.*.rst') )
print "...done generating %s - %s" % (docsdir, datetime.datetime.now())
def clean_build():
"delete existing build directory"
if os.path.exists(buildrootdir):
print "removing %s - %s" % (buildrootdir, datetime.datetime.now())
shutil.rmtree(buildrootdir)
def clean_generated():
"delete existing generated directory"
if os.path.exists(gendir):
print "removing %s - %s" % (gendir, datetime.datetime.now())
shutil.rmtree(gendir)
def find_dot():
if os.name == 'posix':
dot_bin = 'dot'
else:
dot_bin = 'dot.exe'
for p in os.environ['PATH'].split(os.pathsep):
d = os.path.join(p, dot_bin)
if os.path.exists(d):
return d
raise TypeError('cannot find graphiz dot executable in the path (%s)' % os.environ['PATH'])
def copy_changelog():
changelog = os.path.join(pymel_root, 'CHANGELOG.rst')
whatsnew = os.path.join(pymel_root, 'docs', 'source', 'whats_new.rst')
shutil.copy2(changelog, whatsnew)
def build(clean=True, **kwargs):
from sphinx import main as sphinx_build
print "building %s - %s" % (docsdir, datetime.datetime.now())
if not os.path.isdir(gendir):
generate()
os.chdir( docsdir )
if clean:
clean_build()
copy_changelog()
#mkdir -p build/html build/doctrees
#import pymel.internal.cmdcache as cmdcache
#cmdcache.fixCodeExamples()
opts = ['']
opts += '-b html -d build/doctrees'.split()
# set some defaults
if not kwargs.get('graphviz_dot', None):
kwargs['graphviz_dot'] = find_dot()
for key, value in kwargs.iteritems():
opts.append('-D')
opts.append( key.strip() + '=' + value.strip() )
opts.append('-P')
opts.append(SOURCE)
opts.append(BUILD)
sphinx_build(opts)
print "...done building %s - %s" % (docsdir, datetime.datetime.now())
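# A typical invocation sketch (illustrative; the import path and the dot
# location below are assumptions, not part of pymel's documented workflow).
# Both helpers are defined above: generate() writes the per-module .rst
# sources and build() runs sphinx into docs/build/<version>.
#
#   from maintenance import docs
#   docs.generate()
#   docs.build(graphviz_dot='/usr/local/bin/dot')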
| shrtcww/pymel | maintenance/docs.py | Python | bsd-3-clause | 3,694 |
// (C) Copyright Gennadiy Rozental 2001.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/libs/test for the library home page.
//
// File : $RCSfile$
//
// Version : $Revision$
//
//  Description : class basic_cstring wraps a C string and provides a
//                std_string-like interface
// ***************************************************************************
#ifndef BOOST_TEST_UTILS_BASIC_CSTRING_HPP
#define BOOST_TEST_UTILS_BASIC_CSTRING_HPP
// Boost.Test
#include <boost/test/utils/basic_cstring/basic_cstring_fwd.hpp>
#include <boost/test/utils/basic_cstring/bcs_char_traits.hpp>
// Boost
#include <boost/type_traits/remove_cv.hpp>
// STL
#include <string>
#include <boost/test/detail/suppress_warnings.hpp>
//____________________________________________________________________________//
namespace pdalboost {
namespace unit_test {
// ************************************************************************** //
// ************** basic_cstring ************** //
// ************************************************************************** //
template<typename CharT>
class basic_cstring {
typedef basic_cstring<CharT> self_type;
public:
// Subtypes
typedef ut_detail::bcs_char_traits<CharT> traits_type;
typedef typename ut_detail::bcs_char_traits<CharT>::std_string std_string;
typedef CharT value_type;
typedef typename remove_cv<value_type>::type value_ret_type;
typedef value_type* pointer;
typedef value_type const* const_pointer;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef value_type const* const_iterator;
typedef value_type* iterator;
// !! should also present reverse_iterator, const_reverse_iterator
#if !BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(600))
enum npos_type { npos = static_cast<size_type>(-1) };
#else
// IBM/VisualAge version 6 is not able to handle enums larger than 4 bytes.
// But size_type is 8 bytes in 64bit mode.
static const size_type npos = -1 ;
#endif
static pointer null_str();
// Constructors; default copy constructor is generated by compiler
basic_cstring();
basic_cstring( std_string const& s );
basic_cstring( pointer s );
template<typename LenType>
basic_cstring( pointer s, LenType len ) : m_begin( s ), m_end( m_begin + len ) {}
basic_cstring( pointer first, pointer last );
// data access methods
value_ret_type operator[]( size_type index ) const;
value_ret_type at( size_type index ) const;
// size operators
size_type size() const;
bool is_empty() const;
void clear();
void resize( size_type new_len );
    // !! only for STL container conformance; use is_empty instead
bool empty() const;
// Trimming
self_type& trim_right( size_type trim_size );
self_type& trim_left( size_type trim_size );
self_type& trim_right( iterator it );
self_type& trim_left( iterator it );
#if !BOOST_WORKAROUND(__IBMCPP__, BOOST_TESTED_AT(800))
self_type& trim_left( self_type exclusions = self_type() ) ;
self_type& trim_right( self_type exclusions = self_type() ) ;
self_type& trim( self_type exclusions = self_type() ) ;
#else
// VA C++/XL C++ v6 and v8 has in this case a problem with the default arguments.
self_type& trim_left( self_type exclusions );
self_type& trim_right( self_type exclusions );
self_type& trim( self_type exclusions );
self_type& trim_left() { return trim_left( self_type() ); }
self_type& trim_right() { return trim_right( self_type() ); }
self_type& trim() { return trim( self_type() ); }
#endif
// Assignment operators
basic_cstring& operator=( self_type const& s );
basic_cstring& operator=( std_string const& s );
basic_cstring& operator=( pointer s );
template<typename CharT2>
basic_cstring& assign( basic_cstring<CharT2> const& s )
{
return *this = basic_cstring<CharT>( s.begin(), s.end() );
}
template<typename PosType, typename LenType>
basic_cstring& assign( self_type const& s, PosType pos, LenType len )
{
return *this = self_type( s.m_begin + pos, len );
}
basic_cstring& assign( std_string const& s );
template<typename PosType, typename LenType>
basic_cstring& assign( std_string const& s, PosType pos, LenType len )
{
return *this = self_type( s.c_str() + pos, len );
}
basic_cstring& assign( pointer s );
template<typename LenType>
basic_cstring& assign( pointer s, LenType len )
{
return *this = self_type( s, len );
}
basic_cstring& assign( pointer f, pointer l );
// swapping
void swap( self_type& s );
// Iterators
iterator begin();
const_iterator begin() const;
iterator end();
const_iterator end() const;
// !! should have rbegin, rend
// substring search operation
size_type find( basic_cstring ) const;
size_type rfind( basic_cstring ) const;
self_type substr( size_type beg_index, size_type end_index = npos ) const;
private:
static self_type default_trim_ex();
// Data members
iterator m_begin;
iterator m_end;
};
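// A minimal usage sketch (illustrative only; not part of the original header):
//
//   basic_cstring<char const> cs( "  hello world  " );
//   cs.trim();                                        // strips surrounding blanks
//   basic_cstring<char const>::size_type p = cs.find( "world" );
//   basic_cstring<char const> sub = cs.substr( p );   // compares equal to "world"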
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::pointer
basic_cstring<CharT>::null_str()
{
static CharT null = 0;
return &null;
}
//____________________________________________________________________________//
template<typename CharT>
inline
basic_cstring<CharT>::basic_cstring()
: m_begin( null_str() )
, m_end( m_begin )
{
}
//____________________________________________________________________________//
template<typename CharT>
inline
basic_cstring<CharT>::basic_cstring( std_string const& s )
: m_begin( s.c_str() )
, m_end( m_begin + s.size() )
{
}
//____________________________________________________________________________//
template<typename CharT>
inline
basic_cstring<CharT>::basic_cstring( pointer s )
: m_begin( s ? s : null_str() )
, m_end ( m_begin + (s ? traits_type::length( s ) : 0 ) )
{
}
//____________________________________________________________________________//
template<typename CharT>
inline
basic_cstring<CharT>::basic_cstring( pointer first, pointer last )
: m_begin( first )
, m_end( last )
{
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::value_ret_type
basic_cstring<CharT>::operator[]( size_type index ) const
{
return m_begin[index];
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::value_ret_type
basic_cstring<CharT>::at( size_type index ) const
{
if( m_begin + index >= m_end )
return static_cast<value_type>(0);
return m_begin[index];
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::size_type
basic_cstring<CharT>::size() const
{
return static_cast<size_type>(m_end - m_begin);
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
basic_cstring<CharT>::is_empty() const
{
return m_end == m_begin;
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
basic_cstring<CharT>::empty() const
{
return is_empty();
}
//____________________________________________________________________________//
template<typename CharT>
inline void
basic_cstring<CharT>::clear()
{
m_begin = m_end;
}
//____________________________________________________________________________//
template<typename CharT>
inline void
basic_cstring<CharT>::resize( size_type new_len )
{
if( m_begin + new_len < m_end )
m_end = m_begin + new_len;
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::trim_left( size_type trim_size )
{
m_begin += trim_size;
if( m_end <= m_begin )
clear();
return *this;
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::trim_left( iterator it )
{
m_begin = it;
if( m_end <= m_begin )
clear();
return *this;
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::trim_left( basic_cstring exclusions )
{
if( exclusions.is_empty() )
exclusions = default_trim_ex();
iterator it;
for( it = begin(); it != end(); ++it ) {
if( traits_type::find( exclusions.begin(), exclusions.size(), *it ) == reinterpret_cast<pointer>(0) )
break;
}
return trim_left( it );
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::trim_right( size_type trim_size )
{
m_end -= trim_size;
if( m_end <= m_begin )
clear();
return *this;
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::trim_right( iterator it )
{
m_end = it;
if( m_end <= m_begin )
clear();
return *this;
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::trim_right( basic_cstring exclusions )
{
if( exclusions.is_empty() )
exclusions = default_trim_ex();
iterator it;
for( it = end()-1; it != begin()-1; --it ) {
if( self_type::traits_type::find( exclusions.begin(), exclusions.size(), *it ) == reinterpret_cast<pointer>(0) )
break;
}
return trim_right( it+1 );
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::trim( basic_cstring exclusions )
{
trim_left( exclusions );
trim_right( exclusions );
return *this;
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::operator=( basic_cstring<CharT> const& s )
{
m_begin = s.m_begin;
m_end = s.m_end;
return *this;
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::operator=( std_string const& s )
{
return *this = self_type( s );
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::operator=( pointer s )
{
return *this = self_type( s );
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::assign( std_string const& s )
{
return *this = self_type( s );
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::assign( pointer s )
{
return *this = self_type( s );
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>&
basic_cstring<CharT>::assign( pointer f, pointer l )
{
return *this = self_type( f, l );
}
//____________________________________________________________________________//
template<typename CharT>
inline void
basic_cstring<CharT>::swap( basic_cstring<CharT>& s )
{
    // do not want to include <algorithm>
pointer tmp1 = m_begin;
pointer tmp2 = m_end;
m_begin = s.m_begin;
m_end = s.m_end;
s.m_begin = tmp1;
s.m_end = tmp2;
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::iterator
basic_cstring<CharT>::begin()
{
return m_begin;
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::const_iterator
basic_cstring<CharT>::begin() const
{
return m_begin;
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::iterator
basic_cstring<CharT>::end()
{
return m_end;
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::const_iterator
basic_cstring<CharT>::end() const
{
return m_end;
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::size_type
basic_cstring<CharT>::find( basic_cstring<CharT> str ) const
{
if( str.is_empty() || str.size() > size() )
return static_cast<size_type>(npos);
const_iterator it = begin();
const_iterator last = end() - str.size() + 1;
while( it != last ) {
if( traits_type::compare( it, str.begin(), str.size() ) == 0 )
break;
++it;
}
return it == last ? npos : static_cast<size_type>(it - begin());
}
//____________________________________________________________________________//
template<typename CharT>
inline typename basic_cstring<CharT>::size_type
basic_cstring<CharT>::rfind( basic_cstring<CharT> str ) const
{
if( str.is_empty() || str.size() > size() )
return static_cast<size_type>(npos);
const_iterator it = end() - str.size();
const_iterator last = begin()-1;
while( it != last ) {
if( traits_type::compare( it, str.begin(), str.size() ) == 0 )
break;
--it;
}
return it == last ? static_cast<size_type>(npos) : static_cast<size_type>(it - begin());
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>
basic_cstring<CharT>::substr( size_type beg_index, size_type end_index ) const
{
return beg_index > size()
? self_type()
: end_index > size()
? self_type( m_begin + beg_index, m_end )
: self_type( m_begin + beg_index, m_begin + end_index );
}
//____________________________________________________________________________//
template<typename CharT>
inline basic_cstring<CharT>
basic_cstring<CharT>::default_trim_ex()
{
static CharT ws[3] = { CharT(' '), CharT('\t'), CharT('\n') }; // !! wide case
return self_type( ws, 3 );
}
//____________________________________________________________________________//
// ************************************************************************** //
// ************** comparison operators ************** //
// ************************************************************************** //
template<typename CharT1,typename CharT2>
inline bool
operator==( basic_cstring<CharT1> const& s1, basic_cstring<CharT2> const& s2 )
{
typedef typename basic_cstring<CharT1>::traits_type traits_type;
return s1.size() == s2.size() &&
traits_type::compare( s1.begin(), s2.begin(), s1.size() ) == 0;
}
//____________________________________________________________________________//
template<typename CharT1,typename CharT2>
inline bool
operator==( basic_cstring<CharT1> const& s1, CharT2* s2 )
{
#if !defined(__DMC__)
return s1 == basic_cstring<CharT2>( s2 );
#else
return s1 == basic_cstring<CharT2 const>( s2 );
#endif
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
operator==( basic_cstring<CharT> const& s1, typename basic_cstring<CharT>::std_string const& s2 )
{
return s1 == basic_cstring<CharT>( s2 );
}
//____________________________________________________________________________//
template<typename CharT1,typename CharT2>
inline bool
operator==( CharT1* s2, basic_cstring<CharT2> const& s1 )
{
return s1 == s2;
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
operator==( typename basic_cstring<CharT>::std_string const& s2, basic_cstring<CharT> const& s1 )
{
return s1 == s2;
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
operator!=( basic_cstring<CharT> const& s1, CharT* s2 )
{
return !(s1 == s2);
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
operator!=( CharT* s2, basic_cstring<CharT> const& s1 )
{
return !(s1 == s2);
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
operator!=( basic_cstring<CharT> const& s1, basic_cstring<CharT> const& s2 )
{
return !(s1 == s2);
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
operator!=( basic_cstring<CharT> const& s1, typename basic_cstring<CharT>::std_string const& s2 )
{
return !(s1 == s2);
}
//____________________________________________________________________________//
template<typename CharT>
inline bool
operator!=( typename basic_cstring<CharT>::std_string const& s2, basic_cstring<CharT> const& s1 )
{
return !(s1 == s2);
}
//____________________________________________________________________________//
// ************************************************************************** //
// ************** first_char ************** //
// ************************************************************************** //
template<typename CharT>
inline typename basic_cstring<CharT>::value_ret_type
first_char( basic_cstring<CharT> source )
{
typedef typename basic_cstring<CharT>::value_ret_type res_type;
return source.is_empty() ? static_cast<res_type>(0) : *source.begin();
}
//____________________________________________________________________________//
// ************************************************************************** //
// ************** last_char ************** //
// ************************************************************************** //
template<typename CharT>
inline typename basic_cstring<CharT>::value_ret_type
last_char( basic_cstring<CharT> source )
{
typedef typename basic_cstring<CharT>::value_ret_type res_type;
return source.is_empty() ? static_cast<res_type>(0) : *(source.end()-1);
}
//____________________________________________________________________________//
// ************************************************************************** //
// ************** assign_op ************** //
// ************************************************************************** //
template<typename CharT1, typename CharT2>
inline void
assign_op( std::basic_string<CharT1>& target, basic_cstring<CharT2> src, int )
{
target.assign( src.begin(), src.size() );
}
//____________________________________________________________________________//
template<typename CharT1, typename CharT2>
inline std::basic_string<CharT1>&
operator+=( std::basic_string<CharT1>& target, basic_cstring<CharT2> const& str )
{
target.append( str.begin(), str.end() );
return target;
}
//____________________________________________________________________________//
template<typename CharT1, typename CharT2>
inline std::basic_string<CharT1>
operator+( std::basic_string<CharT1> const& lhs, basic_cstring<CharT2> const& rhs )
{
std::basic_string<CharT1> res( lhs );
res.append( rhs.begin(), rhs.end() );
return res;
}
//____________________________________________________________________________//
} // namespace unit_test
} // namespace pdalboost
//____________________________________________________________________________//
#include <boost/test/detail/enable_warnings.hpp>
#endif // BOOST_TEST_UTILS_BASIC_CSTRING_HPP
| lucadelu/PDAL | vendor/pdalboost/boost/test/utils/basic_cstring/basic_cstring.hpp | C++ | bsd-3-clause | 21,254 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/test/base/tracing.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "base/trace_event/trace_event.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/tracing_controller.h"
#include "content/public/test/test_utils.h"
namespace {
using content::BrowserThread;
class StringTraceSink : public content::TracingController::TraceDataSink {
public:
StringTraceSink(std::string* result, const base::Closure& callback)
: result_(result), completion_callback_(callback) {}
void AddTraceChunk(const std::string& chunk) override {
*result_ += result_->empty() ? "[" : ",";
*result_ += chunk;
}
void Close() override {
if (!result_->empty())
*result_ += "]";
completion_callback_.Run();
}
private:
~StringTraceSink() override {}
std::string* result_;
base::Closure completion_callback_;
DISALLOW_COPY_AND_ASSIGN(StringTraceSink);
};
class InProcessTraceController {
public:
static InProcessTraceController* GetInstance() {
return Singleton<InProcessTraceController>::get();
}
InProcessTraceController()
: is_waiting_on_watch_(false),
watch_notification_count_(0) {}
virtual ~InProcessTraceController() {}
bool BeginTracing(const std::string& category_patterns) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
return content::TracingController::GetInstance()->EnableRecording(
base::trace_event::CategoryFilter(category_patterns),
base::trace_event::TraceOptions(),
content::TracingController::EnableRecordingDoneCallback());
}
bool BeginTracingWithWatch(const std::string& category_patterns,
const std::string& category_name,
const std::string& event_name,
int num_occurrences) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
DCHECK(num_occurrences > 0);
watch_notification_count_ = num_occurrences;
if (!content::TracingController::GetInstance()->SetWatchEvent(
category_name, event_name,
base::Bind(&InProcessTraceController::OnWatchEventMatched,
base::Unretained(this)))) {
return false;
}
if (!content::TracingController::GetInstance()->EnableRecording(
base::trace_event::CategoryFilter(category_patterns),
base::trace_event::TraceOptions(),
base::Bind(&InProcessTraceController::OnEnableTracingComplete,
base::Unretained(this)))) {
return false;
}
message_loop_runner_ = new content::MessageLoopRunner;
message_loop_runner_->Run();
return true;
}
bool WaitForWatchEvent(base::TimeDelta timeout) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (watch_notification_count_ == 0)
return true;
if (timeout != base::TimeDelta()) {
timer_.Start(FROM_HERE, timeout, this,
&InProcessTraceController::Timeout);
}
is_waiting_on_watch_ = true;
message_loop_runner_ = new content::MessageLoopRunner;
message_loop_runner_->Run();
is_waiting_on_watch_ = false;
return watch_notification_count_ == 0;
}
bool EndTracing(std::string* json_trace_output) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
using namespace base::debug;
if (!content::TracingController::GetInstance()->DisableRecording(
new StringTraceSink(
json_trace_output,
base::Bind(&InProcessTraceController::OnTracingComplete,
base::Unretained(this))))) {
return false;
}
// Wait for OnEndTracingComplete() to quit the message loop.
message_loop_runner_ = new content::MessageLoopRunner;
message_loop_runner_->Run();
// Watch notifications can occur during this method's message loop run, but
// not after, so clear them here.
watch_notification_count_ = 0;
return true;
}
private:
friend struct DefaultSingletonTraits<InProcessTraceController>;
void OnEnableTracingComplete() {
message_loop_runner_->Quit();
}
void OnTracingComplete() { message_loop_runner_->Quit(); }
void OnWatchEventMatched() {
if (watch_notification_count_ == 0)
return;
if (--watch_notification_count_ == 0) {
timer_.Stop();
if (is_waiting_on_watch_)
message_loop_runner_->Quit();
}
}
void Timeout() {
DCHECK(is_waiting_on_watch_);
message_loop_runner_->Quit();
}
scoped_refptr<content::MessageLoopRunner> message_loop_runner_;
base::OneShotTimer<InProcessTraceController> timer_;
bool is_waiting_on_watch_;
int watch_notification_count_;
DISALLOW_COPY_AND_ASSIGN(InProcessTraceController);
};
} // namespace
namespace tracing {
bool BeginTracing(const std::string& category_patterns) {
return InProcessTraceController::GetInstance()->BeginTracing(
category_patterns);
}
bool BeginTracingWithWatch(const std::string& category_patterns,
const std::string& category_name,
const std::string& event_name,
int num_occurrences) {
return InProcessTraceController::GetInstance()->BeginTracingWithWatch(
category_patterns, category_name, event_name, num_occurrences);
}
bool WaitForWatchEvent(base::TimeDelta timeout) {
return InProcessTraceController::GetInstance()->WaitForWatchEvent(timeout);
}
bool EndTracing(std::string* json_trace_output) {
return InProcessTraceController::GetInstance()->EndTracing(json_trace_output);
}
} // namespace tracing
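// A minimal usage sketch for a browser test body (illustrative only; the
// category/event names and the TRACE_EVENT0 call site are placeholders):
//
//   ASSERT_TRUE(tracing::BeginTracingWithWatch("benchmark", "benchmark",
//                                              "MyEvent", 1));
//   // ... exercise code that emits TRACE_EVENT0("benchmark", "MyEvent") ...
//   EXPECT_TRUE(tracing::WaitForWatchEvent(base::TimeDelta()));  // 0 = no timeout
//   std::string json_events;
//   ASSERT_TRUE(tracing::EndTracing(&json_events));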
| guorendong/iridium-browser-ubuntu | chrome/test/base/tracing.cc | C++ | bsd-3-clause | 5,907 |
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/webcodecs/codec_logger.h"
#include <string>
#include "media/base/media_util.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/core/execution_context/execution_context.h"
#include "third_party/blink/renderer/core/inspector/inspector_media_context_impl.h"
#include "third_party/blink/renderer/platform/wtf/wtf.h"
namespace blink {
CodecLogger::CodecLogger(
ExecutionContext* context,
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
DCHECK(context);
// Owners of |this| should be ExecutionLifeCycleObservers, and should call
// Neuter() if |context| is destroyed. The MediaInspectorContextImpl must
// outlive |parent_media_log_|. If |context| is already destroyed, owners
// might never call Neuter(), and MediaInspectorContextImpl* could be garbage
// collected before |parent_media_log_| is destroyed.
if (!context->IsContextDestroyed()) {
parent_media_log_ = Platform::Current()->GetMediaLog(
MediaInspectorContextImpl::From(*context), task_runner,
/*is_on_worker=*/!IsMainThread());
}
// NullMediaLog silently and safely does nothing.
if (!parent_media_log_)
parent_media_log_ = std::make_unique<media::NullMediaLog>();
// This allows us to destroy |parent_media_log_| and stop logging,
// without causing problems to |media_log_| users.
media_log_ = parent_media_log_->Clone();
}
DOMException* CodecLogger::MakeException(std::string error_msg,
media::Status status) {
media_log_->NotifyError(status);
if (status_code_ == media::StatusCode::kOk) {
DCHECK(!status.is_ok());
status_code_ = status.code();
}
return MakeGarbageCollected<DOMException>(DOMExceptionCode::kOperationError,
error_msg.c_str());
}
DOMException* CodecLogger::MakeException(std::string error_msg,
media::StatusCode code,
const base::Location& location) {
if (status_code_ == media::StatusCode::kOk) {
DCHECK_NE(code, media::StatusCode::kOk);
status_code_ = code;
}
return MakeException(error_msg, media::Status(code, error_msg, location));
}
CodecLogger::~CodecLogger() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
void CodecLogger::Neuter() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
parent_media_log_ = nullptr;
}
} // namespace blink
| scheib/chromium | third_party/blink/renderer/modules/webcodecs/codec_logger.cc | C++ | bsd-3-clause | 2,677 |
define([
"dojo/_base/declare",
"dojo/_base/sniff",
"dojo/dom-class",
"dojo/dom-construct",
"dojo/dom-style",
"dijit/_Contained",
"dijit/_Container",
"dijit/_WidgetBase",
"./IconMenuItem"
], function(declare, has, domClass, domConstruct, domStyle, Contained, Container, WidgetBase){
// module:
// dojox/mobile/IconMenu
return declare("dojox.mobile.IconMenu", [WidgetBase, Container, Contained], {
// summary:
// A pop-up menu.
// description:
// The dojox/mobile/IconMenu widget displays a pop-up menu just
// like iPhone's call options menu that is shown while you are on a
// call. Each menu item must be dojox/mobile/IconMenuItem.
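		// example:
		//		A declarative usage sketch (illustrative; the label attribute on
		//		the child items is assumed from the usual dojox/mobile item
		//		conventions and is not defined in this module):
		//	|	<ul data-dojo-type="dojox.mobile.IconMenu" data-dojo-props='cols:3'>
		//	|		<li data-dojo-type="dojox.mobile.IconMenuItem" label="Mute"></li>
		//	|		<li data-dojo-type="dojox.mobile.IconMenuItem" label="Keypad"></li>
		//	|		<li data-dojo-type="dojox.mobile.IconMenuItem" label="Hold"></li>
		//	|	</ul>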
// transition: String
// The default animated transition effect for child items.
transition: "slide",
// iconBase: String
// The default icon path for child items.
iconBase: "",
// iconPos: String
// The default icon position for child items.
iconPos: "",
// cols: Number
// The number of child items in a row.
cols: 3,
// tag: String
		//		The name of the HTML tag to create as the domNode.
tag: "ul",
/* internal properties */
selectOne: false,
// baseClass: String
// The name of the CSS class of this widget.
baseClass: "mblIconMenu",
// childItemClass: String
// The name of the CSS class of menu items.
childItemClass: "mblIconMenuItem",
// _createTerminator: [private] Boolean
_createTerminator: false,
buildRendering: function(){
this.domNode = this.containerNode = this.srcNodeRef || domConstruct.create(this.tag);
this.inherited(arguments);
if(this._createTerminator){
var t = this._terminator = domConstruct.create("br");
t.className = this.childItemClass + "Terminator";
this.domNode.appendChild(t);
}
},
startup: function(){
if(this._started){ return; }
this.refresh();
this.inherited(arguments);
},
refresh: function(){
var p = this.getParent();
if(p){
domClass.remove(p.domNode, "mblSimpleDialogDecoration");
}
var children = this.getChildren();
if(this.cols){
var nRows = Math.ceil(children.length / this.cols);
var w = Math.floor(100/this.cols);
var _w = 100 - w*this.cols;
var h = Math.floor(100 / nRows);
var _h = 100 - h*nRows;
if(has("ie")){
_w--;
_h--;
}
}
for(var i = 0; i < children.length; i++){
var item = children[i];
if(this.cols){
var first = ((i % this.cols) === 0); // first column
var last = (((i + 1) % this.cols) === 0); // last column
var rowIdx = Math.floor(i / this.cols);
domStyle.set(item.domNode, {
width: w + (last ? _w : 0) + "%",
height: h + ((rowIdx + 1 === nRows) ? _h : 0) + "%"
});
domClass.toggle(item.domNode, this.childItemClass + "FirstColumn", first);
domClass.toggle(item.domNode, this.childItemClass + "LastColumn", last);
domClass.toggle(item.domNode, this.childItemClass + "FirstRow", rowIdx === 0);
domClass.toggle(item.domNode, this.childItemClass + "LastRow", rowIdx + 1 === nRows);
}
};
},
addChild: function(widget, /*Number?*/insertIndex){
this.inherited(arguments);
this.refresh();
},
hide: function(){
var p = this.getParent();
if(p && p.hide){
p.hide();
}
}
});
});
| kitsonk/expo | src/dojox/mobile/IconMenu.js | JavaScript | bsd-3-clause | 3,265 |
// -*-Mode: C++;-*-
// * BeginRiceCopyright *****************************************************
//
// $HeadURL$
// $Id$
//
// --------------------------------------------------------------------------
// Part of HPCToolkit (hpctoolkit.org)
//
// Information about sources of support for research and development of
// HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
// --------------------------------------------------------------------------
//
// Copyright ((c)) 2002-2015, Rice University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Rice University (RICE) nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// This software is provided by RICE and contributors "as is" and any
// express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular
// purpose are disclaimed. In no event shall RICE or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages (including, but not limited to, procurement of
// substitute goods or services; loss of use, data, or profits; or
// business interruption) however caused and on any theory of liability,
// whether in contract, strict liability, or tort (including negligence
// or otherwise) arising in any way out of the use of this software, even
// if advised of the possibility of such damage.
//
// ******************************************************* EndRiceCopyright *
//***************************************************************************
//
// File:
// $HeadURL$
//
// Purpose:
// [The purpose of this file]
//
// Description:
// [The set of functions, macros, etc. defined in the file]
//
//***************************************************************************
#ifndef LRULIST_H_
#define LRULIST_H_
#include <vector>
#include <list>
using std::list;
using std::vector;
namespace TraceviewerServer {
template <typename T>
class LRUList {
private:
int totalPages;
int usedPages;
int currentFront;
list<T*> useOrder;
list<T*> removed;
vector<typename list<T*>::iterator> iters;
public:
/**
* It's a special data structure that uses a little extra memory in exchange
* for all operations running in constant time. Objects are added to the
* data structure and given an index that identifies them.
* Once added, objects are not deleted; they are either on the "in use" list,
* which for VersatileMemoryPage corresponds to being mapped, or they are on
* the "not in use" list, which corresponds to not being mapped. An object
* must only be added with addNew() once. If it is removed from the list
	 * with removeLast(), it must be added back with reAdd(). It is not necessary to call
* putOnTop() after adding an element as it will already be on top.
*/
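	// A minimal usage sketch (illustrative only; "Page" stands in for the
	// caller's element type, e.g. VersatileMemoryPage, and the caller keeps
	// track of the index returned by addNew for each element):
	//
	//   LRUList<Page> lru(64);
	//   int idx = lru.addNew(&page);   // page starts on the "in use" list, on top
	//   lru.putOnTop(idx);             // mark page as most recently used
	//   Page* victim = lru.getLast();  // least recently used element
	//   lru.removeLast();              // move the victim to the "not in use" list
	//   lru.reAdd(victimIdx);          // later: bring the removed element back on top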
LRUList(int expectedMaxSize)//Up to linear time
{
iters.reserve(expectedMaxSize);
totalPages = 0;
usedPages = 0;
currentFront = -1;
}
int addNew(T* toAdd)//constant time
{
useOrder.push_front(toAdd);
int index = totalPages++;
currentFront = index;
iters.push_back(useOrder.begin());
usedPages++;
return index;
}
int addNewUnused(T* toAdd)//constant time
{
removed.push_front(toAdd);
int index = totalPages++;
iters.push_back(removed.begin());
return index;
}
void putOnTop(int index)//Constant time
{
if (index == currentFront) return;
typename list<T*>::iterator it;
it = iters[index];
useOrder.splice(useOrder.begin(), useOrder, it);
currentFront = index;
}
T* getLast()
{
return useOrder.back();
}
void removeLast()//Constant time
{
removed.splice(removed.end(), useOrder, --useOrder.end());
usedPages--;
}
void reAdd(int index)//Constant time
{
typename list<T*>::iterator it = iters[index];
useOrder.splice(useOrder.begin(), removed, it);
currentFront = index;
usedPages++;
}
int getTotalPageCount()//Constant time
{
return totalPages;
}
int getUsedPageCount()
{
return usedPages;
}
virtual ~LRUList()
{
}
/*int dump()
{
int x = 0;
puts("Objects \"in use\" from most recently used to least recently used");
typename list<T*>::iterator it = useOrder.begin();
for(; it != useOrder.end(); ++it){
printf("%d\n", ((TestData*)*it)->index);
x++;
}
puts("Objects \"not in use\" from most recently used to least recently used");
it = removed.begin();
for(; it != removed.end(); ++it){
printf("%d\n", ((TestData*)*it)->index);
}
cout << "Used count: " << usedPages <<" supposed to be " << x<<endl;
return x;
}*/
};
} /* namespace TraceviewerServer */
#endif /* LRULIST_H_ */
| zcth428/hpctoolkit111 | src/tool/hpcserver/LRUList.hpp | C++ | bsd-3-clause | 5,239 |
// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// Test target teams distribute codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TCHECK
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// SIMD-ONLY1-NOT: {{__kmpc|__tgt}}
// Check that no target code is emitted if no omptests flag was provided.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-NTARGET
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY2 %s
// SIMD-ONLY2-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-DAG: [[SA:%.+]] = type { [4 x i32] }
// CHECK-DAG: [[SB:%.+]] = type { [8 x i32] }
// CHECK-DAG: [[SC:%.+]] = type { [16 x i32] }
// CHECK-DAG: [[SD:%.+]] = type { [32 x i32] }
// CHECK-DAG: [[SE:%.+]] = type { [64 x i32] }
// CHECK-DAG: [[ST1:%.+]] = type { [228 x i32] }
// CHECK-DAG: [[ST2:%.+]] = type { [1128 x i32] }
// CHECK-DAG: [[ENTTY:%.+]] = type { i8*, i8*, i[[SZ:32|64]], i32, i32 }
// CHECK-DAG: [[DEVTY:%.+]] = type { i8*, i8*, [[ENTTY]]*, [[ENTTY]]* }
// CHECK-DAG: [[DSCTY:%.+]] = type { i32, [[DEVTY]]*, [[ENTTY]]*, [[ENTTY]]* }
// TCHECK: [[ENTTY:%.+]] = type { i8*, i8*, i[[SZ:32|64]], i32, i32 }
// CHECK-DAG: $[[REGFN:\.omp_offloading\..+]] = comdat
// CHECK-DAG: [[A1:@.+]] = internal global [[SA]]
// CHECK-DAG: [[A2:@.+]] = global [[SA]]
// CHECK-DAG: [[B1:@.+]] = global [[SB]]
// CHECK-DAG: [[B2:@.+]] = global [[SB]]
// CHECK-DAG: [[C1:@.+]] = internal global [[SC]]
// CHECK-DAG: [[D1:@.+]] = global [[SD]]
// CHECK-DAG: [[E1:@.+]] = global [[SE]]
// CHECK-DAG: [[T1:@.+]] = global [[ST1]]
// CHECK-DAG: [[T2:@.+]] = global [[ST2]]
// CHECK-NTARGET-DAG: [[SA:%.+]] = type { [4 x i32] }
// CHECK-NTARGET-DAG: [[SB:%.+]] = type { [8 x i32] }
// CHECK-NTARGET-DAG: [[SC:%.+]] = type { [16 x i32] }
// CHECK-NTARGET-DAG: [[SD:%.+]] = type { [32 x i32] }
// CHECK-NTARGET-DAG: [[SE:%.+]] = type { [64 x i32] }
// CHECK-NTARGET-DAG: [[ST1:%.+]] = type { [228 x i32] }
// CHECK-NTARGET-DAG: [[ST2:%.+]] = type { [1128 x i32] }
// CHECK-NTARGET-NOT: type { i8*, i8*, %
// CHECK-NTARGET-NOT: type { i32, %
// We have 7 target regions
// CHECK-DAG: {{@.+}} = weak constant i8 0
// TCHECK-NOT: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: {{@.+}} = weak constant i8 0
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i[[SZ]]] [i[[SZ]] 4]
// CHECK-DAG: {{@.+}} = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-NTARGET-NOT: weak constant i8 0
// CHECK-NTARGET-NOT: private unnamed_addr constant [1 x i
// CHECK-DAG: [[NAMEPTR1:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME1:__omp_offloading_[0-9a-f]+_[0-9a-f]+__Z.+_l[0-9]+]]\00"
// CHECK-DAG: [[ENTRY1:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR1]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR2:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME2:.+]]\00"
// CHECK-DAG: [[ENTRY2:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR2]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR3:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME3:.+]]\00"
// CHECK-DAG: [[ENTRY3:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR3]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR4:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME4:.+]]\00"
// CHECK-DAG: [[ENTRY4:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR4]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR5:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME5:.+]]\00"
// CHECK-DAG: [[ENTRY5:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR5]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR6:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME6:.+]]\00"
// CHECK-DAG: [[ENTRY6:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR6]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR7:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME7:.+]]\00"
// CHECK-DAG: [[ENTRY7:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR7]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR8:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME8:.+]]\00"
// CHECK-DAG: [[ENTRY8:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR8]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR9:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME9:.+]]\00"
// CHECK-DAG: [[ENTRY9:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR9]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR10:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME10:.+]]\00"
// CHECK-DAG: [[ENTRY10:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR10]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR11:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME11:.+]]\00"
// CHECK-DAG: [[ENTRY11:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR11]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK-DAG: [[NAMEPTR12:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME12:.+]]\00"
// CHECK-DAG: [[ENTRY12:@.+]] = weak constant [[ENTTY]] { i8* @{{.*}}, i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR12]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR1:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME1:__omp_offloading_[0-9a-f]+_[0-9a-f]+__Z.+_l[0-9]+]]\00"
// TCHECK-DAG: [[ENTRY1:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR1]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR2:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME2:.+]]\00"
// TCHECK-DAG: [[ENTRY2:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR2]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR3:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME3:.+]]\00"
// TCHECK-DAG: [[ENTRY3:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR3]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR4:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME4:.+]]\00"
// TCHECK-DAG: [[ENTRY4:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR4]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR5:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME5:.+]]\00"
// TCHECK-DAG: [[ENTRY5:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR5]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR6:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME6:.+]]\00"
// TCHECK-DAG: [[ENTRY6:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR6]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR7:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME7:.+]]\00"
// TCHECK-DAG: [[ENTRY7:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR7]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR8:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME8:.+]]\00"
// TCHECK-DAG: [[ENTRY8:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR8]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR9:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME9:.+]]\00"
// TCHECK-DAG: [[ENTRY9:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR9]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR10:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME10:.+]]\00"
// TCHECK-DAG: [[ENTRY10:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR10]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR11:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME11:.+]]\00"
// TCHECK-DAG: [[ENTRY11:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR11]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// TCHECK-DAG: [[NAMEPTR12:@.+]] = internal unnamed_addr constant [{{.*}} x i8] c"[[NAME12:.+]]\00"
// TCHECK-DAG: [[ENTRY12:@.+]] = weak constant [[ENTTY]] { i8* bitcast (void (i[[SZ]])* @{{.*}} to i8*), i8* getelementptr inbounds ([{{.*}} x i8], [{{.*}} x i8]* [[NAMEPTR12]], i32 0, i32 0), i[[SZ]] 0, i32 0, i32 0 }, section ".omp_offloading.entries", align 1
// CHECK: [[ENTBEGIN:@.+]] = external constant [[ENTTY]]
// CHECK: [[ENTEND:@.+]] = external constant [[ENTTY]]
// CHECK: [[DEVBEGIN:@.+]] = extern_weak constant i8
// CHECK: [[DEVEND:@.+]] = extern_weak constant i8
// CHECK: [[IMAGES:@.+]] = internal unnamed_addr constant [1 x [[DEVTY]]] [{{.+}} { i8* [[DEVBEGIN]], i8* [[DEVEND]], [[ENTTY]]* [[ENTBEGIN]], [[ENTTY]]* [[ENTEND]] }], comdat($[[REGFN]])
// CHECK: [[DESC:@.+]] = internal constant [[DSCTY]] { i32 1, [[DEVTY]]* getelementptr inbounds ([1 x [[DEVTY]]], [1 x [[DEVTY]]]* [[IMAGES]], i32 0, i32 0), [[ENTTY]]* [[ENTBEGIN]], [[ENTTY]]* [[ENTEND]] }, comdat($[[REGFN]])
// We have 4 initializers: one for the 500 priority, another one for 501, one more for the default priority, and the last one for the offloading registration function.
// CHECK: @llvm.global_ctors = appending global [4 x { i32, void ()*, i8* }] [
// CHECK-SAME: { i32, void ()*, i8* } { i32 500, void ()* [[P500:@[^,]+]], i8* null },
// CHECK-SAME: { i32, void ()*, i8* } { i32 501, void ()* [[P501:@[^,]+]], i8* null },
// CHECK-SAME: { i32, void ()*, i8* } { i32 65535, void ()* [[PMAX:@[^,]+]], i8* null },
// CHECK-SAME: { i32, void ()*, i8* } { i32 0, void ()* @[[REGFN]], i8* bitcast (void ()* @[[REGFN]] to i8*) }]
// CHECK-NTARGET: @llvm.global_ctors = appending global [3 x { i32, void ()*, i8* }] [
extern int *R;
struct SA {
int arr[4];
void foo() {
int a = *R;
a += 1;
*R = a;
}
SA() {
int a = *R;
a += 2;
*R = a;
}
~SA() {
int a = *R;
a += 3;
*R = a;
}
};
struct SB {
int arr[8];
void foo() {
int a = *R;
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
a += 4;
*R = a;
}
SB() {
int a = *R;
a += 5;
*R = a;
}
~SB() {
int a = *R;
a += 6;
*R = a;
}
};
struct SC {
int arr[16];
void foo() {
int a = *R;
a += 7;
*R = a;
}
SC() {
int a = *R;
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
a += 8;
*R = a;
}
~SC() {
int a = *R;
a += 9;
*R = a;
}
};
struct SD {
int arr[32];
void foo() {
int a = *R;
a += 10;
*R = a;
}
SD() {
int a = *R;
a += 11;
*R = a;
}
~SD() {
int a = *R;
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
a += 12;
*R = a;
}
};
struct SE {
int arr[64];
void foo() {
int a = *R;
#pragma omp target teams distribute if(target: 0)
for (int i = 0; i < 10; ++i)
a += 13;
*R = a;
}
SE() {
int a = *R;
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
a += 14;
*R = a;
}
~SE() {
int a = *R;
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
a += 15;
*R = a;
}
};
template <int x>
struct ST {
int arr[128 + x];
void foo() {
int a = *R;
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
a += 16 + x;
*R = a;
}
ST() {
int a = *R;
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
a += 17 + x;
*R = a;
}
~ST() {
int a = *R;
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
a += 18 + x;
*R = a;
}
};
// We have to make sure we use all the target regions:
//CHECK-DAG: define internal void @[[NAME1]](
//CHECK-DAG: call void @[[NAME1]](
//CHECK-DAG: define internal void @[[NAME2]](
//CHECK-DAG: call void @[[NAME2]](
//CHECK-DAG: define internal void @[[NAME3]](
//CHECK-DAG: call void @[[NAME3]](
//CHECK-DAG: define internal void @[[NAME4]](
//CHECK-DAG: call void @[[NAME4]](
//CHECK-DAG: define internal void @[[NAME5]](
//CHECK-DAG: call void @[[NAME5]](
//CHECK-DAG: define internal void @[[NAME6]](
//CHECK-DAG: call void @[[NAME6]](
//CHECK-DAG: define internal void @[[NAME7]](
//CHECK-DAG: call void @[[NAME7]](
//CHECK-DAG: define internal void @[[NAME8]](
//CHECK-DAG: call void @[[NAME8]](
//CHECK-DAG: define internal void @[[NAME9]](
//CHECK-DAG: call void @[[NAME9]](
//CHECK-DAG: define internal void @[[NAME10]](
//CHECK-DAG: call void @[[NAME10]](
//CHECK-DAG: define internal void @[[NAME11]](
//CHECK-DAG: call void @[[NAME11]](
//CHECK-DAG: define internal void @[[NAME12]](
//CHECK-DAG: call void @[[NAME12]](
//TCHECK-DAG: define weak void @[[NAME1]](
//TCHECK-DAG: define weak void @[[NAME2]](
//TCHECK-DAG: define weak void @[[NAME3]](
//TCHECK-DAG: define weak void @[[NAME4]](
//TCHECK-DAG: define weak void @[[NAME5]](
//TCHECK-DAG: define weak void @[[NAME6]](
//TCHECK-DAG: define weak void @[[NAME7]](
//TCHECK-DAG: define weak void @[[NAME8]](
//TCHECK-DAG: define weak void @[[NAME9]](
//TCHECK-DAG: define weak void @[[NAME10]](
//TCHECK-DAG: define weak void @[[NAME11]](
//TCHECK-DAG: define weak void @[[NAME12]](
// CHECK-NTARGET-NOT: __tgt_target
// CHECK-NTARGET-NOT: __tgt_register_lib
// CHECK-NTARGET-NOT: __tgt_unregister_lib
// TCHECK-NOT: __tgt_target
// TCHECK-NOT: __tgt_register_lib
// TCHECK-NOT: __tgt_unregister_lib
// We have 2 initializers with priority 500
//CHECK: define internal void [[P500]](
//CHECK: call void @{{.+}}()
//CHECK: call void @{{.+}}()
//CHECK-NOT: call void @{{.+}}()
//CHECK: ret void
// We have 1 initializer with priority 501
//CHECK: define internal void [[P501]](
//CHECK: call void @{{.+}}()
//CHECK-NOT: call void @{{.+}}()
//CHECK: ret void
// We have 6 initializers with default priority
//CHECK: define internal void [[PMAX]](
//CHECK: call void @{{.+}}()
//CHECK: call void @{{.+}}()
//CHECK: call void @{{.+}}()
//CHECK: call void @{{.+}}()
//CHECK: call void @{{.+}}()
//CHECK: call void @{{.+}}()
//CHECK-NOT: call void @{{.+}}()
//CHECK: ret void
// Check registration and unregistration
//CHECK: define internal void @[[UNREGFN:.+]](i8*)
//CHECK-SAME: comdat($[[REGFN]]) {
//CHECK: call i32 @__tgt_unregister_lib([[DSCTY]]* [[DESC]])
//CHECK: ret void
//CHECK: declare i32 @__tgt_unregister_lib([[DSCTY]]*)
//CHECK: define linkonce hidden void @[[REGFN]]()
//CHECK-SAME: comdat {
//CHECK: call i32 @__tgt_register_lib([[DSCTY]]* [[DESC]])
//CHECK: call i32 @__cxa_atexit(void (i8*)* @[[UNREGFN]], i8* bitcast ([[DSCTY]]* [[DESC]] to i8*),
//CHECK: ret void
//CHECK: declare i32 @__tgt_register_lib([[DSCTY]]*)
static __attribute__((init_priority(500))) SA a1;
SA a2;
SB __attribute__((init_priority(500))) b1;
SB __attribute__((init_priority(501))) b2;
static SC c1;
SD d1;
SE e1;
ST<100> t1;
ST<1000> t2;
int bar(int a){
int r = a;
a1.foo();
a2.foo();
b1.foo();
b2.foo();
c1.foo();
d1.foo();
e1.foo();
t1.foo();
t2.foo();
#pragma omp target teams distribute
for (int i = 0; i < 10; ++i)
++r;
return r + *R;
}
// Check metadata is properly generated:
// CHECK: !omp_offload.info = !{!{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID:-?[0-9]+]], i32 [[FILEID:-?[0-9]+]], !"_ZN2SB3fooEv", i32 216, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2SDD1Ev", i32 268, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2SEC1Ev", i32 286, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2SED1Ev", i32 293, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi1000EE3fooEv", i32 305, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi100EEC1Ev", i32 312, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_Z3bari", i32 436, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi100EED1Ev", i32 319, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi1000EEC1Ev", i32 312, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi1000EED1Ev", i32 319, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi100EE3fooEv", i32 305, i32 {{[0-9]+}}}
// CHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2SCC1Ev", i32 242, i32 {{[0-9]+}}}
// TCHECK: !omp_offload.info = !{!{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}, !{{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID:-?[0-9]+]], i32 [[FILEID:-?[0-9]+]], !"_ZN2SB3fooEv", i32 216, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2SDD1Ev", i32 268, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2SEC1Ev", i32 286, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2SED1Ev", i32 293, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi1000EE3fooEv", i32 305, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi100EEC1Ev", i32 312, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_Z3bari", i32 436, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi100EED1Ev", i32 319, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi1000EEC1Ev", i32 312, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi1000EED1Ev", i32 319, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2STILi100EE3fooEv", i32 305, i32 {{[0-9]+}}}
// TCHECK-DAG: = !{i32 0, i32 [[DEVID]], i32 [[FILEID]], !"_ZN2SCC1Ev", i32 242, i32 {{[0-9]+}}}
#endif
| youtube/cobalt | third_party/llvm-project/clang/test/OpenMP/target_teams_distribute_codegen_registration.cpp | C++ | bsd-3-clause | 28,706 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/platform_window/stub/stub_window.h"
#include "base/memory/scoped_refptr.h"
#include "base/notreached.h"
#include "ui/base/cursor/platform_cursor.h"
#include "ui/platform_window/platform_window_delegate.h"
namespace ui {
StubWindow::StubWindow(PlatformWindowDelegate* delegate,
bool use_default_accelerated_widget,
const gfx::Rect& bounds)
: delegate_(delegate), bounds_(bounds) {
DCHECK(delegate);
if (use_default_accelerated_widget)
delegate_->OnAcceleratedWidgetAvailable(gfx::kNullAcceleratedWidget);
}
StubWindow::~StubWindow() {}
void StubWindow::Show(bool inactive) {}
void StubWindow::Hide() {}
void StubWindow::Close() {
delegate_->OnClosed();
}
bool StubWindow::IsVisible() const {
NOTIMPLEMENTED_LOG_ONCE();
return true;
}
void StubWindow::PrepareForShutdown() {}
void StubWindow::SetBounds(const gfx::Rect& bounds) {
// Even if the pixel bounds didn't change this call to the delegate should
// still happen. The device scale factor may have changed which effectively
// changes the bounds.
bounds_ = bounds;
delegate_->OnBoundsChanged(bounds);
}
gfx::Rect StubWindow::GetBounds() const {
return bounds_;
}
void StubWindow::SetTitle(const std::u16string& title) {}
void StubWindow::SetCapture() {}
void StubWindow::ReleaseCapture() {}
bool StubWindow::HasCapture() const {
return false;
}
void StubWindow::ToggleFullscreen() {}
void StubWindow::Maximize() {}
void StubWindow::Minimize() {}
void StubWindow::Restore() {}
PlatformWindowState StubWindow::GetPlatformWindowState() const {
return PlatformWindowState::kUnknown;
}
void StubWindow::Activate() {
NOTIMPLEMENTED_LOG_ONCE();
}
void StubWindow::Deactivate() {
NOTIMPLEMENTED_LOG_ONCE();
}
void StubWindow::SetUseNativeFrame(bool use_native_frame) {}
bool StubWindow::ShouldUseNativeFrame() const {
NOTIMPLEMENTED_LOG_ONCE();
return false;
}
void StubWindow::SetCursor(scoped_refptr<PlatformCursor> cursor) {}
void StubWindow::MoveCursorTo(const gfx::Point& location) {}
void StubWindow::ConfineCursorToBounds(const gfx::Rect& bounds) {}
void StubWindow::SetRestoredBoundsInPixels(const gfx::Rect& bounds) {}
gfx::Rect StubWindow::GetRestoredBoundsInPixels() const {
return gfx::Rect();
}
void StubWindow::SetWindowIcons(const gfx::ImageSkia& window_icon,
const gfx::ImageSkia& app_icon) {}
void StubWindow::SizeConstraintsChanged() {}
} // namespace ui
| nwjs/chromium.src | ui/platform_window/stub/stub_window.cc | C++ | bsd-3-clause | 2,654 |
/*
* Copyright (c) 2015, University of Oslo
*
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.hisp.dhis.android.sdk.persistence.models;
import com.fasterxml.jackson.annotation.JsonAnySetter;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.raizlabs.android.dbflow.annotation.Column;
import com.raizlabs.android.dbflow.annotation.PrimaryKey;
import com.raizlabs.android.dbflow.annotation.Table;
import com.raizlabs.android.dbflow.structure.BaseModel;
import org.hisp.dhis.android.sdk.persistence.Dhis2Database;
/**
* @author Simen Skogly Russnes on 24.02.15.
*/
@Table(databaseName = Dhis2Database.NAME)
public class ImportCount extends BaseModel {
@Column
@PrimaryKey(autoincrement = true)
protected int id;
@JsonProperty("imported")
@Column
private int imported;
@JsonProperty("updated")
@Column
private int updated;
@JsonProperty("ignored")
@Column
private int ignored;
@JsonProperty("deleted")
@Column
private int deleted;
@JsonAnySetter
public void handleUnknown(String key, Object value) {
// do something: put to a Map; log a warning, whatever
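        // Illustrative only (not wired up): for instance, log the unexpected
        // field so server-side API changes surface during development, e.g.
        //   Log.w("ImportCount", "Unknown property '" + key + "': " + value);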
}
public int getId() {
return id;
}
public int getImported() {
return imported;
}
public int getUpdated() {
return updated;
}
public int getIgnored() {
return ignored;
}
public int getDeleted() {
return deleted;
}
public void setDeleted(int deleted) {
this.deleted = deleted;
}
public void setIgnored(int ignored) {
this.ignored = ignored;
}
public void setUpdated(int updated) {
this.updated = updated;
}
public void setImported(int imported) {
this.imported = imported;
}
public void setId(int id) {
this.id = id;
}
}
| arthurgwatidzo/dhis2-android-sdk | app/src/main/java/org/hisp/dhis/android/sdk/persistence/models/ImportCount.java | Java | bsd-3-clause | 3,317 |
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package com.google.protobuf;
import static com.google.protobuf.Internal.checkNotNull;
import com.google.protobuf.Internal.LongList;
import java.util.Arrays;
import java.util.Collection;
import java.util.RandomAccess;
/**
* An implementation of {@link LongList} on top of a primitive array.
*
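 * <p>Illustrative usage (a hypothetical snippet; the class is package-private,
 * so real callers live inside {@code com.google.protobuf}):
 *
 * <pre>{@code
 * LongArrayList list = new LongArrayList();
 * list.addLong(1L);             // append without boxing
 * long first = list.getLong(0); // read without boxing
 * list.makeImmutable();         // freeze before sharing
 * }</pre>
 *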
* @author dweis@google.com (Daniel Weis)
*/
final class LongArrayList extends AbstractProtobufList<Long>
implements LongList, RandomAccess, PrimitiveNonBoxingCollection {
private static final LongArrayList EMPTY_LIST = new LongArrayList(new long[0], 0);
static {
EMPTY_LIST.makeImmutable();
}
public static LongArrayList emptyList() {
return EMPTY_LIST;
}
/** The backing store for the list. */
private long[] array;
/**
* The size of the list distinct from the length of the array. That is, it is the number of
* elements set in the list.
*/
private int size;
/** Constructs a new mutable {@code LongArrayList} with default capacity. */
LongArrayList() {
this(new long[DEFAULT_CAPACITY], 0);
}
/**
* Constructs a new mutable {@code LongArrayList} containing the same elements as {@code other}.
*/
private LongArrayList(long[] other, int size) {
array = other;
this.size = size;
}
@Override
protected void removeRange(int fromIndex, int toIndex) {
ensureIsMutable();
if (toIndex < fromIndex) {
throw new IndexOutOfBoundsException("toIndex < fromIndex");
}
System.arraycopy(array, toIndex, array, fromIndex, size - toIndex);
size -= (toIndex - fromIndex);
modCount++;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof LongArrayList)) {
return super.equals(o);
}
LongArrayList other = (LongArrayList) o;
if (size != other.size) {
return false;
}
final long[] arr = other.array;
for (int i = 0; i < size; i++) {
if (array[i] != arr[i]) {
return false;
}
}
return true;
}
@Override
public int hashCode() {
int result = 1;
for (int i = 0; i < size; i++) {
result = (31 * result) + Internal.hashLong(array[i]);
}
return result;
}
@Override
public LongList mutableCopyWithCapacity(int capacity) {
if (capacity < size) {
throw new IllegalArgumentException();
}
return new LongArrayList(Arrays.copyOf(array, capacity), size);
}
@Override
public Long get(int index) {
return getLong(index);
}
@Override
public long getLong(int index) {
ensureIndexInRange(index);
return array[index];
}
@Override
public int size() {
return size;
}
@Override
public Long set(int index, Long element) {
return setLong(index, element);
}
@Override
public long setLong(int index, long element) {
ensureIsMutable();
ensureIndexInRange(index);
long previousValue = array[index];
array[index] = element;
return previousValue;
}
@Override
public void add(int index, Long element) {
addLong(index, element);
}
/** Like {@link #add(Long)} but more efficient in that it doesn't box the element. */
@Override
public void addLong(long element) {
addLong(size, element);
}
/** Like {@link #add(int, Long)} but more efficient in that it doesn't box the element. */
private void addLong(int index, long element) {
ensureIsMutable();
if (index < 0 || index > size) {
throw new IndexOutOfBoundsException(makeOutOfBoundsExceptionMessage(index));
}
if (size < array.length) {
// Shift everything over to make room
System.arraycopy(array, index, array, index + 1, size - index);
} else {
// Resize to 1.5x the size
int length = ((size * 3) / 2) + 1;
long[] newArray = new long[length];
// Copy the first part directly
System.arraycopy(array, 0, newArray, 0, index);
// Copy the rest shifted over by one to make room
System.arraycopy(array, index, newArray, index + 1, size - index);
array = newArray;
}
array[index] = element;
size++;
modCount++;
}
@Override
public boolean addAll(Collection<? extends Long> collection) {
ensureIsMutable();
checkNotNull(collection);
// We specialize when adding another LongArrayList to avoid boxing elements.
if (!(collection instanceof LongArrayList)) {
return super.addAll(collection);
}
LongArrayList list = (LongArrayList) collection;
if (list.size == 0) {
return false;
}
int overflow = Integer.MAX_VALUE - size;
if (overflow < list.size) {
// We can't actually represent a list this large.
throw new OutOfMemoryError();
}
int newSize = size + list.size;
if (newSize > array.length) {
array = Arrays.copyOf(array, newSize);
}
System.arraycopy(list.array, 0, array, size, list.size);
size = newSize;
modCount++;
return true;
}
@Override
public boolean remove(Object o) {
ensureIsMutable();
for (int i = 0; i < size; i++) {
if (o.equals(array[i])) {
System.arraycopy(array, i + 1, array, i, size - i - 1);
size--;
modCount++;
return true;
}
}
return false;
}
@Override
public Long remove(int index) {
ensureIsMutable();
ensureIndexInRange(index);
long value = array[index];
if (index < size - 1) {
System.arraycopy(array, index + 1, array, index, size - index - 1);
}
size--;
modCount++;
return value;
}
/**
* Ensures that the provided {@code index} is within the range of {@code [0, size]}. Throws an
* {@link IndexOutOfBoundsException} if it is not.
*
* @param index the index to verify is in range
*/
private void ensureIndexInRange(int index) {
if (index < 0 || index >= size) {
throw new IndexOutOfBoundsException(makeOutOfBoundsExceptionMessage(index));
}
}
private String makeOutOfBoundsExceptionMessage(int index) {
return "Index:" + index + ", Size:" + size;
}
}
| endlessm/chromium-browser | third_party/protobuf/java/core/src/main/java/com/google/protobuf/LongArrayList.java | Java | bsd-3-clause | 7,711 |
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/common/gpu/media/mft_angle_video_device.h"
#include <d3d9.h>
#include "media/base/video_frame.h"
#include "third_party/angle/src/libGLESv2/main.h"
MftAngleVideoDevice::MftAngleVideoDevice()
: device_(reinterpret_cast<egl::Display*>(
eglGetCurrentDisplay())->getDevice()) {
}
void* MftAngleVideoDevice::GetDevice() {
return device_;
}
bool MftAngleVideoDevice::CreateVideoFrameFromGlTextures(
size_t width, size_t height, media::VideoFrame::Format format,
const std::vector<media::VideoFrame::GlTexture>& textures,
scoped_refptr<media::VideoFrame>* frame) {
media::VideoFrame::GlTexture texture_array[media::VideoFrame::kMaxPlanes];
memset(texture_array, 0, sizeof(texture_array));
for (size_t i = 0; i < textures.size(); ++i) {
texture_array[i] = textures[i];
}
media::VideoFrame::CreateFrameGlTexture(format,
width,
height,
texture_array,
frame);
return *frame != NULL;
}
void MftAngleVideoDevice::ReleaseVideoFrame(
const scoped_refptr<media::VideoFrame>& frame) {
  // We don't need to do anything here because we didn't allocate any
  // resources for the VideoFrame(s) generated.
}
bool MftAngleVideoDevice::ConvertToVideoFrame(
void* buffer, scoped_refptr<media::VideoFrame> frame) {
gl::Context* context = (gl::Context*)eglGetCurrentContext();
// TODO(hclam): Connect ANGLE to upload the surface to texture when changes
// to ANGLE is done.
return true;
}
| Crystalnix/house-of-life-chromium | content/common/gpu/media/mft_angle_video_device.cc | C++ | bsd-3-clause | 1,786 |
/*
* File: AbstractStatefulEvaluatorTest.java
* Authors: Kevin R. Dixon
* Company: Sandia National Laboratories
* Project: Cognitive Foundry
*
* Copyright Jul 7, 2009, Sandia Corporation.
* Under the terms of Contract DE-AC04-94AL85000, there is a non-exclusive
* license for use of this work by or on behalf of the U.S. Government.
* Export of this program may require a license from the United States
* Government. See CopyrightHistory.txt for complete details.
*
*/
package gov.sandia.cognition.evaluator;
import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.mtj.Vector3;
import gov.sandia.cognition.util.ObjectUtil;
import junit.framework.TestCase;
import java.util.Random;
/**
* Unit tests for AbstractStatefulEvaluatorTest.
*
* @author krdixon
*/
public class AbstractStatefulEvaluatorTest
extends TestCase
{
/**
* Random number generator to use for a fixed random seed.
*/
public final Random RANDOM = new Random( 1 );
/**
* Default tolerance of the regression tests, {@value}.
*/
public final double TOLERANCE = 1e-5;
/**
* Tests for class AbstractStatefulEvaluatorTest.
* @param testName Name of the test.
*/
public AbstractStatefulEvaluatorTest(
String testName)
{
super(testName);
}
public static class StatefulFunction
extends AbstractStatefulEvaluator<Vector,Vector,Vector>
{
public StatefulFunction()
{
super();
}
public StatefulFunction(
Vector state )
{
super( state );
}
public Vector createDefaultState()
{
return new Vector3(1.0,2.0,3.0);
}
public Vector evaluate(
Vector input)
{
this.getState().plusEquals(input);
return this.getState();
}
}
public Vector createRandomInput()
{
return Vector3.createRandom(RANDOM);
}
public StatefulFunction createInstance()
{
return new StatefulFunction(this.createRandomInput());
}
/**
* Tests the constructors of class AbstractStatefulEvaluatorTest.
*/
public void testConstructors()
{
System.out.println( "Constructors" );
StatefulFunction f = new StatefulFunction();
assertEquals( f.createDefaultState(), f.getState() );
Vector x = this.createRandomInput();
f = new StatefulFunction( x );
assertSame( x, f.getState() );
}
/**
* Test of clone method, of class AbstractStatefulEvaluator.
*/
public void testClone()
{
System.out.println("clone");
StatefulFunction f = this.createInstance();
StatefulFunction clone = (StatefulFunction) f.clone();
System.out.println( "f:\n" + ObjectUtil.toString(f) );
System.out.println( "clone:\n" + ObjectUtil.toString(clone) );
assertNotNull( clone );
assertNotSame( f, clone );
assertNotNull( clone.getState() );
assertNotSame( f.getState(), clone.getState() );
assertEquals( f.getState(), clone.getState() );
assertFalse( clone.getState().equals( clone.createDefaultState() ) );
Vector originalState = f.getState().clone();
assertEquals( originalState, f.getState() );
assertEquals( originalState, clone.getState() );
clone.getState().scaleEquals(RANDOM.nextDouble());
assertEquals( originalState, f.getState() );
assertFalse( originalState.equals( clone.getState() ) );
}
/**
* evaluate(state)
*/
public void testEvaluateState()
{
System.out.println( "evaluate(State)" );
StatefulFunction f = this.createInstance();
Vector defaultState = f.createDefaultState();
f.resetState();
Vector input = this.createRandomInput();
Vector o1 = f.evaluate(input);
Vector o2 = f.evaluate(input, defaultState );
assertNotSame( o1, o2 );
assertEquals( o1, o2 );
}
/**
* Test of evaluate method, of class AbstractStatefulEvaluator.
*/
public void testEvaluate()
{
System.out.println("evaluate");
StatefulFunction f = this.createInstance();
Vector originalState = f.getState().clone();
f.evaluate(this.createRandomInput());
assertFalse( originalState.equals( f.getState() ) );
}
/**
* Test of getState method, of class AbstractStatefulEvaluator.
*/
public void testGetState()
{
System.out.println("getState");
StatefulFunction f = this.createInstance();
assertNotNull( f.getState() );
}
/**
* Test of setState method, of class AbstractStatefulEvaluator.
*/
public void testSetState()
{
System.out.println("setState");
StatefulFunction f = this.createInstance();
Vector s = this.createRandomInput();
f.setState(s);
assertSame( s, f.getState() );
}
/**
* Test of resetState method, of class AbstractStatefulEvaluator.
*/
public void testResetState()
{
System.out.println("resetState");
StatefulFunction f = this.createInstance();
Vector v = f.createDefaultState();
assertFalse( v.equals( f.getState() ) );
f.resetState();
assertEquals( v, f.getState() );
}
}
| codeaudit/Foundry | Components/CommonCore/Test/gov/sandia/cognition/evaluator/AbstractStatefulEvaluatorTest.java | Java | bsd-3-clause | 5,498 |
# -*- coding: utf-8 -*-
"""
Classes that process (and maybe abort) responses based on
various conditions. They should be used with
:class:`splash.network_manager.SplashQNetworkAccessManager`.
"""
from __future__ import absolute_import
from PyQt5.QtNetwork import QNetworkRequest
from splash.qtutils import request_repr
from twisted.python import log
import fnmatch
class ContentTypeMiddleware(object):
"""
Response middleware, aborts responses depending on the content type.
A response will be aborted (and the underlying connection closed) after
receiving the response headers if the content type of the response is not
in the whitelist or it's in the blacklist. Both lists support wildcards.
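
    For example (illustrative values only; in practice the whitelist and
    blacklist come from the request's render options):

    >>> ContentTypeMiddleware.contains({'text/*'}, 'text/html')
    True
    >>> ContentTypeMiddleware.contains({'text/*'}, 'image/png')
    False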
"""
def __init__(self, verbosity=0):
self.verbosity = verbosity
@staticmethod
def contains(mime_set, mime):
"""
>>> ContentTypeMiddleware.contains({'*/*'}, 'any/thing')
True
>>> ContentTypeMiddleware.contains(set(), 'any/thing')
False
>>> ContentTypeMiddleware.contains({'text/css', 'image/*'}, 'image/png')
True
>>> ContentTypeMiddleware.contains({'*'}, 'any-thing')
True
"""
for pattern in mime_set:
if fnmatch.fnmatch(mime, pattern):
return True
return False
@staticmethod
def clean_mime(mime):
"""
Remove attributes from a mime string:
>>> ContentTypeMiddleware.clean_mime(' text/html; charset=utf-8\t ')
'text/html'
"""
separator = mime.find(';')
if separator > 0:
mime = mime[:separator]
return mime.strip()
def process(self, reply, render_options):
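        """
        Abort ``reply`` if its Content-Type is blacklisted or not whitelisted
        according to ``render_options`` (see the class docstring for details).
        """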
content_type = reply.header(QNetworkRequest.ContentTypeHeader)
if content_type is None:
return
mimetype = self.clean_mime(content_type)
allowed = render_options.get_allowed_content_types()
forbidden = render_options.get_forbidden_content_types()
whitelist = set(map(ContentTypeMiddleware.clean_mime, allowed))
blacklist = set(map(ContentTypeMiddleware.clean_mime, forbidden))
if self.contains(blacklist, mimetype) or not self.contains(whitelist, mimetype):
if self.verbosity >= 2:
request_str = request_repr(reply, reply.operation())
msg = "Dropping %s because of Content Type" % request_str
log.msg(msg, system='response_middleware')
reply.abort()
| pawelmhm/splash | splash/response_middleware.py | Python | bsd-3-clause | 2,510 |
//2
var __result1 = 2; // for SAFE
var __expect1 = 2; // for SAFE
| darkrsw/safe | tests/typing_tests/TAJS_micro/test2.js | JavaScript | bsd-3-clause | 68 |
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/** @const {number} */
const DEFAULT_BLACK_CURSOR_COLOR = 0;
/**
* @fileoverview
* 'settings-manage-a11y-page' is the subpage with the accessibility
* settings.
*/
import {afterNextRender, Polymer, html, flush, Templatizer, TemplateInstanceBase} from '//resources/polymer/v3_0/polymer/polymer_bundled.min.js';
import '//resources/cr_elements/cr_icon_button/cr_icon_button.m.js';
import '//resources/cr_elements/cr_link_row/cr_link_row.js';
import '//resources/cr_elements/icons.m.js';
import '//resources/cr_elements/shared_vars_css.m.js';
import {WebUIListenerBehavior} from '//resources/js/web_ui_listener_behavior.m.js';
import {I18nBehavior} from '//resources/js/i18n_behavior.m.js';
import {loadTimeData} from '//resources/js/load_time_data.m.js';
import '../../controls/settings_slider.js';
import '../../controls/settings_toggle_button.js';
import {DeepLinkingBehavior} from '../deep_linking_behavior.m.js';
import {routes} from '../os_route.m.js';
import {Router, Route} from '../../router.js';
import {RouteObserverBehavior} from '../route_observer_behavior.js';
import '../../settings_shared_css.js';
import {BatteryStatus, DevicePageBrowserProxy, DevicePageBrowserProxyImpl, ExternalStorage, IdleBehavior, LidClosedBehavior, NoteAppInfo, NoteAppLockScreenSupport, PowerManagementSettings, PowerSource, getDisplayApi, StorageSpaceState} from '../device_page/device_page_browser_proxy.js';
import '//resources/cr_components/chromeos/localized_link/localized_link.js';
import {RouteOriginBehaviorImpl, RouteOriginBehavior} from '../route_origin_behavior.m.js';
import {ManageA11yPageBrowserProxyImpl, ManageA11yPageBrowserProxy} from './manage_a11y_page_browser_proxy.js';
Polymer({
_template: html`{__html_template__}`,
is: 'settings-manage-a11y-page',
behaviors: [
DeepLinkingBehavior,
I18nBehavior,
RouteObserverBehavior,
RouteOriginBehavior,
WebUIListenerBehavior,
],
properties: {
/**
* Preferences state.
*/
prefs: {
type: Object,
notify: true,
},
/**
* Enum values for the 'settings.a11y.screen_magnifier_mouse_following_mode'
* preference. These values map to
* AccessibilityController::MagnifierMouseFollowingMode, and are written to
* prefs and metrics, so order should not be changed.
* @private {!Object<string, number>}
*/
screenMagnifierMouseFollowingModePrefValues_: {
readOnly: true,
type: Object,
value: {
CONTINUOUS: 0,
CENTERED: 1,
EDGE: 2,
},
},
screenMagnifierZoomOptions_: {
readOnly: true,
type: Array,
value() {
// These values correspond to the i18n values in settings_strings.grdp.
// If these values get changed then those strings need to be changed as
// well.
return [
{value: 2, name: loadTimeData.getString('screenMagnifierZoom2x')},
{value: 4, name: loadTimeData.getString('screenMagnifierZoom4x')},
{value: 6, name: loadTimeData.getString('screenMagnifierZoom6x')},
{value: 8, name: loadTimeData.getString('screenMagnifierZoom8x')},
{value: 10, name: loadTimeData.getString('screenMagnifierZoom10x')},
{value: 12, name: loadTimeData.getString('screenMagnifierZoom12x')},
{value: 14, name: loadTimeData.getString('screenMagnifierZoom14x')},
{value: 16, name: loadTimeData.getString('screenMagnifierZoom16x')},
{value: 18, name: loadTimeData.getString('screenMagnifierZoom18x')},
{value: 20, name: loadTimeData.getString('screenMagnifierZoom20x')},
];
},
},
autoClickDelayOptions_: {
readOnly: true,
type: Array,
value() {
// These values correspond to the i18n values in settings_strings.grdp.
// If these values get changed then those strings need to be changed as
// well.
return [
{
value: 600,
name: loadTimeData.getString('delayBeforeClickExtremelyShort')
},
{
value: 800,
name: loadTimeData.getString('delayBeforeClickVeryShort')
},
{value: 1000, name: loadTimeData.getString('delayBeforeClickShort')},
{value: 2000, name: loadTimeData.getString('delayBeforeClickLong')},
{
value: 4000,
name: loadTimeData.getString('delayBeforeClickVeryLong')
},
];
},
},
autoClickMovementThresholdOptions_: {
readOnly: true,
type: Array,
value() {
return [
{
value: 5,
name: loadTimeData.getString('autoclickMovementThresholdExtraSmall')
},
{
value: 10,
name: loadTimeData.getString('autoclickMovementThresholdSmall')
},
{
value: 20,
name: loadTimeData.getString('autoclickMovementThresholdDefault')
},
{
value: 30,
name: loadTimeData.getString('autoclickMovementThresholdLarge')
},
{
value: 40,
name: loadTimeData.getString('autoclickMovementThresholdExtraLarge')
},
];
},
},
/** @private {!Array<{name: string, value: number}>} */
cursorColorOptions_: {
readOnly: true,
type: Array,
value() {
return [
{
value: DEFAULT_BLACK_CURSOR_COLOR,
name: loadTimeData.getString('cursorColorBlack'),
},
{
value: 0xd93025, // Red 600
name: loadTimeData.getString('cursorColorRed'),
},
{
value: 0xf29900, // Yellow 700
name: loadTimeData.getString('cursorColorYellow'),
},
{
value: 0x1e8e3e, // Green 600
name: loadTimeData.getString('cursorColorGreen'),
},
{
value: 0x03b6be, // Cyan 600
name: loadTimeData.getString('cursorColorCyan'),
},
{
value: 0x1a73e8, // Blue 600
name: loadTimeData.getString('cursorColorBlue'),
},
{
value: 0xc61ad9, // Magenta 600
name: loadTimeData.getString('cursorColorMagenta'),
},
{
value: 0xf50057, // Pink A400
name: loadTimeData.getString('cursorColorPink'),
},
];
},
},
/** @private */
isMagnifierContinuousMouseFollowingModeSettingEnabled_: {
type: Boolean,
value() {
return loadTimeData.getBoolean(
'isMagnifierContinuousMouseFollowingModeSettingEnabled');
},
},
/**
* Whether the user is in kiosk mode.
* @private
*/
isKioskModeActive_: {
type: Boolean,
value() {
return loadTimeData.getBoolean('isKioskModeActive');
}
},
/**
* Whether a setting for enabling shelf navigation buttons in tablet mode
* should be displayed in the accessibility settings.
* @private
*/
showShelfNavigationButtonsSettings_: {
type: Boolean,
computed:
'computeShowShelfNavigationButtonsSettings_(isKioskModeActive_)',
},
/** @private */
isGuest_: {
type: Boolean,
value() {
return loadTimeData.getBoolean('isGuest');
}
},
/** @private */
screenMagnifierHintLabel_: {
type: String,
value() {
return this.i18n(
'screenMagnifierHintLabel',
this.i18n('screenMagnifierHintSearchKey'));
}
},
/** @private */
dictationSubtitle_: {
type: String,
value() {
return loadTimeData.getString('dictationDescription');
}
},
/** @private */
dictationLocaleSubtitleOverride_: {
type: String,
value: '',
},
/** @private */
useDictationLocaleSubtitleOverride_: {
type: Boolean,
value: false,
},
/** @private */
dictationLocaleMenuSubtitle_: {
type: String,
computed: 'computeDictationLocaleSubtitle_(' +
'dictationLocaleOptions_, ' +
'prefs.settings.a11y.dictation_locale.value, ' +
'dictationLocaleSubtitleOverride_)',
},
/** @private */
areDictationLocalePrefsAllowed_: {
type: Boolean,
readOnly: true,
value() {
return loadTimeData.getBoolean('areDictationLocalePrefsAllowed');
}
},
/** @private */
dictationLocaleOptions_: {
type: Array,
value() {
return [];
}
},
/** @private */
dictationLocalesList_: {
type: Array,
value() {
return [];
}
},
/** @private */
showDictationLocaleMenu_: {
type: Boolean,
value: false,
},
/**
* |hasKeyboard_|, |hasMouse_|, |hasPointingStick_|, and |hasTouchpad_|
* start undefined so observers don't trigger until they have been
* populated.
* @private
*/
hasKeyboard_: Boolean,
/** @private */
hasMouse_: Boolean,
/** @private */
hasPointingStick_: Boolean,
/** @private */
hasTouchpad_: Boolean,
/**
* Boolean indicating whether shelf navigation buttons should implicitly be
* enabled in tablet mode - the navigation buttons are implicitly enabled
* when spoken feedback, automatic clicks, or switch access are enabled.
* The buttons can also be explicitly enabled by a designated a11y setting.
* @private
*/
shelfNavigationButtonsImplicitlyEnabled_: {
type: Boolean,
computed: 'computeShelfNavigationButtonsImplicitlyEnabled_(' +
'prefs.settings.accessibility.value,' +
'prefs.settings.a11y.autoclick.value,' +
'prefs.settings.a11y.switch_access.enabled.value)',
},
/**
* The effective pref value that indicates whether shelf navigation buttons
* are enabled in tablet mode.
* @type {chrome.settingsPrivate.PrefObject}
* @private
*/
shelfNavigationButtonsPref_: {
type: Object,
computed: 'getShelfNavigationButtonsEnabledPref_(' +
'shelfNavigationButtonsImplicitlyEnabled_,' +
'prefs.settings.a11y.tablet_mode_shelf_nav_buttons_enabled)',
},
/**
* Used by DeepLinkingBehavior to focus this page's deep links.
* @type {!Set<!chromeos.settings.mojom.Setting>}
*/
supportedSettingIds: {
type: Object,
value: () => new Set([
chromeos.settings.mojom.Setting.kChromeVox,
chromeos.settings.mojom.Setting.kSelectToSpeak,
chromeos.settings.mojom.Setting.kHighContrastMode,
chromeos.settings.mojom.Setting.kFullscreenMagnifier,
chromeos.settings.mojom.Setting.kFullscreenMagnifierMouseFollowingMode,
chromeos.settings.mojom.Setting.kFullscreenMagnifierFocusFollowing,
chromeos.settings.mojom.Setting.kDockedMagnifier,
chromeos.settings.mojom.Setting.kStickyKeys,
chromeos.settings.mojom.Setting.kOnScreenKeyboard,
chromeos.settings.mojom.Setting.kDictation,
chromeos.settings.mojom.Setting.kHighlightKeyboardFocus,
chromeos.settings.mojom.Setting.kHighlightTextCaret,
chromeos.settings.mojom.Setting.kAutoClickWhenCursorStops,
chromeos.settings.mojom.Setting.kLargeCursor,
chromeos.settings.mojom.Setting.kHighlightCursorWhileMoving,
chromeos.settings.mojom.Setting.kTabletNavigationButtons,
chromeos.settings.mojom.Setting.kMonoAudio,
chromeos.settings.mojom.Setting.kStartupSound,
chromeos.settings.mojom.Setting.kEnableSwitchAccess,
chromeos.settings.mojom.Setting.kEnableCursorColor,
]),
},
},
observers: [
'pointersChanged_(hasMouse_, hasPointingStick_, hasTouchpad_, ' +
'isKioskModeActive_)',
],
/** RouteOriginBehavior override */
route_: routes.MANAGE_ACCESSIBILITY,
/** @private {?ManageA11yPageBrowserProxy} */
manageBrowserProxy_: null,
/** @private {?DevicePageBrowserProxy} */
deviceBrowserProxy_: null,
/** @override */
created() {
this.manageBrowserProxy_ = ManageA11yPageBrowserProxyImpl.getInstance();
this.deviceBrowserProxy_ = DevicePageBrowserProxyImpl.getInstance();
},
/** @override */
attached() {
this.addWebUIListener(
'has-mouse-changed', (exists) => this.set('hasMouse_', exists));
this.addWebUIListener(
'has-pointing-stick-changed',
(exists) => this.set('hasPointingStick_', exists));
this.addWebUIListener(
'has-touchpad-changed', (exists) => this.set('hasTouchpad_', exists));
this.deviceBrowserProxy_.initializePointers();
this.addWebUIListener(
'has-hardware-keyboard',
(hasKeyboard) => this.set('hasKeyboard_', hasKeyboard));
this.deviceBrowserProxy_.initializeKeyboardWatcher();
},
/** @override */
ready() {
this.addWebUIListener(
'initial-data-ready',
(startup_sound_enabled) =>
this.onManageAllyPageReady_(startup_sound_enabled));
this.addWebUIListener(
'dictation-locale-menu-subtitle-changed',
(result) => this.onDictationLocaleMenuSubtitleChanged_(result));
this.addWebUIListener(
'dictation-locales-set',
(locales) => this.onDictationLocalesSet_(locales));
this.manageBrowserProxy_.manageA11yPageReady();
const r = routes;
this.addFocusConfig_(r.MANAGE_TTS_SETTINGS, '#ttsSubpageButton');
this.addFocusConfig_(r.MANAGE_CAPTION_SETTINGS, '#captionsSubpageButton');
this.addFocusConfig_(
r.MANAGE_SWITCH_ACCESS_SETTINGS, '#switchAccessSubpageButton');
this.addFocusConfig_(r.DISPLAY, '#displaySubpageButton');
this.addFocusConfig_(r.KEYBOARD, '#keyboardSubpageButton');
this.addFocusConfig_(r.POINTERS, '#pointerSubpageButton');
},
/**
* @param {!Route} route
* @param {!Route} oldRoute
*/
currentRouteChanged(route, oldRoute) {
// Does not apply to this page.
if (route !== routes.MANAGE_ACCESSIBILITY) {
return;
}
this.attemptDeepLink();
},
  /**
   * @param {boolean} hasMouse
   * @param {boolean} hasPointingStick
   * @param {boolean} hasTouchpad
   * @param {boolean} isKioskModeActive
   * @private
   */
  pointersChanged_(hasMouse, hasPointingStick, hasTouchpad, isKioskModeActive) {
this.$.pointerSubpageButton.hidden =
(!hasMouse && !hasPointingStick && !hasTouchpad) || isKioskModeActive;
},
/**
* Updates the Select-to-Speak description text based on:
* 1. Whether Select-to-Speak is enabled.
* 2. If it is enabled, whether a physical keyboard is present.
* @param {boolean} enabled
* @param {boolean} hasKeyboard
* @param {string} disabledString String to show when Select-to-Speak is
* disabled.
* @param {string} keyboardString String to show when there is a physical
* keyboard
* @param {string} noKeyboardString String to show when there is no keyboard
* @private
*/
getSelectToSpeakDescription_(
enabled, hasKeyboard, disabledString, keyboardString, noKeyboardString) {
return !enabled ? disabledString :
hasKeyboard ? keyboardString : noKeyboardString;
},
/**
* @param {!CustomEvent<boolean>} e
* @private
*/
toggleStartupSoundEnabled_(e) {
this.manageBrowserProxy_.setStartupSoundEnabled(e.detail);
},
/** @private */
onManageTtsSettingsTap_() {
Router.getInstance().navigateTo(routes.MANAGE_TTS_SETTINGS);
},
/** @private */
onChromeVoxSettingsTap_() {
this.manageBrowserProxy_.showChromeVoxSettings();
},
/** @private */
onChromeVoxTutorialTap_() {
this.manageBrowserProxy_.showChromeVoxTutorial();
},
/** @private */
onCaptionsClick_() {
Router.getInstance().navigateTo(routes.MANAGE_CAPTION_SETTINGS);
},
/** @private */
onSelectToSpeakSettingsTap_() {
this.manageBrowserProxy_.showSelectToSpeakSettings();
},
/** @private */
onSwitchAccessSettingsTap_() {
Router.getInstance().navigateTo(routes.MANAGE_SWITCH_ACCESS_SETTINGS);
},
/** @private */
onDisplayTap_() {
Router.getInstance().navigateTo(
routes.DISPLAY,
/* dynamicParams */ null, /* removeSearch */ true);
},
/** @private */
onAppearanceTap_() {
// Open browser appearance section in a new browser tab.
window.open('chrome://settings/appearance');
},
/** @private */
onKeyboardTap_() {
Router.getInstance().navigateTo(
routes.KEYBOARD,
/* dynamicParams */ null, /* removeSearch */ true);
},
/**
* @param {!Event} event
* @private
*/
onA11yCaretBrowsingChange_(event) {
if (event.target.checked) {
chrome.metricsPrivate.recordUserAction(
'Accessibility.CaretBrowsing.EnableWithSettings');
} else {
chrome.metricsPrivate.recordUserAction(
'Accessibility.CaretBrowsing.DisableWithSettings');
}
},
/**
* @return {boolean}
* @private
*/
computeShowShelfNavigationButtonsSettings_() {
return !this.isKioskModeActive_ &&
loadTimeData.getBoolean('showTabletModeShelfNavigationButtonsSettings');
},
/**
* @return {boolean} Whether shelf navigation buttons should implicitly be
* enabled in tablet mode (due to accessibility settings different than
* shelf_navigation_buttons_enabled_in_tablet_mode).
* @private
*/
computeShelfNavigationButtonsImplicitlyEnabled_() {
/**
* Gets the bool pref value for the provided pref key.
* @param {string} key
* @return {boolean}
*/
const getBoolPrefValue = (key) => {
const pref = /** @type {chrome.settingsPrivate.PrefObject} */ (
this.get(key, this.prefs));
return pref && !!pref.value;
};
return getBoolPrefValue('settings.accessibility') ||
getBoolPrefValue('settings.a11y.autoclick') ||
getBoolPrefValue('settings.a11y.switch_access.enabled');
},
/**
* Calculates the effective value for "shelf navigation buttons enabled in
* tablet mode" setting - if the setting is implicitly enabled (by other a11y
* settings), this will return a stub pref value.
* @private
* @return {chrome.settingsPrivate.PrefObject}
*/
getShelfNavigationButtonsEnabledPref_() {
if (this.shelfNavigationButtonsImplicitlyEnabled_) {
return /** @type {!chrome.settingsPrivate.PrefObject}*/ ({
value: true,
type: chrome.settingsPrivate.PrefType.BOOLEAN,
key: ''
});
}
return /** @type {chrome.settingsPrivate.PrefObject} */ (this.get(
'settings.a11y.tablet_mode_shelf_nav_buttons_enabled', this.prefs));
},
/** @private */
onShelfNavigationButtonsLearnMoreClicked_() {
chrome.metricsPrivate.recordUserAction(
'Settings_A11y_ShelfNavigationButtonsLearnMoreClicked');
},
/**
* Handles the <code>tablet_mode_shelf_nav_buttons_enabled</code> setting's
* toggle changes. It updates the backing pref value, unless the setting is
* implicitly enabled.
* @private
*/
updateShelfNavigationButtonsEnabledPref_() {
if (this.shelfNavigationButtonsImplicitlyEnabled_) {
return;
}
const enabled = this.$$('#shelfNavigationButtonsEnabledControl').checked;
this.set(
'prefs.settings.a11y.tablet_mode_shelf_nav_buttons_enabled.value',
enabled);
this.manageBrowserProxy_.recordSelectedShowShelfNavigationButtonValue(
enabled);
},
/** @private */
onA11yCursorColorChange_() {
// Custom cursor color is enabled when the color is not set to black.
const a11yCursorColorOn =
this.get('prefs.settings.a11y.cursor_color.value') !==
DEFAULT_BLACK_CURSOR_COLOR;
this.set(
'prefs.settings.a11y.cursor_color_enabled.value', a11yCursorColorOn);
},
/** @private */
onMouseTap_() {
Router.getInstance().navigateTo(
routes.POINTERS,
/* dynamicParams */ null, /* removeSearch */ true);
},
/**
* Handles updating the visibility of the shelf navigation buttons setting
* and updating whether startupSoundEnabled is checked.
* @param {boolean} startup_sound_enabled Whether startup sound is enabled.
* @private
*/
onManageAllyPageReady_(startup_sound_enabled) {
this.$.startupSoundEnabled.checked = startup_sound_enabled;
},
/**
* Whether additional features link should be shown.
* @param {boolean} isKiosk
* @param {boolean} isGuest
* @return {boolean}
* @private
*/
shouldShowAdditionalFeaturesLink_(isKiosk, isGuest) {
return !isKiosk && !isGuest;
},
/**
* @param {string} subtitle
* @private
*/
onDictationLocaleMenuSubtitleChanged_(subtitle) {
this.useDictationLocaleSubtitleOverride_ = true;
this.dictationLocaleSubtitleOverride_ = subtitle;
},
/**
* Saves a list of locales and updates the UI to reflect the list.
* @param {!Array<!Array<string>>} locales
* @private
*/
onDictationLocalesSet_(locales) {
this.dictationLocalesList_ = locales;
this.onDictationLocalesChanged_();
},
/**
* Converts an array of locales and their human-readable equivalents to
* an array of menu options.
* TODO(crbug.com/1195916): Use 'offline' to indicate to the user which
* locales work offline with an icon in the select options.
* @private
*/
onDictationLocalesChanged_() {
const currentLocale =
this.get('prefs.settings.a11y.dictation_locale.value');
this.dictationLocaleOptions_ =
this.dictationLocalesList_.map((localeInfo) => {
return {
name: localeInfo.name,
value: localeInfo.value,
worksOffline: localeInfo.worksOffline,
installed: localeInfo.installed,
recommended:
localeInfo.recommended || localeInfo.value === currentLocale,
};
});
},
/**
* Calculates the Dictation locale subtitle based on the current
* locale from prefs and the offline availability of that locale.
* @return {string}
* @private
*/
computeDictationLocaleSubtitle_() {
if (this.useDictationLocaleSubtitleOverride_) {
// Only use the subtitle override once, since we still want the subtitle
      // to respond to changes to the dictation locale.
this.useDictationLocaleSubtitleOverride_ = false;
return this.dictationLocaleSubtitleOverride_;
}
const currentLocale =
this.get('prefs.settings.a11y.dictation_locale.value');
const locale = this.dictationLocaleOptions_.find(
(element) => element.value === currentLocale);
if (!locale) {
return '';
}
if (!locale.worksOffline) {
// If a locale is not supported offline, then use the network subtitle.
return this.i18n('dictationLocaleSubLabelNetwork', locale.name);
}
if (!locale.installed) {
// If a locale is supported offline, but isn't installed, then use the
// temporary network subtitle.
return this.i18n(
'dictationLocaleSubLabelNetworkTemporarily', locale.name);
}
// If we get here, we know a locale is both supported offline and installed.
return this.i18n('dictationLocaleSubLabelOffline', locale.name);
},
/** @private */
onChangeDictationLocaleButtonClicked_() {
if (this.areDictationLocalePrefsAllowed_) {
this.showDictationLocaleMenu_ = true;
}
},
/** @private */
onChangeDictationLocalesDialogClosed_() {
this.showDictationLocaleMenu_ = false;
},
});
| ric2b/Vivaldi-browser | chromium/chrome/browser/resources/settings/chromeos/os_a11y_page/manage_a11y_page.js | JavaScript | bsd-3-clause | 23,701 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
using System;
using System.Threading.Tasks;
namespace Azure.Core
{
/// <summary>
/// Represents a method that can handle an event and execute either
/// synchronously or asynchronously.
/// </summary>
/// <typeparam name="T">
/// Type of the event arguments deriving or equal to
/// <see cref="SyncAsyncEventArgs"/>.
/// </typeparam>
/// <param name="e">
/// An <see cref="SyncAsyncEventArgs"/> instance that contains the event
/// data.
/// </param>
/// <returns>
/// A task that represents the handler. You can return
/// <see cref="Task.CompletedTask"/> if implementing a sync handler.
/// Please see the Remarks section for more details.
/// </returns>
/// <example>
/// <para>
/// If you're using the synchronous, blocking methods of a client (i.e.,
/// methods without an Async suffix), they will raise events that require
/// handlers to execute synchronously as well. Even though the signature
/// of your handler returns a <see cref="Task"/>, you should write regular
/// sync code that blocks and return <see cref="Task.CompletedTask"/> when
/// finished.
/// <code snippet="Snippet:Azure_Core_Samples_EventSamples_SyncHandler">
/// var client = new AlarmClient();
/// client.Ring += (SyncAsyncEventArgs e) =>
/// {
/// Console.WriteLine("Wake up!");
/// return Task.CompletedTask;
/// };
///
/// client.Snooze();
/// </code>
/// If you need to call an async method from a synchronous event handler,
/// you have two options. You can use <see cref="Task.Run(Action)"/> to
/// queue a task for execution on the ThreadPool without waiting on it to
/// complete. This "fire and forget" approach may not run before your
/// handler finishes executing. Be sure to understand
/// <see href="https://docs.microsoft.com/dotnet/standard/parallel-programming/exception-handling-task-parallel-library">
/// exception handling in the Task Parallel Library</see> to avoid
/// unhandled exceptions tearing down your process. If you absolutely need
/// the async method to execute before returning from your handler, you can
/// call <c>myAsyncTask.GetAwaiter().GetResult()</c>. Please be aware
/// this may cause ThreadPool starvation. See the sync-over-async note in
/// Remarks for more details.
/// </para>
/// <para>
/// If you're using the asynchronous, non-blocking methods of a client
/// (i.e., methods with an Async suffix), they will raise events that
/// expect handlers to execute asynchronously.
/// <code snippet="Snippet:Azure_Core_Samples_EventSamples_AsyncHandler">
/// var client = new AlarmClient();
/// client.Ring += async (SyncAsyncEventArgs e) =>
/// {
/// await Console.Out.WriteLineAsync("Wake up!");
/// };
///
/// await client.SnoozeAsync();
/// </code>
/// </para>
/// <para>
/// The same event can be raised from both synchronous and asynchronous
/// code paths depending on whether you're calling sync or async methods
/// on a client. If you write an async handler but raise it from a sync
/// method, the handler will be doing sync-over-async and may cause
/// ThreadPool starvation. See the note in Remarks for more details. You
/// should use the <see cref="SyncAsyncEventArgs.IsRunningSynchronously"/>
/// property to check how the event is being raised and implement your
/// handler accordingly. Here's an example handler that's safe to invoke
/// from both sync and async code paths.
/// <code snippet="Snippet:Azure_Core_Samples_EventSamples_CombinedHandler">
/// var client = new AlarmClient();
/// client.Ring += async (SyncAsyncEventArgs e) =>
/// {
/// if (e.IsRunningSynchronously)
/// {
/// Console.WriteLine("Wake up!");
/// }
/// else
/// {
/// await Console.Out.WriteLineAsync("Wake up!");
/// }
/// };
///
/// client.Snooze(); // sync call that blocks
/// await client.SnoozeAsync(); // async call that doesn't block
/// </code>
/// </para>
/// </example>
/// <exception cref="AggregateException">
/// Any exceptions thrown by an event handler will be wrapped in a single
/// AggregateException and thrown from the code that raised the event. You
/// can check the <see cref="AggregateException.InnerExceptions"/> property
/// to see the original exceptions thrown by your event handlers.
/// AggregateException also provides
/// <see href="https://docs.microsoft.com/en-us/archive/msdn-magazine/2009/brownfield/aggregating-exceptions">
/// a number of helpful methods</see> like
/// <see cref="AggregateException.Flatten"/> and
/// <see cref="AggregateException.Handle(Func{Exception, bool})"/> to make
/// complex failures easier to work with.
/// <code snippet="Snippet:Azure_Core_Samples_EventSamples_Exceptions">
/// var client = new AlarmClient();
/// client.Ring += (SyncAsyncEventArgs e) =>
/// throw new InvalidOperationException("Alarm unplugged.");
///
/// try
/// {
/// client.Snooze();
/// }
/// catch (AggregateException ex)
/// {
/// ex.Handle(e => e is InvalidOperationException);
/// Console.WriteLine("Please switch to your backup alarm.");
/// }
/// </code>
/// </exception>
/// <remarks>
/// <para>
/// Most Azure client libraries for .NET offer both synchronous and
/// asynchronous methods for calling Azure services. You can distinguish
/// the asynchronous methods by their Async suffix. For example,
/// BlobClient.Download and BlobClient.DownloadAsync make the same
/// underlying REST call and only differ in whether they block. We
/// recommend using our async methods for new applications, but there are
/// perfectly valid cases for using sync methods as well. These dual
/// method invocation semantics allow for flexibility, but require a little
/// extra care when writing event handlers.
/// </para>
/// <para>
/// The SyncAsyncEventHandler is a delegate used by events in Azure client
/// libraries to represent an event handler that can be invoked from either
/// sync or async code paths. It takes event arguments deriving from
/// <see cref="SyncAsyncEventArgs"/> that contain important information for
/// writing your event handler:
/// <list type="bullet">
/// <item>
/// <description>
/// <see cref="SyncAsyncEventArgs.CancellationToken"/> is a cancellation
/// token related to the original operation that raised the event. It's
/// important for your handler to pass this token along to any asynchronous
/// or long-running synchronous operations that take a token so cancellation
/// (via something like
/// <c>new CancellationTokenSource(TimeSpan.FromSeconds(10)).Token</c>,
/// for example) will correctly propagate.
/// </description>
/// </item>
/// <item>
/// <description>
/// <see cref="SyncAsyncEventArgs.IsRunningSynchronously"/> is a flag indicating
/// whether your handler was invoked synchronously or asynchronously. If
/// you're calling sync methods on your client, you should use sync methods
/// to implement your event handler (you can return
/// <see cref="Task.CompletedTask"/>). If you're calling async methods on
/// your client, you should use async methods where possible to implement
/// your event handler. If you're not in control of how the client will be
/// used or want to write safer code, you should check the
/// <see cref="SyncAsyncEventArgs.IsRunningSynchronously"/> property and call
/// either sync or async methods as directed.
/// </description>
/// </item>
/// <item>
/// <description>
/// Most events will customize the event data by deriving from
/// <see cref="SyncAsyncEventArgs"/> and including details about what
/// triggered the event or providing options to react. Many times this
/// will include a reference to the client that raised the event in case
/// you need it for additional processing.
/// </description>
/// </item>
/// </list>
/// </para>
/// <para>
/// When an event using SyncAsyncEventHandler is raised, the handlers will
/// be executed sequentially to avoid introducing any unintended
/// parallelism. The event handlers will finish before returning control
/// to the code path raising the event. This means blocking for events
/// raised synchronously and waiting for the returned <see cref="Task"/> to
/// complete for events raised asynchronously.
/// </para>
/// <para>
/// Any exceptions thrown from a handler will be wrapped in a single
/// <see cref="AggregateException"/>. If one handler throws an exception,
/// it will not prevent other handlers from running. This is also relevant
/// for cancellation because all handlers are still raised if cancellation
/// occurs. You should both pass <see cref="SyncAsyncEventArgs.CancellationToken"/>
/// to asynchronous or long-running synchronous operations and consider
/// calling <see cref="System.Threading.CancellationToken.ThrowIfCancellationRequested"/>
/// in compute heavy handlers.
/// </para>
/// <para>
/// A <see href="https://github.com/Azure/azure-sdk-for-net/blob/master/sdk/core/Azure.Core/samples/Diagnostics.md#distributed-tracing">
/// distributed tracing span</see> is wrapped around your handlers using
/// the event name so you can see how long your handlers took to run,
/// whether they made other calls to Azure services, and details about any
/// exceptions that were thrown.
/// </para>
/// <para>
/// Executing asynchronous code from a sync code path is commonly referred
/// to as sync-over-async because you're getting sync behavior but still
/// invoking all the async machinery. See
/// <see href="https://docs.microsoft.com/archive/blogs/vancem/diagnosing-net-core-threadpool-starvation-with-perfview-why-my-service-is-not-saturating-all-cores-or-seems-to-stall">
    /// Diagnosing .NET Core ThreadPool Starvation with PerfView</see>
/// for a detailed explanation of how that can cause serious performance
/// problems. We recommend you use the
/// <see cref="SyncAsyncEventArgs.IsRunningSynchronously"/> flag to avoid
/// ThreadPool starvation.
/// </para>
/// </remarks>
public delegate Task SyncAsyncEventHandler<T>(T e)
where T : SyncAsyncEventArgs;
// NOTE: You should always use SyncAsyncEventHandlerExtensions.RaiseAsync
// in Azure.Core's shared source to ensure consistent event handling
// semantics.
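    // Illustrative sketch (not part of the original source): a handler that follows the
    // guidance above by branching on IsRunningSynchronously and forwarding the event's
    // CancellationToken to async work. The "client" variable and its Ring event are
    // assumptions used only for this example.
    //
    //   client.Ring += async (SyncAsyncEventArgs e) =>
    //   {
    //       e.CancellationToken.ThrowIfCancellationRequested();
    //       if (e.IsRunningSynchronously)
    //       {
    //           Console.WriteLine("Wake up!");
    //           return;
    //       }
    //       await Task.Delay(TimeSpan.FromMilliseconds(10), e.CancellationToken);
    //       await Console.Out.WriteLineAsync("Wake up!");
    //   };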
}
| ayeletshpigelman/azure-sdk-for-net | sdk/core/Azure.Core/src/SyncAsyncEventHandler.cs | C# | mit | 11,132 |
<?php
/**
* Model for the Primary Term table.
*
* @package Yoast\YoastSEO\Models
*/
namespace Yoast\WP\SEO\Models;
use Yoast\WP\Lib\Model;
/**
* Primary Term model definition.
*
* @property int $id Identifier.
* @property int $post_id Post ID.
* @property int $term_id Term ID.
* @property string $taxonomy Taxonomy.
* @property int $blog_id Blog ID.
*
* @property string $created_at
* @property string $updated_at
*/
class Primary_Term extends Model {
/**
	 * Whether or not this model uses timestamps.
*
* @var bool
*/
protected $uses_timestamps = true;
/**
* Which columns contain int values.
*
* @var array
*/
protected $int_columns = [
'id',
'post_id',
'term_id',
'blog_id',
];
}
| mandino/www.bloggingshakespeare.com | wp-content/plugins/wordpress-seo/src/models/primary-term.php | PHP | mit | 750 |
// Controller Actions|Scripts|0020
namespace VRTK
{
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using Highlighters;
[System.Serializable]
public class VRTK_ControllerModelElementPaths
{
public string bodyModelPath = "";
public string triggerModelPath = "";
public string leftGripModelPath = "";
public string rightGripModelPath = "";
public string touchpadModelPath = "";
public string appMenuModelPath = "";
public string systemMenuModelPath = "";
}
[System.Serializable]
public struct VRTK_ControllerElementHighlighers
{
public VRTK_BaseHighlighter body;
public VRTK_BaseHighlighter trigger;
public VRTK_BaseHighlighter gripLeft;
public VRTK_BaseHighlighter gripRight;
public VRTK_BaseHighlighter touchpad;
public VRTK_BaseHighlighter appMenu;
public VRTK_BaseHighlighter systemMenu;
}
/// <summary>
/// Event Payload
/// </summary>
/// <param name="controllerIndex">The index of the controller that was used.</param>
public struct ControllerActionsEventArgs
{
public uint controllerIndex;
}
/// <summary>
/// Event Payload
/// </summary>
/// <param name="sender">this object</param>
/// <param name="e"><see cref="ControllerActionsEventArgs"/></param>
public delegate void ControllerActionsEventHandler(object sender, ControllerActionsEventArgs e);
/// <summary>
    /// The Controller Actions script provides helper methods for common actions that can be performed on the controller.
/// </summary>
/// <remarks>
/// The highlighting of the controller is defaulted to use the `VRTK_MaterialColorSwapHighlighter` if no other highlighter is applied to the Object.
/// </remarks>
/// <example>
/// `VRTK/Examples/016_Controller_HapticRumble` demonstrates the ability to hide a controller model and make the controller vibrate for a given length of time at a given intensity.
///
/// `VRTK/Examples/035_Controller_OpacityAndHighlighting` demonstrates the ability to change the opacity of a controller model and to highlight specific elements of a controller such as the buttons or even the entire controller model.
/// </example>
public class VRTK_ControllerActions : MonoBehaviour
{
[Tooltip("A collection of strings that determine the path to the controller model sub elements for identifying the model parts at runtime. If the paths are left empty they will default to the model element paths of the selected SDK Bridge.\n\n"
+ "* The available model sub elements are:\n\n"
+ " * `Body Model Path`: The overall shape of the controller.\n"
+ " * `Trigger Model Path`: The model that represents the trigger button.\n"
+ " * `Grip Left Model Path`: The model that represents the left grip button.\n"
+ " * `Grip Right Model Path`: The model that represents the right grip button.\n"
+ " * `Touchpad Model Path`: The model that represents the touchpad.\n"
+ " * `App Menu Model Path`: The model that represents the application menu button.\n"
+ " * `System Menu Model Path`: The model that represents the system menu button.")]
public VRTK_ControllerModelElementPaths modelElementPaths;
[Tooltip("A collection of highlighter overrides for each controller model sub element. If no highlighter override is given then highlighter on the Controller game object is used.\n\n"
+ "* The available model sub elements are:\n\n"
+ " * `Body`: The highlighter to use on the overall shape of the controller.\n"
+ " * `Trigger`: The highlighter to use on the trigger button.\n"
+ " * `Grip Left`: The highlighter to use on the left grip button.\n"
+ " * `Grip Right`: The highlighter to use on the right grip button.\n"
+ " * `Touchpad`: The highlighter to use on the touchpad.\n"
+ " * `App Menu`: The highlighter to use on the application menu button.\n"
+ " * `System Menu`: The highlighter to use on the system menu button.")]
public VRTK_ControllerElementHighlighers elementHighlighterOverrides;
/// <summary>
/// Emitted when the controller model is toggled to be visible.
/// </summary>
public event ControllerActionsEventHandler ControllerModelVisible;
/// <summary>
/// Emitted when the controller model is toggled to be invisible.
/// </summary>
public event ControllerActionsEventHandler ControllerModelInvisible;
private bool controllerVisible = true;
private ushort hapticPulseStrength;
private uint controllerIndex;
private ushort maxHapticVibration = 3999;
private bool controllerHighlighted = false;
private Dictionary<string, Transform> cachedElements;
private Dictionary<string, object> highlighterOptions;
public virtual void OnControllerModelVisible(ControllerActionsEventArgs e)
{
if (ControllerModelVisible != null)
{
ControllerModelVisible(this, e);
}
}
public virtual void OnControllerModelInvisible(ControllerActionsEventArgs e)
{
if (ControllerModelInvisible != null)
{
ControllerModelInvisible(this, e);
}
}
/// <summary>
        /// The IsControllerVisible method returns true if the controller is currently visible, which is determined by whether the renderers on the controller are enabled.
        /// </summary>
        /// <returns>Returns true if the renderers attached to the controller model are enabled.</returns>
public bool IsControllerVisible()
{
return controllerVisible;
}
/// <summary>
        /// The ToggleControllerModel method is used to turn the controller model on or off by enabling or disabling the renderers on the object. It also works for any custom controllers, and it should not disable any objects being held by the controller if they are a child of the controller object.
/// </summary>
/// <param name="state">The visibility state to toggle the controller to, `true` will make the controller visible - `false` will hide the controller model.</param>
/// <param name="grabbedChildObject">If an object is being held by the controller then this can be passed through to prevent hiding the grabbed game object as well.</param>
public void ToggleControllerModel(bool state, GameObject grabbedChildObject)
{
if (!enabled)
{
return;
}
foreach (MeshRenderer renderer in GetComponentsInChildren<MeshRenderer>())
{
if (renderer.gameObject != grabbedChildObject && (grabbedChildObject == null || !renderer.transform.IsChildOf(grabbedChildObject.transform)))
{
renderer.enabled = state;
}
}
foreach (SkinnedMeshRenderer renderer in GetComponentsInChildren<SkinnedMeshRenderer>())
{
if (renderer.gameObject != grabbedChildObject && (grabbedChildObject == null || !renderer.transform.IsChildOf(grabbedChildObject.transform)))
{
renderer.enabled = state;
}
}
controllerVisible = state;
if (state)
{
OnControllerModelVisible(SetActionEvent(controllerIndex));
}
else
{
OnControllerModelInvisible(SetActionEvent(controllerIndex));
}
}
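        // Illustrative usage sketch (not part of the original source). "controllerObject" is
        // assumed to be the controller GameObject this script is attached to, and "heldObject"
        // a hypothetical object currently grabbed by the controller that should stay visible.
        //
        //   var actions = controllerObject.GetComponent<VRTK_ControllerActions>();
        //   actions.ToggleControllerModel(false, heldObject);  // hide the controller model
        //   actions.ToggleControllerModel(true, heldObject);   // show it again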
/// <summary>
        /// The SetControllerOpacity method allows the opacity of the controller model to be changed to make the controller more transparent. A lower alpha value will make the object more transparent; for example, `0.5f` will make the controller partially transparent, whereas `0f` will make the controller completely transparent.
/// </summary>
/// <param name="alpha">The alpha level to apply to opacity of the controller object. `0f` to `1f`.</param>
public void SetControllerOpacity(float alpha)
{
if (!enabled)
{
return;
}
alpha = Mathf.Clamp(alpha, 0f, 1f);
foreach (var renderer in gameObject.GetComponentsInChildren<Renderer>())
{
if (alpha < 1f)
{
renderer.material.SetInt("_SrcBlend", (int)UnityEngine.Rendering.BlendMode.One);
renderer.material.SetInt("_DstBlend", (int)UnityEngine.Rendering.BlendMode.OneMinusSrcAlpha);
renderer.material.SetInt("_ZWrite", 0);
renderer.material.DisableKeyword("_ALPHATEST_ON");
renderer.material.DisableKeyword("_ALPHABLEND_ON");
renderer.material.EnableKeyword("_ALPHAPREMULTIPLY_ON");
renderer.material.renderQueue = 3000;
}
else
{
renderer.material.SetInt("_SrcBlend", (int)UnityEngine.Rendering.BlendMode.One);
renderer.material.SetInt("_DstBlend", (int)UnityEngine.Rendering.BlendMode.Zero);
renderer.material.SetInt("_ZWrite", 1);
renderer.material.DisableKeyword("_ALPHATEST_ON");
renderer.material.DisableKeyword("_ALPHABLEND_ON");
renderer.material.DisableKeyword("_ALPHAPREMULTIPLY_ON");
renderer.material.renderQueue = -1;
}
if (renderer.material.HasProperty("_Color"))
{
renderer.material.color = new Color(renderer.material.color.r, renderer.material.color.g, renderer.material.color.b, alpha);
}
}
}
/// <summary>
        /// The HighlightControllerElement method allows for an element of the controller to have its colour changed to simulate a highlighting effect of that element on the controller. It's useful for drawing a user's attention to a specific button on the controller.
/// </summary>
/// <param name="element">The element of the controller to apply the highlight to.</param>
/// <param name="highlight">The colour of the highlight.</param>
/// <param name="fadeDuration">The duration of fade from white to the highlight colour. Optional parameter defaults to `0f`.</param>
public void HighlightControllerElement(GameObject element, Color? highlight, float fadeDuration = 0f)
{
if (!enabled)
{
return;
}
var highlighter = element.GetComponent<VRTK_BaseHighlighter>();
if (highlighter)
{
highlighter.Highlight(highlight ?? Color.white, fadeDuration);
}
}
/// <summary>
/// The UnhighlightControllerElement method is the inverse of the HighlightControllerElement method and resets the controller element to its original colour.
/// </summary>
/// <param name="element">The element of the controller to remove the highlight from.</param>
public void UnhighlightControllerElement(GameObject element)
{
if (!enabled)
{
return;
}
var highlighter = element.GetComponent<VRTK_BaseHighlighter>();
if (highlighter)
{
highlighter.Unhighlight();
}
}
/// <summary>
/// The ToggleHighlightControllerElement method is a shortcut method that makes it easier to highlight and unhighlight a controller element in a single method rather than using the HighlightControllerElement and UnhighlightControllerElement methods separately.
/// </summary>
/// <param name="state">The highlight colour state, `true` will enable the highlight on the given element and `false` will remove the highlight from the given element.</param>
/// <param name="element">The element of the controller to apply the highlight to.</param>
/// <param name="highlight">The colour of the highlight.</param>
/// <param name="duration">The duration of fade from white to the highlight colour.</param>
public void ToggleHighlightControllerElement(bool state, GameObject element, Color? highlight = null, float duration = 0f)
{
if (element)
{
if (state)
{
HighlightControllerElement(element.gameObject, highlight ?? Color.white, duration);
}
else
{
UnhighlightControllerElement(element.gameObject);
}
}
}
/// <summary>
/// The ToggleHighlightTrigger method is a shortcut method that makes it easier to toggle the highlight state of the controller trigger element.
/// </summary>
/// <param name="state">The highlight colour state, `true` will enable the highlight on the trigger and `false` will remove the highlight from the trigger.</param>
/// <param name="highlight">The colour to highlight the trigger with.</param>
/// <param name="duration">The duration of fade from white to the highlight colour.</param>
public void ToggleHighlightTrigger(bool state, Color? highlight = null, float duration = 0f)
{
if (!state && controllerHighlighted)
{
return;
}
ToggleHighlightAlias(state, modelElementPaths.triggerModelPath, highlight, duration);
}
/// <summary>
/// The ToggleHighlightGrip method is a shortcut method that makes it easier to toggle the highlight state of the controller grip element.
/// </summary>
/// <param name="state">The highlight colour state, `true` will enable the highlight on the grip and `false` will remove the highlight from the grip.</param>
/// <param name="highlight">The colour to highlight the grip with.</param>
/// <param name="duration">The duration of fade from white to the highlight colour.</param>
public void ToggleHighlightGrip(bool state, Color? highlight = null, float duration = 0f)
{
if (!state && controllerHighlighted)
{
return;
}
ToggleHighlightAlias(state, modelElementPaths.leftGripModelPath, highlight, duration);
ToggleHighlightAlias(state, modelElementPaths.rightGripModelPath, highlight, duration);
}
/// <summary>
/// The ToggleHighlightTouchpad method is a shortcut method that makes it easier to toggle the highlight state of the controller touchpad element.
/// </summary>
/// <param name="state">The highlight colour state, `true` will enable the highlight on the touchpad and `false` will remove the highlight from the touchpad.</param>
/// <param name="highlight">The colour to highlight the touchpad with.</param>
/// <param name="duration">The duration of fade from white to the highlight colour.</param>
public void ToggleHighlightTouchpad(bool state, Color? highlight = null, float duration = 0f)
{
if (!state && controllerHighlighted)
{
return;
}
ToggleHighlightAlias(state, modelElementPaths.touchpadModelPath, highlight, duration);
}
/// <summary>
/// The ToggleHighlightApplicationMenu method is a shortcut method that makes it easier to toggle the highlight state of the controller application menu element.
/// </summary>
/// <param name="state">The highlight colour state, `true` will enable the highlight on the application menu and `false` will remove the highlight from the application menu.</param>
/// <param name="highlight">The colour to highlight the application menu with.</param>
/// <param name="duration">The duration of fade from white to the highlight colour.</param>
public void ToggleHighlightApplicationMenu(bool state, Color? highlight = null, float duration = 0f)
{
if (!state && controllerHighlighted)
{
return;
}
ToggleHighlightAlias(state, modelElementPaths.appMenuModelPath, highlight, duration);
}
/// <summary>
/// The ToggleHighlighBody method is a shortcut method that makes it easier to toggle the highlight state of the controller body element.
/// </summary>
/// <param name="state">The highlight colour state, `true` will enable the highlight on the body and `false` will remove the highlight from the body.</param>
/// <param name="highlight">The colour to highlight the body with.</param>
/// <param name="duration">The duration of fade from white to the highlight colour.</param>
public void ToggleHighlighBody(bool state, Color? highlight = null, float duration = 0f)
{
if (!state && controllerHighlighted)
{
return;
}
ToggleHighlightAlias(state, modelElementPaths.bodyModelPath, highlight, duration);
}
/// <summary>
/// The ToggleHighlightController method is a shortcut method that makes it easier to toggle the highlight state of the entire controller.
/// </summary>
/// <param name="state">The highlight colour state, `true` will enable the highlight on the entire controller `false` will remove the highlight from the entire controller.</param>
/// <param name="highlight">The colour to highlight the entire controller with.</param>
/// <param name="duration">The duration of fade from white to the highlight colour.</param>
public void ToggleHighlightController(bool state, Color? highlight = null, float duration = 0f)
{
controllerHighlighted = state;
ToggleHighlightTrigger(state, highlight, duration);
ToggleHighlightGrip(state, highlight, duration);
ToggleHighlightTouchpad(state, highlight, duration);
ToggleHighlightApplicationMenu(state, highlight, duration);
ToggleHighlightAlias(state, modelElementPaths.systemMenuModelPath, highlight, duration);
ToggleHighlightAlias(state, modelElementPaths.bodyModelPath, highlight, duration);
}
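        // Illustrative usage sketch (not part of the original source): highlight the whole
        // controller in yellow with a short fade, then remove the highlight again.
        // "controllerObject" is an assumption for the example only.
        //
        //   var actions = controllerObject.GetComponent<VRTK_ControllerActions>();
        //   actions.ToggleHighlightController(true, Color.yellow, 0.5f);
        //   actions.ToggleHighlightController(false);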
/// <summary>
        /// The TriggerHapticPulse/1 method triggers a single haptic pulse on the controller for a single tick.
/// </summary>
/// <param name="strength">The intensity of the rumble of the controller motor. `0` to `3999`.</param>
public void TriggerHapticPulse(ushort strength)
{
if (!enabled)
{
return;
}
hapticPulseStrength = (strength <= maxHapticVibration ? strength : maxHapticVibration);
VRTK_SDK_Bridge.HapticPulseOnIndex(controllerIndex, hapticPulseStrength);
}
/// <summary>
/// The TriggerHapticPulse/3 method calls a haptic pulse for a specified amount of time rather than just a single tick. Each pulse can be separated by providing a `pulseInterval` to pause between each haptic pulse.
/// </summary>
/// <param name="strength">The intensity of the rumble of the controller motor. `0` to `3999`.</param>
/// <param name="duration">The length of time the rumble should continue for.</param>
/// <param name="pulseInterval">The interval to wait between each haptic pulse.</param>
public void TriggerHapticPulse(ushort strength, float duration, float pulseInterval)
{
if (!enabled)
{
return;
}
hapticPulseStrength = (strength <= maxHapticVibration ? strength : maxHapticVibration);
StartCoroutine(HapticPulse(duration, hapticPulseStrength, pulseInterval));
}
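        // Illustrative usage sketch (not part of the original source): rumble at roughly half
        // strength for one second, pulsing every 0.1 seconds. Strength is clamped to 3999 by
        // this class. "controllerObject" is an assumption for the example only.
        //
        //   var actions = controllerObject.GetComponent<VRTK_ControllerActions>();
        //   actions.TriggerHapticPulse(2000, 1f, 0.1f);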
/// <summary>
/// The InitaliseHighlighters method sets up the highlighters on the controller model.
/// </summary>
public void InitaliseHighlighters()
{
highlighterOptions = new Dictionary<string, object>();
highlighterOptions.Add("resetMainTexture", true);
VRTK_BaseHighlighter objectHighlighter = Utilities.GetActiveHighlighter(gameObject);
if (objectHighlighter == null)
{
objectHighlighter = gameObject.AddComponent<VRTK_MaterialColorSwapHighlighter>();
}
objectHighlighter.Initialise(null, highlighterOptions);
AddHighlighterToElement(GetElementTransform(VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.ApplicationMenu)), objectHighlighter, elementHighlighterOverrides.appMenu);
AddHighlighterToElement(GetElementTransform(VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.Body)), objectHighlighter, elementHighlighterOverrides.body);
AddHighlighterToElement(GetElementTransform(VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.GripLeft)), objectHighlighter, elementHighlighterOverrides.gripLeft);
AddHighlighterToElement(GetElementTransform(VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.GripRight)), objectHighlighter, elementHighlighterOverrides.gripRight);
AddHighlighterToElement(GetElementTransform(VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.SystemMenu)), objectHighlighter, elementHighlighterOverrides.systemMenu);
AddHighlighterToElement(GetElementTransform(VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.Touchpad)), objectHighlighter, elementHighlighterOverrides.touchpad);
AddHighlighterToElement(GetElementTransform(VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.Trigger)), objectHighlighter, elementHighlighterOverrides.trigger);
}
private void Awake()
{
gameObject.layer = LayerMask.NameToLayer("Ignore Raycast");
cachedElements = new Dictionary<string, Transform>();
var controllerHand = VRTK_DeviceFinder.GetControllerHand(gameObject);
if (modelElementPaths.bodyModelPath.Trim() == "")
{
modelElementPaths.bodyModelPath = VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.Body, controllerHand);
}
if (modelElementPaths.triggerModelPath.Trim() == "")
{
modelElementPaths.triggerModelPath = VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.Trigger, controllerHand);
}
if (modelElementPaths.leftGripModelPath.Trim() == "")
{
modelElementPaths.leftGripModelPath = VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.GripLeft, controllerHand);
}
if (modelElementPaths.rightGripModelPath.Trim() == "")
{
modelElementPaths.rightGripModelPath = VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.GripRight, controllerHand);
}
if (modelElementPaths.touchpadModelPath.Trim() == "")
{
modelElementPaths.touchpadModelPath = VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.Touchpad, controllerHand);
}
if (modelElementPaths.appMenuModelPath.Trim() == "")
{
modelElementPaths.appMenuModelPath = VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.ApplicationMenu, controllerHand);
}
if (modelElementPaths.systemMenuModelPath.Trim() == "")
{
modelElementPaths.systemMenuModelPath = VRTK_SDK_Bridge.GetControllerElementPath(SDK_Base.ControllerElelements.SystemMenu, controllerHand);
}
}
private void OnEnable()
{
StartCoroutine(WaitForModel());
}
private void Update()
{
controllerIndex = VRTK_DeviceFinder.GetControllerIndex(gameObject);
}
private IEnumerator WaitForModel()
{
while (GetElementTransform(modelElementPaths.bodyModelPath) == null)
{
yield return null;
}
InitaliseHighlighters();
}
private void AddHighlighterToElement(Transform element, VRTK_BaseHighlighter parentHighlighter, VRTK_BaseHighlighter overrideHighlighter)
{
if (element)
{
var highlighter = (overrideHighlighter != null ? overrideHighlighter : parentHighlighter);
VRTK_BaseHighlighter clonedHighlighter = (VRTK_BaseHighlighter)Utilities.CloneComponent(highlighter, element.gameObject);
clonedHighlighter.Initialise(null, highlighterOptions);
}
}
private IEnumerator HapticPulse(float duration, ushort hapticPulseStrength, float pulseInterval)
{
if (pulseInterval <= 0)
{
yield break;
}
while (duration > 0)
{
VRTK_SDK_Bridge.HapticPulseOnIndex(controllerIndex, hapticPulseStrength);
yield return new WaitForSeconds(pulseInterval);
duration -= pulseInterval;
}
}
private IEnumerator CycleColor(Material material, Color startColor, Color endColor, float duration)
{
var elapsedTime = 0f;
while (elapsedTime <= duration)
{
elapsedTime += Time.deltaTime;
if (material.HasProperty("_Color"))
{
material.color = Color.Lerp(startColor, endColor, (elapsedTime / duration));
}
yield return null;
}
}
private Transform GetElementTransform(string path)
{
if (cachedElements == null)
{
return null;
}
if (!cachedElements.ContainsKey(path) || cachedElements[path] == null)
{
cachedElements[path] = transform.Find(path);
}
return cachedElements[path];
}
private void ToggleHighlightAlias(bool state, string transformPath, Color? highlight, float duration = 0f)
{
var element = GetElementTransform(transformPath);
if (element)
{
ToggleHighlightControllerElement(state, element.gameObject, highlight, duration);
}
}
private ControllerActionsEventArgs SetActionEvent(uint index)
{
ControllerActionsEventArgs e;
e.controllerIndex = index;
return e;
}
}
} | belzeba/VR | Assets/VRTK/Scripts/VRTK_ControllerActions.cs | C# | mit | 27,336 |
import { RouterConfig } from "v2/Artsy/Router"
import { buildClientApp } from "v2/Artsy/Router/buildClientApp"
import {
createMockNetworkLayer,
createMockNetworkLayer2,
} from "v2/DevTools/createMockNetworkLayer"
import { HistoryOptions } from "farce"
import { RouteConfig } from "found"
import { IMocks } from "graphql-tools/dist/Interfaces"
import React from "react"
import { getUser } from "v2/Utils/user"
interface Props {
routes: RouteConfig[]
initialRoute?: string
initialState?: object
historyOptions?: HistoryOptions
mockResolvers?: IMocks
mockData?: object
mockMutationResults?: object
context?: RouterConfig["context"]
}
export class MockRouter extends React.Component<Props> {
state = {
ClientApp: null,
}
static defaultProps = {
initialRoute: "/",
}
async componentDidMount() {
const {
routes,
initialRoute,
historyOptions,
mockResolvers,
mockData,
mockMutationResults,
context,
} = this.props
try {
const user = getUser(context && context.user)
const relayEnvironment = mockResolvers
? createMockNetworkLayer(mockResolvers)
: mockData || mockMutationResults
? createMockNetworkLayer2({ mockData, mockMutationResults })
: undefined
const { ClientApp } = await buildClientApp({
routes,
initialRoute,
history: {
protocol: "memory",
options: historyOptions,
},
context: {
...context,
user,
relayEnvironment,
} as any,
})
this.setState({
ClientApp,
})
} catch (error) {
console.error("MockRouter", error)
}
}
render() {
const { ClientApp } = this.state
return (
<React.Fragment>
{ClientApp && <ClientApp {...this.props.initialState} />}
</React.Fragment>
)
}
}
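// Illustrative usage sketch (not part of the original source): mounting MockRouter in a test
// with mocked GraphQL resolvers. The route path, the ExampleApp component, and the Enzyme-style
// mount helper are assumptions used only for this example.
//
//   const wrapper = mount(
//     <MockRouter
//       routes={[{ path: "/example", Component: ExampleApp }]}
//       initialRoute="/example"
//       mockResolvers={{ Artist: () => ({ name: "Example Artist" }) }}
//     />
//   )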
| erikdstock/force | src/v2/DevTools/MockRouter.tsx | TypeScript | mit | 1,901 |
<?php
namespace Kunstmaan\TaggingBundle\Tests\DependencyInjection;
use Kunstmaan\TaggingBundle\DependencyInjection\Configuration;
use Matthias\SymfonyConfigTest\PhpUnit\ConfigurationTestCaseTrait;
use PHPUnit\Framework\TestCase;
/**
* Class ConfigurationTest
*/
class ConfigurationTest extends TestCase
{
use ConfigurationTestCaseTrait;
/**
* @return \Symfony\Component\Config\Definition\ConfigurationInterface
*/
protected function getConfiguration()
{
return new Configuration();
}
public function testProcessedValueContainsRequiredValue()
{
$array = [];
$this->assertProcessedConfigurationEquals([$array], $array);
}
}
| mwoynarski/KunstmaanBundlesCMS | src/Kunstmaan/TaggingBundle/Tests/unit/DependencyInjection/ConfigurationTest.php | PHP | mit | 696 |
<?php
namespace Oro\Bundle\NavigationBundle\Tests\Unit\Event;
use Oro\Bundle\UserBundle\Entity\User;
use Oro\Bundle\NavigationBundle\Entity\NavigationHistoryItem;
use Oro\Bundle\NavigationBundle\Event\ResponseHistoryListener;
use Oro\Bundle\NavigationBundle\Provider\TitleService;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpKernel\HttpKernelInterface;
class ResponseHistoryListenerTest extends \PHPUnit_Framework_TestCase
{
/**
* @var \Doctrine\ORM\EntityManager
*/
protected $em;
/**
* @var \Symfony\Component\Security\Core\SecurityContextInterface
*/
protected $securityContext;
/**
* @var ResponseHistoryListener
*/
protected $listener;
/**
* @var NavigationHistoryItem
*/
protected $item;
/**
* @var \Oro\Bundle\NavigationBundle\Entity\Builder\ItemFactory
*/
protected $factory;
/**
* @var Request
*/
protected $request;
/**
* @var TitleService
*/
protected $titleService;
/**
* @var string
*/
protected $serializedTitle;
public function setUp()
{
$this->factory = $this->getMock('Oro\Bundle\NavigationBundle\Entity\Builder\ItemFactory');
$this->securityContext = $this->getMock('Symfony\Component\Security\Core\SecurityContextInterface');
$user = new User();
$user->setEmail('some@email.com');
$token = $this->getMock('Symfony\Component\Security\Core\Authentication\Token\TokenInterface');
$token->expects($this->exactly(2))
->method('getUser')
->will($this->returnValue($user));
$this->securityContext->expects($this->any())
->method('getToken')
->will($this->returnValue($token));
$this->item = $this->getMock('Oro\Bundle\NavigationBundle\Entity\NavigationHistoryItem');
$this->serializedTitle = json_encode(array('titleTemplate' => 'Test title template'));
}
public function testOnResponse()
{
$response = $this->getResponse();
$repository = $this->getDefaultRepositoryMock($this->item);
$em = $this->getEntityManager($repository);
$listener = $this->getListener($this->factory, $this->securityContext, $em);
$listener->onResponse($this->getEventMock($this->getRequest(), $response));
}
public function testTitle()
{
$this->item->expects($this->once())
->method('setTitle')
->with($this->equalTo($this->serializedTitle));
$response = $this->getResponse();
$repository = $this->getDefaultRepositoryMock($this->item);
$em = $this->getEntityManager($repository);
$listener = $this->getListener($this->factory, $this->securityContext, $em);
$listener->onResponse($this->getEventMock($this->getRequest(), $response));
}
public function testNewItem()
{
$user = new User();
$user->setEmail('some@email.com');
$this->factory->expects($this->once())
->method('createItem')
->will($this->returnValue($this->item));
$repository = $this->getDefaultRepositoryMock(null);
$em = $this->getEntityManager($repository);
$listener = $this->getListener($this->factory, $this->securityContext, $em);
$response = $this->getResponse();
$listener->onResponse($this->getEventMock($this->getRequest(), $response));
}
public function testNotMasterRequest()
{
$event = $this->getMockBuilder('Symfony\Component\HttpKernel\Event\FilterResponseEvent')
->disableOriginalConstructor()
->getMock();
$event->expects($this->never())
->method('getRequest');
$event->expects($this->never())
->method('getResponse');
$event->expects($this->once())
->method('getRequestType')
->will($this->returnValue(HttpKernelInterface::SUB_REQUEST));
$em = $this->getMockBuilder('Doctrine\ORM\EntityManager')
->disableOriginalConstructor()
->getMock();
$em->expects($this->never())
->method('getRepository');
$titleService = $this->getMock('Oro\Bundle\NavigationBundle\Provider\TitleServiceInterface');
$listener = new ResponseHistoryListener($this->factory, $this->securityContext, $em, $titleService);
$listener->onResponse($event);
}
/**
* Get the mock of the GetResponseEvent and FilterResponseEvent.
*
* @param \Symfony\Component\HttpFoundation\Request $request
* @param null|\Symfony\Component\HttpFoundation\Response $response
* @param string $type
*
* @return mixed
*/
private function getEventMock($request, $response, $type = 'Symfony\Component\HttpKernel\Event\FilterResponseEvent')
{
$event = $this->getMockBuilder($type)
->disableOriginalConstructor()
->getMock();
$event->expects($this->any())
->method('getRequest')
->will($this->returnValue($request));
$event->expects($this->any())
->method('getRequestType')
->will($this->returnValue(HttpKernelInterface::MASTER_REQUEST));
$event->expects($this->any())
->method('getResponse')
->will($this->returnValue($response));
return $event;
}
/**
* Creates request mock object
*
* @return Request
*/
private function getRequest()
{
$this->request = $this->getMock('Symfony\Component\HttpFoundation\Request');
$this->request->expects($this->once())
->method('getRequestFormat')
->will($this->returnValue('html'));
$this->request->expects($this->once())
->method('getMethod')
->will($this->returnValue('GET'));
$this->request->expects($this->once())
->method('get')
->with('_route')
->will($this->returnValue('test_route'));
return $this->request;
}
/**
* Creates response object mock
*
* @return \PHPUnit_Framework_MockObject_MockObject
*/
private function getResponse()
{
$response = $this->getMock('Symfony\Component\HttpFoundation\Response');
$response->expects($this->once())
->method('getStatusCode')
->will($this->returnValue(200));
return $response;
}
public function getTitleService()
{
$this->titleService = $this->getMock('Oro\Bundle\NavigationBundle\Provider\TitleServiceInterface');
$this->titleService->expects($this->once())
->method('getSerialized')
->will($this->returnValue($this->serializedTitle));
return $this->titleService;
}
/**
* @param \Oro\Bundle\NavigationBundle\Entity\Builder\ItemFactory $factory
* @param \Symfony\Component\Security\Core\SecurityContextInterface $securityContext
* @param \Doctrine\ORM\EntityManager $entityManager
* @return ResponseHistoryListener
*/
private function getListener($factory, $securityContext, $entityManager)
{
return new ResponseHistoryListener($factory, $securityContext, $entityManager, $this->getTitleService());
}
/**
* Returns EntityManager
*
* @param \Oro\Bundle\NavigationBundle\Entity\Repository\HistoryItemRepository $repositoryMock
* @return \Doctrine\ORM\EntityManager $entityManager
*/
private function getEntityManager($repositoryMock)
{
$this->em = $this->getMockBuilder('Doctrine\ORM\EntityManager')
->disableOriginalConstructor()
->getMock();
$this->em->expects($this->once())
->method('getRepository')
->with($this->equalTo('Oro\Bundle\NavigationBundle\Entity\NavigationHistoryItem'))
->will($this->returnValue($repositoryMock));
return $this->em;
}
/**
* Prepare repository mock
*
* @param mixed $returnValue
* @return \PHPUnit_Framework_MockObject_MockObject
*/
private function getDefaultRepositoryMock($returnValue)
{
$repository = $this->getMockBuilder('Oro\Bundle\NavigationBundle\Entity\Repository\HistoryItemRepository')
->disableOriginalConstructor()
->getMock();
$repository->expects($this->once())
->method('findOneBy')
->will($this->returnValue($returnValue));
return $repository;
}
}
| minhnguyen-balance/oro_platform | vendor/oro/platform/src/Oro/Bundle/NavigationBundle/Tests/Unit/Event/ResponseHistoryListenerTest.php | PHP | mit | 8,766 |
<?php
declare(strict_types=1);
/*
* This file is part of SolidInvoice project.
*
* (c) Pierre du Plessis <open-source@solidworx.co>
*
* This source file is subject to the MIT license that is bundled
* with this source code in the file LICENSE.
*/
namespace SolidInvoice\CoreBundle\Form\Type;
use Symfony\Component\Form\AbstractType;
use Symfony\Component\Form\DataTransformerInterface;
use Symfony\Component\Form\Exception\TransformationFailedException;
use Symfony\Component\Form\Extension\Core\Type\FileType;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\HttpFoundation\File\File;
use Symfony\Component\HttpFoundation\File\UploadedFile;
class ImageUploadType extends AbstractType
{
public function buildForm(FormBuilderInterface $builder, array $options)
{
$builder->addModelTransformer(new class() implements DataTransformerInterface {
private $file;
public function transform($value)
{
if (null !== $value) {
$this->file = $value;
}
return new File('', false);
}
public function reverseTransform($value)
{
if (null === $value && null !== $this->file) {
return $this->file;
}
if (!$value instanceof UploadedFile) {
return;
}
if (!$value->isValid()) {
throw new TransformationFailedException();
}
return $value->guessExtension().'|'.base64_encode(file_get_contents($value->getPathname()));
}
});
}
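    // Illustrative note (not part of the original source): reverseTransform() stores the upload
    // as "<extension>|<base64 data>", so a consumer could split it back apart roughly like this
    // (variable names are assumptions for the example only):
    //
    //   [$extension, $encoded] = explode('|', $storedValue, 2);
    //   $binary = base64_decode($encoded);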
public function getParent(): string
{
return FileType::class;
}
public function getBlockPrefix()
{
return 'image_upload';
}
}
| pierredup/CSBill | src/CoreBundle/Form/Type/ImageUploadType.php | PHP | mit | 1,863 |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
#include "common.h"
#ifdef FEATURE_INTERPRETER
#include "interpreter.h"
#include "interpreter.hpp"
#include "cgencpu.h"
#include "stublink.h"
#include "openum.h"
#include "fcall.h"
#include "frames.h"
#include "gcheaputilities.h"
#include <float.h>
#include "jitinterface.h"
#include "safemath.h"
#include "exceptmacros.h"
#include "runtimeexceptionkind.h"
#include "runtimehandles.h"
#include "vars.hpp"
#include "cycletimer.h"
inline CORINFO_CALLINFO_FLAGS combine(CORINFO_CALLINFO_FLAGS flag1, CORINFO_CALLINFO_FLAGS flag2)
{
return (CORINFO_CALLINFO_FLAGS) (flag1 | flag2);
}
static CorInfoType asCorInfoType(CORINFO_CLASS_HANDLE clsHnd)
{
TypeHandle typeHnd(clsHnd);
return CEEInfo::asCorInfoType(typeHnd.GetInternalCorElementType(), typeHnd, NULL);
}
InterpreterMethodInfo::InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo)
: m_method(methInfo->ftn),
m_module(methInfo->scope),
m_jittedCode(0),
m_ILCode(methInfo->ILCode),
m_ILCodeEnd(methInfo->ILCode + methInfo->ILCodeSize),
m_maxStack(methInfo->maxStack),
#if INTERP_PROFILE
m_totIlInstructionsExeced(0),
m_maxIlInstructionsExeced(0),
#endif
m_ehClauseCount(methInfo->EHcount),
m_varArgHandleArgNum(NO_VA_ARGNUM),
m_numArgs(methInfo->args.numArgs),
m_numLocals(methInfo->locals.numArgs),
m_flags(0),
m_argDescs(NULL),
m_returnType(methInfo->args.retType),
m_invocations(0),
m_methodCache(NULL)
{
// Overflow sanity check. (Can ILCodeSize ever be zero?)
assert(m_ILCode <= m_ILCodeEnd);
// Does the calling convention indicate an implicit "this" (first arg) or generic type context arg (last arg)?
SetFlag<Flag_hasThisArg>((methInfo->args.callConv & CORINFO_CALLCONV_HASTHIS) != 0);
if (GetFlag<Flag_hasThisArg>())
{
GCX_PREEMP();
CORINFO_CLASS_HANDLE methClass = comp->getMethodClass(methInfo->ftn);
DWORD attribs = comp->getClassAttribs(methClass);
SetFlag<Flag_thisArgIsObjPtr>((attribs & CORINFO_FLG_VALUECLASS) == 0);
}
#if INTERP_PROFILE || defined(_DEBUG)
{
const char* clsName;
#if defined(_DEBUG)
m_methName = ::eeGetMethodFullName(comp, methInfo->ftn, &clsName);
#else
m_methName = comp->getMethodName(methInfo->ftn, &clsName);
#endif
char* myClsName = new char[strlen(clsName) + 1];
strcpy(myClsName, clsName);
m_clsName = myClsName;
}
#endif // INTERP_PROFILE
// Do we have a ret buff? If its a struct or refany, then *maybe*, depending on architecture...
bool hasRetBuff = (methInfo->args.retType == CORINFO_TYPE_VALUECLASS || methInfo->args.retType == CORINFO_TYPE_REFANY);
#if defined(FEATURE_HFA)
    // ... unless it's an HFA type (and not varargs)...
if (hasRetBuff && CorInfoTypeIsFloatingPoint(comp->getHFAType(methInfo->args.retTypeClass)) && methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG)
{
hasRetBuff = false;
}
#endif
#if defined(_ARM_) || defined(_AMD64_)|| defined(_ARM64_)
// ...or it fits into one register.
if (hasRetBuff && getClassSize(methInfo->args.retTypeClass) <= sizeof(void*))
{
hasRetBuff = false;
}
#endif
SetFlag<Flag_hasRetBuffArg>(hasRetBuff);
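    // Illustrative example (not part of the original comments): on AMD64 a method returning a
    // 16-byte struct keeps hasRetBuff == true, while one returning an 8-byte struct fits in a
    // single register and has the flag cleared by the size check above.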
MetaSig sig(reinterpret_cast<MethodDesc*>(methInfo->ftn));
SetFlag<Flag_hasGenericsContextArg>((methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0);
SetFlag<Flag_isVarArg>((methInfo->args.callConv & CORINFO_CALLCONV_VARARG) != 0);
SetFlag<Flag_typeHasGenericArgs>(methInfo->args.sigInst.classInstCount > 0);
SetFlag<Flag_methHasGenericArgs>(methInfo->args.sigInst.methInstCount > 0);
_ASSERTE_MSG(!GetFlag<Flag_hasGenericsContextArg>()
|| ((GetFlag<Flag_typeHasGenericArgs>() & !(GetFlag<Flag_hasThisArg>() && GetFlag<Flag_thisArgIsObjPtr>())) || GetFlag<Flag_methHasGenericArgs>()),
"If the method takes a generic parameter, is a static method of generic class (or meth of a value class), and/or itself takes generic parameters");
if (GetFlag<Flag_hasThisArg>())
{
m_numArgs++;
}
if (GetFlag<Flag_hasRetBuffArg>())
{
m_numArgs++;
}
if (GetFlag<Flag_isVarArg>())
{
m_numArgs++;
}
if (GetFlag<Flag_hasGenericsContextArg>())
{
m_numArgs++;
}
if (m_numArgs == 0)
{
m_argDescs = NULL;
}
else
{
m_argDescs = new ArgDesc[m_numArgs];
}
// Now we'll do the locals.
m_localDescs = new LocalDesc[m_numLocals];
// Allocate space for the pinning reference bits (lazily).
m_localIsPinningRefBits = NULL;
// Now look at each local.
CORINFO_ARG_LIST_HANDLE localsPtr = methInfo->locals.args;
CORINFO_CLASS_HANDLE vcTypeRet;
unsigned curLargeStructOffset = 0;
for (unsigned k = 0; k < methInfo->locals.numArgs; k++)
{
// TODO: if this optimization succeeds, the switch below on localType
// can become much simpler.
m_localDescs[k].m_offset = 0;
#ifdef _DEBUG
vcTypeRet = NULL;
#endif
CorInfoTypeWithMod localTypWithMod = comp->getArgType(&methInfo->locals, localsPtr, &vcTypeRet);
// If the local vars is a pinning reference, set the bit to indicate this.
if ((localTypWithMod & CORINFO_TYPE_MOD_PINNED) != 0)
{
SetPinningBit(k);
}
CorInfoType localType = strip(localTypWithMod);
switch (localType)
{
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY: // Just a special case: vcTypeRet is handle for TypedReference in this case...
{
InterpreterType tp = InterpreterType(comp, vcTypeRet);
unsigned size = static_cast<unsigned>(tp.Size(comp));
size = max(size, sizeof(void*));
m_localDescs[k].m_type = tp;
if (tp.IsLargeStruct(comp))
{
m_localDescs[k].m_offset = curLargeStructOffset;
curLargeStructOffset += size;
}
}
break;
case CORINFO_TYPE_VAR:
NYI_INTERP("argument of generic parameter type"); // Should not happen;
break;
default:
m_localDescs[k].m_type = InterpreterType(localType);
break;
}
m_localDescs[k].m_typeStackNormal = m_localDescs[k].m_type.StackNormalize();
localsPtr = comp->getArgNext(localsPtr);
}
m_largeStructLocalSize = curLargeStructOffset;
}
void InterpreterMethodInfo::InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo, short* argOffsets_)
{
unsigned numSigArgsPlusThis = methInfo->args.numArgs;
if (GetFlag<Flag_hasThisArg>())
{
numSigArgsPlusThis++;
}
// The m_argDescs array is constructed in the following "canonical" order:
// 1. 'this' pointer
// 2. signature arguments
// 3. return buffer
// 4. type parameter -or- vararg cookie
//
// argOffsets_ is passed in this order, and serves to establish the offsets to arguments
// when the interpreter is invoked using the native calling convention (i.e., not directly).
//
// When the interpreter is invoked directly, the arguments will appear in the same order
// and form as arguments passed to MethodDesc::CallDescr(). This ordering is as follows:
// 1. 'this' pointer
// 2. return buffer
// 3. signature arguments
//
// MethodDesc::CallDescr() does not support generic parameters or varargs functions.
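    // Illustrative example (not part of the original comment): for an instance method on a
    // value class that returns a large struct, e.g. a hypothetical "BigStruct Foo(int a, int b)",
    // the canonical m_argDescs order would be: [0] byref 'this', [1] int a, [2] int b,
    // [3] return buffer; a direct invocation instead passes them as 'this', return buffer, a, b.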
_ASSERTE_MSG((methInfo->args.callConv & (CORINFO_CALLCONV_EXPLICITTHIS)) == 0,
"Don't yet handle EXPLICITTHIS calling convention modifier.");
switch (methInfo->args.callConv & CORINFO_CALLCONV_MASK)
{
case CORINFO_CALLCONV_DEFAULT:
case CORINFO_CALLCONV_VARARG:
{
unsigned k = 0;
ARG_SLOT* directOffset = NULL;
short directRetBuffOffset = 0;
short directVarArgOffset = 0;
short directTypeParamOffset = 0;
// If there's a "this" argument, handle it.
if (GetFlag<Flag_hasThisArg>())
{
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_UNDEF);
#ifdef FEATURE_STUBS_AS_IL
MethodDesc *pMD = reinterpret_cast<MethodDesc*>(methInfo->ftn);
// The signature of the ILStubs may be misleading.
// If a StubTarget is ever set, we'll find the correct type by inspecting the
// target, rather than the stub.
if (pMD->IsILStub())
{
if (pMD->AsDynamicMethodDesc()->IsUnboxingILStub())
{
// This is an unboxing stub where the thisptr is passed as a boxed VT.
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
}
else
{
MethodDesc *pTargetMD = pMD->AsDynamicMethodDesc()->GetILStubResolver()->GetStubTargetMethodDesc();
if (pTargetMD != NULL)
{
if (pTargetMD->GetMethodTable()->IsValueType())
{
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
}
else
{
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
}
}
}
}
#endif // FEATURE_STUBS_AS_IL
if (m_argDescs[k].m_type == InterpreterType(CORINFO_TYPE_UNDEF))
{
CORINFO_CLASS_HANDLE cls = comp->getMethodClass(methInfo->ftn);
DWORD attribs = comp->getClassAttribs(cls);
if (attribs & CORINFO_FLG_VALUECLASS)
{
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
}
else
{
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
}
}
m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
m_argDescs[k].m_nativeOffset = argOffsets_[k];
m_argDescs[k].m_directOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
directOffset++;
k++;
}
// If there is a return buffer, it will appear next in the arguments list for a direct call.
// Reserve its offset now, for use after the explicit arguments.
#if defined(_ARM_)
// On ARM, for direct calls we always treat HFA return types as having ret buffs.
// So figure out if we have an HFA return type.
bool hasHFARetType =
methInfo->args.retType == CORINFO_TYPE_VALUECLASS
&& CorInfoTypeIsFloatingPoint(comp->getHFAType(methInfo->args.retTypeClass))
&& methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG;
#endif // defined(_ARM_)
if (GetFlag<Flag_hasRetBuffArg>()
#if defined(_ARM_)
// On ARM, for direct calls we always treat HFA return types as having ret buffs.
|| hasHFARetType
#endif // defined(_ARM_)
)
{
directRetBuffOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
directOffset++;
}
#if defined(_AMD64_)
if (GetFlag<Flag_isVarArg>())
{
directVarArgOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
directOffset++;
}
if (GetFlag<Flag_hasGenericsContextArg>())
{
directTypeParamOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
directOffset++;
}
#endif
// Now record the argument types for the rest of the arguments.
InterpreterType it;
CORINFO_CLASS_HANDLE vcTypeRet;
CORINFO_ARG_LIST_HANDLE argPtr = methInfo->args.args;
for (; k < numSigArgsPlusThis; k++)
{
CorInfoTypeWithMod argTypWithMod = comp->getArgType(&methInfo->args, argPtr, &vcTypeRet);
CorInfoType argType = strip(argTypWithMod);
switch (argType)
{
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY: // Just a special case: vcTypeRet is handle for TypedReference in this case...
it = InterpreterType(comp, vcTypeRet);
break;
default:
// Everything else is just encoded as a shifted CorInfoType.
it = InterpreterType(argType);
break;
}
m_argDescs[k].m_type = it;
m_argDescs[k].m_typeStackNormal = it.StackNormalize();
m_argDescs[k].m_nativeOffset = argOffsets_[k];
// When invoking the interpreter directly, large value types are always passed by reference.
if (it.IsLargeStruct(comp))
{
m_argDescs[k].m_directOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
}
else
{
m_argDescs[k].m_directOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, it.Size(comp)));
}
argPtr = comp->getArgNext(argPtr);
directOffset++;
}
if (GetFlag<Flag_hasRetBuffArg>())
{
// The return buffer pointer is passed as a byref.
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
m_argDescs[k].m_nativeOffset = argOffsets_[k];
m_argDescs[k].m_directOffset = directRetBuffOffset;
k++;
}
if (GetFlag<Flag_hasGenericsContextArg>())
{
// The generic type context is an unmanaged pointer (native int).
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_NATIVEINT);
m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
m_argDescs[k].m_nativeOffset = argOffsets_[k];
m_argDescs[k].m_directOffset = directTypeParamOffset;
directOffset++;
k++;
}
if (GetFlag<Flag_isVarArg>())
{
// The vararg cookie is an unmanaged pointer (native int).
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_NATIVEINT);
m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
m_argDescs[k].m_nativeOffset = argOffsets_[k];
m_argDescs[k].m_directOffset = directVarArgOffset;
k++;
}
}
break;
case CORINFO_CALLCONV_C:
NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_C");
break;
case CORINFO_CALLCONV_STDCALL:
NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_STDCALL");
break;
case CORINFO_CALLCONV_THISCALL:
NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_THISCALL");
break;
case CORINFO_CALLCONV_FASTCALL:
NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_FASTCALL");
break;
case CORINFO_CALLCONV_FIELD:
NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_FIELD");
break;
case CORINFO_CALLCONV_LOCAL_SIG:
NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_LOCAL_SIG");
break;
case CORINFO_CALLCONV_PROPERTY:
NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_PROPERTY");
break;
case CORINFO_CALLCONV_NATIVEVARARG:
NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_NATIVEVARARG");
break;
default:
_ASSERTE_ALL_BUILDS(__FILE__, false); // shouldn't get here
}
}
InterpreterMethodInfo::~InterpreterMethodInfo()
{
if (m_methodCache != NULL)
{
delete reinterpret_cast<ILOffsetToItemCache*>(m_methodCache);
}
}
void InterpreterMethodInfo::AllocPinningBitsIfNeeded()
{
if (m_localIsPinningRefBits != NULL)
return;
unsigned numChars = (m_numLocals + 7) / 8;
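// One bit per local, rounded up to a whole number of bytes: local #n lives in byte n/8, bit n%8.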
m_localIsPinningRefBits = new char[numChars];
for (unsigned i = 0; i < numChars; i++)
{
m_localIsPinningRefBits[i] = char(0);
}
}
void InterpreterMethodInfo::SetPinningBit(unsigned locNum)
{
_ASSERTE_MSG(locNum < m_numLocals, "Precondition");
AllocPinningBitsIfNeeded();
unsigned ind = locNum / 8;
unsigned bitNum = locNum - (ind * 8);
m_localIsPinningRefBits[ind] |= (1 << bitNum);
}
bool InterpreterMethodInfo::GetPinningBit(unsigned locNum)
{
_ASSERTE_MSG(locNum < m_numLocals, "Precondition");
if (m_localIsPinningRefBits == NULL)
return false;
unsigned ind = locNum / 8;
unsigned bitNum = locNum - (ind * 8);
return (m_localIsPinningRefBits[ind] & (1 << bitNum)) != 0;
}
void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noReg, bool twoSlotAlign)
{
#if defined(_AMD64_)
assert(!noReg);
assert(!twoSlotAlign);
AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/false);
#else // !_AMD64_
#if defined(_X86_) || defined(_ARM64_)
assert(!twoSlotAlign); // Shouldn't use this flag on x86 or ARM64 (it wouldn't work right on the stack, at least).
#endif
// If the argument requires two-slot alignment, make sure we have it. This is the
// ARM model: both in regs and on the stack.
if (twoSlotAlign)
{
if (!noReg && numRegArgs < NumberOfIntegerRegArgs())
{
if ((numRegArgs % 2) != 0)
{
numRegArgs++;
}
}
else
{
if ((callerArgStackSlots % 2) != 0)
{
callerArgStackSlots++;
}
}
}
#if defined(_ARM64_)
// On ARM64 we never place an argument 'partially' on the stack:
// if all of its slots fit into registers, they go into registers; otherwise they all go onto the stack.
if (!noReg && numRegArgs+numSlots <= NumberOfIntegerRegArgs())
#else
if (!noReg && numRegArgs < NumberOfIntegerRegArgs())
#endif
{
argIsReg[canonIndex] = ARS_IntReg;
argOffsets[canonIndex] = numRegArgs * sizeof(void*);
numRegArgs += numSlots;
// If we overflowed the regs, we consume some stack arg space.
if (numRegArgs > NumberOfIntegerRegArgs())
{
callerArgStackSlots += (numRegArgs - NumberOfIntegerRegArgs());
}
}
else
{
#if defined(_X86_)
// On X86, stack args are pushed in order. We will add the total size of the arguments to this offset,
// so we set this to a negative number relative to the SP before the first arg push.
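// For illustration (4-byte slots): the first stack arg recorded here gets -4, the next -8, and so on;
// GenerateInterpreterStub later adds stackArgBaseOffset to turn these into positive, ESP-relative offsets.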
callerArgStackSlots += numSlots;
ClrSafeInt<short> offset(-callerArgStackSlots);
#elif defined(_ARM_) || defined(_ARM64_)
// On ARM, args are pushed in *reverse* order. So we will create an offset relative to the address
// of the first stack arg; later, we will add the size of the non-stack arguments.
ClrSafeInt<short> offset(callerArgStackSlots);
#endif
offset *= static_cast<short>(sizeof(void*));
assert(!offset.IsOverflow());
argOffsets[canonIndex] = offset.Value();
#if defined(_ARM_) || defined(_ARM64_)
callerArgStackSlots += numSlots;
#endif
}
#endif // !_AMD64_
}
#if defined(_AMD64_)
// The AMD64 calling convention allows any type that can be contained in 64 bits to be passed in a register;
// types that don't fit, or whose size is not a power of 2, are passed by reference on the stack.
// RCX, RDX, R8, R9 are the int arg registers. XMM0-3 overlap with the integer registers and are used
// for floating point arguments.
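// For example (a sketch of the position-based Windows x64 rule this code assumes): for f(int a, double b, int c),
// 'a' goes in RCX, 'b' in XMM1 (not XMM0), and 'c' in R8, because each argument position consumes
// both its integer and its floating-point register.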
void Interpreter::ArgState::AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType)
{
// If this is a floating-point type and float register slots remain, use one.
if (isFloatingType && (numFPRegArgSlots < MaxNumFPRegArgSlots))
{
assert(numSlots == 1);
argIsReg[canonIndex] = ARS_FloatReg;
argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
fpArgsUsed |= (0x1 << (numFPRegArgSlots + 1));
numFPRegArgSlots += 1;
numRegArgs += 1; // Increment int reg count due to shadowing.
return;
}
// An integer/aligned-struct arg, or a reference to a struct that got copied onto
// the stack, goes into a register or a stack slot.
if (numRegArgs != NumberOfIntegerRegArgs())
{
argIsReg[canonIndex] = ARS_IntReg;
argOffsets[canonIndex] = numRegArgs * sizeof(void*);
numRegArgs += 1;
numFPRegArgSlots += 1; // Increment FP reg count due to shadowing.
}
else
{
argIsReg[canonIndex] = ARS_NotReg;
ClrSafeInt<short> offset(callerArgStackSlots * sizeof(void*));
assert(!offset.IsOverflow());
argOffsets[canonIndex] = offset.Value();
callerArgStackSlots += 1;
}
}
#endif
void Interpreter::ArgState::AddFPArg(unsigned canonIndex, unsigned short numSlots, bool twoSlotAlign)
{
#if defined(_AMD64_)
assert(!twoSlotAlign);
assert(numSlots == 1);
AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/ true);
#elif defined(_X86_)
assert(false); // Don't call this on x86; we pass all FP on the stack.
#elif defined(_ARM_)
// We require "numSlots" alignment.
assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
argIsReg[canonIndex] = ARS_FloatReg;
if (twoSlotAlign)
{
// If we require two slot alignment, the number of slots must be a multiple of two.
assert((numSlots % 2) == 0);
// Skip a slot if necessary.
if ((numFPRegArgSlots % 2) != 0)
{
numFPRegArgSlots++;
}
// We always allocate new (never back-filled) slots for two-slot-aligned (double-precision) args...
argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
for (unsigned short i = 0; i < numSlots/2; i++)
{
fpArgsUsed |= (0x3 << (numFPRegArgSlots + i));
}
numFPRegArgSlots += numSlots;
}
else
{
if (numSlots == 1)
{
// A single-precision (float) argument. We must do "back-filling" where possible, searching
// for previous unused registers.
unsigned slot = 0;
while (slot < 32 && (fpArgsUsed & (1 << slot))) slot++;
assert(slot < 32); // Search succeeded.
assert(slot <= numFPRegArgSlots); // No bits at or above numFPRegArgSlots are set (regs used).
argOffsets[canonIndex] = slot * sizeof(void*);
fpArgsUsed |= (0x1 << slot);
if (slot == numFPRegArgSlots)
numFPRegArgSlots += numSlots;
}
else
{
// We can always allocate after the last used slot.
argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
for (unsigned i = 0; i < numSlots; i++)
{
fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
}
numFPRegArgSlots += numSlots;
}
}
#elif defined(_ARM64_)
assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
assert(!twoSlotAlign);
argIsReg[canonIndex] = ARS_FloatReg;
argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
for (unsigned i = 0; i < numSlots; i++)
{
fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
}
numFPRegArgSlots += numSlots;
#else
#error "Unsupported architecture"
#endif
}
// static
CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
CORINFO_METHOD_INFO* info,
/*OUT*/ BYTE **nativeEntry,
/*OUT*/ ULONG *nativeSizeOfCode,
InterpreterMethodInfo** ppInterpMethodInfo,
bool jmpCall)
{
//
// First, ensure that the compiler-specific statics are initialized.
//
InitializeCompilerStatics(comp);
//
// Next, use switches and IL scanning to determine whether to interpret this method.
//
#if INTERP_TRACING
#define TRACE_SKIPPED(cls, meth, reason) \
if (s_DumpInterpreterStubsFlag.val(CLRConfig::INTERNAL_DumpInterpreterStubs)) { \
fprintf(GetLogFile(), "Skipping %s:%s (%s).\n", cls, meth, reason); \
}
#else
#define TRACE_SKIPPED(cls, meth, reason)
#endif
// If jmpCall, we only need to do computations involving method info.
if (!jmpCall)
{
const char* clsName;
const char* methName = comp->getMethodName(info->ftn, &clsName);
if ( !s_InterpretMeths.contains(methName, clsName, info->args.pSig)
|| s_InterpretMethsExclude.contains(methName, clsName, info->args.pSig))
{
TRACE_SKIPPED(clsName, methName, "not in set of methods to interpret");
return CORJIT_SKIPPED;
}
unsigned methHash = comp->getMethodHash(info->ftn);
if ( methHash < s_InterpretMethHashMin.val(CLRConfig::INTERNAL_InterpreterMethHashMin)
|| methHash > s_InterpretMethHashMax.val(CLRConfig::INTERNAL_InterpreterMethHashMax))
{
TRACE_SKIPPED(clsName, methName, "hash not within range to interpret");
return CORJIT_SKIPPED;
}
MethodDesc* pMD = reinterpret_cast<MethodDesc*>(info->ftn);
#if !INTERP_ILSTUBS
if (pMD->IsILStub())
{
TRACE_SKIPPED(clsName, methName, "interop stubs not supported");
return CORJIT_SKIPPED;
}
else
#endif // !INTERP_ILSTUBS
if (!s_InterpreterDoLoopMethods && MethodMayHaveLoop(info->ILCode, info->ILCodeSize))
{
TRACE_SKIPPED(clsName, methName, "has loop, not interpreting loop methods.");
return CORJIT_SKIPPED;
}
s_interpreterStubNum++;
#if INTERP_TRACING
if (s_interpreterStubNum < s_InterpreterStubMin.val(CLRConfig::INTERNAL_InterpreterStubMin)
|| s_interpreterStubNum > s_InterpreterStubMax.val(CLRConfig::INTERNAL_InterpreterStubMax))
{
TRACE_SKIPPED(clsName, methName, "stub num not in range, not interpreting.");
return CORJIT_SKIPPED;
}
if (s_DumpInterpreterStubsFlag.val(CLRConfig::INTERNAL_DumpInterpreterStubs))
{
unsigned hash = comp->getMethodHash(info->ftn);
fprintf(GetLogFile(), "Generating interpretation stub (# %d = 0x%x, hash = 0x%x) for %s:%s.\n",
s_interpreterStubNum, s_interpreterStubNum, hash, clsName, methName);
fflush(GetLogFile());
}
#endif
}
//
// Finally, generate an interpreter entry-point stub.
//
// @TODO: this structure clearly needs some sort of lifetime management. It is the moral equivalent
// of compiled code, and should be associated with an app domain. In addition, when I get to it, we should
// delete it when/if we actually compile the method. (Actually, that's complicated, since there may be
// VSD stubs still bound to the interpreter stub. The check there will get to the jitted code, but we want
// to eventually clean those up at some safe point...)
InterpreterMethodInfo* interpMethInfo = new InterpreterMethodInfo(comp, info);
if (ppInterpMethodInfo != nullptr)
{
*ppInterpMethodInfo = interpMethInfo;
}
interpMethInfo->m_stubNum = s_interpreterStubNum;
MethodDesc* methodDesc = reinterpret_cast<MethodDesc*>(info->ftn);
if (!jmpCall)
{
interpMethInfo = RecordInterpreterMethodInfoForMethodHandle(info->ftn, interpMethInfo);
}
#if FEATURE_INTERPRETER_DEADSIMPLE_OPT
unsigned offsetOfLd;
if (IsDeadSimpleGetter(comp, methodDesc, &offsetOfLd))
{
interpMethInfo->SetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetter>(true);
if (offsetOfLd == ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg)
{
interpMethInfo->SetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetterIsDbgForm>(true);
}
else
{
assert(offsetOfLd == ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt);
}
}
#endif // FEATURE_INTERPRETER_DEADSIMPLE_OPT
// Used to initialize the arg offset information.
Stub* stub = NULL;
// We assume that the stack contains (with addresses growing upwards, assuming a downwards-growing stack):
//
// [Non-reg arg N-1]
// ...
// [Non-reg arg <# of reg args>]
// [return PC]
//
// Then push the register args to get:
//
// [Non-reg arg N-1]
// ...
// [Non-reg arg <# of reg args>]
// [return PC]
// [reg arg <# of reg args>-1]
// ...
// [reg arg 0]
//
// Pass the address of this argument array, and the MethodDesc pointer for the method, as arguments to
// Interpret.
//
// So the structure of the code will look like this (in the non-ILstub case):
//
#if defined(_X86_) || defined(_AMD64_)
// First do "short-circuiting" if the method has JITted code, and we couldn't find/update the call site:
// eax = &interpMethInfo
// eax = [eax + offsetof(m_jittedCode)]
// if (eax == zero) goto doInterpret:
// /*else*/ jmp [eax]
// doInterpret:
// push ebp
// mov ebp, esp
// [if there are register arguments in ecx or edx, push them]
// ecx := addr of the InterpreterMethodInfo for the method to be interpreted.
// edx = esp /*pointer to argument structure*/
// call to Interpreter::InterpretMethod
// [if we pushed register arguments, increment esp by the right amount.]
// pop ebp
// ret <n> ; where <n> is the size, in bytes, of the stack arguments passed in the call to the stub.
#elif defined (_ARM_)
// TODO.
#endif
// The IL stub case is hard. The portion of the interpreter stub that short-circuits
// to JITted code requires an extra "scratch" volatile register, not an argument register;
// in the IL stub case, it too is using such a register, as an extra argument, to hold the stub context.
// On x86 and ARM, there is only one such extra volatile register, and we've got a conundrum.
// The cases where this short-circuiting is important are when the address of an interpreter stub
// becomes "embedded" in other code. The examples I know of are VSD stubs and delegates.
// The first of these is not a problem for IL stubs -- methods invoked via p/Invoke (the ones that
// [I think!] use IL stubs) are static, and cannot be invoked via VSD stubs. Delegates, on the other hand,
// remain a problem [I believe].
// For the short term, we'll ignore this issue, and never do short-circuiting for IL stubs.
// So interpreter stubs embedded in delegates will continue to interpret the IL stub, even after
// the stub has been JITted.
// The long-term intention is that when we JIT a method with an interpreter stub, we keep a mapping
// from interpreter stub address to corresponding native code address. If this mapping is non-empty,
// at GC time we would visit the locations in which interpreter stub addresses might be located, like
// VSD stubs and delegate objects, and update them to point to new addresses. This would be a necessary
// part of any scheme to GC interpreter stubs, and InterpreterMethodInfos.
// If we *really* wanted to make short-circuiting work for the IL stub case, we would have to
// (in the x86 case, which should be sufficiently illustrative):
// push eax
// <get the address of JITted code, if any, into eax>
// if there is JITted code in eax, we'd have to
// push 2 non-volatile registers, say esi and edi.
// copy the JITted code address from eax into esi.
// copy the method arguments (without the return address) down the stack, using edi
// as a scratch register.
// restore the original stub context value into eax from the stack
// call (not jmp) to the JITted code address in esi
// pop esi and edi from the stack.
// now the stack has original args, followed by original return address. Do a "ret"
// that returns to the return address, and also pops the original args from the stack.
// If we did this, we'd have to give this portion of the stub proper unwind info.
// Also, we'd have to adjust the rest of the stub to pop eax from the stack.
// TODO: much of the interpreter stub code should be shareable. In the non-IL stub case,
// at least, we could have a small per-method stub that puts the address of the method-specific
// InterpreterMethodInfo into eax, and then branches to a shared part. Probably we would want to
// always push all integer args on x86, as we do already on ARM. On ARM, we'd need several versions
// of the shared stub, for different numbers of floating point register args, crossed with different kinds of
// HFA return values. But these could still be shared, and the per-method stub would decide which of
// these to target.
//
// In the IL stub case, which uses eax, it would be problematic to do this sharing.
StubLinkerCPU sl;
MethodDesc* pMD = reinterpret_cast<MethodDesc*>(info->ftn);
if (!jmpCall)
{
sl.Init();
#if defined(_X86_) || defined(_AMD64_)
// First we do "short-circuiting" if the method has JITted code.
#if INTERP_ILSTUBS
if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
#endif
{
// First read the m_jittedCode field.
sl.X86EmitRegLoad(kEAX, UINT_PTR(interpMethInfo));
sl.X86EmitOffsetModRM(0x8b, kEAX, kEAX, offsetof(InterpreterMethodInfo, m_jittedCode));
// If it is still zero, then go on to do the interpretation.
sl.X86EmitCmpRegImm32(kEAX, 0);
CodeLabel* doInterpret = sl.NewCodeLabel();
sl.X86EmitCondJump(doInterpret, X86CondCode::kJE);
// Otherwise...
sl.X86EmitJumpReg(kEAX); // tail call to JITted code.
sl.EmitLabel(doInterpret);
}
#if defined(_X86_)
// Start regular interpretation
sl.X86EmitPushReg(kEBP);
sl.X86EmitMovRegReg(kEBP, static_cast<X86Reg>(kESP_Unsafe));
#endif
#elif defined(_ARM_)
// On ARM we use R12 as a "scratch" register -- callee-trashed, not used
// for arguments.
ThumbReg r11 = ThumbReg(11);
ThumbReg r12 = ThumbReg(12);
#if INTERP_ILSTUBS
if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
#endif
{
// r11 holds a zero constant for the comparison below; r12 holds the InterpreterMethodInfo, then its m_jittedCode.
sl.ThumbEmitMovConstant(r11, 0);
sl.ThumbEmitMovConstant(r12, UINT_PTR(interpMethInfo));
sl.ThumbEmitLoadRegIndirect(r12, r12, offsetof(InterpreterMethodInfo, m_jittedCode));
sl.ThumbEmitCmpReg(r12, r11); // Set condition codes.
// If r12 is zero, then go on to do the interpretation.
CodeLabel* doInterpret = sl.NewCodeLabel();
sl.ThumbEmitCondFlagJump(doInterpret, thumbCondEq.cond);
sl.ThumbEmitJumpRegister(r12); // If non-zero, tail call to JITted code.
sl.EmitLabel(doInterpret);
}
// Start regular interpretation
#elif defined(_ARM64_)
// x8 through x15 are scratch registers on ARM64.
IntReg x8 = IntReg(8);
IntReg x9 = IntReg(9);
#if INTERP_ILSTUBS
if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
#endif
{
sl.EmitMovConstant(x8, UINT64(interpMethInfo));
sl.EmitLoadStoreRegImm(StubLinkerCPU::eLOAD, x9, x8, offsetof(InterpreterMethodInfo, m_jittedCode));
sl.EmitCmpImm(x9, 0);
CodeLabel* doInterpret = sl.NewCodeLabel();
sl.EmitCondFlagJump(doInterpret, CondEq.cond);
sl.EmitJumpRegister(x9);
sl.EmitLabel(doInterpret);
}
// Start regular interpretation
#else
#error unsupported platform
#endif
}
MetaSig sig(methodDesc);
unsigned totalArgs = info->args.numArgs;
unsigned sigArgsPlusThis = totalArgs;
bool hasThis = false;
bool hasRetBuff = false;
bool isVarArg = false;
bool hasGenericsContextArg = false;
// Below, we will increment "totalArgs" for any of the "this" argument,
// a ret buff argument, and/or a generics context argument.
//
// There will be four arrays allocated below, each with this increased "totalArgs" elements:
// argOffsets, argIsReg, argPerm, and, later, m_argDescs.
//
// They will be indexed in the order (0-based, [] indicating optional)
//
// [this] sigArgs [retBuff] [VASigCookie] [genCtxt]
//
// We will call this "canonical order". It is architecture-independent, and
// does not necessarily correspond to the architecture-dependent physical order
// in which the registers are actually passed. (That's actually the purpose of
// "argPerm": to record the correspondence between canonical order and physical
// order.) We could have chosen any order for the first three of these, but it's
// simplest to let m_argDescs have all the passed IL arguments passed contiguously
// at the beginning, allowing it to be indexed by IL argument number.
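// For illustration: an instance method with two IL args that also needs a generics context has
// canonical order [this][arg0][arg1][genCtxt] (totalArgs == 4). On ARM/AMD64/ARM64 the generics
// context is physically passed before the IL args, so argPerm[3] records that earlier physical position.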
int genericsContextArgIndex = 0;
int retBuffArgIndex = 0;
int vaSigCookieIndex = 0;
if (sig.HasThis())
{
assert(info->args.callConv & CORINFO_CALLCONV_HASTHIS);
hasThis = true;
totalArgs++; sigArgsPlusThis++;
}
if (methodDesc->HasRetBuffArg())
{
hasRetBuff = true;
retBuffArgIndex = totalArgs;
totalArgs++;
}
if (sig.GetCallingConventionInfo() & CORINFO_CALLCONV_VARARG)
{
isVarArg = true;
vaSigCookieIndex = totalArgs;
totalArgs++;
}
if (sig.GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE)
{
assert(info->args.callConv & CORINFO_CALLCONV_PARAMTYPE);
hasGenericsContextArg = true;
genericsContextArgIndex = totalArgs;
totalArgs++;
}
// The non-this sig args have indices starting after these.
// We will first encode the arg offsets as *negative* offsets from the address above the first
// stack arg, and later add in the total size of the stack args to get a positive offset.
// The first sigArgsPlusThis elements are the offsets of the IL-addressable arguments. After that,
// there may be up to two more: generics context arg, if present, and return buff pointer, if present.
// (Note that the latter is actually passed after the "this" pointer, or else first if no "this" pointer
// is present. We re-arrange to preserve the easy IL-addressability.)
ArgState argState(totalArgs);
// This is the permutation that translates from an index in the argOffsets/argIsReg arrays to
// the platform-specific order in which the arguments are passed.
unsigned* argPerm = new unsigned[totalArgs];
// The number of register argument slots we end up pushing.
unsigned short regArgsFound = 0;
unsigned physArgIndex = 0;
#if defined(_ARM_)
// The stub linker has a weird little limitation: all stubs it's used
// for on ARM push some callee-saved register, so the unwind info
// code was written assuming at least one would be pushed. I don't know how to
// fix it, so I'm meeting this requirement by pushing one callee-saved register.
#define STUB_LINK_EMIT_PROLOG_REQUIRES_CALLEE_SAVE_PUSH 1
#if STUB_LINK_EMIT_PROLOG_REQUIRES_CALLEE_SAVE_PUSH
const int NumberOfCalleeSaveRegsToPush = 1;
#else
const int NumberOfCalleeSaveRegsToPush = 0;
#endif
// The "1" here is for the return address.
const int NumberOfFixedPushes = 1 + NumberOfCalleeSaveRegsToPush;
#elif defined(_ARM64_)
// FP, LR
const int NumberOfFixedPushes = 2;
#endif
#if defined(FEATURE_HFA)
#if defined(_ARM_) || defined(_ARM64_)
// On ARM, a non-retBuffArg method that returns a struct type might be an HFA return. Figure
// that out.
unsigned HFARetTypeSize = 0;
#endif
#if defined(_ARM64_)
unsigned cHFAVars = 0;
#endif
if (info->args.retType == CORINFO_TYPE_VALUECLASS
&& CorInfoTypeIsFloatingPoint(comp->getHFAType(info->args.retTypeClass))
&& info->args.getCallConv() != CORINFO_CALLCONV_VARARG)
{
HFARetTypeSize = getClassSize(info->args.retTypeClass);
#if defined(_ARM_)
// Round up to a double boundary.
HFARetTypeSize = ((HFARetTypeSize+ sizeof(double) - 1) / sizeof(double)) * sizeof(double);
#elif defined(_ARM64_)
// We don't need to round up to a double boundary. Unlike ARM, each field occupies one slot
// whether it's a float or a double. We'll handle the stack alignment in the prolog, where we have
// all the information about what is going to be pushed on the stack.
// What we need on ARM64 instead is the number of slots: for instance, a VT with two float fields
// needs two slots even though it has the same size as a VT with one double field. (ARM64TODO: Verify it.)
// Sizing by bytes works on ARM because of the overlapping layout of the floating point registers,
// but it won't work on ARM64.
cHFAVars = (comp->getHFAType(info->args.retTypeClass) == CORINFO_TYPE_FLOAT) ? HFARetTypeSize/sizeof(float) : HFARetTypeSize/sizeof(double);
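// E.g., an HFA of two floats (size 8) yields cHFAVars == 2, while an HFA of one double (also size 8)
// yields cHFAVars == 1 -- same size, different slot counts, which is why size alone isn't enough here.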
#endif
}
#endif // defined(FEATURE_HFA)
_ASSERTE_MSG((info->args.callConv & (CORINFO_CALLCONV_EXPLICITTHIS)) == 0,
"Don't yet handle EXPLICITTHIS calling convention modifier.");
switch (info->args.callConv & CORINFO_CALLCONV_MASK)
{
case CORINFO_CALLCONV_DEFAULT:
case CORINFO_CALLCONV_VARARG:
{
unsigned firstSigArgIndex = 0;
if (hasThis)
{
argPerm[0] = physArgIndex; physArgIndex++;
argState.AddArg(0);
firstSigArgIndex++;
}
if (hasRetBuff)
{
argPerm[retBuffArgIndex] = physArgIndex; physArgIndex++;
argState.AddArg(retBuffArgIndex);
}
if (isVarArg)
{
argPerm[vaSigCookieIndex] = physArgIndex; physArgIndex++;
interpMethInfo->m_varArgHandleArgNum = vaSigCookieIndex;
argState.AddArg(vaSigCookieIndex);
}
#if defined(_ARM_) || defined(_AMD64_) || defined(_ARM64_)
// Generics context comes before the IL args on ARM, AMD64, and ARM64. Would be better if I factored this out as a call,
// to avoid large swaths of duplicate code.
if (hasGenericsContextArg)
{
argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++;
argState.AddArg(genericsContextArgIndex);
}
#endif // _ARM_ || _AMD64_ || _ARM64_
CORINFO_ARG_LIST_HANDLE argPtr = info->args.args;
// Some arguments have been passed in registers, some in memory. We must generate code that
// moves the register arguments to memory, and determines a pointer into the stack from which all
// the arguments can be accessed, according to the offsets in "argOffsets."
//
// In the first pass over the arguments, we will label and count the register arguments, and
// initialize entries in "argOffsets" for the non-register arguments -- relative to the SP at the
// time of the call. Then when we have counted the number of register arguments, we will adjust
// the offsets for the non-register arguments to account for those. Then, in the second pass, we
// will push the register arguments on the stack, and capture the final stack pointer value as
// the argument vector pointer.
CORINFO_CLASS_HANDLE vcTypeRet;
// This iteration starts at the first signature argument, and iterates over all the
// canonical indices for the signature arguments.
for (unsigned k = firstSigArgIndex; k < sigArgsPlusThis; k++)
{
argPerm[k] = physArgIndex; physArgIndex++;
CorInfoTypeWithMod argTypWithMod = comp->getArgType(&info->args, argPtr, &vcTypeRet);
CorInfoType argType = strip(argTypWithMod);
switch (argType)
{
case CORINFO_TYPE_UNDEF:
case CORINFO_TYPE_VOID:
case CORINFO_TYPE_VAR:
_ASSERTE_ALL_BUILDS(__FILE__, false); // Should not happen;
break;
// One integer slot arguments:
case CORINFO_TYPE_BOOL:
case CORINFO_TYPE_CHAR:
case CORINFO_TYPE_BYTE:
case CORINFO_TYPE_UBYTE:
case CORINFO_TYPE_SHORT:
case CORINFO_TYPE_USHORT:
case CORINFO_TYPE_INT:
case CORINFO_TYPE_UINT:
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_NATIVEUINT:
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_STRING:
case CORINFO_TYPE_PTR:
argState.AddArg(k);
break;
// Two integer slot arguments.
case CORINFO_TYPE_LONG:
case CORINFO_TYPE_ULONG:
#if defined(_X86_)
// Longs are always passed on the stack -- with no obvious alignment.
argState.AddArg(k, 2, /*noReg*/true);
#elif defined(_ARM_)
// LONGS have 2-reg alignment; inc reg if necessary.
argState.AddArg(k, 2, /*noReg*/false, /*twoSlotAlign*/true);
#elif defined(_AMD64_) || defined(_ARM64_)
argState.AddArg(k);
#else
#error unknown platform
#endif
break;
// One float slot args:
case CORINFO_TYPE_FLOAT:
#if defined(_X86_)
argState.AddArg(k, 1, /*noReg*/true);
#elif defined(_ARM_)
argState.AddFPArg(k, 1, /*twoSlotAlign*/false);
#elif defined(_AMD64_) || defined(_ARM64_)
argState.AddFPArg(k, 1, false);
#else
#error unknown platform
#endif
break;
// Two float slot args
case CORINFO_TYPE_DOUBLE:
#if defined(_X86_)
argState.AddArg(k, 2, /*noReg*/true);
#elif defined(_ARM_)
argState.AddFPArg(k, 2, /*twoSlotAlign*/true);
#elif defined(_AMD64_) || defined(_ARM64_)
argState.AddFPArg(k, 1, false);
#else
#error unknown platform
#endif
break;
// Value class args:
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY:
{
unsigned sz = getClassSize(vcTypeRet);
unsigned szSlots = max(1, sz / sizeof(void*));
#if defined(_X86_)
argState.AddArg(k, static_cast<short>(szSlots), /*noReg*/true);
#elif defined(_AMD64_)
argState.AddArg(k, static_cast<short>(szSlots));
#elif defined(_ARM_) || defined(_ARM64_)
CorInfoType hfaType = comp->getHFAType(vcTypeRet);
if (CorInfoTypeIsFloatingPoint(hfaType))
{
argState.AddFPArg(k, szSlots,
#if defined(_ARM_)
/*twoSlotAlign*/ (hfaType == CORINFO_TYPE_DOUBLE)
#elif defined(_ARM64_)
/*twoSlotAlign*/ false // unlike ARM32 FP args always consume 1 slot on ARM64
#endif
);
}
else
{
unsigned align = comp->getClassAlignmentRequirement(vcTypeRet, FALSE);
argState.AddArg(k, static_cast<short>(szSlots), /*noReg*/false,
#if defined(_ARM_)
/*twoSlotAlign*/ (align == 8)
#elif defined(_ARM64_)
/*twoSlotAlign*/ false
#endif
);
}
#else
#error unknown platform
#endif
}
break;
default:
_ASSERTE_MSG(false, "should not reach here, unknown arg type");
}
argPtr = comp->getArgNext(argPtr);
}
#if defined(_X86_)
// Generics context comes last on _X86_. Would be better if I factored this out as a call,
// to avoid large swaths of duplicate code.
if (hasGenericsContextArg)
{
argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++;
argState.AddArg(genericsContextArgIndex);
}
// Now we have counted the number of register arguments, so we can update the offsets for the
// non-register arguments. "+ 2" below is to account for the return address from the call, and
// pushing of EBP.
unsigned short stackArgBaseOffset = (argState.numRegArgs + 2 + argState.callerArgStackSlots) * sizeof(void*);
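// E.g., with 2 register args and 3 caller stack slots: (2 + 2 + 3) * 4 = 28 bytes above the
// post-push ESP; this base is later added to the (negative) per-arg offsets recorded in AddArg.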
unsigned intRegArgBaseOffset = 0;
#elif defined(_ARM_)
// We're choosing to always push all arg regs on ARM -- this is the only option
// that ThumbEmitProlog currently gives.
argState.numRegArgs = 4;
// On ARM, we push the (integer) arg regs before we push the return address, so we don't add an
// extra constant. And the offset is the address of the last pushed argument, which is the first
// stack argument in signature order.
// Round up to a double boundary...
unsigned fpStackSlots = ((argState.numFPRegArgSlots + 1) / 2) * 2;
unsigned intRegArgBaseOffset = (fpStackSlots + NumberOfFixedPushes) * sizeof(void*);
unsigned short stackArgBaseOffset = intRegArgBaseOffset + (argState.numRegArgs) * sizeof(void*);
#elif defined(_ARM64_)
// See StubLinkerCPU::EmitProlog for the layout of the stack
unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*);
unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*));
#elif defined(_AMD64_)
unsigned short stackArgBaseOffset = (argState.numRegArgs) * sizeof(void*);
#else
#error unsupported platform
#endif
#if defined(_ARM_)
WORD regArgMask = 0;
#endif // defined(_ARM_)
// argPerm maps from an index into the argOffsets/argIsReg arrays to
// the order that the arguments are passed.
unsigned* argPermInverse = new unsigned[totalArgs];
for (unsigned t = 0; t < totalArgs; t++)
{
argPermInverse[argPerm[t]] = t;
}
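// For illustration: if argPerm == {0, 2, 3, 1} (canonical genCtxt at index 3, passed physically
// second), then argPermInverse == {0, 3, 1, 2}, so argPermInverse[kk] recovers the canonical index.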
for (unsigned kk = 0; kk < totalArgs; kk++)
{
// Let "k" be the index of the kk'th input in the argOffsets and argIsReg arrays.
// To compute "k" we need to invert argPerm permutation -- determine the "k" such
// that argPerm[k] == kk.
unsigned k = argPermInverse[kk];
assert(k < totalArgs);
if (argState.argIsReg[k] == ArgState::ARS_IntReg)
{
regArgsFound++;
// If any int reg args are used on ARM, we push them all (in ThumbEmitProlog)
#if defined(_X86_)
if (regArgsFound == 1)
{
if (!jmpCall) { sl.X86EmitPushReg(kECX); }
argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*); // General form, good for general # of reg args.
}
else
{
assert(regArgsFound == 2);
if (!jmpCall) { sl.X86EmitPushReg(kEDX); }
argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*);
}
#elif defined(_ARM_) || defined(_ARM64_)
argState.argOffsets[k] += intRegArgBaseOffset;
#elif defined(_AMD64_)
// First home the register arguments in the stack space allocated by the caller.
// Refer to Stack Allocation on x64 [http://msdn.microsoft.com/en-US/library/ew5tede7(v=vs.80).aspx]
X86Reg argRegs[] = { kECX, kEDX, kR8, kR9 };
if (!jmpCall) { sl.X86EmitIndexRegStoreRSP(regArgsFound * sizeof(void*), argRegs[regArgsFound - 1]); }
argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*);
#else
#error unsupported platform
#endif
}
#if defined(_AMD64_)
else if (argState.argIsReg[k] == ArgState::ARS_FloatReg)
{
// Increment regArgsFound since float/int arguments have overlapping registers.
regArgsFound++;
// Home the float arguments.
X86Reg argRegs[] = { kXMM0, kXMM1, kXMM2, kXMM3 };
if (!jmpCall) { sl.X64EmitMovSDToMem(argRegs[regArgsFound - 1], static_cast<X86Reg>(kESP_Unsafe), regArgsFound * sizeof(void*)); }
argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*);
}
#endif
else if (argState.argIsReg[k] == ArgState::ARS_NotReg)
{
argState.argOffsets[k] += stackArgBaseOffset;
}
// So far, x86 doesn't have any FP reg args, and ARM and ARM64 put them at offset 0, so no
// adjustment is necessary (yet) for arguments passed in those registers.
}
delete[] argPermInverse;
}
break;
case CORINFO_CALLCONV_C:
NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_C");
break;
case CORINFO_CALLCONV_STDCALL:
NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_STDCALL");
break;
case CORINFO_CALLCONV_THISCALL:
NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_THISCALL");
break;
case CORINFO_CALLCONV_FASTCALL:
NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_FASTCALL");
break;
case CORINFO_CALLCONV_FIELD:
NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_FIELD");
break;
case CORINFO_CALLCONV_LOCAL_SIG:
NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_LOCAL_SIG");
break;
case CORINFO_CALLCONV_PROPERTY:
NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_PROPERTY");
break;
case CORINFO_CALLCONV_NATIVEVARARG:
NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_NATIVEVARARG");
break;
default:
_ASSERTE_ALL_BUILDS(__FILE__, false); // shouldn't get here
}
delete[] argPerm;
PCODE interpretMethodFunc;
if (!jmpCall)
{
switch (info->args.retType)
{
case CORINFO_TYPE_FLOAT:
interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethodFloat);
break;
case CORINFO_TYPE_DOUBLE:
interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethodDouble);
break;
default:
interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethod);
break;
}
// The argument registers have been pushed by now, so we can use them.
#if defined(_X86_)
// First arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
sl.X86EmitMovRegReg(kEDX, static_cast<X86Reg>(kESP_Unsafe));
// InterpretMethod uses F_CALL_CONV == __fastcall; pass 2 args in regs.
#if INTERP_ILSTUBS
if (pMD->IsILStub())
{
// Third argument is stubcontext, in eax.
sl.X86EmitPushReg(kEAX);
}
else
#endif
{
// For a non-ILStub method, push NULL as the StubContext argument.
sl.X86EmitZeroOutReg(kECX);
sl.X86EmitPushReg(kECX);
}
// sl.X86EmitAddReg(kECX, reinterpret_cast<UINT>(interpMethInfo));
sl.X86EmitRegLoad(kECX, reinterpret_cast<UINT>(interpMethInfo));
sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
// Now we will deallocate the stack slots we pushed to hold register arguments.
if (argState.numRegArgs > 0)
{
sl.X86EmitAddEsp(argState.numRegArgs * sizeof(void*));
}
sl.X86EmitPopReg(kEBP);
sl.X86EmitReturn(static_cast<WORD>(argState.callerArgStackSlots * sizeof(void*)));
#elif defined(_AMD64_)
// Pass "ilArgs", i.e. just the point where registers have been homed, as 2nd arg
sl.X86EmitIndexLeaRSP(ARGUMENT_kREG2, static_cast<X86Reg>(kESP_Unsafe), 8);
// Allocate space for homing callee's (InterpretMethod's) arguments.
// The calling convention requires a default allocation of 4 slots for homing,
// but to keep the stack frame 16-byte aligned we allocate 5.
int interpMethodArgSize = 5 * sizeof(void*);
sl.X86EmitSubEsp(interpMethodArgSize);
// If we have IL stubs pass the stub context in R10 or else pass NULL.
#if INTERP_ILSTUBS
if (pMD->IsILStub())
{
sl.X86EmitMovRegReg(kR8, kR10);
}
else
#endif
{
// For a non-ILStub method, push NULL as the StubContext argument.
sl.X86EmitZeroOutReg(ARGUMENT_kREG1);
sl.X86EmitMovRegReg(kR8, ARGUMENT_kREG1);
}
sl.X86EmitRegLoad(ARGUMENT_kREG1, reinterpret_cast<UINT_PTR>(interpMethInfo));
sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
sl.X86EmitAddEsp(interpMethodArgSize);
sl.X86EmitReturn(0);
#elif defined(_ARM_)
// We have to maintain 8-byte stack alignment. So if the number of
// slots we would normally push is not a multiple of two, push an extra dummy
// register. (We will not pop this register; rather, we increment
// sp by an amount that includes it.)
bool oddPushes = (((argState.numRegArgs + NumberOfFixedPushes) % 2) != 0);
UINT stackFrameSize = 0;
if (oddPushes) stackFrameSize = sizeof(void*);
// Now, if any FP regs are used as arguments, we will copy those to the stack; reserve space for that here.
// (We push doubles to keep the stack aligned...)
unsigned short doublesToPush = (argState.numFPRegArgSlots + 1)/2;
stackFrameSize += (doublesToPush*2*sizeof(void*));
// The last argument here causes this to generate code to push all int arg regs.
sl.ThumbEmitProlog(/*cCalleeSavedRegs*/NumberOfCalleeSaveRegsToPush, /*cbStackFrame*/stackFrameSize, /*fPushArgRegs*/TRUE);
// Now we will generate code to copy the floating point registers to the stack frame.
if (doublesToPush > 0)
{
sl.ThumbEmitStoreMultipleVFPDoubleReg(ThumbVFPDoubleReg(0), thumbRegSp, doublesToPush*2);
}
#if INTERP_ILSTUBS
if (pMD->IsILStub())
{
// Third argument is stubcontext, in r12.
sl.ThumbEmitMovRegReg(ThumbReg(2), ThumbReg(12));
}
else
#endif
{
// For a non-ILStub method, push NULL as the third StubContext argument.
sl.ThumbEmitMovConstant(ThumbReg(2), 0);
}
// Second arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
sl.ThumbEmitMovRegReg(ThumbReg(1), thumbRegSp);
// First arg is the pointer to the interpMethInfo structure.
sl.ThumbEmitMovConstant(ThumbReg(0), reinterpret_cast<int>(interpMethInfo));
// If there's an HFA return, add space for that.
if (HFARetTypeSize > 0)
{
sl.ThumbEmitSubSp(HFARetTypeSize);
}
// Now we can call the right method.
// No "direct call" instruction, so load into register first. Can use R3.
sl.ThumbEmitMovConstant(ThumbReg(3), static_cast<int>(interpretMethodFunc));
sl.ThumbEmitCallRegister(ThumbReg(3));
// If there's an HFA return, copy to FP regs, and deallocate the stack space.
if (HFARetTypeSize > 0)
{
sl.ThumbEmitLoadMultipleVFPDoubleReg(ThumbVFPDoubleReg(0), thumbRegSp, HFARetTypeSize/sizeof(void*));
sl.ThumbEmitAddSp(HFARetTypeSize);
}
sl.ThumbEmitEpilog();
#elif defined(_ARM64_)
UINT stackFrameSize = argState.numFPRegArgSlots;
sl.EmitProlog(argState.numRegArgs, argState.numFPRegArgSlots, 0 /*cCalleeSavedRegs*/, static_cast<unsigned short>(cHFAVars*sizeof(void*)));
#if INTERP_ILSTUBS
if (pMD->IsILStub())
{
// Third argument is stubcontext, in x12 (METHODDESC_REGISTER)
sl.EmitMovReg(IntReg(2), IntReg(12));
}
else
#endif
{
// For a non-ILStub method, push NULL as the third stubContext argument
sl.EmitMovConstant(IntReg(2), 0);
}
// Second arg is a pointer to the base of the ILArgs -- i.e., the current stack value
sl.EmitAddImm(IntReg(1), RegSp, sl.GetSavedRegArgsOffset());
// First arg is the pointer to the interpMethodInfo structure
#if INTERP_ILSTUBS
if (!pMD->IsILStub())
#endif
{
// interpMethodInfo is already in x8, so copy it from x8
sl.EmitMovReg(IntReg(0), IntReg(8));
}
#if INTERP_ILSTUBS
else
{
// We didn't do the short-circuiting, so interpMethInfo was not
// loaded into x8 above; load it now.
sl.EmitMovConstant(IntReg(0), reinterpret_cast<UINT64>(interpMethInfo));
}
#endif
sl.EmitCallLabel(sl.NewExternalCodeLabel((LPVOID)interpretMethodFunc), FALSE, FALSE);
// If there's an HFA return, copy to FP regs
if (cHFAVars > 0)
{
for (unsigned i=0; i<=(cHFAVars/2)*2;i+=2)
sl.EmitLoadStoreRegPairImm(StubLinkerCPU::eLOAD, VecReg(i), VecReg(i+1), RegSp, i*sizeof(void*));
if ((cHFAVars % 2) == 1)
sl.EmitLoadStoreRegImm(StubLinkerCPU::eLOAD,VecReg(cHFAVars-1), RegSp, cHFAVars*sizeof(void*));
}
sl.EmitEpilog();
#else
#error unsupported platform
#endif
stub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
*nativeSizeOfCode = static_cast<ULONG>(stub->GetNumCodeBytes());
// TODO: manage reference count of interpreter stubs. Look for examples...
*nativeEntry = dac_cast<BYTE*>(stub->GetEntryPoint());
}
// Initialize the arg offset information.
interpMethInfo->InitArgInfo(comp, info, argState.argOffsets);
#ifdef _DEBUG
AddInterpMethInfo(interpMethInfo);
#endif // _DEBUG
if (!jmpCall)
{
// Remember the mapping between code address and MethodDesc*.
RecordInterpreterStubForMethodDesc(info->ftn, *nativeEntry);
}
return CORJIT_OK;
#undef TRACE_SKIPPED
}
size_t Interpreter::GetFrameSize(InterpreterMethodInfo* interpMethInfo)
{
size_t sz = interpMethInfo->LocalMemSize();
#if COMBINE_OPSTACK_VAL_TYPE
sz += (interpMethInfo->m_maxStack * sizeof(OpStackValAndType));
#else
sz += (interpMethInfo->m_maxStack * (sizeof(INT64) + sizeof(InterpreterType*)));
#endif
return sz;
}
// static
ARG_SLOT Interpreter::ExecuteMethodWrapper(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext, __out bool* pDoJmpCall, CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
#define INTERP_DYNAMIC_CONTRACTS 1
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#endif
size_t sizeWithGS = GetFrameSize(interpMethInfo) + sizeof(GSCookie);
BYTE* frameMemoryGS = static_cast<BYTE*>(_alloca(sizeWithGS));
ARG_SLOT retVal = 0;
unsigned jmpCallToken = 0;
Interpreter interp(interpMethInfo, directCall, ilArgs, stubContext, frameMemoryGS);
// Make sure we can do a GC Scan properly.
FrameWithCookie<InterpreterFrame> interpFrame(&interp);
// Update the interpretation count.
InterlockedIncrement(reinterpret_cast<LONG *>(&interpMethInfo->m_invocations));
// Need to wait until this point to do this JITting, since it may trigger a GC.
JitMethodIfAppropriate(interpMethInfo);
// Pass buffers to get jmpCall flag and the token, if necessary.
interp.ExecuteMethod(&retVal, pDoJmpCall, &jmpCallToken);
if (*pDoJmpCall)
{
GCX_PREEMP();
interp.ResolveToken(pResolvedToken, jmpCallToken, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_Call));
}
interpFrame.Pop();
return retVal;
}
// TODO: Add GSCookie checks
// static
inline ARG_SLOT Interpreter::InterpretMethodBody(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext)
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#endif
CEEInfo* jitInfo = NULL;
for (bool doJmpCall = true; doJmpCall; )
{
unsigned jmpCallToken = 0;
CORINFO_RESOLVED_TOKEN methTokPtr;
ARG_SLOT retVal = ExecuteMethodWrapper(interpMethInfo, directCall, ilArgs, stubContext, &doJmpCall, &methTokPtr);
// Clear any allocated jitInfo.
delete jitInfo;
// Nothing to do if the recent method asks not to do a jmpCall.
if (!doJmpCall)
{
return retVal;
}
// The recently executed method wants us to perform a jmpCall.
MethodDesc* pMD = GetMethod(methTokPtr.hMethod);
interpMethInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(pMD));
// Allocate a new jitInfo and also a new interpMethInfo.
if (interpMethInfo == NULL)
{
assert(doJmpCall);
jitInfo = new CEEInfo(pMD, true);
CORINFO_METHOD_INFO methInfo;
GCX_PREEMP();
jitInfo->getMethodInfo(CORINFO_METHOD_HANDLE(pMD), &methInfo);
GenerateInterpreterStub(jitInfo, &methInfo, NULL, 0, &interpMethInfo, true);
}
}
UNREACHABLE();
}
void Interpreter::JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo, bool force)
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
unsigned int MaxInterpretCount = s_InterpreterJITThreshold.val(CLRConfig::INTERNAL_InterpreterJITThreshold);
if (force || interpMethInfo->m_invocations > MaxInterpretCount)
{
GCX_PREEMP();
MethodDesc *md = reinterpret_cast<MethodDesc *>(interpMethInfo->m_method);
PCODE stub = md->GetNativeCode();
if (InterpretationStubToMethodInfo(stub) == md)
{
#ifdef _DEBUG
if (s_TraceInterpreterJITTransitionFlag.val(CLRConfig::INTERNAL_TraceInterpreterJITTransition))
{
fprintf(GetLogFile(), "JITting method %s:%s.\n", md->m_pszDebugClassName, md->m_pszDebugMethodName);
}
#endif // _DEBUG
CORJIT_FLAGS jitFlags(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
NewHolder<COR_ILMETHOD_DECODER> pDecoder(NULL);
// Dynamic methods (e.g., IL stubs) do not have an IL decoder but may
// require additional flags. Ordinary methods require the opposite.
if (md->IsDynamicMethod())
{
jitFlags.Add(md->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags());
}
else
{
COR_ILMETHOD_DECODER::DecoderStatus status;
pDecoder = new COR_ILMETHOD_DECODER(md->GetILHeader(TRUE),
md->GetMDImport(),
&status);
}
PCODE res = md->MakeJitWorker(pDecoder, jitFlags);
interpMethInfo->m_jittedCode = res;
}
}
}
// static
HCIMPL3(float, InterpretMethodFloat, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
{
FCALL_CONTRACT;
ARG_SLOT retVal = 0;
HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
retVal = (ARG_SLOT)Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
HELPER_METHOD_FRAME_END();
return *reinterpret_cast<float*>(ArgSlotEndianessFixup(&retVal, sizeof(float)));
}
HCIMPLEND
// static
HCIMPL3(double, InterpretMethodDouble, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
{
FCALL_CONTRACT;
ARG_SLOT retVal = 0;
HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
retVal = Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
HELPER_METHOD_FRAME_END();
return *reinterpret_cast<double*>(ArgSlotEndianessFixup(&retVal, sizeof(double)));
}
HCIMPLEND
// static
HCIMPL3(INT64, InterpretMethod, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
{
FCALL_CONTRACT;
ARG_SLOT retVal = 0;
HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
retVal = Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
HELPER_METHOD_FRAME_END();
return static_cast<INT64>(retVal);
}
HCIMPLEND
bool Interpreter::IsInCalleesFrames(void* stackPtr)
{
// We assume a downwards-growing stack.
return stackPtr < (m_localVarMemory - sizeof(GSCookie));
}
// I want an enumeration with values for the second byte of 2-byte opcodes.
enum OPCODE_2BYTE {
#define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) TWOBYTE_##c = unsigned(s2),
#include "opcode.def"
#undef OPDEF
};
// Optimize the interpreter loop for speed.
#ifdef _MSC_VER
#pragma optimize("t", on)
#endif
// Duplicating code from JitHelpers for MonEnter,MonExit,MonEnter_Static,
// MonExit_Static because it sets up helper frame for the JIT.
static void MonitorEnter(Object* obj, BYTE* pbLockTaken)
{
OBJECTREF objRef = ObjectToOBJECTREF(obj);
if (objRef == NULL)
COMPlusThrow(kArgumentNullException);
GCPROTECT_BEGININTERIOR(pbLockTaken);
#ifdef _DEBUG
Thread *pThread = GetThread();
DWORD lockCount = pThread->m_dwLockCount;
#endif
if (GET_THREAD()->CatchAtSafePointOpportunistic())
{
GET_THREAD()->PulseGCMode();
}
objRef->EnterObjMonitor();
_ASSERTE ((objRef->GetSyncBlock()->GetMonitor()->m_Recursion == 1 && pThread->m_dwLockCount == lockCount + 1) ||
pThread->m_dwLockCount == lockCount);
if (pbLockTaken != 0) *pbLockTaken = 1;
GCPROTECT_END();
}
static void MonitorExit(Object* obj, BYTE* pbLockTaken)
{
OBJECTREF objRef = ObjectToOBJECTREF(obj);
if (objRef == NULL)
COMPlusThrow(kArgumentNullException);
if (!objRef->LeaveObjMonitor())
COMPlusThrow(kSynchronizationLockException);
if (pbLockTaken != 0) *pbLockTaken = 0;
TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
if (GET_THREAD()->IsAbortRequested()) {
GET_THREAD()->HandleThreadAbort();
}
}
static void MonitorEnterStatic(AwareLock *lock, BYTE* pbLockTaken)
{
lock->Enter();
MONHELPER_STATE(*pbLockTaken = 1;)
}
static void MonitorExitStatic(AwareLock *lock, BYTE* pbLockTaken)
{
// Error, yield or contention
if (!lock->Leave())
COMPlusThrow(kSynchronizationLockException);
TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
if (GET_THREAD()->IsAbortRequested()) {
GET_THREAD()->HandleThreadAbort();
}
}
AwareLock* Interpreter::GetMonitorForStaticMethod()
{
MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
CORINFO_LOOKUP_KIND kind;
{
GCX_PREEMP();
kind = m_interpCeeInfo.getLocationOfThisType(m_methInfo->m_method);
}
if (!kind.needsRuntimeLookup)
{
OBJECTREF ref = pMD->GetMethodTable()->GetManagedClassObject();
return (AwareLock*) ref->GetSyncBlock()->GetMonitor();
}
else
{
CORINFO_CLASS_HANDLE classHnd = nullptr;
switch (kind.runtimeLookupKind)
{
case CORINFO_LOOKUP_CLASSPARAM:
{
classHnd = (CORINFO_CLASS_HANDLE) GetPreciseGenericsContext();
}
break;
case CORINFO_LOOKUP_METHODPARAM:
{
MethodDesc* pMD = (MethodDesc*) GetPreciseGenericsContext();
classHnd = (CORINFO_CLASS_HANDLE) pMD->GetMethodTable();
}
break;
default:
NYI_INTERP("Unknown lookup for synchronized methods");
break;
}
MethodTable* pMT = GetMethodTableFromClsHnd(classHnd);
OBJECTREF ref = pMT->GetManagedClassObject();
ASSERT(ref);
return (AwareLock*) ref->GetSyncBlock()->GetMonitor();
}
}
void Interpreter::DoMonitorEnterWork()
{
MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
if (pMD->IsSynchronized())
{
if (pMD->IsStatic())
{
AwareLock* lock = GetMonitorForStaticMethod();
MonitorEnterStatic(lock, &m_monAcquired);
}
else
{
MonitorEnter((Object*) m_thisArg, &m_monAcquired);
}
}
}
void Interpreter::DoMonitorExitWork()
{
MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
if (pMD->IsSynchronized())
{
if (pMD->IsStatic())
{
AwareLock* lock = GetMonitorForStaticMethod();
MonitorExitStatic(lock, &m_monAcquired);
}
else
{
MonitorExit((Object*) m_thisArg, &m_monAcquired);
}
}
}
void Interpreter::ExecuteMethod(ARG_SLOT* retVal, __out bool* pDoJmpCall, __out unsigned* pJmpCallToken)
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#endif
*pDoJmpCall = false;
// Normally I'd prefer to declare these in small case-block scopes, but most C++ compilers
// do not realize that their lifetimes do not overlap, which makes for a large stack frame.
// So I avoid that by declaring them outside the switch (sigh).
char offsetc, valc;
unsigned char argNumc;
unsigned short argNums;
INT32 vali;
INT64 vall;
InterpreterType it;
size_t sz;
unsigned short ops;
// Make sure that the .cctor for the current method's class has been run.
MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
EnsureClassInit(pMD->GetMethodTable());
#if INTERP_TRACING
const char* methName = eeGetMethodFullName(m_methInfo->m_method);
unsigned ilOffset = 0;
unsigned curInvocation = InterlockedIncrement(&s_totalInvocations);
if (s_TraceInterpreterEntriesFlag.val(CLRConfig::INTERNAL_TraceInterpreterEntries))
{
fprintf(GetLogFile(), "Entering method #%d (= 0x%x): %s.\n", curInvocation, curInvocation, methName);
fprintf(GetLogFile(), " arguments:\n");
PrintArgs();
}
#endif // INTERP_TRACING
#if LOOPS_VIA_INSTRS
unsigned instrs = 0;
#else
#if INTERP_PROFILE
unsigned instrs = 0;
#endif
#endif
EvalLoop:
GCX_ASSERT_COOP();
// Catch any exceptions raised.
EX_TRY {
// Optional features...
#define INTERPRETER_CHECK_LARGE_STRUCT_STACK_HEIGHT 1
#if INTERP_ILCYCLE_PROFILE
m_instr = CEE_COUNT; // Flag to indicate first instruction.
m_exemptCycles = 0;
#endif // INTERP_ILCYCLE_PROFILE
DoMonitorEnterWork();
INTERPLOG("START %d, %s\n", m_methInfo->m_stubNum, methName);
for (;;)
{
// TODO: verify that m_ILCodePtr is legal, and we haven't walked off the end of the IL array? (i.e., bad IL).
// Note that ExecuteBranch() should be called for every branch. That checks that we aren't either before or
// after the IL range. Here, we would only need to check that we haven't gone past the end (not before the beginning)
// because everything that doesn't call ExecuteBranch() should only add to m_ILCodePtr.
#if INTERP_TRACING
ilOffset = CurOffset();
#endif // INTERP_TRACING
#if INTERP_TRACING
if (s_TraceInterpreterOstackFlag.val(CLRConfig::INTERNAL_TraceInterpreterOstack))
{
PrintOStack();
}
#if INTERPRETER_CHECK_LARGE_STRUCT_STACK_HEIGHT
_ASSERTE_MSG(LargeStructStackHeightIsValid(), "Large structure stack height invariant violated."); // Check the large struct stack invariant.
#endif
if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
{
fprintf(GetLogFile(), " %#4x: %s\n", ilOffset, ILOp(m_ILCodePtr));
fflush(GetLogFile());
}
#endif // INTERP_TRACING
#if LOOPS_VIA_INSTRS
instrs++;
#else
#if INTERP_PROFILE
instrs++;
#endif
#endif
#if INTERP_ILINSTR_PROFILE
#if INTERP_ILCYCLE_PROFILE
UpdateCycleCount();
#endif // INTERP_ILCYCLE_PROFILE
InterlockedIncrement(&s_ILInstrExecs[*m_ILCodePtr]);
#endif // INTERP_ILINSTR_PROFILE
switch (*m_ILCodePtr)
{
case CEE_NOP:
m_ILCodePtr++;
continue;
case CEE_BREAK: // TODO: interact with the debugger?
m_ILCodePtr++;
continue;
case CEE_LDARG_0:
LdArg(0);
break;
case CEE_LDARG_1:
LdArg(1);
break;
case CEE_LDARG_2:
LdArg(2);
break;
case CEE_LDARG_3:
LdArg(3);
break;
case CEE_LDLOC_0:
LdLoc(0);
m_ILCodePtr++;
continue;
case CEE_LDLOC_1:
LdLoc(1);
break;
case CEE_LDLOC_2:
LdLoc(2);
break;
case CEE_LDLOC_3:
LdLoc(3);
break;
case CEE_STLOC_0:
StLoc(0);
break;
case CEE_STLOC_1:
StLoc(1);
break;
case CEE_STLOC_2:
StLoc(2);
break;
case CEE_STLOC_3:
StLoc(3);
break;
case CEE_LDARG_S:
m_ILCodePtr++;
argNumc = *m_ILCodePtr;
LdArg(argNumc);
break;
case CEE_LDARGA_S:
m_ILCodePtr++;
argNumc = *m_ILCodePtr;
LdArgA(argNumc);
break;
case CEE_STARG_S:
m_ILCodePtr++;
argNumc = *m_ILCodePtr;
StArg(argNumc);
break;
case CEE_LDLOC_S:
argNumc = *(m_ILCodePtr + 1);
LdLoc(argNumc);
m_ILCodePtr += 2;
continue;
case CEE_LDLOCA_S:
m_ILCodePtr++;
argNumc = *m_ILCodePtr;
LdLocA(argNumc);
break;
case CEE_STLOC_S:
argNumc = *(m_ILCodePtr + 1);
StLoc(argNumc);
m_ILCodePtr += 2;
continue;
case CEE_LDNULL:
LdNull();
break;
case CEE_LDC_I4_M1:
LdIcon(-1);
break;
case CEE_LDC_I4_0:
LdIcon(0);
break;
case CEE_LDC_I4_1:
LdIcon(1);
m_ILCodePtr++;
continue;
case CEE_LDC_I4_2:
LdIcon(2);
break;
case CEE_LDC_I4_3:
LdIcon(3);
break;
case CEE_LDC_I4_4:
LdIcon(4);
break;
case CEE_LDC_I4_5:
LdIcon(5);
break;
case CEE_LDC_I4_6:
LdIcon(6);
break;
case CEE_LDC_I4_7:
LdIcon(7);
break;
case CEE_LDC_I4_8:
LdIcon(8);
break;
case CEE_LDC_I4_S:
valc = getI1(m_ILCodePtr + 1);
LdIcon(valc);
m_ILCodePtr += 2;
continue;
case CEE_LDC_I4:
vali = getI4LittleEndian(m_ILCodePtr + 1);
LdIcon(vali);
m_ILCodePtr += 5;
continue;
case CEE_LDC_I8:
vall = getI8LittleEndian(m_ILCodePtr + 1);
LdLcon(vall);
m_ILCodePtr += 9;
continue;
case CEE_LDC_R4:
// We use I4 here because we just care about the bit pattern.
// LdR4Con will push the right InterpreterType.
vali = getI4LittleEndian(m_ILCodePtr + 1);
LdR4con(vali);
m_ILCodePtr += 5;
continue;
case CEE_LDC_R8:
// We use I8 here because we just care about the bit pattern.
// LdR8Con will push the right InterpreterType.
vall = getI8LittleEndian(m_ILCodePtr + 1);
LdR8con(vall);
m_ILCodePtr += 9;
continue;
case CEE_DUP:
assert(m_curStackHt > 0);
it = OpStackTypeGet(m_curStackHt - 1);
OpStackTypeSet(m_curStackHt, it);
if (it.IsLargeStruct(&m_interpCeeInfo))
{
sz = it.Size(&m_interpCeeInfo);
void* dest = LargeStructOperandStackPush(sz);
memcpy(dest, OpStackGet<void*>(m_curStackHt - 1), sz);
OpStackSet<void*>(m_curStackHt, dest);
}
else
{
OpStackSet<INT64>(m_curStackHt, OpStackGet<INT64>(m_curStackHt - 1));
}
m_curStackHt++;
break;
case CEE_POP:
assert(m_curStackHt > 0);
m_curStackHt--;
it = OpStackTypeGet(m_curStackHt);
if (it.IsLargeStruct(&m_interpCeeInfo))
{
LargeStructOperandStackPop(it.Size(&m_interpCeeInfo), OpStackGet<void*>(m_curStackHt));
}
break;
case CEE_JMP:
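// CEE_JMP transfers control to the method named by the token, reusing the current arguments.
// Record the token, set the jmp-call flag, and exit the eval loop so the code that invoked
// this eval loop can perform the jump-call.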
*pJmpCallToken = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
*pDoJmpCall = true;
goto ExitEvalLoop;
case CEE_CALL:
DoCall(/*virtualCall*/false);
#ifdef _DEBUG
if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
{
fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
}
#endif // _DEBUG
continue;
case CEE_CALLVIRT:
DoCall(/*virtualCall*/true);
#ifdef _DEBUG
if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
{
fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
}
#endif // _DEBUG
continue;
// HARD
case CEE_CALLI:
CallI();
continue;
case CEE_RET:
if (m_methInfo->m_returnType == CORINFO_TYPE_VOID)
{
assert(m_curStackHt == 0);
}
else
{
assert(m_curStackHt == 1);
InterpreterType retValIt = OpStackTypeGet(0);
bool looseInt = s_InterpreterLooseRules &&
CorInfoTypeIsIntegral(m_methInfo->m_returnType) &&
(CorInfoTypeIsIntegral(retValIt.ToCorInfoType()) || CorInfoTypeIsPointer(retValIt.ToCorInfoType())) &&
(m_methInfo->m_returnType != retValIt.ToCorInfoType());
bool looseFloat = s_InterpreterLooseRules &&
CorInfoTypeIsFloatingPoint(m_methInfo->m_returnType) &&
CorInfoTypeIsFloatingPoint(retValIt.ToCorInfoType()) &&
(m_methInfo->m_returnType != retValIt.ToCorInfoType());
// Make sure that the return value "matches" (which allows certain relaxations) the declared return type.
assert((m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
(m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
(m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY) ||
(looseInt || looseFloat) ||
InterpreterType(m_methInfo->m_returnType).StackNormalize().Matches(retValIt, &m_interpCeeInfo));
size_t sz = retValIt.Size(&m_interpCeeInfo);
#if defined(FEATURE_HFA)
CorInfoType cit = CORINFO_TYPE_UNDEF;
{
GCX_PREEMP();
if(m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS)
cit = m_interpCeeInfo.getHFAType(retValIt.ToClassHandle());
}
#endif
if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasRetBuffArg>())
{
assert((m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
(m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
(m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY));
if (retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY)
{
InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo);
TypedByRef* ptr = OpStackGet<TypedByRef*>(0);
*((TypedByRef*) m_retBufArg) = *ptr;
}
else if (retValIt.IsLargeStruct(&m_interpCeeInfo))
{
MethodTable* clsMt = GetMethodTableFromClsHnd(retValIt.ToClassHandle());
// The ostack value is a pointer to the struct value.
CopyValueClassUnchecked(m_retBufArg, OpStackGet<void*>(0), clsMt);
}
else
{
MethodTable* clsMt = GetMethodTableFromClsHnd(retValIt.ToClassHandle());
// The ostack value *is* the struct value.
CopyValueClassUnchecked(m_retBufArg, OpStackGetAddr(0, sz), clsMt);
}
}
#if defined(FEATURE_HFA)
// Is it an HFA?
else if (m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS
&& CorInfoTypeIsFloatingPoint(cit)
&& (MetaSig(reinterpret_cast<MethodDesc*>(m_methInfo->m_method)).GetCallingConventionInfo() & CORINFO_CALLCONV_VARARG) == 0)
{
if (retValIt.IsLargeStruct(&m_interpCeeInfo))
{
// The ostack value is a pointer to the struct value.
memcpy(GetHFARetBuffAddr(static_cast<unsigned>(sz)), OpStackGet<void*>(0), sz);
}
else
{
// The ostack value *is* the struct value.
memcpy(GetHFARetBuffAddr(static_cast<unsigned>(sz)), OpStackGetAddr(0, sz), sz);
}
}
#endif
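// Floating-point return values: the op stack may hold either a float or a double, so widen
// to double first and then store back at the declared return width.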
else if (CorInfoTypeIsFloatingPoint(m_methInfo->m_returnType) &&
CorInfoTypeIsFloatingPoint(retValIt.ToCorInfoType()))
{
double val = (sz <= sizeof(INT32)) ? OpStackGet<float>(0) : OpStackGet<double>(0);
if (m_methInfo->m_returnType == CORINFO_TYPE_DOUBLE)
{
memcpy(retVal, &val, sizeof(double));
}
else
{
float val2 = (float) val;
memcpy(retVal, &val2, sizeof(float));
}
}
else
{
if (sz <= sizeof(INT32))
{
*retVal = OpStackGet<INT32>(0);
}
else
{
// If looseInt is true, we are relying on auto-downcast in case *retVal
// is small (but this is guaranteed not to happen by def'n of ARG_SLOT.)
assert(sz == sizeof(INT64));
*retVal = OpStackGet<INT64>(0);
}
}
}
#if INTERP_PROFILE
// We're not capturing instructions executed in a method that terminates via exception,
// but that's OK...
m_methInfo->RecordExecInstrs(instrs);
#endif
#if INTERP_TRACING
// We keep this live until we leave.
delete methName;
#endif // INTERP_TRACING
#if INTERP_ILCYCLE_PROFILE
// Finish off accounting for the "RET" before we return
UpdateCycleCount();
#endif // INTERP_ILCYCLE_PROFILE
goto ExitEvalLoop;
case CEE_BR_S:
m_ILCodePtr++;
offsetc = *m_ILCodePtr;
// The offset is wrt the beginning of the following instruction, so the +1 is to get to that
// m_ILCodePtr value before adding the offset.
ExecuteBranch(m_ILCodePtr + offsetc + 1);
continue; // Skip the default m_ILCodePtr++ at bottom of loop.
case CEE_LEAVE_S:
// LEAVE empties the operand stack.
m_curStackHt = 0;
m_largeStructOperandStackHt = 0;
offsetc = getI1(m_ILCodePtr + 1);
{
// The offset is wrt the beginning of the following instruction, so the +2 is to get to that
// m_ILCodePtr value before adding the offset.
BYTE* leaveTarget = m_ILCodePtr + offsetc + 2;
unsigned leaveOffset = CurOffset();
m_leaveInfoStack.Push(LeaveInfo(leaveOffset, leaveTarget));
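// If some finally clause covers this leave, SearchForCoveringFinally repositions m_ILCodePtr
// at its handler and leaves the LeaveInfo pushed so CEE_ENDFINALLY can resume the leave;
// otherwise pop the info and branch straight to the leave target.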
if (!SearchForCoveringFinally())
{
m_leaveInfoStack.Pop();
ExecuteBranch(leaveTarget);
}
}
continue; // Skip the default m_ILCodePtr++ at bottom of loop.
// Abstract the next pair out to something common with templates.
case CEE_BRFALSE_S:
BrOnValue<false, 1>();
continue;
case CEE_BRTRUE_S:
BrOnValue<true, 1>();
continue;
case CEE_BEQ_S:
BrOnComparison<CO_EQ, false, 1>();
continue;
case CEE_BGE_S:
assert(m_curStackHt >= 2);
// ECMA spec gives different semantics for different operand types:
switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
BrOnComparison<CO_LT_UN, true, 1>();
break;
default:
BrOnComparison<CO_LT, true, 1>();
break;
}
continue;
case CEE_BGT_S:
BrOnComparison<CO_GT, false, 1>();
continue;
case CEE_BLE_S:
assert(m_curStackHt >= 2);
// ECMA spec gives different semantics for different operand types:
switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
BrOnComparison<CO_GT_UN, true, 1>();
break;
default:
BrOnComparison<CO_GT, true, 1>();
break;
}
continue;
case CEE_BLT_S:
BrOnComparison<CO_LT, false, 1>();
continue;
case CEE_BNE_UN_S:
BrOnComparison<CO_EQ, true, 1>();
continue;
case CEE_BGE_UN_S:
assert(m_curStackHt >= 2);
// ECMA spec gives different semantics for different operand types:
switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
BrOnComparison<CO_LT, true, 1>();
break;
default:
BrOnComparison<CO_LT_UN, true, 1>();
break;
}
continue;
case CEE_BGT_UN_S:
BrOnComparison<CO_GT_UN, false, 1>();
continue;
case CEE_BLE_UN_S:
assert(m_curStackHt >= 2);
// ECMA spec gives different semantics for different operand types:
switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
BrOnComparison<CO_GT, true, 1>();
break;
default:
BrOnComparison<CO_GT_UN, true, 1>();
break;
}
continue;
case CEE_BLT_UN_S:
BrOnComparison<CO_LT_UN, false, 1>();
continue;
case CEE_BR:
m_ILCodePtr++;
vali = getI4LittleEndian(m_ILCodePtr);
vali += 4; // +4 for the length of the offset.
ExecuteBranch(m_ILCodePtr + vali);
if (vali < 0)
{
// Backwards branch -- enable caching.
BackwardsBranchActions(vali);
}
continue;
case CEE_LEAVE:
// LEAVE empties the operand stack.
m_curStackHt = 0;
m_largeStructOperandStackHt = 0;
vali = getI4LittleEndian(m_ILCodePtr + 1);
{
// The offset is wrt the beginning of the following instruction, so the +5 is to get to that
// m_ILCodePtr value before adding the offset.
BYTE* leaveTarget = m_ILCodePtr + (vali + 5);
unsigned leaveOffset = CurOffset();
m_leaveInfoStack.Push(LeaveInfo(leaveOffset, leaveTarget));
if (!SearchForCoveringFinally())
{
(void)m_leaveInfoStack.Pop();
if (vali < 0)
{
// Backwards branch -- enable caching.
BackwardsBranchActions(vali);
}
ExecuteBranch(leaveTarget);
}
}
continue; // Skip the default m_ILCodePtr++ at bottom of loop.
case CEE_BRFALSE:
BrOnValue<false, 4>();
continue;
case CEE_BRTRUE:
BrOnValue<true, 4>();
continue;
case CEE_BEQ:
BrOnComparison<CO_EQ, false, 4>();
continue;
case CEE_BGE:
assert(m_curStackHt >= 2);
// ECMA spec gives different semantics for different operand types:
switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
BrOnComparison<CO_LT_UN, true, 4>();
break;
default:
BrOnComparison<CO_LT, true, 4>();
break;
}
continue;
case CEE_BGT:
BrOnComparison<CO_GT, false, 4>();
continue;
case CEE_BLE:
assert(m_curStackHt >= 2);
// ECMA spec gives different semantics for different operand types:
switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
BrOnComparison<CO_GT_UN, true, 4>();
break;
default:
BrOnComparison<CO_GT, true, 4>();
break;
}
continue;
case CEE_BLT:
BrOnComparison<CO_LT, false, 4>();
continue;
case CEE_BNE_UN:
BrOnComparison<CO_EQ, true, 4>();
continue;
case CEE_BGE_UN:
assert(m_curStackHt >= 2);
// ECMA spec gives different semantics for different operand types:
switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
BrOnComparison<CO_LT, true, 4>();
break;
default:
BrOnComparison<CO_LT_UN, true, 4>();
break;
}
continue;
case CEE_BGT_UN:
BrOnComparison<CO_GT_UN, false, 4>();
continue;
case CEE_BLE_UN:
assert(m_curStackHt >= 2);
// ECMA spec gives different semantics for different operand types:
switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
BrOnComparison<CO_GT, true, 4>();
break;
default:
BrOnComparison<CO_GT_UN, true, 4>();
break;
}
continue;
case CEE_BLT_UN:
BrOnComparison<CO_LT_UN, false, 4>();
continue;
case CEE_SWITCH:
{
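// The switch instruction encodes a 4-byte count N followed by N 4-byte branch offsets, each
// relative to the end of the whole instruction. Values >= N simply fall through.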
assert(m_curStackHt > 0);
m_curStackHt--;
#if defined(_DEBUG) || defined(_AMD64_)
CorInfoType cit = OpStackTypeGet(m_curStackHt).ToCorInfoType();
#endif // _DEBUG || _AMD64_
#ifdef _DEBUG
assert(cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT || cit == CORINFO_TYPE_NATIVEINT);
#endif // _DEBUG
#if defined(_AMD64_)
UINT32 val = (cit == CORINFO_TYPE_NATIVEINT) ? (INT32) OpStackGet<NativeInt>(m_curStackHt)
: OpStackGet<INT32>(m_curStackHt);
#else
UINT32 val = OpStackGet<INT32>(m_curStackHt);
#endif
UINT32 n = getU4LittleEndian(m_ILCodePtr + 1);
UINT32 instrSize = 1 + (n + 1)*4;
if (val < n)
{
vali = getI4LittleEndian(m_ILCodePtr + (5 + val * 4));
ExecuteBranch(m_ILCodePtr + instrSize + vali);
}
else
{
m_ILCodePtr += instrSize;
}
}
continue;
case CEE_LDIND_I1:
LdIndShort<INT8, /*isUnsigned*/false>();
break;
case CEE_LDIND_U1:
LdIndShort<UINT8, /*isUnsigned*/true>();
break;
case CEE_LDIND_I2:
LdIndShort<INT16, /*isUnsigned*/false>();
break;
case CEE_LDIND_U2:
LdIndShort<UINT16, /*isUnsigned*/true>();
break;
case CEE_LDIND_I4:
LdInd<INT32, CORINFO_TYPE_INT>();
break;
case CEE_LDIND_U4:
LdInd<UINT32, CORINFO_TYPE_INT>();
break;
case CEE_LDIND_I8:
LdInd<INT64, CORINFO_TYPE_LONG>();
break;
case CEE_LDIND_I:
LdInd<NativeInt, CORINFO_TYPE_NATIVEINT>();
break;
case CEE_LDIND_R4:
LdInd<float, CORINFO_TYPE_FLOAT>();
break;
case CEE_LDIND_R8:
LdInd<double, CORINFO_TYPE_DOUBLE>();
break;
case CEE_LDIND_REF:
LdInd<Object*, CORINFO_TYPE_CLASS>();
break;
case CEE_STIND_REF:
StInd_Ref();
break;
case CEE_STIND_I1:
StInd<INT8>();
break;
case CEE_STIND_I2:
StInd<INT16>();
break;
case CEE_STIND_I4:
StInd<INT32>();
break;
case CEE_STIND_I8:
StInd<INT64>();
break;
case CEE_STIND_R4:
StInd<float>();
break;
case CEE_STIND_R8:
StInd<double>();
break;
case CEE_ADD:
BinaryArithOp<BA_Add>();
break;
case CEE_SUB:
BinaryArithOp<BA_Sub>();
break;
case CEE_MUL:
BinaryArithOp<BA_Mul>();
break;
case CEE_DIV:
BinaryArithOp<BA_Div>();
break;
case CEE_DIV_UN:
BinaryIntOp<BIO_DivUn>();
break;
case CEE_REM:
BinaryArithOp<BA_Rem>();
break;
case CEE_REM_UN:
BinaryIntOp<BIO_RemUn>();
break;
case CEE_AND:
BinaryIntOp<BIO_And>();
break;
case CEE_OR:
BinaryIntOp<BIO_Or>();
break;
case CEE_XOR:
BinaryIntOp<BIO_Xor>();
break;
case CEE_SHL:
ShiftOp<CEE_SHL>();
break;
case CEE_SHR:
ShiftOp<CEE_SHR>();
break;
case CEE_SHR_UN:
ShiftOp<CEE_SHR_UN>();
break;
case CEE_NEG:
Neg();
break;
case CEE_NOT:
Not();
break;
case CEE_CONV_I1:
Conv<INT8, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
break;
case CEE_CONV_I2:
Conv<INT16, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
break;
case CEE_CONV_I4:
Conv<INT32, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_I8:
Conv<INT64, /*TIsUnsigned*/false, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_LONG>();
break;
case CEE_CONV_R4:
Conv<float, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_FLOAT>();
break;
case CEE_CONV_R8:
Conv<double, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_DOUBLE>();
break;
case CEE_CONV_U4:
Conv<UINT32, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_U8:
Conv<UINT64, /*TIsUnsigned*/true, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_LONG>();
break;
case CEE_CPOBJ:
CpObj();
continue;
case CEE_LDOBJ:
LdObj();
continue;
case CEE_LDSTR:
LdStr();
continue;
case CEE_NEWOBJ:
NewObj();
#ifdef _DEBUG
if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
{
fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
}
#endif // _DEBUG
continue;
case CEE_CASTCLASS:
CastClass();
continue;
case CEE_ISINST:
IsInst();
continue;
case CEE_CONV_R_UN:
ConvRUn();
break;
case CEE_UNBOX:
Unbox();
continue;
case CEE_THROW:
Throw();
break;
case CEE_LDFLD:
LdFld();
continue;
case CEE_LDFLDA:
LdFldA();
continue;
case CEE_STFLD:
StFld();
continue;
case CEE_LDSFLD:
LdSFld();
continue;
case CEE_LDSFLDA:
LdSFldA();
continue;
case CEE_STSFLD:
StSFld();
continue;
case CEE_STOBJ:
StObj();
continue;
case CEE_CONV_OVF_I1_UN:
ConvOvfUn<INT8, SCHAR_MIN, SCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_I2_UN:
ConvOvfUn<INT16, SHRT_MIN, SHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_I4_UN:
ConvOvfUn<INT32, INT_MIN, INT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_I8_UN:
ConvOvfUn<INT64, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
break;
case CEE_CONV_OVF_U1_UN:
ConvOvfUn<UINT8, 0, UCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_U2_UN:
ConvOvfUn<UINT16, 0, USHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_U4_UN:
ConvOvfUn<UINT32, 0, UINT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_U8_UN:
ConvOvfUn<UINT64, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
break;
case CEE_CONV_OVF_I_UN:
if (sizeof(NativeInt) == 4)
{
ConvOvfUn<NativeInt, INT_MIN, INT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
}
else
{
assert(sizeof(NativeInt) == 8);
ConvOvfUn<NativeInt, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
}
break;
case CEE_CONV_OVF_U_UN:
if (sizeof(NativeUInt) == 4)
{
ConvOvfUn<NativeUInt, 0, UINT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
}
else
{
assert(sizeof(NativeUInt) == 8);
ConvOvfUn<NativeUInt, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
}
break;
case CEE_BOX:
Box();
continue;
case CEE_NEWARR:
NewArr();
continue;
case CEE_LDLEN:
LdLen();
break;
case CEE_LDELEMA:
LdElem</*takeAddr*/true>();
continue;
case CEE_LDELEM_I1:
LdElemWithType<INT8, false, CORINFO_TYPE_INT>();
break;
case CEE_LDELEM_U1:
LdElemWithType<UINT8, false, CORINFO_TYPE_INT>();
break;
case CEE_LDELEM_I2:
LdElemWithType<INT16, false, CORINFO_TYPE_INT>();
break;
case CEE_LDELEM_U2:
LdElemWithType<UINT16, false, CORINFO_TYPE_INT>();
break;
case CEE_LDELEM_I4:
LdElemWithType<INT32, false, CORINFO_TYPE_INT>();
break;
case CEE_LDELEM_U4:
LdElemWithType<UINT32, false, CORINFO_TYPE_INT>();
break;
case CEE_LDELEM_I8:
LdElemWithType<INT64, false, CORINFO_TYPE_LONG>();
break;
// Note that the ECMA spec defines a "LDELEM_U8", but it is the same instruction number as LDELEM_I8 (since
// when loading to the widest width, signed/unsigned doesn't matter).
case CEE_LDELEM_I:
LdElemWithType<NativeInt, false, CORINFO_TYPE_NATIVEINT>();
break;
case CEE_LDELEM_R4:
LdElemWithType<float, false, CORINFO_TYPE_FLOAT>();
break;
case CEE_LDELEM_R8:
LdElemWithType<double, false, CORINFO_TYPE_DOUBLE>();
break;
case CEE_LDELEM_REF:
LdElemWithType<Object*, true, CORINFO_TYPE_CLASS>();
break;
case CEE_STELEM_I:
StElemWithType<NativeInt, false>();
break;
case CEE_STELEM_I1:
StElemWithType<INT8, false>();
break;
case CEE_STELEM_I2:
StElemWithType<INT16, false>();
break;
case CEE_STELEM_I4:
StElemWithType<INT32, false>();
break;
case CEE_STELEM_I8:
StElemWithType<INT64, false>();
break;
case CEE_STELEM_R4:
StElemWithType<float, false>();
break;
case CEE_STELEM_R8:
StElemWithType<double, false>();
break;
case CEE_STELEM_REF:
StElemWithType<Object*, true>();
break;
case CEE_LDELEM:
LdElem</*takeAddr*/false>();
continue;
case CEE_STELEM:
StElem();
continue;
case CEE_UNBOX_ANY:
UnboxAny();
continue;
case CEE_CONV_OVF_I1:
ConvOvf<INT8, SCHAR_MIN, SCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_U1:
ConvOvf<UINT8, 0, UCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_I2:
ConvOvf<INT16, SHRT_MIN, SHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_U2:
ConvOvf<UINT16, 0, USHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_I4:
ConvOvf<INT32, INT_MIN, INT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_U4:
ConvOvf<UINT32, 0, UINT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
break;
case CEE_CONV_OVF_I8:
ConvOvf<INT64, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
break;
case CEE_CONV_OVF_U8:
ConvOvf<UINT64, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
break;
case CEE_REFANYVAL:
RefanyVal();
continue;
case CEE_CKFINITE:
CkFinite();
break;
case CEE_MKREFANY:
MkRefany();
continue;
case CEE_LDTOKEN:
LdToken();
continue;
case CEE_CONV_U2:
Conv<UINT16, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
break;
case CEE_CONV_U1:
Conv<UINT8, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
break;
case CEE_CONV_I:
Conv<NativeInt, /*TIsUnsigned*/false, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_NATIVEINT>();
break;
case CEE_CONV_OVF_I:
if (sizeof(NativeInt) == 4)
{
ConvOvf<NativeInt, INT_MIN, INT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
}
else
{
assert(sizeof(NativeInt) == 8);
ConvOvf<NativeInt, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
}
break;
case CEE_CONV_OVF_U:
if (sizeof(NativeUInt) == 4)
{
ConvOvf<NativeUInt, 0, UINT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
}
else
{
assert(sizeof(NativeUInt) == 8);
ConvOvf<NativeUInt, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
}
break;
case CEE_ADD_OVF:
BinaryArithOvfOp<BA_Add, /*asUnsigned*/false>();
break;
case CEE_ADD_OVF_UN:
BinaryArithOvfOp<BA_Add, /*asUnsigned*/true>();
break;
case CEE_MUL_OVF:
BinaryArithOvfOp<BA_Mul, /*asUnsigned*/false>();
break;
case CEE_MUL_OVF_UN:
BinaryArithOvfOp<BA_Mul, /*asUnsigned*/true>();
break;
case CEE_SUB_OVF:
BinaryArithOvfOp<BA_Sub, /*asUnsigned*/false>();
break;
case CEE_SUB_OVF_UN:
BinaryArithOvfOp<BA_Sub, /*asUnsigned*/true>();
break;
case CEE_ENDFINALLY:
// We have just ended a finally.
// If we were called during exception dispatch,
// rethrow the exception on our way out.
if (m_leaveInfoStack.IsEmpty())
{
Object* finallyException = NULL;
{
GCX_FORBID();
assert(m_inFlightException != NULL);
finallyException = m_inFlightException;
INTERPLOG("endfinally handling for %s, %p, %p\n", methName, m_methInfo, finallyException);
m_inFlightException = NULL;
}
COMPlusThrow(ObjectToOBJECTREF(finallyException));
UNREACHABLE();
}
// Otherwise, see if there's another finally block to
// execute as part of processing the current LEAVE...
else if (!SearchForCoveringFinally())
{
// No, there isn't -- go to the leave target.
assert(!m_leaveInfoStack.IsEmpty());
LeaveInfo li = m_leaveInfoStack.Pop();
ExecuteBranch(li.m_target);
}
// Yes, there is, and SearchForCoveringFinally set us up to start executing it.
continue; // Skip the default m_ILCodePtr++ at bottom of loop.
case CEE_STIND_I:
StInd<NativeInt>();
break;
case CEE_CONV_U:
Conv<NativeUInt, /*TIsUnsigned*/true, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_NATIVEINT>();
break;
case CEE_PREFIX7:
NYI_INTERP("Unimplemented opcode: CEE_PREFIX7");
break;
case CEE_PREFIX6:
NYI_INTERP("Unimplemented opcode: CEE_PREFIX6");
break;
case CEE_PREFIX5:
NYI_INTERP("Unimplemented opcode: CEE_PREFIX5");
break;
case CEE_PREFIX4:
NYI_INTERP("Unimplemented opcode: CEE_PREFIX4");
break;
case CEE_PREFIX3:
NYI_INTERP("Unimplemented opcode: CEE_PREFIX3");
break;
case CEE_PREFIX2:
NYI_INTERP("Unimplemented opcode: CEE_PREFIX2");
break;
case CEE_PREFIX1:
// This is the prefix for all the 2-byte opcodes.
// Figure out the second byte of the 2-byte opcode.
ops = *(m_ILCodePtr + 1);
#if INTERP_ILINSTR_PROFILE
// Take one away from PREFIX1, which we won't count.
InterlockedDecrement(&s_ILInstrExecs[CEE_PREFIX1]);
// Credit instead to the 2-byte instruction index.
InterlockedIncrement(&s_ILInstr2ByteExecs[ops]);
#endif // INTERP_ILINSTR_PROFILE
switch (ops)
{
case TWOBYTE_CEE_ARGLIST:
// NYI_INTERP("Unimplemented opcode: TWOBYTE_CEE_ARGLIST");
assert(m_methInfo->m_varArgHandleArgNum != NO_VA_ARGNUM);
LdArgA(m_methInfo->m_varArgHandleArgNum);
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_CEQ:
CompareOp<CO_EQ>();
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_CGT:
CompareOp<CO_GT>();
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_CGT_UN:
CompareOp<CO_GT_UN>();
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_CLT:
CompareOp<CO_LT>();
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_CLT_UN:
CompareOp<CO_LT_UN>();
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_LDARG:
m_ILCodePtr += 2;
argNums = getU2LittleEndian(m_ILCodePtr);
LdArg(argNums);
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_LDARGA:
m_ILCodePtr += 2;
argNums = getU2LittleEndian(m_ILCodePtr);
LdArgA(argNums);
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_STARG:
m_ILCodePtr += 2;
argNums = getU2LittleEndian(m_ILCodePtr);
StArg(argNums);
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_LDLOC:
m_ILCodePtr += 2;
argNums = getU2LittleEndian(m_ILCodePtr);
LdLoc(argNums);
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_LDLOCA:
m_ILCodePtr += 2;
argNums = getU2LittleEndian(m_ILCodePtr);
LdLocA(argNums);
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_STLOC:
m_ILCodePtr += 2;
argNums = getU2LittleEndian(m_ILCodePtr);
StLoc(argNums);
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_CONSTRAINED:
RecordConstrainedCall();
break;
case TWOBYTE_CEE_VOLATILE:
// Set a flag that causes a memory barrier to be associated with the next load or store.
m_volatileFlag = true;
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_LDFTN:
LdFtn();
break;
case TWOBYTE_CEE_INITOBJ:
InitObj();
break;
case TWOBYTE_CEE_LOCALLOC:
LocAlloc();
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_LDVIRTFTN:
LdVirtFtn();
break;
case TWOBYTE_CEE_SIZEOF:
Sizeof();
break;
case TWOBYTE_CEE_RETHROW:
Rethrow();
break;
case TWOBYTE_CEE_READONLY:
m_readonlyFlag = true;
m_ILCodePtr += 2;
// A comment in importer.cpp indicates that READONLY may also apply to calls. We'll see.
_ASSERTE_MSG(*m_ILCodePtr == CEE_LDELEMA, "According to the ECMA spec, READONLY may only precede LDELEMA");
break;
case TWOBYTE_CEE_INITBLK:
InitBlk();
break;
case TWOBYTE_CEE_CPBLK:
CpBlk();
break;
case TWOBYTE_CEE_ENDFILTER:
EndFilter();
break;
case TWOBYTE_CEE_UNALIGNED:
// Nothing to do here.
m_ILCodePtr += 3;
break;
case TWOBYTE_CEE_TAILCALL:
// TODO: Needs revisiting when implementing tail call.
// NYI_INTERP("Unimplemented opcode: TWOBYTE_CEE_TAILCALL");
m_ILCodePtr += 2;
break;
case TWOBYTE_CEE_REFANYTYPE:
RefanyType();
break;
default:
UNREACHABLE();
break;
}
continue;
case CEE_PREFIXREF:
NYI_INTERP("Unimplemented opcode: CEE_PREFIXREF");
m_ILCodePtr++;
continue;
default:
UNREACHABLE();
continue;
}
m_ILCodePtr++;
}
ExitEvalLoop:;
INTERPLOG("DONE %d, %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
}
EX_CATCH
{
INTERPLOG("EXCEPTION %d (throw), %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
bool handleException = false;
OBJECTREF orThrowable = NULL;
GCX_COOP_NO_DTOR();
orThrowable = GET_THROWABLE();
if (m_filterNextScan != 0)
{
// We are in the middle of a filter scan and an exception is thrown inside
// a filter. We are supposed to swallow it and assume the filter did not
// handle the exception.
m_curStackHt = 0;
m_largeStructOperandStackHt = 0;
LdIcon(0);
EndFilter();
handleException = true;
}
else
{
// orThrowable must be protected. MethodHandlesException() will place orThrowable
// into the operand stack (a permanently protected area) if it returns true.
GCPROTECT_BEGIN(orThrowable);
handleException = MethodHandlesException(orThrowable);
GCPROTECT_END();
}
if (handleException)
{
GetThread()->SafeSetThrowables(orThrowable
DEBUG_ARG(ThreadExceptionState::STEC_CurrentTrackerEqualNullOkForInterpreter));
goto EvalLoop;
}
else
{
INTERPLOG("EXCEPTION %d (rethrow), %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
EX_RETHROW;
}
}
EX_END_CATCH(RethrowTransientExceptions)
}
#ifdef _MSC_VER
#pragma optimize("", on)
#endif
void Interpreter::EndFilter()
{
unsigned handles = OpStackGet<unsigned>(0);
// If the filter decides to handle the exception, then go to the handler offset.
if (handles)
{
// We decided to handle the exception, so give all EH entries a chance to
// handle future exceptions. Clear scan.
m_filterNextScan = 0;
ExecuteBranch(m_methInfo->m_ILCode + m_filterHandlerOffset);
}
// The filter decided not to handle the exception. Ask whether some other filter is lined up
// to try to handle it, or whether some other catch/finally handler will. If no one handles
// the exception, rethrow and be done with it.
else
{
bool handlesEx = false;
{
OBJECTREF orThrowable = ObjectToOBJECTREF(m_inFlightException);
GCPROTECT_BEGIN(orThrowable);
handlesEx = MethodHandlesException(orThrowable);
GCPROTECT_END();
}
if (!handlesEx)
{
// Just clear scan before rethrowing to give any EH entry a chance to handle
// the "rethrow".
m_filterNextScan = 0;
Object* filterException = NULL;
{
GCX_FORBID();
assert(m_inFlightException != NULL);
filterException = m_inFlightException;
INTERPLOG("endfilter handling for %s, %p, %p\n", m_methInfo->m_methName, m_methInfo, filterException);
m_inFlightException = NULL;
}
COMPlusThrow(ObjectToOBJECTREF(filterException));
UNREACHABLE();
}
else
{
// Let it do another round of filter:end-filter or handler block.
// During the next end filter, we will reuse m_filterNextScan and
// continue searching where we left off. Note however, while searching,
// any of the filters could throw an exception. But this is supposed to
// be swallowed and endfilter should be called with a value of 0 on the
// stack.
}
}
}
bool Interpreter::MethodHandlesException(OBJECTREF orThrowable)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
bool handlesEx = false;
if (orThrowable != NULL)
{
PTR_Thread pCurThread = GetThread();
// Don't catch ThreadAbort and other uncatchable exceptions
if (!IsUncatchable(&orThrowable))
{
// Does the current method catch this? The clauses are defined by offsets, so get that.
// However, if we are in the middle of a filter scan, make sure we get the offset of the
// excepting code, rather than the offset of the filter body.
DWORD curOffset = (m_filterNextScan != 0) ? m_filterExcILOffset : CurOffset();
TypeHandle orThrowableTH = TypeHandle(orThrowable->GetMethodTable());
GCPROTECT_BEGIN(orThrowable);
GCX_PREEMP();
// Perform a filter scan or a regular walk of the EH table. A filter scan is performed when
// we are evaluating a series of filters for the exception, continuing until we reach the
// first handler (a filter's or otherwise) that will handle it.
for (unsigned XTnum = m_filterNextScan; XTnum < m_methInfo->m_ehClauseCount; XTnum++)
{
CORINFO_EH_CLAUSE clause;
m_interpCeeInfo.getEHinfo(m_methInfo->m_method, XTnum, &clause);
assert(clause.HandlerLength != (unsigned)-1); // @DEPRECATED
// First, is the current offset in the try block?
if (clause.TryOffset <= curOffset && curOffset < clause.TryOffset + clause.TryLength)
{
unsigned handlerOffset = 0;
// CORINFO_EH_CLAUSE_NONE represents 'catch' blocks
if (clause.Flags == CORINFO_EH_CLAUSE_NONE)
{
// Now, does the catch block handle the thrown exception type?
CORINFO_CLASS_HANDLE excType = FindClass(clause.ClassToken InterpTracingArg(RTK_CheckHandlesException));
if (ExceptionIsOfRightType(TypeHandle::FromPtr(excType), orThrowableTH))
{
GCX_COOP();
// Push the exception object onto the operand stack.
OpStackSet<OBJECTREF>(0, orThrowable);
OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
m_curStackHt = 1;
m_largeStructOperandStackHt = 0;
handlerOffset = clause.HandlerOffset;
handlesEx = true;
m_filterNextScan = 0;
}
else
{
GCX_COOP();
// Handle a wrapped exception.
OBJECTREF orUnwrapped = PossiblyUnwrapThrowable(orThrowable, GetMethodDesc()->GetAssembly());
if (ExceptionIsOfRightType(TypeHandle::FromPtr(excType), orUnwrapped->GetTrueTypeHandle()))
{
// Push the exception object onto the operand stack.
OpStackSet<OBJECTREF>(0, orUnwrapped);
OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
m_curStackHt = 1;
m_largeStructOperandStackHt = 0;
handlerOffset = clause.HandlerOffset;
handlesEx = true;
m_filterNextScan = 0;
}
}
}
else if (clause.Flags == CORINFO_EH_CLAUSE_FILTER)
{
GCX_COOP();
// Push the exception object onto the operand stack.
OpStackSet<OBJECTREF>(0, orThrowable);
OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
m_curStackHt = 1;
m_largeStructOperandStackHt = 0;
handlerOffset = clause.FilterOffset;
m_inFlightException = OBJECTREFToObject(orThrowable);
handlesEx = true;
m_filterHandlerOffset = clause.HandlerOffset;
m_filterNextScan = XTnum + 1;
m_filterExcILOffset = curOffset;
}
else if (clause.Flags == CORINFO_EH_CLAUSE_FAULT ||
clause.Flags == CORINFO_EH_CLAUSE_FINALLY)
{
GCX_COOP();
// Save the exception object to rethrow.
m_inFlightException = OBJECTREFToObject(orThrowable);
// Empty the operand stack.
m_curStackHt = 0;
m_largeStructOperandStackHt = 0;
handlerOffset = clause.HandlerOffset;
handlesEx = true;
m_filterNextScan = 0;
}
// Reset the interpreter loop in preparation of calling the handler.
if (handlesEx)
{
// Set the IL offset of the handler.
ExecuteBranch(m_methInfo->m_ILCode + handlerOffset);
// If an exception occurs while attempting to leave a protected scope,
// we empty the 'leave' info stack upon entering the handler.
while (!m_leaveInfoStack.IsEmpty())
{
m_leaveInfoStack.Pop();
}
// Some things are set up before a call, and must be cleared on an exception caught by the caller.
// A method that returns a struct allocates local space for the return value, and "registers" that
// space and the type so that it's scanned if a GC happens. "Unregister" it if we throw an exception
// in the call and handle it in the caller. (If it's not handled by the caller, the Interpreter is
// deallocated, so its value doesn't matter.)
m_structRetValITPtr = NULL;
m_callThisArg = NULL;
m_argsSize = 0;
break;
}
}
}
GCPROTECT_END();
}
if (!handlesEx)
{
DoMonitorExitWork();
}
}
return handlesEx;
}
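// Number of operand bytes that follow an opcode, keyed by its operand format from opcode.def.
// InlineSwitch is variable-length and is handled specially by the callers.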
static unsigned OpFormatExtraSize(opcode_format_t format) {
switch (format)
{
case InlineNone:
return 0;
case InlineVar:
return 2;
case InlineI:
case InlineBrTarget:
case InlineMethod:
case InlineField:
case InlineType:
case InlineString:
case InlineSig:
case InlineRVA:
case InlineTok:
case ShortInlineR:
return 4;
case InlineR:
case InlineI8:
return 8;
case InlineSwitch:
return 0; // We'll handle this specially.
case ShortInlineVar:
case ShortInlineI:
case ShortInlineBrTarget:
return 1;
default:
assert(false);
return 0;
}
}
static unsigned opSizes1Byte[CEE_COUNT];
static bool opSizes1ByteInit = false;
static void OpSizes1ByteInit()
{
if (opSizes1ByteInit) return;
#define OPDEF(name, stringname, stackpop, stackpush, params, kind, len, byte1, byte2, ctrl) \
opSizes1Byte[name] = len + OpFormatExtraSize(params);
#include "opcode.def"
#undef OPDEF
opSizes1ByteInit = true;
}
// static
bool Interpreter::MethodMayHaveLoop(BYTE* ilCode, unsigned codeSize)
{
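// Conservative scan: any branch, leave, or switch arm with a negative relative offset is taken
// as evidence of a possible loop; all other opcodes are skipped via the per-opcode size table.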
OpSizes1ByteInit();
int delta;
BYTE* ilCodeLim = ilCode + codeSize;
while (ilCode < ilCodeLim)
{
unsigned op = *ilCode;
switch (op)
{
case CEE_BR_S: case CEE_BRFALSE_S: case CEE_BRTRUE_S:
case CEE_BEQ_S: case CEE_BGE_S: case CEE_BGT_S: case CEE_BLE_S: case CEE_BLT_S:
case CEE_BNE_UN_S: case CEE_BGE_UN_S: case CEE_BGT_UN_S: case CEE_BLE_UN_S: case CEE_BLT_UN_S:
case CEE_LEAVE_S:
delta = getI1(ilCode + 1);
if (delta < 0) return true;
ilCode += 2;
break;
case CEE_BR: case CEE_BRFALSE: case CEE_BRTRUE:
case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT:
case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN:
case CEE_LEAVE:
delta = getI4LittleEndian(ilCode + 1);
if (delta < 0) return true;
ilCode += 5;
break;
case CEE_SWITCH:
{
UINT32 n = getU4LittleEndian(ilCode + 1);
UINT32 instrSize = 1 + (n + 1)*4;
for (unsigned i = 0; i < n; i++) {
delta = getI4LittleEndian(ilCode + (5 + i * 4));
if (delta < 0) return true;
}
ilCode += instrSize;
break;
}
case CEE_PREFIX1:
op = *(ilCode + 1) + 0x100;
assert(op < CEE_COUNT); // Bounds check for below.
// deliberate fall-through here.
default:
// For the rest of the 1-byte instructions, we'll use a table-driven approach.
ilCode += opSizes1Byte[op];
break;
}
}
return false;
}
void Interpreter::BackwardsBranchActions(int offset)
{
// TODO: Figure out how to do a GC poll.
}
bool Interpreter::SearchForCoveringFinally()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_ANY;
} CONTRACTL_END;
_ASSERTE_MSG(!m_leaveInfoStack.IsEmpty(), "precondition");
LeaveInfo& li = m_leaveInfoStack.PeekRef();
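// Resume the EH-table walk at li.m_nextEHIndex, so that successive finally clauses covering
// the same leave are executed in order as each CEE_ENDFINALLY calls back into this search.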
GCX_PREEMP();
for (unsigned XTnum = li.m_nextEHIndex; XTnum < m_methInfo->m_ehClauseCount; XTnum++)
{
CORINFO_EH_CLAUSE clause;
m_interpCeeInfo.getEHinfo(m_methInfo->m_method, XTnum, &clause);
assert(clause.HandlerLength != (unsigned)-1); // @DEPRECATED
// First, is the offset of the leave instruction in the try block?
unsigned tryEndOffset = clause.TryOffset + clause.TryLength;
if (clause.TryOffset <= li.m_offset && li.m_offset < tryEndOffset)
{
// Yes: is it a finally, and is its target outside the try block?
size_t targOffset = (li.m_target - m_methInfo->m_ILCode);
if (clause.Flags == CORINFO_EH_CLAUSE_FINALLY
&& !(clause.TryOffset <= targOffset && targOffset < tryEndOffset))
{
m_ILCodePtr = m_methInfo->m_ILCode + clause.HandlerOffset;
li.m_nextEHIndex = XTnum + 1;
return true;
}
}
}
// Caller will handle popping the leave info stack.
return false;
}
// static
void Interpreter::GCScanRoots(promote_func* pf, ScanContext* sc, void* interp0)
{
Interpreter* interp = reinterpret_cast<Interpreter*>(interp0);
interp->GCScanRoots(pf, sc);
}
void Interpreter::GCScanRoots(promote_func* pf, ScanContext* sc)
{
// Report inbound arguments, if the interpreter has not been invoked directly.
// (In the latter case, the arguments are reported by the calling method.)
if (!m_directCall)
{
for (unsigned i = 0; i < m_methInfo->m_numArgs; i++)
{
GCScanRootAtLoc(reinterpret_cast<Object**>(GetArgAddr(i)), GetArgType(i), pf, sc);
}
}
if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasThisArg>())
{
if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_thisArgIsObjPtr>())
{
GCScanRootAtLoc(&m_thisArg, InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
}
else
{
GCScanRootAtLoc(&m_thisArg, InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
}
}
// This is the "this" argument passed in to DoCallWork. (Note that we treat this as a byref; it
// might be, for a struct instance method, and this covers the object pointer case as well.)
GCScanRootAtLoc(reinterpret_cast<Object**>(&m_callThisArg), InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
// Scan the exception object that we'll rethrow at the end of the finally block.
GCScanRootAtLoc(reinterpret_cast<Object**>(&m_inFlightException), InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
// A retBufArg may, in some cases, be a byref into the heap.
if (m_retBufArg != NULL)
{
GCScanRootAtLoc(reinterpret_cast<Object**>(&m_retBufArg), InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
}
if (m_structRetValITPtr != NULL)
{
GCScanRootAtLoc(reinterpret_cast<Object**>(m_structRetValTempSpace), *m_structRetValITPtr, pf, sc);
}
// We'll conservatively assume that we might have a security object.
GCScanRootAtLoc(reinterpret_cast<Object**>(&m_securityObject), InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
// Do locals.
for (unsigned i = 0; i < m_methInfo->m_numLocals; i++)
{
InterpreterType it = m_methInfo->m_localDescs[i].m_type;
void* localPtr = NULL;
if (it.IsLargeStruct(&m_interpCeeInfo))
{
void* structPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
localPtr = *reinterpret_cast<void**>(structPtr);
}
else
{
localPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), it.Size(&m_interpCeeInfo));
}
GCScanRootAtLoc(reinterpret_cast<Object**>(localPtr), it, pf, sc, m_methInfo->GetPinningBit(i));
}
// Do current ostack.
for (unsigned i = 0; i < m_curStackHt; i++)
{
InterpreterType it = OpStackTypeGet(i);
if (it.IsLargeStruct(&m_interpCeeInfo))
{
Object** structPtr = reinterpret_cast<Object**>(OpStackGet<void*>(i));
// If the ostack value is a pointer to a local var value, don't scan, since we already
// scanned the variable value above.
if (!IsInLargeStructLocalArea(structPtr))
{
GCScanRootAtLoc(structPtr, it, pf, sc);
}
}
else
{
void* stackPtr = OpStackGetAddr(i, it.Size(&m_interpCeeInfo));
GCScanRootAtLoc(reinterpret_cast<Object**>(stackPtr), it, pf, sc);
}
}
// Any outgoing arguments for a call in progress.
for (unsigned i = 0; i < m_argsSize; i++)
{
// If a call has a large struct argument, we'll have pushed a pointer to the entry for that argument on the
// largeStructStack of the current Interpreter. That will be scanned by the code above, so just skip it.
InterpreterType undef(CORINFO_TYPE_UNDEF);
InterpreterType it = m_argTypes[i];
if (it != undef && !it.IsLargeStruct(&m_interpCeeInfo))
{
BYTE* argPtr = ArgSlotEndianessFixup(&m_args[i], it.Size(&m_interpCeeInfo));
GCScanRootAtLoc(reinterpret_cast<Object**>(argPtr), it, pf, sc);
}
}
}
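// Report a single root according to its interpreter type: object references are promoted
// directly, byrefs and typed references are reported as interior pointers, and value classes
// are scanned field by field; the remaining types contain no GC pointers.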
void Interpreter::GCScanRootAtLoc(Object** loc, InterpreterType it, promote_func* pf, ScanContext* sc, bool pinningRef)
{
switch (it.ToCorInfoType())
{
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_STRING:
{
DWORD flags = 0;
if (pinningRef) flags |= GC_CALL_PINNED;
(*pf)(loc, sc, flags);
}
break;
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_REFANY:
{
DWORD flags = GC_CALL_INTERIOR;
if (pinningRef) flags |= GC_CALL_PINNED;
(*pf)(loc, sc, flags);
}
break;
case CORINFO_TYPE_VALUECLASS:
assert(!pinningRef);
GCScanValueClassRootAtLoc(loc, it.ToClassHandle(), pf, sc);
break;
default:
assert(!pinningRef);
break;
}
}
void Interpreter::GCScanValueClassRootAtLoc(Object** loc, CORINFO_CLASS_HANDLE valueClsHnd, promote_func* pf, ScanContext* sc)
{
MethodTable* valClsMT = GetMethodTableFromClsHnd(valueClsHnd);
ReportPointersFromValueType(pf, sc, valClsMT, loc);
}
// Returns "true" iff "cit" is "stack-normal": all integer types with byte size less than 4
// are folded to CORINFO_TYPE_INT; all remaining unsigned types are folded to their signed counterparts.
bool IsStackNormalType(CorInfoType cit)
{
LIMITED_METHOD_CONTRACT;
switch (cit)
{
case CORINFO_TYPE_UNDEF:
case CORINFO_TYPE_VOID:
case CORINFO_TYPE_BOOL:
case CORINFO_TYPE_CHAR:
case CORINFO_TYPE_BYTE:
case CORINFO_TYPE_UBYTE:
case CORINFO_TYPE_SHORT:
case CORINFO_TYPE_USHORT:
case CORINFO_TYPE_UINT:
case CORINFO_TYPE_NATIVEUINT:
case CORINFO_TYPE_ULONG:
case CORINFO_TYPE_VAR:
case CORINFO_TYPE_STRING:
case CORINFO_TYPE_PTR:
return false;
case CORINFO_TYPE_INT:
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_LONG:
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY:
// I chose to consider both float and double stack-normal; together these comprise
// the "F" type of the ECMA spec. This means I have to allow them to interconvert
// freely.
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
return true;
default:
UNREACHABLE();
}
}
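// Folds a CorInfoType to its stack-normal form: small integers and unsigned integral types
// widen to their signed 4-byte-or-larger counterparts, strings become CLASS, and raw pointers
// become NATIVEINT; types that are already stack-normal are returned unchanged.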
CorInfoType CorInfoTypeStackNormalize(CorInfoType cit)
{
LIMITED_METHOD_CONTRACT;
switch (cit)
{
case CORINFO_TYPE_UNDEF:
return CORINFO_TYPE_UNDEF;
case CORINFO_TYPE_VOID:
case CORINFO_TYPE_VAR:
_ASSERTE_MSG(false, "Type that cannot be on the ostack.");
return CORINFO_TYPE_UNDEF;
case CORINFO_TYPE_BOOL:
case CORINFO_TYPE_CHAR:
case CORINFO_TYPE_BYTE:
case CORINFO_TYPE_UBYTE:
case CORINFO_TYPE_SHORT:
case CORINFO_TYPE_USHORT:
case CORINFO_TYPE_UINT:
return CORINFO_TYPE_INT;
case CORINFO_TYPE_NATIVEUINT:
case CORINFO_TYPE_PTR:
return CORINFO_TYPE_NATIVEINT;
case CORINFO_TYPE_ULONG:
return CORINFO_TYPE_LONG;
case CORINFO_TYPE_STRING:
return CORINFO_TYPE_CLASS;
case CORINFO_TYPE_INT:
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_LONG:
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY:
// I chose to consider both float and double stack-normal; together these comprise
// the "F" type of the ECMA spec. This means I have to allow them to interconvert
// freely.
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
assert(IsStackNormalType(cit));
return cit;
default:
UNREACHABLE();
}
}
InterpreterType InterpreterType::StackNormalize() const
{
LIMITED_METHOD_CONTRACT;
switch (ToCorInfoType())
{
case CORINFO_TYPE_BOOL:
case CORINFO_TYPE_CHAR:
case CORINFO_TYPE_BYTE:
case CORINFO_TYPE_UBYTE:
case CORINFO_TYPE_SHORT:
case CORINFO_TYPE_USHORT:
case CORINFO_TYPE_UINT:
return InterpreterType(CORINFO_TYPE_INT);
case CORINFO_TYPE_NATIVEUINT:
case CORINFO_TYPE_PTR:
return InterpreterType(CORINFO_TYPE_NATIVEINT);
case CORINFO_TYPE_ULONG:
return InterpreterType(CORINFO_TYPE_LONG);
case CORINFO_TYPE_STRING:
return InterpreterType(CORINFO_TYPE_CLASS);
case CORINFO_TYPE_INT:
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_LONG:
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY:
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
return *const_cast<InterpreterType*>(this);
case CORINFO_TYPE_UNDEF:
case CORINFO_TYPE_VOID:
case CORINFO_TYPE_VAR:
default:
_ASSERTE_MSG(false, "should not reach here");
return *const_cast<InterpreterType*>(this);
}
}
#ifdef _DEBUG
bool InterpreterType::MatchesWork(const InterpreterType it2, CEEInfo* info) const
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
if (*this == it2) return true;
// Otherwise...
CorInfoType cit1 = ToCorInfoType();
CorInfoType cit2 = it2.ToCorInfoType();
GCX_PREEMP();
// An approximation: valueclasses of the same size match.
if (cit1 == CORINFO_TYPE_VALUECLASS &&
cit2 == CORINFO_TYPE_VALUECLASS &&
Size(info) == it2.Size(info))
{
return true;
}
// NativeInt matches byref. (In unsafe code).
if ((cit1 == CORINFO_TYPE_BYREF && cit2 == CORINFO_TYPE_NATIVEINT))
return true;
// Apparently the VM may optimize a method that returns a struct containing a single nativeint
// field by reporting its return type *as* nativeint, and similarly for at least some other
// primitive types. So weaken this check to allow that.
// (The check is actually a little weaker still, since I don't want to crack the return type and
// make sure it has only a single nativeint member -- I just ensure that the total size is correct.)
switch (cit1)
{
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_NATIVEUINT:
assert(sizeof(NativeInt) == sizeof(NativeUInt));
if (it2.Size(info) == sizeof(NativeInt))
return true;
break;
case CORINFO_TYPE_INT:
case CORINFO_TYPE_UINT:
assert(sizeof(INT32) == sizeof(UINT32));
if (it2.Size(info) == sizeof(INT32))
return true;
break;
default:
break;
}
// See if the second is a value type synonym for a primitive.
if (cit2 == CORINFO_TYPE_VALUECLASS)
{
CorInfoType cit2prim = info->getTypeForPrimitiveValueClass(it2.ToClassHandle());
if (cit2prim != CORINFO_TYPE_UNDEF)
{
InterpreterType it2prim(cit2prim);
if (*this == it2prim.StackNormalize())
return true;
}
}
// Otherwise...
return false;
}
#endif // _DEBUG
// Static
size_t CorInfoTypeSizeArray[] =
{
/*CORINFO_TYPE_UNDEF = 0x0*/0,
/*CORINFO_TYPE_VOID = 0x1*/0,
/*CORINFO_TYPE_BOOL = 0x2*/1,
/*CORINFO_TYPE_CHAR = 0x3*/2,
/*CORINFO_TYPE_BYTE = 0x4*/1,
/*CORINFO_TYPE_UBYTE = 0x5*/1,
/*CORINFO_TYPE_SHORT = 0x6*/2,
/*CORINFO_TYPE_USHORT = 0x7*/2,
/*CORINFO_TYPE_INT = 0x8*/4,
/*CORINFO_TYPE_UINT = 0x9*/4,
/*CORINFO_TYPE_LONG = 0xa*/8,
/*CORINFO_TYPE_ULONG = 0xb*/8,
/*CORINFO_TYPE_NATIVEINT = 0xc*/sizeof(void*),
/*CORINFO_TYPE_NATIVEUINT = 0xd*/sizeof(void*),
/*CORINFO_TYPE_FLOAT = 0xe*/4,
/*CORINFO_TYPE_DOUBLE = 0xf*/8,
/*CORINFO_TYPE_STRING = 0x10*/sizeof(void*),
/*CORINFO_TYPE_PTR = 0x11*/sizeof(void*),
/*CORINFO_TYPE_BYREF = 0x12*/sizeof(void*),
/*CORINFO_TYPE_VALUECLASS = 0x13*/0,
/*CORINFO_TYPE_CLASS = 0x14*/sizeof(void*),
/*CORINFO_TYPE_REFANY = 0x15*/sizeof(void*)*2,
/*CORINFO_TYPE_VAR = 0x16*/0,
};
bool CorInfoTypeIsUnsigned(CorInfoType cit)
{
LIMITED_METHOD_CONTRACT;
switch (cit)
{
case CORINFO_TYPE_UINT:
case CORINFO_TYPE_NATIVEUINT:
case CORINFO_TYPE_ULONG:
case CORINFO_TYPE_UBYTE:
case CORINFO_TYPE_USHORT:
case CORINFO_TYPE_CHAR:
return true;
default:
return false;
}
}
bool CorInfoTypeIsIntegral(CorInfoType cit)
{
LIMITED_METHOD_CONTRACT;
switch (cit)
{
case CORINFO_TYPE_UINT:
case CORINFO_TYPE_NATIVEUINT:
case CORINFO_TYPE_ULONG:
case CORINFO_TYPE_UBYTE:
case CORINFO_TYPE_USHORT:
case CORINFO_TYPE_INT:
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_LONG:
case CORINFO_TYPE_BYTE:
case CORINFO_TYPE_BOOL:
case CORINFO_TYPE_SHORT:
return true;
default:
return false;
}
}
bool CorInfoTypeIsFloatingPoint(CorInfoType cit)
{
return cit == CORINFO_TYPE_FLOAT || cit == CORINFO_TYPE_DOUBLE;
}
bool CorElemTypeIsUnsigned(CorElementType cet)
{
LIMITED_METHOD_CONTRACT;
switch (cet)
{
case ELEMENT_TYPE_U1:
case ELEMENT_TYPE_U2:
case ELEMENT_TYPE_U4:
case ELEMENT_TYPE_U8:
case ELEMENT_TYPE_U:
return true;
default:
return false;
}
}
bool CorInfoTypeIsPointer(CorInfoType cit)
{
LIMITED_METHOD_CONTRACT;
switch (cit)
{
case CORINFO_TYPE_PTR:
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_NATIVEUINT:
return true;
// It seems like the ECMA spec doesn't allow this, but (at least) the managed C++
// compiler expects the explicitly-sized pointer type of the platform pointer size to work:
case CORINFO_TYPE_INT:
case CORINFO_TYPE_UINT:
return sizeof(NativeInt) == sizeof(INT32);
case CORINFO_TYPE_LONG:
case CORINFO_TYPE_ULONG:
return sizeof(NativeInt) == sizeof(INT64);
default:
return false;
}
}
void Interpreter::LdArg(int argNum)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
LdFromMemAddr(GetArgAddr(argNum), GetArgType(argNum));
}
void Interpreter::LdArgA(int argNum)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
OpStackSet<void*>(m_curStackHt, reinterpret_cast<void*>(GetArgAddr(argNum)));
m_curStackHt++;
}
void Interpreter::StArg(int argNum)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
StToLocalMemAddr(GetArgAddr(argNum), GetArgType(argNum));
}
void Interpreter::LdLocA(int locNum)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
InterpreterType tp = m_methInfo->m_localDescs[locNum].m_type;
void* addr;
if (tp.IsLargeStruct(&m_interpCeeInfo))
{
void* structPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(locNum)), sizeof(void**));
addr = *reinterpret_cast<void**>(structPtr);
}
else
{
addr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(locNum)), tp.Size(&m_interpCeeInfo));
}
// The "addr" above, while a byref, is never a heap pointer, so we're robust if
// any of these were to cause a GC.
OpStackSet<void*>(m_curStackHt, addr);
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
m_curStackHt++;
}
void Interpreter::LdIcon(INT32 c)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
OpStackSet<INT32>(m_curStackHt, c);
m_curStackHt++;
}
void Interpreter::LdR4con(INT32 c)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_FLOAT));
OpStackSet<INT32>(m_curStackHt, c);
m_curStackHt++;
}
void Interpreter::LdLcon(INT64 c)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_LONG));
OpStackSet<INT64>(m_curStackHt, c);
m_curStackHt++;
}
void Interpreter::LdR8con(INT64 c)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_DOUBLE));
OpStackSet<INT64>(m_curStackHt, c);
m_curStackHt++;
}
void Interpreter::LdNull()
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
OpStackSet<void*>(m_curStackHt, NULL);
m_curStackHt++;
}
template<typename T, CorInfoType cit>
void Interpreter::LdInd()
{
assert(TOSIsPtr());
assert(IsStackNormalType(cit));
unsigned curStackInd = m_curStackHt-1;
T* ptr = OpStackGet<T*>(curStackInd);
ThrowOnInvalidPointer(ptr);
OpStackSet<T>(curStackInd, *ptr);
OpStackTypeSet(curStackInd, InterpreterType(cit));
BarrierIfVolatile();
}
template<typename T, bool isUnsigned>
void Interpreter::LdIndShort()
{
assert(TOSIsPtr());
assert(sizeof(T) < 4);
unsigned curStackInd = m_curStackHt-1;
T* ptr = OpStackGet<T*>(curStackInd);
ThrowOnInvalidPointer(ptr);
if (isUnsigned)
{
OpStackSet<UINT32>(curStackInd, *ptr);
}
else
{
OpStackSet<INT32>(curStackInd, *ptr);
}
// All short integers are normalized to INT as their stack type.
OpStackTypeSet(curStackInd, InterpreterType(CORINFO_TYPE_INT));
BarrierIfVolatile();
}
template<typename T>
void Interpreter::StInd()
{
assert(m_curStackHt >= 2);
assert(CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt-2).ToCorInfoType()));
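// The barrier for a volatile. store is issued before the write, giving the store release
// semantics.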
BarrierIfVolatile();
unsigned stackInd0 = m_curStackHt-2;
unsigned stackInd1 = m_curStackHt-1;
T val = OpStackGet<T>(stackInd1);
T* ptr = OpStackGet<T*>(stackInd0);
ThrowOnInvalidPointer(ptr);
*ptr = val;
m_curStackHt -= 2;
#ifdef _DEBUG
if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL) &&
IsInLocalArea(ptr))
{
PrintLocals();
}
#endif // _DEBUG
}
void Interpreter::StInd_Ref()
{
assert(m_curStackHt >= 2);
assert(CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt-2).ToCorInfoType()));
BarrierIfVolatile();
unsigned stackInd0 = m_curStackHt-2;
unsigned stackInd1 = m_curStackHt-1;
OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(stackInd1));
OBJECTREF* ptr = OpStackGet<OBJECTREF*>(stackInd0);
ThrowOnInvalidPointer(ptr);
SetObjectReferenceUnchecked(ptr, val);
m_curStackHt -= 2;
#ifdef _DEBUG
if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL) &&
IsInLocalArea(ptr))
{
PrintLocals();
}
#endif // _DEBUG
}
template<int op>
void Interpreter::BinaryArithOp()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned op1idx = m_curStackHt - 2;
unsigned op2idx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(op1idx);
assert(IsStackNormalType(t1.ToCorInfoType()));
// Looking at the generated code, it does seem to save some instructions to use the "shifted
// types," though the effect on end-to-end time is variable. So I'll leave it set.
InterpreterType t2 = OpStackTypeGet(op2idx);
assert(IsStackNormalType(t2.ToCorInfoType()));
// In all cases below, since "op" is a compile-time constant, "if" chains on it should fold away.
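// The cases below implement the ECMA-335 table of binary numeric operand combinations:
// int32, native int, int64, F (float/double), and managed pointers mix only in the listed
// ways, with s_InterpreterLooseRules admitting a few extra combinations (e.g. int64 with
// native int).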
switch (t1.ToCorInfoTypeShifted())
{
case CORINFO_TYPE_SHIFTED_INT:
if (t1 == t2)
{
// Int op Int = Int
INT32 val1 = OpStackGet<INT32>(op1idx);
INT32 val2 = OpStackGet<INT32>(op2idx);
BinaryArithOpWork<op, INT32, /*IsIntType*/true, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
if (cits2 == CORINFO_TYPE_SHIFTED_NATIVEINT)
{
// Int op NativeInt = NativeInt
NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
}
else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
{
// Int op Long = Long
INT64 val1 = static_cast<INT64>(OpStackGet<INT32>(op1idx));
INT64 val2 = OpStackGet<INT64>(op2idx);
BinaryArithOpWork<op, INT64, /*IsIntType*/true, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/false>(val1, val2);
}
else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
{
if (op == BA_Add || (s_InterpreterLooseRules && op == BA_Sub))
{
// Int + ByRef = ByRef
NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
VerificationError("Operation not permitted on int and managed pointer.");
}
}
else
{
VerificationError("Binary arithmetic operation type mismatch (int and ?)");
}
}
break;
case CORINFO_TYPE_SHIFTED_NATIVEINT:
{
NativeInt val1 = OpStackGet<NativeInt>(op1idx);
if (t1 == t2)
{
// NativeInt op NativeInt = NativeInt
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
if (cits2 == CORINFO_TYPE_SHIFTED_INT)
{
// NativeInt op Int = NativeInt
NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
// CLI spec does not allow adding a native int and an int64. So use loose rules.
else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
{
// NativeInt op Long = NativeInt (loose rules)
NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
{
if (op == BA_Add || (s_InterpreterLooseRules && op == BA_Sub))
{
// NativeInt + ByRef = ByRef
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
VerificationError("Operation not permitted on native int and managed pointer.");
}
}
else
{
VerificationError("Binary arithmetic operation type mismatch (native int and ?)");
}
}
}
break;
case CORINFO_TYPE_SHIFTED_LONG:
{
bool looseLong = false;
#if defined(_AMD64_)
looseLong = (s_InterpreterLooseRules && (t2.ToCorInfoType() == CORINFO_TYPE_NATIVEINT ||
t2.ToCorInfoType() == CORINFO_TYPE_BYREF));
#endif
if (t1 == t2 || looseLong)
{
// Long op Long = Long
INT64 val1 = OpStackGet<INT64>(op1idx);
INT64 val2 = OpStackGet<INT64>(op2idx);
BinaryArithOpWork<op, INT64, /*IsIntType*/true, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
VerificationError("Binary arithmetic operation type mismatch (long and ?)");
}
}
break;
case CORINFO_TYPE_SHIFTED_FLOAT:
{
if (t1 == t2)
{
// Float op Float = Float
float val1 = OpStackGet<float>(op1idx);
float val2 = OpStackGet<float>(op2idx);
BinaryArithOpWork<op, float, /*IsIntType*/false, CORINFO_TYPE_FLOAT, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
if (cits2 == CORINFO_TYPE_SHIFTED_DOUBLE)
{
// Float op Double = Double
double val1 = static_cast<double>(OpStackGet<float>(op1idx));
double val2 = OpStackGet<double>(op2idx);
BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
VerificationError("Binary arithmetic operation type mismatch (float and ?)");
}
}
}
break;
case CORINFO_TYPE_SHIFTED_DOUBLE:
{
if (t1 == t2)
{
// Double op Double = Double
double val1 = OpStackGet<double>(op1idx);
double val2 = OpStackGet<double>(op2idx);
BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
if (cits2 == CORINFO_TYPE_SHIFTED_FLOAT)
{
// Double op Float = Double
double val1 = OpStackGet<double>(op1idx);
double val2 = static_cast<double>(OpStackGet<float>(op2idx));
BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
VerificationError("Binary arithmetic operation type mismatch (double and ?)");
}
}
}
break;
case CORINFO_TYPE_SHIFTED_BYREF:
{
NativeInt val1 = OpStackGet<NativeInt>(op1idx);
CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
if (cits2 == CORINFO_TYPE_SHIFTED_INT)
{
if (op == BA_Add || op == BA_Sub)
{
// ByRef +- Int = ByRef
NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
VerificationError("May only add/subtract managed pointer and integral value.");
}
}
else if (cits2 == CORINFO_TYPE_SHIFTED_NATIVEINT)
{
if (op == BA_Add || op == BA_Sub)
{
// ByRef +- NativeInt = ByRef
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
VerificationError("May only add/subtract managed pointer and integral value.");
}
}
else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
{
if (op == BA_Sub)
{
// ByRef - ByRef = NativeInt
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
VerificationError("May only subtract managed pointer values.");
}
}
// CLI spec does not allow combining a byref and an int64. So use loose rules.
else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
{
// ByRef op Long = ByRef (loose rules)
NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
VerificationError("Binary arithmetic operation not permitted on byref");
}
}
break;
case CORINFO_TYPE_SHIFTED_CLASS:
VerificationError("Can't do binary arithmetic on object references.");
break;
default:
_ASSERTE_MSG(false, "Non-stack-normal type on stack.");
}
// In all cases:
m_curStackHt--;
}
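// Implements the overflow-checking arithmetic opcodes (add.ovf, sub.ovf, mul.ovf and
// their .un variants). "op" selects the operation and "asUnsigned" selects the unsigned
// variant; both are compile-time constants, so the dispatch below folds to the chosen
// case. The checked computation itself is delegated to BinaryArithOvfOpWork.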
template<int op, bool asUnsigned>
void Interpreter::BinaryArithOvfOp()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned op1idx = m_curStackHt - 2;
unsigned op2idx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(op1idx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
InterpreterType t2 = OpStackTypeGet(op2idx);
CorInfoType cit2 = t2.ToCorInfoType();
assert(IsStackNormalType(cit2));
// In all cases below, since "op" is a compile-time constant, "if" chains on it should fold away.
switch (cit1)
{
case CORINFO_TYPE_INT:
if (cit2 == CORINFO_TYPE_INT)
{
if (asUnsigned)
{
// UnsignedInt op UnsignedInt = UnsignedInt
UINT32 val1 = OpStackGet<UINT32>(op1idx);
UINT32 val2 = OpStackGet<UINT32>(op2idx);
BinaryArithOvfOpWork<op, UINT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
// Int op Int = Int
INT32 val1 = OpStackGet<INT32>(op1idx);
INT32 val2 = OpStackGet<INT32>(op2idx);
BinaryArithOvfOpWork<op, INT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
}
}
else if (cit2 == CORINFO_TYPE_NATIVEINT)
{
if (asUnsigned)
{
// UnsignedInt op UnsignedNativeInt = UnsignedNativeInt
NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<UINT32>(op1idx));
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
// Int op NativeInt = NativeInt
NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
}
}
else if (cit2 == CORINFO_TYPE_BYREF)
{
if (asUnsigned && op == BA_Add)
{
// UnsignedInt + ByRef = ByRef
NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<UINT32>(op1idx));
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
VerificationError("Illegal arithmetic overflow operation for int and byref.");
}
}
else
{
VerificationError("Binary arithmetic overflow operation type mismatch (int and ?)");
}
break;
case CORINFO_TYPE_NATIVEINT:
if (cit2 == CORINFO_TYPE_INT)
{
if (asUnsigned)
{
// UnsignedNativeInt op UnsignedInt = UnsignedNativeInt
NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<UINT32>(op2idx));
BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
// NativeInt op Int = NativeInt
NativeInt val1 = OpStackGet<NativeInt>(op1idx);
NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
}
else if (cit2 == CORINFO_TYPE_NATIVEINT)
{
if (asUnsigned)
{
// UnsignedNativeInt op UnsignedNativeInt = UnsignedNativeInt
NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
// NativeInt op NativeInt = NativeInt
NativeInt val1 = OpStackGet<NativeInt>(op1idx);
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
}
else if (cit2 == CORINFO_TYPE_BYREF)
{
if (asUnsigned && op == BA_Add)
{
// UnsignedNativeInt op ByRef = ByRef
NativeUInt val1 = OpStackGet<NativeUInt>(op1idx); // Read the full native-width value; the first operand is a native int.
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
VerificationError("Illegal arithmetic overflow operation for native int and byref.");
}
}
else
{
VerificationError("Binary arithmetic overflow operation type mismatch (native int and ?)");
}
break;
case CORINFO_TYPE_LONG:
if (cit2 == CORINFO_TYPE_LONG || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
{
if (asUnsigned)
{
// UnsignedLong op UnsignedLong = UnsignedLong
UINT64 val1 = OpStackGet<UINT64>(op1idx);
UINT64 val2 = OpStackGet<UINT64>(op2idx);
BinaryArithOvfOpWork<op, UINT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
// Long op Long = Long
INT64 val1 = OpStackGet<INT64>(op1idx);
INT64 val2 = OpStackGet<INT64>(op2idx);
BinaryArithOvfOpWork<op, INT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
}
}
else
{
VerificationError("Binary arithmetic overflow operation type mismatch (long and ?)");
}
break;
case CORINFO_TYPE_BYREF:
if (asUnsigned && (op == BA_Add || op == BA_Sub))
{
NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
if (cit2 == CORINFO_TYPE_INT)
{
// ByRef +- UnsignedInt = ByRef
NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT32>(op2idx));
BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
}
else if (cit2 == CORINFO_TYPE_NATIVEINT)
{
// ByRef +- UnsignedNativeInt = ByRef
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
}
else if (cit2 == CORINFO_TYPE_BYREF)
{
if (op == BA_Sub)
{
// ByRef - ByRef = UnsignedNativeInt
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
VerificationError("Illegal arithmetic overflow operation for byref and byref: may only subtract managed pointer values.");
}
}
else
{
VerificationError("Binary arithmetic overflow operation not permitted on byref");
}
}
else
{
if (!asUnsigned)
{
VerificationError("Signed binary arithmetic overflow operation not permitted on managed pointer values.");
}
else
{
_ASSERTE_MSG(op == BA_Mul, "Must be an overflow operation; tested for Add || Sub above.");
VerificationError("Cannot multiply managed pointer values.");
}
}
break;
default:
_ASSERTE_MSG(false, "Non-stack-normal type on stack.");
}
// In all cases:
m_curStackHt--;
}
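// Performs the overflow-checked computation for operands that have already been
// converted to a common type T, using ClrSafeInt<T>. Throws OverflowException if the
// operation overflows; otherwise writes the result back to the operand stack and, when
// TypeIsUnchanged is false, updates the result slot's stack type to "cit".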
template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
void Interpreter::BinaryArithOvfOpWork(T val1, T val2)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
ClrSafeInt<T> res;
ClrSafeInt<T> safeV1(val1);
ClrSafeInt<T> safeV2(val2);
if (op == BA_Add)
{
res = safeV1 + safeV2;
}
else if (op == BA_Sub)
{
res = safeV1 - safeV2;
}
else if (op == BA_Mul)
{
res = safeV1 * safeV2;
}
else
{
_ASSERTE_MSG(false, "op should be one of the overflow ops...");
}
if (res.IsOverflow())
{
ThrowOverflowException();
}
unsigned residx = m_curStackHt - 2;
OpStackSet<T>(residx, res.Value());
if (!TypeIsUnchanged)
{
OpStackTypeSet(residx, InterpreterType(cit));
}
}
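// Implements the integer-only binary opcodes (and, or, xor, div.un, rem.un): dispatches
// on the stack-normal types of the two operands and delegates the computation to
// BinaryIntOpWork on the corresponding unsigned representation.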
template<int op>
void Interpreter::BinaryIntOp()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned op1idx = m_curStackHt - 2;
unsigned op2idx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(op1idx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
InterpreterType t2 = OpStackTypeGet(op2idx);
CorInfoType cit2 = t2.ToCorInfoType();
assert(IsStackNormalType(cit2));
// In all cases below, since "op" is a compile-time constant, "if" chains on it should fold away.
switch (cit1)
{
case CORINFO_TYPE_INT:
if (cit2 == CORINFO_TYPE_INT)
{
// Int op Int = Int
UINT32 val1 = OpStackGet<UINT32>(op1idx);
UINT32 val2 = OpStackGet<UINT32>(op2idx);
BinaryIntOpWork<op, UINT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
}
else if (cit2 == CORINFO_TYPE_NATIVEINT)
{
// Int op NativeInt = NativeInt
NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<INT32>(op1idx));
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
}
else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF)
{
// Int op ByRef = ByRef (loose rules)
NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<INT32>(op1idx));
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
}
else
{
VerificationError("Binary arithmetic operation type mismatch (int and ?)");
}
break;
case CORINFO_TYPE_NATIVEINT:
if (cit2 == CORINFO_TYPE_NATIVEINT)
{
// NativeInt op NativeInt = NativeInt
NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
else if (cit2 == CORINFO_TYPE_INT)
{
// NativeInt op Int = NativeInt
NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT32>(op2idx));
BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
// CLI spec does not allow adding a native int and an int64. So use loose rules.
else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
{
// NativeInt op Long = NativeInt (loose rules)
NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT64>(op2idx));
BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
VerificationError("Binary arithmetic operation type mismatch (native int and ?)");
}
break;
case CORINFO_TYPE_LONG:
if (cit2 == CORINFO_TYPE_LONG || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
{
// Long op Long = Long
UINT64 val1 = OpStackGet<UINT64>(op1idx);
UINT64 val2 = OpStackGet<UINT64>(op2idx);
BinaryIntOpWork<op, UINT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
}
else
{
VerificationError("Binary arithmetic operation type mismatch (long and ?)");
}
break;
default:
VerificationError("Illegal operation for non-integral data type.");
}
// In all cases:
m_curStackHt--;
}
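// Computes the bitwise or unsigned division/remainder result for operands already
// converted to the unsigned type T, throwing on division by zero, and writes the result
// back to the operand stack (updating the stack type when TypeIsUnchanged is false).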
template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
void Interpreter::BinaryIntOpWork(T val1, T val2)
{
T res;
if (op == BIO_And)
{
res = val1 & val2;
}
else if (op == BIO_Or)
{
res = val1 | val2;
}
else if (op == BIO_Xor)
{
res = val1 ^ val2;
}
else
{
assert(op == BIO_DivUn || op == BIO_RemUn);
if (val2 == 0)
{
ThrowDivideByZero();
}
else if (val2 == -1 && val1 == static_cast<T>(((UINT64)1) << (sizeof(T)*8 - 1))) // min int / -1 is not representable.
{
ThrowSysArithException();
}
// Otherwise...
if (op == BIO_DivUn)
{
res = val1 / val2;
}
else
{
res = val1 % val2;
}
}
unsigned residx = m_curStackHt - 2;
OpStackSet<T>(residx, res);
if (!TypeIsUnchanged)
{
OpStackTypeSet(residx, InterpreterType(cit));
}
}
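// Implements shl, shr, and shr.un: dispatches on the type of the value being shifted and
// delegates to ShiftOpWork, which accepts an int or native int shift count.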
template<int op>
void Interpreter::ShiftOp()
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned op1idx = m_curStackHt - 2;
unsigned op2idx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(op1idx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
InterpreterType t2 = OpStackTypeGet(op2idx);
CorInfoType cit2 = t2.ToCorInfoType();
assert(IsStackNormalType(cit2));
// In all cases below, since "op" is a compile-time constant, "if" chains on it should fold away.
switch (cit1)
{
case CORINFO_TYPE_INT:
ShiftOpWork<op, INT32, UINT32>(op1idx, cit2);
break;
case CORINFO_TYPE_NATIVEINT:
ShiftOpWork<op, NativeInt, NativeUInt>(op1idx, cit2);
break;
case CORINFO_TYPE_LONG:
ShiftOpWork<op, INT64, UINT64>(op1idx, cit2);
break;
default:
VerificationError("Illegal value type for shift operation.");
break;
}
m_curStackHt--;
}
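// Performs the shift for a value of type T at "op1idx"; UT is the corresponding unsigned
// type, used to get a logical (unsigned) right shift for shr.un.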
template<int op, typename T, typename UT>
void Interpreter::ShiftOpWork(unsigned op1idx, CorInfoType cit2)
{
T val = OpStackGet<T>(op1idx);
unsigned op2idx = op1idx + 1;
T res = 0;
if (cit2 == CORINFO_TYPE_INT)
{
INT32 shiftAmt = OpStackGet<INT32>(op2idx);
if (op == CEE_SHL)
{
res = val << shiftAmt; // TODO: Check that C++ semantics matches IL.
}
else if (op == CEE_SHR)
{
res = val >> shiftAmt;
}
else
{
assert(op == CEE_SHR_UN);
res = (static_cast<UT>(val)) >> shiftAmt;
}
}
else if (cit2 == CORINFO_TYPE_NATIVEINT)
{
NativeInt shiftAmt = OpStackGet<NativeInt>(op2idx);
if (op == CEE_SHL)
{
res = val << shiftAmt; // TODO: Check that C++ semantics matches IL.
}
else if (op == CEE_SHR)
{
res = val >> shiftAmt;
}
else
{
assert(op == CEE_SHR_UN);
res = (static_cast<UT>(val)) >> shiftAmt;
}
}
else
{
VerificationError("Operand type mismatch for shift operator.");
}
OpStackSet<T>(op1idx, res);
}
void Interpreter::Neg()
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned opidx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(opidx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
switch (cit1)
{
case CORINFO_TYPE_INT:
OpStackSet<INT32>(opidx, -OpStackGet<INT32>(opidx));
break;
case CORINFO_TYPE_NATIVEINT:
OpStackSet<NativeInt>(opidx, -OpStackGet<NativeInt>(opidx));
break;
case CORINFO_TYPE_LONG:
OpStackSet<INT64>(opidx, -OpStackGet<INT64>(opidx));
break;
case CORINFO_TYPE_FLOAT:
OpStackSet<float>(opidx, -OpStackGet<float>(opidx));
break;
case CORINFO_TYPE_DOUBLE:
OpStackSet<double>(opidx, -OpStackGet<double>(opidx));
break;
default:
VerificationError("Illegal operand type for Neg operation.");
}
}
void Interpreter::Not()
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned opidx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(opidx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
switch (cit1)
{
case CORINFO_TYPE_INT:
OpStackSet<INT32>(opidx, ~OpStackGet<INT32>(opidx));
break;
case CORINFO_TYPE_NATIVEINT:
OpStackSet<NativeInt>(opidx, ~OpStackGet<NativeInt>(opidx));
break;
case CORINFO_TYPE_LONG:
OpStackSet<INT64>(opidx, ~OpStackGet<INT64>(opidx));
break;
default:
VerificationError("Illegal operand type for Not operation.");
}
}
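// Implements the conv.* opcodes. Template parameters: T is the target C++ type;
// TIsUnsigned selects an unsigned interpretation of integer sources (zero-extension
// rather than sign-extension); TCanHoldPtr indicates whether T is wide enough to hold a
// pointer, which gates the pointer-typed cases below; TIsShort marks sub-int targets
// whose results are widened back to INT32 on the operand stack; and "cit" is the
// resulting stack-normal type. (Illustration only, not a reference to an actual call
// site: conv.u1 would correspond to something like
// Conv<UINT8, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>().)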
template<typename T, bool TIsUnsigned, bool TCanHoldPtr, bool TIsShort, CorInfoType cit>
void Interpreter::Conv()
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned opidx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(opidx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
T val;
switch (cit1)
{
case CORINFO_TYPE_INT:
if (TIsUnsigned)
{
// Must convert the 32 bit value to unsigned first, so that we zero-extend if necessary.
val = static_cast<T>(static_cast<UINT32>(OpStackGet<INT32>(opidx)));
}
else
{
val = static_cast<T>(OpStackGet<INT32>(opidx));
}
break;
case CORINFO_TYPE_NATIVEINT:
if (TIsUnsigned)
{
// NativeInt might be 32 bits, so convert to unsigned before possibly widening.
val = static_cast<T>(static_cast<NativeUInt>(OpStackGet<NativeInt>(opidx)));
}
else
{
val = static_cast<T>(OpStackGet<NativeInt>(opidx));
}
break;
case CORINFO_TYPE_LONG:
val = static_cast<T>(OpStackGet<INT64>(opidx));
break;
// TODO: Make sure that the C++ conversions do the right thing (truncate to zero...)
case CORINFO_TYPE_FLOAT:
val = static_cast<T>(OpStackGet<float>(opidx));
break;
case CORINFO_TYPE_DOUBLE:
val = static_cast<T>(OpStackGet<double>(opidx));
break;
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_STRING:
if (!TCanHoldPtr && !s_InterpreterLooseRules)
{
VerificationError("Conversion of pointer value to type that can't hold its value.");
}
// Otherwise...
// (Must first convert to NativeInt, because the compiler believes this might be applied for T =
// float or double. It won't, by the test above, and the extra cast shouldn't generate any code...)
val = static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx)));
break;
default:
VerificationError("Illegal operand type for conv.* operation.");
UNREACHABLE();
}
if (TIsShort)
{
OpStackSet<INT32>(opidx, static_cast<INT32>(val));
}
else
{
OpStackSet<T>(opidx, val);
}
OpStackTypeSet(opidx, InterpreterType(cit));
}
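// Implements conv.r.un: reinterprets the top-of-stack integer as unsigned and converts
// it to a double; a double operand is left unchanged.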
void Interpreter::ConvRUn()
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned opidx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(opidx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
switch (cit1)
{
case CORINFO_TYPE_INT:
OpStackSet<double>(opidx, static_cast<double>(OpStackGet<UINT32>(opidx)));
break;
case CORINFO_TYPE_NATIVEINT:
OpStackSet<double>(opidx, static_cast<double>(OpStackGet<NativeUInt>(opidx)));
break;
case CORINFO_TYPE_LONG:
OpStackSet<double>(opidx, static_cast<double>(OpStackGet<UINT64>(opidx)));
break;
case CORINFO_TYPE_DOUBLE:
return;
default:
VerificationError("Illegal operand type for conv.r.un operation.");
}
OpStackTypeSet(opidx, InterpreterType(CORINFO_TYPE_DOUBLE));
}
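// Implements the conv.ovf.* opcodes for signed sources. TMin and TMax bound the target
// range for the floating-point checks, TCanHoldPtr permits pointer-typed operands, and
// "cit" is the resulting stack-normal type; throws OverflowException when the source
// value does not fit in T.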
template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
void Interpreter::ConvOvf()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned opidx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(opidx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
switch (cit1)
{
case CORINFO_TYPE_INT:
{
INT32 i4 = OpStackGet<INT32>(opidx);
if (!FitsIn<T>(i4))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(i4));
}
break;
case CORINFO_TYPE_NATIVEINT:
{
NativeInt i = OpStackGet<NativeInt>(opidx);
if (!FitsIn<T>(i))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(i));
}
break;
case CORINFO_TYPE_LONG:
{
INT64 i8 = OpStackGet<INT64>(opidx);
if (!FitsIn<T>(i8))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(i8));
}
break;
// Make sure that the C++ conversions do the right thing (truncate to zero...)
case CORINFO_TYPE_FLOAT:
{
float f = OpStackGet<float>(opidx);
if (!FloatFitsInIntType<TMin, TMax>(f))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(f));
}
break;
case CORINFO_TYPE_DOUBLE:
{
double d = OpStackGet<double>(opidx);
if (!DoubleFitsInIntType<TMin, TMax>(d))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(d));
}
break;
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_STRING:
if (!TCanHoldPtr)
{
VerificationError("Conversion of pointer value to type that can't hold its value.");
}
// Otherwise...
// (Must first convert to NativeInt, because the compiler believes this might be applied for T =
// float or double. It won't, by the test above, and the extra cast shouldn't generate any code...)
OpStackSet<T>(opidx, static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx))));
break;
default:
VerificationError("Illegal operand type for conv.ovf.* operation.");
}
_ASSERTE_MSG(IsStackNormalType(cit), "Precondition.");
OpStackTypeSet(opidx, InterpreterType(cit));
}
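// Implements the conv.ovf.*.un opcodes: like ConvOvf, but integer sources are
// interpreted as unsigned before the range check.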
template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
void Interpreter::ConvOvfUn()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned opidx = m_curStackHt - 1;
InterpreterType t1 = OpStackTypeGet(opidx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
switch (cit1)
{
case CORINFO_TYPE_INT:
{
UINT32 ui4 = OpStackGet<UINT32>(opidx);
if (!FitsIn<T>(ui4))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(ui4));
}
break;
case CORINFO_TYPE_NATIVEINT:
{
NativeUInt ui = OpStackGet<NativeUInt>(opidx);
if (!FitsIn<T>(ui))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(ui));
}
break;
case CORINFO_TYPE_LONG:
{
UINT64 ui8 = OpStackGet<UINT64>(opidx);
if (!FitsIn<T>(ui8))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(ui8));
}
break;
// Make sure that the C++ conversions do the right thing (truncate to zero...)
case CORINFO_TYPE_FLOAT:
{
float f = OpStackGet<float>(opidx);
if (!FloatFitsInIntType<TMin, TMax>(f))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(f));
}
break;
case CORINFO_TYPE_DOUBLE:
{
double d = OpStackGet<double>(opidx);
if (!DoubleFitsInIntType<TMin, TMax>(d))
{
ThrowOverflowException();
}
OpStackSet<T>(opidx, static_cast<T>(d));
}
break;
case CORINFO_TYPE_BYREF:
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_STRING:
if (!TCanHoldPtr)
{
VerificationError("Conversion of pointer value to type that can't hold its value.");
}
// Otherwise...
// (Must first convert to NativeInt, because the compiler believes this might be applied for T =
// float or double. It won't, by the test above, and the extra cast shouldn't generate any code...)
OpStackSet<T>(opidx, static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx))));
break;
default:
VerificationError("Illegal operand type for conv.ovf.*.un operation.");
}
_ASSERTE_MSG(IsStackNormalType(cit), "Precondition.");
OpStackTypeSet(opidx, InterpreterType(cit));
}
void Interpreter::LdObj()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
BarrierIfVolatile();
assert(m_curStackHt > 0);
unsigned ind = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType cit = OpStackTypeGet(ind).ToCorInfoType();
_ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack");
#endif // _DEBUG
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdObj]);
#endif // INTERP_TRACING
// TODO: GetTypeFromToken also uses GCX_PREEMP(); can we merge it with the getClassAttribs() block below, and do it just once?
CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_LdObj));
DWORD clsAttribs;
{
GCX_PREEMP();
clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
}
void* src = OpStackGet<void*>(ind);
ThrowOnInvalidPointer(src);
if (clsAttribs & CORINFO_FLG_VALUECLASS)
{
LdObjValueClassWork(clsHnd, ind, src);
}
else
{
OpStackSet<void*>(ind, *reinterpret_cast<void**>(src));
OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
}
m_ILCodePtr += 5;
}
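// Loads the value class "valueClsHnd" from "src" into operand-stack slot "ind": large
// structs are copied to the large-struct operand stack, small ones are stored inline in
// the slot. "src" may be an interior pointer, so it is GC-protected across the calls
// that can trigger a collection.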
void Interpreter::LdObjValueClassWork(CORINFO_CLASS_HANDLE valueClsHnd, unsigned ind, void* src)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
// "src" is a byref, which may be into an object. GCPROTECT for the call below.
GCPROTECT_BEGININTERIOR(src);
InterpreterType it = InterpreterType(&m_interpCeeInfo, valueClsHnd);
size_t sz = it.Size(&m_interpCeeInfo);
// Note that the memcpy's below are permissible because the destination is in the operand stack.
if (sz > sizeof(INT64))
{
void* dest = LargeStructOperandStackPush(sz);
memcpy(dest, src, sz);
OpStackSet<void*>(ind, dest);
}
else
{
OpStackSet<INT64>(ind, GetSmallStructValue(src, sz));
}
OpStackTypeSet(ind, it.StackNormalize());
GCPROTECT_END();
}
CORINFO_CLASS_HANDLE Interpreter::GetTypeFromToken(BYTE* codePtr, CorInfoTokenKind tokKind InterpTracingArg(ResolveTokenKind rtk))
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
GCX_PREEMP();
CORINFO_RESOLVED_TOKEN typeTok;
ResolveToken(&typeTok, getU4LittleEndian(codePtr), tokKind InterpTracingArg(rtk));
return typeTok.hClass;
}
bool Interpreter::IsValidPointerType(CorInfoType cit)
{
bool isValid = (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_BYREF);
#if defined(_AMD64_)
isValid = isValid || (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG);
#endif
return isValid;
}
void Interpreter::CpObj()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned destInd = m_curStackHt - 2;
unsigned srcInd = m_curStackHt - 1;
#ifdef _DEBUG
// Check that src and dest are both pointer types.
CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
_ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for dest of cpobj");
cit = OpStackTypeGet(srcInd).ToCorInfoType();
_ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for src of cpobj");
#endif // _DEBUG
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_CpObj]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_CpObj));
DWORD clsAttribs;
{
GCX_PREEMP();
clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
}
void* dest = OpStackGet<void*>(destInd);
void* src = OpStackGet<void*>(srcInd);
ThrowOnInvalidPointer(dest);
ThrowOnInvalidPointer(src);
// dest and src are vulnerable byrefs.
GCX_FORBID();
if (clsAttribs & CORINFO_FLG_VALUECLASS)
{
CopyValueClassUnchecked(dest, src, GetMethodTableFromClsHnd(clsHnd));
}
else
{
OBJECTREF val = *reinterpret_cast<OBJECTREF*>(src);
SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(dest), val);
}
m_curStackHt -= 2;
m_ILCodePtr += 5;
}
void Interpreter::StObj()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned destInd = m_curStackHt - 2;
unsigned valInd = m_curStackHt - 1;
#ifdef _DEBUG
// Check that dest is a pointer type.
CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
_ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for dest of stobj");
#endif // _DEBUG
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StObj]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_StObj));
DWORD clsAttribs;
{
GCX_PREEMP();
clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
}
if (clsAttribs & CORINFO_FLG_VALUECLASS)
{
MethodTable* clsMT = GetMethodTableFromClsHnd(clsHnd);
size_t sz;
{
GCX_PREEMP();
sz = getClassSize(clsHnd);
}
// Note that "dest" might be a pointer into the heap. It is therefore important
// to calculate it *after* any PREEMP transitions at which we might do a GC.
void* dest = OpStackGet<void*>(destInd);
ThrowOnInvalidPointer(dest);
assert( (OpStackTypeGet(valInd).ToCorInfoType() == CORINFO_TYPE_VALUECLASS &&
OpStackTypeGet(valInd).ToClassHandle() == clsHnd)
||
(OpStackTypeGet(valInd).ToCorInfoType() ==
CorInfoTypeStackNormalize(GetTypeForPrimitiveValueClass(clsHnd)))
|| (s_InterpreterLooseRules && sz <= sizeof(dest)));
GCX_FORBID();
if (sz > sizeof(INT64))
{
// Large struct case -- ostack entry is pointer.
void* src = OpStackGet<void*>(valInd);
CopyValueClassUnchecked(dest, src, clsMT);
LargeStructOperandStackPop(sz, src);
}
else
{
// The ostack entry contains the struct value.
CopyValueClassUnchecked(dest, OpStackGetAddr(valInd, sz), clsMT);
}
}
else
{
// The ostack entry is an object reference.
assert(OpStackTypeGet(valInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
// Note that "dest" might be a pointer into the heap. It is therefore important
// to calculate it *after* any PREEMP transitions at which we might do a GC. (Thus,
// we have to duplicate this code from the case above.)
void* dest = OpStackGet<void*>(destInd);
ThrowOnInvalidPointer(dest);
GCX_FORBID();
OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(dest), val);
}
m_curStackHt -= 2;
m_ILCodePtr += 5;
BarrierIfVolatile();
}
void Interpreter::InitObj()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned destInd = m_curStackHt - 1;
#ifdef _DEBUG
// Check that src and dest are both pointer types.
CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
_ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack");
#endif // _DEBUG
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_InitObj]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 2, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_InitObj));
size_t valueClassSz = 0;
DWORD clsAttribs;
{
GCX_PREEMP();
clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
if (clsAttribs & CORINFO_FLG_VALUECLASS)
{
valueClassSz = getClassSize(clsHnd);
}
}
void* dest = OpStackGet<void*>(destInd);
ThrowOnInvalidPointer(dest);
// dest is a vulnerable byref.
GCX_FORBID();
if (clsAttribs & CORINFO_FLG_VALUECLASS)
{
memset(dest, 0, valueClassSz);
}
else
{
// The ostack entry is an object reference.
SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(dest), NULL);
}
m_curStackHt -= 1;
m_ILCodePtr += 6;
}
void Interpreter::LdStr()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
OBJECTHANDLE res = ConstructStringLiteral(m_methInfo->m_module, getU4LittleEndian(m_ILCodePtr + 1));
{
GCX_FORBID();
OpStackSet<Object*>(m_curStackHt, *reinterpret_cast<Object**>(res));
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS)); // Stack-normal type for "string"
m_curStackHt++;
}
m_ILCodePtr += 5;
}
void Interpreter::NewObj()
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#endif
unsigned ctorTok = getU4LittleEndian(m_ILCodePtr + 1);
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_NewObj]);
#endif // INTERP_TRACING
CORINFO_CALL_INFO callInfo;
CORINFO_RESOLVED_TOKEN methTok;
{
GCX_PREEMP();
ResolveToken(&methTok, ctorTok, CORINFO_TOKENKIND_Ldtoken InterpTracingArg(RTK_NewObj));
m_interpCeeInfo.getCallInfo(&methTok, NULL,
m_methInfo->m_method,
CORINFO_CALLINFO_FLAGS(0),
&callInfo);
}
unsigned mflags = callInfo.methodFlags;
if ((mflags & (CORINFO_FLG_STATIC|CORINFO_FLG_ABSTRACT)) != 0)
{
VerificationError("newobj on static or abstract method");
}
unsigned clsFlags = callInfo.classFlags;
#ifdef _DEBUG
// What class are we allocating?
const char* clsName;
{
GCX_PREEMP();
clsName = m_interpCeeInfo.getClassName(methTok.hClass);
}
#endif // _DEBUG
// There are four cases:
// 1) Value types (ordinary constructor, resulting VALUECLASS pushed)
// 2) String (var-args constructor, result automatically pushed)
// 3) MDArray (var-args constructor, resulting OBJECTREF pushed)
// 4) Reference types (ordinary constructor, resulting OBJECTREF pushed)
if (clsFlags & CORINFO_FLG_VALUECLASS)
{
void* tempDest;
INT64 smallTempDest = 0;
size_t sz = 0;
{
GCX_PREEMP();
sz = getClassSize(methTok.hClass);
}
if (sz > sizeof(INT64))
{
// TODO: Make sure this is deleted in the face of exceptions.
tempDest = new BYTE[sz];
}
else
{
tempDest = &smallTempDest;
}
memset(tempDest, 0, sz);
InterpreterType structValRetIT(&m_interpCeeInfo, methTok.hClass);
m_structRetValITPtr = &structValRetIT;
m_structRetValTempSpace = tempDest;
DoCallWork(/*virtCall*/false, tempDest, &methTok, &callInfo);
if (sz > sizeof(INT64))
{
void* dest = LargeStructOperandStackPush(sz);
memcpy(dest, tempDest, sz);
delete[] reinterpret_cast<BYTE*>(tempDest);
OpStackSet<void*>(m_curStackHt, dest);
}
else
{
OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(tempDest, sz));
}
if (m_structRetValITPtr->IsStruct())
{
OpStackTypeSet(m_curStackHt, *m_structRetValITPtr);
}
else
{
// Must stack-normalize primitive types.
OpStackTypeSet(m_curStackHt, m_structRetValITPtr->StackNormalize());
}
// "Unregister" the temp space for GC scanning...
m_structRetValITPtr = NULL;
m_curStackHt++;
}
else if ((clsFlags & CORINFO_FLG_VAROBJSIZE) && !(clsFlags & CORINFO_FLG_ARRAY))
{
// For a VAROBJSIZE class (currently == String), pass NULL as this to "pseudo-constructor."
void* specialFlagArg = reinterpret_cast<void*>(0x1); // Special value for "thisArg" argument of "DoCallWork": push NULL that's not on op stack.
DoCallWork(/*virtCall*/false, specialFlagArg, &methTok, &callInfo); // pushes result automatically
}
else
{
OBJECTREF thisArgObj = NULL;
GCPROTECT_BEGIN(thisArgObj);
if (clsFlags & CORINFO_FLG_ARRAY)
{
assert(clsFlags & CORINFO_FLG_VAROBJSIZE);
MethodDesc* methDesc = GetMethod(methTok.hMethod);
PCCOR_SIGNATURE pSig;
DWORD cbSigSize;
methDesc->GetSig(&pSig, &cbSigSize);
MetaSig msig(pSig, cbSigSize, methDesc->GetModule(), NULL);
unsigned dwNumArgs = msig.NumFixedArgs();
assert(m_curStackHt >= dwNumArgs);
m_curStackHt -= dwNumArgs;
INT32* args = (INT32*)_alloca(dwNumArgs * sizeof(INT32));
unsigned dwArg;
for (dwArg = 0; dwArg < dwNumArgs; dwArg++)
{
unsigned stkInd = m_curStackHt + dwArg;
bool loose = s_InterpreterLooseRules && (OpStackTypeGet(stkInd).ToCorInfoType() == CORINFO_TYPE_NATIVEINT);
if (OpStackTypeGet(stkInd).ToCorInfoType() != CORINFO_TYPE_INT && !loose)
{
VerificationError("MD array dimension bounds and sizes must be int.");
}
args[dwArg] = loose ? (INT32) OpStackGet<NativeInt>(stkInd) : OpStackGet<INT32>(stkInd);
}
thisArgObj = AllocateArrayEx(TypeHandle(methTok.hClass), args, dwNumArgs);
}
else
{
CorInfoHelpFunc newHelper;
{
GCX_PREEMP();
newHelper = m_interpCeeInfo.getNewHelper(&methTok, m_methInfo->m_method);
}
MethodTable * pNewObjMT = GetMethodTableFromClsHnd(methTok.hClass);
switch (newHelper)
{
case CORINFO_HELP_NEWFAST:
default:
thisArgObj = AllocateObject(pNewObjMT);
break;
}
DoCallWork(/*virtCall*/false, OBJECTREFToObject(thisArgObj), &methTok, &callInfo);
}
{
GCX_FORBID();
OpStackSet<Object*>(m_curStackHt, OBJECTREFToObject(thisArgObj));
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
m_curStackHt++;
}
GCPROTECT_END(); // For "thisArgObj"
}
m_ILCodePtr += 5;
}
void Interpreter::NewArr()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt > 0);
unsigned stkInd = m_curStackHt-1;
CorInfoType cit = OpStackTypeGet(stkInd).ToCorInfoType();
NativeInt sz = 0;
switch (cit)
{
case CORINFO_TYPE_INT:
sz = static_cast<NativeInt>(OpStackGet<INT32>(stkInd));
break;
case CORINFO_TYPE_NATIVEINT:
sz = OpStackGet<NativeInt>(stkInd);
break;
default:
VerificationError("Size operand of 'newarr' must be int or native int.");
}
unsigned elemTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
CORINFO_CLASS_HANDLE elemClsHnd;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_NewArr]);
#endif // INTERP_TRACING
CORINFO_RESOLVED_TOKEN elemTypeResolvedTok;
{
GCX_PREEMP();
ResolveToken(&elemTypeResolvedTok, elemTypeTok, CORINFO_TOKENKIND_Newarr InterpTracingArg(RTK_NewArr));
elemClsHnd = elemTypeResolvedTok.hClass;
}
{
if (sz < 0)
{
COMPlusThrow(kOverflowException);
}
#ifdef _WIN64
// Even though ECMA allows using a native int as the argument to newarr instruction
// (therefore size is INT_PTR), ArrayBase::m_NumComponents is 32-bit, so even on 64-bit
// platforms we can't create an array whose size exceeds 32 bits.
if (sz > INT_MAX)
{
EX_THROW(EEMessageException, (kOverflowException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
}
#endif
TypeHandle typeHnd(elemClsHnd);
ArrayTypeDesc* pArrayClassRef = typeHnd.AsArray();
pArrayClassRef->GetMethodTable()->CheckRunClassInitThrowing();
INT32 size32 = (INT32)sz;
Object* newarray = OBJECTREFToObject(AllocateArrayEx(typeHnd, &size32, 1));
GCX_FORBID();
OpStackTypeSet(stkInd, InterpreterType(CORINFO_TYPE_CLASS));
OpStackSet<Object*>(stkInd, newarray);
}
m_ILCodePtr += 5;
}
void Interpreter::IsInst()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_IsInst]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Casting InterpTracingArg(RTK_IsInst));
assert(m_curStackHt >= 1);
unsigned idx = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
assert(cit == CORINFO_TYPE_CLASS || cit == CORINFO_TYPE_STRING);
#endif // _DEBUG
Object * pObj = OpStackGet<Object*>(idx);
if (pObj != NULL)
{
if (!ObjIsInstanceOf(pObj, TypeHandle(cls)))
OpStackSet<Object*>(idx, NULL);
}
// Type stack stays unmodified.
m_ILCodePtr += 5;
}
void Interpreter::CastClass()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_CastClass]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Casting InterpTracingArg(RTK_CastClass));
assert(m_curStackHt >= 1);
unsigned idx = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
assert(cit == CORINFO_TYPE_CLASS || cit == CORINFO_TYPE_STRING);
#endif // _DEBUG
Object * pObj = OpStackGet<Object*>(idx);
if (pObj != NULL)
{
if (!ObjIsInstanceOf(pObj, TypeHandle(cls), TRUE))
{
UNREACHABLE(); //ObjIsInstanceOf will throw if cast can't be done
}
}
// Type stack stays unmodified.
m_ILCodePtr += 5;
}
void Interpreter::LocAlloc()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned idx = m_curStackHt - 1;
CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
NativeUInt sz = 0;
if (cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT)
{
sz = static_cast<NativeUInt>(OpStackGet<UINT32>(idx));
}
else if (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_NATIVEUINT)
{
sz = OpStackGet<NativeUInt>(idx);
}
else if (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG)
{
sz = (NativeUInt) OpStackGet<INT64>(idx);
}
else
{
VerificationError("localloc requires int or nativeint argument.");
}
if (sz == 0)
{
OpStackSet<void*>(idx, NULL);
}
else
{
void* res = GetLocAllocData()->Alloc(sz);
if (res == NULL) ThrowStackOverflow();
OpStackSet<void*>(idx, res);
}
OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_NATIVEINT));
}
void Interpreter::MkRefany()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_MkRefAny]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_MkRefAny));
assert(m_curStackHt >= 1);
unsigned idx = m_curStackHt - 1;
CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
if (!(cit == CORINFO_TYPE_BYREF || cit == CORINFO_TYPE_NATIVEINT))
VerificationError("MkRefany requires byref or native int (pointer) on the stack.");
void* ptr = OpStackGet<void*>(idx);
InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo);
TypedByRef* tbr;
#if defined(_AMD64_)
assert(typedRefIT.IsLargeStruct(&m_interpCeeInfo));
tbr = (TypedByRef*) LargeStructOperandStackPush(GetTypedRefSize(&m_interpCeeInfo));
OpStackSet<void*>(idx, tbr);
#elif defined(_X86_) || defined(_ARM_)
assert(!typedRefIT.IsLargeStruct(&m_interpCeeInfo));
tbr = OpStackGetAddr<TypedByRef>(idx);
#elif defined(_ARM64_)
tbr = NULL;
NYI_INTERP("Unimplemented code: MkRefAny");
#else
#error "unsupported platform"
#endif
tbr->data = ptr;
tbr->type = TypeHandle(cls);
OpStackTypeSet(idx, typedRefIT);
m_ILCodePtr += 5;
}
void Interpreter::RefanyType()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt > 0);
unsigned idx = m_curStackHt - 1;
if (OpStackTypeGet(idx) != GetTypedRefIT(&m_interpCeeInfo))
VerificationError("RefAnyVal requires a TypedRef on the stack.");
TypedByRef* ptbr = OpStackGet<TypedByRef*>(idx);
LargeStructOperandStackPop(sizeof(TypedByRef), ptbr);
TypeHandle* pth = &ptbr->type;
{
OBJECTREF classobj = TypeHandleToTypeRef(pth);
GCX_FORBID();
OpStackSet<Object*>(idx, OBJECTREFToObject(classobj));
OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_CLASS));
}
m_ILCodePtr += 2;
}
// This (unfortunately) duplicates code in JIT_GetRuntimeTypeHandle, which
// isn't callable because it sets up a Helper Method Frame.
OBJECTREF Interpreter::TypeHandleToTypeRef(TypeHandle* pth)
{
OBJECTREF typePtr = NULL;
if (!pth->IsTypeDesc())
{
// Most common... and fastest case
typePtr = pth->AsMethodTable()->GetManagedClassObjectIfExists();
if (typePtr == NULL)
{
typePtr = pth->GetManagedClassObject();
}
}
else
{
typePtr = pth->GetManagedClassObject();
}
return typePtr;
}
CorInfoType Interpreter::GetTypeForPrimitiveValueClass(CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
GCX_PREEMP();
return m_interpCeeInfo.getTypeForPrimitiveValueClass(clsHnd);
}
void Interpreter::RefanyVal()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt > 0);
unsigned idx = m_curStackHt - 1;
if (OpStackTypeGet(idx) != GetTypedRefIT(&m_interpCeeInfo))
VerificationError("RefAnyVal requires a TypedRef on the stack.");
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_RefAnyVal]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_RefAnyVal));
TypeHandle expected(cls);
TypedByRef* ptbr = OpStackGet<TypedByRef*>(idx);
LargeStructOperandStackPop(sizeof(TypedByRef), ptbr);
if (expected != ptbr->type) ThrowInvalidCastException();
OpStackSet<void*>(idx, static_cast<void*>(ptbr->data));
OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_BYREF));
m_ILCodePtr += 5;
}
void Interpreter::CkFinite()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt > 0);
unsigned idx = m_curStackHt - 1;
CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
double val = 0.0;
switch (cit)
{
case CORINFO_TYPE_FLOAT:
val = (double)OpStackGet<float>(idx);
break;
case CORINFO_TYPE_DOUBLE:
val = OpStackGet<double>(idx);
break;
default:
VerificationError("CkFinite requires a floating-point value on the stack.");
break;
}
if (!_finite(val))
ThrowSysArithException();
}
void Interpreter::LdToken()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 1);
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdToken]);
#endif // INTERP_TRACING
CORINFO_RESOLVED_TOKEN tok;
{
GCX_PREEMP();
ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Ldtoken InterpTracingArg(RTK_LdToken));
}
// To save duplication of the factored code at the bottom, I don't do GCX_FORBID for
// these Object* values, but this comment documents the intent.
if (tok.hMethod != NULL)
{
MethodDesc* pMethod = (MethodDesc*)tok.hMethod;
Object* objPtr = OBJECTREFToObject((OBJECTREF)pMethod->GetStubMethodInfo());
OpStackSet<Object*>(m_curStackHt, objPtr);
}
else if (tok.hField != NULL)
{
FieldDesc * pField = (FieldDesc *)tok.hField;
Object* objPtr = OBJECTREFToObject((OBJECTREF)pField->GetStubFieldInfo());
OpStackSet<Object*>(m_curStackHt, objPtr);
}
else
{
TypeHandle th(tok.hClass);
Object* objPtr = OBJECTREFToObject(th.GetManagedClassObject());
OpStackSet<Object*>(m_curStackHt, objPtr);
}
{
GCX_FORBID();
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
m_curStackHt++;
}
m_ILCodePtr += 5;
}
void Interpreter::LdFtn()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 2);
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFtn]);
#endif // INTERP_TRACING
CORINFO_RESOLVED_TOKEN tok;
CORINFO_CALL_INFO callInfo;
{
GCX_PREEMP();
ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_LdFtn));
m_interpCeeInfo.getCallInfo(&tok, NULL, m_methInfo->m_method,
combine(CORINFO_CALLINFO_SECURITYCHECKS,CORINFO_CALLINFO_LDFTN),
&callInfo);
}
switch (callInfo.kind)
{
case CORINFO_CALL:
{
PCODE pCode = ((MethodDesc *)callInfo.hMethod)->GetMultiCallableAddrOfCode();
OpStackSet<void*>(m_curStackHt, (void *)pCode);
GetFunctionPointerStack()[m_curStackHt] = callInfo.hMethod;
}
break;
case CORINFO_CALL_CODE_POINTER:
NYI_INTERP("Indirect code pointer.");
break;
default:
_ASSERTE_MSG(false, "Should not reach here: unknown call kind.");
break;
}
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
m_curStackHt++;
m_ILCodePtr += 6;
}
void Interpreter::LdVirtFtn()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned ind = m_curStackHt - 1;
unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 2);
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdVirtFtn]);
#endif // INTERP_TRACING
CORINFO_RESOLVED_TOKEN tok;
CORINFO_CALL_INFO callInfo;
CORINFO_CLASS_HANDLE classHnd;
CORINFO_METHOD_HANDLE methodHnd;
{
GCX_PREEMP();
ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_LdVirtFtn));
m_interpCeeInfo.getCallInfo(&tok, NULL, m_methInfo->m_method,
combine(CORINFO_CALLINFO_SECURITYCHECKS,CORINFO_CALLINFO_LDFTN),
&callInfo);
classHnd = tok.hClass;
methodHnd = tok.hMethod;
}
MethodDesc * pMD = (MethodDesc *)methodHnd;
PCODE pCode;
if (pMD->IsVtableMethod())
{
Object* obj = OpStackGet<Object*>(ind);
ThrowOnInvalidPointer(obj);
OBJECTREF objRef = ObjectToOBJECTREF(obj);
GCPROTECT_BEGIN(objRef);
pCode = pMD->GetMultiCallableAddrOfVirtualizedCode(&objRef, TypeHandle(classHnd));
GCPROTECT_END();
pMD = Entry2MethodDesc(pCode, TypeHandle(classHnd).GetMethodTable());
}
else
{
pCode = pMD->GetMultiCallableAddrOfCode();
}
OpStackSet<void*>(ind, (void *)pCode);
GetFunctionPointerStack()[ind] = (CORINFO_METHOD_HANDLE)pMD;
OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_NATIVEINT));
m_ILCodePtr += 6;
}
void Interpreter::Sizeof()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Sizeof]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 2, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Sizeof));
unsigned sz;
{
GCX_PREEMP();
CorInfoType cit = ::asCorInfoType(cls);
// For class types, the ECMA spec says to return the size of the object reference, not the referent
// object. Everything else should be a value type, for which we can just return the size as reported
// by the EE.
switch (cit)
{
case CORINFO_TYPE_CLASS:
sz = sizeof(Object*);
break;
default:
sz = getClassSize(cls);
break;
}
}
OpStackSet<UINT32>(m_curStackHt, sz);
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
m_curStackHt++;
m_ILCodePtr += 6;
}
// static:
bool Interpreter::s_initialized = false;
bool Interpreter::s_compilerStaticsInitialized = false;
size_t Interpreter::s_TypedRefSize;
CORINFO_CLASS_HANDLE Interpreter::s_TypedRefClsHnd;
InterpreterType Interpreter::s_TypedRefIT;
// The accessors below require InitializeCompilerStatics to have been called first.
size_t Interpreter::GetTypedRefSize(CEEInfo* info)
{
_ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
return s_TypedRefSize;
}
InterpreterType Interpreter::GetTypedRefIT(CEEInfo* info)
{
_ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
return s_TypedRefIT;
}
CORINFO_CLASS_HANDLE Interpreter::GetTypedRefClsHnd(CEEInfo* info)
{
_ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
return s_TypedRefClsHnd;
}
void Interpreter::Initialize()
{
assert(!s_initialized);
s_InterpretMeths.ensureInit(CLRConfig::INTERNAL_Interpret);
s_InterpretMethsExclude.ensureInit(CLRConfig::INTERNAL_InterpretExclude);
s_InterpreterUseCaching = (s_InterpreterUseCachingFlag.val(CLRConfig::INTERNAL_InterpreterUseCaching) != 0);
s_InterpreterLooseRules = (s_InterpreterLooseRulesFlag.val(CLRConfig::INTERNAL_InterpreterLooseRules) != 0);
s_InterpreterDoLoopMethods = (s_InterpreterDoLoopMethodsFlag.val(CLRConfig::INTERNAL_InterpreterDoLoopMethods) != 0);
// Initialize the lock used to protect the interpreter method cache.
// TODO: it would be better if this were a reader/writer lock.
s_methodCacheLock.Init(CrstLeafLock, CRST_DEFAULT);
// Similarly, initialize the lock used to protect the map from
// interpreter stub addresses to their method descs.
s_interpStubToMDMapLock.Init(CrstLeafLock, CRST_DEFAULT);
s_initialized = true;
#if INTERP_ILINSTR_PROFILE
SetILInstrCategories();
#endif // INTERP_ILINSTR_PROFILE
}
void Interpreter::InitializeCompilerStatics(CEEInfo* info)
{
if (!s_compilerStaticsInitialized)
{
// TODO: I believe I need no synchronization around this on x86, but I do
// on more permissive memory models. (Why it's OK on x86: each thread executes this
// before any access to the initialized static variables; if several threads do
// so, they perform idempotent initializing writes to the statics.)
GCX_PREEMP();
s_TypedRefClsHnd = info->getBuiltinClass(CLASSID_TYPED_BYREF);
s_TypedRefIT = InterpreterType(info, s_TypedRefClsHnd);
s_TypedRefSize = getClassSize(s_TypedRefClsHnd);
s_compilerStaticsInitialized = true;
// TODO: Need store-store memory barrier here.
}
}
void Interpreter::Terminate()
{
if (s_initialized)
{
s_methodCacheLock.Destroy();
s_interpStubToMDMapLock.Destroy();
s_initialized = false;
}
}
#if INTERP_ILINSTR_PROFILE
void Interpreter::SetILInstrCategories()
{
// Start with the identity map.
for (unsigned short instr = 0; instr < 512; instr++) s_ILInstrCategories[instr] = instr;
// Now make exceptions.
for (unsigned instr = CEE_LDARG_0; instr <= CEE_LDARG_3; instr++) s_ILInstrCategories[instr] = CEE_LDARG;
s_ILInstrCategories[CEE_LDARG_S] = CEE_LDARG;
for (unsigned instr = CEE_LDLOC_0; instr <= CEE_LDLOC_3; instr++) s_ILInstrCategories[instr] = CEE_LDLOC;
s_ILInstrCategories[CEE_LDLOC_S] = CEE_LDLOC;
for (unsigned instr = CEE_STLOC_0; instr <= CEE_STLOC_3; instr++) s_ILInstrCategories[instr] = CEE_STLOC;
s_ILInstrCategories[CEE_STLOC_S] = CEE_STLOC;
s_ILInstrCategories[CEE_LDLOCA_S] = CEE_LDLOCA;
for (unsigned instr = CEE_LDC_I4_M1; instr <= CEE_LDC_I4_S; instr++) s_ILInstrCategories[instr] = CEE_LDC_I4;
for (unsigned instr = CEE_BR_S; instr <= CEE_BLT_UN; instr++) s_ILInstrCategories[instr] = CEE_BR;
for (unsigned instr = CEE_LDIND_I1; instr <= CEE_LDIND_REF; instr++) s_ILInstrCategories[instr] = CEE_LDIND_I;
for (unsigned instr = CEE_STIND_REF; instr <= CEE_STIND_R8; instr++) s_ILInstrCategories[instr] = CEE_STIND_I;
for (unsigned instr = CEE_ADD; instr <= CEE_REM_UN; instr++) s_ILInstrCategories[instr] = CEE_ADD;
for (unsigned instr = CEE_AND; instr <= CEE_NOT; instr++) s_ILInstrCategories[instr] = CEE_AND;
for (unsigned instr = CEE_CONV_I1; instr <= CEE_CONV_U8; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
for (unsigned instr = CEE_CONV_OVF_I1_UN; instr <= CEE_CONV_OVF_U_UN; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
for (unsigned instr = CEE_LDELEM_I1; instr <= CEE_LDELEM_REF; instr++) s_ILInstrCategories[instr] = CEE_LDELEM;
for (unsigned instr = CEE_STELEM_I; instr <= CEE_STELEM_REF; instr++) s_ILInstrCategories[instr] = CEE_STELEM;
for (unsigned instr = CEE_CONV_OVF_I1; instr <= CEE_CONV_OVF_U8; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
for (unsigned instr = CEE_CONV_U2; instr <= CEE_CONV_U1; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
for (unsigned instr = CEE_CONV_OVF_I; instr <= CEE_CONV_OVF_U; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
for (unsigned instr = CEE_ADD_OVF; instr <= CEE_SUB_OVF; instr++) s_ILInstrCategories[instr] = CEE_ADD_OVF;
s_ILInstrCategories[CEE_LEAVE_S] = CEE_LEAVE;
s_ILInstrCategories[CEE_CONV_U] = CEE_CONV_I;
}
#endif // INTERP_ILINSTR_PROFILE
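// Implements ceq, cgt, cgt.un, clt, and clt.un: computes the comparison result for the
// top two stack operands via CompareOpRes and replaces them with a single INT32 (1 or 0).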
template<int op>
void Interpreter::CompareOp()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned op1idx = m_curStackHt - 2;
INT32 res = CompareOpRes<op>(op1idx);
OpStackSet<INT32>(op1idx, res);
OpStackTypeSet(op1idx, InterpreterType(CORINFO_TYPE_INT));
m_curStackHt--;
}
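// Returns 1 if comparison "op" holds for the operands at "op1idx" and "op1idx + 1",
// 0 otherwise, dispatching on the operands' stack-normal types.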
template<int op>
INT32 Interpreter::CompareOpRes(unsigned op1idx)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= op1idx + 2);
unsigned op2idx = op1idx + 1;
InterpreterType t1 = OpStackTypeGet(op1idx);
CorInfoType cit1 = t1.ToCorInfoType();
assert(IsStackNormalType(cit1));
InterpreterType t2 = OpStackTypeGet(op2idx);
CorInfoType cit2 = t2.ToCorInfoType();
assert(IsStackNormalType(cit2));
INT32 res = 0;
switch (cit1)
{
case CORINFO_TYPE_INT:
if (cit2 == CORINFO_TYPE_INT)
{
INT32 val1 = OpStackGet<INT32>(op1idx);
INT32 val2 = OpStackGet<INT32>(op2idx);
if (op == CO_EQ)
{
if (val1 == val2) res = 1;
}
else if (op == CO_GT)
{
if (val1 > val2) res = 1;
}
else if (op == CO_GT_UN)
{
if (static_cast<UINT32>(val1) > static_cast<UINT32>(val2)) res = 1;
}
else if (op == CO_LT)
{
if (val1 < val2) res = 1;
}
else
{
assert(op == CO_LT_UN);
if (static_cast<UINT32>(val1) < static_cast<UINT32>(val2)) res = 1;
}
}
else if (cit2 == CORINFO_TYPE_NATIVEINT ||
(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF) ||
(cit2 == CORINFO_TYPE_VALUECLASS
&& CorInfoTypeStackNormalize(GetTypeForPrimitiveValueClass(t2.ToClassHandle())) == CORINFO_TYPE_NATIVEINT))
{
NativeInt val1 = OpStackGet<NativeInt>(op1idx);
NativeInt val2 = OpStackGet<NativeInt>(op2idx);
if (op == CO_EQ)
{
if (val1 == val2) res = 1;
}
else if (op == CO_GT)
{
if (val1 > val2) res = 1;
}
else if (op == CO_GT_UN)
{
if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
}
else if (op == CO_LT)
{
if (val1 < val2) res = 1;
}
else
{
assert(op == CO_LT_UN);
if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
}
}
else if (cit2 == CORINFO_TYPE_VALUECLASS)
{
cit2 = GetTypeForPrimitiveValueClass(t2.ToClassHandle());
INT32 val1 = OpStackGet<INT32>(op1idx);
INT32 val2 = 0;
if (CorInfoTypeStackNormalize(cit2) == CORINFO_TYPE_INT)
{
size_t sz = t2.Size(&m_interpCeeInfo);
switch (sz)
{
case 1:
if (CorInfoTypeIsUnsigned(cit2))
{
val2 = OpStackGet<UINT8>(op2idx);
}
else
{
val2 = OpStackGet<INT8>(op2idx);
}
break;
case 2:
if (CorInfoTypeIsUnsigned(cit2))
{
val2 = OpStackGet<UINT16>(op2idx);
}
else
{
val2 = OpStackGet<INT16>(op2idx);
}
break;
case 4:
val2 = OpStackGet<INT32>(op2idx);
break;
default:
UNREACHABLE();
}
}
else
{
VerificationError("Can't compare with struct type.");
}
if (op == CO_EQ)
{
if (val1 == val2) res = 1;
}
else if (op == CO_GT)
{
if (val1 > val2) res = 1;
}
else if (op == CO_GT_UN)
{
if (static_cast<UINT32>(val1) > static_cast<UINT32>(val2)) res = 1;
}
else if (op == CO_LT)
{
if (val1 < val2) res = 1;
}
else
{
assert(op == CO_LT_UN);
if (static_cast<UINT32>(val1) < static_cast<UINT32>(val2)) res = 1;
}
}
else
{
VerificationError("Binary comparision operation: type mismatch.");
}
break;
case CORINFO_TYPE_NATIVEINT:
if (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_INT
|| (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
|| (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF)
|| (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_CLASS && OpStackGet<void*>(op2idx) == 0))
{
NativeInt val1 = OpStackGet<NativeInt>(op1idx);
NativeInt val2;
if (cit2 == CORINFO_TYPE_NATIVEINT)
{
val2 = OpStackGet<NativeInt>(op2idx);
}
else if (cit2 == CORINFO_TYPE_INT)
{
val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
}
else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
{
val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
}
else if (cit2 == CORINFO_TYPE_CLASS)
{
assert(OpStackGet<void*>(op2idx) == 0);
val2 = 0;
}
else
{
assert(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF);
val2 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op2idx));
}
if (op == CO_EQ)
{
if (val1 == val2) res = 1;
}
else if (op == CO_GT)
{
if (val1 > val2) res = 1;
}
else if (op == CO_GT_UN)
{
if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
}
else if (op == CO_LT)
{
if (val1 < val2) res = 1;
}
else
{
assert(op == CO_LT_UN);
if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
}
}
else
{
VerificationError("Binary comparision operation: type mismatch.");
}
break;
case CORINFO_TYPE_LONG:
{
bool looseLong = false;
#if defined(_AMD64_)
looseLong = s_InterpreterLooseRules && (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_BYREF);
#endif
if (cit2 == CORINFO_TYPE_LONG || looseLong)
{
INT64 val1 = OpStackGet<INT64>(op1idx);
INT64 val2 = OpStackGet<INT64>(op2idx);
if (op == CO_EQ)
{
if (val1 == val2) res = 1;
}
else if (op == CO_GT)
{
if (val1 > val2) res = 1;
}
else if (op == CO_GT_UN)
{
if (static_cast<UINT64>(val1) > static_cast<UINT64>(val2)) res = 1;
}
else if (op == CO_LT)
{
if (val1 < val2) res = 1;
}
else
{
assert(op == CO_LT_UN);
if (static_cast<UINT64>(val1) < static_cast<UINT64>(val2)) res = 1;
}
}
else
{
VerificationError("Binary comparision operation: type mismatch.");
}
}
break;
case CORINFO_TYPE_CLASS:
case CORINFO_TYPE_STRING:
if (cit2 == CORINFO_TYPE_CLASS || cit2 == CORINFO_TYPE_STRING)
{
GCX_FORBID();
Object* val1 = OpStackGet<Object*>(op1idx);
Object* val2 = OpStackGet<Object*>(op2idx);
if (op == CO_EQ)
{
if (val1 == val2) res = 1;
}
else if (op == CO_GT_UN)
{
if (val1 != val2) res = 1;
}
else
{
VerificationError("Binary comparision operation: type mismatch.");
}
}
else
{
VerificationError("Binary comparision operation: type mismatch.");
}
break;
case CORINFO_TYPE_FLOAT:
{
bool isDouble = (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_DOUBLE);
if (cit2 == CORINFO_TYPE_FLOAT || isDouble)
{
float val1 = OpStackGet<float>(op1idx);
float val2 = (isDouble) ? (float) OpStackGet<double>(op2idx) : OpStackGet<float>(op2idx);
if (op == CO_EQ)
{
// I'm assuming IEEE math here, so that if at least one is a NAN, the comparison will fail...
if (val1 == val2) res = 1;
}
else if (op == CO_GT)
{
// I'm assuming that C++ arithmetic does the right thing here with infinities and NANs.
if (val1 > val2) res = 1;
}
else if (op == CO_GT_UN)
{
// Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
if (_isnan(val1) || _isnan(val2)) res = 1;
else if (val1 > val2) res = 1;
}
else if (op == CO_LT)
{
if (val1 < val2) res = 1;
}
else
{
assert(op == CO_LT_UN);
// Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
if (_isnan(val1) || _isnan(val2)) res = 1;
else if (val1 < val2) res = 1;
}
}
else
{
VerificationError("Binary comparision operation: type mismatch.");
}
}
break;
case CORINFO_TYPE_DOUBLE:
{
bool isFloat = (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_FLOAT);
if (cit2 == CORINFO_TYPE_DOUBLE || isFloat)
{
double val1 = OpStackGet<double>(op1idx);
double val2 = (isFloat) ? (double) OpStackGet<float>(op2idx) : OpStackGet<double>(op2idx);
if (op == CO_EQ)
{
// I'm assuming IEEE math here, so that if at least one is a NAN, the comparison will fail...
if (val1 == val2) res = 1;
}
else if (op == CO_GT)
{
// I'm assuming that C++ arithmetic does the right thing here with infinities and NANs.
if (val1 > val2) res = 1;
}
else if (op == CO_GT_UN)
{
// Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
if (_isnan(val1) || _isnan(val2)) res = 1;
else if (val1 > val2) res = 1;
}
else if (op == CO_LT)
{
if (val1 < val2) res = 1;
}
else
{
assert(op == CO_LT_UN);
// Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
if (_isnan(val1) || _isnan(val2)) res = 1;
else if (val1 < val2) res = 1;
}
}
else
{
VerificationError("Binary comparision operation: type mismatch.");
}
}
break;
case CORINFO_TYPE_BYREF:
if (cit2 == CORINFO_TYPE_BYREF || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
{
NativeInt val1 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op1idx));
NativeInt val2;
if (cit2 == CORINFO_TYPE_BYREF)
{
val2 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op2idx));
}
else
{
assert(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT);
val2 = OpStackGet<NativeInt>(op2idx);
}
if (op == CO_EQ)
{
if (val1 == val2) res = 1;
}
else if (op == CO_GT)
{
if (val1 > val2) res = 1;
}
else if (op == CO_GT_UN)
{
if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
}
else if (op == CO_LT)
{
if (val1 < val2) res = 1;
}
else
{
assert(op == CO_LT_UN);
if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
}
}
else
{
VerificationError("Binary comparision operation: type mismatch.");
}
break;
case CORINFO_TYPE_VALUECLASS:
{
CorInfoType newCit1 = GetTypeForPrimitiveValueClass(t1.ToClassHandle());
if (newCit1 == CORINFO_TYPE_UNDEF)
{
VerificationError("Can't compare a value class.");
}
else
{
NYI_INTERP("Must eliminate 'punning' value classes from the ostack.");
}
}
break;
default:
assert(false); // Should not be here if the type is stack-normal.
}
return res;
}
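
// Implements brtrue/brfalse (and their short forms): pops the top operand stack entry and
// branches by the IL offset of width "targetLen" if the operand's truth value equals "val".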
template<bool val, int targetLen>
void Interpreter::BrOnValue()
{
assert(targetLen == 1 || targetLen == 4);
assert(m_curStackHt > 0);
unsigned stackInd = m_curStackHt - 1;
InterpreterType it = OpStackTypeGet(stackInd);
// It shouldn't be a value class, unless it's a punning name for a primitive integral type.
if (it.ToCorInfoType() == CORINFO_TYPE_VALUECLASS)
{
GCX_PREEMP();
CorInfoType cit = m_interpCeeInfo.getTypeForPrimitiveValueClass(it.ToClassHandle());
if (CorInfoTypeIsIntegral(cit))
{
it = InterpreterType(cit);
}
else
{
VerificationError("Can't branch on the value of a value type that is not a primitive type.");
}
}
#ifdef _DEBUG
switch (it.ToCorInfoType())
{
case CORINFO_TYPE_FLOAT:
case CORINFO_TYPE_DOUBLE:
VerificationError("Can't branch on the value of a float or double.");
break;
default:
break;
}
#endif // _DEBUG
switch (it.SizeNotStruct())
{
case 4:
{
INT32 branchVal = OpStackGet<INT32>(stackInd);
BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
}
break;
case 8:
{
INT64 branchVal = OpStackGet<INT64>(stackInd);
BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
}
break;
// The value-class case handled above makes sizes 1 and 2 possible.
case 1:
{
INT8 branchVal = OpStackGet<INT8>(stackInd);
BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
}
break;
case 2:
{
INT16 branchVal = OpStackGet<INT16>(stackInd);
BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
}
break;
default:
UNREACHABLE();
break;
}
m_curStackHt = stackInd;
}
// compOp is a member of the BranchComparisonOp enumeration.
template<int compOp, bool reverse, int targetLen>
void Interpreter::BrOnComparison()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(targetLen == 1 || targetLen == 4);
assert(m_curStackHt >= 2);
unsigned v1Ind = m_curStackHt - 2;
INT32 res = CompareOpRes<compOp>(v1Ind);
if (reverse)
{
res = (res == 0) ? 1 : 0;
}
if (res)
{
int offset;
if (targetLen == 1)
{
            // The 1-byte branch offset is signed, so read it with getI1 (a raw BYTE would be unsigned).
offset = getI1(m_ILCodePtr + 1);
}
else
{
offset = getI4LittleEndian(m_ILCodePtr + 1);
}
// 1 is the size of the current instruction; offset is relative to start of next.
if (offset < 0)
{
// Backwards branch; enable caching.
BackwardsBranchActions(offset);
}
ExecuteBranch(m_ILCodePtr + 1 + targetLen + offset);
}
else
{
m_ILCodePtr += targetLen + 1;
}
m_curStackHt -= 2;
}
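
// Implements ldfld: replaces the object reference (or value-type address) on top of the
// operand stack with the value of the instance field.  If "fldIn" is non-NULL, the field has
// already been resolved by the caller, and the IL pointer is not advanced here.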
void Interpreter::LdFld(FieldDesc* fldIn)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
BarrierIfVolatile();
FieldDesc* fld = fldIn;
CORINFO_CLASS_HANDLE valClsHnd = NULL;
DWORD fldOffset;
{
GCX_PREEMP();
unsigned ilOffset = CurOffset();
if (fld == NULL && s_InterpreterUseCaching)
{
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFld]);
#endif // INTERP_TRACING
fld = GetCachedInstanceField(ilOffset);
}
if (fld == NULL)
{
unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
fld = FindField(tok InterpTracingArg(RTK_LdFld));
assert(fld != NULL);
fldOffset = fld->GetOffset();
if (s_InterpreterUseCaching && fldOffset < FIELD_OFFSET_LAST_REAL_OFFSET)
CacheInstanceField(ilOffset, fld);
}
else
{
fldOffset = fld->GetOffset();
}
}
CorInfoType valCit = CEEInfo::asCorInfoType(fld->GetFieldType());
// If "fldIn" is non-NULL, it's not a "real" LdFld -- the caller should handle updating the instruction pointer.
if (fldIn == NULL)
m_ILCodePtr += 5; // Last use above, so update now.
// We need to construct the interpreter type for a struct type before we try to do coordinated
// pushes of the value and type on the opstacks -- these must be atomic wrt GC, and constructing
// a struct InterpreterType transitions to preemptive mode.
InterpreterType structValIT;
if (valCit == CORINFO_TYPE_VALUECLASS)
{
GCX_PREEMP();
valCit = m_interpCeeInfo.getFieldType(CORINFO_FIELD_HANDLE(fld), &valClsHnd);
structValIT = InterpreterType(&m_interpCeeInfo, valClsHnd);
}
UINT sz = fld->GetSize();
    // Live vars: valCit, structValIT
assert(m_curStackHt > 0);
unsigned stackInd = m_curStackHt - 1;
InterpreterType addrIt = OpStackTypeGet(stackInd);
CorInfoType addrCit = addrIt.ToCorInfoType();
bool isUnsigned;
if (addrCit == CORINFO_TYPE_CLASS)
{
OBJECTREF obj = OBJECTREF(OpStackGet<Object*>(stackInd));
ThrowOnInvalidPointer(OBJECTREFToObject(obj));
if (valCit == CORINFO_TYPE_VALUECLASS)
{
void* srcPtr = fld->GetInstanceAddress(obj);
// srcPtr is now vulnerable.
GCX_FORBID();
MethodTable* valClsMT = GetMethodTableFromClsHnd(valClsHnd);
if (sz > sizeof(INT64))
{
// Large struct case: allocate space on the large struct operand stack.
void* destPtr = LargeStructOperandStackPush(sz);
OpStackSet<void*>(stackInd, destPtr);
CopyValueClass(destPtr, srcPtr, valClsMT, obj->GetAppDomain());
}
else
{
// Small struct case -- is inline in operand stack.
OpStackSet<INT64>(stackInd, GetSmallStructValue(srcPtr, sz));
}
}
else
{
BYTE* fldStart = dac_cast<PTR_BYTE>(OBJECTREFToObject(obj)) + sizeof(Object) + fldOffset;
// fldStart is now a vulnerable byref
GCX_FORBID();
switch (sz)
{
case 1:
isUnsigned = CorInfoTypeIsUnsigned(valCit);
if (isUnsigned)
{
OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT8*>(fldStart));
}
else
{
OpStackSet<INT32>(stackInd, *reinterpret_cast<INT8*>(fldStart));
}
break;
case 2:
isUnsigned = CorInfoTypeIsUnsigned(valCit);
if (isUnsigned)
{
OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT16*>(fldStart));
}
else
{
OpStackSet<INT32>(stackInd, *reinterpret_cast<INT16*>(fldStart));
}
break;
case 4:
OpStackSet<INT32>(stackInd, *reinterpret_cast<INT32*>(fldStart));
break;
case 8:
OpStackSet<INT64>(stackInd, *reinterpret_cast<INT64*>(fldStart));
break;
default:
_ASSERTE_MSG(false, "Should not reach here.");
break;
}
}
}
else
{
INT8* ptr = NULL;
if (addrCit == CORINFO_TYPE_VALUECLASS)
{
size_t addrSize = addrIt.Size(&m_interpCeeInfo);
// The ECMA spec allows ldfld to be applied to "an instance of a value type."
// We will take the address of the ostack entry.
if (addrIt.IsLargeStruct(&m_interpCeeInfo))
{
ptr = reinterpret_cast<INT8*>(OpStackGet<void*>(stackInd));
// This is delicate. I'm going to pop the large struct off the large-struct stack
// now, even though the field value we push may go back on the large object stack.
// We rely on the fact that this instruction doesn't do any other pushing, and
// we assume that LargeStructOperandStackPop does not actually deallocate any memory,
// and we rely on memcpy properly handling possibly-overlapping regions being copied.
// Finally (wow, this really *is* delicate), we rely on the property that the large-struct
// stack pop operation doesn't deallocate memory (the size of the allocated memory for the
// large-struct stack only grows in a method execution), and that if we push the field value
// on the large struct stack below, the size of the pushed item is at most the size of the
// popped item, so the stack won't grow (which would allow a dealloc/realloc).
// (All in all, maybe it would be better to just copy the value elsewhere then pop...but
// that wouldn't be very aggressive.)
LargeStructOperandStackPop(addrSize, ptr);
}
else
{
ptr = reinterpret_cast<INT8*>(OpStackGetAddr(stackInd, addrSize));
}
}
else
{
assert(CorInfoTypeIsPointer(addrCit));
ptr = OpStackGet<INT8*>(stackInd);
ThrowOnInvalidPointer(ptr);
}
assert(ptr != NULL);
ptr += fldOffset;
if (valCit == CORINFO_TYPE_VALUECLASS)
{
if (sz > sizeof(INT64))
{
// Large struct case.
void* dstPtr = LargeStructOperandStackPush(sz);
memcpy(dstPtr, ptr, sz);
OpStackSet<void*>(stackInd, dstPtr);
}
else
{
// Small struct case -- is inline in operand stack.
OpStackSet<INT64>(stackInd, GetSmallStructValue(ptr, sz));
}
OpStackTypeSet(stackInd, structValIT.StackNormalize());
return;
}
// Otherwise...
switch (sz)
{
case 1:
isUnsigned = CorInfoTypeIsUnsigned(valCit);
if (isUnsigned)
{
OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT8*>(ptr));
}
else
{
OpStackSet<INT32>(stackInd, *reinterpret_cast<INT8*>(ptr));
}
break;
case 2:
isUnsigned = CorInfoTypeIsUnsigned(valCit);
if (isUnsigned)
{
OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT16*>(ptr));
}
else
{
OpStackSet<INT32>(stackInd, *reinterpret_cast<INT16*>(ptr));
}
break;
case 4:
OpStackSet<INT32>(stackInd, *reinterpret_cast<INT32*>(ptr));
break;
case 8:
OpStackSet<INT64>(stackInd, *reinterpret_cast<INT64*>(ptr));
break;
}
}
if (valCit == CORINFO_TYPE_VALUECLASS)
{
OpStackTypeSet(stackInd, structValIT.StackNormalize());
}
else
{
OpStackTypeSet(stackInd, InterpreterType(valCit).StackNormalize());
}
}
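
// Implements ldflda: replaces the object reference or pointer on top of the operand stack with
// the address of the named instance field (a byref, or a native int when the source was a native int).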
void Interpreter::LdFldA()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFldA]);
#endif // INTERP_TRACING
unsigned offset = CurOffset();
m_ILCodePtr += 5; // Last use above, so update now.
FieldDesc* fld = NULL;
if (s_InterpreterUseCaching) fld = GetCachedInstanceField(offset);
if (fld == NULL)
{
GCX_PREEMP();
fld = FindField(tok InterpTracingArg(RTK_LdFldA));
if (s_InterpreterUseCaching) CacheInstanceField(offset, fld);
}
assert(m_curStackHt > 0);
unsigned stackInd = m_curStackHt - 1;
CorInfoType addrCit = OpStackTypeGet(stackInd).ToCorInfoType();
if (addrCit == CORINFO_TYPE_BYREF || addrCit == CORINFO_TYPE_CLASS || addrCit == CORINFO_TYPE_NATIVEINT)
{
NativeInt ptr = OpStackGet<NativeInt>(stackInd);
ThrowOnInvalidPointer((void*)ptr);
// The "offset" below does not include the Object (i.e., the MethodTable pointer) for object pointers, so add that in first.
if (addrCit == CORINFO_TYPE_CLASS) ptr += sizeof(Object);
// Now add the offset.
ptr += fld->GetOffset();
OpStackSet<NativeInt>(stackInd, ptr);
if (addrCit == CORINFO_TYPE_NATIVEINT)
{
OpStackTypeSet(stackInd, InterpreterType(CORINFO_TYPE_NATIVEINT));
}
else
{
OpStackTypeSet(stackInd, InterpreterType(CORINFO_TYPE_BYREF));
}
}
else
{
VerificationError("LdfldA requires object reference, managed or unmanaged pointer type.");
}
}
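
// Implements stfld: pops a value and an object reference (or pointer) from the operand stack and
// stores the value into the named instance field, using GC-safe stores for reference and struct fields.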
void Interpreter::StFld()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StFld]);
#endif // INTERP_TRACING
FieldDesc* fld = NULL;
DWORD fldOffset;
{
unsigned ilOffset = CurOffset();
if (s_InterpreterUseCaching) fld = GetCachedInstanceField(ilOffset);
if (fld == NULL)
{
unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
GCX_PREEMP();
fld = FindField(tok InterpTracingArg(RTK_StFld));
assert(fld != NULL);
fldOffset = fld->GetOffset();
if (s_InterpreterUseCaching && fldOffset < FIELD_OFFSET_LAST_REAL_OFFSET)
CacheInstanceField(ilOffset, fld);
}
else
{
fldOffset = fld->GetOffset();
}
}
m_ILCodePtr += 5; // Last use above, so update now.
UINT sz = fld->GetSize();
assert(m_curStackHt >= 2);
unsigned addrInd = m_curStackHt - 2;
CorInfoType addrCit = OpStackTypeGet(addrInd).ToCorInfoType();
unsigned valInd = m_curStackHt - 1;
CorInfoType valCit = OpStackTypeGet(valInd).ToCorInfoType();
assert(IsStackNormalType(addrCit) && IsStackNormalType(valCit));
m_curStackHt -= 2;
if (addrCit == CORINFO_TYPE_CLASS)
{
OBJECTREF obj = OBJECTREF(OpStackGet<Object*>(addrInd));
ThrowOnInvalidPointer(OBJECTREFToObject(obj));
if (valCit == CORINFO_TYPE_CLASS)
{
fld->SetRefValue(obj, ObjectToOBJECTREF(OpStackGet<Object*>(valInd)));
}
else if (valCit == CORINFO_TYPE_VALUECLASS)
{
MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
void* destPtr = fld->GetInstanceAddress(obj);
// destPtr is now a vulnerable byref, so can't do GC.
GCX_FORBID();
            // We use CopyValueClassUnchecked below so that any GC references embedded in the
            // value class are copied GC-safely (with the required write barriers).  We could do better...
if (sz > sizeof(INT64))
{
// Large struct case: stack slot contains pointer...
void* srcPtr = OpStackGet<void*>(valInd);
CopyValueClassUnchecked(destPtr, srcPtr, valClsMT);
LargeStructOperandStackPop(sz, srcPtr);
}
else
{
// Small struct case -- is inline in operand stack.
CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
}
BarrierIfVolatile();
return;
}
else
{
#ifdef _DEBUG
if (obj->IsTransparentProxy()) NYI_INTERP("Stores to thunking objects.");
#endif
BYTE* fldStart = dac_cast<PTR_BYTE>(OBJECTREFToObject(obj)) + sizeof(Object) + fldOffset;
// fldStart is now a vulnerable byref
GCX_FORBID();
switch (sz)
{
case 1:
*reinterpret_cast<INT8*>(fldStart) = OpStackGet<INT8>(valInd);
break;
case 2:
*reinterpret_cast<INT16*>(fldStart) = OpStackGet<INT16>(valInd);
break;
case 4:
*reinterpret_cast<INT32*>(fldStart) = OpStackGet<INT32>(valInd);
break;
case 8:
*reinterpret_cast<INT64*>(fldStart) = OpStackGet<INT64>(valInd);
break;
}
}
}
else
{
assert(addrCit == CORINFO_TYPE_BYREF || addrCit == CORINFO_TYPE_NATIVEINT);
INT8* destPtr = OpStackGet<INT8*>(addrInd);
ThrowOnInvalidPointer(destPtr);
destPtr += fldOffset;
if (valCit == CORINFO_TYPE_VALUECLASS)
{
MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
            // We use CopyValueClassUnchecked below so that any GC references embedded in the
            // value class are copied GC-safely (with the required write barriers).  We could do better...
if (sz > sizeof(INT64))
{
// Large struct case: stack slot contains pointer...
void* srcPtr = OpStackGet<void*>(valInd);
CopyValueClassUnchecked(destPtr, srcPtr, valClsMT);
LargeStructOperandStackPop(sz, srcPtr);
}
else
{
// Small struct case -- is inline in operand stack.
CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
}
BarrierIfVolatile();
return;
}
else if (valCit == CORINFO_TYPE_CLASS)
{
OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(destPtr), val);
}
else
{
switch (sz)
{
case 1:
*reinterpret_cast<INT8*>(destPtr) = OpStackGet<INT8>(valInd);
break;
case 2:
*reinterpret_cast<INT16*>(destPtr) = OpStackGet<INT16>(valInd);
break;
case 4:
*reinterpret_cast<INT32*>(destPtr) = OpStackGet<INT32>(valInd);
break;
case 8:
*reinterpret_cast<INT64*>(destPtr) = OpStackGet<INT64>(valInd);
break;
}
}
}
BarrierIfVolatile();
}
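
// Resolves the static field token at the current IL offset, ensures the declaring class is
// initialized, and returns (via the out parameters) the field's address, interpreter type, size,
// and whether it lives in managed memory.  The return value indicates whether the result may be cached.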
bool Interpreter::StaticFldAddrWork(CORINFO_ACCESS_FLAGS accessFlgs, /*out (byref)*/void** pStaticFieldAddr, /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
bool isCacheable = true;
*pManagedMem = true; // Default result.
unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
m_ILCodePtr += 5; // Above is last use of m_ILCodePtr in this method, so update now.
FieldDesc* fld;
CORINFO_FIELD_INFO fldInfo;
CORINFO_RESOLVED_TOKEN fldTok;
void* pFldAddr = NULL;
{
{
GCX_PREEMP();
ResolveToken(&fldTok, tok, CORINFO_TOKENKIND_Field InterpTracingArg(RTK_SFldAddr));
fld = reinterpret_cast<FieldDesc*>(fldTok.hField);
m_interpCeeInfo.getFieldInfo(&fldTok, m_methInfo->m_method, accessFlgs, &fldInfo);
}
EnsureClassInit(GetMethodTableFromClsHnd(fldTok.hClass));
if (fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_TLS)
{
NYI_INTERP("Thread-local static.");
}
else if (fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER
|| fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER)
{
*pStaticFieldAddr = fld->GetCurrentStaticAddress();
isCacheable = false;
}
else
{
*pStaticFieldAddr = fld->GetCurrentStaticAddress();
}
}
if (fldInfo.structType != NULL && fldInfo.fieldType != CORINFO_TYPE_CLASS && fldInfo.fieldType != CORINFO_TYPE_PTR)
{
*pit = InterpreterType(&m_interpCeeInfo, fldInfo.structType);
if ((fldInfo.fieldFlags & CORINFO_FLG_FIELD_UNMANAGED) == 0)
{
// For valuetypes in managed memory, the address returned contains a pointer into the heap, to a boxed version of the
// static variable; return a pointer to the boxed struct.
isCacheable = false;
}
else
{
*pManagedMem = false;
}
}
else
{
*pit = InterpreterType(fldInfo.fieldType);
}
*pFldSize = fld->GetSize();
return isCacheable;
}
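
// Implements ldsfld: pushes the value of a static field onto the operand stack, handling both
// primitive fields and (small or large) struct-typed fields.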
void Interpreter::LdSFld()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
InterpreterType fldIt;
UINT sz;
bool managedMem;
void* srcPtr = NULL;
BarrierIfVolatile();
GCPROTECT_BEGININTERIOR(srcPtr);
StaticFldAddr(CORINFO_ACCESS_GET, &srcPtr, &fldIt, &sz, &managedMem);
bool isUnsigned;
if (fldIt.IsStruct())
{
// Large struct case.
CORINFO_CLASS_HANDLE sh = fldIt.ToClassHandle();
// This call is GC_TRIGGERS, so do it before we copy the value: no GC after this,
// until the op stacks and ht are consistent.
OpStackTypeSet(m_curStackHt, InterpreterType(&m_interpCeeInfo, sh).StackNormalize());
if (fldIt.IsLargeStruct(&m_interpCeeInfo))
{
void* dstPtr = LargeStructOperandStackPush(sz);
memcpy(dstPtr, srcPtr, sz);
OpStackSet<void*>(m_curStackHt, dstPtr);
}
else
{
OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(srcPtr, sz));
}
}
else
{
CorInfoType valCit = fldIt.ToCorInfoType();
switch (sz)
{
case 1:
isUnsigned = CorInfoTypeIsUnsigned(valCit);
if (isUnsigned)
{
OpStackSet<UINT32>(m_curStackHt, *reinterpret_cast<UINT8*>(srcPtr));
}
else
{
OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT8*>(srcPtr));
}
break;
case 2:
isUnsigned = CorInfoTypeIsUnsigned(valCit);
if (isUnsigned)
{
OpStackSet<UINT32>(m_curStackHt, *reinterpret_cast<UINT16*>(srcPtr));
}
else
{
OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT16*>(srcPtr));
}
break;
case 4:
OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT32*>(srcPtr));
break;
case 8:
OpStackSet<INT64>(m_curStackHt, *reinterpret_cast<INT64*>(srcPtr));
break;
default:
_ASSERTE_MSG(false, "LdSFld: this should have exhausted all the possible sizes.");
break;
}
OpStackTypeSet(m_curStackHt, fldIt.StackNormalize());
}
m_curStackHt++;
GCPROTECT_END();
}
void Interpreter::EnsureClassInit(MethodTable* pMT)
{
if (!pMT->IsClassInited())
{
pMT->CheckRestore();
// This is tantamount to a call, so exempt it from the cycle count.
#if INTERP_ILCYCLE_PROFILE
unsigned __int64 startCycles;
bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
#endif // INTERP_ILCYCLE_PROFILE
pMT->CheckRunClassInitThrowing();
#if INTERP_ILCYCLE_PROFILE
unsigned __int64 endCycles;
b = CycleTimer::GetThreadCyclesS(&endCycles); assert(b);
m_exemptCycles += (endCycles - startCycles);
#endif // INTERP_ILCYCLE_PROFILE
}
}
void Interpreter::LdSFldA()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
InterpreterType fldIt;
UINT fldSz;
bool managedMem;
void* srcPtr = NULL;
GCPROTECT_BEGININTERIOR(srcPtr);
StaticFldAddr(CORINFO_ACCESS_ADDRESS, &srcPtr, &fldIt, &fldSz, &managedMem);
OpStackSet<void*>(m_curStackHt, srcPtr);
if (managedMem)
{
// Static variable in managed memory...
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
}
else
{
// RVA is in unmanaged memory.
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
}
m_curStackHt++;
GCPROTECT_END();
}
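
// Implements stsfld: pops the top operand stack entry and stores it into a static field,
// handling reference, struct, and primitive field types.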
void Interpreter::StSFld()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
InterpreterType fldIt;
UINT sz;
bool managedMem;
void* dstPtr = NULL;
GCPROTECT_BEGININTERIOR(dstPtr);
StaticFldAddr(CORINFO_ACCESS_SET, &dstPtr, &fldIt, &sz, &managedMem);
m_curStackHt--;
InterpreterType valIt = OpStackTypeGet(m_curStackHt);
CorInfoType valCit = valIt.ToCorInfoType();
if (valCit == CORINFO_TYPE_VALUECLASS)
{
MethodTable* valClsMT = GetMethodTableFromClsHnd(valIt.ToClassHandle());
if (sz > sizeof(INT64))
{
// Large struct case: value in operand stack is indirect pointer.
void* srcPtr = OpStackGet<void*>(m_curStackHt);
CopyValueClassUnchecked(dstPtr, srcPtr, valClsMT);
LargeStructOperandStackPop(sz, srcPtr);
}
else
{
// Struct value is inline in the operand stack.
CopyValueClassUnchecked(dstPtr, OpStackGetAddr(m_curStackHt, sz), valClsMT);
}
}
else if (valCit == CORINFO_TYPE_CLASS)
{
SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(dstPtr), ObjectToOBJECTREF(OpStackGet<Object*>(m_curStackHt)));
}
else
{
switch (sz)
{
case 1:
*reinterpret_cast<UINT8*>(dstPtr) = OpStackGet<UINT8>(m_curStackHt);
break;
case 2:
*reinterpret_cast<UINT16*>(dstPtr) = OpStackGet<UINT16>(m_curStackHt);
break;
case 4:
*reinterpret_cast<UINT32*>(dstPtr) = OpStackGet<UINT32>(m_curStackHt);
break;
case 8:
*reinterpret_cast<UINT64*>(dstPtr) = OpStackGet<UINT64>(m_curStackHt);
break;
default:
_ASSERTE_MSG(false, "This should have exhausted all the possible sizes.");
break;
}
}
GCPROTECT_END();
BarrierIfVolatile();
}
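
// Implements the typed ldelem.* opcodes: pops an array reference and an index, performs the
// bounds check, and pushes the element, widening narrow integral types to INT32 as required.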
template<typename T, bool IsObjType, CorInfoType cit>
void Interpreter::LdElemWithType()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned arrInd = m_curStackHt - 2;
unsigned indexInd = m_curStackHt - 1;
assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
ThrowOnInvalidPointer(a);
int len = a->GetNumComponents();
CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
if (indexCit == CORINFO_TYPE_INT)
{
int index = OpStackGet<INT32>(indexInd);
if (index < 0 || index >= len) ThrowArrayBoundsException();
GCX_FORBID();
if (IsObjType)
{
OBJECTREF res = reinterpret_cast<PtrArray*>(a)->GetAt(index);
OpStackSet<OBJECTREF>(arrInd, res);
}
else
{
T res = reinterpret_cast<Array<T>*>(a)->GetDirectConstPointerToNonObjectElements()[index];
if (cit == CORINFO_TYPE_INT)
{
// Widen narrow types.
int ires = (int)res;
OpStackSet<int>(arrInd, ires);
}
else
{
OpStackSet<T>(arrInd, res);
}
}
}
else
{
assert(indexCit == CORINFO_TYPE_NATIVEINT);
NativeInt index = OpStackGet<NativeInt>(indexInd);
if (index < 0 || index >= NativeInt(len)) ThrowArrayBoundsException();
GCX_FORBID();
if (IsObjType)
{
OBJECTREF res = reinterpret_cast<PtrArray*>(a)->GetAt(index);
OpStackSet<OBJECTREF>(arrInd, res);
}
else
{
T res = reinterpret_cast<Array<T>*>(a)->GetDirectConstPointerToNonObjectElements()[index];
OpStackSet<T>(arrInd, res);
}
}
OpStackTypeSet(arrInd, InterpreterType(cit));
m_curStackHt--;
}
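
// Implements the typed stelem.* opcodes: pops an array reference, an index, and a value,
// performs bounds checks (and, for object elements, the array covariance check), and stores the element.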
template<typename T, bool IsObjType>
void Interpreter::StElemWithType()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 3);
unsigned arrInd = m_curStackHt - 3;
unsigned indexInd = m_curStackHt - 2;
unsigned valInd = m_curStackHt - 1;
assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
ThrowOnInvalidPointer(a);
int len = a->GetNumComponents();
CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
if (indexCit == CORINFO_TYPE_INT)
{
int index = OpStackGet<INT32>(indexInd);
if (index < 0 || index >= len) ThrowArrayBoundsException();
if (IsObjType)
{
struct _gc {
OBJECTREF val;
OBJECTREF a;
} gc;
gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
gc.a = ObjectToOBJECTREF(a);
GCPROTECT_BEGIN(gc);
if (gc.val != NULL &&
!ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
COMPlusThrow(kArrayTypeMismatchException);
reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
GCPROTECT_END();
}
else
{
GCX_FORBID();
T val = OpStackGet<T>(valInd);
reinterpret_cast<Array<T>*>(a)->GetDirectPointerToNonObjectElements()[index] = val;
}
}
else
{
assert(indexCit == CORINFO_TYPE_NATIVEINT);
NativeInt index = OpStackGet<NativeInt>(indexInd);
if (index < 0 || index >= NativeInt(len)) ThrowArrayBoundsException();
if (IsObjType)
{
struct _gc {
OBJECTREF val;
OBJECTREF a;
} gc;
gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
gc.a = ObjectToOBJECTREF(a);
GCPROTECT_BEGIN(gc);
if (gc.val != NULL &&
!ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
COMPlusThrow(kArrayTypeMismatchException);
reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
GCPROTECT_END();
}
else
{
GCX_FORBID();
T val = OpStackGet<T>(valInd);
reinterpret_cast<Array<T>*>(a)->GetDirectPointerToNonObjectElements()[index] = val;
}
}
m_curStackHt -= 3;
}
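
// Implements ldelem/ldelema (selected by "takeAddress"): resolves the element type token,
// checks the index, and either pushes the element value or a byref to the element
// (performing the array type check for ldelema unless a readonly. prefix was seen).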
template<bool takeAddress>
void Interpreter::LdElem()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned arrInd = m_curStackHt - 2;
unsigned indexInd = m_curStackHt - 1;
unsigned elemTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdElem]);
#endif // INTERP_TRACING
unsigned ilOffset = CurOffset();
CORINFO_CLASS_HANDLE clsHnd = NULL;
if (s_InterpreterUseCaching) clsHnd = GetCachedClassHandle(ilOffset);
if (clsHnd == NULL)
{
CORINFO_RESOLVED_TOKEN elemTypeResolvedTok;
{
GCX_PREEMP();
ResolveToken(&elemTypeResolvedTok, elemTypeTok, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_LdElem));
clsHnd = elemTypeResolvedTok.hClass;
}
if (s_InterpreterUseCaching) CacheClassHandle(ilOffset, clsHnd);
}
CorInfoType elemCit = ::asCorInfoType(clsHnd);
m_ILCodePtr += 5;
InterpreterType elemIt;
if (elemCit == CORINFO_TYPE_VALUECLASS)
{
elemIt = InterpreterType(&m_interpCeeInfo, clsHnd);
}
else
{
elemIt = InterpreterType(elemCit);
}
assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
ThrowOnInvalidPointer(a);
int len = a->GetNumComponents();
NativeInt index;
{
GCX_FORBID();
CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
if (indexCit == CORINFO_TYPE_INT)
{
index = static_cast<NativeInt>(OpStackGet<INT32>(indexInd));
}
else
{
assert(indexCit == CORINFO_TYPE_NATIVEINT);
index = OpStackGet<NativeInt>(indexInd);
}
}
if (index < 0 || index >= len) ThrowArrayBoundsException();
    bool throwTypeMismatch = false;
{
void* elemPtr = a->GetDataPtr() + a->GetComponentSize() * index;
// elemPtr is now a vulnerable byref.
GCX_FORBID();
if (takeAddress)
{
// If the element type is a class type, may have to do a type check.
if (elemCit == CORINFO_TYPE_CLASS)
{
// Unless there was a readonly prefix, which removes the need to
// do the (dynamic) type check.
if (m_readonlyFlag)
{
// Consume the readonly prefix, and don't do the type check below.
m_readonlyFlag = false;
}
else
{
PtrArray* pa = reinterpret_cast<PtrArray*>(a);
// The element array type must be exactly the referent type of the managed
// pointer we'll be creating.
if (pa->GetArrayElementTypeHandle() != TypeHandle(clsHnd))
{
throwTypeMismatch = true;
}
}
}
if (!throwTypeMismatch)
{
// If we're not going to throw the exception, we can take the address.
OpStackSet<void*>(arrInd, elemPtr);
OpStackTypeSet(arrInd, InterpreterType(CORINFO_TYPE_BYREF));
m_curStackHt--;
}
}
else
{
m_curStackHt -= 2;
LdFromMemAddr(elemPtr, elemIt);
return;
}
}
// If we're going to throw, we do the throw outside the GCX_FORBID region above, since it requires GC_TRIGGERS.
if (throwTypeMismatch)
{
COMPlusThrow(kArrayTypeMismatchException);
}
}
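
// Implements stelem (with a type token): resolves the element type, checks the index and the
// value's type, and stores the value into the array, with a covariance check for object elements.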
void Interpreter::StElem()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 3);
unsigned arrInd = m_curStackHt - 3;
unsigned indexInd = m_curStackHt - 2;
unsigned valInd = m_curStackHt - 1;
CorInfoType valCit = OpStackTypeGet(valInd).ToCorInfoType();
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StElem]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE typeFromTok = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_StElem));
m_ILCodePtr += 5;
CorInfoType typeFromTokCit;
{
GCX_PREEMP();
typeFromTokCit = ::asCorInfoType(typeFromTok);
}
size_t sz;
#ifdef _DEBUG
InterpreterType typeFromTokIt;
#endif // _DEBUG
if (typeFromTokCit == CORINFO_TYPE_VALUECLASS)
{
GCX_PREEMP();
sz = getClassSize(typeFromTok);
#ifdef _DEBUG
typeFromTokIt = InterpreterType(&m_interpCeeInfo, typeFromTok);
#endif // _DEBUG
}
else
{
sz = CorInfoTypeSize(typeFromTokCit);
#ifdef _DEBUG
typeFromTokIt = InterpreterType(typeFromTokCit);
#endif // _DEBUG
}
#ifdef _DEBUG
// Instead of debug, I need to parameterize the interpreter at the top level over whether
// to do checks corresponding to verification.
if (typeFromTokIt.StackNormalize().ToCorInfoType() != valCit)
{
// This is obviously only a partial test of the required condition.
VerificationError("Value in stelem does not have the required type.");
}
#endif // _DEBUG
assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
ThrowOnInvalidPointer(a);
int len = a->GetNumComponents();
CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
NativeInt index = 0;
if (indexCit == CORINFO_TYPE_INT)
{
index = static_cast<NativeInt>(OpStackGet<INT32>(indexInd));
}
else
{
index = OpStackGet<NativeInt>(indexInd);
}
if (index < 0 || index >= len) ThrowArrayBoundsException();
if (typeFromTokCit == CORINFO_TYPE_CLASS)
{
struct _gc {
OBJECTREF val;
OBJECTREF a;
} gc;
gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
gc.a = ObjectToOBJECTREF(a);
GCPROTECT_BEGIN(gc);
if (gc.val != NULL &&
!ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
COMPlusThrow(kArrayTypeMismatchException);
reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
GCPROTECT_END();
}
else
{
GCX_FORBID();
        void* destPtr = a->GetDataPtr() + index * sz;
if (typeFromTokCit == CORINFO_TYPE_VALUECLASS)
{
MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
            // We use CopyValueClassUnchecked below so that any GC references embedded in the
            // value class are copied GC-safely (with the required write barriers).  We could do better...
if (sz > sizeof(UINT64))
{
// Large struct case: stack slot contains pointer...
void* src = OpStackGet<void*>(valInd);
CopyValueClassUnchecked(destPtr, src, valClsMT);
LargeStructOperandStackPop(sz, src);
}
else
{
// Small struct case -- is inline in operand stack.
CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
}
}
else
{
switch (sz)
{
case 1:
*reinterpret_cast<INT8*>(destPtr) = OpStackGet<INT8>(valInd);
break;
case 2:
*reinterpret_cast<INT16*>(destPtr) = OpStackGet<INT16>(valInd);
break;
case 4:
*reinterpret_cast<INT32*>(destPtr) = OpStackGet<INT32>(valInd);
break;
case 8:
*reinterpret_cast<INT64*>(destPtr) = OpStackGet<INT64>(valInd);
break;
}
}
}
m_curStackHt -= 3;
}
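
// Implements initblk: pops an address, an initialization value, and a byte count, and fills
// the addressed block with the value.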
void Interpreter::InitBlk()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 3);
unsigned addrInd = m_curStackHt - 3;
unsigned valInd = m_curStackHt - 2;
unsigned sizeInd = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType addrCIT = OpStackTypeGet(addrInd).ToCorInfoType();
bool addrValidType = (addrCIT == CORINFO_TYPE_NATIVEINT || addrCIT == CORINFO_TYPE_BYREF);
#if defined(_AMD64_)
if (s_InterpreterLooseRules && addrCIT == CORINFO_TYPE_LONG)
addrValidType = true;
#endif
if (!addrValidType)
VerificationError("Addr of InitBlk must be native int or &.");
CorInfoType valCIT = OpStackTypeGet(valInd).ToCorInfoType();
if (valCIT != CORINFO_TYPE_INT)
VerificationError("Value of InitBlk must be int");
#endif // _DEBUG
CorInfoType sizeCIT = OpStackTypeGet(sizeInd).ToCorInfoType();
bool isLong = s_InterpreterLooseRules && (sizeCIT == CORINFO_TYPE_LONG);
#ifdef _DEBUG
if (sizeCIT != CORINFO_TYPE_INT && !isLong)
VerificationError("Size of InitBlk must be int");
#endif // _DEBUG
void* addr = OpStackGet<void*>(addrInd);
ThrowOnInvalidPointer(addr);
GCX_FORBID(); // addr is a potentially vulnerable byref.
INT8 val = OpStackGet<INT8>(valInd);
size_t size = (size_t) ((isLong) ? OpStackGet<UINT64>(sizeInd) : OpStackGet<UINT32>(sizeInd));
memset(addr, val, size);
m_curStackHt = addrInd;
m_ILCodePtr += 2;
BarrierIfVolatile();
}
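
// Implements cpblk: pops a destination address, a source address, and a byte count, and copies
// the block (no GC references are expected in the copied memory).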
void Interpreter::CpBlk()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 3);
unsigned destInd = m_curStackHt - 3;
unsigned srcInd = m_curStackHt - 2;
unsigned sizeInd = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType destCIT = OpStackTypeGet(destInd).ToCorInfoType();
bool destValidType = (destCIT == CORINFO_TYPE_NATIVEINT || destCIT == CORINFO_TYPE_BYREF);
#if defined(_AMD64_)
if (s_InterpreterLooseRules && destCIT == CORINFO_TYPE_LONG)
destValidType = true;
#endif
if (!destValidType)
{
VerificationError("Dest addr of CpBlk must be native int or &.");
}
CorInfoType srcCIT = OpStackTypeGet(srcInd).ToCorInfoType();
bool srcValidType = (srcCIT == CORINFO_TYPE_NATIVEINT || srcCIT == CORINFO_TYPE_BYREF);
#if defined(_AMD64_)
if (s_InterpreterLooseRules && srcCIT == CORINFO_TYPE_LONG)
srcValidType = true;
#endif
if (!srcValidType)
VerificationError("Src addr of CpBlk must be native int or &.");
#endif // _DEBUG
CorInfoType sizeCIT = OpStackTypeGet(sizeInd).ToCorInfoType();
bool isLong = s_InterpreterLooseRules && (sizeCIT == CORINFO_TYPE_LONG);
#ifdef _DEBUG
if (sizeCIT != CORINFO_TYPE_INT && !isLong)
VerificationError("Size of CpBlk must be int");
#endif // _DEBUG
void* destAddr = OpStackGet<void*>(destInd);
void* srcAddr = OpStackGet<void*>(srcInd);
ThrowOnInvalidPointer(destAddr);
ThrowOnInvalidPointer(srcAddr);
GCX_FORBID(); // destAddr & srcAddr are potentially vulnerable byrefs.
size_t size = (size_t)((isLong) ? OpStackGet<UINT64>(sizeInd) : OpStackGet<UINT32>(sizeInd));
memcpyNoGCRefs(destAddr, srcAddr, size);
m_curStackHt = destInd;
m_ILCodePtr += 2;
BarrierIfVolatile();
}
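
// Implements box: if the token names a value class, allocates a boxed instance, copies the value
// from the operand stack into it, and replaces the stack entry with the object reference.
// For reference types the operand is left unchanged.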
void Interpreter::Box()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned ind = m_curStackHt - 1;
DWORD boxTypeAttribs = 0;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Box]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE boxTypeClsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Box));
{
GCX_PREEMP();
boxTypeAttribs = m_interpCeeInfo.getClassAttribs(boxTypeClsHnd);
}
m_ILCodePtr += 5;
if (boxTypeAttribs & CORINFO_FLG_VALUECLASS)
{
InterpreterType valIt = OpStackTypeGet(ind);
void* valPtr;
if (valIt.IsLargeStruct(&m_interpCeeInfo))
{
// Operand stack entry is pointer to the data.
valPtr = OpStackGet<void*>(ind);
}
else
{
// Operand stack entry *is* the data.
size_t classSize = getClassSize(boxTypeClsHnd);
valPtr = OpStackGetAddr(ind, classSize);
}
TypeHandle th(boxTypeClsHnd);
if (th.IsTypeDesc())
{
COMPlusThrow(kInvalidOperationException, W("InvalidOperation_TypeCannotBeBoxed"));
}
MethodTable* pMT = th.AsMethodTable();
{
Object* res = OBJECTREFToObject(pMT->Box(valPtr));
GCX_FORBID();
// If we're popping a large struct off the operand stack, make sure we clean up.
if (valIt.IsLargeStruct(&m_interpCeeInfo))
{
LargeStructOperandStackPop(valIt.Size(&m_interpCeeInfo), valPtr);
}
OpStackSet<Object*>(ind, res);
OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
}
}
}
void Interpreter::BoxStructRefAt(unsigned ind, CORINFO_CLASS_HANDLE valCls)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
_ASSERTE_MSG(ind < m_curStackHt, "Precondition");
{
GCX_PREEMP();
_ASSERTE_MSG(m_interpCeeInfo.getClassAttribs(valCls) & CORINFO_FLG_VALUECLASS, "Precondition");
}
_ASSERTE_MSG(OpStackTypeGet(ind).ToCorInfoType() == CORINFO_TYPE_BYREF, "Precondition");
InterpreterType valIt = InterpreterType(&m_interpCeeInfo, valCls);
void* valPtr = OpStackGet<void*>(ind);
TypeHandle th(valCls);
if (th.IsTypeDesc())
COMPlusThrow(kInvalidOperationException,W("InvalidOperation_TypeCannotBeBoxed"));
MethodTable* pMT = th.AsMethodTable();
{
Object* res = OBJECTREFToObject(pMT->Box(valPtr));
GCX_FORBID();
OpStackSet<Object*>(ind, res);
OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
}
}
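
// Implements unbox: checks that the object on top of the stack is a boxed instance of the named
// value type (allowing an enum and its underlying primitive type to match) and replaces it with a
// byref to the boxed data.  Unboxing of Nullable<T> is not yet implemented here.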
void Interpreter::Unbox()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
    } CONTRACTL_END;
assert(m_curStackHt > 0);
unsigned tos = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType tosCIT = OpStackTypeGet(tos).ToCorInfoType();
if (tosCIT != CORINFO_TYPE_CLASS)
VerificationError("Unbox requires that TOS is an object pointer.");
#endif // _DEBUG
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Unbox]);
#endif // INTERP_TRACING
CORINFO_CLASS_HANDLE boxTypeClsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Unbox));
CorInfoHelpFunc unboxHelper;
{
GCX_PREEMP();
unboxHelper = m_interpCeeInfo.getUnBoxHelper(boxTypeClsHnd);
}
void* res = NULL;
Object* obj = OpStackGet<Object*>(tos);
switch (unboxHelper)
{
case CORINFO_HELP_UNBOX:
{
ThrowOnInvalidPointer(obj);
MethodTable* pMT1 = (MethodTable*)boxTypeClsHnd;
MethodTable* pMT2 = obj->GetMethodTable();
if (pMT1->IsEquivalentTo(pMT2))
{
res = OpStackGet<Object*>(tos)->UnBox();
}
else
{
CorElementType type1 = pMT1->GetInternalCorElementType();
CorElementType type2 = pMT2->GetInternalCorElementType();
            // we allow enums and their primitive type to be interchangeable
if (type1 == type2)
{
if ((pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
(pMT2->IsEnum() || pMT2->IsTruePrimitive()))
{
res = OpStackGet<Object*>(tos)->UnBox();
}
}
}
if (res == NULL)
{
COMPlusThrow(kInvalidCastException);
}
}
break;
case CORINFO_HELP_UNBOX_NULLABLE:
{
// For "unbox Nullable<T>", we need to create a new object (maybe in some temporary local
// space (that we reuse every time we hit this IL instruction?), that gets reported to the GC,
// maybe in the GC heap itself). That object will contain an embedded Nullable<T>. Then, we need to
// get a byref to the data within the object.
NYI_INTERP("Unhandled 'unbox' of Nullable<T>.");
}
break;
default:
NYI_INTERP("Unhandled 'unbox' helper.");
}
{
GCX_FORBID();
OpStackSet<void*>(tos, res);
OpStackTypeSet(tos, InterpreterType(CORINFO_TYPE_BYREF));
}
m_ILCodePtr += 5;
}
void Interpreter::Throw()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
    } CONTRACTL_END;
assert(m_curStackHt >= 1);
// Note that we can't decrement the stack height here, since the operand stack
// protects the thrown object. Nor do we need to, since the ostack will be cleared on
// any catch within this method.
unsigned exInd = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType exCIT = OpStackTypeGet(exInd).ToCorInfoType();
if (exCIT != CORINFO_TYPE_CLASS)
{
VerificationError("Can only throw an object.");
}
#endif // _DEBUG
Object* obj = OpStackGet<Object*>(exInd);
ThrowOnInvalidPointer(obj);
OBJECTREF oref = ObjectToOBJECTREF(obj);
if (!IsException(oref->GetMethodTable()))
{
GCPROTECT_BEGIN(oref);
WrapNonCompliantException(&oref);
GCPROTECT_END();
}
COMPlusThrow(oref);
}
void Interpreter::Rethrow()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
    } CONTRACTL_END;
OBJECTREF throwable = GetThread()->LastThrownObject();
COMPlusThrow(throwable);
}
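
// Implements unbox.any: for reference types this behaves like castclass; for value types it
// unboxes and then loads the value (as ldobj would), with special handling for Nullable<T>.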
void Interpreter::UnboxAny()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt > 0);
unsigned tos = m_curStackHt - 1;
unsigned boxTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
m_ILCodePtr += 5;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_UnboxAny]);
#endif // INTERP_TRACING
CORINFO_RESOLVED_TOKEN boxTypeResolvedTok;
CORINFO_CLASS_HANDLE boxTypeClsHnd;
DWORD boxTypeAttribs = 0;
{
GCX_PREEMP();
ResolveToken(&boxTypeResolvedTok, boxTypeTok, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_UnboxAny));
boxTypeClsHnd = boxTypeResolvedTok.hClass;
boxTypeAttribs = m_interpCeeInfo.getClassAttribs(boxTypeClsHnd);
}
CorInfoType unboxCIT = OpStackTypeGet(tos).ToCorInfoType();
if (unboxCIT != CORINFO_TYPE_CLASS)
VerificationError("Type mismatch in UNBOXANY.");
if ((boxTypeAttribs & CORINFO_FLG_VALUECLASS) == 0)
{
Object* obj = OpStackGet<Object*>(tos);
if (obj != NULL && !ObjIsInstanceOf(obj, TypeHandle(boxTypeClsHnd), TRUE))
{
            UNREACHABLE(); // ObjIsInstanceOf will throw if the cast can't be done
}
}
else
{
CorInfoHelpFunc unboxHelper;
{
GCX_PREEMP();
unboxHelper = m_interpCeeInfo.getUnBoxHelper(boxTypeClsHnd);
}
// Important that this *not* be factored out with the identical statement in the "if" branch:
// delay read from GC-protected operand stack until after COOP-->PREEMP transition above.
Object* obj = OpStackGet<Object*>(tos);
switch (unboxHelper)
{
case CORINFO_HELP_UNBOX:
{
ThrowOnInvalidPointer(obj);
MethodTable* pMT1 = (MethodTable*)boxTypeClsHnd;
MethodTable* pMT2 = obj->GetMethodTable();
void* res = NULL;
if (pMT1->IsEquivalentTo(pMT2))
{
res = OpStackGet<Object*>(tos)->UnBox();
}
else
{
CorElementType type1 = pMT1->GetInternalCorElementType();
CorElementType type2 = pMT2->GetInternalCorElementType();
                // we allow enums and their primitive type to be interchangeable
if (type1 == type2)
{
if ((pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
(pMT2->IsEnum() || pMT2->IsTruePrimitive()))
{
res = OpStackGet<Object*>(tos)->UnBox();
}
}
}
if (res == NULL)
{
COMPlusThrow(kInvalidCastException);
}
// As the ECMA spec says, the rest is like a "ldobj".
LdObjValueClassWork(boxTypeClsHnd, tos, res);
}
break;
case CORINFO_HELP_UNBOX_NULLABLE:
{
InterpreterType it = InterpreterType(&m_interpCeeInfo, boxTypeClsHnd);
size_t sz = it.Size(&m_interpCeeInfo);
if (sz > sizeof(INT64))
{
void* destPtr = LargeStructOperandStackPush(sz);
if (!Nullable::UnBox(destPtr, ObjectToOBJECTREF(obj), (MethodTable*)boxTypeClsHnd))
{
COMPlusThrow(kInvalidCastException);
}
OpStackSet<void*>(tos, destPtr);
}
else
{
INT64 dest = 0;
if (!Nullable::UnBox(&dest, ObjectToOBJECTREF(obj), (MethodTable*)boxTypeClsHnd))
{
COMPlusThrow(kInvalidCastException);
}
OpStackSet<INT64>(tos, dest);
}
OpStackTypeSet(tos, it.StackNormalize());
}
break;
default:
NYI_INTERP("Unhandled 'unbox.any' helper.");
}
}
}
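
// Implements ldlen: replaces the array reference on top of the operand stack with its element count.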
void Interpreter::LdLen()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 1);
unsigned arrInd = m_curStackHt - 1;
assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
GCX_FORBID();
ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
ThrowOnInvalidPointer(a);
int len = a->GetNumComponents();
OpStackSet<NativeUInt>(arrInd, NativeUInt(len));
// The ECMA spec says that the type of the length value is NATIVEUINT, but this
// doesn't make any sense -- unsigned types are not stack-normalized. So I'm
// using NATIVEINT, to get the width right.
OpStackTypeSet(arrInd, InterpreterType(CORINFO_TYPE_NATIVEINT));
}
void Interpreter::DoCall(bool virtualCall)
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#endif
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Call]);
#endif // INTERP_TRACING
DoCallWork(virtualCall);
m_ILCodePtr += 5;
}
CORINFO_CONTEXT_HANDLE InterpreterMethodInfo::GetPreciseGenericsContext(Object* thisArg, void* genericsCtxtArg)
{
// If the caller has a generic argument, then we need to get the exact methodContext.
// There are several possibilities that lead to a generic argument:
// 1) Static method of generic class: generic argument is the method table of the class.
// 2) generic method of a class: generic argument is the precise MethodDesc* of the method.
if (GetFlag<InterpreterMethodInfo::Flag_hasGenericsContextArg>())
{
assert(GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>() || GetFlag<InterpreterMethodInfo::Flag_typeHasGenericArgs>());
if (GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>())
{
return MAKE_METHODCONTEXT(reinterpret_cast<CORINFO_METHOD_HANDLE>(genericsCtxtArg));
}
else
{
MethodTable* methodClass = reinterpret_cast<MethodDesc*>(m_method)->GetMethodTable();
MethodTable* contextClass = reinterpret_cast<MethodTable*>(genericsCtxtArg)->GetMethodTableMatchingParentClass(methodClass);
return MAKE_CLASSCONTEXT(contextClass);
}
}
// TODO: This condition isn't quite right. If the actual class is a subtype of the declaring type of the method,
    // then it might be in another module, and the scope and context won't agree.
else if (GetFlag<InterpreterMethodInfo::Flag_typeHasGenericArgs>()
&& !GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>()
&& GetFlag<InterpreterMethodInfo::Flag_hasThisArg>()
&& GetFlag<InterpreterMethodInfo::Flag_thisArgIsObjPtr>() && thisArg != NULL)
{
MethodTable* methodClass = reinterpret_cast<MethodDesc*>(m_method)->GetMethodTable();
MethodTable* contextClass = thisArg->GetMethodTable()->GetMethodTableMatchingParentClass(methodClass);
return MAKE_CLASSCONTEXT(contextClass);
}
else
{
return MAKE_METHODCONTEXT(m_method);
}
}
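
// Shared implementation for call/callvirt (and for calls made on behalf of other opcodes):
// resolves the target method (using the per-call-site cache when caching is enabled), short-circuits
// recognized intrinsics and "dead simple" getters, and then sets up the arguments and performs the call.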
void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_TOKEN* methTokPtr, CORINFO_CALL_INFO* callInfoPtr)
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#endif
#if INTERP_ILCYCLE_PROFILE
#if 0
// XXX
unsigned __int64 callStartCycles;
bool b = CycleTimer::GetThreadCyclesS(&callStartCycles); assert(b);
unsigned __int64 callStartExemptCycles = m_exemptCycles;
#endif
#endif // INTERP_ILCYCLE_PROFILE
#if INTERP_TRACING
InterlockedIncrement(&s_totalInterpCalls);
#endif // INTERP_TRACING
unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
// It's possible for an IL method to push a capital-F Frame. If so, we pop it and save it;
// we'll push it back on after our GCPROTECT frame is popped.
Frame* ilPushedFrame = NULL;
// We can't protect "thisArg" with a GCPROTECT, because this pushes a Frame, and there
// exist managed methods that push (and pop) Frames -- so that the Frame chain does not return
// to its original state after a call. Therefore, we can't have a Frame on the stack over the duration
// of a call. (I assume that any method that calls a Frame-pushing IL method performs a matching
// call to pop that Frame before the caller method completes. If this were not true, if one method could push
// a Frame, but defer the pop to its caller, then we could *never* use a Frame in the interpreter, and
// our implementation plan would be doomed.)
assert(m_callThisArg == NULL);
m_callThisArg = thisArg;
// Have we already cached a MethodDescCallSite for this call? (We do this only in loops
// in the current execution).
unsigned iloffset = CurOffset();
CallSiteCacheData* pCscd = NULL;
if (s_InterpreterUseCaching) pCscd = GetCachedCallInfo(iloffset);
// If this is true, then we should not cache this call site.
bool doNotCache;
CORINFO_RESOLVED_TOKEN methTok;
CORINFO_CALL_INFO callInfo;
MethodDesc* methToCall = NULL;
CORINFO_CLASS_HANDLE exactClass = NULL;
CORINFO_SIG_INFO_SMALL sigInfo;
if (pCscd != NULL)
{
GCX_PREEMP();
methToCall = pCscd->m_pMD;
sigInfo = pCscd->m_sigInfo;
doNotCache = true; // We already have a cache entry.
}
else
{
doNotCache = false; // Until we determine otherwise.
if (callInfoPtr == NULL)
{
GCX_PREEMP();
// callInfoPtr and methTokPtr must either both be NULL, or neither.
assert(methTokPtr == NULL);
methTokPtr = &methTok;
ResolveToken(methTokPtr, tok, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_Call));
OPCODE opcode = (OPCODE)(*m_ILCodePtr);
m_interpCeeInfo.getCallInfo(methTokPtr,
m_constrainedFlag ? & m_constrainedResolvedToken : NULL,
m_methInfo->m_method,
                                        // this is how impImportCall invokes getCallInfo
combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM,
CORINFO_CALLINFO_SECURITYCHECKS),
(opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
: CORINFO_CALLINFO_NONE),
&callInfo);
#if INTERP_ILCYCLE_PROFILE
#if 0
if (virtualCall)
{
unsigned __int64 callEndCycles;
b = CycleTimer::GetThreadCyclesS(&callEndCycles); assert(b);
unsigned __int64 delta = (callEndCycles - callStartCycles);
delta -= (m_exemptCycles - callStartExemptCycles);
s_callCycles += delta;
s_calls++;
}
#endif
#endif // INTERP_ILCYCLE_PROFILE
callInfoPtr = &callInfo;
assert(!callInfoPtr->exactContextNeedsRuntimeLookup);
methToCall = reinterpret_cast<MethodDesc*>(methTok.hMethod);
exactClass = methTok.hClass;
}
else
{
// callInfoPtr and methTokPtr must either both be NULL, or neither.
assert(methTokPtr != NULL);
assert(!callInfoPtr->exactContextNeedsRuntimeLookup);
methToCall = reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod);
exactClass = methTokPtr->hClass;
}
// We used to take the sigInfo from the callInfo here, but that isn't precise, since
// we may have made "methToCall" more precise wrt generics than the method handle in
        // the callinfo.  So look up the more precise signature.
GCX_PREEMP();
CORINFO_SIG_INFO sigInfoFull;
m_interpCeeInfo.getMethodSig(CORINFO_METHOD_HANDLE(methToCall), &sigInfoFull);
sigInfo.retTypeClass = sigInfoFull.retTypeClass;
sigInfo.numArgs = sigInfoFull.numArgs;
sigInfo.callConv = sigInfoFull.callConv;
sigInfo.retType = sigInfoFull.retType;
}
// Point A in our cycle count.
    // Is the method an intrinsic?  If so, and if it's one we've written special-case code for,
    // handle it intrinsically.
CorInfoIntrinsics intrinsicId;
{
GCX_PREEMP();
intrinsicId = m_interpCeeInfo.getIntrinsicID(CORINFO_METHOD_HANDLE(methToCall));
}
#if INTERP_TRACING
if (intrinsicId != CORINFO_INTRINSIC_Illegal)
InterlockedIncrement(&s_totalInterpCallsToIntrinsics);
#endif // INTERP_TRACING
bool didIntrinsic = false;
if (!m_constrainedFlag)
{
switch (intrinsicId)
{
case CORINFO_INTRINSIC_StringLength:
DoStringLength(); didIntrinsic = true;
break;
case CORINFO_INTRINSIC_StringGetChar:
DoStringGetChar(); didIntrinsic = true;
break;
case CORINFO_INTRINSIC_GetTypeFromHandle:
// This is an identity transformation. (At least until I change LdToken to
// return a RuntimeTypeHandle struct...which is a TODO.)
DoGetTypeFromHandle();
didIntrinsic = true;
break;
#if INTERP_ILSTUBS
case CORINFO_INTRINSIC_StubHelpers_GetStubContext:
OpStackSet<void*>(m_curStackHt, GetStubContext());
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
m_curStackHt++; didIntrinsic = true;
break;
case CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr:
OpStackSet<void*>(m_curStackHt, GetStubContextAddr());
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
m_curStackHt++; didIntrinsic = true;
break;
#endif // INTERP_ILSTUBS
default:
#if INTERP_TRACING
InterlockedIncrement(&s_totalInterpCallsToIntrinsicsUnhandled);
#endif // INTERP_TRACING
break;
}
// Plus some other calls that we're going to treat "like" intrinsics...
if (methToCall == MscorlibBinder::GetMethod(METHOD__STUBHELPERS__SET_LAST_ERROR))
{
// If we're interpreting a method that calls "SetLastError", it's very likely that the call(i) whose
// error we're trying to capture was performed with MethodDescCallSite machinery that itself trashes
// the last error. We solve this by saving the last error in a special interpreter-specific field of
// "Thread" in that case, and essentially implement SetLastError here, taking that field as the
// source for the last error.
Thread* thrd = GetThread();
thrd->m_dwLastError = thrd->m_dwLastErrorInterp;
didIntrinsic = true;
}
}
if (didIntrinsic)
{
if (s_InterpreterUseCaching && !doNotCache)
{
// Cache the token resolution result...
pCscd = new CallSiteCacheData(methToCall, sigInfo);
CacheCallInfo(iloffset, pCscd);
}
// Now we can return.
return;
}
// Handle other simple special cases:
#if FEATURE_INTERPRETER_DEADSIMPLE_OPT
#ifndef DACCESS_COMPILE
// Dead simple static getters.
InterpreterMethodInfo* calleeInterpMethInfo;
if (GetMethodHandleToInterpMethInfoPtrMap()->Lookup(CORINFO_METHOD_HANDLE(methToCall), &calleeInterpMethInfo))
{
if (calleeInterpMethInfo->GetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetter>())
{
if (methToCall->IsStatic())
{
// TODO
}
else
{
ILOffsetToItemCache* calleeCache;
{
Object* thisArg = OpStackGet<Object*>(m_curStackHt-1);
GCX_FORBID();
// We pass NULL for the generic context arg, because a dead simple getter takes none, by definition.
calleeCache = calleeInterpMethInfo->GetCacheForCall(thisArg, /*genericsContextArg*/NULL);
}
// We've interpreted the getter at least once, so the cache for *some* generics context is populated -- but maybe not
// this one. We're hoping that it usually is.
if (calleeCache != NULL)
{
CachedItem cachedItem;
unsigned offsetOfLd;
if (calleeInterpMethInfo->GetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetterIsDbgForm>())
offsetOfLd = ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg;
else
offsetOfLd = ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt;
bool b = calleeCache->GetItem(offsetOfLd, cachedItem);
_ASSERTE_MSG(b, "If the cache exists for this generic context, it should have an entry for the LdFld.");
_ASSERTE_MSG(cachedItem.m_tag == CIK_InstanceField, "If it's there, it should be an instance field cache.");
LdFld(cachedItem.m_value.m_instanceField);
#if INTERP_TRACING
InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGetters);
InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGettersShortCircuited);
#endif // INTERP_TRACING
return;
}
}
}
}
#endif // DACCESS_COMPILE
#endif // FEATURE_INTERPRETER_DEADSIMPLE_OPT
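// Next, work out how many ARG_SLOTs the outgoing call will need: the IL arguments (plus "this"),
// one slot reserved for a possible ret buff argument, and extra slots for a generics type argument
// and/or a vararg cookie. For vararg call sites the signature is re-read from the call site itself,
// since the declared signature does not describe the actual arguments passed.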
unsigned totalSigArgs;
CORINFO_VARARGS_HANDLE vaSigCookie = nullptr;
if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
(sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
GCX_PREEMP();
CORINFO_SIG_INFO sig;
m_interpCeeInfo.findCallSiteSig(m_methInfo->m_module, methTokPtr->token, MAKE_METHODCONTEXT(m_methInfo->m_method), &sig);
sigInfo.retTypeClass = sig.retTypeClass;
sigInfo.numArgs = sig.numArgs;
sigInfo.callConv = sig.callConv;
sigInfo.retType = sig.retType;
// Adding 'this' pointer because numArgs doesn't include the this pointer.
totalSigArgs = sigInfo.numArgs + sigInfo.hasThis();
if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
Module* module = GetModule(sig.scope);
vaSigCookie = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sig.pSig, sig.cbSig)));
}
doNotCache = true;
}
else
{
totalSigArgs = sigInfo.totalILArgs();
}
// Note that "totalNativeArgs()" includes space for ret buff arg.
unsigned nSlots = totalSigArgs + 1;
if (sigInfo.hasTypeArg()) nSlots++;
if (sigInfo.isVarArg()) nSlots++;
DelegateCtorArgs ctorData;
// If any of these are non-null, they will be pushed as extra arguments (see the code below).
ctorData.pArg3 = NULL;
ctorData.pArg4 = NULL;
ctorData.pArg5 = NULL;
// Since we make "doNotCache" true below, we'll never have a non-null "pCscd" for a delegate
// constructor. But we have to check for a cached method first, since callInfoPtr may be null in the cached case.
if (pCscd == NULL && callInfoPtr->classFlags & CORINFO_FLG_DELEGATE && callInfoPtr->methodFlags & CORINFO_FLG_CONSTRUCTOR)
{
// We won't cache this case.
doNotCache = true;
_ASSERTE_MSG(!sigInfo.hasTypeArg(), "I assume that this isn't possible.");
GCX_PREEMP();
ctorData.pMethod = methToCall;
// Second argument to delegate constructor will be code address of the function the delegate wraps.
assert(TOSIsPtr() && OpStackTypeGet(m_curStackHt-1).ToCorInfoType() != CORINFO_TYPE_BYREF);
CORINFO_METHOD_HANDLE targetMethodHnd = GetFunctionPointerStack()[m_curStackHt-1];
assert(targetMethodHnd != NULL);
CORINFO_METHOD_HANDLE alternateCtorHnd = m_interpCeeInfo.GetDelegateCtor(reinterpret_cast<CORINFO_METHOD_HANDLE>(methToCall), methTokPtr->hClass, targetMethodHnd, &ctorData);
MethodDesc* alternateCtor = reinterpret_cast<MethodDesc*>(alternateCtorHnd);
if (alternateCtor != methToCall)
{
methToCall = alternateCtor;
// Translate the method address argument from a method handle to the actual callable code address.
void* val = (void *)((MethodDesc *)targetMethodHnd)->GetMultiCallableAddrOfCode();
// Change the method argument to the code pointer.
OpStackSet<void*>(m_curStackHt-1, val);
// Now if there are extra arguments, add them to the number of slots; we'll push them on the
// arg list later.
if (ctorData.pArg3) nSlots++;
if (ctorData.pArg4) nSlots++;
if (ctorData.pArg5) nSlots++;
}
}
// Make sure that the operand stack has the required number of arguments.
// (Note that this is IL args, not native.)
//
// The total number of arguments on the IL stack. Initially we assume that all the IL arguments
// the callee expects are on the stack, but this may be adjusted downwards if the "this" argument
// is provided by an allocation (the call is to a constructor).
unsigned totalArgsOnILStack = totalSigArgs;
if (m_callThisArg != NULL)
{
assert(totalArgsOnILStack > 0);
totalArgsOnILStack--;
}
#if defined(FEATURE_HFA)
// Does the callee have an HFA return type?
unsigned HFAReturnArgSlots = 0;
{
GCX_PREEMP();
if (sigInfo.retType == CORINFO_TYPE_VALUECLASS
&& CorInfoTypeIsFloatingPoint(m_interpCeeInfo.getHFAType(sigInfo.retTypeClass))
&& (sigInfo.getCallConv() & CORINFO_CALLCONV_VARARG) == 0)
{
HFAReturnArgSlots = getClassSize(sigInfo.retTypeClass);
// Round up to a multiple of double size.
HFAReturnArgSlots = (HFAReturnArgSlots + sizeof(ARG_SLOT) - 1) / sizeof(ARG_SLOT);
}
}
#endif
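// Allocate the outgoing argument and argument-type arrays: a small fixed-size local buffer when it
// fits, otherwise _alloca'd stack space (with extra slots accounted for an HFA return value on ARM).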
// Point B
const unsigned LOCAL_ARG_SLOTS = 8;
ARG_SLOT localArgs[LOCAL_ARG_SLOTS];
InterpreterType localArgTypes[LOCAL_ARG_SLOTS];
ARG_SLOT* args;
InterpreterType* argTypes;
#if defined(_X86_)
unsigned totalArgSlots = nSlots;
#elif defined(_ARM_) || defined(_ARM64_)
// ARM64TODO: Verify that the following statement is correct for ARM64.
unsigned totalArgSlots = nSlots + HFAReturnArgSlots;
#elif defined(_AMD64_)
unsigned totalArgSlots = nSlots;
#else
#error "unsupported platform"
#endif
if (totalArgSlots <= LOCAL_ARG_SLOTS)
{
args = &localArgs[0];
argTypes = &localArgTypes[0];
}
else
{
args = (ARG_SLOT*)_alloca(totalArgSlots * sizeof(ARG_SLOT));
#if defined(_ARM_)
// The HFA return buffer, if any, is assumed to be at a negative
// offset from the IL arg pointer, so adjust that pointer upward.
args = args + HFAReturnArgSlots;
#endif // defined(_ARM_)
argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType));
}
// Make sure that we don't scan any of these until we overwrite them with
// the real types of the arguments.
InterpreterType undefIt(CORINFO_TYPE_UNDEF);
for (unsigned i = 0; i < nSlots; i++) argTypes[i] = undefIt;
// GC-protect the argument array (as byrefs).
m_args = args; m_argsSize = nSlots; m_argTypes = argTypes;
// This is the index into the "args" array (where we copy the value to).
int curArgSlot = 0;
// The operand stack index of the first IL argument.
assert(m_curStackHt >= totalArgsOnILStack);
int argsBase = m_curStackHt - totalArgsOnILStack;
// Current on-stack argument index.
unsigned arg = 0;
// We do "this" -- in the case of a constructor, we "shuffle" the "m_callThisArg" argument in as the first
// argument -- it isn't on the IL operand stack.
if (m_constrainedFlag)
{
_ASSERT(m_callThisArg == NULL); // "m_callThisArg" non-null only for .ctor, which are not callvirts.
CorInfoType argCIT = OpStackTypeGet(argsBase + arg).ToCorInfoType();
if (argCIT != CORINFO_TYPE_BYREF)
VerificationError("This arg of constrained call must be managed pointer.");
// We only cache for the CORINFO_NO_THIS_TRANSFORM case, so we may assume that if we have a cached call site,
// there's no thisTransform to perform.
if (pCscd == NULL)
{
switch (callInfoPtr->thisTransform)
{
case CORINFO_NO_THIS_TRANSFORM:
// It is a constrained call on a method implemented by a value type; this is already the proper managed pointer.
break;
case CORINFO_DEREF_THIS:
#ifdef _DEBUG
{
GCX_PREEMP();
DWORD clsAttribs = m_interpCeeInfo.getClassAttribs(m_constrainedResolvedToken.hClass);
assert((clsAttribs & CORINFO_FLG_VALUECLASS) == 0);
}
#endif // _DEBUG
{
// As per the spec, dereference the byref to the "this" pointer, and substitute it as the new "this" pointer.
GCX_FORBID();
Object** objPtrPtr = OpStackGet<Object**>(argsBase + arg);
OpStackSet<Object*>(argsBase + arg, *objPtrPtr);
OpStackTypeSet(argsBase + arg, InterpreterType(CORINFO_TYPE_CLASS));
}
doNotCache = true;
break;
case CORINFO_BOX_THIS:
// This is the case where the call is to a virtual method of Object that the given
// struct class does not override -- the struct must be boxed, so that the
// method can be invoked as a virtual.
BoxStructRefAt(argsBase + arg, m_constrainedResolvedToken.hClass);
doNotCache = true;
break;
}
exactClass = m_constrainedResolvedToken.hClass;
{
GCX_PREEMP();
DWORD exactClassAttribs = m_interpCeeInfo.getClassAttribs(exactClass);
// If the constraint type is a value class, then it is the exact class (which will be the
// "owner type" in the MDCS below.) If it is not, leave it as the (precise) interface method.
if (exactClassAttribs & CORINFO_FLG_VALUECLASS)
{
MethodTable* exactClassMT = GetMethodTableFromClsHnd(exactClass);
// Find the method on exactClass corresponding to methToCall.
methToCall = MethodDesc::FindOrCreateAssociatedMethodDesc(
reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod), // pPrimaryMD
exactClassMT, // pExactMT
FALSE, // forceBoxedEntryPoint
methToCall->GetMethodInstantiation(), // methodInst
FALSE); // allowInstParam
}
else
{
exactClass = methTokPtr->hClass;
}
}
}
// We've consumed the constraint, so reset the flag.
m_constrainedFlag = false;
}
if (pCscd == NULL)
{
if (callInfoPtr->methodFlags & CORINFO_FLG_STATIC)
{
MethodDesc* pMD = reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod);
EnsureClassInit(pMD->GetMethodTable());
}
}
// Point C
// We must do anything that might make a COOP->PREEMP transition before copying arguments out of the
// operand stack (where they are GC-protected) into the args array (where they are not).
#ifdef _DEBUG
const char* clsOfMethToCallName;
const char* methToCallName = NULL;
{
GCX_PREEMP();
methToCallName = m_interpCeeInfo.getMethodName(CORINFO_METHOD_HANDLE(methToCall), &clsOfMethToCallName);
}
#if INTERP_TRACING
if (strncmp(methToCallName, "get_", 4) == 0)
{
InterlockedIncrement(&s_totalInterpCallsToGetters);
size_t offsetOfLd;
if (IsDeadSimpleGetter(&m_interpCeeInfo, methToCall, &offsetOfLd))
{
InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGetters);
}
}
else if (strncmp(methToCallName, "set_", 4) == 0)
{
InterlockedIncrement(&s_totalInterpCallsToSetters);
}
#endif // INTERP_TRACING
// Only do this check on the first call, since it should be the same each time.
if (pCscd == NULL)
{
// Ensure that any value types used as argument types are loaded. This property is checked
// by the MethodDescCallSite mechanisms. Since enums are freely convertible with their underlying
// integer type, this is at least one case where a caller may push a value convertible to a value type
// without any code having caused the value type to be loaded. This is DEBUG-only because if the callee
// actually uses the integer-type value as the enum value type, it will have loaded the value type.
MetaSig ms(methToCall);
CorElementType argType;
while ((argType = ms.NextArg()) != ELEMENT_TYPE_END)
{
if (argType == ELEMENT_TYPE_VALUETYPE)
{
TypeHandle th = ms.GetLastTypeHandleThrowing(ClassLoader::LoadTypes);
CONSISTENCY_CHECK(th.CheckFullyLoaded());
CONSISTENCY_CHECK(th.IsRestored_NoLogging());
}
}
}
#endif
// CYCLE PROFILE: BEFORE ARG PROCESSING.
if (sigInfo.hasThis())
{
if (m_callThisArg != NULL)
{
if (size_t(m_callThisArg) == 0x1)
{
args[curArgSlot] = NULL;
}
else
{
args[curArgSlot] = PtrToArgSlot(m_callThisArg);
}
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_BYREF);
}
else
{
args[curArgSlot] = PtrToArgSlot(OpStackGet<void*>(argsBase + arg));
argTypes[curArgSlot] = OpStackTypeGet(argsBase + arg);
arg++;
}
// AV -> NullRef translation is NYI for the interpreter,
// so we should manually check and throw the correct exception.
if (args[curArgSlot] == NULL)
{
// If we're calling a constructor, we bypass this check since the runtime
// should have thrown OOM if it was unable to allocate an instance.
if (m_callThisArg == NULL)
{
assert(!methToCall->IsStatic());
ThrowNullPointerException();
}
// ...except in the case of strings, which are both
// allocated and initialized by their special constructor.
else
{
assert(methToCall->IsCtor() && methToCall->GetMethodTable()->IsString());
}
}
curArgSlot++;
}
// This is the argument slot that will be used to hold the return value.
ARG_SLOT retVal = 0;
#if !defined(_ARM_) && !defined(UNIX_AMD64_ABI)
_ASSERTE (NUMBER_RETURNVALUE_SLOTS == 1);
#endif
// If the return type is a structure, then these will be initialized.
CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
InterpreterType retTypeIt;
size_t retTypeSz = 0;
// If non-null, space allocated to hold a large struct return value. Should be deleted later.
// (I could probably optimize this by popping all the arguments first, then allocating space for the return value
// on the large structure operand stack, and passing a pointer directly to that space, avoiding the extra
// copy we have below. But this seemed more expedient, and this should be a pretty rare case.)
BYTE* pLargeStructRetVal = NULL;
// If there's a "GetFlag<Flag_hasRetBuffArg>()" struct return value, it will be stored in this variable if it fits,
// otherwise, we'll dynamically allocate memory for it.
ARG_SLOT smallStructRetVal = 0;
// We should have no return buffer temp space registered here...unless this is a constructor, in which
// case it will return void. In particular, if the return type is VALUE_CLASS, then this should be NULL.
_ASSERTE_MSG((pCscd != NULL) || sigInfo.retType == CORINFO_TYPE_VOID || m_structRetValITPtr == NULL, "Invariant.");
// Is the return value a struct with a ret buff?
_ASSERTE_MSG(methToCall != NULL, "assumption");
bool hasRetBuffArg = false;
if (sigInfo.retType == CORINFO_TYPE_VALUECLASS || sigInfo.retType == CORINFO_TYPE_REFANY)
{
hasRetBuffArg = !!methToCall->HasRetBuffArg();
retTypeClsHnd = sigInfo.retTypeClass;
MetaSig ms(methToCall);
// On ARM, if there's an HFA return type, we must also allocate a return buffer, since the
// MDCS calling convention requires it.
if (hasRetBuffArg
#if defined(_ARM_)
|| HFAReturnArgSlots > 0
#endif // defined(_ARM_)
)
{
assert(retTypeClsHnd != NULL);
retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
retTypeSz = retTypeIt.Size(&m_interpCeeInfo);
#if defined(_ARM_)
if (HFAReturnArgSlots > 0)
{
args[curArgSlot] = PtrToArgSlot(args - HFAReturnArgSlots);
}
else
#endif // defined(_ARM_)
if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
{
size_t retBuffSize = retTypeSz;
// If the target architecture can sometimes return a struct in several registers,
// MethodDescCallSite will reserve a return value array big enough to hold the maximum.
// It will then copy *all* of this into the return buffer area we allocate. So make sure
// we allocate at least that much.
#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
retBuffSize = max(retTypeSz, ENREGISTERED_RETURNTYPE_MAXSIZE);
#endif // ENREGISTERED_RETURNTYPE_MAXSIZE
pLargeStructRetVal = (BYTE*)_alloca(retBuffSize);
// Clear this in case a GC happens.
for (unsigned i = 0; i < retTypeSz; i++) pLargeStructRetVal[i] = 0;
// Register this as location needing GC.
m_structRetValTempSpace = pLargeStructRetVal;
// Set it as the return buffer.
args[curArgSlot] = PtrToArgSlot(pLargeStructRetVal);
}
else
{
// Clear this in case a GC happens.
smallStructRetVal = 0;
// Register this as location needing GC.
m_structRetValTempSpace = &smallStructRetVal;
// Set it as the return buffer.
args[curArgSlot] = PtrToArgSlot(&smallStructRetVal);
}
m_structRetValITPtr = &retTypeIt;
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
curArgSlot++;
}
else
{
// The struct type might "normalize" to a primitive type.
if (retTypeClsHnd == NULL)
{
retTypeIt = InterpreterType(CEEInfo::asCorInfoType(ms.GetReturnTypeNormalized()));
}
else
{
retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
}
}
}
if (((sigInfo.callConv & CORINFO_CALLCONV_VARARG) != 0) && sigInfo.isVarArg())
{
assert(vaSigCookie != nullptr);
args[curArgSlot] = PtrToArgSlot(vaSigCookie);
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
curArgSlot++;
}
if (pCscd == NULL)
{
if (sigInfo.hasTypeArg())
{
GCX_PREEMP();
// We will find the instantiating stub for the method, and call that instead.
CORINFO_SIG_INFO sigInfoFull;
Instantiation methodInst = methToCall->GetMethodInstantiation();
BOOL fNeedUnboxingStub = virtualCall && TypeHandle(exactClass).IsValueType() && methToCall->IsVirtual();
methToCall = MethodDesc::FindOrCreateAssociatedMethodDesc(methToCall,
TypeHandle(exactClass).GetMethodTable(), fNeedUnboxingStub, methodInst, FALSE, TRUE);
m_interpCeeInfo.getMethodSig(CORINFO_METHOD_HANDLE(methToCall), &sigInfoFull);
sigInfo.retTypeClass = sigInfoFull.retTypeClass;
sigInfo.numArgs = sigInfoFull.numArgs;
sigInfo.callConv = sigInfoFull.callConv;
sigInfo.retType = sigInfoFull.retType;
}
if (sigInfo.hasTypeArg())
{
// If we still have a type argument, we're calling an ArrayOpStub and need to pass the array TypeHandle.
assert(methToCall->IsArray());
doNotCache = true;
args[curArgSlot] = PtrToArgSlot(exactClass);
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
curArgSlot++;
}
}
// Now we do the non-this arguments.
size_t largeStructSpaceToPop = 0;
for (; arg < totalArgsOnILStack; arg++)
{
InterpreterType argIt = OpStackTypeGet(argsBase + arg);
size_t sz = OpStackTypeGet(argsBase + arg).Size(&m_interpCeeInfo);
switch (sz)
{
case 1:
args[curArgSlot] = OpStackGet<INT8>(argsBase + arg);
break;
case 2:
args[curArgSlot] = OpStackGet<INT16>(argsBase + arg);
break;
case 4:
args[curArgSlot] = OpStackGet<INT32>(argsBase + arg);
break;
case 8:
default:
if (sz > 8)
{
void* srcPtr = OpStackGet<void*>(argsBase + arg);
args[curArgSlot] = PtrToArgSlot(srcPtr);
if (!IsInLargeStructLocalArea(srcPtr))
largeStructSpaceToPop += sz;
}
else
{
args[curArgSlot] = OpStackGet<INT64>(argsBase + arg);
}
break;
}
argTypes[curArgSlot] = argIt;
curArgSlot++;
}
if (ctorData.pArg3)
{
args[curArgSlot] = PtrToArgSlot(ctorData.pArg3);
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
curArgSlot++;
}
if (ctorData.pArg4)
{
args[curArgSlot] = PtrToArgSlot(ctorData.pArg4);
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
curArgSlot++;
}
if (ctorData.pArg5)
{
args[curArgSlot] = PtrToArgSlot(ctorData.pArg5);
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
curArgSlot++;
}
// CYCLE PROFILE: AFTER ARG PROCESSING.
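// Dispatch the call. For virtual calls we first resolve the exact target from the receiver, then make
// sure the target has code (running the prestub if necessary). If the resolved target turns out to be
// an interpretation stub, we skip the stub and re-enter the interpreter directly via InterpretMethodBody;
// otherwise we call out through a MethodDescCallSite.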
{
Thread* thr = GetThread();
Object** thisArgHnd = NULL;
ARG_SLOT nullThisArg = NULL;
if (sigInfo.hasThis())
{
if (m_callThisArg != NULL)
{
if (size_t(m_callThisArg) == 0x1)
{
thisArgHnd = reinterpret_cast<Object**>(&nullThisArg);
}
else
{
thisArgHnd = reinterpret_cast<Object**>(&m_callThisArg);
}
}
else
{
thisArgHnd = OpStackGetAddr<Object*>(argsBase);
}
}
Frame* topFrameBefore = thr->GetFrame();
#if INTERP_ILCYCLE_PROFILE
unsigned __int64 startCycles;
#endif // INTERP_ILCYCLE_PROFILE
// CYCLE PROFILE: BEFORE MDCS CREATION.
PCODE target = NULL;
MethodDesc *exactMethToCall = methToCall;
// Determine the target of virtual calls.
if (virtualCall && methToCall->IsVtableMethod())
{
PCODE pCode;
assert(thisArgHnd != NULL);
OBJECTREF objRef = ObjectToOBJECTREF(*thisArgHnd);
GCPROTECT_BEGIN(objRef);
pCode = methToCall->GetMultiCallableAddrOfVirtualizedCode(&objRef, methToCall->GetMethodTable());
GCPROTECT_END();
exactMethToCall = Entry2MethodDesc(pCode, objRef->GetTrueMethodTable());
}
// Compile the target in advance of calling.
if (exactMethToCall->IsPointingToPrestub())
{
MethodTable* dispatchingMT = NULL;
if (exactMethToCall->IsVtableMethod())
{
assert(thisArgHnd != NULL);
dispatchingMT = (*thisArgHnd)->GetMethodTable();
}
GCX_PREEMP();
target = exactMethToCall->DoPrestub(dispatchingMT);
}
else
{
target = exactMethToCall->GetMethodEntryPoint();
}
// If we're interpreting the method, simply call it directly.
if (InterpretationStubToMethodInfo(target) == exactMethToCall)
{
assert(!exactMethToCall->IsILStub());
InterpreterMethodInfo* methInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(exactMethToCall));
assert(methInfo != NULL);
#if INTERP_ILCYCLE_PROFILE
bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
#endif // INTERP_ILCYCLE_PROFILE
retVal = InterpretMethodBody(methInfo, true, reinterpret_cast<BYTE*>(args), NULL);
pCscd = NULL; // Nothing to cache.
}
else
{
MetaSig msig(exactMethToCall);
// We've already resolved the virtual call target above, so there is no need to do it again.
MethodDescCallSite mdcs(exactMethToCall, &msig, target);
#if INTERP_ILCYCLE_PROFILE
bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
#endif // INTERP_ILCYCLE_PROFILE
mdcs.CallTargetWorker(args, &retVal, sizeof(retVal));
if (pCscd != NULL)
{
// We will do a check at the end to determine whether to cache pCscd, so set it
// to NULL here to make sure we don't.
pCscd = NULL;
}
else
{
// For now, we won't cache virtual calls to virtual methods.
// TODO: fix this somehow.
if (virtualCall && (callInfoPtr->methodFlags & CORINFO_FLG_VIRTUAL)) doNotCache = true;
if (s_InterpreterUseCaching && !doNotCache)
{
// We will add this to the cache later; the locking provokes a GC,
// and "retVal" is vulnerable.
pCscd = new CallSiteCacheData(exactMethToCall, sigInfo);
}
}
}
#if INTERP_ILCYCLE_PROFILE
unsigned __int64 endCycles;
bool b = CycleTimer::GetThreadCyclesS(&endCycles); assert(b);
m_exemptCycles += (endCycles - startCycles);
#endif // INTERP_ILCYCLE_PROFILE
// retVal is now vulnerable.
GCX_FORBID();
// Some managed methods, believe it or not, can push capital-F Frames on the Frame chain.
// If this happens, executing the EX_CATCH below will pop it, which is bad.
// So detect that case, pop the explicitly-pushed frame, and push it again after the EX_CATCH.
// (Asserting that there is only 1 such frame!)
if (thr->GetFrame() != topFrameBefore)
{
ilPushedFrame = thr->GetFrame();
if (ilPushedFrame != NULL)
{
ilPushedFrame->Pop(thr);
if (thr->GetFrame() != topFrameBefore)
{
// This wasn't an IL-pushed frame, so restore.
ilPushedFrame->Push(thr);
ilPushedFrame = NULL;
}
}
}
}
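// Pop the arguments off the operand stack and push the return value, widening small integer returns
// to their stack-normalized types. All of this happens under GCX_FORBID, since "retVal" is not yet
// protected by the operand stack.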
// retVal is still vulnerable.
{
GCX_FORBID();
m_argsSize = 0;
// At this point, the call has happened successfully. We can delete the arguments from the operand stack.
m_curStackHt -= totalArgsOnILStack;
// We've already checked that "largeStructSpaceToPop" can be popped safely.
LargeStructOperandStackPop(largeStructSpaceToPop, NULL);
if (size_t(m_callThisArg) == 0x1)
{
_ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID, "Constructor for var-sized object becomes factory method that returns result.");
OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
m_curStackHt++;
}
else if (sigInfo.retType != CORINFO_TYPE_VOID)
{
switch (sigInfo.retType)
{
case CORINFO_TYPE_BOOL:
case CORINFO_TYPE_BYTE:
OpStackSet<INT32>(m_curStackHt, static_cast<INT8>(retVal));
break;
case CORINFO_TYPE_UBYTE:
OpStackSet<UINT32>(m_curStackHt, static_cast<UINT8>(retVal));
break;
case CORINFO_TYPE_SHORT:
OpStackSet<INT32>(m_curStackHt, static_cast<INT16>(retVal));
break;
case CORINFO_TYPE_USHORT:
case CORINFO_TYPE_CHAR:
OpStackSet<UINT32>(m_curStackHt, static_cast<UINT16>(retVal));
break;
case CORINFO_TYPE_INT:
case CORINFO_TYPE_UINT:
case CORINFO_TYPE_FLOAT:
OpStackSet<INT32>(m_curStackHt, static_cast<INT32>(retVal));
break;
case CORINFO_TYPE_LONG:
case CORINFO_TYPE_ULONG:
case CORINFO_TYPE_DOUBLE:
OpStackSet<INT64>(m_curStackHt, static_cast<INT64>(retVal));
break;
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_NATIVEUINT:
case CORINFO_TYPE_PTR:
OpStackSet<NativeInt>(m_curStackHt, static_cast<NativeInt>(retVal));
break;
case CORINFO_TYPE_CLASS:
OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
break;
case CORINFO_TYPE_BYREF:
OpStackSet<void*>(m_curStackHt, reinterpret_cast<void*>(retVal));
break;
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY:
{
// We must be careful here to write the value, the type, and update the stack height in one
// sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value
// is protected by being fully "on" the operandStack.
#if defined(_ARM_)
// Is the return type an HFA?
if (HFAReturnArgSlots > 0)
{
ARG_SLOT* hfaRetBuff = args - HFAReturnArgSlots;
if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
{
void* dst = LargeStructOperandStackPush(retTypeSz);
memcpy(dst, hfaRetBuff, retTypeSz);
OpStackSet<void*>(m_curStackHt, dst);
}
else
{
memcpy(OpStackGetAddr<UINT64>(m_curStackHt), hfaRetBuff, retTypeSz);
}
}
else
#endif // defined(_ARM_)
if (pLargeStructRetVal != NULL)
{
assert(hasRetBuffArg);
void* dst = LargeStructOperandStackPush(retTypeSz);
CopyValueClassUnchecked(dst, pLargeStructRetVal, GetMethodTableFromClsHnd(retTypeClsHnd));
OpStackSet<void*>(m_curStackHt, dst);
}
else if (hasRetBuffArg)
{
OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(&smallStructRetVal, retTypeSz));
}
else
{
OpStackSet<UINT64>(m_curStackHt, retVal);
}
// We already created this interpreter type, so use it.
OpStackTypeSet(m_curStackHt, retTypeIt.StackNormalize());
m_curStackHt++;
// In the value-class case, the call might have used a ret buff, which we would have registered for GC scanning.
// Make sure it's unregistered.
m_structRetValITPtr = NULL;
}
break;
default:
NYI_INTERP("Unhandled return type");
break;
}
_ASSERTE_MSG(m_structRetValITPtr == NULL, "Invariant.");
// The valueclass case is handled fully in the switch above.
if (sigInfo.retType != CORINFO_TYPE_VALUECLASS &&
sigInfo.retType != CORINFO_TYPE_REFANY)
{
OpStackTypeSet(m_curStackHt, InterpreterType(sigInfo.retType).StackNormalize());
m_curStackHt++;
}
}
}
// Originally, this assertion was in the ValueClass case above, but it does a COOP->PREEMP
// transition, and therefore causes a GC, and we're GCX_FORBIDden from doing a GC while retVal
// is vulnerable. So, for completeness, do it here.
assert(sigInfo.retType != CORINFO_TYPE_VALUECLASS || retTypeIt == InterpreterType(&m_interpCeeInfo, retTypeClsHnd));
// If we created a cached call site, cache it now (when it's safe to take a GC).
if (pCscd != NULL && !doNotCache)
{
CacheCallInfo(iloffset, pCscd);
}
m_callThisArg = NULL;
// If the call we just made pushed a Frame, we popped it above, so re-push it.
if (ilPushedFrame != NULL) ilPushedFrame->Push();
}
#include "metadata.h"
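// CallI implements the "calli" opcode. A sketch of the flow: the operand stack holds the arguments
// followed by the function pointer; we resolve the stand-alone signature token, marshal the arguments
// into an ARG_SLOT array (adding a ret buff and/or vararg cookie when the signature requires one), and
// then either interpret the target directly (if the pointer is an interpretation stub) or invoke it
// through a MethodDescCallSite using a surrogate MethodDesc of the appropriate staticness.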
void Interpreter::CallI()
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#endif
#if INTERP_TRACING
InterlockedIncrement(&s_totalInterpCalls);
#endif // INTERP_TRACING
unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
CORINFO_SIG_INFO sigInfo;
{
GCX_PREEMP();
m_interpCeeInfo.findSig(m_methInfo->m_module, tok, GetPreciseGenericsContext(), &sigInfo);
}
// I'm assuming that a calli can't depend on the generics context, so the simple form of type
// context should suffice?
MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
SigTypeContext sigTypeCtxt(pMD);
MetaSig mSig(sigInfo.pSig, sigInfo.cbSig, GetModule(sigInfo.scope), &sigTypeCtxt);
unsigned totalSigArgs = sigInfo.totalILArgs();
// Note that "totalNativeArgs()" includes space for ret buff arg.
unsigned nSlots = totalSigArgs + 1;
if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
nSlots++;
}
// Make sure that the operand stack has the required number of arguments.
// (Note that this is IL args, not native.)
//
// The total number of arguments on the IL stack. Initially we assume that all the IL arguments
// the callee expects are on the stack, but this may be adjusted downwards if the "this" argument
// is provided by an allocation (the call is to a constructor).
unsigned totalArgsOnILStack = totalSigArgs;
const unsigned LOCAL_ARG_SLOTS = 8;
ARG_SLOT localArgs[LOCAL_ARG_SLOTS];
InterpreterType localArgTypes[LOCAL_ARG_SLOTS];
ARG_SLOT* args;
InterpreterType* argTypes;
if (nSlots <= LOCAL_ARG_SLOTS)
{
args = &localArgs[0];
argTypes = &localArgTypes[0];
}
else
{
args = (ARG_SLOT*)_alloca(nSlots * sizeof(ARG_SLOT));
argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType));
}
// Make sure that we don't scan any of these until we overwrite them with
// the real types of the arguments.
InterpreterType undefIt(CORINFO_TYPE_UNDEF);
for (unsigned i = 0; i < nSlots; i++)
{
argTypes[i] = undefIt;
}
// GC-protect the argument array (as byrefs).
m_args = args;
m_argsSize = nSlots;
m_argTypes = argTypes;
// This is the index into the "args" array (where we copy the value to).
int curArgSlot = 0;
// The operand stack index of the first IL argument.
unsigned totalArgPositions = totalArgsOnILStack + 1; // + 1 for the ftn argument.
assert(m_curStackHt >= totalArgPositions);
int argsBase = m_curStackHt - totalArgPositions;
// Current on-stack argument index.
unsigned arg = 0;
if (sigInfo.hasThis())
{
args[curArgSlot] = PtrToArgSlot(OpStackGet<void*>(argsBase + arg));
argTypes[curArgSlot] = OpStackTypeGet(argsBase + arg);
// AV -> NullRef translation is NYI for the interpreter,
// so we should manually check and throw the correct exception.
ThrowOnInvalidPointer((void*)args[curArgSlot]);
arg++;
curArgSlot++;
}
// This is the argument slot that will be used to hold the return value.
ARG_SLOT retVal = 0;
// If the return type is a structure, then these will be initialized.
CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
InterpreterType retTypeIt;
size_t retTypeSz = 0;
// If non-null, space allocated to hold a large struct return value. Should be deleted later.
// (I could probably optimize this by popping all the arguments first, then allocating space for the return value
// on the large structure operand stack, and passing a pointer directly to that space, avoiding the extra
// copy we have below. But this seemed more expedient, and this should be a pretty rare case.)
BYTE* pLargeStructRetVal = NULL;
// If there's a "GetFlag<Flag_hasRetBuffArg>()" struct return value, it will be stored in this variable if it fits,
// otherwise, we'll dynamically allocate memory for it.
ARG_SLOT smallStructRetVal = 0;
// We should have no return buffer temp space registered here...unless this is a constructor, in which
// case it will return void. In particular, if the return type is VALUE_CLASS, then this should be NULL.
_ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID || m_structRetValITPtr == NULL, "Invariant.");
// Is the return value a struct with a ret buff?
bool hasRetBuffArg = false;
if (sigInfo.retType == CORINFO_TYPE_VALUECLASS)
{
retTypeClsHnd = sigInfo.retTypeClass;
retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
retTypeSz = retTypeIt.Size(&m_interpCeeInfo);
#if defined(_AMD64_)
// TODO: Investigate why HasRetBuffArg can't be used. pMD is a hacked up MD for the
// calli because it belongs to the current method. Doing what the JIT does.
hasRetBuffArg = (retTypeSz > sizeof(void*)) || ((retTypeSz & (retTypeSz - 1)) != 0);
#else
hasRetBuffArg = !!pMD->HasRetBuffArg();
#endif
if (hasRetBuffArg)
{
if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
{
size_t retBuffSize = retTypeSz;
// If the target architecture can sometimes return a struct in several registers,
// MethodDescCallSite will reserve a return value array big enough to hold the maximum.
// It will then copy *all* of this into the return buffer area we allocate. So make sure
// we allocate at least that much.
#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
retBuffSize = max(retTypeSz, ENREGISTERED_RETURNTYPE_MAXSIZE);
#endif // ENREGISTERED_RETURNTYPE_MAXSIZE
pLargeStructRetVal = (BYTE*)_alloca(retBuffSize);
// Clear this in case a GC happens.
for (unsigned i = 0; i < retTypeSz; i++)
{
pLargeStructRetVal[i] = 0;
}
// Register this as location needing GC.
m_structRetValTempSpace = pLargeStructRetVal;
// Set it as the return buffer.
args[curArgSlot] = PtrToArgSlot(pLargeStructRetVal);
}
else
{
// Clear this in case a GC happens.
smallStructRetVal = 0;
// Register this as location needing GC.
m_structRetValTempSpace = &smallStructRetVal;
// Set it as the return buffer.
args[curArgSlot] = PtrToArgSlot(&smallStructRetVal);
}
m_structRetValITPtr = &retTypeIt;
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
curArgSlot++;
}
}
if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
Module* module = GetModule(sigInfo.scope);
CORINFO_VARARGS_HANDLE handle = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sigInfo.pSig, sigInfo.cbSig)));
args[curArgSlot] = PtrToArgSlot(handle);
argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
curArgSlot++;
}
// Now we do the non-this arguments.
size_t largeStructSpaceToPop = 0;
for (; arg < totalArgsOnILStack; arg++)
{
InterpreterType argIt = OpStackTypeGet(argsBase + arg);
size_t sz = OpStackTypeGet(argsBase + arg).Size(&m_interpCeeInfo);
switch (sz)
{
case 1:
args[curArgSlot] = OpStackGet<INT8>(argsBase + arg);
break;
case 2:
args[curArgSlot] = OpStackGet<INT16>(argsBase + arg);
break;
case 4:
args[curArgSlot] = OpStackGet<INT32>(argsBase + arg);
break;
case 8:
default:
if (sz > 8)
{
void* srcPtr = OpStackGet<void*>(argsBase + arg);
args[curArgSlot] = PtrToArgSlot(srcPtr);
if (!IsInLargeStructLocalArea(srcPtr))
{
largeStructSpaceToPop += sz;
}
}
else
{
args[curArgSlot] = OpStackGet<INT64>(argsBase + arg);
}
break;
}
argTypes[curArgSlot] = argIt;
curArgSlot++;
}
// Finally, we get the code pointer.
unsigned ftnInd = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType ftnType = OpStackTypeGet(ftnInd).ToCorInfoType();
assert(ftnType == CORINFO_TYPE_NATIVEINT
|| ftnType == CORINFO_TYPE_INT
|| ftnType == CORINFO_TYPE_LONG);
#endif // _DEBUG
PCODE ftnPtr = OpStackGet<PCODE>(ftnInd);
{
MethodDesc* methToCall;
// If we're interpreting the target, simply call it directly.
if ((methToCall = InterpretationStubToMethodInfo((PCODE)ftnPtr)) != NULL)
{
InterpreterMethodInfo* methInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(methToCall));
assert(methInfo != NULL);
#if INTERP_ILCYCLE_PROFILE
unsigned __int64 startCycles;
bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
#endif // INTERP_ILCYCLE_PROFILE
retVal = InterpretMethodBody(methInfo, true, reinterpret_cast<BYTE*>(args), NULL);
}
else
{
// This is not a great workaround. For the most part, we really don't care what method desc we're using, since
// we're providing the signature and function pointer -- we only need it to be well-formed, "activated,"
// and of the right staticness, which is actually determined by the signature.
// So we query the signature we have to determine whether we need a static or instance MethodDesc, and then
// use one of the appropriate staticness that happens to be sitting around in global variables. For static
// we use "RuntimeHelpers.PrepareConstrainedRegions", for instance we use the default constructor of "Object."
// TODO: make this cleaner -- maybe invent a couple of empty methods with instructive names, just for this purpose.
MethodDesc* pMD;
if (mSig.HasThis())
{
pMD = g_pObjectCtorMD;
}
else
{
pMD = g_pExecuteBackoutCodeHelperMethod; // A random static method.
}
MethodDescCallSite mdcs(pMD, &mSig, ftnPtr);
#if 0
// If the current method being interpreted is an IL stub, we're calling native code, so
// change the GC mode. (We'll only do this at the call if the calling convention turns out
// to be a managed calling convention.)
MethodDesc* pStubContextMD = reinterpret_cast<MethodDesc*>(m_stubContext);
bool transitionToPreemptive = (pStubContextMD != NULL && !pStubContextMD->IsIL());
mdcs.CallTargetWorker(args, &retVal, sizeof(retVal), transitionToPreemptive);
#else
// TODO The code above triggers assertion at threads.cpp:6861:
// _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
// The workaround will likely break more things than what it is fixing:
// just do not make transition to preemptive GC for now.
mdcs.CallTargetWorker(args, &retVal, sizeof(retVal));
#endif
}
// retVal is now vulnerable.
GCX_FORBID();
}
// retVal is still vulnerable.
{
GCX_FORBID();
m_argsSize = 0;
// At this point, the call has happened successfully. We can delete the arguments from the operand stack.
m_curStackHt -= totalArgPositions;
// We've already checked that "largeStructSpaceToPop" can be popped safely.
LargeStructOperandStackPop(largeStructSpaceToPop, NULL);
if (size_t(m_callThisArg) == 0x1)
{
_ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID, "Constructor for var-sized object becomes factory method that returns result.");
OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
m_curStackHt++;
}
else if (sigInfo.retType != CORINFO_TYPE_VOID)
{
switch (sigInfo.retType)
{
case CORINFO_TYPE_BOOL:
case CORINFO_TYPE_BYTE:
OpStackSet<INT32>(m_curStackHt, static_cast<INT8>(retVal));
break;
case CORINFO_TYPE_UBYTE:
OpStackSet<UINT32>(m_curStackHt, static_cast<UINT8>(retVal));
break;
case CORINFO_TYPE_SHORT:
OpStackSet<INT32>(m_curStackHt, static_cast<INT16>(retVal));
break;
case CORINFO_TYPE_USHORT:
case CORINFO_TYPE_CHAR:
OpStackSet<UINT32>(m_curStackHt, static_cast<UINT16>(retVal));
break;
case CORINFO_TYPE_INT:
case CORINFO_TYPE_UINT:
case CORINFO_TYPE_FLOAT:
OpStackSet<INT32>(m_curStackHt, static_cast<INT32>(retVal));
break;
case CORINFO_TYPE_LONG:
case CORINFO_TYPE_ULONG:
case CORINFO_TYPE_DOUBLE:
OpStackSet<INT64>(m_curStackHt, static_cast<INT64>(retVal));
break;
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_NATIVEUINT:
case CORINFO_TYPE_PTR:
OpStackSet<NativeInt>(m_curStackHt, static_cast<NativeInt>(retVal));
break;
case CORINFO_TYPE_CLASS:
OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
break;
case CORINFO_TYPE_VALUECLASS:
{
// We must be careful here to write the value, the type, and update the stack height in one
// sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value
// is protected by being fully "on" the operandStack.
if (pLargeStructRetVal != NULL)
{
assert(hasRetBuffArg);
void* dst = LargeStructOperandStackPush(retTypeSz);
CopyValueClassUnchecked(dst, pLargeStructRetVal, GetMethodTableFromClsHnd(retTypeClsHnd));
OpStackSet<void*>(m_curStackHt, dst);
}
else if (hasRetBuffArg)
{
OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(&smallStructRetVal, retTypeSz));
}
else
{
OpStackSet<UINT64>(m_curStackHt, retVal);
}
// We already created this interpreter type, so use it.
OpStackTypeSet(m_curStackHt, retTypeIt.StackNormalize());
m_curStackHt++;
// In the value-class case, the call might have used a ret buff, which we would have registered for GC scanning.
// Make sure it's unregistered.
m_structRetValITPtr = NULL;
}
break;
default:
NYI_INTERP("Unhandled return type");
break;
}
_ASSERTE_MSG(m_structRetValITPtr == NULL, "Invariant.");
// The valueclass case is handled fully in the switch above.
if (sigInfo.retType != CORINFO_TYPE_VALUECLASS)
{
OpStackTypeSet(m_curStackHt, InterpreterType(sigInfo.retType).StackNormalize());
m_curStackHt++;
}
}
}
// Originally, this assertion was in the ValueClass case above, but it does a COOP->PREEMP
// transition, and therefore causes a GC, and we're GCX_FORBIDden from doing a GC while retVal
// is vulnerable. So, for completeness, do it here.
assert(sigInfo.retType != CORINFO_TYPE_VALUECLASS || retTypeIt == InterpreterType(&m_interpCeeInfo, retTypeClsHnd));
m_ILCodePtr += 5;
}
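// A "dead simple" getter is a property getter whose body is nothing more than a field load. The
// recognized IL shapes are, roughly (the exact opcode and size checks are below):
//   static getter:             ldsfld <fld>; ret
//   instance getter (OPT IL):  ldarg.0; ldfld <fld>; ret
//   instance getter (DBG IL):  nop; ldarg.0; ldfld <fld>; stloc.0; br <next>; ldloc.0; ret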
// static
bool Interpreter::IsDeadSimpleGetter(CEEInfo* info, MethodDesc* pMD, size_t* offsetOfLd)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_ANY;
} CONTRACTL_END;
DWORD flags = pMD->GetAttrs();
CORINFO_METHOD_INFO methInfo;
{
GCX_PREEMP();
bool b = info->getMethodInfo(CORINFO_METHOD_HANDLE(pMD), &methInfo);
if (!b) return false;
}
// If the method takes a generic type argument, it's not dead simple...
if (methInfo.args.callConv & CORINFO_CALLCONV_PARAMTYPE) return false;
BYTE* codePtr = methInfo.ILCode;
if (flags & CORINFO_FLG_STATIC)
{
if (methInfo.ILCodeSize != 6)
return false;
if (*codePtr != CEE_LDSFLD)
return false;
assert(ILOffsetOfLdSFldInDeadSimpleStaticGetter == 0);
*offsetOfLd = 0;
codePtr += 5;
return (*codePtr == CEE_RET);
}
else
{
// We handle two forms, one for DBG IL, and one for OPT IL.
bool dbg = false;
if (methInfo.ILCodeSize == 0xc)
dbg = true;
else if (methInfo.ILCodeSize != 7)
return false;
if (dbg)
{
if (*codePtr != CEE_NOP)
return false;
codePtr += 1;
}
if (*codePtr != CEE_LDARG_0)
return false;
codePtr += 1;
if (*codePtr != CEE_LDFLD)
return false;
*offsetOfLd = codePtr - methInfo.ILCode;
assert((dbg && ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg == *offsetOfLd)
|| (!dbg && ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt == *offsetOfLd));
codePtr += 5;
if (dbg)
{
if (*codePtr != CEE_STLOC_0)
return false;
codePtr += 1;
if (*codePtr != CEE_BR)
return false;
if (getU4LittleEndian(codePtr + 1) != 0)
return false;
codePtr += 5;
if (*codePtr != CEE_LDLOC_0)
return false;
}
return (*codePtr == CEE_RET);
}
}
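// The string intrinsics below operate directly on the operand stack -- replacing the string reference
// (and, for get_Chars, the index) with the result -- rather than making a managed call.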
void Interpreter::DoStringLength()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt > 0);
unsigned ind = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType stringCIT = OpStackTypeGet(ind).ToCorInfoType();
if (stringCIT != CORINFO_TYPE_CLASS)
{
VerificationError("StringLength called on non-string.");
}
#endif // _DEBUG
Object* obj = OpStackGet<Object*>(ind);
#ifdef _DEBUG
if (obj->GetMethodTable() != g_pStringClass)
{
VerificationError("StringLength called on non-string.");
}
#endif // _DEBUG
StringObject* str = reinterpret_cast<StringObject*>(obj);
INT32 len = str->GetStringLength();
OpStackSet<INT32>(ind, len);
OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_INT));
}
void Interpreter::DoStringGetChar()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt >= 2);
unsigned strInd = m_curStackHt - 2;
unsigned indexInd = strInd + 1;
#ifdef _DEBUG
CorInfoType stringCIT = OpStackTypeGet(strInd).ToCorInfoType();
if (stringCIT != CORINFO_TYPE_CLASS)
{
VerificationError("StringGetChar called on non-string.");
}
#endif // _DEBUG
Object* obj = OpStackGet<Object*>(strInd);
#ifdef _DEBUG
if (obj->GetMethodTable() != g_pStringClass)
{
VerificationError("StringGetChar called on non-string.");
}
#endif // _DEBUG
StringObject* str = reinterpret_cast<StringObject*>(obj);
#ifdef _DEBUG
CorInfoType indexCIT = OpStackTypeGet(indexInd).ToCorInfoType();
if (indexCIT != CORINFO_TYPE_INT)
{
VerificationError("StringGetChar needs integer index.");
}
#endif // _DEBUG
INT32 ind = OpStackGet<INT32>(indexInd);
if (ind < 0)
ThrowArrayBoundsException();
UINT32 uind = static_cast<UINT32>(ind);
if (uind >= str->GetStringLength())
ThrowArrayBoundsException();
// Otherwise...
GCX_FORBID(); // str is vulnerable.
UINT16* dataPtr = reinterpret_cast<UINT16*>(reinterpret_cast<INT8*>(str) + StringObject::GetBufferOffset());
UINT32 filledChar = dataPtr[ind];
OpStackSet<UINT32>(strInd, filledChar);
OpStackTypeSet(strInd, InterpreterType(CORINFO_TYPE_INT));
m_curStackHt = indexInd;
}
void Interpreter::DoGetTypeFromHandle()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
assert(m_curStackHt > 0);
unsigned ind = m_curStackHt - 1;
#ifdef _DEBUG
CorInfoType handleCIT = OpStackTypeGet(ind).ToCorInfoType();
if (handleCIT != CORINFO_TYPE_VALUECLASS && handleCIT != CORINFO_TYPE_CLASS)
{
VerificationError("HandleGetTypeFromHandle called on non-RuntimeTypeHandle/non-RuntimeType.");
}
Object* obj = OpStackGet<Object*>(ind);
if (obj->GetMethodTable() != g_pRuntimeTypeClass)
{
VerificationError("HandleGetTypeFromHandle called on non-RuntimeTypeHandle/non-RuntimeType.");
}
#endif // _DEBUG
OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
}
void Interpreter::RecordConstrainedCall()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#if INTERP_TRACING
InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Constrained]);
#endif // INTERP_TRACING
{
GCX_PREEMP();
ResolveToken(&m_constrainedResolvedToken, getU4LittleEndian(m_ILCodePtr + 2), CORINFO_TOKENKIND_Constrained InterpTracingArg(RTK_Constrained));
}
m_constrainedFlag = true;
m_ILCodePtr += 6;
}
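// Values too large for a single operand-stack slot live in a separate byte array, the "large struct
// operand stack"; the ordinary slot then holds a pointer into that array. The helpers below manage
// that array: when more space is needed, the new allocation size is the larger of (old size + 4*sz)
// and twice the old size, and the live bytes are copied into the new allocation.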
void Interpreter::LargeStructOperandStackEnsureCanPush(size_t sz)
{
size_t remaining = m_largeStructOperandStackAllocSize - m_largeStructOperandStackHt;
if (remaining < sz)
{
size_t newAllocSize = max(m_largeStructOperandStackAllocSize + sz * 4, m_largeStructOperandStackAllocSize * 2);
BYTE* newStack = new BYTE[newAllocSize];
m_largeStructOperandStackAllocSize = newAllocSize;
if (m_largeStructOperandStack != NULL)
{
memcpy(newStack, m_largeStructOperandStack, m_largeStructOperandStackHt);
delete[] m_largeStructOperandStack;
}
m_largeStructOperandStack = newStack;
}
}
void* Interpreter::LargeStructOperandStackPush(size_t sz)
{
LargeStructOperandStackEnsureCanPush(sz);
assert(m_largeStructOperandStackAllocSize >= m_largeStructOperandStackHt + sz);
void* res = &m_largeStructOperandStack[m_largeStructOperandStackHt];
m_largeStructOperandStackHt += sz;
return res;
}
void Interpreter::LargeStructOperandStackPop(size_t sz, void* fromAddr)
{
if (!IsInLargeStructLocalArea(fromAddr))
{
assert(m_largeStructOperandStackHt >= sz);
m_largeStructOperandStackHt -= sz;
}
}
#ifdef _DEBUG
bool Interpreter::LargeStructStackHeightIsValid()
{
size_t sz2 = 0;
for (unsigned k = 0; k < m_curStackHt; k++)
{
if (OpStackTypeGet(k).IsLargeStruct(&m_interpCeeInfo) && !IsInLargeStructLocalArea(OpStackGet<void*>(k)))
{
sz2 += OpStackTypeGet(k).Size(&m_interpCeeInfo);
}
}
assert(sz2 == m_largeStructOperandStackHt);
return sz2 == m_largeStructOperandStackHt;
}
#endif // _DEBUG
void Interpreter::VerificationError(const char* msg)
{
// TODO: Should raise an exception eventually; for now:
const char* const msgPrefix = "Verification Error: ";
size_t len = strlen(msgPrefix) + strlen(msg) + 1;
char* msgFinal = (char*)_alloca(len);
strcpy_s(msgFinal, len, msgPrefix);
strcat_s(msgFinal, len, msg);
_ASSERTE_MSG(false, msgFinal);
}
void Interpreter::ThrowDivideByZero()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
COMPlusThrow(kDivideByZeroException);
}
void Interpreter::ThrowSysArithException()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
// According to the ECMA spec, this should be an ArithmeticException; however,
// the JITs throw an OverflowException and consistency is top priority...
COMPlusThrow(kOverflowException);
}
void Interpreter::ThrowNullPointerException()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
COMPlusThrow(kNullReferenceException);
}
void Interpreter::ThrowOverflowException()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
COMPlusThrow(kOverflowException);
}
void Interpreter::ThrowArrayBoundsException()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
COMPlusThrow(kIndexOutOfRangeException);
}
void Interpreter::ThrowInvalidCastException()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
COMPlusThrow(kInvalidCastException);
}
void Interpreter::ThrowStackOverflow()
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
COMPlusThrow(kStackOverflowException);
}
float Interpreter::RemFunc(float v1, float v2)
{
return fmodf(v1, v2);
}
double Interpreter::RemFunc(double v1, double v2)
{
return fmod(v1, v2);
}
// Static members and methods.
Interpreter::AddrToMDMap* Interpreter::s_addrToMDMap = NULL;
unsigned Interpreter::s_interpreterStubNum = 0;
// TODO: contracts and synchronization for the AddrToMDMap methods.
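// s_addrToMDMap maps the address of each generated interpretation stub back to its method handle.
// InterpretationStubToMethodInfo consults it so that a call whose resolved target is itself an
// interpretation stub can bypass the stub and re-enter InterpretMethodBody directly (see the call
// paths above and CallI).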
// Requires caller to hold "s_interpStubToMDMapLock".
Interpreter::AddrToMDMap* Interpreter::GetAddrToMdMap()
{
#if 0
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
#endif
if (s_addrToMDMap == NULL)
{
s_addrToMDMap = new AddrToMDMap();
}
return s_addrToMDMap;
}
void Interpreter::RecordInterpreterStubForMethodDesc(CORINFO_METHOD_HANDLE md, void* addr)
{
#if 0
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
#endif
CrstHolder ch(&s_interpStubToMDMapLock);
AddrToMDMap* map = Interpreter::GetAddrToMdMap();
#ifdef _DEBUG
CORINFO_METHOD_HANDLE dummy;
assert(!map->Lookup(addr, &dummy));
#endif // _DEBUG
map->AddOrReplace(KeyValuePair<void*,CORINFO_METHOD_HANDLE>(addr, md));
}
MethodDesc* Interpreter::InterpretationStubToMethodInfo(PCODE addr)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
// This query function will never allocate the table...
if (s_addrToMDMap == NULL)
return NULL;
// Otherwise...if we observe s_addrToMdMap non-null, the lock below must be initialized.
// CrstHolder ch(&s_interpStubToMDMapLock);
AddrToMDMap* map = Interpreter::GetAddrToMdMap();
CORINFO_METHOD_HANDLE result = NULL;
(void)map->Lookup((void*)addr, &result);
return (MethodDesc*)result;
}
Interpreter::MethodHandleToInterpMethInfoPtrMap* Interpreter::s_methodHandleToInterpMethInfoPtrMap = NULL;
// Requires caller to hold "s_interpStubToMDMapLock".
Interpreter::MethodHandleToInterpMethInfoPtrMap* Interpreter::GetMethodHandleToInterpMethInfoPtrMap()
{
#if 0
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
#endif
if (s_methodHandleToInterpMethInfoPtrMap == NULL)
{
s_methodHandleToInterpMethInfoPtrMap = new MethodHandleToInterpMethInfoPtrMap();
}
return s_methodHandleToInterpMethInfoPtrMap;
}
InterpreterMethodInfo* Interpreter::RecordInterpreterMethodInfoForMethodHandle(CORINFO_METHOD_HANDLE md, InterpreterMethodInfo* methInfo)
{
#if 0
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
#endif
CrstHolder ch(&s_interpStubToMDMapLock);
MethodHandleToInterpMethInfoPtrMap* map = Interpreter::GetMethodHandleToInterpMethInfoPtrMap();
MethInfo mi;
if (map->Lookup(md, &mi))
{
// If there's already an entry, make sure it was created by another thread -- the same thread shouldn't create two
// of these.
_ASSERTE_MSG(mi.m_thread != GetThread(), "Two InterpMethInfo's for same meth by same thread.");
// If we were creating an interpreter stub at the same time as another thread, and we lost the race to
// insert it, use the already-existing one, and delete this one.
delete methInfo;
return mi.m_info;
}
mi.m_info = methInfo;
#ifdef _DEBUG
mi.m_thread = GetThread();
#endif
_ASSERTE_MSG(map->LookupPtr(md) == NULL, "Multiple InterpMethInfos for method desc.");
map->Add(md, mi);
return methInfo;
}
InterpreterMethodInfo* Interpreter::MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE md)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_TRIGGERS;
} CONTRACTL_END;
// This query function will never allocate the table...
if (s_methodHandleToInterpMethInfoPtrMap == NULL)
return NULL;
// Otherwise...if we observe s_methodHandleToInterpMethInfoPtrMap non-null, the lock below must be initialized.
CrstHolder ch(&s_interpStubToMDMapLock);
MethodHandleToInterpMethInfoPtrMap* map = Interpreter::GetMethodHandleToInterpMethInfoPtrMap();
MethInfo mi;
mi.m_info = NULL;
(void)map->Lookup(md, &mi);
return mi.m_info;
}
#ifndef DACCESS_COMPILE
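// Per-method caching: for a method with no dynamic generics context, m_methodCache points directly at
// an ILOffsetToItemCache (IL offset -> cached item). For a method that does have a dynamic generics
// context, m_methodCache points at a GenericContextToInnerCache, which maps each exact context to its
// own ILOffsetToItemCache.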
// Requires that the current thread holds "s_methodCacheLock."
ILOffsetToItemCache* InterpreterMethodInfo::GetCacheForCall(Object* thisArg, void* genericsCtxtArg, bool alloc)
{
// First, does the current method have dynamic generic information, and, if so,
// what kind?
CORINFO_CONTEXT_HANDLE context = GetPreciseGenericsContext(thisArg, genericsCtxtArg);
if (context == MAKE_METHODCONTEXT(m_method))
{
// No dynamic generics context information. The caching field in "m_methInfo" is the
// ILoffset->Item cache directly.
// First, ensure that it's allocated.
if (m_methodCache == NULL && alloc)
{
// Lazy init via compare-exchange.
ILOffsetToItemCache* cache = new ILOffsetToItemCache();
void* prev = InterlockedCompareExchangeT<void*>(&m_methodCache, cache, NULL);
if (prev != NULL) delete cache;
}
return reinterpret_cast<ILOffsetToItemCache*>(m_methodCache);
}
else
{
// Otherwise, it does have generic info, so find the right cache.
// First ensure that the top-level generics-context --> cache cache exists.
GenericContextToInnerCache* outerCache = reinterpret_cast<GenericContextToInnerCache*>(m_methodCache);
if (outerCache == NULL)
{
if (alloc)
{
// Lazy init via compare-exchange.
outerCache = new GenericContextToInnerCache();
void* prev = InterlockedCompareExchangeT<void*>(&m_methodCache, outerCache, NULL);
if (prev != NULL)
{
delete outerCache;
outerCache = reinterpret_cast<GenericContextToInnerCache*>(prev);
}
}
else
{
return NULL;
}
}
// Does the outerCache already have an entry for this instantiation?
ILOffsetToItemCache* innerCache = NULL;
if (!outerCache->GetItem(size_t(context), innerCache) && alloc)
{
innerCache = new ILOffsetToItemCache();
outerCache->AddItem(size_t(context), innerCache);
}
return innerCache;
}
}
void Interpreter::CacheCallInfo(unsigned iloffset, CallSiteCacheData* callInfo)
{
CrstHolder ch(&s_methodCacheLock);
ILOffsetToItemCache* cache = GetThisExecCache(true);
// Insert, but if the item is already there, delete "callInfo" (which would have been owned
// by the cache).
// (Duplicate entries can happen because of recursive calls -- F makes a recursive call to F, and when it
// returns it wants to cache it, but the recursive call makes a further recursive call, and caches that, so the
// first call finds the iloffset already occupied.)
if (!cache->AddItem(iloffset, CachedItem(callInfo)))
{
delete callInfo;
}
}
CallSiteCacheData* Interpreter::GetCachedCallInfo(unsigned iloffset)
{
CrstHolder ch(&s_methodCacheLock);
ILOffsetToItemCache* cache = GetThisExecCache(false);
if (cache == NULL) return NULL;
// Otherwise...
CachedItem item;
if (cache->GetItem(iloffset, item))
{
_ASSERTE_MSG(item.m_tag == CIK_CallSite, "Wrong cached item tag.");
return item.m_value.m_callSiteInfo;
}
else
{
return NULL;
}
}
void Interpreter::CacheInstanceField(unsigned iloffset, FieldDesc* fld)
{
CrstHolder ch(&s_methodCacheLock);
ILOffsetToItemCache* cache = GetThisExecCache(true);
cache->AddItem(iloffset, CachedItem(fld));
}
FieldDesc* Interpreter::GetCachedInstanceField(unsigned iloffset)
{
CrstHolder ch(&s_methodCacheLock);
ILOffsetToItemCache* cache = GetThisExecCache(false);
if (cache == NULL) return NULL;
// Otherwise...
CachedItem item;
if (cache->GetItem(iloffset, item))
{
_ASSERTE_MSG(item.m_tag == CIK_InstanceField, "Wrong cached item tag.");
return item.m_value.m_instanceField;
}
else
{
return NULL;
}
}
void Interpreter::CacheStaticField(unsigned iloffset, StaticFieldCacheEntry* pEntry)
{
CrstHolder ch(&s_methodCacheLock);
ILOffsetToItemCache* cache = GetThisExecCache(true);
// If (say) a concurrent thread has beaten us to this, delete the entry (which otherwise would have
// been owned by the cache).
if (!cache->AddItem(iloffset, CachedItem(pEntry)))
{
delete pEntry;
}
}
StaticFieldCacheEntry* Interpreter::GetCachedStaticField(unsigned iloffset)
{
CrstHolder ch(&s_methodCacheLock);
ILOffsetToItemCache* cache = GetThisExecCache(false);
if (cache == NULL)
return NULL;
// Otherwise...
CachedItem item;
if (cache->GetItem(iloffset, item))
{
_ASSERTE_MSG(item.m_tag == CIK_StaticField, "Wrong cached item tag.");
return item.m_value.m_staticFieldAddr;
}
else
{
return NULL;
}
}
void Interpreter::CacheClassHandle(unsigned iloffset, CORINFO_CLASS_HANDLE clsHnd)
{
CrstHolder ch(&s_methodCacheLock);
ILOffsetToItemCache* cache = GetThisExecCache(true);
cache->AddItem(iloffset, CachedItem(clsHnd));
}
CORINFO_CLASS_HANDLE Interpreter::GetCachedClassHandle(unsigned iloffset)
{
CrstHolder ch(&s_methodCacheLock);
ILOffsetToItemCache* cache = GetThisExecCache(false);
if (cache == NULL)
return NULL;
// Otherwise...
CachedItem item;
if (cache->GetItem(iloffset, item))
{
_ASSERTE_MSG(item.m_tag == CIK_ClassHandle, "Wrong cached item tag.");
return item.m_value.m_clsHnd;
}
else
{
return NULL;
}
}
#endif // DACCESS_COMPILE
// Statics
// These are not debug-only.
ConfigMethodSet Interpreter::s_InterpretMeths;
ConfigMethodSet Interpreter::s_InterpretMethsExclude;
ConfigDWORD Interpreter::s_InterpretMethHashMin;
ConfigDWORD Interpreter::s_InterpretMethHashMax;
ConfigDWORD Interpreter::s_InterpreterJITThreshold;
ConfigDWORD Interpreter::s_InterpreterDoLoopMethodsFlag;
ConfigDWORD Interpreter::s_InterpreterUseCachingFlag;
ConfigDWORD Interpreter::s_InterpreterLooseRulesFlag;
bool Interpreter::s_InterpreterDoLoopMethods;
bool Interpreter::s_InterpreterUseCaching;
bool Interpreter::s_InterpreterLooseRules;
CrstExplicitInit Interpreter::s_methodCacheLock;
CrstExplicitInit Interpreter::s_interpStubToMDMapLock;
// The static variables below are debug-only.
#if INTERP_TRACING
LONG Interpreter::s_totalInvocations = 0;
LONG Interpreter::s_totalInterpCalls = 0;
LONG Interpreter::s_totalInterpCallsToGetters = 0;
LONG Interpreter::s_totalInterpCallsToDeadSimpleGetters = 0;
LONG Interpreter::s_totalInterpCallsToDeadSimpleGettersShortCircuited = 0;
LONG Interpreter::s_totalInterpCallsToSetters = 0;
LONG Interpreter::s_totalInterpCallsToIntrinsics = 0;
LONG Interpreter::s_totalInterpCallsToIntrinsicsUnhandled = 0;
LONG Interpreter::s_tokenResolutionOpportunities[RTK_Count] = {0, };
LONG Interpreter::s_tokenResolutionCalls[RTK_Count] = {0, };
const char* Interpreter::s_tokenResolutionKindNames[RTK_Count] =
{
"Undefined",
"Constrained",
"NewObj",
"NewArr",
"LdToken",
"LdFtn",
"LdVirtFtn",
"SFldAddr",
"LdElem",
"Call",
"LdObj",
"StObj",
"CpObj",
"InitObj",
"IsInst",
"CastClass",
"MkRefAny",
"RefAnyVal",
"Sizeof",
"StElem",
"Box",
"Unbox",
"UnboxAny",
"LdFld",
"LdFldA",
"StFld",
"FindClass",
"Exception",
};
FILE* Interpreter::s_InterpreterLogFile = NULL;
ConfigDWORD Interpreter::s_DumpInterpreterStubsFlag;
ConfigDWORD Interpreter::s_TraceInterpreterEntriesFlag;
ConfigDWORD Interpreter::s_TraceInterpreterILFlag;
ConfigDWORD Interpreter::s_TraceInterpreterOstackFlag;
ConfigDWORD Interpreter::s_TraceInterpreterVerboseFlag;
ConfigDWORD Interpreter::s_TraceInterpreterJITTransitionFlag;
ConfigDWORD Interpreter::s_InterpreterStubMin;
ConfigDWORD Interpreter::s_InterpreterStubMax;
#endif // INTERP_TRACING
#if INTERP_ILINSTR_PROFILE
unsigned short Interpreter::s_ILInstrCategories[512];
int Interpreter::s_ILInstrExecs[256] = {0, };
int Interpreter::s_ILInstrExecsByCategory[512] = {0, };
int Interpreter::s_ILInstr2ByteExecs[Interpreter::CountIlInstr2Byte] = {0, };
#if INTERP_ILCYCLE_PROFILE
unsigned __int64 Interpreter::s_ILInstrCycles[512] = { 0, };
unsigned __int64 Interpreter::s_ILInstrCyclesByCategory[512] = { 0, };
// XXX
unsigned __int64 Interpreter::s_callCycles = 0;
unsigned Interpreter::s_calls = 0;
void Interpreter::UpdateCycleCount()
{
unsigned __int64 endCycles;
bool b = CycleTimer::GetThreadCyclesS(&endCycles); assert(b);
if (m_instr != CEE_COUNT)
{
unsigned __int64 delta = (endCycles - m_startCycles);
if (m_exemptCycles > 0)
{
delta = delta - m_exemptCycles;
m_exemptCycles = 0;
}
CycleTimer::InterlockedAddU64(&s_ILInstrCycles[m_instr], delta);
}
    // In any case, set the instruction to the current one, and record its start time.
m_instr = (*m_ILCodePtr);
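    // Two-byte opcodes start with the CEE_PREFIX1 (0xFE) lead byte; fold them into the
    // 0x100-0x1FF index range used by the 512-entry per-instruction arrays.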
if (m_instr == CEE_PREFIX1) {
m_instr = *(m_ILCodePtr + 1) + 0x100;
}
b = CycleTimer::GetThreadCyclesS(&m_startCycles); assert(b);
}
#endif // INTERP_ILCYCLE_PROFILE
#endif // INTERP_ILINSTR_PROFILE
#ifdef _DEBUG
InterpreterMethodInfo** Interpreter::s_interpMethInfos = NULL;
unsigned Interpreter::s_interpMethInfosAllocSize = 0;
unsigned Interpreter::s_interpMethInfosCount = 0;
bool Interpreter::TOSIsPtr()
{
if (m_curStackHt == 0)
return false;
return CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt - 1).ToCorInfoType());
}
#endif // DEBUG
ConfigDWORD Interpreter::s_PrintPostMortemFlag;
// InterpreterCache.
template<typename Key, typename Val>
InterpreterCache<Key,Val>::InterpreterCache() : m_pairs(NULL), m_allocSize(0), m_count(0)
{
#ifdef _DEBUG
AddAllocBytes(sizeof(*this));
#endif
}
#ifdef _DEBUG
// static
static unsigned InterpreterCacheAllocBytes = 0;
const unsigned KBYTE = 1024;
const unsigned MBYTE = KBYTE*KBYTE;
const unsigned InterpreterCacheAllocBytesIncrement = 16*KBYTE;
static unsigned InterpreterCacheAllocBytesNextTarget = InterpreterCacheAllocBytesIncrement;
template<typename Key, typename Val>
void InterpreterCache<Key,Val>::AddAllocBytes(unsigned bytes)
{
// Reinstate this code if you want to track bytes attributable to caching.
#if 0
InterpreterCacheAllocBytes += bytes;
if (InterpreterCacheAllocBytes > InterpreterCacheAllocBytesNextTarget)
{
printf("Total cache alloc = %d bytes.\n", InterpreterCacheAllocBytes);
fflush(stdout);
InterpreterCacheAllocBytesNextTarget += InterpreterCacheAllocBytesIncrement;
}
#endif
}
#endif // _DEBUG
template<typename Key, typename Val>
void InterpreterCache<Key,Val>::EnsureCanInsert()
{
if (m_count < m_allocSize)
return;
// Otherwise, must make room.
if (m_allocSize == 0)
{
assert(m_count == 0);
m_pairs = new KeyValPair[InitSize];
m_allocSize = InitSize;
#ifdef _DEBUG
AddAllocBytes(m_allocSize * sizeof(KeyValPair));
#endif
}
else
{
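        // Grow by doubling, capped at USHRT_MAX (newSize is an unsigned short).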
unsigned short newSize = min(m_allocSize * 2, USHRT_MAX);
KeyValPair* newPairs = new KeyValPair[newSize];
memcpy(newPairs, m_pairs, m_count * sizeof(KeyValPair));
delete[] m_pairs;
m_pairs = newPairs;
#ifdef _DEBUG
AddAllocBytes((newSize - m_allocSize) * sizeof(KeyValPair));
#endif
m_allocSize = newSize;
}
}
template<typename Key, typename Val>
bool InterpreterCache<Key,Val>::AddItem(Key key, Val val)
{
EnsureCanInsert();
// Find the index to insert before.
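    // The pair array is kept sorted by key (GetItem relies on that ordering for its binary
    // search); a linear scan here is acceptable because caches are capped at USHRT_MAX entries
    // and are typically small.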
unsigned firstGreaterOrEqual = 0;
for (; firstGreaterOrEqual < m_count; firstGreaterOrEqual++)
{
if (m_pairs[firstGreaterOrEqual].m_key >= key)
break;
}
if (firstGreaterOrEqual < m_count && m_pairs[firstGreaterOrEqual].m_key == key)
{
assert(m_pairs[firstGreaterOrEqual].m_val == val);
return false;
}
    // Move everything starting at firstGreaterOrEqual up one index (if necessary).
if (m_count > 0)
{
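        // k is unsigned, so break explicitly at k == 0 rather than letting the decrement wrap around.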
for (unsigned k = m_count-1; k >= firstGreaterOrEqual; k--)
{
m_pairs[k + 1] = m_pairs[k];
if (k == 0)
break;
}
}
// Now we can insert the new element.
m_pairs[firstGreaterOrEqual].m_key = key;
m_pairs[firstGreaterOrEqual].m_val = val;
m_count++;
return true;
}
template<typename Key, typename Val>
bool InterpreterCache<Key,Val>::GetItem(Key key, Val& v)
{
unsigned lo = 0;
unsigned hi = m_count;
    // Invariant: we've determined that the pair for "key", if present,
    // is in the index interval [lo, hi).
while (lo < hi)
{
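        // (hi + lo) cannot overflow: m_count never exceeds USHRT_MAX, so both bounds stay small.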
unsigned mid = (hi + lo)/2;
Key midKey = m_pairs[mid].m_key;
if (key == midKey)
{
v = m_pairs[mid].m_val;
return true;
}
else if (key < midKey)
{
hi = mid;
}
else
{
assert(key > midKey);
lo = mid + 1;
}
}
// If we reach here without returning, it's not here.
return false;
}
// Normalizes the operand stack so that it contains no pointers into the large-struct local area:
// any large-struct operand whose storage still belongs to a local is copied up onto the
// large-struct operand stack, so the stack owns its own copy of every value it holds.
void Interpreter::OpStackNormalize()
{
size_t largeStructStackOffset = 0;
// Yes, I've written a quadratic algorithm here. I don't think it will matter in practice.
for (unsigned i = 0; i < m_curStackHt; i++)
{
InterpreterType tp = OpStackTypeGet(i);
if (tp.IsLargeStruct(&m_interpCeeInfo))
{
size_t sz = tp.Size(&m_interpCeeInfo);
void* addr = OpStackGet<void*>(i);
if (IsInLargeStructLocalArea(addr))
{
// We're going to allocate space at the top for the new value, then copy everything above the current slot
// up into that new space, then copy the value into the vacated space.
// How much will we have to copy?
size_t toCopy = m_largeStructOperandStackHt - largeStructStackOffset;
// Allocate space for the new value.
void* dummy = LargeStructOperandStackPush(sz);
// Remember where we're going to write to.
BYTE* fromAddr = m_largeStructOperandStack + largeStructStackOffset;
BYTE* toAddr = fromAddr + sz;
memcpy(toAddr, fromAddr, toCopy);
// Now copy the local variable value.
memcpy(fromAddr, addr, sz);
OpStackSet<void*>(i, fromAddr);
}
largeStructStackOffset += sz;
}
}
// When we've normalized the stack, it contains no pointers to locals.
m_orOfPushedInterpreterTypes = 0;
}
#if INTERP_TRACING
// Code copied from eeinterface.cpp in "compiler". Should be common...
static const char* CorInfoTypeNames[] = {
"undef",
"void",
"bool",
"char",
"byte",
"ubyte",
"short",
"ushort",
"int",
"uint",
"long",
"ulong",
"nativeint",
"nativeuint",
"float",
"double",
"string",
"ptr",
"byref",
"valueclass",
"class",
"refany",
"var"
};
const char* eeGetMethodFullName(CEEInfo* info, CORINFO_METHOD_HANDLE hnd, const char** clsName)
{
CONTRACTL {
SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_ANY;
} CONTRACTL_END;
GCX_PREEMP();
const char* returnType = NULL;
const char* className;
const char* methodName = info->getMethodName(hnd, &className);
if (clsName != NULL)
{
*clsName = className;
}
size_t length = 0;
unsigned i;
/* Generating the full signature is a two-pass process. First we have to walk
the components in order to assess the total size, then we allocate the buffer
and copy the elements into it.
*/
/* Right now there is a race-condition in the EE, className can be NULL */
    /* initialize length with the length of className plus one separator character */
if (className)
{
length = strlen(className) + 1;
}
else
{
assert(strlen("<NULL>.") == 7);
length = 7;
}
/* add length of methodName and opening bracket */
length += strlen(methodName) + 1;
CORINFO_SIG_INFO sig;
info->getMethodSig(hnd, &sig);
CORINFO_ARG_LIST_HANDLE argLst = sig.args;
CORINFO_CLASS_HANDLE dummyCls;
for (i = 0; i < sig.numArgs; i++)
{
CorInfoType type = strip(info->getArgType(&sig, argLst, &dummyCls));
length += strlen(CorInfoTypeNames[type]);
argLst = info->getArgNext(argLst);
}
/* add ',' if there is more than one argument */
if (sig.numArgs > 1)
{
length += (sig.numArgs - 1);
}
if (sig.retType != CORINFO_TYPE_VOID)
{
returnType = CorInfoTypeNames[sig.retType];
length += strlen(returnType) + 1; // don't forget the delimiter ':'
}
/* add closing bracket and null terminator */
length += 2;
char* retName = new char[length];
/* Now generate the full signature string in the allocated buffer */
if (className)
{
strcpy_s(retName, length, className);
strcat_s(retName, length, ":");
}
else
{
strcpy_s(retName, length, "<NULL>.");
}
strcat_s(retName, length, methodName);
// append the signature
strcat_s(retName, length, "(");
argLst = sig.args;
for (i = 0; i < sig.numArgs; i++)
{
CorInfoType type = strip(info->getArgType(&sig, argLst, &dummyCls));
strcat_s(retName, length, CorInfoTypeNames[type]);
argLst = info->getArgNext(argLst);
if (i + 1 < sig.numArgs)
{
strcat_s(retName, length, ",");
}
}
strcat_s(retName, length, ")");
if (returnType)
{
strcat_s(retName, length, ":");
strcat_s(retName, length, returnType);
}
assert(strlen(retName) == length - 1);
return(retName);
}
const char* Interpreter::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd)
{
return ::eeGetMethodFullName(&m_interpCeeInfo, hnd);
}
const char* ILOpNames[256*2];
bool ILOpNamesInited = false;
void InitILOpNames()
{
if (!ILOpNamesInited)
{
// Initialize the array.
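        // opcode.def encodes one-byte opcodes with s1 == 0xFF and two-byte (0xFE-prefixed) opcodes
        // with s1 == 0xFE, so subtracting 0xFE00 places two-byte opcodes at indices 0x000-0x0FF and
        // one-byte opcodes at 0x100-0x1FF, matching the lookups in ILOp, ILOp1Byte, and ILOp2Byte below.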
#define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) if (s1 == 0xfe || s1 == 0xff) { int ind ((unsigned(s1) << 8) + unsigned(s2)); ind -= 0xfe00; ILOpNames[ind] = s; }
#include "opcode.def"
#undef OPDEF
ILOpNamesInited = true;
}
}
const char* Interpreter::ILOp(BYTE* m_ILCodePtr)
{
InitILOpNames();
BYTE b = *m_ILCodePtr;
if (b == 0xfe)
{
return ILOpNames[*(m_ILCodePtr + 1)];
}
else
{
return ILOpNames[(0x1 << 8) + b];
}
}
const char* Interpreter::ILOp1Byte(unsigned short ilInstrVal)
{
InitILOpNames();
return ILOpNames[(0x1 << 8) + ilInstrVal];
}
const char* Interpreter::ILOp2Byte(unsigned short ilInstrVal)
{
InitILOpNames();
return ILOpNames[ilInstrVal];
}
void Interpreter::PrintOStack()
{
if (m_curStackHt == 0)
{
fprintf(GetLogFile(), " <empty>\n");
}
else
{
for (unsigned k = 0; k < m_curStackHt; k++)
{
CorInfoType cit = OpStackTypeGet(k).ToCorInfoType();
assert(IsStackNormalType(cit));
fprintf(GetLogFile(), " %4d: %10s: ", k, CorInfoTypeNames[cit]);
PrintOStackValue(k);
fprintf(GetLogFile(), "\n");
}
}
fflush(GetLogFile());
}
void Interpreter::PrintOStackValue(unsigned index)
{
_ASSERTE_MSG(index < m_curStackHt, "precondition");
InterpreterType it = OpStackTypeGet(index);
if (it.IsLargeStruct(&m_interpCeeInfo))
{
PrintValue(it, OpStackGet<BYTE*>(index));
}
else
{
PrintValue(it, reinterpret_cast<BYTE*>(OpStackGetAddr(index, it.Size(&m_interpCeeInfo))));
}
}
void Interpreter::PrintLocals()
{
if (m_methInfo->m_numLocals == 0)
{
fprintf(GetLogFile(), " <no locals>\n");
}
else
{
for (unsigned i = 0; i < m_methInfo->m_numLocals; i++)
{
InterpreterType it = m_methInfo->m_localDescs[i].m_type;
CorInfoType cit = it.ToCorInfoType();
void* localPtr = NULL;
if (it.IsLargeStruct(&m_interpCeeInfo))
{
void* structPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
localPtr = *reinterpret_cast<void**>(structPtr);
}
else
{
localPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), it.Size(&m_interpCeeInfo));
}
fprintf(GetLogFile(), " loc%-4d: %10s: ", i, CorInfoTypeNames[cit]);
PrintValue(it, reinterpret_cast<BYTE*>(localPtr));
fprintf(GetLogFile(), "\n");
}
}
fflush(GetLogFile());
}
void Interpreter::PrintArgs()
{
for (unsigned k = 0; k < m_methInfo->m_numArgs; k++)
{
CorInfoType cit = GetArgType(k).ToCorInfoType();
fprintf(GetLogFile(), " %4d: %10s: ", k, CorInfoTypeNames[cit]);
PrintArgValue(k);
fprintf(GetLogFile(), "\n");
}
fprintf(GetLogFile(), "\n");
fflush(GetLogFile());
}
void Interpreter::PrintArgValue(unsigned argNum)
{
_ASSERTE_MSG(argNum < m_methInfo->m_numArgs, "precondition");
InterpreterType it = GetArgType(argNum);
PrintValue(it, GetArgAddr(argNum));
}
// Note that this is used to print non-stack-normal values, so
// it must handle all cases.
void Interpreter::PrintValue(InterpreterType it, BYTE* valAddr)
{
switch (it.ToCorInfoType())
{
case CORINFO_TYPE_BOOL:
fprintf(GetLogFile(), "%s", ((*reinterpret_cast<INT8*>(valAddr)) ? "true" : "false"));
break;
case CORINFO_TYPE_BYTE:
fprintf(GetLogFile(), "%d", *reinterpret_cast<INT8*>(valAddr));
break;
case CORINFO_TYPE_UBYTE:
fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT8*>(valAddr));
break;
case CORINFO_TYPE_SHORT:
fprintf(GetLogFile(), "%d", *reinterpret_cast<INT16*>(valAddr));
break;
case CORINFO_TYPE_USHORT: case CORINFO_TYPE_CHAR:
fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT16*>(valAddr));
break;
case CORINFO_TYPE_INT:
fprintf(GetLogFile(), "%d", *reinterpret_cast<INT32*>(valAddr));
break;
case CORINFO_TYPE_UINT:
fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT32*>(valAddr));
break;
case CORINFO_TYPE_NATIVEINT:
{
INT64 val = static_cast<INT64>(*reinterpret_cast<NativeInt*>(valAddr));
fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
}
break;
case CORINFO_TYPE_NATIVEUINT:
{
UINT64 val = static_cast<UINT64>(*reinterpret_cast<NativeUInt*>(valAddr));
fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
}
break;
case CORINFO_TYPE_BYREF:
fprintf(GetLogFile(), "0x%p", *reinterpret_cast<void**>(valAddr));
break;
case CORINFO_TYPE_LONG:
{
INT64 val = *reinterpret_cast<INT64*>(valAddr);
fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
}
break;
case CORINFO_TYPE_ULONG:
fprintf(GetLogFile(), "%lld", *reinterpret_cast<UINT64*>(valAddr));
break;
case CORINFO_TYPE_CLASS:
{
Object* obj = *reinterpret_cast<Object**>(valAddr);
if (obj == NULL)
{
fprintf(GetLogFile(), "null");
}
else
{
#ifdef _DEBUG
fprintf(GetLogFile(), "0x%p (%s) [", obj, obj->GetMethodTable()->GetDebugClassName());
#else
fprintf(GetLogFile(), "0x%p (MT=0x%p) [", obj, obj->GetMethodTable());
#endif
unsigned sz = obj->GetMethodTable()->GetBaseSize();
BYTE* objBytes = reinterpret_cast<BYTE*>(obj);
for (unsigned i = 0; i < sz; i++)
{
if (i > 0)
{
fprintf(GetLogFile(), " ");
}
fprintf(GetLogFile(), "0x%x", objBytes[i]);
}
fprintf(GetLogFile(), "]");
}
}
break;
case CORINFO_TYPE_VALUECLASS:
{
GCX_PREEMP();
fprintf(GetLogFile(), "<%s>: [", m_interpCeeInfo.getClassName(it.ToClassHandle()));
unsigned sz = getClassSize(it.ToClassHandle());
for (unsigned i = 0; i < sz; i++)
{
if (i > 0)
{
fprintf(GetLogFile(), " ");
}
fprintf(GetLogFile(), "0x%02x", valAddr[i]);
}
fprintf(GetLogFile(), "]");
}
break;
case CORINFO_TYPE_REFANY:
fprintf(GetLogFile(), "<refany>");
break;
case CORINFO_TYPE_FLOAT:
fprintf(GetLogFile(), "%f", *reinterpret_cast<float*>(valAddr));
break;
case CORINFO_TYPE_DOUBLE:
fprintf(GetLogFile(), "%g", *reinterpret_cast<double*>(valAddr));
break;
case CORINFO_TYPE_PTR:
fprintf(GetLogFile(), "0x%p", *reinterpret_cast<void**>(valAddr));
break;
default:
_ASSERTE_MSG(false, "Unknown type in PrintValue.");
break;
}
}
#endif // INTERP_TRACING
#ifdef _DEBUG
void Interpreter::AddInterpMethInfo(InterpreterMethodInfo* methInfo)
{
typedef InterpreterMethodInfo* InterpreterMethodInfoPtr;
// TODO: this requires synchronization.
const unsigned InitSize = 128;
if (s_interpMethInfos == NULL)
{
s_interpMethInfos = new InterpreterMethodInfoPtr[InitSize];
s_interpMethInfosAllocSize = InitSize;
}
if (s_interpMethInfosAllocSize == s_interpMethInfosCount)
{
unsigned newSize = s_interpMethInfosAllocSize * 2;
InterpreterMethodInfoPtr* tmp = new InterpreterMethodInfoPtr[newSize];
memcpy(tmp, s_interpMethInfos, s_interpMethInfosCount * sizeof(InterpreterMethodInfoPtr));
delete[] s_interpMethInfos;
s_interpMethInfos = tmp;
s_interpMethInfosAllocSize = newSize;
}
s_interpMethInfos[s_interpMethInfosCount] = methInfo;
s_interpMethInfosCount++;
}
int _cdecl Interpreter::CompareMethInfosByInvocations(const void* mi0in, const void* mi1in)
{
const InterpreterMethodInfo* mi0 = *((const InterpreterMethodInfo**)mi0in);
const InterpreterMethodInfo* mi1 = *((const InterpreterMethodInfo**)mi1in);
if (mi0->m_invocations < mi1->m_invocations)
{
return -1;
}
else if (mi0->m_invocations == mi1->m_invocations)
{
return 0;
}
else
{
assert(mi0->m_invocations > mi1->m_invocations);
return 1;
}
}
#if INTERP_PROFILE
int _cdecl Interpreter::CompareMethInfosByILInstrs(const void* mi0in, const void* mi1in)
{
const InterpreterMethodInfo* mi0 = *((const InterpreterMethodInfo**)mi0in);
const InterpreterMethodInfo* mi1 = *((const InterpreterMethodInfo**)mi1in);
if (mi0->m_totIlInstructionsExeced < mi1->m_totIlInstructionsExeced) return 1;
else if (mi0->m_totIlInstructionsExeced == mi1->m_totIlInstructionsExeced) return 0;
else
{
assert(mi0->m_totIlInstructionsExeced > mi1->m_totIlInstructionsExeced);
return -1;
}
}
#endif // INTERP_PROFILE
#endif // _DEBUG
const int MIL = 1000000;
// Leaving this disabled for now.
#if 0
unsigned __int64 ForceSigWalkCycles = 0;
#endif
void Interpreter::PrintPostMortemData()
{
if (s_PrintPostMortemFlag.val(CLRConfig::INTERNAL_InterpreterPrintPostMortem) == 0)
return;
// Otherwise...
#ifdef _DEBUG
    // Let's print two things: a histogram of how many methods were invoked 0-10 (or more) times, and,
    // for each 10% of methods, the cumulative % of invocations they represent (stepping by 1% for the last 10%).
// First one doesn't require any sorting.
const unsigned HistoMax = 11;
unsigned histo[HistoMax];
unsigned numExecs[HistoMax];
for (unsigned k = 0; k < HistoMax; k++)
{
histo[k] = 0; numExecs[k] = 0;
}
for (unsigned k = 0; k < s_interpMethInfosCount; k++)
{
unsigned invokes = s_interpMethInfos[k]->m_invocations;
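        // Clamp into the last bucket: HistoMax - 1 stands for "10 or more" invocations.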
if (invokes > HistoMax - 1)
{
invokes = HistoMax - 1;
}
histo[invokes]++;
numExecs[invokes] += s_interpMethInfos[k]->m_invocations;
}
fprintf(GetLogFile(), "Histogram of method executions:\n");
fprintf(GetLogFile(), " # of execs | # meths (%%) | cum %% | %% cum execs\n");
fprintf(GetLogFile(), " -------------------------------------------------------\n");
float fTotMeths = float(s_interpMethInfosCount);
float fTotExecs = float(s_totalInvocations);
float numPct = 0.0f;
float numExecPct = 0.0f;
for (unsigned k = 0; k < HistoMax; k++)
{
fprintf(GetLogFile(), " %10d", k);
        if (k == HistoMax - 1)
{
fprintf(GetLogFile(), "+ ");
}
else
{
fprintf(GetLogFile(), " ");
}
float pct = float(histo[k])*100.0f/fTotMeths;
numPct += pct;
float execPct = float(numExecs[k])*100.0f/fTotExecs;
numExecPct += execPct;
fprintf(GetLogFile(), "| %7d (%5.2f%%) | %6.2f%% | %6.2f%%\n", histo[k], pct, numPct, numExecPct);
}
// This sorts them in ascending order of number of invocations.
qsort(&s_interpMethInfos[0], s_interpMethInfosCount, sizeof(InterpreterMethodInfo*), &CompareMethInfosByInvocations);
fprintf(GetLogFile(), "\nFor methods sorted in ascending # of executions order, cumulative %% of executions:\n");
if (s_totalInvocations > 0)
{
fprintf(GetLogFile(), " %% of methods | max execs | cum %% of execs\n");
fprintf(GetLogFile(), " ------------------------------------------\n");
unsigned methNum = 0;
unsigned nNumExecs = 0;
float totExecsF = float(s_totalInvocations);
for (unsigned k = 10; k < 100; k += 10)
{
unsigned targ = unsigned((float(k)/100.0f)*float(s_interpMethInfosCount));
unsigned targLess1 = (targ > 0 ? targ - 1 : 0);
while (methNum < targ)
{
nNumExecs += s_interpMethInfos[methNum]->m_invocations;
methNum++;
}
float pctExecs = float(nNumExecs) * 100.0f / totExecsF;
fprintf(GetLogFile(), " %8d%% | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
if (k == 90)
{
k++;
for (; k < 100; k++)
{
unsigned targ = unsigned((float(k)/100.0f)*float(s_interpMethInfosCount));
while (methNum < targ)
{
nNumExecs += s_interpMethInfos[methNum]->m_invocations;
methNum++;
}
pctExecs = float(nNumExecs) * 100.0f / totExecsF;
fprintf(GetLogFile(), " %8d%% | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
}
// Now do 100%.
targ = s_interpMethInfosCount;
while (methNum < targ)
{
nNumExecs += s_interpMethInfos[methNum]->m_invocations;
methNum++;
}
pctExecs = float(nNumExecs) * 100.0f / totExecsF;
fprintf(GetLogFile(), " %8d%% | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
}
}
}
fprintf(GetLogFile(), "\nTotal number of calls from interpreted code: %d.\n", s_totalInterpCalls);
fprintf(GetLogFile(), " Also, %d are intrinsics; %d of these are not currently handled intrinsically.\n",
s_totalInterpCallsToIntrinsics, s_totalInterpCallsToIntrinsicsUnhandled);
fprintf(GetLogFile(), " Of these, %d to potential property getters (%d of these dead simple), %d to setters.\n",
s_totalInterpCallsToGetters, s_totalInterpCallsToDeadSimpleGetters, s_totalInterpCallsToSetters);
fprintf(GetLogFile(), " Of the dead simple getter calls, %d have been short-circuited.\n",
s_totalInterpCallsToDeadSimpleGettersShortCircuited);
fprintf(GetLogFile(), "\nToken resolutions by category:\n");
fprintf(GetLogFile(), "Category | opportunities | calls | %%\n");
fprintf(GetLogFile(), "---------------------------------------------------\n");
for (unsigned i = RTK_Undefined; i < RTK_Count; i++)
{
float pct = 0.0;
if (s_tokenResolutionOpportunities[i] > 0)
pct = 100.0f * float(s_tokenResolutionCalls[i]) / float(s_tokenResolutionOpportunities[i]);
fprintf(GetLogFile(), "%12s | %15d | %9d | %6.2f%%\n",
s_tokenResolutionKindNames[i], s_tokenResolutionOpportunities[i], s_tokenResolutionCalls[i], pct);
}
#if INTERP_PROFILE
fprintf(GetLogFile(), "Information on num of execs:\n");
UINT64 totILInstrs = 0;
for (unsigned i = 0; i < s_interpMethInfosCount; i++) totILInstrs += s_interpMethInfos[i]->m_totIlInstructionsExeced;
float totILInstrsF = float(totILInstrs);
fprintf(GetLogFile(), "\nTotal instructions = %lld.\n", totILInstrs);
fprintf(GetLogFile(), "\nTop <=10 methods by # of IL instructions executed.\n");
fprintf(GetLogFile(), "%10s | %9s | %10s | %10s | %8s | %s\n", "tot execs", "# invokes", "code size", "ratio", "% of tot", "Method");
fprintf(GetLogFile(), "----------------------------------------------------------------------------\n");
qsort(&s_interpMethInfos[0], s_interpMethInfosCount, sizeof(InterpreterMethodInfo*), &CompareMethInfosByILInstrs);
for (unsigned i = 0; i < min(10, s_interpMethInfosCount); i++)
{
unsigned ilCodeSize = unsigned(s_interpMethInfos[i]->m_ILCodeEnd - s_interpMethInfos[i]->m_ILCode);
fprintf(GetLogFile(), "%10lld | %9d | %10d | %10.2f | %8.2f%% | %s:%s\n",
s_interpMethInfos[i]->m_totIlInstructionsExeced,
s_interpMethInfos[i]->m_invocations,
ilCodeSize,
float(s_interpMethInfos[i]->m_totIlInstructionsExeced) / float(ilCodeSize),
float(s_interpMethInfos[i]->m_totIlInstructionsExeced) * 100.0f / totILInstrsF,
s_interpMethInfos[i]->m_clsName,
s_interpMethInfos[i]->m_methName);
}
#endif // INTERP_PROFILE
#endif // _DEBUG
#if INTERP_ILINSTR_PROFILE
fprintf(GetLogFile(), "\nIL instruction profiling:\n");
// First, classify by categories.
unsigned totInstrs = 0;
#if INTERP_ILCYCLE_PROFILE
unsigned __int64 totCycles = 0;
unsigned __int64 perMeasurementOverhead = CycleTimer::QueryOverhead();
#endif // INTERP_ILCYCLE_PROFILE
for (unsigned i = 0; i < 256; i++)
{
s_ILInstrExecsByCategory[s_ILInstrCategories[i]] += s_ILInstrExecs[i];
totInstrs += s_ILInstrExecs[i];
#if INTERP_ILCYCLE_PROFILE
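        // Correct each instruction's cycle total for the estimated per-measurement timer overhead,
        // clamping at zero so the correction never goes negative.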
unsigned __int64 cycles = s_ILInstrCycles[i];
if (cycles > s_ILInstrExecs[i] * perMeasurementOverhead) cycles -= s_ILInstrExecs[i] * perMeasurementOverhead;
else cycles = 0;
s_ILInstrCycles[i] = cycles;
s_ILInstrCyclesByCategory[s_ILInstrCategories[i]] += cycles;
totCycles += cycles;
#endif // INTERP_ILCYCLE_PROFILE
}
unsigned totInstrs2Byte = 0;
#if INTERP_ILCYCLE_PROFILE
unsigned __int64 totCycles2Byte = 0;
#endif // INTERP_ILCYCLE_PROFILE
for (unsigned i = 0; i < CountIlInstr2Byte; i++)
{
unsigned ind = 0x100 + i;
s_ILInstrExecsByCategory[s_ILInstrCategories[ind]] += s_ILInstr2ByteExecs[i];
totInstrs += s_ILInstr2ByteExecs[i];
totInstrs2Byte += s_ILInstr2ByteExecs[i];
#if INTERP_ILCYCLE_PROFILE
        unsigned __int64 cycles = s_ILInstrCycles[ind];
        // Correct using the two-byte execution counts and store the result back into the two-byte slot (index ind).
        if (cycles > s_ILInstr2ByteExecs[i] * perMeasurementOverhead) cycles -= s_ILInstr2ByteExecs[i] * perMeasurementOverhead;
        else cycles = 0;
        s_ILInstrCycles[ind] = cycles;
s_ILInstrCyclesByCategory[s_ILInstrCategories[ind]] += cycles;
totCycles += cycles;
totCycles2Byte += cycles;
#endif // INTERP_ILCYCLE_PROFILE
}
    // Now sort the individual instruction records by # of occurrences (the category grouping is built and sorted the same way below).
InstrExecRecord ieps[256 + CountIlInstr2Byte];
for (unsigned short i = 0; i < 256; i++)
{
ieps[i].m_instr = i; ieps[i].m_is2byte = false; ieps[i].m_execs = s_ILInstrExecs[i];
#if INTERP_ILCYCLE_PROFILE
if (i == CEE_BREAK)
{
ieps[i].m_cycles = 0;
continue; // Don't count these if they occur...
}
ieps[i].m_cycles = s_ILInstrCycles[i];
assert((ieps[i].m_execs != 0) || (ieps[i].m_cycles == 0)); // Cycles can be zero for non-zero execs because of measurement correction.
#endif // INTERP_ILCYCLE_PROFILE
}
for (unsigned short i = 0; i < CountIlInstr2Byte; i++)
{
int ind = 256 + i;
ieps[ind].m_instr = i; ieps[ind].m_is2byte = true; ieps[ind].m_execs = s_ILInstr2ByteExecs[i];
#if INTERP_ILCYCLE_PROFILE
ieps[ind].m_cycles = s_ILInstrCycles[ind];
        assert((ieps[ind].m_execs != 0) || (ieps[ind].m_cycles == 0)); // Cycles can be zero for non-zero execs because of measurement correction.
#endif // INTERP_ILCYCLE_PROFILE
}
qsort(&ieps[0], 256 + CountIlInstr2Byte, sizeof(InstrExecRecord), &InstrExecRecord::Compare);
fprintf(GetLogFile(), "\nInstructions (%d total, %d 1-byte):\n", totInstrs, totInstrs - totInstrs2Byte);
#if INTERP_ILCYCLE_PROFILE
if (s_callCycles > s_calls * perMeasurementOverhead) s_callCycles -= s_calls * perMeasurementOverhead;
else s_callCycles = 0;
fprintf(GetLogFile(), " MCycles (%lld total, %lld 1-byte, %lld calls (%d calls, %10.2f cyc/call):\n",
totCycles/MIL, (totCycles - totCycles2Byte)/MIL, s_callCycles/MIL, s_calls, float(s_callCycles)/float(s_calls));
#if 0
extern unsigned __int64 MetaSigCtor1Cycles;
fprintf(GetLogFile(), " MetaSig(MethodDesc, TypeHandle) ctor: %lld MCycles.\n",
MetaSigCtor1Cycles/MIL);
fprintf(GetLogFile(), " ForceSigWalk: %lld MCycles.\n",
ForceSigWalkCycles/MIL);
#endif
#endif // INTERP_ILCYCLE_PROFILE
PrintILProfile(&ieps[0], totInstrs
#if INTERP_ILCYCLE_PROFILE
, totCycles
#endif // INTERP_ILCYCLE_PROFILE
);
fprintf(GetLogFile(), "\nInstructions grouped by category: (%d total, %d 1-byte):\n", totInstrs, totInstrs - totInstrs2Byte);
#if INTERP_ILCYCLE_PROFILE
fprintf(GetLogFile(), " MCycles (%lld total, %lld 1-byte):\n",
totCycles/MIL, (totCycles - totCycles2Byte)/MIL);
#endif // INTERP_ILCYCLE_PROFILE
for (unsigned short i = 0; i < 256 + CountIlInstr2Byte; i++)
{
if (i < 256)
{
ieps[i].m_instr = i; ieps[i].m_is2byte = false;
}
else
{
ieps[i].m_instr = i - 256; ieps[i].m_is2byte = true;
}
ieps[i].m_execs = s_ILInstrExecsByCategory[i];
#if INTERP_ILCYCLE_PROFILE
ieps[i].m_cycles = s_ILInstrCyclesByCategory[i];
#endif // INTERP_ILCYCLE_PROFILE
}
qsort(&ieps[0], 256 + CountIlInstr2Byte, sizeof(InstrExecRecord), &InstrExecRecord::Compare);
PrintILProfile(&ieps[0], totInstrs
#if INTERP_ILCYCLE_PROFILE
, totCycles
#endif // INTERP_ILCYCLE_PROFILE
);
#if 0
// Early debugging code.
fprintf(GetLogFile(), "\nInstructions grouped category mapping:\n", totInstrs, totInstrs - totInstrs2Byte);
for (unsigned short i = 0; i < 256; i++)
{
unsigned short cat = s_ILInstrCategories[i];
if (cat < 256) {
fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp1Byte(i), ILOp1Byte(cat));
} else {
fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp1Byte(i), ILOp2Byte(cat - 256));
}
}
for (unsigned short i = 0; i < CountIlInstr2Byte; i++)
{
unsigned ind = 256 + i;
unsigned short cat = s_ILInstrCategories[ind];
if (cat < 256) {
fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp2Byte(i), ILOp1Byte(cat));
} else {
fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp2Byte(i), ILOp2Byte(cat - 256));
}
}
#endif
#endif // INTERP_ILINSTR_PROFILE
}
#if INTERP_ILINSTR_PROFILE
const int K = 1000;
// static
void Interpreter::PrintILProfile(Interpreter::InstrExecRecord *recs, unsigned int totInstrs
#if INTERP_ILCYCLE_PROFILE
, unsigned __int64 totCycles
#endif // INTERP_ILCYCLE_PROFILE
)
{
float fTotInstrs = float(totInstrs);
fprintf(GetLogFile(), "Instruction | execs | %% | cum %%");
#if INTERP_ILCYCLE_PROFILE
float fTotCycles = float(totCycles);
fprintf(GetLogFile(), "| KCycles | %% | cum %% | cyc/inst\n");
fprintf(GetLogFile(), "--------------------------------------------------"
"-----------------------------------------\n");
#else
fprintf(GetLogFile(), "\n-------------------------------------------\n");
#endif
float numPct = 0.0f;
#if INTERP_ILCYCLE_PROFILE
float numCyclePct = 0.0f;
#endif // INTERP_ILCYCLE_PROFILE
for (unsigned i = 0; i < 256 + CountIlInstr2Byte; i++)
{
float pct = 0.0f;
if (totInstrs > 0) pct = float(recs[i].m_execs) * 100.0f / fTotInstrs;
numPct += pct;
if (recs[i].m_execs > 0)
{
fprintf(GetLogFile(), "%12s | %9d | %6.2f%% | %6.2f%%",
(recs[i].m_is2byte ? ILOp2Byte(recs[i].m_instr) : ILOp1Byte(recs[i].m_instr)), recs[i].m_execs,
pct, numPct);
#if INTERP_ILCYCLE_PROFILE
pct = 0.0f;
if (totCycles > 0) pct = float(recs[i].m_cycles) * 100.0f / fTotCycles;
numCyclePct += pct;
float cyclesPerInst = float(recs[i].m_cycles) / float(recs[i].m_execs);
fprintf(GetLogFile(), "| %12llu | %6.2f%% | %6.2f%% | %11.2f",
recs[i].m_cycles/K, pct, numCyclePct, cyclesPerInst);
#endif // INTERP_ILCYCLE_PROFILE
fprintf(GetLogFile(), "\n");
}
}
}
#endif // INTERP_ILINSTR_PROFILE
#endif // FEATURE_INTERPRETER
| qiudesong/coreclr | src/vm/interpreter.cpp | C++ | mit | 418,371 |
using System;
using System.Collections.Generic;
using FFImageLoading.Transformations;
using FFImageLoading.Work;
namespace FFImageLoading.MvvmCross.Sample.Core
{
public class Image
{
public string Url { get; }
public double DownsampleWidth => 200d;
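        // Note: Transformations is an expression-bodied getter, so every access returns a fresh list containing a new CircleTransformation.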
public List<ITransformation> Transformations => new List<ITransformation> { new CircleTransformation() };
public Image(string url)
{
Url = url;
}
}
}
| luberda-molinet/FFImageLoading | samples/ImageLoading.MvvmCross.Sample/FFImageLoading.MvvmCross.Sample.Core/ViewModels/Image.cs | C# | mit | 476 |